Add 25/40/100 Gigabit Ethernet driver version v1.3.0 for Cavium Inc.'s QLogic 45000 Series Adapters.

MFC after: 2 weeks
David C Somayajulu 2017-04-04 06:16:59 +00:00
parent bf10f246fe
commit 11e25f0da3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=316485
97 changed files with 226248 additions and 0 deletions


@@ -426,6 +426,7 @@ MAN= aac.4 \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
${_qlnxe.4} \
ral.4 \
random.4 \
rc.4 \
@@ -846,11 +847,13 @@ _ntb_transport.4=ntb_transport.4
_qlxge.4= qlxge.4
_qlxgb.4= qlxgb.4
_qlxgbe.4= qlxgbe.4
_qlnxe.4= qlnxe.4
_sfxge.4= sfxge.4
MLINKS+=qlxge.4 if_qlxge.4
MLINKS+=qlxgb.4 if_qlxgb.4
MLINKS+=qlxgbe.4 if_qlxgbe.4
MLINKS+=qlnxe.4 if_qlnxe.4
MLINKS+=sfxge.4 if_sfxge.4
.if ${MK_BHYVE} != "no"

share/man/man4/qlnxe.4 (new file, 90 lines)

@@ -0,0 +1,90 @@
.\"-
.\" Copyright (c) 2017 Cavium Inc.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd March 24, 2017
.Dt QLNXE 4
.Os
.Sh NAME
.Nm qlnxe
.Nd "Cavium 25/40/100 Gigabit Ethernet & CNA Adapter Driver"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device qlnxe"
.Ed
.Pp
To load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
if_qlnxe_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
driver supports IPv4 checksum offload,
TCP and UDP checksum offload for both IPv4 and IPv6,
Large Segment Offload for both IPv4 and IPv6,
Jumbo frames, VLAN tagging, Receive Side Scaling (RSS), and both hardware and software LRO.
For further hardware information, see
.Pa http://www.qlogic.com/ .
.Sh HARDWARE
The
.Nm
driver supports 25/40/100 Gigabit Ethernet and CNA adapters based on the following
chipsets:
.Pp
.Bl -bullet -compact
.It
QLogic 45000 series
.El
.Sh SUPPORT
For support questions, please contact your Cavium-approved reseller or
Cavium Technical Support at
.Pa http://support.qlogic.com ,
or by E-mail at
.Aq Mt support@qlogic.com .
.Sh SEE ALSO
.Xr altq 4 ,
.Xr arp 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,
.Xr ifconfig 8
.Sh HISTORY
The
.Nm
device driver first appeared in
.Fx 12.0 .
.Sh AUTHORS
.An -nosplit
The
.Nm
driver was written by
.An David C Somayajulu
at Cavium Inc.


@@ -364,6 +364,20 @@ dev/qlxgbe/ql_isr.c optional qlxgbe pci
dev/qlxgbe/ql_misc.c optional qlxgbe pci
dev/qlxgbe/ql_os.c optional qlxgbe pci
dev/qlxgbe/ql_reset.c optional qlxgbe pci
dev/qlnx/qlnxe/ecore_cxt.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_dbg_fw_funcs.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_dcbx.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_dev.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_hw.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_init_fw_funcs.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_init_ops.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_int.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_l2.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_mcp.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_sp_commands.c optional qlnxe pci
dev/qlnx/qlnxe/ecore_spq.c optional qlnxe pci
dev/qlnx/qlnxe/qlnx_ioctl.c optional qlnxe pci
dev/qlnx/qlnxe/qlnx_os.c optional qlnxe pci
dev/sfxge/common/ef10_ev.c optional sfxge pci
dev/sfxge/common/ef10_filter.c optional sfxge pci
dev/sfxge/common/ef10_intr.c optional sfxge pci


@@ -0,0 +1,527 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __BCM_OSAL_ECORE_PACKAGE
#define __BCM_OSAL_ECORE_PACKAGE
#include "qlnx_os.h"
#include "ecore_status.h"
#include <sys/bitstring.h>
#if __FreeBSD_version >= 1100090
#include <compat/linuxkpi/common/include/linux/bitops.h>
#else
#include <ofed/include/linux/bitops.h>
#endif
/*
* Prototypes of FreeBSD-specific functions required by ecore
*/
extern uint32_t qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id);
extern uint32_t qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg,
uint8_t *reg_value);
extern uint32_t qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
uint16_t *reg_value);
extern uint32_t qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
uint32_t *reg_value);
extern void qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg,
uint8_t reg_value);
extern void qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
uint16_t reg_value);
extern void qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
uint32_t reg_value);
extern int qlnx_pci_find_capability(void *ecore_dev, int cap);
extern uint32_t qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr);
extern void qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value);
extern uint32_t qlnx_reg_rd32(void *p_hwfn, uint32_t reg_addr);
extern void qlnx_reg_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value);
extern void qlnx_reg_wr16(void *p_hwfn, uint32_t reg_addr, uint16_t value);
extern void qlnx_dbell_wr32(void *p_hwfn, uint32_t reg_addr, uint32_t value);
extern void *qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys,
uint32_t size);
extern void qlnx_dma_free_coherent(void *ecore_dev, void *v_addr,
bus_addr_t phys, uint32_t size);
extern void qlnx_link_update(void *p_hwfn);
extern void qlnx_barrier(void *p_hwfn);
extern void *qlnx_zalloc(uint32_t size);
extern void qlnx_get_protocol_stats(void *cdev, int proto_type,
void *proto_stats);
extern void qlnx_sp_isr(void *arg);
#ifdef ECORE_PACKAGE
/* Memory Types */
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define u64 uint64_t
#define s16 int16_t
#define s32 int32_t
#ifndef QLNX_RDMA
typedef uint16_t __le16;
typedef uint32_t __le32;
typedef uint16_t __be16;
typedef uint32_t __be32;
static __inline unsigned long
roundup_pow_of_two(unsigned long x)
{
return (1UL << flsl(x - 1));
}
static __inline int
is_power_of_2(unsigned long n)
{
return (n == roundup_pow_of_two(n));
}
static __inline unsigned long
rounddown_pow_of_two(unsigned long x)
{
return (1UL << (flsl(x) - 1));
}
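/*
 * Worked example (editorial, not part of the original header): the helpers
 * above behave as follows:
 *   roundup_pow_of_two(33)   == 64
 *   rounddown_pow_of_two(33) == 32
 *   is_power_of_2(64) != 0,  is_power_of_2(48) == 0
 * Note that roundup_pow_of_two(1) == 1, since flsl(0) == 0.
 */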
#define max_t(type, val1, val2) \
((type)(val1) > (type)(val2) ? (type)(val1) : (val2))
#define min_t(type, val1, val2) \
((type)(val1) < (type)(val2) ? (type)(val1) : (val2))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define nothing do {} while(0)
#define BUILD_BUG_ON(cond) nothing
#endif /* #ifndef QLNX_RDMA */
#define OSAL_CPU_TO_BE64(val) htobe64(val)
#define OSAL_BE64_TO_CPU(val) be64toh(val)
#define OSAL_CPU_TO_BE32(val) htobe32(val)
#define OSAL_BE32_TO_CPU(val) be32toh(val)
#define OSAL_CPU_TO_LE32(val) htole32(val)
#define OSAL_LE32_TO_CPU(val) le32toh(val)
#define OSAL_CPU_TO_BE16(val) htobe16(val)
#define OSAL_BE16_TO_CPU(val) be16toh(val)
#define OSAL_CPU_TO_LE16(val) htole16(val)
#define OSAL_LE16_TO_CPU(val) le16toh(val)
#define OSAL_CACHE_LINE_SIZE CACHE_LINE_SIZE
#define OSAL_BE32 uint32_t
#define dma_addr_t bus_addr_t
#define osal_size_t size_t
typedef struct mtx osal_spinlock_t;
typedef struct mtx osal_mutex_t;
typedef void * osal_dpc_t;
typedef struct _osal_list_entry_t
{
struct _osal_list_entry_t *next, *prev;
} osal_list_entry_t;
typedef struct osal_list_t
{
osal_list_entry_t *head, *tail;
unsigned long cnt;
} osal_list_t;
/* OSAL functions */
#define OSAL_UDELAY(time) DELAY(time)
#define OSAL_MSLEEP(time) qlnx_mdelay(__func__, time)
#define OSAL_ALLOC(dev, GFP, size) qlnx_zalloc(size)
#define OSAL_ZALLOC(dev, GFP, size) qlnx_zalloc(size)
#define OSAL_VALLOC(dev, size) qlnx_zalloc(size)
#define OSAL_VZALLOC(dev, size) qlnx_zalloc(size)
#define OSAL_FREE(dev, memory) free(memory, M_QLNXBUF)
#define OSAL_VFREE(dev, memory) free(memory, M_QLNXBUF)
#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
#define OSAL_MEMCPY(dst, src, size) memcpy(dst, src, size)
#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \
qlnx_dma_alloc_coherent(dev, phys, size)
#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) \
qlnx_dma_free_coherent(dev, virt, phys, size)
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define REG_WR(hwfn, addr, val) qlnx_reg_wr32(hwfn, addr, val)
#define REG_WR16(hwfn, addr, val) qlnx_reg_wr16(hwfn, addr, val)
#define DIRECT_REG_WR(p_hwfn, addr, value) qlnx_direct_reg_wr32(p_hwfn, addr, value)
#define DIRECT_REG_RD(p_hwfn, addr) qlnx_direct_reg_rd32(p_hwfn, addr)
#define REG_RD(hwfn, addr) qlnx_reg_rd32(hwfn, addr)
#define DOORBELL(hwfn, addr, value) \
qlnx_dbell_wr32(hwfn, addr, value)
#define OSAL_SPIN_LOCK_ALLOC(p_hwfn, mutex)
#define OSAL_SPIN_LOCK_DEALLOC(mutex) mtx_destroy(mutex)
#define OSAL_SPIN_LOCK_INIT(lock) {\
mtx_init(lock, __func__, MTX_NETWORK_LOCK, MTX_SPIN); \
}
#define OSAL_SPIN_UNLOCK(lock) {\
mtx_unlock(lock); \
}
#define OSAL_SPIN_LOCK(lock) {\
mtx_lock(lock); \
}
#define OSAL_MUTEX_ALLOC(p_hwfn, mutex)
#define OSAL_MUTEX_DEALLOC(mutex) mtx_destroy(mutex)
#define OSAL_MUTEX_INIT(lock) {\
mtx_init(lock, __func__, MTX_NETWORK_LOCK, MTX_DEF);\
}
#define OSAL_MUTEX_ACQUIRE(lock) mtx_lock(lock)
#define OSAL_MUTEX_RELEASE(lock) mtx_unlock(lock)
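/*
 * Illustrative sketch (editorial; qlnx_osal_mutex_example() is a
 * hypothetical helper, not part of the driver): the mutex wrappers map
 * directly onto mtx(9), so a guarded update looks like plain mtx usage.
 */
static __inline void
qlnx_osal_mutex_example(osal_mutex_t *mtx, uint32_t *counter)
{
	OSAL_MUTEX_INIT(mtx);
	OSAL_MUTEX_ACQUIRE(mtx);
	(*counter)++;			/* critical section */
	OSAL_MUTEX_RELEASE(mtx);
	OSAL_MUTEX_DEALLOC(mtx);
}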
#define OSAL_DPC_ALLOC(hwfn) malloc(PAGE_SIZE, M_QLNXBUF, M_NOWAIT)
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_SCHEDULE_RECOVERY_HANDLER(x) nothing
#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing
#define OSAL_DPC_SYNC(hwfn) nothing
static inline void OSAL_DCBX_AEN(void *p_hwfn, u32 mib_type)
{
return;
}
static inline bool OSAL_NVM_IS_ACCESS_ENABLED(void *p_hwfn)
{
return 1;
}
#define OSAL_LIST_INIT(list) \
do { \
(list)->head = NULL; \
(list)->tail = NULL; \
(list)->cnt = 0; \
} while (0)
#define OSAL_LIST_INSERT_ENTRY_AFTER(entry, entry_prev, list) \
do { \
(entry)->prev = (entry_prev); \
(entry)->next = (entry_prev)->next; \
(entry)->next->prev = (entry); \
(entry_prev)->next = (entry); \
(list)->cnt++; \
} while (0);
#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) \
do { \
((new_list)->tail)->next = ((list)->head); \
((list)->head)->prev = ((new_list)->tail); \
(list)->head = (new_list)->head; \
(list)->cnt = (list)->cnt + (new_list)->cnt; \
OSAL_LIST_INIT(new_list); \
} while (0);
#define OSAL_LIST_PUSH_HEAD(entry, list) \
do { \
(entry)->prev = (osal_list_entry_t *)0; \
(entry)->next = (list)->head; \
if ((list)->tail == (osal_list_entry_t *)0) { \
(list)->tail = (entry); \
} else { \
(list)->head->prev = (entry); \
} \
(list)->head = (entry); \
(list)->cnt++; \
} while (0)
#define OSAL_LIST_PUSH_TAIL(entry, list) \
do { \
(entry)->next = (osal_list_entry_t *)0; \
(entry)->prev = (list)->tail; \
if ((list)->tail) { \
(list)->tail->next = (entry); \
} else { \
(list)->head = (entry); \
} \
(list)->tail = (entry); \
(list)->cnt++; \
} while (0)
#define OSAL_LIST_FIRST_ENTRY(list, type, field) \
(type *)((list)->head)
#define OSAL_LIST_REMOVE_ENTRY(entry, list) \
do { \
if ((list)->head == (entry)) { \
if ((list)->head) { \
(list)->head = (list)->head->next; \
if ((list)->head) { \
(list)->head->prev = (osal_list_entry_t *)0; \
} else { \
(list)->tail = (osal_list_entry_t *)0; \
} \
(list)->cnt--; \
} \
} else if ((list)->tail == (entry)) { \
if ((list)->tail) { \
(list)->tail = (list)->tail->prev; \
if ((list)->tail) { \
(list)->tail->next = (osal_list_entry_t *)0; \
} else { \
(list)->head = (osal_list_entry_t *)0; \
} \
(list)->cnt--; \
} \
} else { \
(entry)->prev->next = (entry)->next; \
(entry)->next->prev = (entry)->prev; \
(list)->cnt--; \
} \
} while (0)
#define OSAL_LIST_IS_EMPTY(list) \
((list)->cnt == 0)
#define OSAL_LIST_NEXT(entry, field, type) \
(type *)((&((entry)->field))->next)
#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \
for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \
entry; \
entry = OSAL_LIST_NEXT(entry, field, type))
#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \
for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \
tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \
entry != NULL; \
entry = (type *)tmp_entry, \
tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL)
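/*
 * Illustrative sketch (editorial; struct qlnx_item and qlnx_list_example()
 * are hypothetical): the list macros cast the raw head pointer, so they
 * assume the osal_list_entry_t is the first member of the containing
 * structure.
 */
struct qlnx_item {
	osal_list_entry_t link;		/* must be the first member */
	int value;
};

static __inline int
qlnx_list_example(struct qlnx_item *a, struct qlnx_item *b)
{
	osal_list_t list;
	struct qlnx_item *item;
	int sum = 0;

	OSAL_LIST_INIT(&list);
	OSAL_LIST_PUSH_TAIL(&a->link, &list);
	OSAL_LIST_PUSH_TAIL(&b->link, &list);

	OSAL_LIST_FOR_EACH_ENTRY(item, &list, link, struct qlnx_item)
		sum += item->value;

	return (sum);
}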
#define OSAL_BAR_SIZE(dev, bar_id) qlnx_pci_bus_get_bar_size(dev, bar_id)
#define OSAL_PCI_READ_CONFIG_BYTE(dev, reg, value) \
qlnx_pci_read_config_byte(dev, reg, value);
#define OSAL_PCI_READ_CONFIG_WORD(dev, reg, value) \
qlnx_pci_read_config_word(dev, reg, value);
#define OSAL_PCI_READ_CONFIG_DWORD(dev, reg, value) \
qlnx_pci_read_config_dword(dev, reg, value);
#define OSAL_PCI_WRITE_CONFIG_BYTE(dev, reg, value) \
qlnx_pci_write_config_byte(dev, reg, value);
#define OSAL_PCI_WRITE_CONFIG_WORD(dev, reg, value) \
qlnx_pci_write_config_word(dev, reg, value);
#define OSAL_PCI_WRITE_CONFIG_DWORD(dev, reg, value) \
qlnx_pci_write_config_dword(dev, reg, value);
#define OSAL_PCI_FIND_CAPABILITY(dev, cap) qlnx_pci_find_capability(dev, cap);
#define OSAL_MMIOWB(dev) qlnx_barrier(dev)
#define OSAL_BARRIER(dev) qlnx_barrier(dev)
#define OSAL_SMP_MB(dev) mb()
#define OSAL_SMP_RMB(dev) rmb()
#define OSAL_SMP_WMB(dev) wmb()
#define OSAL_RMB(dev) rmb()
#define OSAL_WMB(dev) wmb()
#define OSAL_DMA_SYNC(dev, addr, length, is_post)
#define OSAL_FIND_FIRST_BIT find_first_bit
#define OSAL_SET_BIT(bit, bitmap) bit_set((bitstr_t *)bitmap, bit)
#define OSAL_CLEAR_BIT(bit, bitmap) bit_clear((bitstr_t *)bitmap, bit)
#define OSAL_TEST_BIT(bit, bitmap) bit_test((bitstr_t *)bitmap, bit)
#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \
find_first_zero_bit(bitmap, length)
#define OSAL_LINK_UPDATE(hwfn) qlnx_link_update(hwfn)
#define OSAL_VF_FLR_UPDATE(hwfn)
#define QLNX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define QLNX_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define OSAL_NUM_ACTIVE_CPU() mp_ncpus
#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(size, to_what) QLNX_DIV_ROUND_UP((size), (to_what))
#endif
#define ROUNDUP(value, to_what) QLNX_ROUNDUP((value), (to_what))
#define OSAL_ROUNDUP_POW_OF_TWO(val) roundup_pow_of_two((val))
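/*
 * Worked example (editorial): QLNX_DIV_ROUND_UP(10, 4) == 3 and
 * QLNX_ROUNDUP(10, 4) == 12, i.e. division rounding up and rounding up to
 * the next multiple, respectively.
 */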
static __inline uint32_t
qlnx_log2(uint32_t x)
{
uint32_t log = 0;
while (x >>= 1) log++;
return (log);
}
#define OSAL_LOG2(val) qlnx_log2(val)
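/*
 * Worked example (editorial): qlnx_log2() returns the floor of log2(x),
 * e.g. qlnx_log2(1) == 0, qlnx_log2(8) == 3, qlnx_log2(10) == 3;
 * qlnx_log2(0) also returns 0.
 */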
#define OFFSETOF(str, field) offsetof(str, field)
#define PRINT device_printf
#define PRINT_ERR device_printf
#define OSAL_ASSERT(is_assert) nothing
#define OSAL_BEFORE_PF_START(cdev, my_id) {};
#define OSAL_AFTER_PF_STOP(cdev, my_id) {};
#define INLINE __inline
#define OSAL_INLINE __inline
#define OSAL_UNLIKELY
#define OSAL_NULL NULL
#define OSAL_MAX_T(type, __max1, __max2) max_t(type, __max1, __max2)
#define OSAL_MIN_T(type, __max1, __max2) min_t(type, __max1, __max2)
#define __iomem
#define OSAL_IOMEM
#define int_ptr_t void *
#define osal_int_ptr_t void *
#define OSAL_BUILD_BUG_ON(cond) nothing
#define REG_ADDR(hwfn, offset) (void *)((u8 *)(hwfn->regview) + (offset))
#define OSAL_REG_ADDR(hwfn, offset) (void *)((u8 *)(hwfn->regview) + (offset))
#define OSAL_PAGE_SIZE PAGE_SIZE
#define OSAL_STRCPY(dst, src) strcpy(dst, src)
#define OSAL_STRNCPY(dst, src, bytes) strncpy(dst, src, bytes)
#define OSAL_STRLEN(src) strlen(src)
#define OSAL_SPRINTF sprintf
#define OSAL_SNPRINTF snprintf
#define OSAL_MEMSET memset
#define OSAL_ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
#define osal_uintptr_t u64
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
#define OSAL_GET_PROTOCOL_STATS(p_hwfn, type, stats) \
qlnx_get_protocol_stats(p_hwfn, type, stats);
#define OSAL_POLL_MODE_DPC(hwfn) {if (cold) qlnx_sp_isr(hwfn);}
#define OSAL_WARN(cond, fmt, args...) \
if (cond) printf("%s: WARNING: " fmt, __func__, ## args);
#define OSAL_BITMAP_WEIGHT(bitmap, nbits) bitmap_weight(bitmap, nbits)
#define OSAL_GET_RDMA_SB_ID(p_hwfn, cnq_id) ecore_rdma_get_sb_id(p_hwfn, cnq_id)
static inline int
qlnx_test_and_change_bit(long bit, volatile unsigned long *var)
{
long val;
var += BIT_WORD(bit);
bit %= BITS_PER_LONG;
bit = (1UL << bit);
val = *var;
#if __FreeBSD_version >= 1100000
if (val & bit)
return (test_and_clear_bit(bit, var));
return (test_and_set_bit(bit, var));
#else
if (val & bit)
return (test_and_clear_bit(bit, (long *)var));
return (test_and_set_bit(bit, (long *)var));
#endif
}
#if __FreeBSD_version < 1100000
static inline unsigned
bitmap_weight(unsigned long *bitmap, unsigned nbits)
{
unsigned bit;
unsigned retval = 0;
for_each_set_bit(bit, bitmap, nbits)
retval++;
return (retval);
}
#endif
#define OSAL_TEST_AND_FLIP_BIT qlnx_test_and_change_bit
#define OSAL_TEST_AND_CLEAR_BIT test_and_clear_bit
#define OSAL_MEMCMP memcmp
#define OSAL_SPIN_LOCK_IRQSAVE(x,y) {y=0; mtx_lock(x);}
#define OSAL_SPIN_UNLOCK_IRQSAVE(x,y) {y= 0; mtx_unlock(x);}
static inline u32
OSAL_CRC32(u32 crc, u8 *ptr, u32 length)
{
int i;
while (length--) {
crc ^= *ptr++;
for (i = 0; i < 8; i++)
crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
}
return crc;
}
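/*
 * Editorial note: the routine above is the standard reflected CRC-32
 * (polynomial 0xedb88320). The conventional usage seeds with ~0 and
 * complements the result, e.g.
 *   ~OSAL_CRC32(0xffffffffU, (u8 *)"123456789", 9) == 0xcbf43926
 */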
static inline void
OSAL_CRC8_POPULATE(u8 * cdu_crc8_table, u8 polynomial)
{
return;
}
static inline u8
OSAL_CRC8(u8 * cdu_crc8_table, u8 * data_to_crc, int data_to_crc_len, u8 init_value)
{
return ECORE_NOTIMPL;
}
#define OSAL_HW_INFO_CHANGE(p_hwfn, offset)
#define OSAL_MFW_TLV_REQ(p_hwfn)
#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, req, vf_sw_info) {};
#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, res) (0)
#endif /* #ifdef ECORE_PACKAGE */
#endif /* #ifdef __BCM_OSAL_ECORE_PACKAGE */

File diff suppressed because it is too large.

sys/dev/qlnx/qlnxe/ecore.h (new file, 906 lines)

@@ -0,0 +1,906 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_H
#define __ECORE_H
#include "ecore_hsi_common.h"
#include "ecore_hsi_debug_tools.h"
#include "ecore_hsi_init_func.h"
#include "ecore_hsi_init_tool.h"
#include "ecore_proto_if.h"
#include "mcp_public.h"
#define ECORE_MAJOR_VERSION 8
#define ECORE_MINOR_VERSION 18
#define ECORE_REVISION_VERSION 13
#define ECORE_ENGINEERING_VERSION 0
#define ECORE_VERSION \
((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
(ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
#define STORM_FW_VERSION \
((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
(FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
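/*
 * Worked example (editorial): with the values above, ECORE_VERSION packs
 * 8.18.13.0 as (8 << 24) | (18 << 16) | (13 << 8) | 0 == 0x08120d00.
 */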
#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 16
#define ARRAY_DECL static const
#define ECORE_WFQ_UNIT 100
/* Constants */
#define ECORE_WID_SIZE (1024)
/* Configurable */
#define ECORE_PF_DEMS_SIZE (4)
/* cau states */
enum ecore_coalescing_mode {
ECORE_COAL_MODE_DISABLE,
ECORE_COAL_MODE_ENABLE
};
enum ecore_nvm_cmd {
ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM,
ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE,
ECORE_EXT_PHY_FW_UPGRADE = DRV_MSG_CODE_EXT_PHY_FW_UPGRADE,
ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE,
ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ,
ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE,
ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ,
ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE,
ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00
};
#if !defined(CONFIG_ECORE_L2) && !defined(CONFIG_ECORE_ROCE) && \
!defined(CONFIG_ECORE_FCOE) && !defined(CONFIG_ECORE_ISCSI)
#define CONFIG_ECORE_L2
#define CONFIG_ECORE_SRIOV
#define CONFIG_ECORE_ROCE
#define CONFIG_ECORE_IWARP
#define CONFIG_ECORE_FCOE
#define CONFIG_ECORE_ISCSI
#define CONFIG_ECORE_LL2
#endif
/* helpers */
#define MASK_FIELD(_name, _value) \
((_value) &= (_name##_MASK))
#define FIELD_VALUE(_name, _value) \
((_value & _name##_MASK) << _name##_SHIFT)
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name##_MASK << name##_SHIFT); \
(value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#define ECORE_MFW_GET_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _SHIFT))
#define ECORE_MFW_SET_FIELD(name, field, value) \
do { \
(name) &= ~((field ## _MASK) << (field ## _SHIFT)); \
(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
} while (0)
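/*
 * Illustrative usage (editorial): SET_FIELD()/GET_FIELD() expect _MASK to be
 * right-justified (unshifted), while ECORE_MFW_GET/SET_FIELD() expect _MASK
 * to already be shifted into place. For example, with hypothetical defines
 *	#define EX_FIELD_MASK	0xf
 *	#define EX_FIELD_SHIFT	8
 * and u32 reg = 0:
 *	SET_FIELD(reg, EX_FIELD, 0x5);	reg is now 0x00000500
 *	GET_FIELD(reg, EX_FIELD)	evaluates to 0x5
 */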
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
(cid * ECORE_PF_DEMS_SIZE);
return db_addr;
}
static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
((sizeof(type_name) + (u32)(1<<(p_hwfn->p_dev->cache_shift))-1) & \
~((1<<(p_hwfn->p_dev->cache_shift))-1))
#ifndef U64_HI
#define U64_HI(val) ((u32)(((u64)(val)) >> 32))
#endif
#ifndef U64_LO
#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff))
#endif
#ifndef UEFI
/* Debug print definitions */
#define DP_ERR(p_dev, fmt, ...) \
do { \
PRINT_ERR((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__); \
} while (0)
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
do { \
if (OSAL_UNLIKELY((p_dev)->dp_level <= ECORE_LEVEL_NOTICE)) { \
PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__); \
OSAL_ASSERT(!is_assert); \
} \
} while (0)
#define DP_INFO(p_dev, fmt, ...) \
do { \
if (OSAL_UNLIKELY((p_dev)->dp_level <= ECORE_LEVEL_INFO)) { \
PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__); \
} \
} while (0)
#define DP_VERBOSE(p_dev, module, fmt, ...) \
do { \
if (OSAL_UNLIKELY(((p_dev)->dp_level <= ECORE_LEVEL_VERBOSE) && \
((p_dev)->dp_module & module))) { \
PRINT((p_dev)->dp_ctx, "[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__); \
} \
} while (0)
#endif
enum DP_LEVEL {
ECORE_LEVEL_VERBOSE = 0x0,
ECORE_LEVEL_INFO = 0x1,
ECORE_LEVEL_NOTICE = 0x2,
ECORE_LEVEL_ERR = 0x3,
};
#define ECORE_LOG_LEVEL_SHIFT (30)
#define ECORE_LOG_VERBOSE_MASK (0x3fffffff)
#define ECORE_LOG_INFO_MASK (0x40000000)
#define ECORE_LOG_NOTICE_MASK (0x80000000)
enum DP_MODULE {
ECORE_MSG_DRV = 0x0001,
ECORE_MSG_PROBE = 0x0002,
ECORE_MSG_LINK = 0x0004,
ECORE_MSG_TIMER = 0x0008,
ECORE_MSG_IFDOWN = 0x0010,
ECORE_MSG_IFUP = 0x0020,
ECORE_MSG_RX_ERR = 0x0040,
ECORE_MSG_TX_ERR = 0x0080,
ECORE_MSG_TX_QUEUED = 0x0100,
ECORE_MSG_INTR = 0x0200,
ECORE_MSG_TX_DONE = 0x0400,
ECORE_MSG_RX_STATUS = 0x0800,
ECORE_MSG_PKTDATA = 0x1000,
ECORE_MSG_HW = 0x2000,
ECORE_MSG_WOL = 0x4000,
ECORE_MSG_SPQ = 0x10000,
ECORE_MSG_STATS = 0x20000,
ECORE_MSG_DCB = 0x40000,
ECORE_MSG_IOV = 0x80000,
ECORE_MSG_SP = 0x100000,
ECORE_MSG_STORAGE = 0x200000,
ECORE_MSG_OOO = 0x200000,
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_LL2 = 0x1000000,
ECORE_MSG_ILT = 0x2000000,
ECORE_MSG_RDMA = 0x4000000,
ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
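/*
 * Illustrative usage (editorial): a verbose-level message is emitted only
 * when both the debug level and the module bit allow it, e.g.
 *
 *	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "link speed %d\n", speed);
 */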
#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
(val == (cond1) ? true1 : \
(val == (cond2) ? true2 : def))
/* forward */
struct ecore_ptt_pool;
struct ecore_spq;
struct ecore_sb_info;
struct ecore_sb_attn_info;
struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_ll2_info;
struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
struct ecore_rt_data {
u32 *init_val;
bool *b_valid;
};
enum ecore_tunn_mode {
ECORE_MODE_L2GENEVE_TUNN,
ECORE_MODE_IPGENEVE_TUNN,
ECORE_MODE_L2GRE_TUNN,
ECORE_MODE_IPGRE_TUNN,
ECORE_MODE_VXLAN_TUNN,
};
enum ecore_tunn_clss {
ECORE_TUNN_CLSS_MAC_VLAN,
ECORE_TUNN_CLSS_MAC_VNI,
ECORE_TUNN_CLSS_INNER_MAC_VLAN,
ECORE_TUNN_CLSS_INNER_MAC_VNI,
ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
MAX_ECORE_TUNN_CLSS,
};
struct ecore_tunn_update_type {
bool b_update_mode;
bool b_mode_enabled;
enum ecore_tunn_clss tun_cls;
};
struct ecore_tunn_update_udp_port {
bool b_update_port;
u16 port;
};
struct ecore_tunnel_info {
struct ecore_tunn_update_type vxlan;
struct ecore_tunn_update_type l2_geneve;
struct ecore_tunn_update_type ip_geneve;
struct ecore_tunn_update_type l2_gre;
struct ecore_tunn_update_type ip_gre;
struct ecore_tunn_update_udp_port vxlan_port;
struct ecore_tunn_update_udp_port geneve_port;
bool b_update_rx_cls;
bool b_update_tx_cls;
};
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE/iWARP protocol
*/
enum ecore_pci_personality {
ECORE_PCI_ETH,
ECORE_PCI_FCOE,
ECORE_PCI_ISCSI,
ECORE_PCI_ETH_ROCE,
ECORE_PCI_ETH_IWARP,
ECORE_PCI_ETH_RDMA,
ECORE_PCI_DEFAULT /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
struct ecore_qm_iids {
u32 cids;
u32 vf_cids;
u32 tids;
};
#define MAX_PF_PER_PORT 8
/* HW / FW resources, output of features supported below, most information
* is received from MFW.
*/
enum ecore_resources {
ECORE_L2_QUEUE,
ECORE_VPORT,
ECORE_RSS_ENG,
ECORE_PQ,
ECORE_RL,
ECORE_MAC,
ECORE_VLAN,
ECORE_RDMA_CNQ_RAM,
ECORE_ILT,
ECORE_LL2_QUEUE,
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
ECORE_BDQ,
/* This is needed only internally for matching against the IGU.
* In case of legacy MFW, would be set to `0'.
*/
ECORE_SB,
ECORE_MAX_RESC,
};
/* Features that require resources, given as input to the resource management
* algorithm, the output are the resources above
*/
enum ecore_feature {
ECORE_PF_L2_QUE,
ECORE_PF_TC,
ECORE_VF,
ECORE_EXTRA_VF_QUE,
ECORE_VMQ,
ECORE_RDMA_CNQ,
ECORE_ISCSI_CQ,
ECORE_FCOE_CQ,
ECORE_VF_L2_QUE,
ECORE_MAX_FEATURES,
};
enum ecore_port_mode {
ECORE_PORT_MODE_DE_2X40G,
ECORE_PORT_MODE_DE_2X50G,
ECORE_PORT_MODE_DE_1X100G,
ECORE_PORT_MODE_DE_4X10G_F,
ECORE_PORT_MODE_DE_4X10G_E,
ECORE_PORT_MODE_DE_4X20G,
ECORE_PORT_MODE_DE_1X40G,
ECORE_PORT_MODE_DE_2X25G,
ECORE_PORT_MODE_DE_1X25G,
ECORE_PORT_MODE_DE_4X25G,
ECORE_PORT_MODE_DE_2X10G,
};
enum ecore_dev_cap {
ECORE_DEV_CAP_ETH,
ECORE_DEV_CAP_FCOE,
ECORE_DEV_CAP_ISCSI,
ECORE_DEV_CAP_ROCE,
ECORE_DEV_CAP_IWARP
};
enum ecore_hw_err_type {
ECORE_HW_ERR_FAN_FAIL,
ECORE_HW_ERR_MFW_RESP_FAIL,
ECORE_HW_ERR_HW_ATTN,
ECORE_HW_ERR_DMAE_FAIL,
ECORE_HW_ERR_RAMROD_FAIL,
ECORE_HW_ERR_FW_ASSERT,
};
enum ecore_wol_support {
ECORE_WOL_SUPPORT_NONE,
ECORE_WOL_SUPPORT_PME,
};
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
#define ECORE_IS_RDMA_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
#define ECORE_IS_ROCE_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
#define ECORE_IS_IWARP_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
(dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
#define ECORE_IS_L2_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ETH || \
ECORE_IS_RDMA_PERSONALITY(dev))
#define ECORE_IS_FCOE_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_FCOE)
#define ECORE_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == ECORE_PCI_ISCSI)
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
u32 resc_num[ECORE_MAX_RESC];
u32 feat_num[ECORE_MAX_FEATURES];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
/* Number of traffic classes the HW supports */
u8 num_hw_tc;
/* Number of TCs that should be active, per DCBX or upper-layer driver configuration */
u8 num_active_tc;
/* The traffic class used by the PF for its offloaded protocol */
u8 offload_tc;
u32 concrete_fid;
u16 opaque_fid;
u16 ovlan;
u32 part_num[4];
#ifndef ETH_ALEN
#define ETH_ALEN 6 /* @@@ TBD - define somewhere else for Windows */
#endif
unsigned char hw_mac_addr[ETH_ALEN];
u64 node_wwn; /* For FCoE only */
u64 port_wwn; /* For FCoE only */
u16 num_iscsi_conns;
u16 num_fcoe_conns;
struct ecore_igu_info *p_igu_info;
/* Sriov */
u8 max_chains_per_vf;
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
/* Default DCBX mode */
u8 dcbx_mode;
u16 mtu;
enum ecore_wol_support b_wol_support;
};
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000
struct ecore_dmae_info {
/* Mutex for synchronizing access to functions */
osal_mutex_t mutex;
u8 channel;
dma_addr_t completion_word_phys_addr;
/* The memory location where the DMAE writes the completion
* value when an operation is finished on this context.
*/
u32 *p_completion_word;
dma_addr_t intermediate_buffer_phys_addr;
/* An intermediate buffer for DMAE operations that use virtual
* addresses - data is DMA'd to/from this buffer and then
* memcpy'd to/from the virtual address
*/
u32 *p_intermediate_buffer;
dma_addr_t dmae_cmd_phys_addr;
struct dmae_cmd *p_dmae_cmd;
};
struct ecore_wfq_data {
u32 default_min_speed; /* When wfq feature is not configured */
u32 min_speed; /* when feature is configured for any 1 vport */
bool configured;
};
struct ecore_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
u16 pure_lb_pq;
u16 offload_pq;
u16 low_latency_pq;
u16 pure_ack_pq;
u16 ooo_pq;
u16 first_vf_pq;
u16 first_mcos_pq;
u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
struct ecore_wfq_data *wfq_data;
u8 num_pf_rls;
};
struct storm_stats {
u32 address;
u32 len;
};
struct ecore_fw_data {
#ifdef CONFIG_ECORE_BINARY_FW
struct fw_ver_info *fw_ver_info;
#endif
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
u32 init_ops_size;
};
struct ecore_hwfn {
struct ecore_dev *p_dev;
u8 my_id; /* ID inside the PF */
#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
u8 rel_pf_id; /* Relative to engine*/
u8 abs_pf_id;
#define ECORE_PATH_ID(_p_hwfn) \
(ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
u8 port_id;
bool b_active;
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
u8 enabled_func_idx;
/* BAR access */
void OSAL_IOMEM *regview;
void OSAL_IOMEM *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PTT pool */
struct ecore_ptt_pool *p_ptt_pool;
/* HW info */
struct ecore_hw_info hw_info;
/* rt_array (for init-tool) */
struct ecore_rt_data rt_data;
/* SPQ */
struct ecore_spq *p_spq;
/* EQ */
struct ecore_eq *p_eq;
/* Consolidate Q*/
struct ecore_consq *p_consq;
/* Slow-Path definitions */
osal_dpc_t sp_dpc;
bool b_sp_dpc_enabled;
struct ecore_ptt *p_main_ptt;
struct ecore_ptt *p_dpc_ptt;
struct ecore_sb_sp_info *p_sp_sb;
struct ecore_sb_attn_info *p_sb_attn;
/* Protocol related */
bool using_ll2;
struct ecore_ll2_info *p_ll2_info;
struct ecore_ooo_info *p_ooo_info;
struct ecore_iscsi_info *p_iscsi_info;
struct ecore_fcoe_info *p_fcoe_info;
struct ecore_rdma_info *p_rdma_info;
struct ecore_pf_params pf_params;
bool b_rdma_enabled_in_prs;
u32 rdma_prs_search_reg;
struct ecore_cxt_mngr *p_cxt_mngr;
/* Flag indicating whether interrupts are enabled or not*/
bool b_int_enabled;
bool b_int_requested;
/* True if the driver requests for the link */
bool b_drv_link_init;
struct ecore_vf_iov *vf_iov_info;
struct ecore_pf_iov *pf_iov_info;
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
struct ecore_dmae_info dmae_info;
/* QM init */
struct ecore_qm_info qm_info;
/* Buffer for unzipping firmware data */
#ifdef CONFIG_ECORE_ZIPPED_FW
void *unzip_buf;
#endif
struct dbg_tools_data dbg_info;
/* PWM region specific data */
u16 wid_count;
u32 dpi_size;
u32 dpi_count;
u32 dpi_start_offset; /* used to calculate the doorbell address */
/* If one of the following is set then EDPM shouldn't be used */
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
/* L2-related */
struct ecore_l2_info *p_l2_info;
};
enum ecore_mf_mode {
ECORE_MF_DEFAULT,
ECORE_MF_OVLAN,
ECORE_MF_NPAR,
};
enum ecore_dev_type {
ECORE_DEV_TYPE_BB,
ECORE_DEV_TYPE_AH,
ECORE_DEV_TYPE_E5,
};
struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
enum ecore_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB)
#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_A0(dev))
#ifndef ASIC_ONLY
#define ECORE_IS_BB_B0(dev) ((ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev)) || \
(CHIP_REV_IS_TEDIBEAR(dev)))
#else
#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#endif
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
#define ECORE_IS_E5(dev) false
#define ECORE_E5_MISSING_CODE OSAL_BUILD_BUG_ON(false)
u16 vendor_id;
u16 device_id;
#define ECORE_DEV_ID_MASK 0xff00
#define ECORE_DEV_ID_MASK_BB 0x1600
#define ECORE_DEV_ID_MASK_AH 0x8000
u16 chip_num;
#define CHIP_NUM_MASK 0xffff
#define CHIP_NUM_SHIFT 16
u16 chip_rev;
#define CHIP_REV_MASK 0xf
#define CHIP_REV_SHIFT 12
#ifndef ASIC_ONLY
#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5)
#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe)
#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc)
#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_EMUL_B0(_p_dev))
#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf)
#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd)
#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev))
#define CHIP_REV_IS_SLOW(_p_dev) \
(CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev))
#define CHIP_REV_IS_A0(_p_dev) \
(CHIP_REV_IS_EMUL_A0(_p_dev) || \
CHIP_REV_IS_FPGA_A0(_p_dev) || \
!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) \
(CHIP_REV_IS_EMUL_B0(_p_dev) || \
CHIP_REV_IS_FPGA_B0(_p_dev) || \
(_p_dev)->chip_rev == 1)
#define CHIP_REV_IS_ASIC(_p_dev) !CHIP_REV_IS_SLOW(_p_dev)
#else
#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev)
#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1)
#endif
u16 chip_metal;
#define CHIP_METAL_MASK 0xff
#define CHIP_METAL_SHIFT 4
u16 chip_bond_id;
#define CHIP_BOND_ID_MASK 0xf
#define CHIP_BOND_ID_SHIFT 0
u8 num_engines;
u8 num_ports_in_engines;
u8 num_funcs_in_port;
u8 path_id;
enum ecore_mf_mode mf_mode;
#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT)
#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR)
#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN)
int pcie_width;
int pcie_speed;
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
/* WoL related configurations */
u8 wol_config;
u8 wol_mac[ETH_ALEN];
u32 int_mode;
enum ecore_coalescing_mode int_coalescing_mode;
u16 rx_coalesce_usecs;
u16 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void OSAL_IOMEM *regview;
void OSAL_IOMEM *doorbells;
u64 db_phys_addr;
unsigned long db_size;
/* PCI */
u8 cache_shift;
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->p_dev->iro_arr)
/* HW functions */
u8 num_hwfns;
struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
#ifdef CONFIG_ECORE_SW_CHANNEL
bool b_hw_channel;
#endif
struct ecore_tunnel_info tunnel;
bool b_is_vf;
bool b_dont_override_vf_msix;
u32 drv_type;
u32 rdma_max_sge;
u32 rdma_max_inline;
u32 rdma_max_srq_sge;
struct ecore_eth_stats *reset_stats;
struct ecore_fw_data *fw_data;
u32 mcp_nvm_resp;
/* Recovery */
bool recov_in_prog;
/* Indicates whether should prevent attentions from being reasserted */
bool attn_clr_en;
/* Indicates whether allowing the MFW to collect a crash dump */
bool allow_mdump;
/* Indicates if the reg_fifo is checked after any register access */
bool chk_reg_fifo;
#ifndef ASIC_ONLY
bool b_is_emul_full;
#endif
};
#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \
: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \
: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
*
* @param concrete_fid
*
* @return OSAL_INLINE u8
*/
static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID);
u8 sw_fid;
if (vf_valid)
sw_fid = vfid + MAX_NUM_PFS;
else
sw_fid = pfid;
return sw_fid;
}
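/*
 * Illustrative mapping (editorial): PFs keep their pfid as the software FID,
 * while VFs are placed after all PFs. For example, assuming MAX_NUM_PFS is
 * 16, PF 3 maps to sw_fid 3 and VF 3 maps to sw_fid 19.
 */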
#define PURE_LB_TC 8
#define PKT_LB_TC 9
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
struct ecore_ptt *p_ptt,
u32 min_pf_rate);
int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw);
int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
u8 *mac);
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS (1 << 0)
#define PQ_FLAGS_MCOS (1 << 1)
#define PQ_FLAGS_LB (1 << 2)
#define PQ_FLAGS_OOO (1 << 3)
#define PQ_FLAGS_ACK (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
#define PQ_FLAGS_VFS (1 << 6)
#define PQ_FLAGS_LLT (1 << 7)
/* physical queue index for cm context initialization */
u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
/* amount of resources used in qm init */
u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
#endif /* __ECORE_H */


@@ -0,0 +1,783 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_CHAIN_H__
#define __ECORE_CHAIN_H__
#include "common_hsi.h"
#include "ecore_utils.h"
enum ecore_chain_mode
{
/* Each Page contains a next pointer at its end */
ECORE_CHAIN_MODE_NEXT_PTR,
/* Chain is a single page; a next pointer is not required */
ECORE_CHAIN_MODE_SINGLE,
/* Page pointers are located in a side list */
ECORE_CHAIN_MODE_PBL,
};
enum ecore_chain_use_mode
{
ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */
ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */
ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */
};
enum ecore_chain_cnt_type {
/* The chain's size/prod/cons are kept in 16-bit variables */
ECORE_CHAIN_CNT_TYPE_U16,
/* The chain's size/prod/cons are kept in 32-bit variables */
ECORE_CHAIN_CNT_TYPE_U32,
};
struct ecore_chain_next
{
struct regpair next_phys;
void *next_virt;
};
struct ecore_chain_pbl_u16 {
u16 prod_page_idx;
u16 cons_page_idx;
};
struct ecore_chain_pbl_u32 {
u32 prod_page_idx;
u32 cons_page_idx;
};
struct ecore_chain_ext_pbl
{
dma_addr_t p_pbl_phys;
void *p_pbl_virt;
};
struct ecore_chain_u16 {
/* Cyclic index of next element to produce/consume */
u16 prod_idx;
u16 cons_idx;
};
struct ecore_chain_u32 {
/* Cyclic index of next element to produce/consume */
u32 prod_idx;
u32 cons_idx;
};
struct ecore_chain
{
/* fastpath portion of the chain - required for commands such
* as produce / consume.
*/
/* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
/* Fastpath portions of the PBL [if exists] */
struct {
/* Table for keeping the virtual addresses of the chain pages,
* respectively to the physical addresses in the pbl table.
*/
void **pp_virt_addr_tbl;
union {
struct ecore_chain_pbl_u16 pbl_u16;
struct ecore_chain_pbl_u32 pbl_u32;
} c;
} pbl;
union {
struct ecore_chain_u16 chain16;
struct ecore_chain_u32 chain32;
} u;
/* Capacity counts only usable elements */
u32 capacity;
u32 page_cnt;
/* A u8 would suffice for mode, but using the enum saves us a lot of headaches
* on casts and defaults.
*/
enum ecore_chain_mode mode;
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_size;
u16 next_page_mask;
u16 usable_per_page;
u8 elem_unusable;
u8 cnt_type;
/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
*/
/* Base address of a pre-allocated buffer for pbl */
struct {
dma_addr_t p_phys_table;
void *p_virt_table;
} pbl_sp;
/* Address of first page of the chain - the address is required
* for fastpath operation [consume/produce] but only for the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
void *p_virt_addr;
dma_addr_t p_phys_addr;
/* Total number of elements [for entire chain] */
u32 size;
u8 intended_use;
/* TBD - do we really need this? Couldn't find usage for it */
bool b_external_pbl;
void *dp_ctx;
};
#define ECORE_CHAIN_PBL_ENTRY_SIZE (8)
#define ECORE_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE/(elem_size))
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
(u8)(1 + ((sizeof(struct ecore_chain_next)-1) / \
(elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32) (ELEMS_PER_PAGE(elem_size) - \
UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))
#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)
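/*
 * Worked example (editorial): for 16-byte elements, assuming a 64-bit build
 * where sizeof(struct ecore_chain_next) == 16:
 *   ELEMS_PER_PAGE(16)				== 4096 / 16 == 256
 *   UNUSABLE_ELEMS_PER_PAGE(16, NEXT_PTR mode)	== 1 + (16 - 1) / 16 == 1
 *   USABLE_ELEMS_PER_PAGE(16, NEXT_PTR mode)	== 255
 *   ECORE_CHAIN_PAGE_CNT(1000, 16, NEXT_PTR)	== DIV_ROUND_UP(1000, 255) == 4
 * In SINGLE and PBL modes no elements are lost to the next pointer.
 */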
/* Accessors */
static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u16(p_chain));
return p_chain->u.chain16.prod_idx;
}
static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u32(p_chain));
return p_chain->u.chain32.prod_idx;
}
static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u16(p_chain));
return p_chain->u.chain16.cons_idx;
}
static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
{
OSAL_ASSERT(is_chain_u32(p_chain));
return p_chain->u.chain32.cons_idx;
}
#define ECORE_U16_MAX ((u16)~0U)
#define ECORE_U32_MAX ((u32)~0U)
static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
{
u16 used;
OSAL_ASSERT(is_chain_u16(p_chain));
used = (u16)(((u32)ECORE_U16_MAX + 1 +
(u32)(p_chain->u.chain16.prod_idx)) -
(u32)p_chain->u.chain16.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
p_chain->u.chain16.cons_idx / p_chain->elem_per_page;
return (u16)(p_chain->capacity - used);
}
static OSAL_INLINE u32
ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
{
u32 used;
OSAL_ASSERT(is_chain_u32(p_chain));
used = (u32)(((u64)ECORE_U32_MAX + 1 +
(u64)(p_chain->u.chain32.prod_idx)) -
(u64)p_chain->u.chain32.cons_idx);
if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
p_chain->u.chain32.cons_idx / p_chain->elem_per_page;
return p_chain->capacity - used;
}
static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
return (ecore_chain_get_elem_left(p_chain) ==
p_chain->capacity);
else
return (ecore_chain_get_elem_left_u32(p_chain) ==
p_chain->capacity);
}
static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
return (ecore_chain_get_elem_left(p_chain) == 0);
else
return (ecore_chain_get_elem_left_u32(p_chain) == 0);
}
static OSAL_INLINE
u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_per_page;
}
static OSAL_INLINE
u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
{
return p_chain->usable_per_page;
}
static OSAL_INLINE
u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_unusable;
}
static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
{
return p_chain->size;
}
static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
{
return p_chain->page_cnt;
}
static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
return p_chain->pbl_sp.p_phys_table;
}
/**
* @brief ecore_chain_advance_page -
*
* Advance the next element across pages for a linked chain
*
* @param p_chain
* @param p_next_elem
* @param idx_to_inc
* @param page_to_inc
*/
static OSAL_INLINE void
ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
void *idx_to_inc, void *page_to_inc)
{
struct ecore_chain_next *p_next = OSAL_NULL;
u32 page_index = 0;
switch(p_chain->mode) {
case ECORE_CHAIN_MODE_NEXT_PTR:
p_next = (struct ecore_chain_next *)(*p_next_elem);
*p_next_elem = p_next->next_virt;
if (is_chain_u16(p_chain))
*(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
else
*(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
break;
case ECORE_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
break;
case ECORE_CHAIN_MODE_PBL:
if (is_chain_u16(p_chain)) {
if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
*(u16 *)page_to_inc = 0;
page_index = *(u16 *)page_to_inc;
} else {
if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
}
}
#define is_unusable_idx(p, idx) \
(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_idx_u32(p, idx) \
(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx(p, idx) \
((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define is_unusable_next_idx_u32(p, idx) \
((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == (p)->usable_per_page)
#define test_and_skip(p, idx) \
do { \
if (is_chain_u16(p)) { \
if (is_unusable_idx(p, idx)) \
(p)->u.chain16.idx += (p)->elem_unusable; \
} else { \
if (is_unusable_idx_u32(p, idx)) \
(p)->u.chain32.idx += (p)->elem_unusable; \
} \
} while (0)
/**
* @brief ecore_chain_return_multi_produced -
*
* A chain in which the driver "Produces" elements should use this API
* to indicate previous produced elements are now consumed.
*
* @param p_chain
* @param num
*/
static OSAL_INLINE
void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.cons_idx += (u16)num;
else
p_chain->u.chain32.cons_idx += num;
test_and_skip(p_chain, cons_idx);
}
/**
* @brief ecore_chain_return_produced -
*
* A chain in which the driver "Produces" elements should use this API
* to indicate previous produced elements are now consumed.
*
* @param p_chain
*/
static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.cons_idx++;
else
p_chain->u.chain32.cons_idx++;
test_and_skip(p_chain, cons_idx);
}
/**
* @brief ecore_chain_produce -
*
* A chain in which the driver "Produces" elements should use this to get
* a pointer to the next element which can be "Produced". It is the driver's
* responsibility to validate that the chain has room for a new element.
*
* @param p_chain
*
* @return void*, a pointer to next element
*/
static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
{
void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;
if (is_chain_u16(p_chain)) {
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
p_prod_page_idx = &p_chain->pbl.c.pbl_u16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
p_chain->u.chain16.prod_idx++;
} else {
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
p_prod_page_idx = &p_chain->pbl.c.pbl_u32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
p_chain->u.chain32.prod_idx++;
}
p_ret = p_chain->p_prod_elem;
p_chain->p_prod_elem = (void*)(((u8*)p_chain->p_prod_elem) +
p_chain->elem_size);
return p_ret;
}
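/*
 * Illustrative sketch (editorial; ecore_chain_produce_example() is not part
 * of the driver): the producer is expected to verify that free elements
 * remain before calling ecore_chain_produce().
 */
static OSAL_INLINE void *
ecore_chain_produce_example(struct ecore_chain *p_chain)
{
	u32 left;

	left = is_chain_u16(p_chain) ?
	    (u32)ecore_chain_get_elem_left(p_chain) :
	    ecore_chain_get_elem_left_u32(p_chain);

	if (left == 0)
		return OSAL_NULL;	/* chain is full */

	return ecore_chain_produce(p_chain);
}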
/**
* @brief ecore_chain_get_capacity -
*
* Get the maximum number of BDs in chain
*
* @param p_chain
*
* @return u32, the chain capacity (maximum number of usable elements)
*/
static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
{
return p_chain->capacity;
}
/**
* @brief ecore_chain_recycle_consumed -
*
* Returns an element which was previously consumed;
* Increments producers so they could be written to FW.
*
* @param p_chain
*/
static OSAL_INLINE
void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
{
test_and_skip(p_chain, prod_idx);
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx++;
else
p_chain->u.chain32.prod_idx++;
}
/**
* @brief ecore_chain_consume -
*
* A Chain in which the driver utilizes data written by a different source
* (i.e., FW) should use this to access passed buffers.
*
* @param p_chain
*
* @return void*, a pointer to the next buffer written
*/
static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
{
void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;
if (is_chain_u16(p_chain)) {
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
p_cons_page_idx = &p_chain->pbl.c.pbl_u16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain16.cons_idx++;
} else {
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) ==
p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
p_cons_page_idx = &p_chain->pbl.c.pbl_u32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
p_chain->u.chain32.cons_idx++;
}
p_ret = p_chain->p_cons_elem;
p_chain->p_cons_elem = (void*)(((u8*)p_chain->p_cons_elem) +
p_chain->elem_size);
return p_ret;
}
/**
* @brief ecore_chain_reset -
*
* Resets the chain to its start state
*
* @param p_chain pointer to a previously allocated chain
*/
static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
{
u32 i;
if (is_chain_u16(p_chain)) {
p_chain->u.chain16.prod_idx = 0;
p_chain->u.chain16.cons_idx = 0;
} else {
p_chain->u.chain32.prod_idx = 0;
p_chain->u.chain32.cons_idx = 0;
}
p_chain->p_cons_elem = p_chain->p_virt_addr;
p_chain->p_prod_elem = p_chain->p_virt_addr;
if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
/* Use (page_cnt - 1) as a reset value for the prod/cons page's
* indices, to avoid unnecessary page advancing on the first
* call to ecore_chain_produce/consume. Instead, the indices
* will be advanced to page_cnt and then will be wrapped to 0.
*/
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
p_chain->pbl.c.pbl_u16.prod_page_idx = (u16)reset_val;
p_chain->pbl.c.pbl_u16.cons_page_idx = (u16)reset_val;
} else {
p_chain->pbl.c.pbl_u32.prod_page_idx = reset_val;
p_chain->pbl.c.pbl_u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
case ECORE_CHAIN_USE_TO_CONSUME:
/* produce empty elements */
for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
break;
case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
case ECORE_CHAIN_USE_TO_PRODUCE:
default:
/* Do nothing */
break;
}
}
/**
* @brief ecore_chain_init_params -
*
 * Initializes a basic chain struct
*
* @param p_chain
* @param page_cnt number of pages in the allocated buffer
* @param elem_size size of each element in the chain
* @param intended_use
* @param mode
* @param cnt_type
* @param dp_ctx
*/
static OSAL_INLINE void
ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
enum ecore_chain_use_mode intended_use,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
{
/* chain fixed parameters */
p_chain->p_virt_addr = OSAL_NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
p_chain->cnt_type = (u8)cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask);
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
p_chain->b_external_pbl = false;
p_chain->pbl_sp.p_phys_table = 0;
p_chain->pbl_sp.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
p_chain->dp_ctx = dp_ctx;
}
/**
* @brief ecore_chain_init_mem -
*
 * Initializes a basic chain struct with its chain buffers
*
* @param p_chain
* @param p_virt_addr virtual address of allocated buffer's beginning
* @param p_phys_addr physical address of allocated buffer's beginning
*
*/
static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
void *p_virt_addr,
dma_addr_t p_phys_addr)
{
p_chain->p_virt_addr = p_virt_addr;
p_chain->p_phys_addr = p_phys_addr;
}
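/* Illustrative sketch (editor's addition): the initialization order implied
 * by the helpers above - set the fixed parameters, attach the DMA buffer,
 * then reset the indices. The page count, element size, mode and counter
 * type below are placeholder values chosen only for the example.
 */
#if 0 /* example only */
static void example_chain_setup(struct ecore_chain *p_chain,
				void *p_virt, dma_addr_t p_phys)
{
	ecore_chain_init_params(p_chain, 1 /* page */, 8 /* elem size */,
				ECORE_CHAIN_USE_TO_PRODUCE,
				ECORE_CHAIN_MODE_SINGLE,
				ECORE_CHAIN_CNT_TYPE_U16, OSAL_NULL);
	ecore_chain_init_mem(p_chain, p_virt, p_phys);
	ecore_chain_reset(p_chain);
}
#endif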
/**
* @brief ecore_chain_init_pbl_mem -
*
 * Initializes a basic chain struct with its PBL buffers
 *
 * @param p_chain
 * @param p_virt_pbl pointer to a pre-allocated side table which will hold
 * virtual page addresses.
* @param p_phys_pbl pointer to a pre-allocated side table which will hold
* physical page addresses.
* @param pp_virt_addr_tbl
* pointer to a pre-allocated side table which will hold
* the virtual addresses of the chain pages.
*
*/
static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
p_chain->pbl_sp.p_phys_table = p_phys_pbl;
p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
/**
* @brief ecore_chain_init_next_ptr_elem -
*
 * Initializes a next pointer element
*
* @param p_chain
* @param p_virt_curr virtual address of a chain page of which the next
* pointer element is initialized
* @param p_virt_next virtual address of the next chain page
* @param p_phys_next physical address of the next chain page
*
*/
static OSAL_INLINE void
ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
void *p_virt_next, dma_addr_t p_phys_next)
{
struct ecore_chain_next *p_next;
u32 size;
size = p_chain->elem_size * p_chain->usable_per_page;
p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);
DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);
p_next->next_virt = p_virt_next;
}
/**
* @brief ecore_chain_get_last_elem -
*
* Returns a pointer to the last element of the chain
*
* @param p_chain
*
* @return void*
*/
static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
{
struct ecore_chain_next *p_next = OSAL_NULL;
void *p_virt_addr = OSAL_NULL;
u32 size, last_page_idx;
if (!p_chain->p_virt_addr)
goto out;
switch (p_chain->mode) {
case ECORE_CHAIN_MODE_NEXT_PTR:
size = p_chain->elem_size * p_chain->usable_per_page;
p_virt_addr = p_chain->p_virt_addr;
p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
while (p_next->next_virt != p_chain->p_virt_addr) {
p_virt_addr = p_next->next_virt;
p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr +
size);
}
break;
case ECORE_CHAIN_MODE_SINGLE:
p_virt_addr = p_chain->p_virt_addr;
break;
case ECORE_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
size = p_chain->elem_size * (p_chain->usable_per_page - 1);
p_virt_addr = (u8 *)p_virt_addr + size;
out:
return p_virt_addr;
}
/**
 * @brief ecore_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
*/
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16)prod_idx;
else
p_chain->u.chain32.prod_idx = prod_idx;
p_chain->p_prod_elem = p_prod_elem;
}
/**
* @brief ecore_chain_pbl_zero_mem - set chain memory to 0
*
* @param p_chain
*/
static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
{
u32 i, page_cnt;
if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
return;
page_cnt = ecore_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
ECORE_CHAIN_PAGE_SIZE);
}
int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
u32 buffer_size, u32 *element_indx, u32 stop_indx,
bool print_metadata,
int (*func_ptr_print_element)(struct ecore_chain *p_chain,
void *p_element,
char *buffer),
int (*func_ptr_print_metadata)(struct ecore_chain *p_chain,
char *buffer));
#endif /* __ECORE_CHAIN_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,239 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _ECORE_CID_
#define _ECORE_CID_
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_cxt_api.h"
/* Tasks segments definitions */
#define ECORE_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI /* 0 */
#define ECORE_CXT_FCOE_TID_SEG PROTOCOLID_FCOE /* 1 */
#define ECORE_CXT_ROCE_TID_SEG PROTOCOLID_ROCE /* 2 */
enum ecore_cxt_elem_type {
ECORE_ELEM_CXT,
ECORE_ELEM_SRQ,
ECORE_ELEM_TASK
};
u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *vf_cid);
u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn,
u32 rdma_tasks);
/**
* @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters
*
* @param p_hwfn
* @param last_line
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn,
u32 *last_line);
/**
* @brief ecore_cxt_cfg_ilt_compute_excess - how many lines can be decreased
*
* @param p_hwfn
* @param used_lines
*/
u32 ecore_cxt_cfg_ilt_compute_excess(struct ecore_hwfn *p_hwfn, u32 used_lines);
/**
* @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_mngr_free
*
* @param p_hwfn
*/
void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_cxt_mngr_setup - Reset the acquired CIDs
*
* @param p_hwfn
*/
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_cxt_hw_init_common - Initialize ILT and DQ, common phase, per path.
*
* @param p_hwfn
*/
void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_cxt_hw_init_pf - Initialize ILT and DQ, PF phase, per path.
*
* @param p_hwfn
*/
void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn);
/**
 * @brief ecore_qm_init_pf - Initialize the QM PF phase, per path
*
* @param p_hwfn
*/
void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
/**
* @brief Reconfigures QM pf on the fly
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#define ECORE_CXT_PF_CID (0xff)
/**
 * @brief ecore_cxt_release_cid - Release a cid
*
* @param p_hwfn
* @param cid
*/
void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
/**
 * @brief _ecore_cxt_release_cid - Release a cid belonging to a vf-queue
*
* @param p_hwfn
* @param cid
* @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
*/
void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
u32 cid, u8 vfid);
/**
 * @brief ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
*
* @param p_hwfn
* @param type
* @param p_cid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid);
/**
 * @brief _ecore_cxt_acquire_cid - Acquire a new cid of a specific protocol type
* for a vf-queue
*
* @param p_hwfn
* @param type
* @param p_cid
* @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum protocol_type type,
u32 *p_cid, u8 vfid);
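/* Illustrative sketch (editor's addition): acquiring a cid for a protocol
 * and releasing it when done, using the declarations above. The protocol
 * type and what the cid is used for are placeholders.
 */
#if 0 /* example only */
static enum _ecore_status_t example_cid_usage(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc;
	u32 cid;

	rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ... use the cid for ramrods / queue setup ... */

	ecore_cxt_release_cid(p_hwfn, cid);
	return ECORE_SUCCESS;
}
#endif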
/**
 * @brief ecore_cxt_dynamic_ilt_alloc - checks whether the ILT page
 * containing the iid is already allocated;
 * if it is not, the page is allocated.
*
* @param p_hwfn
* @param elem_type
* @param iid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
u32 iid);
/**
 * @brief ecore_cxt_free_ilt_range - frees the ILT pages
 * associated with the element type and iid range passed.
 *
 * @param p_hwfn
 * @param elem_type
 * @param start_iid
 * @param count
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
enum ecore_cxt_elem_type elem_type,
u32 start_iid, u32 count);
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
u32 tid,
u8 ctx_type,
void **task_ctx);
#endif /* _ECORE_CID_ */

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_CXT_API_H__
#define __ECORE_CXT_API_H__
struct ecore_hwfn;
struct ecore_cxt_info {
void *p_cxt;
u32 iid;
enum protocol_type type;
};
#define MAX_TID_BLOCKS 512
struct ecore_tid_mem {
u32 tid_size;
u32 num_tids_per_block;
u32 waste;
u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
};
/**
 * @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
*
*
* @param p_hwfn
* @param p_info in/out
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
/**
* @brief ecore_cxt_get_tid_mem_info
*
* @param p_hwfn
* @param p_info
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
struct ecore_tid_mem *p_info);
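/* Illustrative sketch (editor's addition): looking up the context of an
 * already-acquired cid with ecore_cxt_get_cid_info(). The cid value and the
 * protocol type are placeholders obtained elsewhere (e.g. from
 * ecore_cxt_acquire_cid()).
 */
#if 0 /* example only */
static void example_cid_info(struct ecore_hwfn *p_hwfn, u32 cid)
{
	struct ecore_cxt_info info;

	info.iid = cid;
	info.type = PROTOCOLID_ETH;	/* placeholder protocol */
	if (ecore_cxt_get_cid_info(p_hwfn, &info) == ECORE_SUCCESS) {
		/* info.p_cxt now points to the context memory for this cid */
	}
}
#endif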
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,885 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _DBG_FW_FUNCS_H
#define _DBG_FW_FUNCS_H
/**************************** Public Functions *******************************/
/**
* @brief ecore_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
* arrays.
*
* @param bin_ptr - a pointer to the binary data with debug arrays.
*/
enum dbg_status ecore_dbg_set_bin_ptr(const u8 * const bin_ptr);
/**
* @brief ecore_dbg_set_app_ver - Sets the version of the calling app.
*
* The application should call this function with the TOOLS_VERSION
* it compiles with. Must be called before all other debug functions.
*
* @return error if one of the following holds:
* - the specified app version is not supported
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_set_app_ver(u32 ver);
/**
* @brief ecore_dbg_get_fw_func_ver - Returns the FW func version.
*
* @return the FW func version.
*/
u32 ecore_dbg_get_fw_func_ver(void);
/**
 * @brief ecore_dbg_get_chip_id - Returns the chip ID.
*
* @param p_hwfn - HW device data
*
* @return the chip ID.
*/
enum chip_ids ecore_dbg_get_chip_id(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dbg_bus_reset - Resets the Debug block.
*
* After reset:
* - The last recording is erased.
* - Recording is directed to the internal buffer.
* - Wrap-around recording is selected.
* - All HW blocks are disabled.
* - All Storms are disabled and all SEM filters are cleared.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param one_shot_en - Enable/Disable one-shot recording. If disabled,
* wrap-around recording is used instead.
* @param force_hw_dwords - If set to 0, no. of HW/Storm dwords per cycle is
* chosen automatically based on the enabled inputs.
* Otherwise, no. of HW dwords per cycle is forced to
* the specified value. Valid values: 0/2/4/8.
* @param unify_inputs - If true, all recorded data is associated with a
* single input, as if all data was received from the
* same block. Otherwise, each data unit is associated
* with its original input.
* @param grc_input_en - Enable/Disable recording GRC input. If enabled, the
* GRC input is recorded to the lsb dword of a cycle.
*
* @return error if one of the following holds:
* - the version wasn't set
* - force_hw_dwords is invalid.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool one_shot_en,
u8 force_hw_dwords,
bool unify_inputs,
bool grc_input_en);
/**
* @brief ecore_dbg_bus_set_pci_output - Directs debug output to a PCI buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size_kb - Size of PCI buffer to allocate (in KB). Must be aligned
* to PCI request size.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - the output was already set
* - the PCI buffer size is not aligned to PCI packet size
* - the PCI buffer allocation failed
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_set_pci_output(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 buf_size_kb);
/**
* @brief ecore_dbg_bus_set_nw_output - Directs debug output to the network.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param port_id - Port ID to transmit the debug data on
* @param dest_addr_lo32 - Destination MAC address (for Eth header)
* @param dest_addr_hi16
* @param data_limit_size_kb - Data limit size in KB (valid only for one-shot)
* If set to 0, data limit won't be configured.
* @param send_to_other_engine -If true:
* 1) The NW output will be sent to the DBG block
* of the other engine.
* 2) port_id argument is ignored.
* 3) rcv_from_other_engine should be set to false
* The other engine DBG block should call this
* function with rcv_from_other_engine set to
* true.
* @param rcv_from_other_engine-If true:
* 1) the DBG block receives the NW output sent
* from the other engine DBG block, and sends
* it to a NW port in the current engine
* (according to port_id).
* 2) The src/dest addresses and eth_type
* arguments are ignored.
* 3) send_to_other_engine should be set to false.
* The other engine DBG block should call this
* function with send_to_other_engine set to
* true.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - the output was already set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_set_nw_output(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u32 dest_addr_lo32,
u16 dest_addr_hi16,
u16 data_limit_size_kb,
bool send_to_other_engine,
bool rcv_from_other_engine);
/**
* @brief ecore_dbg_bus_enable_block - Enables recording of the specified block
*
* Each recording cycle contains 4 "units". If the recorded HW data requires up
* to 4 dwords per cycle, each unit is one dword (32 bits). Otherwise, each
* unit is 2 dwords (64 bits).
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - block to be enabled.
* @param line_num - debug line number to select.
* @param cycle_en - 4-bit value. If bit i is set, unit i is enabled.
* @param right_shift - number of units to right the debug data (0-3).
* @param force_valid - 4-bit value. If bit i is set, unit i is forced valid.
* @param force_frame - 4-bit value. If bit i is set, the frame bit of unit i
* is forced.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - block is not valid
* - block was already enabled
* - cycle_en, force_valid or force_frame are wider than 4 bits
* - right_shift is larger than 3
* - cycle unit 0 is enabled, but GRC or timestamp were also enabled.
* - Too many inputs were enabled.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_block(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block,
u8 line_num,
u8 cycle_en,
u8 right_shift,
u8 force_valid,
u8 force_frame);
/**
* @brief ecore_dbg_bus_enable_storm - Enables recording of the specified Storm
*
* @param p_hwfn - HW device data
* @param storm - Storm to be enabled.
* @param storm_mode- Storm mode
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - the specified storm or mode is invalid
* - Storm was already enabled
* - only HW data can be recorded
* - Too many inputs were enabled.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_storm(struct ecore_hwfn *p_hwfn,
enum dbg_storms storm,
enum dbg_bus_storm_modes storm_mode);
/**
* @brief ecore_dbg_bus_enable_timestamp - Enables timestamp recording.
*
* When enabled, the timestamp input is always recorded to the lsb dword of
* a cycle, with HW ID 0.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param valid_en - 3-bit value. The Timestamp will be recorded in a cycle if
* bit i is set and unit i+1 is valid.
* @param frame_en - 3-bit value. The Timestamp will be recorded in a cycle if
* bit i is set and unit i+1 has frame bit set.
* @param tick_len - timestamp tick length in cycles, minus 1. A value of 0
* means one cycle.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - valid_en or frame_en are wider than 4 bits
* - Both timestamp and GRC are enabled.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_timestamp(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 valid_en,
u8 frame_en,
u32 tick_len);
/**
* @brief ecore_dbg_bus_add_eid_range_sem_filter- Add Event ID range SEM filter
*
* @param p_hwfn - HW device data
* @param storm - Storm to be filtered.
* @param min_eid - minimal Event ID to filter on.
* @param max_eid - maximal Event ID to filter on.
*
* @return error if one of the following holds:
* - the specified Storm is invalid
* - the specified Storm wasn't enabled
* - the EID range is not valid
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_add_eid_range_sem_filter(struct ecore_hwfn *p_hwfn,
enum dbg_storms storm,
u8 min_eid,
u8 max_eid);
/**
* @brief ecore_dbg_bus_add_eid_mask_sem_filter - Add Event ID mask SEM filter
*
* @param p_hwfn - HW device data
* @param storm - Storm to be filtered.
* @param eid_val - Event ID value.
* @param eid_mask - Event ID mask. 0's in the mask = don't care bits.
*
* @return error if one of the following holds:
* - the specified Storm is invalid
* - the specified Storm wasn't enabled
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_add_eid_mask_sem_filter(struct ecore_hwfn *p_hwfn,
enum dbg_storms storm,
u8 eid_val,
u8 eid_mask);
/**
* @brief ecore_dbg_bus_add_cid_sem_filter - Adds a CID SEM filter.
*
* @param p_hwfn - HW device data
* @param storm - Storm to be filtered.
* @param cid - CID to filter on.
*
* @return error if one of the following holds:
* - the specified Storm is invalid
* - the specified Storm wasn't enabled
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_add_cid_sem_filter(struct ecore_hwfn *p_hwfn,
enum dbg_storms storm,
u32 cid);
/**
* @brief ecore_dbg_bus_enable_filter - Enables the recording filter.
*
* A filter contains up to 4 constraints. The data is "filtered in" when the
* added constraints hold.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - block to filter on.
* @param const_msg_len - Constant message length (in cycles) to be used for
* message-based filter constraints. If set to 0,
* message length is based only on frame bit received
* from HW (no constant message length).
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - the filter was already enabled
* - block is not valid or not enabled
* - more than 4 dwords are recorded per-cycle (forbids filters)
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block,
u8 const_msg_len);
/**
* @brief ecore_dbg_bus_enable_trigger - Enables the recording trigger.
*
* A trigger contains up to 3 states, where each state contains up to
* 4 constraints. After the constraints of a state hold for a specified number
* of times, the DBG block moves to the next state. If there's no next state,
* the DBG block triggers.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param rec_pre_trigger - if true, recording starts before the trigger.
* if false, recording starts at the trigger.
* @param pre_chunks - max number of chunks to record before the
* trigger (1-47). If set to 0, recording starts
* from time 0. Ignored if rec_pre_trigger is
* false.
* @param rec_post_trigger - if true, recording ends after the trigger.
* if false, recording ends at the trigger.
* @param post_cycles - max number of cycles to record after the
* trigger (0x1-0xffffffff). If set to 0,
* recording ends only when stopped by the user.
* Ignored if rec_post_trigger is false.
* @param filter_pre_trigger - if true, data is filtered before the trigger.
* Ignored if the filter wasn't enabled.
* @param filter_post_trigger - if true, data is filtered after the trigger.
* Ignored if the filter wasn't enabled.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* - the trigger was already enabled
* - more than 4 dwords are recorded per-cycle (forbids triggers)
* - pre_chunks is not in the range 0-47.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_enable_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool rec_pre_trigger,
u8 pre_chunks,
bool rec_post_trigger,
u32 post_cycles,
bool filter_pre_trigger,
bool filter_post_trigger);
/**
* @brief ecore_dbg_bus_add_trigger_state - Adds a trigger state.
*
* Up to 3 trigger states can be added, where each state contains up to
* 4 constraints. After the constraints of a state hold for the specified
* number of times, the DBG block moves to the next state. If there's no next
* state, the DBG block triggers.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - block to trigger on.
* @param const_msg_len - Constant message length (in cycles) to be used for
* message-based filter constraints. If set to 0,
* message length is based only on frame bit received
* from HW (no constant message length).
* @param count_to_next - The number of times the constraints of the state
* should hold before moving to the next state. Must be
* non-zero.
*
* @return error if one of the following holds:
* - The trigger wasn't enabled.
* - more than 3 trigger states were added
* - block is not valid or not enabled
* - count_to_next is 0
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_add_trigger_state(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block,
u8 const_msg_len,
u16 count_to_next);
/**
* @brief ecore_dbg_bus_add_constraint - Adds a filter/trigger constraint.
*
 * The constraint is added to a filter or trigger state, whichever was added
 * last. The filter/trigger happens if both of the following hold:
 * 1. All mandatory constraints are true.
 * 2. At least one optional (non-mandatory) constraint is true.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param op - constraint operation
* @param data - 32-bit data to compare with the recorded
* data.
* @param data_mask - 32-bit mask for data comparison. If mask bit
* i is 1, data bit i is compared, otherwise
* it's ignored.
* For eq/ne operations: any mask can be used.
* For other operations: the mask must be
* non-zero, and the 1's in the mask must be
* continuous.
* @param compare_frame - indicates if the frame bit should be
* compared. Must be false for all operations
* other than eq/ne.
* @param frame_bit - frame bit to compare with the recorded data
* (0/1). ignored if compare_frame is false.
* @param cycle_offset - offset in cycles from the beginning of the
* message, where cycle = 4 dwords.
* @param dword_offset_in_cycle - offset in dwords from the beginning of the
* cycle (0-3).
* @param is_mandatory - indicates if this constraint is mandatory
* (true) or optional (false). The data is
* filtered-in if all mandatory constraints hold
* AND at least one optional constraint (if
* added) holds.
*
* @return error if one of the following holds:
 * - a filter or trigger state wasn't enabled
* - all 4 filter constraints were added already
* - the op string is invalid
* - the data mask is invalid.
* - frame bit is not 0/1.
* - cycle_offset and dword_offset are not in the range 0-3.
* - compare_frame is true and operation is not eq/ne.
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_add_constraint(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum dbg_bus_constraint_ops constraint_op,
u32 data,
u32 data_mask,
bool compare_frame,
u8 frame_bit,
u8 cycle_offset,
u8 dword_offset_in_cycle,
bool is_mandatory);
/**
* @brief ecore_dbg_bus_start - Starts the recording.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
*
* @return error if one of the following holds:
* - the Debug block wasn't reset since last recording
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_start(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_dbg_bus_stop - Stops the recording and flushes the internal
* buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
*
* @return error if a recording is not in progress, ok otherwise.
*/
enum dbg_status ecore_dbg_bus_stop(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_dbg_bus_get_dump_buf_size - Returns the required buffer size
* for Debug Bus recording.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: the required size (in dwords) of the buffer for
* dumping the recorded Debug Bus data. If recording to the
* internal buffer, the size of the internal buffer is
* returned. If recording to PCI, the size of the PCI buffer
* is returned. Otherwise, 0 is returned.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_bus_dump - Dumps the recorded Debug Bus data into the
* specified buffer.
*
* The dumped data starts with a header. If recording to NW, only a header is
* dumped. The dumped size is assigned to num_dumped_dwords.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to copy the recorded data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - a recording wasn't started/stopped
* - the specified dump buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_bus_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
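/* Illustrative sketch (editor's addition): the recording flow implied by the
 * declarations above - reset the DBG block, enable an input, start, stop and
 * dump. Real callers first set the app version via ecore_dbg_set_app_ver()
 * and size the buffer with ecore_dbg_bus_get_dump_buf_size(); the block id,
 * line number and enable masks below are placeholders, and intermediate
 * error checking is omitted.
 */
#if 0 /* example only */
static enum dbg_status example_bus_record(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 *dump_buf, u32 buf_size_dwords)
{
	u32 dumped;

	ecore_dbg_bus_reset(p_hwfn, p_ptt, true /* one-shot */, 0, false, false);
	ecore_dbg_bus_enable_block(p_hwfn, p_ptt, BLOCK_PGLUE_B /* placeholder */,
				   0 /* line */, 0xf, 0, 0, 0);
	ecore_dbg_bus_start(p_hwfn, p_ptt);
	/* ... let traffic run ... */
	ecore_dbg_bus_stop(p_hwfn, p_ptt);
	return ecore_dbg_bus_dump(p_hwfn, p_ptt, dump_buf,
				  buf_size_dwords, &dumped);
}
#endif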
/**
* @brief ecore_dbg_grc_config - Sets the value of a GRC parameter.
*
* @param p_hwfn - HW device data
* @param grc_param - GRC parameter
* @param val - Value to set.
* @return error if one of the following holds:
* - the version wasn't set
* - grc_param is invalid
* - val is outside the allowed boundaries
*/
enum dbg_status ecore_dbg_grc_config(struct ecore_hwfn *p_hwfn,
enum dbg_grc_params grc_param,
u32 val);
/**
* @brief ecore_dbg_grc_set_params_default - Reverts all GRC parameters to their
* default value.
*
* @param p_hwfn - HW device data
*/
void ecore_dbg_grc_set_params_default(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dbg_grc_get_dump_buf_size - Returns the required buffer size
* for GRC Dump.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for GRC Dump data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_grc_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_grc_dump - Dumps GRC data into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the collected GRC data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified dump buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_grc_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
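/* Illustrative sketch (editor's addition): the size-then-dump pattern shared
 * by all the dump APIs in this header, shown here for GRC. The allocation
 * macros and the status code used on allocation failure follow the ecore
 * OSAL conventions and are assumptions for the example.
 */
#if 0 /* example only */
static enum dbg_status example_grc_dump(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt)
{
	enum dbg_status rc;
	u32 size_dwords, dumped_dwords;
	u32 *buf;

	rc = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	buf = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, size_dwords * sizeof(u32));
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = ecore_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords, &dumped_dwords);
	/* ... store/parse 'dumped_dwords' dwords from 'buf', then free it ... */
	OSAL_FREE(p_hwfn->p_dev, buf);
	return rc;
}
#endif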
/**
* @brief ecore_dbg_idle_chk_get_dump_buf_size - Returns the required buffer
* size for idle check results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for idle check data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_idle_chk_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_idle_chk_dump - Performs idle check and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the idle check data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_idle_chk_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief ecore_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer
* size for mcp trace results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the trace data in MCP scratchpad contain an invalid signature
* - the bundle ID in NVRAM is invalid
* - the trace meta data cannot be found (in NVRAM or image file)
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_mcp_trace_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_mcp_trace_dump - Performs mcp trace and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the mcp trace data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - the trace data in MCP scratchpad contain an invalid signature
* - the bundle ID in NVRAM is invalid
* - the trace meta data cannot be found (in NVRAM or image file)
* - the trace meta data cannot be read (from NVRAM or image file)
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_mcp_trace_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief ecore_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer
 * size for reg fifo results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_reg_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_reg_fifo_dump - Reads the reg fifo and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the reg fifo data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_reg_fifo_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief ecore_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer
* size for the IGU fifo results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for IGU fifo data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_igu_fifo_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results
* into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the IGU fifo data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_igu_fifo_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
 * @brief ecore_dbg_protection_override_get_dump_buf_size - Returns the required
* buffer size for protection override window results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for protection
* override data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_protection_override_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_protection_override_dump - Reads protection override window
* entries and writes the results into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the protection override data
* into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* - DMAE transaction failed
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_protection_override_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief ecore_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
* size for FW Asserts results.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_fw_asserts_get_dump_buf_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *buf_size);
/**
* @brief ecore_dbg_fw_asserts_dump - Reads the FW Asserts and writes the
* results into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param dump_buf - Pointer to write the FW Asserts data into.
* @param buf_size_in_dwords - Size of the specified buffer in dwords.
* @param num_dumped_dwords - OUT: number of dumped dwords.
*
* @return error if one of the following holds:
* - the version wasn't set
* - the specified buffer is too small
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_fw_asserts_dump(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *dump_buf,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords);
/**
* @brief ecore_dbg_read_attn - Reads the attention registers of the specified
* block and type, and writes the results into the specified buffer.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - Block ID.
* @param attn_type - Attention type.
* @param clear_status - Indicates if the attention status should be cleared.
* @param results - OUT: Pointer to write the read results into
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_read_attn(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block,
enum dbg_attn_type attn_type,
bool clear_status,
struct dbg_attn_block_result *results);
/**
* @brief ecore_dbg_print_attn - Prints attention registers values in the
* specified results struct.
*
* @param p_hwfn - HW device data
* @param results - Pointer to the attention read results
*
* @return error if one of the following holds:
* - the version wasn't set
* Otherwise, returns ok.
*/
enum dbg_status ecore_dbg_print_attn(struct ecore_hwfn *p_hwfn,
struct dbg_attn_block_result *results);
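/* Illustrative sketch (editor's addition): reading and printing the attention
 * registers of a single block with the two helpers above. The block id and
 * attention type are placeholders.
 */
#if 0 /* example only */
static void example_read_attn(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct dbg_attn_block_result results;

	if (ecore_dbg_read_attn(p_hwfn, p_ptt, BLOCK_PGLUE_B /* placeholder */,
				ATTN_TYPE_INTERRUPT, false /* keep status */,
				&results) == DBG_STATUS_OK)
		ecore_dbg_print_attn(p_hwfn, &results);
}
#endif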
/**
* @brief ecore_is_block_in_reset - Returns true if the specified block is in
* reset, false otherwise.
*
* @param p_hwfn - HW device data
* @param p_ptt - Ptt window used for writing the registers.
* @param block - Block ID.
*
* @return true if the specified block is in reset, false otherwise.
*/
bool ecore_is_block_in_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum block_id block);
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_DCBX_H__
#define __ECORE_DCBX_H__
#include "ecore.h"
#include "ecore_mcp.h"
#include "mcp_public.h"
#include "reg_addr.h"
#include "ecore_hw.h"
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
struct dcbx_local_params local_admin;
struct ecore_dcbx_results results;
struct dcb_dscp_map dscp_map;
bool dscp_nig_update;
struct dcbx_mib operational;
struct dcbx_mib remote;
struct ecore_dcbx_set set;
struct ecore_dcbx_get get;
u8 dcbx_cap;
u16 iwarp_port;
};
struct ecore_dcbx_mib_meta_data {
struct lldp_config_params_s *lldp_local;
struct lldp_status_params_s *lldp_remote;
struct dcbx_local_params *local_admin;
struct dcb_dscp_map *dscp_map;
struct dcbx_mib *mib;
osal_size_t size;
u32 addr;
};
/* ECORE local interface routines */
enum _ecore_status_t
ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *,
enum ecore_mib_read_type);
enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *,
struct ecore_ptt *);
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn);
void ecore_dcbx_info_free(struct ecore_hwfn *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
#endif /* __ECORE_DCBX_H__ */

View File

@ -0,0 +1,219 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_DCBX_API_H__
#define __ECORE_DCBX_API_H__
#include "ecore_status.h"
#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
enum ecore_mib_read_type {
ECORE_DCBX_OPERATIONAL_MIB,
ECORE_DCBX_REMOTE_MIB,
ECORE_DCBX_LOCAL_MIB,
ECORE_DCBX_REMOTE_LLDP_MIB,
ECORE_DCBX_LOCAL_LLDP_MIB
};
struct ecore_dcbx_app_data {
bool enable; /* DCB enabled */
u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
bool dscp_enable; /* DSCP enabled */
u8 dscp_val; /* DSCP value */
};
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
DCBX_PROTOCOL_FCOE,
DCBX_PROTOCOL_ROCE,
DCBX_PROTOCOL_ROCE_V2,
DCBX_PROTOCOL_ETH,
DCBX_PROTOCOL_IWARP,
DCBX_MAX_PROTOCOL_TYPE
};
#define ECORE_LLDP_CHASSIS_ID_STAT_LEN 4
#define ECORE_LLDP_PORT_ID_STAT_LEN 4
#define ECORE_DCBX_MAX_APP_PROTOCOL 32
#define ECORE_MAX_PFC_PRIORITIES 8
#define ECORE_DCBX_DSCP_SIZE 64
struct ecore_dcbx_lldp_remote {
u32 peer_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
u32 peer_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
bool enable_rx;
bool enable_tx;
u32 tx_interval;
u32 max_credit;
};
struct ecore_dcbx_lldp_local {
u32 local_chassis_id[ECORE_LLDP_CHASSIS_ID_STAT_LEN];
u32 local_port_id[ECORE_LLDP_PORT_ID_STAT_LEN];
};
struct ecore_dcbx_app_prio {
u8 roce;
u8 roce_v2;
u8 fcoe;
u8 iscsi;
u8 eth;
};
struct ecore_dbcx_pfc_params {
bool willing;
bool enabled;
u8 prio[ECORE_MAX_PFC_PRIORITIES];
u8 max_tc;
};
enum ecore_dcbx_sf_ieee_type {
ECORE_DCBX_SF_IEEE_ETHTYPE,
ECORE_DCBX_SF_IEEE_TCP_PORT,
ECORE_DCBX_SF_IEEE_UDP_PORT,
ECORE_DCBX_SF_IEEE_TCP_UDP_PORT
};
struct ecore_app_entry {
bool ethtype;
enum ecore_dcbx_sf_ieee_type sf_ieee;
bool enabled;
u8 prio;
u16 proto_id;
enum dcbx_protocol_type proto_type;
};
struct ecore_dcbx_params {
struct ecore_app_entry app_entry[ECORE_DCBX_MAX_APP_PROTOCOL];
u16 num_app_entries;
bool app_willing;
bool app_valid;
bool app_error;
bool ets_willing;
bool ets_enabled;
bool ets_cbs;
bool valid; /* Indicate validity of params */
u8 ets_pri_tc_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_bw_tbl[ECORE_MAX_PFC_PRIORITIES];
u8 ets_tc_tsa_tbl[ECORE_MAX_PFC_PRIORITIES];
struct ecore_dbcx_pfc_params pfc;
u8 max_ets_tc;
};
struct ecore_dcbx_admin_params {
struct ecore_dcbx_params params;
bool valid; /* Indicate validity of params */
};
struct ecore_dcbx_remote_params {
struct ecore_dcbx_params params;
bool valid; /* Indicate validity of params */
};
struct ecore_dcbx_operational_params {
struct ecore_dcbx_app_prio app_prio;
struct ecore_dcbx_params params;
bool valid; /* Indicate validity of params */
bool enabled;
bool ieee;
bool cee;
bool local;
u32 err;
};
struct ecore_dcbx_dscp_params {
bool enabled;
u8 dscp_pri_map[ECORE_DCBX_DSCP_SIZE];
};
struct ecore_dcbx_get {
struct ecore_dcbx_operational_params operational;
struct ecore_dcbx_lldp_remote lldp_remote;
struct ecore_dcbx_lldp_local lldp_local;
struct ecore_dcbx_remote_params remote;
struct ecore_dcbx_admin_params local;
struct ecore_dcbx_dscp_params dscp;
};
#define ECORE_DCBX_VERSION_DISABLED 0
#define ECORE_DCBX_VERSION_IEEE 1
#define ECORE_DCBX_VERSION_CEE 2
#define ECORE_DCBX_VERSION_DYNAMIC 3
struct ecore_dcbx_set {
#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
#define ECORE_DCBX_OVERRIDE_PFC_CFG (1 << 1)
#define ECORE_DCBX_OVERRIDE_ETS_CFG (1 << 2)
#define ECORE_DCBX_OVERRIDE_APP_CFG (1 << 3)
#define ECORE_DCBX_OVERRIDE_DSCP_CFG (1 << 4)
u32 override_flags;
bool enabled;
struct ecore_dcbx_admin_params config;
u32 ver_num;
struct ecore_dcbx_dscp_params dscp;
};
struct ecore_dcbx_results {
bool dcbx_enabled;
u8 pf_id;
struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
};
struct ecore_dcbx_app_metadata {
enum dcbx_protocol_type id;
char *name;
enum ecore_pci_personality personality;
};
enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *,
struct ecore_dcbx_get *,
enum ecore_mib_read_type);
enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *,
struct ecore_dcbx_set *);
enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *,
struct ecore_ptt *,
struct ecore_dcbx_set *,
bool);
static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_ISCSI, "ISCSI", ECORE_PCI_ISCSI},
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
{DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
{DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
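/* Illustrative sketch (editor's addition): querying the operational DCBX
 * parameters and pushing back a modified local configuration through the
 * routines declared above. The PFC change and the error handling are
 * placeholders chosen only for the example.
 */
#if 0 /* example only */
static enum _ecore_status_t example_dcbx_config(struct ecore_hwfn *p_hwfn,
						struct ecore_ptt *p_ptt)
{
	struct ecore_dcbx_get get_params;
	struct ecore_dcbx_set set_params;
	enum _ecore_status_t rc;

	rc = ecore_dcbx_query_params(p_hwfn, &get_params,
				     ECORE_DCBX_OPERATIONAL_MIB);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_dcbx_get_config_params(p_hwfn, &set_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	set_params.override_flags = ECORE_DCBX_OVERRIDE_PFC_CFG;
	set_params.config.params.pfc.enabled = true;	/* placeholder change */

	/* 'true' requests a hardware commit of the new configuration */
	return ecore_dcbx_config_params(p_hwfn, p_ptt, &set_params, true);
}
#endif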
#endif /* __ECORE_DCBX_API_H__ */

File diff suppressed because it is too large

View File

@ -0,0 +1,713 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_DEV_API_H__
#define __ECORE_DEV_API_H__
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_int_api.h"
struct ecore_wake_info {
u32 wk_info;
u32 wk_details;
u32 wk_pkt_len;
u8 wk_buffer[256];
};
/**
* @brief ecore_init_dp - initialize the debug level
*
* @param p_dev
* @param dp_module
* @param dp_level
* @param dp_ctx
*/
void ecore_init_dp(struct ecore_dev *p_dev,
u32 dp_module,
u8 dp_level,
void *dp_ctx);
/**
* @brief ecore_init_struct - initialize the device structure to
* its defaults
*
* @param p_dev
*/
void ecore_init_struct(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_free -
*
* @param p_dev
*/
void ecore_resc_free(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_alloc -
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
/**
* @brief ecore_resc_setup -
*
* @param p_dev
*/
void ecore_resc_setup(struct ecore_dev *p_dev);
enum ecore_override_force_load {
ECORE_OVERRIDE_FORCE_LOAD_NONE,
ECORE_OVERRIDE_FORCE_LOAD_ALWAYS,
ECORE_OVERRIDE_FORCE_LOAD_NEVER,
};
struct ecore_drv_load_params {
/* Indicates whether the driver is running over a crash kernel.
* As part of the load request, this will be used for providing the
* driver role to the MFW.
* In case of a crash kernel over PDA - this should be set to false.
*/
bool is_crash_kernel;
/* The timeout value that the MFW should use when locking the engine for
* the driver load process.
* A value of '0' means the default value, and '255' means no timeout.
*/
u8 mfw_timeout_val;
#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
/* Avoid engine reset when first PF loads on it */
bool avoid_eng_reset;
/* Allow overriding the default force load behavior */
enum ecore_override_force_load override_force_load;
};
struct ecore_hw_init_params {
/* Tunneling parameters */
struct ecore_tunnel_info *p_tunn;
bool b_hw_start;
/* Interrupt mode [msix, inta, etc.] to use */
enum ecore_int_mode int_mode;
/* NPAR tx switching to be used for vports configured for tx-switching */
bool allow_npar_tx_switch;
/* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
/* Driver load parameters */
struct ecore_drv_load_params *p_drv_load_params;
};
/**
* @brief ecore_hw_init -
*
* @param p_dev
* @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params);
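/* Illustrative sketch (editor's addition): filling the two parameter
 * structures above before starting the HW. The interrupt mode, firmware
 * pointer and load parameters are placeholder choices for the example.
 */
#if 0 /* example only */
static enum _ecore_status_t example_hw_start(struct ecore_dev *p_dev,
					     const u8 *fw_data)
{
	struct ecore_drv_load_params load_params = {0};
	struct ecore_hw_init_params init_params = {0};

	load_params.is_crash_kernel = false;
	load_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
	load_params.avoid_eng_reset = false;
	load_params.override_force_load = ECORE_OVERRIDE_FORCE_LOAD_NONE;

	init_params.b_hw_start = true;
	init_params.int_mode = ECORE_INT_MODE_MSIX;	/* assumed mode */
	init_params.allow_npar_tx_switch = true;
	init_params.bin_fw_data = fw_data;
	init_params.p_drv_load_params = &load_params;

	return ecore_hw_init(p_dev, &init_params);
}
#endif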
/**
* @brief ecore_hw_timers_stop_all -
*
* @param p_dev
*
* @return void
*/
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_stop -
*
* @param p_dev
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev);
/**
 * @brief ecore_hw_stop_fastpath - should be called in case
 * slowpath is still required for the device,
 * but fastpath is not.
*
* @param p_dev
*
*/
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_hibernate_prepare - should be called when
* the system is going into the hibernate state
*
* @param p_dev
*
*/
void ecore_hw_hibernate_prepare(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_hibernate_resume - should be called when the system is
* resuming from the D3 power state and before calling ecore_hw_init.
*
* @param p_dev
*
*/
void ecore_hw_hibernate_resume(struct ecore_dev *p_dev);
/**
* @brief ecore_hw_start_fastpath - restart fastpath traffic,
* only if hw_stop_fastpath was called
*
* @param p_hwfn
*
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
enum ecore_hw_prepare_result {
ECORE_HW_PREPARE_SUCCESS,
/* FAILED results indicate probe has failed & cleaned up */
ECORE_HW_PREPARE_FAILED_ENG2,
ECORE_HW_PREPARE_FAILED_ME,
ECORE_HW_PREPARE_FAILED_MEM,
ECORE_HW_PREPARE_FAILED_DEV,
ECORE_HW_PREPARE_FAILED_NVM,
/* BAD results indicate that the probe passed even though something went
* wrong; trying to actually use the device [i.e., hw_init()] might have
* dire repercussions.
*/
ECORE_HW_PREPARE_BAD_IOV,
ECORE_HW_PREPARE_BAD_MCP,
ECORE_HW_PREPARE_BAD_IGU,
};
struct ecore_hw_prepare_params {
/* Personality to initialize */
int personality;
/* Force the driver's default resource allocation */
bool drv_resc_alloc;
/* Check the reg_fifo after any register access */
bool chk_reg_fifo;
/* Request the MFW to initiate PF FLR */
bool initiate_pf_flr;
/* The OS Epoch time in seconds */
u32 epoch;
/* Allow the MFW to collect a crash dump */
bool allow_mdump;
/* Allow prepare to pass even if some initializations are failing.
* If set, the `p_relaxed_res' field will be set with the result,
* and might allow the probe to pass even if there are certain issues.
*/
bool b_relaxed_probe;
enum ecore_hw_prepare_result p_relaxed_res;
};
/**
* @brief ecore_hw_prepare -
*
* @param p_dev
* @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
struct ecore_hw_prepare_params *p_params);
/**
* @brief ecore_hw_remove -
*
* @param p_dev
*/
void ecore_hw_remove(struct ecore_dev *p_dev);
/**
* @brief ecore_set_nwuf_reg -
*
* @param p_dev
* @param b_enable - enable/disable the wake-up pattern
* @param reg_idx - Index of the pattern register
* @param pattern_size - size of the pattern
* @param crc - CRC value of pattern & mask
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_set_nwuf_reg(struct ecore_dev *p_dev,
const bool b_enable,
u32 reg_idx,
u32 pattern_size,
u32 crc);
/**
* @brief ecore_get_wake_info - get magic packet buffer
*
* @param p_dev
* @param wake_info - pointer to ecore_wake_info buffer
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_get_wake_info(struct ecore_dev *p_dev,
struct ecore_wake_info *wake_info);
/**
* @brief ecore_wol_buffer_clear - Clear the magic packet buffer
*
* @param p_dev
*
* @return void
*/
void ecore_wol_buffer_clear(struct ecore_dev *p_dev);
/**
* @brief ecore_ptt_acquire - Allocate a PTT window
*
* Should be called at the entry point to the driver (at the beginning of an
* exported function)
*
* @param p_hwfn
*
* @return struct ecore_ptt
*/
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_release - Release PTT Window
*
* Should be called at the end of a flow - at the end of the function that
* acquired the PTT.
*
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_ptt_release(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
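/*
 * Illustrative sketch only: the acquire/use/release pattern for a PTT window.
 * The GRC read helper (ecore_rd, declared in ecore_hw.h) and the hw_addr
 * argument are used here purely as placeholders.
 */
#if 0
static void qlnx_example_ptt_usage(struct ecore_hwfn *p_hwfn, u32 hw_addr)
{
	struct ecore_ptt *p_ptt;
	u32 val;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return; /* no free PTT entry was available */

	/* GRC accesses go through the acquired window */
	val = ecore_rd(p_hwfn, p_ptt, hw_addr);
	(void)val;

	ecore_ptt_release(p_hwfn, p_ptt);
}
#endif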
struct ecore_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
u64 rx_ucast_bytes;
u64 rx_mcast_bytes;
u64 rx_bcast_bytes;
u64 rx_ucast_pkts;
u64 rx_mcast_pkts;
u64 rx_bcast_pkts;
u64 mftag_filter_discards;
u64 mac_filter_discards;
u64 tx_ucast_bytes;
u64 tx_mcast_bytes;
u64 tx_bcast_bytes;
u64 tx_ucast_pkts;
u64 tx_mcast_pkts;
u64 tx_bcast_pkts;
u64 tx_err_drop_pkts;
u64 tpa_coalesced_pkts;
u64 tpa_coalesced_events;
u64 tpa_aborts_num;
u64 tpa_not_coalesced_pkts;
u64 tpa_coalesced_bytes;
/* port */
u64 rx_64_byte_packets;
u64 rx_65_to_127_byte_packets;
u64 rx_128_to_255_byte_packets;
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
u64 rx_pfc_frames;
u64 rx_align_errors;
u64 rx_carrier_errors;
u64 rx_oversize_packets;
u64 rx_jabbers;
u64 rx_undersize_packets;
u64 rx_fragments;
u64 tx_64_byte_packets;
u64 tx_65_to_127_byte_packets;
u64 tx_128_to_255_byte_packets;
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
u64 rx_mac_uc_packets;
u64 rx_mac_mc_packets;
u64 rx_mac_bc_packets;
u64 rx_mac_frames_ok;
u64 tx_mac_bytes;
u64 tx_mac_uc_packets;
u64 tx_mac_mc_packets;
u64 tx_mac_bc_packets;
u64 tx_mac_ctrl_frames;
};
struct ecore_eth_stats_bb {
u64 rx_1519_to_1522_byte_packets;
u64 rx_1519_to_2047_byte_packets;
u64 rx_2048_to_4095_byte_packets;
u64 rx_4096_to_9216_byte_packets;
u64 rx_9217_to_16383_byte_packets;
u64 tx_1519_to_2047_byte_packets;
u64 tx_2048_to_4095_byte_packets;
u64 tx_4096_to_9216_byte_packets;
u64 tx_9217_to_16383_byte_packets;
u64 tx_lpi_entry_count;
u64 tx_total_collisions;
};
struct ecore_eth_stats_ah {
u64 rx_1519_to_max_byte_packets;
u64 tx_1519_to_max_byte_packets;
};
struct ecore_eth_stats {
struct ecore_eth_stats_common common;
union {
struct ecore_eth_stats_bb bb;
struct ecore_eth_stats_ah ah;
};
};
enum ecore_dmae_address_type_t {
ECORE_DMAE_ADDRESS_HOST_VIRT,
ECORE_DMAE_ADDRESS_HOST_PHYS,
ECORE_DMAE_ADDRESS_GRC
};
/* Flags for the DMAE operation. If the ECORE_DMAE_FLAG_RW_REPL_SRC flag is
* set, the source is a block of length DMAE_MAX_RW_SIZE, and the destination
* is larger, then the source block will be duplicated as many times as
* required to fill the destination block. This is used mostly to write a
* zeroed buffer to a destination address using DMA.
*/
#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define ECORE_DMAE_FLAG_VF_SRC 0x00000002
#define ECORE_DMAE_FLAG_VF_DST 0x00000004
#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008
struct ecore_dmae_params {
u32 flags; /* consists of ECORE_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
};
/**
* @brief ecore_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param grc_addr (dmae_data_offset)
* @param size_in_dwords
* @param flags (one of the flags defined above)
*/
enum _ecore_status_t
ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 source_addr,
u32 grc_addr,
u32 size_in_dwords,
u32 flags);
/**
* @brief ecore_dmae_grc2host - Read data from the DMAE data offset
* to the destination address using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param grc_addr (dmae_data_offset)
* @param dest_addr
* @param size_in_dwords
* @param flags - one of the flags defined above
*/
enum _ecore_status_t
ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 grc_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
u32 flags);
/**
* @brief ecore_dmae_host2host - copy data from a source address
* to a destination address (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
enum _ecore_status_t
ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords,
struct ecore_dmae_params *p_params);
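/*
 * Illustrative sketch only: a host-to-host DMAE copy into a VF's memory (the
 * SRIOV case mentioned above). The vf_id and the physical addresses are
 * hypothetical inputs.
 */
#if 0
static enum _ecore_status_t
qlnx_example_dmae_to_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			dma_addr_t src_phys, dma_addr_t dst_phys,
			u32 size_in_dwords, u8 vf_id)
{
	struct ecore_dmae_params params = { 0 };

	params.flags = ECORE_DMAE_FLAG_VF_DST; /* destination belongs to a VF */
	params.dst_vfid = vf_id;

	return ecore_dmae_host2host(p_hwfn, p_ptt, src_phys, dst_phys,
				    size_in_dwords, &params);
}
#endif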
/**
* @brief ecore_chain_alloc - Allocate and initialize a chain
*
* @param p_dev
* @param intended_use
* @param mode
* @param cnt_type
* @param num_elems
* @param elem_size
* @param p_chain
* @param ext_pbl
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_use_mode intended_use,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
osal_size_t elem_size,
struct ecore_chain *p_chain,
struct ecore_chain_ext_pbl *ext_pbl);
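/*
 * Illustrative sketch only: allocating a PBL-based chain with a 16-bit
 * element counter. The ECORE_CHAIN_* enum values are assumed to come from
 * ecore_chain.h, and the element count/size are hypothetical.
 */
#if 0
static enum _ecore_status_t
qlnx_example_chain_alloc(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
	return ecore_chain_alloc(p_dev,
				 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* assumed */
				 ECORE_CHAIN_MODE_PBL,               /* assumed */
				 ECORE_CHAIN_CNT_TYPE_U16,           /* assumed */
				 1024,        /* num_elems */
				 sizeof(u64), /* elem_size */
				 p_chain,
				 OSAL_NULL    /* no external PBL */);
}
#endif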
/**
* @brief ecore_chain_free - Free chain DMA memory
*
* @param p_dev
* @param p_chain
*/
void ecore_chain_free(struct ecore_dev *p_dev,
struct ecore_chain *p_chain);
/**
* @brief ecore_fw_l2_queue - Get absolute L2 queue ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
u16 src_id,
u16 *dst_id);
/**
* @brief ecore_fw_vport - Get absolute vport ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
* @brief ecore_fw_rss_eng - Get absolute RSS engine ID
*
* @param p_hwfn
* @param src_id - relative to p_hwfn
* @param dst_id - absolute per engine
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
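/*
 * Illustrative sketch only: translating hwfn-relative IDs into their absolute,
 * per-engine values with the helpers above. rel_qid and rel_vport are
 * hypothetical relative IDs.
 */
#if 0
static enum _ecore_status_t
qlnx_example_abs_ids(struct ecore_hwfn *p_hwfn, u16 rel_qid, u8 rel_vport)
{
	enum _ecore_status_t rc;
	u16 abs_qid;
	u8 abs_vport;

	rc = ecore_fw_l2_queue(p_hwfn, rel_qid, &abs_qid);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, rel_vport, &abs_vport);
	return rc;
}
#endif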
/**
* @brief ecore_llh_add_mac_filter - configures a MAC filter in llh
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to add
*/
enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_filter);
/**
* @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh
*
* @param p_hwfn
* @param p_ptt
* @param p_filter - MAC to remove
*/
void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_filter);
enum ecore_llh_port_filter_type_t {
ECORE_LLH_FILTER_ETHERTYPE,
ECORE_LLH_FILTER_TCP_SRC_PORT,
ECORE_LLH_FILTER_TCP_DEST_PORT,
ECORE_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
ECORE_LLH_FILTER_UDP_SRC_PORT,
ECORE_LLH_FILTER_UDP_DEST_PORT,
ECORE_LLH_FILTER_UDP_SRC_AND_DEST_PORT
};
/**
* @brief ecore_llh_add_protocol_filter - configures a protocol filter in llh
*
* @param p_hwfn
* @param p_ptt
* @param source_port_or_eth_type - source port or ethertype to add
* @param dest_port - destination port to add
* @param type - type of filters and comparing
*/
enum _ecore_status_t
ecore_llh_add_protocol_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 source_port_or_eth_type,
u16 dest_port,
enum ecore_llh_port_filter_type_t type);
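/*
 * Illustrative sketch only: steering a UDP destination port to this function
 * with an LLH protocol filter. Port 4789 (commonly VXLAN) is used purely as an
 * example, and passing 0 as the source port assumes that field is ignored for
 * the UDP_DEST_PORT filter type.
 */
#if 0
static enum _ecore_status_t
qlnx_example_add_udp_filter(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	return ecore_llh_add_protocol_filter(p_hwfn, p_ptt,
					     0    /* source port - unused */,
					     4789 /* destination port */,
					     ECORE_LLH_FILTER_UDP_DEST_PORT);
}
#endif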
/**
* @brief ecore_llh_remove_protocol_filter - remove a protocol filter in llh
*
* @param p_hwfn
* @param p_ptt
* @param source_port_or_eth_type - source port or ethertype to remove
* @param dest_port - destination port to remove
* @param type - type of filters and comparing
*/
void
ecore_llh_remove_protocol_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 source_port_or_eth_type,
u16 dest_port,
enum ecore_llh_port_filter_type_t type);
/**
* @brief ecore_llh_clear_all_filters - removes all MAC filters from llh
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_llh_set_function_as_default - set the function as default per port
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t
ecore_llh_set_function_as_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Cleanup of previous driver remains prior to load
*
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
* @param is_vf - true iff cleanup is made for a VF.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
/**
* @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
* Tx queues. Coalescing can be configured up to 511 microseconds, but with
* varying accuracy [the bigger the value the less accurate], up to an error
* of 3 usec for the highest values.
* While the API allows setting coalescing per-qid, all queues sharing a SB
* should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff],
* otherwise the configuration would break.
*
* @param p_hwfn
* @param rx_coal - Rx Coalesce value in micro seconds.
* @param tx_coal - TX Coalesce value in micro seconds.
* @param p_handle
*
* @return enum _ecore_status_t
**/
enum _ecore_status_t
ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
u16 tx_coal, void *p_handle);
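/*
 * Illustrative sketch only: setting Rx/Tx interrupt coalescing for a queue.
 * p_rx_handle is assumed to be the queue handle the upper layer received when
 * the queue was started; both values are in microseconds and were chosen to
 * stay in the same 0-0x7f range, as required for queues sharing a SB.
 */
#if 0
static enum _ecore_status_t
qlnx_example_set_coalesce(struct ecore_hwfn *p_hwfn, void *p_rx_handle)
{
	return ecore_set_queue_coalesce(p_hwfn, 64 /* rx_coal, usecs */,
					96 /* tx_coal, usecs */, p_rx_handle);
}
#endif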
/**
* @brief - Recalculate feature distributions based on HW resources and
* user inputs. Currently this affects RDMA_CNQ, PF_L2_QUE and VF_L2_QUE.
* As a result, this must not be called while RDMA is active or while VFs
* are enabled.
*
* @param p_hwfn
*/
void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_change_pci_hwfn - Enable or disable PCI BUS MASTER
*
* @param p_hwfn
* @param p_ptt
* @param enable - true/false
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 enable);
#endif

View File

@ -0,0 +1,64 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_FCOE_H__
#define __ECORE_FCOE_H__
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_fcoe.h"
#include "ecore_fcoe_api.h"
struct ecore_fcoe_info {
osal_spinlock_t lock;
osal_list_t free_list;
};
enum _ecore_status_t ecore_fcoe_alloc(struct ecore_hwfn *p_hwfn);
void ecore_fcoe_setup(struct ecore_hwfn *p_hwfn);
void ecore_fcoe_free(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t
ecore_sp_fcoe_conn_offload(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
enum _ecore_status_t
ecore_sp_fcoe_conn_destroy(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
#endif /*__ECORE_FCOE_H__*/

View File

@ -0,0 +1,139 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_FCOE_API_H__
#define __ECORE_FCOE_API_H__
#include "ecore_sp_api.h"
struct ecore_fcoe_conn {
osal_list_entry_t list_entry;
bool free_on_delete;
u16 conn_id;
u32 icid;
u32 fw_cid;
u8 layer_code;
dma_addr_t sq_pbl_addr;
dma_addr_t sq_curr_page_addr;
dma_addr_t sq_next_page_addr;
dma_addr_t xferq_pbl_addr;
void *xferq_pbl_addr_virt_addr;
dma_addr_t xferq_addr[4];
void *xferq_addr_virt_addr[4];
dma_addr_t confq_pbl_addr;
void *confq_pbl_addr_virt_addr;
dma_addr_t confq_addr[2];
void *confq_addr_virt_addr[2];
dma_addr_t terminate_params;
u16 dst_mac_addr_lo;
u16 dst_mac_addr_mid;
u16 dst_mac_addr_hi;
u16 src_mac_addr_lo;
u16 src_mac_addr_mid;
u16 src_mac_addr_hi;
u16 tx_max_fc_pay_len;
u16 e_d_tov_timer_val;
u16 rec_tov_timer_val;
u16 rx_max_fc_pay_len;
u16 vlan_tag;
u16 physical_q0;
struct fc_addr_nw s_id;
u8 max_conc_seqs_c3;
struct fc_addr_nw d_id;
u8 flags;
u8 def_q_idx;
};
struct ecore_fcoe_stats {
u64 fcoe_rx_byte_cnt;
u64 fcoe_rx_data_pkt_cnt;
u64 fcoe_rx_xfer_pkt_cnt;
u64 fcoe_rx_other_pkt_cnt;
u32 fcoe_silent_drop_pkt_cmdq_full_cnt;
u32 fcoe_silent_drop_pkt_rq_full_cnt;
u32 fcoe_silent_drop_pkt_crc_error_cnt;
u32 fcoe_silent_drop_pkt_task_invalid_cnt;
u32 fcoe_silent_drop_total_pkt_cnt;
u64 fcoe_tx_byte_cnt;
u64 fcoe_tx_data_pkt_cnt;
u64 fcoe_tx_xfer_pkt_cnt;
u64 fcoe_tx_other_pkt_cnt;
};
enum _ecore_status_t
ecore_fcoe_acquire_connection(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_in_conn,
struct ecore_fcoe_conn **p_out_conn);
void OSAL_IOMEM *ecore_fcoe_get_db_addr(struct ecore_hwfn *p_hwfn,
u32 cid);
void OSAL_IOMEM *ecore_fcoe_get_global_cmdq_cons(struct ecore_hwfn *p_hwfn,
u8 relative_q_id);
void OSAL_IOMEM *ecore_fcoe_get_primary_bdq_prod(struct ecore_hwfn *p_hwfn,
u8 bdq_id);
void OSAL_IOMEM *ecore_fcoe_get_secondary_bdq_prod(struct ecore_hwfn *p_hwfn,
u8 bdq_id);
enum _ecore_status_t
ecore_fcoe_offload_connection(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn);
enum _ecore_status_t
ecore_fcoe_terminate_connection(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn);
void ecore_fcoe_release_connection(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_conn *p_conn);
enum _ecore_status_t
ecore_sp_fcoe_func_start(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
enum _ecore_status_t
ecore_sp_fcoe_func_stop(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
enum _ecore_status_t
ecore_fcoe_get_stats(struct ecore_hwfn *p_hwfn,
struct ecore_fcoe_stats *stats);
#endif

View File

@ -0,0 +1,65 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef GTT_REG_ADDR_H
#define GTT_REG_ADDR_H
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL //Access:RW DataWidth:0x20 //
/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL //Access:RW DataWidth:0x20 //
/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL //Access:RW DataWidth:0x20 //
/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL //Access:RW DataWidth:0x20 //
/* Win 6 */
#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL //Access:RW DataWidth:0x20 //
/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL //Access:RW DataWidth:0x20 //
/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL //Access:RW DataWidth:0x20 //
/* Win 9 */
#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL //Access:RW DataWidth:0x20 //
/* Win 10 */
#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL //Access:RW DataWidth:0x20 //
/* Win 11 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL //Access:RW DataWidth:0x20 //
#endif

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __PREVENT_PXP_GLOBAL_WIN__
static u32 pxp_global_win[] = {
0,
0,
0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0,
0,
0,
0,
};
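/*
 * Illustrative sketch only: the relationship between a pxp_global_win entry
 * and the BAR0 address it maps, as implied by the comments above (each window
 * covers 4096 bytes, so the stored value is the mapped address shifted right
 * by 12). The helper name is hypothetical.
 */
#if 0
static u32 qlnx_example_win_to_addr(u32 win_idx)
{
	/* e.g. win 2: 0x1c02 << 12 == 0x1c02000 */
	return pxp_global_win[win_idx] << 12;
}
#endif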
#endif /* __PREVENT_PXP_GLOBAL_WIN__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,143 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_HSI_INIT_FUNC__
#define __ECORE_HSI_INIT_FUNC__
/********************************/
/* HSI Init Functions constants */
/********************************/
/* Number of VLAN priorities */
#define NUM_OF_VLAN_PRIORITIES 8
/* Size of CRC8 lookup table */
#define CRC8_TABLE_SIZE 256
/*
* BRB RAM init requirements
*/
struct init_brb_ram_req
{
__le32 guranteed_per_tc /* guaranteed size per TC, in bytes */;
__le32 headroom_per_tc /* headroom size per TC, in bytes */;
__le32 min_pkt_size /* min packet size, in bytes */;
__le32 max_ports_per_engine /* maximum number of ports per engine */;
u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */;
};
/*
* ETS per-TC init requirements
*/
struct init_ets_tc_req
{
u8 use_sp /* if set, this TC participates in the arbitration with a strict priority (the priority is equal to the TC ID) */;
u8 use_wfq /* if set, this TC participates in the arbitration with a WFQ weight (indicated by the weight field) */;
__le16 weight /* An arbitration weight. Valid only if use_wfq is set. */;
};
/*
* ETS init requirements
*/
struct init_ets_req
{
__le32 mtu /* Max packet size (in bytes) */;
struct init_ets_tc_req tc_req[NUM_OF_TCS] /* ETS initialization requirements per TC. */;
};
/*
* NIG LB RL init requirements
*/
struct init_nig_lb_rl_req
{
__le16 lb_mac_rate /* Global MAC+LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
__le16 lb_rate /* Global LB RL rate (in Mbps). If set to 0, the RL will be disabled. */;
__le32 mtu /* Max packet size (in bytes) */;
__le16 tc_rate[NUM_OF_PHYS_TCS] /* RL rate per physical TC (in Mbps). If set to 0, the RL will be disabled. */;
};
/*
* NIG TC mapping for each priority
*/
struct init_nig_pri_tc_map_entry
{
u8 tc_id /* the mapped TC ID */;
u8 valid /* indicates if the mapping entry is valid */;
};
/*
* NIG priority to TC map init requirements
*/
struct init_nig_pri_tc_map_req
{
struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
};
/*
* QM per-port init parameters
*/
struct init_qm_port_params
{
u8 active /* Indicates if this port is active */;
u8 active_phys_tcs /* Vector of valid bits for active TCs used by this port */;
__le16 num_pbf_cmd_lines /* number of PBF command lines that can be used by this port */;
__le16 num_btb_blocks /* number of BTB blocks that can be used by this port */;
__le16 reserved;
};
/*
* QM per-PQ init parameters
*/
struct init_qm_pq_params
{
u8 vport_id /* VPORT ID */;
u8 tc_id /* TC ID */;
u8 wrr_group /* WRR group */;
u8 rl_valid /* Indicates if a rate limiter should be allocated for the PQ (0/1) */;
};
/*
* QM per-vport init parameters
*/
struct init_qm_vport_params
{
__le32 vport_rl /* rate limit in Mb/sec units. a value of 0 means don't configure. ignored if VPORT RL is globally disabled. */;
__le16 vport_wfq /* WFQ weight. A value of 0 means don't configure. ignored if VPORT WFQ is globally disabled. */;
__le16 first_tx_pq_id[NUM_OF_TCS] /* the first Tx PQ ID associated with this VPORT for each TC. */;
};
#endif /* __ECORE_HSI_INIT_FUNC__ */

View File

@ -0,0 +1,473 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_HSI_INIT_TOOL__
#define __ECORE_HSI_INIT_TOOL__
/**************************************/
/* Init Tool HSI constants and macros */
/**************************************/
/* Width of GRC address in bits (addresses are specified in dwords) */
#define GRC_ADDR_BITS 23
#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
/* indicates an init that should be applied to any phase ID */
#define ANY_PHASE_ID 0xffff
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
enum chip_ids
{
CHIP_BB,
CHIP_K2,
CHIP_E5,
MAX_CHIP_IDS
};
struct fw_asserts_ram_section
{
__le16 section_ram_line_offset /* The offset of the section in the RAM in RAM lines (64-bit units) */;
__le16 section_ram_line_size /* The size of the section in RAM lines (64-bit units) */;
u8 list_dword_offset /* The offset of the asserts list within the section in dwords */;
u8 list_element_dword_size /* The size of an assert list element in dwords */;
u8 list_num_elements /* The number of elements in the asserts list */;
u8 list_next_index_dword_offset /* The offset of the next list index field within the section in dwords */;
};
struct fw_ver_num
{
u8 major /* Firmware major version number */;
u8 minor /* Firmware minor version number */;
u8 rev /* Firmware revision version number */;
u8 eng /* Firmware engineering version number (for bootleg versions) */;
};
struct fw_ver_info
{
__le16 tools_ver /* Tools version number */;
u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
u8 reserved1;
struct fw_ver_num num /* FW version number */;
__le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
__le32 reserved2;
};
struct fw_info
{
struct fw_ver_info ver /* FW version information */;
struct fw_asserts_ram_section fw_asserts_section /* Info regarding the FW asserts section in the Storm RAM */;
};
struct fw_info_location
{
__le32 grc_addr /* GRC address where the fw_info struct is located. */;
__le32 size /* Size of the fw_info structure (that's located at the grc_addr). */;
};
enum init_modes
{
MODE_BB_A0_DEPRECATED,
MODE_BB,
MODE_K2,
MODE_ASIC,
MODE_EMUL_REDUCED,
MODE_EMUL_FULL,
MODE_FPGA,
MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
MODE_PORTS_PER_ENG_1,
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
MODE_E5,
MAX_INIT_MODES
};
enum init_phases
{
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
enum init_split_types
{
SPLIT_TYPE_NONE,
SPLIT_TYPE_PORT,
SPLIT_TYPE_PF,
SPLIT_TYPE_PORT_PF,
SPLIT_TYPE_VF,
MAX_INIT_SPLIT_TYPES
};
/*
* Binary buffer header
*/
struct bin_buffer_hdr
{
__le32 offset /* buffer offset in bytes from the beginning of the binary file */;
__le32 length /* buffer length in bytes */;
};
/*
* binary init buffer types
*/
enum bin_init_buffer_type
{
BIN_BUF_INIT_FW_VER_INFO /* fw_ver_info struct */,
BIN_BUF_INIT_CMD /* init commands */,
BIN_BUF_INIT_VAL /* init data */,
BIN_BUF_INIT_MODE_TREE /* init modes tree */,
BIN_BUF_INIT_IRO /* internal RAM offsets */,
MAX_BIN_INIT_BUFFER_TYPE
};
/*
* init array header: raw
*/
struct init_array_raw_hdr
{
__le32 data;
#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
/*
* init array header: standard
*/
struct init_array_standard_hdr
{
__le32 data;
#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF /* Init array size (in dwords) */
#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
/*
* init array header: zipped
*/
struct init_array_zipped_hdr
{
__le32 data;
#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF /* Init array zipped size (in bytes) */
#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
/*
* init array header: pattern
*/
struct init_array_pattern_hdr
{
__le32 data;
#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF /* Init array type, from init_array_types enum */
#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF /* pattern size in dword */
#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF /* pattern repetitions */
#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
/*
* init array header union
*/
union init_array_hdr
{
struct init_array_raw_hdr raw /* raw init array header */;
struct init_array_standard_hdr standard /* standard init array header */;
struct init_array_zipped_hdr zipped /* zipped init array header */;
struct init_array_pattern_hdr pattern /* pattern init array header */;
};
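/*
 * Illustrative sketch only: extracting a field from an init array header with
 * the mask/shift pairs defined above, using the (value >> SHIFT) & MASK idiom
 * these headers follow. OSAL_LE32_TO_CPU is assumed to be the OSAL
 * little-endian conversion helper.
 */
#if 0
static u32 qlnx_example_init_array_type(const union init_array_hdr *hdr)
{
	u32 data = OSAL_LE32_TO_CPU(hdr->raw.data);

	/* value belongs to enum init_array_types */
	return (data >> INIT_ARRAY_RAW_HDR_TYPE_SHIFT) &
	       INIT_ARRAY_RAW_HDR_TYPE_MASK;
}
#endif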
/*
* init array types
*/
enum init_array_types
{
INIT_ARR_STANDARD /* standard init array */,
INIT_ARR_ZIPPED /* zipped init array */,
INIT_ARR_PATTERN /* a repeated pattern */,
MAX_INIT_ARRAY_TYPES
};
/*
* init operation: callback
*/
struct init_callback_op
{
__le32 op_data;
#define INIT_CALLBACK_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_CALLBACK_OP_OP_SHIFT 0
#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
__le16 callback_id /* Callback ID */;
__le16 block_id /* Blocks ID */;
};
/*
* init operation: delay
*/
struct init_delay_op
{
__le32 op_data;
#define INIT_DELAY_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_DELAY_OP_OP_SHIFT 0
#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
#define INIT_DELAY_OP_RESERVED_SHIFT 4
__le32 delay /* delay in us */;
};
/*
* init operation: if_mode
*/
struct init_if_mode_op
{
__le32 op_data;
#define INIT_IF_MODE_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_IF_MODE_OP_OP_SHIFT 0
#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF /* Commands to skip if the modes don't match */
#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
__le16 reserved2;
__le16 modes_buf_offset /* offset (in bytes) in modes expression buffer */;
};
/*
* init operation: if_phase
*/
struct init_if_phase_op
{
__le32 op_data;
#define INIT_IF_PHASE_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_IF_PHASE_OP_OP_SHIFT 0
#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 /* Indicates if DMAE is enabled in this phase */
#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF /* Commands to skip if the phases don't match */
#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/*
* init mode operators
*/
enum init_mode_ops
{
INIT_MODE_OP_NOT /* init mode not operator */,
INIT_MODE_OP_OR /* init mode or operator */,
INIT_MODE_OP_AND /* init mode and operator */,
MAX_INIT_MODE_OPS
};
/*
* init operation: raw
*/
struct init_raw_op
{
__le32 op_data;
#define INIT_RAW_OP_OP_MASK 0xF /* Init operation, from init_op_types enum */
#define INIT_RAW_OP_OP_SHIFT 0
#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
#define INIT_RAW_OP_PARAM1_SHIFT 4
__le32 param2 /* Init param 2 */;
};
/*
* init array params
*/
struct init_op_array_params
{
__le16 size /* array size in dwords */;
__le16 offset /* array start offset in dwords */;
};
/*
* Write init operation arguments
*/
union init_write_args
{
__le32 inline_val /* value to write, used when init source is INIT_SRC_INLINE */;
__le32 zeros_count /* number of zeros to write, used when init source is INIT_SRC_ZEROS */;
__le32 array_offset /* array offset to write, used when init source is INIT_SRC_ARRAY */;
struct init_op_array_params runtime /* runtime array params to write, used when init source is INIT_SRC_RUNTIME */;
};
/*
* init operation: write
*/
struct init_write_op
{
__le32 data;
#define INIT_WRITE_OP_OP_MASK 0xF /* init operation, from init_op_types enum */
#define INIT_WRITE_OP_OP_SHIFT 0
#define INIT_WRITE_OP_SOURCE_MASK 0x7 /* init source type, taken from init_source_types enum */
#define INIT_WRITE_OP_SOURCE_SHIFT 4
#define INIT_WRITE_OP_RESERVED_MASK 0x1
#define INIT_WRITE_OP_RESERVED_SHIFT 7
#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 /* indicates if the register is wide-bus */
#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF /* internal (absolute) GRC address, in dwords */
#define INIT_WRITE_OP_ADDRESS_SHIFT 9
union init_write_args args /* Write init operation arguments */;
};
/*
* init operation: read
*/
struct init_read_op
{
__le32 op_data;
#define INIT_READ_OP_OP_MASK 0xF /* init operation, from init_op_types enum */
#define INIT_READ_OP_OP_SHIFT 0
#define INIT_READ_OP_POLL_TYPE_MASK 0xF /* polling type, from init_poll_types enum */
#define INIT_READ_OP_POLL_TYPE_SHIFT 4
#define INIT_READ_OP_RESERVED_MASK 0x1
#define INIT_READ_OP_RESERVED_SHIFT 8
#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF /* internal (absolute) GRC address, in dwords */
#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val /* expected polling value, used only when polling is done */;
};
/*
* Init operations union
*/
union init_op
{
struct init_raw_op raw /* raw init operation */;
struct init_write_op write /* write init operation */;
struct init_read_op read /* read init operation */;
struct init_if_mode_op if_mode /* if_mode init operation */;
struct init_if_phase_op if_phase /* if_phase init operation */;
struct init_callback_op callback /* callback init operation */;
struct init_delay_op delay /* delay init operation */;
};
/*
* Init command operation types
*/
enum init_op_types
{
INIT_OP_READ /* GRC read init command */,
INIT_OP_WRITE /* GRC write init command */,
INIT_OP_IF_MODE /* Skip init commands if the init modes expression doesn't match */,
INIT_OP_IF_PHASE /* Skip init commands if the init phase doesn't match */,
INIT_OP_DELAY /* delay init command */,
INIT_OP_CALLBACK /* callback init command */,
MAX_INIT_OP_TYPES
};
/*
* init polling types
*/
enum init_poll_types
{
INIT_POLL_NONE /* No polling */,
INIT_POLL_EQ /* poll until the read value equals the expected value */,
INIT_POLL_OR /* poll until the OR condition with the expected value is satisfied */,
INIT_POLL_AND /* poll until the AND condition with the expected value is satisfied */,
MAX_INIT_POLL_TYPES
};
/*
* init source types
*/
enum init_source_types
{
INIT_SRC_INLINE /* init value is included in the init command */,
INIT_SRC_ZEROS /* init value is all zeros */,
INIT_SRC_ARRAY /* init value is an array of values */,
INIT_SRC_RUNTIME /* init value is provided during runtime */,
MAX_INIT_SOURCE_TYPES
};
/*
* Internal RAM Offsets macro data
*/
struct iro
{
__le32 base /* RAM field offset */;
__le16 m1 /* multiplier 1 */;
__le16 m2 /* multiplier 2 */;
__le16 m3 /* multiplier 3 */;
__le16 size /* RAM field size */;
};
#endif /* __ECORE_HSI_INIT_TOOL__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,319 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_HW_H__
#define __ECORE_HW_H__
#include "ecore.h"
#include "ecore_dev_api.h"
/* Forward declaration */
struct ecore_ptt;
enum reserved_ptts {
RESERVED_PTT_EDIAG,
RESERVED_PTT_USER_SPACE,
RESERVED_PTT_MAIN,
RESERVED_PTT_DPC,
RESERVED_PTT_MAX
};
/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1
* instead of 0, this should be fixed in later HW versions.
*/
#ifndef MISC_REG_DRIVER_CONTROL_0
#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1
#endif
#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE
#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE
#endif
enum _dmae_cmd_dst_mask {
DMAE_CMD_DST_MASK_NONE = 0,
DMAE_CMD_DST_MASK_PCIE = 1,
DMAE_CMD_DST_MASK_GRC = 2
};
enum _dmae_cmd_src_mask {
DMAE_CMD_SRC_MASK_PCIE = 0,
DMAE_CMD_SRC_MASK_GRC = 1
};
enum _dmae_cmd_crc_mask {
DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
};
/* definitions for DMA constants */
#define DMAE_GO_VALUE 0x1
#ifdef __BIG_ENDIAN
#define DMAE_COMPLETION_VAL 0xAED10000
#define DMAE_CMD_ENDIANITY 0x3
#else
#define DMAE_COMPLETION_VAL 0xD1AE
#define DMAE_CMD_ENDIANITY 0x2
#endif
#define DMAE_CMD_SIZE 14
/* size of the DMAE command structure to fill: DMAE_CMD_SIZE - 5 */
#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
/* Minimum wait for a DMAE operation to complete: 2 milliseconds */
#define DMAE_MIN_WAIT_TIME 0x2
#define DMAE_MAX_CLIENTS 32
/**
* @brief ecore_gtt_init - Initialize GTT windows
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured
*
* @param p_hwfn
*/
void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool
*
* @param p_hwfn
*
* @return _ecore_status_t - success (0), negative - error.
*/
enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_pool_free -
*
* @param p_hwfn
*/
void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address
*
* @param p_hwfn
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_get_bar_addr - Get the PTT's external BAR address
*
* @param p_ptt
*
* @return u32
*/
u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt);
/**
* @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address
*
* @param p_hwfn
* @param new_hw_addr
* @param p_ptt
*/
void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 new_hw_addr);
/**
* @brief ecore_get_reserved_ptt - Get a specific reserved PTT
*
* @param p_hwfn
* @param ptt_idx
*
* @return struct ecore_ptt *
*/
struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
* @brief ecore_wr - Write value to BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
* @param val
*/
void ecore_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr,
u32 val);
/**
* @brief ecore_rd - Read value from BAR using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
*/
u32 ecore_rd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr);
/**
* @brief ecore_memcpy_from - copy n bytes from BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param dest
* @param hw_addr
* @param n
*/
void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
void *dest,
u32 hw_addr,
osal_size_t n);
/**
* @brief ecore_memcpy_to - copy n bytes to BAR using the given
* ptt
*
* @param p_hwfn
* @param p_ptt
* @param hw_addr
* @param src
* @param n
*/
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 hw_addr,
void *src,
osal_size_t n);
/**
* @brief ecore_fid_pretend - pretend to another function when
* accessing the ptt window. There is no way to unpretend
* a function. The only way to cancel a pretend is to
* pretend back to the original function.
*
* @param p_hwfn
* @param p_ptt
* @param fid - fid field of the pxp_pretend structure. Can contain
* either a PF or a VF; the port/path fields are don't-care.
*/
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 fid);
/**
* @brief ecore_port_pretend - pretend to another port when
* accessing the ptt window
*
* @param p_hwfn
* @param p_ptt
* @param port_id - the port to pretend to
*/
void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id);
/**
* @brief ecore_port_unpretend - cancel any previously set port
* pretend
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_port_unpretend(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
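/*
 * Illustrative sketch only: a register write performed on behalf of another
 * port, bracketed by port pretend/unpretend. hw_addr and val are hypothetical.
 */
#if 0
static void qlnx_example_wr_other_port(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u8 port_id, u32 hw_addr, u32 val)
{
	ecore_port_pretend(p_hwfn, p_ptt, port_id);
	ecore_wr(p_hwfn, p_ptt, hw_addr, val);
	ecore_port_unpretend(p_hwfn, p_ptt);
}
#endif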
/**
* @brief ecore_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
* @param p_hwfn
* @param vfid
*/
u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid);
/**
* @brief ecore_dmae_info_alloc - Init the dmae_info structure
* which is part of p_hwfn.
* @param p_hwfn
*/
enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_dmae_info_free - Free the dmae_info structure
* which is part of p_hwfn
*
* @param p_hwfn
*/
void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn);
union ecore_qm_pq_params {
struct {
u8 q_idx;
} iscsi;
struct {
u8 tc;
} core;
struct {
u8 is_vf;
u8 vf_id;
u8 tc;
} eth;
struct {
u8 dcqcn;
u8 qpid; /* roce relative */
} roce;
struct {
u8 qidx;
} iwarp;
};
u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
enum protocol_type proto,
union ecore_qm_pq_params *params);
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *fw_data);
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type);
#endif /* __ECORE_HW_H__ */

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _ECORE_IGU_DEF_H_
#define _ECORE_IGU_DEF_H_
/* Fields of IGU PF CONFIGURATION REGISTER */
#define IGU_PF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_PF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_PF_CONF_INT_LINE_EN (0x1<<2) /* INT enable */
#define IGU_PF_CONF_ATTN_BIT_EN (0x1<<3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1<<5) /* simd all ones mode */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1<<0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1<<1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1<<4) /* single ISR mode enable */
#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* IGU control commands
*/
enum igu_ctrl_cmd
{
IGU_CTRL_CMD_TYPE_RD,
IGU_CTRL_CMD_TYPE_WR,
MAX_IGU_CTRL_CMD
};
/* Control register for the IGU command register
*/
struct igu_ctrl_reg
{
u32 ctrl_data;
#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
#define IGU_CTRL_REG_FID_SHIFT 0
#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
#define IGU_CTRL_REG_RESERVED_MASK 0x1
#define IGU_CTRL_REG_RESERVED_SHIFT 28
#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
#define IGU_CTRL_REG_TYPE_SHIFT 31
};
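/*
 * Illustrative sketch only: composing an IGU control register value from the
 * fields above, following the mask/shift convention used throughout these
 * headers. The fid and pxp_addr arguments are hypothetical inputs.
 */
#if 0
static u32 qlnx_example_igu_ctrl_wr(u16 fid, u16 pxp_addr)
{
	u32 ctrl = 0;

	ctrl |= ((u32)fid & IGU_CTRL_REG_FID_MASK) << IGU_CTRL_REG_FID_SHIFT;
	ctrl |= ((u32)pxp_addr & IGU_CTRL_REG_PXP_ADDR_MASK) <<
		IGU_CTRL_REG_PXP_ADDR_SHIFT;
	ctrl |= ((u32)IGU_CTRL_CMD_TYPE_WR & IGU_CTRL_REG_TYPE_MASK) <<
		IGU_CTRL_REG_TYPE_SHIFT;

	return ctrl;
}
#endif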
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,539 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
/* Forward declarations */
struct init_qm_pq_params;
/**
* @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
*
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
* @param pf_id - physical function ID
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
* @param num_pf_pqs - number of PQs used by this PF
* @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for the
* engine phase.
*
* @param p_hwfn - HW device data
* @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param pf_rl_en - enable per-PF rate limiters
* @param pf_wfq_en - enable per-PF WFQ
* @param vport_rl_en - enable per-VPORT rate limiters
* @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array of size MAX_NUM_PORTS with parameters
* for each port
*
* @return 0 on success, -1 on error.
*/
int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
u8 max_ports_per_engine,
u8 max_phys_tcs_per_port,
bool pf_rl_en,
bool pf_wfq_en,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params port_params[MAX_NUM_PORTS]);
/**
* @brief ecore_qm_pf_rt_init - Prepare QM runtime init values for the PF phase
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param port_id - port ID
* @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
* @param is_first_pf - 1 = first PF in engine, 0 = otherwise
* @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
* @param num_tids - number of tasks used by this PF
* @param start_pq - first Tx PQ ID associated with this PF
* @param num_pf_pqs - number of Tx PQs associated with this PF
* (non-VF)
* @param num_vf_pqs - number of Tx PQs associated with a VF
* @param start_vport - first VPORT ID associated with this PF
* @param num_vports - number of VPORTs associated with this PF
* @param pf_wfq - WFQ weight. if PF WFQ is globally disabled,
* the weight must be 0. otherwise, the weight
* must be non-zero.
* @param pf_rl - rate limit in Mb/sec units. a value of 0
* means don't configure. ignored if PF RL is
* globally disabled.
* @param pq_params - array of size (num_pf_pqs + num_vf_pqs) with
* parameters for each Tx PQ associated with the
* specified PF.
* @param vport_params - array of size num_vports with parameters for
* each associated VPORT.
*
* @return 0 on success, -1 on error.
*/
int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 port_id,
u8 pf_id,
u8 max_phys_tcs_per_port,
bool is_first_pf,
u32 num_pf_cids,
u32 num_vf_cids,
u32 num_tids,
u16 start_pq,
u16 num_pf_pqs,
u16 num_vf_pqs,
u8 start_vport,
u8 num_vports,
u16 pf_wfq,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
/**
* @brief ecore_init_pf_wfq - Initializes the WFQ weight of the specified PF
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u16 pf_wfq);
/**
* @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u32 pf_rl);
/**
* @brief ecore_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param first_tx_pq_id - An array containing the first Tx PQ ID associated
* with the VPORT for each TC. This array is filled by
* ecore_qm_pf_rt_init
* @param vport_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS],
u16 vport_wfq);
/**
* @brief ecore_init_vport_rl - Initializes the rate limit of the specified
* VPORT.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl);
/**
* @brief ecore_send_qm_stop_cmd - Sends a stop command to the QM
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param is_release_cmd - true for release, false for stop.
* @param is_tx_pq - true for Tx PQs, false for Other PQs.
* @param start_pq - first PQ ID to stop
* @param num_pqs - Number of PQs to stop, starting from start_pq.
*
* @return bool, true if successful, false if a timeout occurred while waiting for
* QM command done.
*/
bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool is_release_cmd,
bool is_tx_pq,
u16 start_pq,
u16 num_pqs);
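/*
 * Usage sketch (illustrative only): stopping a range of Tx PQs is expected to
 * be a stop command followed by a release command over the same range; the PQ
 * range values below are placeholders for the example.
 *
 *	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
 *				    start_pq, num_pf_pqs))
 *		DP_NOTICE(p_hwfn, true, "QM stop command timed out\n");
 *	else if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
 *					 start_pq, num_pf_pqs))
 *		DP_NOTICE(p_hwfn, true, "QM release command timed out\n");
 */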
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_nig_ets - Initializes the NIG ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param req - the NIG ETS initialization requirements.
* @param is_lb - if set, the loopback port arbiter is initialized, otherwise
* the physical port arbiter is initialized. The pure-LB TC
* requirements are ignored when is_lb is cleared.
*/
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req* req,
bool is_lb);
/**
* @brief ecore_init_nig_lb_rl - Initializes the NIG LB RLs
*
* Based on global and per-TC rate requirements
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param req - the NIG LB RLs initialization requirements.
*/
void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req* req);
#endif /* UNUSED_HSI_FUNC */
/**
* @brief ecore_init_nig_pri_tc_map - Initializes the NIG priority to TC map.
*
* Assumes valid arguments.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param req - required mapping from priorities to TCs.
*/
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req* req);
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_prs_ets - Initializes the PRS Rx ETS arbiter
*
* Based on weight/priority requirements per-TC.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param req - the PRS ETS initialization requirements.
*/
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req* req);
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_brb_ram - Initializes BRB RAM sizes per TC.
*
* Based on weight/priority requirements per-TC.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param req - the BRB RAM initialization requirements.
*/
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_brb_ram_req* req);
#endif /* UNUSED_HSI_FUNC */
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - Initializes the NIG, PRS, PBF and
* LLH ethType registers to the input ethType. Should be called once per engine
* if the engine is in BD mode.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 ethType);
/**
* @brief ecore_set_port_mf_ovlan_eth_type - Initializes the DORQ ethType
* registers to the input ethType. Should be called once per port.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 ethType);
#endif /* UNUSED_HSI_FUNC */
/**
* @brief ecore_set_vxlan_dest_port - Initializes the VXLAN tunnel destination
* UDP port.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - VXLAN destination UDP port.
*/
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
/**
* @brief ecore_set_vxlan_enable - Enable or disable VXLAN tunnel in HW
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param vxlan_enable - VXLAN enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool vxlan_enable);
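/*
 * Configuration sketch (illustrative only): enabling VXLAN tunnel support is
 * expected to involve programming the destination UDP port and then setting
 * the enable flag. 4789 is the IANA-assigned VXLAN port and is used here only
 * as an example value.
 *
 *	ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *	ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
 */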
/**
* @brief ecore_set_gre_enable - Enable or disable GRE tunnel in HW
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param eth_gre_enable - Ethernet GRE enable flag.
* @param ip_gre_enable - IP GRE enable flag.
*/
void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable,
bool ip_gre_enable);
/**
* @brief ecore_set_geneve_dest_port - Initializes the GENEVE tunnel destination
* UDP port.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param dest_port - GENEVE destination UDP port.
*/
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
/**
* @brief ecore_set_geneve_enable - Enable or disable GENEVE tunnel in HW
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param eth_geneve_enable - Ethernet GENEVE enable flag.
* @param ip_geneve_enable - IP GENEVE enable flag.
*/
void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_geneve_enable,
bool ip_geneve_enable);
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_gft_event_id_cm_hdr - Configure GFT event id and cm header
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
*/
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to disable RFS.
*/
void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id);
/**
* @brief ecore_set_rfs_mode_enable - Enable and configure HW for RFS
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
* @param pf_id - pf on which to enable RFS.
* @param tcp - enable the profile for TCP packets.
* @param udp - enable the profile for UDP packets.
* @param ipv4 - enable the profile for IPv4 packets.
* @param ipv6 - enable the profile for IPv6 packets.
*/
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 pf_id,
bool tcp,
bool udp,
bool ipv4,
bool ipv6);
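/*
 * Usage sketch (illustrative only): enabling RFS classification for TCP and
 * UDP over IPv4 on a given PF could look like the following; disabling would
 * use ecore_set_rfs_mode_disable() with the same pf_id.
 *
 *	// tcp, udp, ipv4 enabled; ipv6 disabled
 *	ecore_set_rfs_mode_enable(p_hwfn, p_ptt, pf_id,
 *				  true, true, true, false);
 */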
#endif /* UNUSED_HSI_FUNC */
/**
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
* used before first ETH queue started.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers. Don't care
* if runtime_init used.
* @param mode - VF zone size mode. Use enum vf_zone_size_mode.
* @param runtime_init - Set 1 to init runtime registers in engine phase.
* Set 0 if VF zone size mode configured after engine
* phase.
*/
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 mode,
bool runtime_init);
/**
* @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
* VF zone size mode.
*
* @param p_hwfn - HW device data
* @param stat_cnt_id - statistic counter id
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
*/
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id,
u16 vf_zone_size_mode);
/**
* @brief ecore_get_mstorm_eth_vf_prods_offset - Get the VF producer offset by VF
* zone size mode.
*
* @param p_hwfn - HW device data
* @param vf_id - vf id.
* @param vf_queue_id - per VF rx queue id.
* @param vf_zone_size_mode - vf zone size mode. Use enum vf_zone_size_mode.
*/
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
u8 vf_id,
u8 vf_queue_id,
u16 vf_zone_size_mode);
/**
* @brief ecore_enable_context_validation - Enable and configure context
* validation.
*
* @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers.
*/
void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_calc_session_ctx_validation - Calculate the validation byte for a
* session context.
*
* @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param cid - context cid.
*/
void ecore_calc_session_ctx_validation(struct ecore_hwfn * p_hwfn,
void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 cid);
/**
* @brief ecore_calc_task_ctx_validation - Calculate the validation byte for a
* task context.
*
* @param p_hwfn - HW device data
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - context size.
* @param ctx_type - context type.
* @param tid - context tid.
*/
void ecore_calc_task_ctx_validation(struct ecore_hwfn * p_hwfn,
void *p_ctx_mem,
u16 ctx_size,
u8 ctx_type,
u32 tid);
/**
* @brief ecore_memset_session_ctx - Memset session context to 0 while
* preserving validation bytes.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
void ecore_memset_session_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
/**
* @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
* validation bytes.
*
* @param p_ctx_mem - pointer to context memory.
* @param ctx_size - size to initialize.
* @param ctx_type - context type.
*/
void ecore_memset_task_ctx(void *p_ctx_mem,
u32 ctx_size,
u8 ctx_type);
#endif

View File

@@ -0,0 +1,641 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File : ecore_init_ops.c
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/* include the precompiled configuration values - only once */
#include "bcm_osal.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_status.h"
#include "ecore_rt_defs.h"
#include "ecore_init_fw_funcs.h"
#ifndef CONFIG_ECORE_BINARY_FW
#ifdef CONFIG_ECORE_ZIPPED_FW
#include "ecore_init_values_zipped.h"
#else
#include "ecore_init_values.h"
#endif
#endif
#include "ecore_iro_values.h"
#include "ecore_sriov.h"
#include "ecore_gtt_values.h"
#include "reg_addr.h"
#include "ecore_init_ops.h"
#define ECORE_INIT_MAX_POLL_COUNT 100
#define ECORE_INIT_POLL_PERIOD_US 500
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
p_dev->iro_arr = iro_arr;
}
/* Runtime configuration helpers */
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
{
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data.b_valid[i] = false;
}
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
u32 rt_offset, u32 *p_val,
osal_size_t size)
{
osal_size_t i;
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
}
}
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr,
u16 rt_offset,
u16 size,
bool b_must_dmae)
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
u16 i, segment;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Since not all RT entries are initialized, go over the RT and
* for each segment of initialized values use DMA.
*/
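/* Example: for a valid-map of {1,1,0,1}, the loop below issues one DMAE
 * write covering entries 0-1, skips the invalid entry 2, and issues a
 * second write covering entry 3.
 */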
for (i = 0; i < size; i++) {
if (!p_valid[i])
continue;
/* In case there isn't any wide-bus configuration here,
* simply write the data instead of using dmae.
*/
if (!b_must_dmae) {
ecore_wr(p_hwfn, p_ptt, addr + (i << 2),
p_init_val[i]);
continue;
}
/* Start of a new segment */
for (segment = 1; i + segment < size; segment++)
if (!p_valid[i + segment])
break;
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_init_val + i),
addr + (i << 2), segment, 0);
if (rc != ECORE_SUCCESS)
return rc;
/* Jump over the entire segment, including invalid entry */
i += segment;
}
return rc;
}
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
if (IS_VF(p_hwfn->p_dev))
return ECORE_SUCCESS;
rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(bool) * RUNTIME_ARRAY_SIZE);
if (!rt_data->b_valid)
return ECORE_NOMEM;
rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
sizeof(u32) * RUNTIME_ARRAY_SIZE);
if (!rt_data->init_val) {
OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
rt_data->b_valid = OSAL_NULL;
return ECORE_NOMEM;
}
return ECORE_SUCCESS;
}
void ecore_init_free(struct ecore_hwfn *p_hwfn)
{
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
p_hwfn->rt_data.init_val = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
p_hwfn->rt_data.b_valid = OSAL_NULL;
}
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 dmae_data_offset,
u32 size, const u32 *p_buf,
bool b_must_dmae, bool b_can_dmae)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
!b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
const u32 *data = p_buf + dmae_data_offset;
u32 i;
for (i = 0; i < size; i++)
ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
} else {
rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(p_buf +
dmae_data_offset),
addr, size, 0);
}
return rc;
}
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 fill,
u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
return ecore_dmae_host2grc(p_hwfn, p_ptt,
(osal_uintptr_t)(&(zero_buffer[0])),
addr, fill_count,
ECORE_DMAE_FLAG_RW_REPL_SRC);
}
static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 addr, u32 fill, u32 fill_count)
{
u32 i;
for (i = 0; i < fill_count; i++, addr += sizeof(u32))
ecore_wr(p_hwfn, p_ptt, addr, fill);
}
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *cmd,
bool b_must_dmae,
bool b_can_dmae)
{
u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
u32 data = OSAL_LE32_TO_CPU(cmd->data);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 offset, output_len, input_len, max_size;
#endif
struct ecore_dev *p_dev = p_hwfn->p_dev;
union init_array_hdr *hdr;
const u32 *array_data;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 size;
array_data = p_dev->fw_data->arr_data;
hdr = (union init_array_hdr *) (array_data +
dmae_array_offset);
data = OSAL_LE32_TO_CPU(hdr->raw.data);
switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
offset = dmae_array_offset + 1;
input_len = GET_FIELD(data,
INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
max_size = MAX_ZIPPED_SIZE * 4;
OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);
output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
(u8 *)&array_data[offset],
max_size, (u8 *)p_hwfn->unzip_buf);
if (output_len) {
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
output_len,
p_hwfn->unzip_buf,
b_must_dmae, b_can_dmae);
} else {
DP_NOTICE(p_hwfn, true,
"Failed to unzip dmae data\n");
rc = ECORE_INVAL;
}
#else
DP_NOTICE(p_hwfn, true,
"Using zipped firmware without config enabled\n");
rc = ECORE_INVAL;
#endif
break;
case INIT_ARR_PATTERN:
{
u32 repeats = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_REPETITIONS);
u32 i;
size = GET_FIELD(data,
INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
for (i = 0; i < repeats; i++, addr += size << 2) {
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
b_must_dmae, b_can_dmae);
if (rc)
break;
}
break;
}
case INIT_ARR_STANDARD:
size = GET_FIELD(data,
INIT_ARRAY_STANDARD_HDR_SIZE);
rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
dmae_array_offset + 1,
size, array_data,
b_must_dmae, b_can_dmae);
break;
}
return rc;
}
/* init_ops write command */
static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_write_op *p_cmd,
bool b_can_dmae)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
DP_NOTICE(p_hwfn, true,
"Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
addr);
return ECORE_INVAL;
}
switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
case INIT_SRC_INLINE:
data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
ecore_wr(p_hwfn, p_ptt, addr, data);
break;
case INIT_SRC_ZEROS:
data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
rc = ecore_init_fill_dmae(p_hwfn, p_ptt,
addr, 0, data);
else
ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
case INIT_SRC_ARRAY:
rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
b_must_dmae, b_can_dmae);
break;
case INIT_SRC_RUNTIME:
ecore_init_rt(p_hwfn, p_ptt, addr,
OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
b_must_dmae);
break;
}
return rc;
}
static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
{
return (val == expected_val);
}
static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
{
return (val & expected_val) == expected_val;
}
static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
{
return (val | expected_val) > 0;
}
/* init_ops read/poll commands */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_read_op *cmd)
{
bool (*comp_check)(u32 val, u32 expected_val);
u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
u32 data, addr, poll;
int i;
data = OSAL_LE32_TO_CPU(cmd->op_data);
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
delay *= 100;
#endif
val = ecore_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
return;
switch (poll) {
case INIT_POLL_EQ:
comp_check = comp_eq;
break;
case INIT_POLL_OR:
comp_check = comp_or;
break;
case INIT_POLL_AND:
comp_check = comp_and;
break;
default:
DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
cmd->op_data);
return;
}
data = OSAL_LE32_TO_CPU(cmd->expected_val);
for (i = 0;
i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data);
i++) {
OSAL_UDELAY(delay);
val = ecore_rd(p_hwfn, p_ptt, addr);
}
if (i == ECORE_INIT_MAX_POLL_COUNT)
DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n",
addr,
OSAL_LE32_TO_CPU(cmd->expected_val), val,
OSAL_LE32_TO_CPU(cmd->op_data));
}
/* init_ops callbacks entry point */
static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_callback_op *p_cmd)
{
DP_NOTICE(p_hwfn, true, "Currently init values have no need of callbacks\n");
}
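/* The modes tree buffer holds a boolean expression over the init modes in
 * prefix notation: an op byte (NOT/OR/AND) is followed by its operand
 * sub-expressions, and any other byte denotes a mode bit after subtracting
 * MAX_INIT_MODE_OPS. For example (values illustrative), the sequence
 * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + 2, MAX_INIT_MODE_OPS + 5 }
 * matches only when mode bits 2 and 5 are both set in 'modes'.
 */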
static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
u16 *p_offset, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
const u8 *modes_tree_buf;
u8 arg1, arg2, tree_val;
modes_tree_buf = p_dev->fw_data->modes_tree_buf;
tree_val = modes_tree_buf[(*p_offset)++];
switch(tree_val) {
case INIT_MODE_OP_NOT:
return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
case INIT_MODE_OP_OR:
arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 | arg2;
case INIT_MODE_OP_AND:
arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
return arg1 & arg2;
default:
tree_val -= MAX_INIT_MODE_OPS;
return (modes & (1 << tree_val)) ? 1 : 0;
}
}
static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
struct init_if_mode_op *p_cmd, int modes)
{
u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
return 0;
else
return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
INIT_IF_MODE_OP_CMD_OFFSET);
}
static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn,
struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
INIT_IF_PHASE_OP_CMD_OFFSET);
else
return 0;
}
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int phase,
int phase_id,
int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
init_ops = p_dev->fw_data->init_ops;
#ifdef CONFIG_ECORE_ZIPPED_FW
p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
MAX_ZIPPED_SIZE * 4);
if (!p_hwfn->unzip_buf) {
DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
return ECORE_NOMEM;
}
#endif
for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
union init_op *cmd = &init_ops[cmd_num];
u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);
switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
case INIT_OP_WRITE:
rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
b_dmae);
break;
case INIT_OP_READ:
ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
break;
case INIT_OP_IF_MODE:
cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
modes);
break;
case INIT_OP_IF_PHASE:
cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data,
INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* ecore_init_run is always invoked from
* sleep-able context
*/
OSAL_UDELAY(cmd->delay.delay);
break;
case INIT_OP_CALLBACK:
ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
break;
}
if (rc)
break;
}
#ifdef CONFIG_ECORE_ZIPPED_FW
OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
p_hwfn->unzip_buf = OSAL_NULL;
#endif
return rc;
}
void ecore_gtt_init(struct ecore_hwfn *p_hwfn)
{
u32 gtt_base;
u32 i;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
/* This is done by MFW on ASIC; regardless, this should only
* be done once per chip [i.e., common]. Implementation is
* not too bright, but it should work on the simple FPGA/EMUL
* scenarios.
*/
static bool initialized = false;
int poll_cnt = 500;
u32 val;
/* initialize PTT/GTT (poll for completion) */
if (!initialized) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_START_INIT_PTT_GTT, 1);
initialized = true;
}
do {
/* ptt might be overridden by HW until this is done */
OSAL_UDELAY(10);
ecore_ptt_invalidate(p_hwfn);
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
PGLUE_B_REG_INIT_DONE_PTT_GTT);
} while ((val != 1) && --poll_cnt);
if (!poll_cnt)
DP_ERR(p_hwfn, "PGLUE_B_REG_INIT_DONE didn't complete\n");
}
#endif
/* Set the global windows */
gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
if (pxp_global_win[i])
REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
pxp_global_win[i]);
}
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
const u8 *data)
{
struct ecore_fw_data *fw = p_dev->fw_data;
#ifdef CONFIG_ECORE_BINARY_FW
struct bin_buffer_hdr *buf_hdr;
u32 offset, len;
if (!data) {
DP_NOTICE(p_dev, true, "Invalid fw data\n");
return ECORE_INVAL;
}
buf_hdr = (struct bin_buffer_hdr *)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
fw->arr_data = (u32 *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
fw->modes_tree_buf = (u8 *)(data + offset);
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
fw->init_ops = (union init_op *)init_ops;
fw->arr_data = (u32 *)init_val;
fw->modes_tree_buf = (u8 *)modes_tree_buf;
fw->init_ops_size = init_ops_size;
#endif
return ECORE_SUCCESS;
}

View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_INIT_OPS__
#define __ECORE_INIT_OPS__
#include "ecore.h"
/**
* @brief ecore_init_iro_array - init iro_arr.
*
*
* @param p_dev
*/
void ecore_init_iro_array(struct ecore_dev *p_dev);
/**
* @brief ecore_init_run - Run the init-sequence.
*
*
* @param p_hwfn
* @param p_ptt
* @param phase
* @param phase_id
* @param modes
* @return _ecore_status_t
*/
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int phase,
int phase_id,
int modes);
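/*
 * Usage sketch (illustrative only): the init-sequence is normally run once per
 * init phase during hardware initialization. The PHASE_* identifiers and the
 * pf_phase_id variable below are assumptions for the example; the actual
 * values come from the firmware HSI.
 *
 *	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, modes);
 *	if (rc == ECORE_SUCCESS)
 *		rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, pf_phase_id, modes);
 */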
/**
* @brief ecore_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
*
*
* @param p_hwfn
*
* @return _ecore_status_t
*/
enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_hwfn_deallocate
*
*
* @param p_hwfn
*/
void ecore_init_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_clear_rt_data - Clears the runtime init array.
*
*
* @param p_hwfn
*/
void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_init_store_rt_reg - Store a configuration value in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
*/
void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn,
u32 rt_offset,
u32 val);
#define STORE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
#define OVERWRITE_RT_REG(hwfn, offset, val) \
ecore_init_store_rt_reg(hwfn, offset, val)
/**
* @brief ecore_init_store_rt_agg - Store an array of configuration values in the RT array.
*
*
* @param p_hwfn
* @param rt_offset
* @param val
* @param size
*/
void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
osal_size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
ecore_init_store_rt_agg(hwfn, offset, (u32*)&val, sizeof(val))
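/*
 * Usage sketch (illustrative only): STORE_RT_REG records a single runtime
 * value, while STORE_RT_REG_AGG records a whole array in one call. The offsets
 * below are placeholders; real offsets come from the runtime definitions
 * header (ecore_rt_defs.h).
 *
 *	u32 vals[4] = { 0, 1, 2, 3 };
 *
 *	STORE_RT_REG(p_hwfn, SOME_RT_OFFSET, 0x1);
 *	STORE_RT_REG_AGG(p_hwfn, SOME_OTHER_RT_OFFSET, vals);
 */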
/**
* @brief
* Initialize GTT global windows and set admin window
* related params of GTT/PTT to default values.
*
* @param p_hwfn
*/
void ecore_gtt_init(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_INIT_OPS__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,291 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_INT_H__
#define __ECORE_INT_H__
#include "ecore.h"
#include "ecore_int_api.h"
#define ECORE_CAU_DEF_RX_TIMER_RES 0
#define ECORE_CAU_DEF_TX_TIMER_RES 0
#define ECORE_SB_ATT_IDX 0x0001
#define ECORE_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define ECORE_SB_INVALID_IDX 0xffff
struct ecore_igu_block
{
u8 status;
#define ECORE_IGU_STATUS_FREE 0x01
#define ECORE_IGU_STATUS_VALID 0x02
#define ECORE_IGU_STATUS_PF 0x04
#define ECORE_IGU_STATUS_DSB 0x08
u8 vector_number;
u8 function_id;
u8 is_pf;
/* Index inside IGU [meant for back reference] */
u16 igu_sb_id;
struct ecore_sb_info *sb_info;
};
struct ecore_igu_info
{
struct ecore_igu_block entry[MAX_TOT_SB_PER_PATH];
u16 igu_dsb_id;
/* The numbers can shift when using APIs to switch SBs between PF and
* VF.
*/
struct ecore_sb_cnt_info usage;
/* Determine whether we can shift SBs between VFs and PFs */
bool b_allow_pf_vf_change;
};
/**
* @brief - Make sure the IGU CAM reflects the resources provided by MFW
*
* @param p_hwfn
* @param p_ptt
*/
int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Make sure IGU CAM reflects the default resources once again,
* starting with a 'dirty' SW database.
* @param p_hwfn
* @param p_ptt
*/
int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Translate the weakly-defined client sb-id into an IGU sb-id
*
* @param p_hwfn
* @param sb_id - user provided sb_id
*
* @return an index inside IGU CAM where the SB resides
*/
u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
/**
* @brief return a pointer to an unused valid SB
*
* @param p_hwfn
* @param b_is_pf - true iff we want a SB belonging to a PF
*
* @return pointer to an igu_block, OSAL_NULL if none is available
*/
struct ecore_igu_block *
ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf);
/* TODO Names of function may change... */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool b_set,
bool b_slowpath);
void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_int_igu_read_cam - Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
typedef enum _ecore_status_t(*ecore_int_comp_cb_t)(struct ecore_hwfn *p_hwfn,
void *cookie);
/**
* @brief ecore_int_register_cb - Register callback func for
* slowhwfn status block.
*
* Every protocol that uses the slowhwfn status block
* should register a callback function that will be called
* once there is an update of the sp status block.
*
* @param p_hwfn
* @param comp_cb - function to be called when there is an
* interrupt on the sp sb
*
* @param cookie - passed to the callback function
* @param sb_idx - OUT parameter which gives the chosen index
* for this protocol.
* @param p_fw_cons - pointer to the actual address of the
* consumer for this protocol.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
ecore_int_comp_cb_t comp_cb,
void *cookie,
u8 *sb_idx,
__le16 **p_fw_cons);
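/*
 * Usage sketch (illustrative only): a protocol driver registering for
 * slowpath status-block updates and later unregistering. 'my_proto_cb' and
 * 'my_cookie' are hypothetical names for the example; the callback must match
 * the ecore_int_comp_cb_t signature.
 *
 *	u8 sb_idx;
 *	__le16 *p_fw_cons;
 *	enum _ecore_status_t rc;
 *
 *	rc = ecore_int_register_cb(p_hwfn, my_proto_cb, my_cookie,
 *				   &sb_idx, &p_fw_cons);
 *	// ... protocol runs, consuming updates reported via my_proto_cb ...
 *	ecore_int_unregister_cb(p_hwfn, sb_idx);
 */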
/**
* @brief ecore_int_unregister_cb - Unregisters callback
* function from sp sb.
* Partner of ecore_int_register_cb -> should be called
* when no longer required.
*
* @param p_hwfn
* @param pi
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn,
u8 pi);
/**
* @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id.
*
* @param p_hwfn
*
* @return u16
*/
u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn);
/**
* @brief Status block cleanup. Should be called for each status
* block that will be used -> both PF / VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
* @param cleanup_set - set(1) / clear(0)
*/
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 sb_id,
u16 opaque,
bool b_set);
/**
* @brief ecore_int_cau_conf_sb - Configure the CAU for a given status
* block
*
* @param p_hwfn
* @param ptt
* @param sb_phys
* @param igu_sb_id
* @param vf_number
* @param vf_valid
*/
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
dma_addr_t sb_phys,
u16 igu_sb_id,
u16 vf_number,
u8 vf_valid);
/**
* @brief ecore_int_alloc
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_int_free
*
* @param p_hwfn
*/
void ecore_int_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_int_setup
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
* @param p_ptt
* @param int_mode
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode);
/**
* @brief - Initialize CAU status block entry
*
* @param p_hwfn
* @param p_sb_entry
* @param pf_id
* @param vf_number
* @param vf_valid
*/
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry, u8 pf_id,
u16 vf_number, u8 vf_valid);
enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx);
#ifndef ASIC_ONLY
#define ECORE_MAPPING_MEMORY_SIZE(dev) \
((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \
136 : NUM_OF_SBS(dev))
#else
#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev)
#endif
#endif /* __ECORE_INT_H__ */

View File

@@ -0,0 +1,352 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_INT_API_H__
#define __ECORE_INT_API_H__
#define ECORE_SB_IDX 0x0002
#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)
#ifndef ECORE_INT_MODE
#define ECORE_INT_MODE
enum ecore_int_mode {
ECORE_INT_MODE_INTA,
ECORE_INT_MODE_MSIX,
ECORE_INT_MODE_MSI,
ECORE_INT_MODE_POLL,
};
#endif
struct ecore_sb_info {
struct status_block *sb_virt;
dma_addr_t sb_phys;
u32 sb_ack; /* Last given ack */
u16 igu_sb_id;
void OSAL_IOMEM *igu_addr;
u8 flags;
#define ECORE_SB_INFO_INIT 0x1
#define ECORE_SB_INFO_SETUP 0x2
#ifdef ECORE_CONFIG_DIRECT_HWFN
struct ecore_hwfn *p_hwfn;
#endif
struct ecore_dev *p_dev;
};
struct ecore_sb_info_dbg {
u32 igu_prod;
u32 igu_cons;
u16 pi[PIS_PER_SB];
};
struct ecore_sb_cnt_info {
/* Original, current, and free SBs for PF */
int orig;
int cnt;
int free_cnt;
/* Original, current and free SBS for child VFs */
int iov_orig;
int iov_cnt;
int free_cnt_iov;
};
static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info)
{
u32 prod = 0;
u16 rc = 0;
// barrier(); /* status block is written to by the chip */
// FIXME: need some sort of barrier.
prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) &
STATUS_BLOCK_PROD_INDEX_MASK;
if (sb_info->sb_ack != prod) {
sb_info->sb_ack = prod;
rc |= ECORE_SB_IDX;
}
OSAL_MMIOWB(sb_info->p_dev);
return rc;
}
/**
* @brief This function creates an update command for interrupts that is
* written to the IGU.
*
* @param sb_info - This is the structure allocated and
* initialized per status block. Assumption is
* that it was initialized using ecore_sb_init
* @param int_cmd - Enable/Disable/Nop
* @param upd_flg - whether igu consumer should be
* updated.
*
* @return OSAL_INLINE void
*/
static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info,
enum igu_int_cmd int_cmd, u8 upd_flg)
{
struct igu_prod_cons_update igu_ack = { 0 };
igu_ack.sb_id_and_flags =
((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
(upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
(int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
(IGU_SEG_ACCESS_REG <<
IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
#ifdef ECORE_CONFIG_DIRECT_HWFN
DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr,
igu_ack.sb_id_and_flags);
#else
DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags);
#endif
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
OSAL_MMIOWB(sb_info->p_dev);
OSAL_BARRIER(sb_info->p_dev);
}
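/*
 * Usage sketch (illustrative only): a typical fastpath interrupt handler is
 * expected to disable the IGU line, consume the status block, and then ack
 * with the interrupt re-enabled. The IGU_INT_DISABLE/IGU_INT_ENABLE constant
 * names are assumed to come from the firmware HSI (enum igu_int_cmd).
 *
 *	ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
 *	// ... poll and process completions for this status block ...
 *	ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
 */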
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
{
unsigned int i;
for (i = 0; i < size / sizeof(*data); i++)
DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]);
}
#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
{
__internal_ram_wr(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
int size, u32 *data)
{
__internal_ram_wr(OSAL_NULL, addr, size, data);
}
#endif
struct ecore_hwfn;
struct ecore_ptt;
enum ecore_coalescing_fsm {
ECORE_COAL_RX_STATE_MACHINE,
ECORE_COAL_TX_STATE_MACHINE
};
/**
* @brief ecore_int_cau_conf_pi - configure cau for a given
* status block
*
* @param p_hwfn
* @param p_ptt
* @param p_sb
* @param pi_index
* @param state
* @param timeset
*/
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *p_sb,
u32 pi_index,
enum ecore_coalescing_fsm coalescing_fsm,
u8 timeset);
/**
* @brief ecore_int_igu_enable_int - enable device interrupts
*
* @param p_hwfn
* @param p_ptt
* @param int_mode - interrupt mode to use
*/
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_int_mode int_mode);
/**
* @brief ecore_int_igu_disable_int - disable device interrupts
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc
* register from igu.
*
* @param p_hwfn
*
* @return u64
*/
u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn);
#define ECORE_SP_SB_ID 0xffff
/**
* @brief ecore_int_sb_init - Initializes the sb_info structure.
*
* once the structure is initialized it can be passed to sb related functions.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info points to an uninitialized (but
* allocated) sb_info structure
* @param sb_virt_addr
* @param sb_phy_addr
* @param sb_id the sb_id to be used (zero based in driver)
* should use ECORE_SP_SB_ID for SP Status block
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *sb_info,
void *sb_virt_addr,
dma_addr_t sb_phy_addr,
u16 sb_id);
/**
* @brief ecore_int_sb_setup - Setup the sb.
*
* @param p_hwfn
* @param p_ptt
* @param sb_info initialized sb_info structure
*/
void ecore_int_sb_setup(
struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *sb_info);
/**
* @brief ecore_int_sb_release - releases the sb_info structure.
*
* once the structure is released, its memory can be freed
*
* @param p_hwfn
* @param sb_info points to an allocated sb_info structure
* @param sb_id the sb_id to be used (zero based in driver)
* should never be equal to ECORE_SP_SB_ID
* (SP Status block)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
struct ecore_sb_info *sb_info,
u16 sb_id);
/**
* @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
* default status block.
*
* @param p_hwfn - pointer to hwfn
*
*/
void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
/**
* @brief ecore_int_get_num_sbs - get the number of status
* blocks configured for this function in the IGU.
*
* @param p_hwfn
* @param p_sb_cnt_info
*
* @return
*/
void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
struct ecore_sb_cnt_info *p_sb_cnt_info);
/**
* @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
* release. The API needs to be called after releasing all slowpath IRQs
* of the device.
*
* @param p_dev
*
*/
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
/**
* @brief ecore_int_attn_clr_enable - sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
* @param p_dev
* @param clr_enable
*
*/
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
/**
* @brief Read debug information regarding a given SB.
*
* @param p_hwfn
* @param p_ptt
* @param p_sb - pointer to the status block for which we want to get info.
* @param p_info - pointer to struct to fill with information regarding SB.
*
* @return ECORE_SUCCESS if pointer is filled; failure otherwise.
*/
enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_sb_info *p_sb,
struct ecore_sb_info_dbg *p_info);
/**
* @brief - Move a free Status block between PF and child VF
*
* @param p_hwfn
* @param p_ptt
* @param sb_id - The PF fastpath vector to be moved [re-assigned if claiming
* from VF, given-up if moving to VF]
* @param b_to_vf - PF->VF == true, VF->PF == false
*
* @return ECORE_SUCCESS if SB successfully moved.
*/
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u16 sb_id, bool b_to_vf);
#endif

View File

@@ -0,0 +1,801 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_SRIOV_API_H__
#define __ECORE_SRIOV_API_H__
#include "common_hsi.h"
#include "ecore_status.h"
#define ECORE_ETH_VF_NUM_MAC_FILTERS 1
#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2
#define ECORE_VF_ARRAY_LENGTH (3)
#define IS_VF(p_dev) ((p_dev)->b_is_vf)
#define IS_PF(p_dev) (!((p_dev)->b_is_vf))
#ifdef CONFIG_ECORE_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */
/* @@@ TBD MichalK - what should this number be*/
#define ECORE_MAX_VF_CHAINS_PER_PF 16
/* vport update extended feature tlvs flags */
enum ecore_iov_vport_update_flag {
ECORE_IOV_VP_UPDATE_ACTIVATE = 0,
ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1,
ECORE_IOV_VP_UPDATE_TX_SWITCH = 2,
ECORE_IOV_VP_UPDATE_MCAST = 3,
ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4,
ECORE_IOV_VP_UPDATE_RSS = 5,
ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6,
ECORE_IOV_VP_UPDATE_SGE_TPA = 7,
ECORE_IOV_VP_UPDATE_MAX = 8,
};
/* PF to VF STATUS is part of the vfpf-channel API
and must be forward compatible */
enum ecore_iov_pf_to_vf_status {
PFVF_STATUS_WAITING = 0,
PFVF_STATUS_SUCCESS,
PFVF_STATUS_FAILURE,
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
PFVF_STATUS_MALICIOUS,
};
struct ecore_mcp_link_params;
struct ecore_mcp_link_state;
struct ecore_mcp_link_capabilities;
/* These defines are used by the hw-channel; should never change order */
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
struct ecore_vf_acquire_sw_info {
u32 driver_version;
u8 os_type;
};
struct ecore_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
};
struct ecore_iov_vf_init_params {
u16 rel_vf_id;
/* Number of requested queues; currently, different numbers of Rx and Tx
* queues are not supported.
*/
/* TODO - remove this limitation */
u16 num_queues;
/* Allow the client to choose which qzones to use for Rx/Tx,
* and which queue_base to use for Tx queues on a per-queue basis.
* Notice values should be relative to the PF resources.
*/
u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
u8 vport_id;
/* Should be set in case RSS is going to be used for VF */
u8 rss_eng_id;
};
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
VF_PF_UNKNOWN_STATE = 0,
VF_PF_WAIT_FOR_START_REQUEST = 1,
VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2,
VF_PF_REQUEST_IN_PROCESSING = 3,
VF_PF_RESPONSE_READY = 4,
};
struct ecore_iov_sw_mbx {
enum mbx_state mbx_state;
u32 request_size;
u32 request_offset;
u32 response_size;
u32 response_offset;
};
/**
* @brief Get the vf sw mailbox params
*
* @param p_hwfn
* @param rel_vf_id
*
* @return struct ecore_iov_sw_mbx*
*/
struct ecore_iov_sw_mbx*
ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
#endif
/* This struct is part of ecore_dev and contains data relevant to all hwfns;
* Initialized only if SR-IOV capability is exposed in PCIe config space.
*/
struct ecore_hw_sriov_info {
/* standard SRIOV capability fields, mostly for debugging */
int pos; /* capability position */
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
u16 total_vfs; /* total VFs associated with the PF */
u16 num_vfs; /* number of vfs that have been started */
u16 initial_vfs; /* initial VFs associated with the PF */
u16 nr_virtfn; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u16 vf_device_id; /* VF device id */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u32 first_vf_in_pf;
};
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief mark/clear all VFs before/after an incoming PCIe sriov
* disable.
*
* @param p_dev
* @param to_disable
*/
void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
u8 to_disable);
/**
* @brief mark/clear chosen VF before/after an incoming PCIe
* sriov disable.
*
* @param p_dev
* @param rel_vf_id
* @param to_disable
*/
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
u16 rel_vf_id,
u8 to_disable);
/**
* @brief ecore_iov_init_hw_for_vf - initialize the HW for
* enabling access of a VF. Also includes preparing the
* IGU for VF access. This needs to be called AFTER hw is
* initialized and BEFORE VF is loaded inside the VM.
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_iov_vf_init_params *p_params);
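/*
 * Usage sketch (illustrative only): enabling a single VF with four queues
 * after the PF hardware is initialized; the field values are placeholders
 * chosen for the example.
 *
 *	struct ecore_iov_vf_init_params params;
 *	enum _ecore_status_t rc;
 *
 *	OSAL_MEMSET(&params, 0, sizeof(params));
 *	params.rel_vf_id = 0;
 *	params.num_queues = 4;
 *	params.vport_id = 1;
 *
 *	rc = ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, &params);
 */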
/**
* @brief ecore_iov_process_mbx_req - process a request received
* from the VF
*
* @param p_hwfn
* @param p_ptt
* @param vfid
*/
void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid);
/**
* @brief ecore_iov_release_hw_for_vf - called once upper layer
* knows VF is done with - can release any resources
* allocated for VF at this point. this must be done once
* we know VF is no longer loaded in VM.
*
* @param p_hwfn
* @param p_ptt
* @param rel_vf_id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id);
/**
* @brief ecore_iov_set_vf_ctx - set a context for a given VF
*
* @param p_hwfn
* @param vf_id
* @param ctx
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
u16 vf_id,
void *ctx);
/**
* @brief FLR cleanup for all VFs
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief FLR cleanup for single VF
*
* @param p_hwfn
* @param p_ptt
* @param rel_vf_id
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id);
/**
* @brief Update the bulletin with link information. Notice this does NOT
* send a bulletin update, only updates the PF's bulletin.
*
* @param p_hwfn
* @param p_vf
* @param params - the link params to use for the VF link configuration
* @param link - the link output to use for the VF link configuration
* @param p_caps - the link default capabilities.
*/
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities *p_caps);
/**
* @brief Returns link information as perceived by VF.
*
* @param p_hwfn
* @param p_vf
* @param p_params - the link params visible to vf.
* @param p_link - the link state visible to vf.
* @param p_caps - the link default capabilities visible to vf.
*/
void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
u16 vfid,
struct ecore_mcp_link_params *params,
struct ecore_mcp_link_state *link,
struct ecore_mcp_link_capabilities *p_caps);
/**
* @brief return if the VF is pending FLR
*
* @param p_hwfn
* @param rel_vf_id
*
* @return bool
*/
bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief Check if given VF ID @vfid is valid
* w.r.t. @b_enabled_only value
* if b_enabled_only = true - only enabled VF id is valid
* else any VF id less than max_vfs is valid
*
* @param p_hwfn
* @param rel_vf_id - Relative VF ID
* @param b_enabled_only - consider only enabled VF
* @param b_non_malicious - true iff we want to validate vf isn't malicious.
*
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
/**
* @brief Get VF's public info structure
*
* @param p_hwfn
* @param vfid - Relative VF ID
* @param b_enabled_only - false if want to access even if vf is disabled
*
* @return struct ecore_public_vf_info *
*/
struct ecore_public_vf_info*
ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
u16 vfid, bool b_enabled_only);
/**
* @brief fills a bitmask of all VFs which have pending unhandled
* messages.
*
* @param p_hwfn
*/
void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
u64 *events);
/**
* @brief Copy VF's message to PF's buffer
*
* @param p_hwfn
* @param ptt
* @param vfid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *ptt,
int vfid);
/**
* @brief Set forced MAC address in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
*
* @param p_hwfn
* @param mac
* @param vfid
*/
void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
u8 *mac, int vfid);
/**
* @brief Set MAC address in PFs copy of bulletin board without
* configuring FW/HW.
*
* @param p_hwfn
* @param mac
* @param vfid
*/
enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
u8 *mac, int vfid);
/**
* @brief Set default behaviour of VF in case no vlans are configured for it
* whether to accept only untagged traffic or all.
* Must be called prior to the VF vport-start.
*
* @param p_hwfn
* @param b_untagged_only
* @param vfid
*
* @return ECORE_SUCCESS if configuration would stick.
*/
enum _ecore_status_t
ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
bool b_untagged_only,
int vfid);
/**
* @brief Get VFs opaque fid.
*
* @param p_hwfn
* @param vfid
* @param opaque_fid
*/
void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
u16 *opaque_fid);
/**
* @brief Set forced VLAN [pvid] in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
* Setting of pvid 0 would clear the feature.
* @param p_hwfn
* @param pvid
* @param vfid
*/
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid);
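/*
 * Example (hedged sketch, not part of the driver API): a PF forcing a MAC
 * and a pvid of 100 on VF 0 and then publishing the change through the
 * bulletin board. The p_ptt handle is assumed to be already acquired by
 * the caller; the MAC value is illustrative only.
 *
 *	u8 mac[6] = {0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01};
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, 0);
 *	ecore_iov_bulletin_set_forced_vlan(p_hwfn, 100, 0);
 *	(void)ecore_iov_post_vf_bulletin(p_hwfn, 0, p_ptt);
 */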
/**
* @brief Check if VF has VPORT instance. This can be used
* to check if VPORT is active.
*
* @param p_hwfn
*/
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief PF posts the bulletin to the VF
*
* @param p_hwfn
* @param p_vf
* @param p_ptt
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
int vfid,
struct ecore_ptt *p_ptt);
/**
* @brief Check if given VF (@vfid) is marked as stopped
*
* @param p_hwfn
* @param vfid
*
* @return bool : true if stopped
*/
bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Configure VF anti spoofing
*
* @param p_hwfn
* @param vfid
* @param val - spoofchk value - true/false
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
int vfid, bool val);
/**
* @brief Get VF's configured spoof value.
*
* @param p_hwfn
* @param vfid
*
* @return bool - spoofchk value - true/false
*/
bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Check for SRIOV sanity by PF.
*
* @param p_hwfn
* @param vfid
*
 * @return bool - true if sanity checks pass, else false
*/
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief Get the num of VF chains.
*
* @param p_hwfn
*
* @return u8
*/
u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn);
/**
* @brief Get vf request mailbox params
*
* @param p_hwfn
* @param rel_vf_id
* @param pp_req_virt_addr
* @param p_req_virt_size
*/
void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id,
void **pp_req_virt_addr,
u16 *p_req_virt_size);
/**
* @brief Get vf mailbox params
*
* @param p_hwfn
* @param rel_vf_id
* @param pp_reply_virt_addr
* @param p_reply_virt_size
*/
void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id,
void **pp_reply_virt_addr,
u16 *p_reply_virt_size);
/**
* @brief Validate if the given length is a valid vfpf message
* length
*
* @param length
*
* @return bool
*/
bool ecore_iov_is_valid_vfpf_msg_length(u32 length);
/**
* @brief Return the max pfvf message length
*
* @return u32
*/
u32 ecore_iov_pfvf_msg_length(void);
/**
* @brief Returns forced MAC address if one is configured
*
 * @param p_hwfn
 * @param rel_vf_id
 *
 * @return OSAL_NULL if the MAC isn't forced; otherwise, returns the MAC.
*/
u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief Returns pvid if one is configured
*
 * @param p_hwfn
 * @param rel_vf_id
*
* @return 0 if no pvid is configured, otherwise the pvid.
*/
u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief Configure VFs tx rate
*
* @param p_hwfn
* @param p_ptt
* @param vfid
* @param val - tx rate value in Mb/sec.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid, int val);
/**
* @brief - Retrieves the statistics associated with a VF
*
* @param p_hwfn
* @param p_ptt
* @param vfid
* @param p_stats - this will be filled with the VF statistics
*
* @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise.
*/
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
struct ecore_eth_stats *p_stats);
/**
* @brief - Retrieves num of rxqs chains
*
* @param p_hwfn
* @param rel_vf_id
*
* @return num of rxqs chains.
*/
u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief - Retrieves num of active rxqs chains
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief - Retrieves ctx pointer
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
 * @brief - Retrieves VF's num sbs
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
 * @brief - Return true if VF is waiting for acquire
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
 * @brief - Return true if VF is acquired but not initialized
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
 * @brief - Return true if VF is acquired and initialized
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
 * @brief - Return true if VF has started in FW
*
* @param p_hwfn
* @param rel_vf_id
*
* @return
*/
bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
* @brief - Get VF's vport min rate configured.
* @param p_hwfn
* @param rel_vf_id
*
* @return - rate in Mbps
*/
int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
/**
* @brief - Configure min rate for VF's vport.
* @param p_dev
* @param vfid
 * @param rate - rate in Mbps
*
* @return
*/
enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
int vfid, u32 rate);
/**
* @brief ecore_pf_configure_vf_queue_coalesce - PF configure coalesce parameters
* of VFs for Rx and Tx queue.
* While the API allows setting coalescing per-qid, all queues sharing a SB
* should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
*
* @param p_hwfn
* @param rx_coal - Rx Coalesce value in micro seconds.
* @param tx_coal - TX Coalesce value in micro seconds.
* @param vf_id
* @param qid
*
* @return int
**/
enum _ecore_status_t
ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal,
u16 vf_id, u16 qid);
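/*
 * Example (hedged sketch): requesting a 16us Rx / 32us Tx coalescing
 * interval for qid 0 of VF 2. Per the note above, the qid must stay within
 * the same SB range as the other queues sharing that SB; the values used
 * here are illustrative only.
 *
 *	rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 16, 32, 2, 0);
 */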
/**
 * @brief - Given a VF index, return the index of the next active VF (including the given index).
*
* @param p_hwfn
* @param rel_vf_id
*
* @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
u16 vxlan_port, u16 geneve_port);
#else
static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev, u8 to_disable) {}
static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev, u16 rel_vf_id, u8 to_disable) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, struct ecore_iov_vf_init_params *p_params) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_SUCCESS;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, u16 vf_id, void *ctx) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid, struct ecore_mcp_link_params *params, struct ecore_mcp_link_state *link, struct ecore_mcp_link_capabilities *p_caps) {}
static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious) {return false;}
static OSAL_INLINE struct ecore_public_vf_info* ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid, bool b_enabled_only) {return OSAL_NULL;}
static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid) {}
static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, u64 *events) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, struct ecore_ptt *ptt, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, u8 *mac, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, bool b_untagged_only, int vfid) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, u16 *opaque_fid) {}
static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, u16 pvid, int vfid) {}
static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, int vfid, struct ecore_ptt *p_ptt) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, int vfid, bool val) {return ECORE_INVAL;}
static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) {return false;}
static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) {return 0;}
static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_req_virt_addr, u16 *p_req_virt_size) {}
static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, u16 rel_vf_id, void **pp_reply_virt_addr, u16 *p_reply_virt_size) {}
static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length) {return false;}
static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void) {return 0;}
static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, int val) { return ECORE_INVAL; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, int vfid, struct ecore_eth_stats *p_stats) { return ECORE_INVAL; }
static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return OSAL_NULL;}
static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return 0;}
static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) {return false;}
static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) { return 0; }
static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, int vfid, u32 rate) { return ECORE_INVAL; }
#endif
static OSAL_INLINE void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port) { return; }
static OSAL_INLINE u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) { return E4_MAX_NUM_VFS; }
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
_i < E4_MAX_NUM_VFS; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
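/*
 * Example (hedged sketch): iterating over all currently active VFs with the
 * helper macro above, e.g. to inspect their public info.
 *
 *	u16 i;
 *	struct ecore_public_vf_info *p_info;
 *
 *	ecore_for_each_vf(p_hwfn, i) {
 *		p_info = ecore_iov_get_public_vf_info(p_hwfn, i, true);
 *		if (p_info == OSAL_NULL)
 *			continue;
 *		... inspect p_info ...
 *	}
 */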
#endif

View File

@ -0,0 +1,183 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __IRO_H__
#define __IRO_H__
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
/* Tstorm port statistics */
#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
/* Tstorm ll2 port statistics */
#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) (IRO[2].base + ((port_id) * IRO[2].m1))
#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
/* Ustorm VF-PF Channel ready flag */
#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[3].base + ((vf_id) * IRO[3].m1))
#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
/* Ustorm Final flr cleanup ack */
#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
/* Ustorm Event ring consumer */
#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
#define USTORM_EQE_CONS_SIZE (IRO[5].size)
/* Ustorm eth queue zone */
#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
/* Ustorm Common Queue ring consumer */
#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
/* Xstorm Integration Test Data */
#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
/* Ystorm Integration Test Data */
#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
/* Pstorm Integration Test Data */
#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
/* Tstorm Integration Test Data */
#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
/* Mstorm Integration Test Data */
#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
/* Ustorm Integration Test Data */
#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
#define USTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
/* Tstorm producers */
#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
#define TSTORM_LL2_RX_PRODS_SIZE (IRO[14].size)
/* Tstorm LightL2 queue statistics */
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
/* Ustorm LiteL2 queue statistics */
#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) (IRO[16].base + ((core_rx_queue_id) * IRO[16].m1))
#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
/* Pstorm LiteL2 queue statistics */
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) (IRO[17].base + ((core_tx_stats_id) * IRO[17].m1))
#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[17].size)
/* Mstorm queue statistics */
#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
/* Mstorm ETH PF queues producers */
#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) (IRO[19].base + ((queue_id) * IRO[19].m1))
#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size mode. */
#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) (IRO[20].base + ((vf_id) * IRO[20].m1) + ((vf_queue_id) * IRO[20].m2))
#define MSTORM_ETH_VF_PRODS_SIZE (IRO[20].size)
/* TPA aggregation timeout in us resolution (on ASIC) */
#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[21].base)
#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[21].size)
/* Mstorm pf statistics */
#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[22].base + ((pf_id) * IRO[22].m1))
#define MSTORM_ETH_PF_STAT_SIZE (IRO[22].size)
/* Ustorm queue statistics */
#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[23].base + ((stat_counter_id) * IRO[23].m1))
#define USTORM_QUEUE_STAT_SIZE (IRO[23].size)
/* Ustorm pf statistics */
#define USTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
#define USTORM_ETH_PF_STAT_SIZE (IRO[24].size)
/* Pstorm queue statistics */
#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
#define PSTORM_QUEUE_STAT_SIZE (IRO[25].size)
/* Pstorm pf statistics */
#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) (IRO[26].base + ((pf_id) * IRO[26].m1))
#define PSTORM_ETH_PF_STAT_SIZE (IRO[26].size)
/* Control frame's EthType configuration for TX control frame security */
#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) (IRO[27].base + ((ethType_id) * IRO[27].m1))
#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[27].size)
/* Tstorm last parser message */
#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[28].base)
#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[28].size)
/* Tstorm Eth limit Rx rate */
#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[29].base + ((pf_id) * IRO[29].m1))
#define ETH_RX_RATE_LIMIT_SIZE (IRO[29].size)
/* Xstorm queue zone */
#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[30].base + ((queue_id) * IRO[30].m1))
#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[30].size)
/* Ystorm cqe producer */
#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[31].base + ((rss_id) * IRO[31].m1))
#define YSTORM_TOE_CQ_PROD_SIZE (IRO[31].size)
/* Ustorm cqe producer */
#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[32].base + ((rss_id) * IRO[32].m1))
#define USTORM_TOE_CQ_PROD_SIZE (IRO[32].size)
/* Ustorm grq producer */
#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[33].base + ((pf_id) * IRO[33].m1))
#define USTORM_TOE_GRQ_PROD_SIZE (IRO[33].size)
/* Tstorm cmdq-cons of given command queue-id */
#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[34].base + ((cmdq_queue_id) * IRO[34].m1))
#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[34].size)
/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, BDqueue-id */
#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) (IRO[35].base + ((func_id) * IRO[35].m1) + ((bdq_id) * IRO[35].m2))
#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[35].size)
/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) (IRO[36].base + ((func_id) * IRO[36].m1) + ((bdq_id) * IRO[36].m2))
#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[36].size)
/* Tstorm iSCSI RX stats */
#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[37].base + ((pf_id) * IRO[37].m1))
#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[37].size)
/* Mstorm iSCSI RX stats */
#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[38].base + ((pf_id) * IRO[38].m1))
#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[38].size)
/* Ustorm iSCSI RX stats */
#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) (IRO[39].base + ((pf_id) * IRO[39].m1))
#define USTORM_ISCSI_RX_STATS_SIZE (IRO[39].size)
/* Xstorm iSCSI TX stats */
#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[40].base + ((pf_id) * IRO[40].m1))
#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[40].size)
/* Ystorm iSCSI TX stats */
#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[41].base + ((pf_id) * IRO[41].m1))
#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[41].size)
/* Pstorm iSCSI TX stats */
#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) (IRO[42].base + ((pf_id) * IRO[42].m1))
#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[42].size)
/* Tstorm FCoE RX stats */
#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) (IRO[43].base + ((pf_id) * IRO[43].m1))
#define TSTORM_FCOE_RX_STATS_SIZE (IRO[43].size)
/* Pstorm FCoE TX stats */
#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) (IRO[44].base + ((pf_id) * IRO[44].m1))
#define PSTORM_FCOE_TX_STATS_SIZE (IRO[44].size)
/* Pstorm RDMA queue statistics */
#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[45].base + ((rdma_stat_counter_id) * IRO[45].m1))
#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[45].size)
/* Tstorm RDMA queue statistics */
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + ((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
/* Xstorm iWARP rxmit stats */
#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + ((pf_id) * IRO[47].m1))
#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
/* Tstorm RoCE Event Statistics */
#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + ((roce_pf_id) * IRO[48].m1))
#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
#endif /* __IRO_H__ */

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
ARRAY_DECL struct iro iro_arr[49] = {
{ 0x0, 0x0, 0x0, 0x0, 0x8}, /* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x4cb0, 0x80, 0x0, 0x0, 0x80}, /* TSTORM_PORT_STAT_OFFSET(port_id) */
{ 0x6518, 0x20, 0x0, 0x0, 0x20}, /* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4}, /* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xa80, 0x8, 0x0, 0x0, 0x4}, /* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x2}, /* USTORM_EQE_CONS_OFFSET(pf_id) */
{ 0x80, 0x8, 0x0, 0x0, 0x4}, /* USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) */
{ 0x84, 0x8, 0x0, 0x0, 0x2}, /* USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) */
{ 0x4c40, 0x0, 0x0, 0x0, 0x78}, /* XSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x3df0, 0x0, 0x0, 0x0, 0x78}, /* YSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x29b0, 0x0, 0x0, 0x0, 0x78}, /* PSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4c38, 0x0, 0x0, 0x0, 0x78}, /* TSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x4990, 0x0, 0x0, 0x0, 0x78}, /* MSTORM_INTEG_TEST_DATA_OFFSET */
{ 0x7e48, 0x0, 0x0, 0x0, 0x78}, /* USTORM_INTEG_TEST_DATA_OFFSET */
{ 0xa28, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0x61f8, 0x10, 0x0, 0x0, 0x10}, /* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0x95b8, 0x30, 0x0, 0x0, 0x30}, /* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
{ 0x4b60, 0x80, 0x0, 0x0, 0x40}, /* MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* MSTORM_ETH_PF_PRODS_OFFSET(queue_id) */
{ 0x53a0, 0x80, 0x4, 0x0, 0x4}, /* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0xc7c8, 0x0, 0x0, 0x0, 0x4}, /* MSTORM_TPA_TIMEOUT_US_OFFSET */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20}, /* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x8050, 0x40, 0x0, 0x0, 0x30}, /* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0xe770, 0x60, 0x0, 0x0, 0x60}, /* USTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38}, /* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0xf1b0, 0x78, 0x0, 0x0, 0x78}, /* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4}, /* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0xaef8, 0x0, 0x0, 0x0, 0xf0}, /* TSTORM_ETH_PRS_INPUT_OFFSET */
{ 0xafe8, 0x8, 0x0, 0x0, 0x8}, /* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8}, /* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0xac0, 0x8, 0x0, 0x0, 0x8}, /* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
{ 0x2578, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_CQ_PROD_OFFSET(rss_id) */
{ 0x24f8, 0x8, 0x0, 0x0, 0x8}, /* USTORM_TOE_GRQ_PROD_OFFSET(pf_id) */
{ 0x0, 0x8, 0x0, 0x0, 0x8}, /* TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) */
{ 0x200, 0x10, 0x8, 0x0, 0x8}, /* TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x10, 0x8, 0x0, 0x2}, /* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xd9a8, 0x38, 0x0, 0x0, 0x24}, /* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x12988, 0x10, 0x0, 0x0, 0x8}, /* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18}, /* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0xa580, 0x38, 0x0, 0x0, 0x10}, /* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x86f8, 0x30, 0x0, 0x0, 0x18}, /* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x101f8, 0x10, 0x0, 0x0, 0x10}, /* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0xde28, 0x48, 0x0, 0x0, 0x38}, /* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
{ 0x10660, 0x20, 0x0, 0x0, 0x20}, /* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x2b80, 0x80, 0x0, 0x0, 0x10}, /* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x5020, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0xc9b0, 0x30, 0x0, 0x0, 0x10}, /* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
{ 0xeec0, 0x10, 0x0, 0x0, 0x10}, /* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
};
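/*
 * Worked example (derived from the table above, assuming the initializer
 * order is { base, m1, m2, m3, size }): MSTORM_ETH_VF_PRODS_OFFSET uses
 * IRO[20] = { 0x53a0, 0x80, 0x4, 0x0, 0x4 }, i.e. base 0x53a0, a 0x80
 * stride per vf_id and a 0x4 stride per vf_queue_id, so
 *
 *	MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id)
 *		= 0x53a0 + (vf_id * 0x80) + (vf_queue_id * 0x4)
 *
 * e.g. VF 2, queue 1 -> 0x53a0 + 0x100 + 0x4 = 0x54a4, with a 4-byte size.
 */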
#endif /* __IRO_VALUES_H__ */

View File

@ -0,0 +1,153 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_ISCSI_H__
#define __ECORE_ISCSI_H__
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_hsi_common.h"
#include "tcp_common.h"
#include "ecore_hsi_iscsi.h"
#include "ecore_sp_commands.h"
#include "ecore_iscsi_api.h"
struct ecore_iscsi_info {
osal_spinlock_t lock;
osal_list_t free_list;
u16 max_num_outstanding_tasks;
void *event_context;
iscsi_event_cb_t event_cb;
};
enum _ecore_status_t ecore_iscsi_alloc(struct ecore_hwfn *p_hwfn);
void ecore_iscsi_setup(struct ecore_hwfn *p_hwfn);
void ecore_iscsi_free(struct ecore_hwfn *p_hwfn);
void ecore_iscsi_free_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_sp_iscsi_conn_offload - iSCSI connection offload
*
* This ramrod offloads iSCSI connection to FW
*
 * @param p_hwfn
* @param p_conn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_conn_offload(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_conn_update - iSCSI connection update
*
 * This ramrod updates an iSCSI offloaded connection in FW
*
 * @param p_hwfn
* @param p_conn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_conn_update(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_mac_update - iSCSI connection's MAC update
*
* This ramrod updates remote MAC for iSCSI offloaded connection in FW
*
 * @param p_hwfn
* @param p_conn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_mac_update(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_conn_terminate - iSCSI connection
* terminate
*
* This ramrod deletes iSCSI offloaded connection in FW
*
 * @param p_hwfn
* @param p_conn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_conn_terminate(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
/**
* @brief ecore_sp_iscsi_conn_clear_sq - iSCSI connection
* clear SQ
*
* This ramrod clears connection's SQ in FW
*
 * @param p_hwfn
* @param p_conn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_conn_clear_sq(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
#endif /*__ECORE_ISCSI_H__*/

View File

@ -0,0 +1,283 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_ISCSI_API_H__
#define __ECORE_ISCSI_API_H__
typedef enum _ecore_status_t (*iscsi_event_cb_t)(void *context,
u8 fw_event_code,
void *fw_handle);
struct ecore_iscsi_conn {
osal_list_entry_t list_entry;
bool free_on_delete;
u16 conn_id;
u32 icid;
u32 fw_cid;
u8 layer_code;
u8 offl_flags;
u8 connect_mode;
u32 initial_ack;
dma_addr_t sq_pbl_addr;
struct ecore_chain r2tq;
struct ecore_chain xhq;
struct ecore_chain uhq;
struct tcp_upload_params *tcp_upload_params_virt_addr;
dma_addr_t tcp_upload_params_phys_addr;
struct scsi_terminate_extra_params *queue_cnts_virt_addr;
dma_addr_t queue_cnts_phys_addr;
dma_addr_t syn_phy_addr;
u16 syn_ip_payload_length;
u8 local_mac[6];
u8 remote_mac[6];
u16 vlan_id;
u8 tcp_flags;
u8 ip_version;
u32 remote_ip[4];
u32 local_ip[4];
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
u32 rcv_next;
u32 snd_una;
u32 snd_next;
u32 snd_max;
u32 snd_wnd;
u32 rcv_wnd;
u32 snd_wl1;
u32 cwnd;
u32 ss_thresh;
u16 srtt;
u16 rtt_var;
u32 ts_recent;
u32 ts_recent_age;
u32 total_rt;
u32 ka_timeout_delta;
u32 rt_timeout_delta;
u8 dup_ack_cnt;
u8 snd_wnd_probe_cnt;
u8 ka_probe_cnt;
u8 rt_cnt;
u32 flow_label;
u32 ka_timeout;
u32 ka_interval;
u32 max_rt_time;
u32 initial_rcv_wnd;
u8 ttl;
u8 tos_or_tc;
u16 remote_port;
u16 local_port;
u16 mss;
u8 snd_wnd_scale;
u8 rcv_wnd_scale;
u16 da_timeout_value;
u8 ack_frequency;
u8 update_flag;
#define ECORE_ISCSI_CONN_HD_EN 0x01
#define ECORE_ISCSI_CONN_DD_EN 0x02
#define ECORE_ISCSI_CONN_INITIAL_R2T 0x04
#define ECORE_ISCSI_CONN_IMMEDIATE_DATA 0x08
u8 default_cq;
u32 max_seq_size;
u32 max_recv_pdu_length;
u32 max_send_pdu_length;
u32 first_seq_length;
u32 exp_stat_sn;
u32 stat_sn;
u16 physical_q0;
u16 physical_q1;
u8 abortive_dsconnect;
};
struct ecore_iscsi_stats
{
u64 iscsi_rx_bytes_cnt;
u64 iscsi_rx_packet_cnt;
u64 iscsi_rx_new_ooo_isle_events_cnt;
u32 iscsi_cmdq_threshold_cnt;
u32 iscsi_rq_threshold_cnt;
u32 iscsi_immq_threshold_cnt;
u64 iscsi_rx_dropped_pdus_task_not_valid;
u64 iscsi_rx_data_pdu_cnt;
u64 iscsi_rx_r2t_pdu_cnt;
u64 iscsi_rx_total_pdu_cnt;
u64 iscsi_tx_go_to_slow_start_event_cnt;
u64 iscsi_tx_fast_retransmit_event_cnt;
u64 iscsi_tx_data_pdu_cnt;
u64 iscsi_tx_r2t_pdu_cnt;
u64 iscsi_tx_total_pdu_cnt;
u64 iscsi_tx_bytes_cnt;
u64 iscsi_tx_packet_cnt;
};
/**
* @brief ecore_iscsi_acquire_connection - allocate resources,
 * provides connection handle (CID) as out parameter.
*
 * @param p_hwfn
* @param p_conn partially initialized incoming container of
* iSCSI connection data
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_acquire_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_in_conn,
struct ecore_iscsi_conn **p_out_conn);
void OSAL_IOMEM *ecore_iscsi_get_db_addr(struct ecore_hwfn *p_hwfn,
u32 cid);
void OSAL_IOMEM *ecore_iscsi_get_global_cmdq_cons(struct ecore_hwfn *p_hwfn,
u8 relative_q_id);
void OSAL_IOMEM *ecore_iscsi_get_primary_bdq_prod(struct ecore_hwfn *p_hwfn,
u8 bdq_id);
void OSAL_IOMEM *ecore_iscsi_get_secondary_bdq_prod(struct ecore_hwfn *p_hwfn,
u8 bdq_id);
/**
* @brief ecore_iscsi_offload_connection - offload previously
* allocated iSCSI connection
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_offload_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
 * @brief ecore_iscsi_release_connection - deletes connection
 * resources (including container of iSCSI connection
* data)
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*/
void ecore_iscsi_release_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_terminate_connection - destroys previously
* offloaded iSCSI connection
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_terminate_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_update_connection - updates previously
* offloaded iSCSI connection
*
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_update_connection(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_mac_update_connection - updates remote MAC for previously
* offloaded iSCSI connection
*
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_update_remote_mac(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_iscsi_clear_connection_sq - clear SQ
* offloaded iSCSI connection
*
*
 * @param p_hwfn
* @param p_conn container of iSCSI connection data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_iscsi_clear_connection_sq(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_conn *p_conn);
/**
* @brief ecore_sp_iscsi_func_start
*
* This ramrod inits iSCSI functionality in FW
*
 * @param p_hwfn
* @param comp_mode
* @param comp_addr
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_iscsi_func_start(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr,
void *async_event_context,
iscsi_event_cb_t async_event_cb);
enum _ecore_status_t
ecore_sp_iscsi_func_stop(struct ecore_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_addr);
enum _ecore_status_t
ecore_iscsi_get_stats(struct ecore_hwfn *p_hwfn,
struct ecore_iscsi_stats *stats);
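/*
 * Typical usage (hedged sketch): the connection life cycle implied by the
 * API above. Error handling is omitted; p_in_conn is a caller-owned,
 * partially initialized struct ecore_iscsi_conn.
 *
 *	struct ecore_iscsi_conn *p_conn;
 *
 *	ecore_iscsi_acquire_connection(p_hwfn, p_in_conn, &p_conn);
 *	... fill the TCP/iSCSI fields of p_conn ...
 *	ecore_iscsi_offload_connection(p_hwfn, p_conn);
 *	... run I/O; optionally ecore_iscsi_update_connection(p_hwfn, p_conn) ...
 *	ecore_iscsi_terminate_connection(p_hwfn, p_conn);
 *	ecore_iscsi_release_connection(p_hwfn, p_conn);
 */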
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,194 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_L2_H__
#define __ECORE_L2_H__
#include "ecore.h"
#include "ecore_hw.h"
#include "ecore_spq.h"
#include "ecore_l2_api.h"
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define ECORE_QUEUE_CID_PF (0xff)
/* Almost identical to the ecore_queue_start_common_params,
* but here we maintain the SB index in IGU CAM.
*/
struct ecore_queue_cid_params {
u8 vport_id;
u16 queue_id;
u8 stats_id;
};
/* Additional parameters required for initialization of the queue_cid
* and are relevant only for a PF initializing one for its VFs.
*/
struct ecore_queue_cid_vf_params {
/* Should match the VF's relative index */
u8 vfid;
/* 0-based queue index. Should reflect the relative qzone the
* VF thinks is associated with it [in its range].
*/
u8 vf_qid;
/* Indicates a VF is legacy, making it differ in several things:
* - Producers would be placed in a different place.
* - Makes assumptions regarding the CIDs.
*/
u8 vf_legacy;
 * For VFs, this index arrives via TLV to differentiate between
* different queues opened on the same qzone, and is passed
* [where the PF would have allocated it internally for its own].
*/
u8 qid_usage_idx;
};
struct ecore_queue_cid {
/* For stats-id, the `rel' is actually absolute as well */
struct ecore_queue_cid_params rel;
struct ecore_queue_cid_params abs;
/* These have no 'relative' meaning */
u16 sb_igu_id;
u8 sb_idx;
u32 cid;
u16 opaque_fid;
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
u8 vfid;
u8 vf_qid;
 * We need an additional index to differentiate between queues opened
 * for the same queue-zone, as VFs would have to communicate the info
 * to the PF [otherwise the PF has no way to differentiate].
*/
u8 qid_usage_idx;
/* Legacy VFs might have Rx producer located elsewhere */
u8 vf_legacy;
#define ECORE_QCID_LEGACY_VF_RX_PROD (1 << 0)
#define ECORE_QCID_LEGACY_VF_CID (1 << 1)
struct ecore_hwfn *p_owner;
};
enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
void ecore_l2_free(struct ecore_hwfn *p_hwfn);
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid);
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
/**
* @brief - Starts an Rx queue, when queue_cid is already prepared
*
* @param p_hwfn
* @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size);
/**
* @brief - Starts a Tx queue, where queue_cid is already prepared
*
* @param p_hwfn
* @param p_cid
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
dma_addr_t pbl_addr, u16 pbl_size,
u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
/**
* @brief - ecore_configure_rfs_ntuple_filter
*
* This ramrod should be used to add or remove arfs hw filter
*
* @params p_hwfn
* @params p_ptt
 * @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would initialize
 *		it with a cookie and callback function address; if not
 *		using this mode the client must pass NULL.
 * @params p_addr p_addr is an actual packet header that needs to be
 *		filtered. It has to be mapped with IO to be read prior to
 *		calling this [contains 4 tuples - src ip, dest ip,
 *		src port, dest port].
* @params length length of p_addr header up to past the transport header.
* @params qid receive packet will be directed to this queue.
* @params vport_id
* @params b_is_add flag to add or remove filter.
*
*/
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_spq_comp_cb *p_cb,
dma_addr_t p_addr, u16 length,
u16 qid, u8 vport_id,
bool b_is_add);
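/*
 * Example (hedged sketch): adding an aRFS flow filter without a completion
 * callback (p_cb == OSAL_NULL per the description above). hdr_phys/hdr_len
 * stand for a caller-prepared, IO-mapped packet header and its length, and
 * rx_queue_id/vport_id are illustrative.
 *
 *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_ptt, OSAL_NULL,
 *					       hdr_phys, hdr_len,
 *					       rx_queue_id, vport_id, true);
 */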
#endif

View File

@ -0,0 +1,463 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_L2_API_H__
#define __ECORE_L2_API_H__
#include "ecore_status.h"
#include "ecore_sp_api.h"
#include "ecore_int_api.h"
enum ecore_rss_caps {
ECORE_RSS_IPV4 = 0x1,
ECORE_RSS_IPV6 = 0x2,
ECORE_RSS_IPV4_TCP = 0x4,
ECORE_RSS_IPV6_TCP = 0x8,
ECORE_RSS_IPV4_UDP = 0x10,
ECORE_RSS_IPV6_UDP = 0x20,
};
/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
#define ECORE_RSS_IND_TABLE_SIZE 128
#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */
#define ECORE_MAX_PHC_DRIFT_PPB 291666666
enum ecore_ptp_filter_type {
ECORE_PTP_FILTER_L2,
ECORE_PTP_FILTER_IPV4,
ECORE_PTP_FILTER_IPV4_IPV6,
ECORE_PTP_FILTER_L2_IPV4_IPV6
};
struct ecore_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
u16 queue_id;
/* Relative, but relevant only for PFs */
u8 stats_id;
struct ecore_sb_info *p_sb;
u8 sb_idx;
};
struct ecore_rxq_start_ret_params {
void OSAL_IOMEM *p_prod;
void *p_handle;
};
struct ecore_txq_start_ret_params {
void OSAL_IOMEM *p_doorbell;
void *p_handle;
};
struct ecore_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
/* Indirection table consist of rx queue handles */
void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
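/*
 * Example (hedged sketch): enabling TCP RSS for IPv4 and IPv6 with the full
 * indirection table (rss_table_size_log 7 selects 2^7 == 128 entries).
 * rxq_handles[] is assumed to hold the p_handle values returned by
 * ecore_eth_rx_queue_start() for num_rxqs queues; the structure is later
 * hooked into the vport-update parameters.
 *
 *	struct ecore_rss_params rss = { 0 };
 *	int i;
 *
 *	rss.update_rss_config = 1;
 *	rss.rss_enable = 1;
 *	rss.update_rss_capabilities = 1;
 *	rss.update_rss_ind_table = 1;
 *	rss.update_rss_key = 1;
 *	rss.rss_caps = ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
 *	rss.rss_table_size_log = 7;
 *	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
 *		rss.rss_ind_table[i] = rxq_handles[i % num_rxqs];
 *	... fill rss.rss_key[] before use ...
 */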
struct ecore_sge_tpa_params {
u8 max_buffers_per_cqe;
u8 update_tpa_en_flg;
u8 tpa_ipv4_en_flg;
u8 tpa_ipv6_en_flg;
u8 tpa_ipv4_tunn_en_flg;
u8 tpa_ipv6_tunn_en_flg;
u8 update_tpa_param_flg;
u8 tpa_pkt_split_flg;
u8 tpa_hdr_data_split_flg;
u8 tpa_gro_consistent_flg;
u8 tpa_max_aggs_num;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
};
enum ecore_filter_opcode {
ECORE_FILTER_ADD,
ECORE_FILTER_REMOVE,
ECORE_FILTER_MOVE,
ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */
ECORE_FILTER_FLUSH, /* Removes all filters */
};
enum ecore_filter_ucast_type {
ECORE_FILTER_MAC,
ECORE_FILTER_VLAN,
ECORE_FILTER_MAC_VLAN,
ECORE_FILTER_INNER_MAC,
ECORE_FILTER_INNER_VLAN,
ECORE_FILTER_INNER_PAIR,
ECORE_FILTER_INNER_MAC_VNI_PAIR,
ECORE_FILTER_MAC_VNI_PAIR,
ECORE_FILTER_VNI,
};
struct ecore_filter_ucast {
enum ecore_filter_opcode opcode;
enum ecore_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct ecore_filter_mcast {
/* MOVE is not supported for multicast */
enum ecore_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define ECORE_MAX_MC_ADDRS 64
unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN];
};
struct ecore_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define ECORE_ACCEPT_NONE 0x01
#define ECORE_ACCEPT_UCAST_MATCHED 0x02
#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04
#define ECORE_ACCEPT_MCAST_MATCHED 0x08
#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10
#define ECORE_ACCEPT_BCAST 0x20
};
struct ecore_arfs_config_params {
bool tcp;
bool udp;
bool ipv4;
bool ipv6;
bool arfs_enable; /* Enable or disable arfs mode */
};
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
 * FW will assert in the following cases, so the driver should take care to avoid them:
* 1. Adding a filter to a full table.
* 2. Adding a filter which already exists on that vport.
* 3. Removing a filter which doesn't exist.
*/
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
struct ecore_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/* Add / remove / move multicast MAC filters. */
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
struct ecore_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/* Set "accept" filters */
enum _ecore_status_t
ecore_filter_accept_cmd(
struct ecore_dev *p_dev,
u8 vport,
struct ecore_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
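/*
 * Example (hedged sketch): putting vport 0 into Rx promiscuous mode using
 * the accept-filter bits defined above. ECORE_SPQ_MODE_EBLOCK is assumed to
 * be the blocking member of enum spq_mode (ECORE_SPQ_MODE_CB is the
 * callback variant referenced elsewhere in this API).
 *
 *	struct ecore_filter_accept_flags flags = { 0 };
 *
 *	flags.update_rx_mode_config = 1;
 *	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
 *				 ECORE_ACCEPT_UCAST_UNMATCHED |
 *				 ECORE_ACCEPT_MCAST_MATCHED |
 *				 ECORE_ACCEPT_MCAST_UNMATCHED |
 *				 ECORE_ACCEPT_BCAST;
 *	rc = ecore_filter_accept_cmd(p_dev, 0, flags, 0, 0,
 *				     ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */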
/**
* @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
*
* This ramrod initializes an RX Queue for a VPort. An Assert is generated if
* the VPort ID is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
 * @param p_params Inputs; Relative for PF [SB being an exception]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
* @param cqe_pbl_size Size of the CQE PBL Table
* @param p_ret_params Pointed struct to be filled with outputs.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
struct ecore_rxq_start_ret_params *p_ret_params);
/**
* @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
*
* @param p_hwfn
* @param p_rxq Handler of queue to close
 * @param eq_completion_only If True, completion will be on
 *				the EQe. If False, completion will be
 *				on the EQe only if the p_hwfn opaque
 *				is different from the RXQ opaque,
 *				otherwise on the CQe.
* @param cqe_completion If True completion will be
* receive on CQe.
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
void *p_rxq,
bool eq_completion_only,
bool cqe_completion);
/**
* @brief - TX Queue Start Ramrod
*
* This ramrod initializes a TX Queue for a VPort. An Assert is generated if
* the VPort is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
 * @param p_params
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
 * @param p_ret_params Pointer to fill the return parameters in.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
struct ecore_queue_start_common_params *p_params,
u8 tc,
dma_addr_t pbl_addr,
u16 pbl_size,
struct ecore_txq_start_ret_params *p_ret_params);
/**
* @brief ecore_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
* @param p_txq - handle to Tx queue needed to be closed
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
void *p_txq);
enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
ECORE_TPA_MODE_RSC,
ECORE_TPA_MODE_GRO,
ECORE_TPA_MODE_MAX
};
struct ecore_sp_vport_start_params {
enum ecore_tpa_mode tpa_mode;
bool remove_inner_vlan; /* Inner VLAN removal is enabled */
bool tx_switching; /* Vport supports tx-switching */
bool handle_ptp_pkts; /* Handle PTP packets */
bool only_untagged; /* Untagged pkt control */
bool drop_ttl0; /* Drop packets with TTL = 0 */
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id; /* VPORT ID */
u16 mtu; /* VPORT MTU */
bool zero_placement_offset;
bool check_mac;
bool check_ethtype;
/* Strict behavior on transmission errors */
bool b_err_illegal_vlan_mode;
bool b_err_illegal_inband_mode;
bool b_err_vlan_insert_with_inband;
bool b_err_small_pkt;
bool b_err_big_pkt;
bool b_err_anti_spoof;
bool b_err_ctrl_frame;
};
/**
* @brief ecore_sp_vport_start -
*
 * This ramrod initializes a VPort. An Assert is generated if the Function ID
* of the VPort is not enabled.
*
* @param p_hwfn
* @param p_params VPORT start params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
struct ecore_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_inner_vlan_removal_flg;
u8 inner_vlan_removal_flg;
u8 silent_vlan_removal_flg;
u8 update_default_vlan_enable_flg;
u8 default_vlan_enable_flg;
u8 update_default_vlan_flg;
u16 default_vlan;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_approx_mcast_flg;
u8 update_anti_spoofing_en_flg;
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct ecore_rss_params *rss_params;
struct ecore_filter_accept_flags accept_flags;
struct ecore_sge_tpa_params *sge_tpa_params;
};
/**
* @brief ecore_sp_vport_update -
*
* This ramrod updates the parameters of the VPort. Every field can be updated
* independently, according to flags.
*
* This ramrod is also used to set the VPort state to active after creation.
* An Assert is generated if the VPort does not contain an RX queue.
*
* @param p_hwfn
* @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/**
* @brief ecore_sp_vport_stop -
*
* This ramrod closes a VPort after all its RX and TX queues are terminated.
* An Assert is generated if any queues are left open.
*
* @param p_hwfn
* @param opaque_fid
* @param vport_id VPort ID
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
u8 vport_id);
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
struct ecore_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
/**
 * @brief ecore_sp_eth_rx_queues_update -
*
* This ramrod updates an RX queue. It is used for setting the active state
* of the queue and updating the TPA and SGE parameters.
*
* @note Final phase API.
*
* @param p_hwfn
* @param pp_rxq_handlers An array of queue handlers to be updated.
* @param num_rxqs number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
* @param comp_mode
* @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_eth_stats *stats,
u16 statistics_bin, bool b_get_port_stats);
void ecore_get_vport_stats(struct ecore_dev *p_dev,
struct ecore_eth_stats *stats);
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
/**
*@brief ecore_arfs_mode_configure -
*
 *Enable or disable rfs mode. It must accept at least one of tcp or udp true
 *and at least one of ipv4 or ipv6 true to enable rfs mode.
*
*@param p_hwfn
*@param p_ptt
*@param p_cfg_params arfs mode configuration parameters.
*
*/
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_arfs_config_params *p_cfg_params);
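/*
 * Example (hedged sketch): enabling aRFS classification for TCP and UDP
 * over IPv4 only, satisfying the "at least one of tcp/udp and at least one
 * of ipv4/ipv6" rule documented above.
 *
 *	struct ecore_arfs_config_params cfg = { 0 };
 *
 *	cfg.arfs_enable = true;
 *	cfg.tcp = true;
 *	cfg.udp = true;
 *	cfg.ipv4 = true;
 *	ecore_arfs_mode_configure(p_hwfn, p_ptt, &cfg);
 */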
#endif

View File

@ -0,0 +1,179 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_LL2_H__
#define __ECORE_LL2_H__
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_hsi_common.h"
#include "ecore_ll2_api.h"
#include "ecore_sp_api.h"
/* ECORE LL2: internal structures and functions*/
#define ECORE_MAX_NUM_OF_LL2_CONNECTIONS (4)
static OSAL_INLINE u8 ecore_ll2_handle_to_queue_id(struct ecore_hwfn *p_hwfn,
u8 handle)
{
return p_hwfn->hw_info.resc_start[ECORE_LL2_QUEUE] + handle;
}
struct ecore_ll2_rx_packet
{
osal_list_entry_t list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
dma_addr_t rx_buf_addr;
u16 buf_length;
void *cookie;
u8 placement_offset;
u16 parse_flags;
u16 packet_length;
u16 vlan;
u32 opaque_data[2];
};
struct ecore_ll2_tx_packet
{
osal_list_entry_t list_entry;
u16 bd_used;
bool notify_fw;
void *cookie;
struct {
struct core_tx_bd *txq_bd;
dma_addr_t tx_frag;
u16 frag_len;
} bds_set[1];
/* Flexible Array of bds_set determined by max_bds_per_packet */
};
struct ecore_ll2_rx_queue {
osal_spinlock_t lock;
struct ecore_chain rxq_chain;
struct ecore_chain rcq_chain;
u8 rx_sb_index;
bool b_cb_registred;
__le16 *p_fw_cons;
osal_list_t active_descq;
osal_list_t free_descq;
osal_list_t posting_descq;
struct ecore_ll2_rx_packet *descq_array;
void OSAL_IOMEM *set_prod_addr;
};
struct ecore_ll2_tx_queue {
osal_spinlock_t lock;
struct ecore_chain txq_chain;
u8 tx_sb_index;
bool b_cb_registred;
__le16 *p_fw_cons;
osal_list_t active_descq;
osal_list_t free_descq;
osal_list_t sending_descq;
struct ecore_ll2_tx_packet *descq_array;
struct ecore_ll2_tx_packet *cur_send_packet;
struct ecore_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void OSAL_IOMEM *doorbell_addr;
u16 bds_idx;
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
bool b_completing_packet;
};
struct ecore_ll2_info {
osal_mutex_t mutex;
enum ecore_ll2_conn_type conn_type;
u32 cid;
u8 my_id;
u8 queue_id;
u8 tx_stats_id;
bool b_active;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
u8 tx_tc;
u8 tx_max_bds_per_packet;
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
u8 gsi_enable;
u8 tx_stats_en;
u8 main_func_queue;
struct ecore_ll2_rx_queue rx_queue;
struct ecore_ll2_tx_queue tx_queue;
struct ecore_ll2_cbs cbs;
};
/**
* @brief ecore_ll2_alloc - Allocates LL2 connections set
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ll2_setup - Inits LL2 connections set
*
* @param p_hwfn
*
*/
void ecore_ll2_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ll2_free - Releases LL2 connections set
*
* @param p_hwfn
*
*/
void ecore_ll2_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_ll2_get_fragment_of_tx_packet
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
* @param addr
 * @param last_fragment
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_ll2_get_fragment_of_tx_packet(struct ecore_hwfn *p_hwfn,
u8 connection_handle,
dma_addr_t *addr,
bool *last_fragment);
#endif /*__ECORE_LL2_H__*/

@@ -0,0 +1,325 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_LL2_API_H__
#define __ECORE_LL2_API_H__
/* ECORE LL2 API: called by ECORE's upper level client */
/* must be the same as core_rx_conn_type */
enum ecore_ll2_conn_type {
ECORE_LL2_TYPE_FCOE /* FCoE L2 connection */,
ECORE_LL2_TYPE_ISCSI /* Iscsi L2 connection */,
ECORE_LL2_TYPE_TEST /* Eth TB test connection */,
ECORE_LL2_TYPE_OOO /* Iscsi OOO L2 connection */,
ECORE_LL2_TYPE_TOE /* toe L2 connection */,
ECORE_LL2_TYPE_ROCE /* RoCE L2 connection */,
ECORE_LL2_TYPE_IWARP,
MAX_ECORE_LL2_RX_CONN_TYPE
};
enum ecore_ll2_roce_flavor_type {
ECORE_LL2_ROCE, /* use this as default or d/c */
ECORE_LL2_RROCE,
MAX_ECORE_LL2_ROCE_FLAVOR_TYPE
};
enum ecore_ll2_tx_dest
{
ECORE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
ECORE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
ECORE_LL2_TX_DEST_DROP /* Light L2 Drop the TX packet */,
ECORE_LL2_TX_DEST_MAX
};
enum ecore_ll2_error_handle
{
ECORE_LL2_DROP_PACKET /* If error occurs drop packet */,
ECORE_LL2_DO_NOTHING /* If error occurs do nothing */,
ECORE_LL2_ASSERT /* If error occurs assert */,
};
struct ecore_ll2_stats {
u64 gsi_invalid_hdr;
u64 gsi_invalid_pkt_length;
u64 gsi_unsupported_pkt_typ;
u64 gsi_crcchksm_error;
u64 packet_too_big_discard;
u64 no_buff_discard;
u64 rcv_ucast_bytes;
u64 rcv_mcast_bytes;
u64 rcv_bcast_bytes;
u64 rcv_ucast_pkts;
u64 rcv_mcast_pkts;
u64 rcv_bcast_pkts;
u64 sent_ucast_bytes;
u64 sent_mcast_bytes;
u64 sent_bcast_bytes;
u64 sent_ucast_pkts;
u64 sent_mcast_pkts;
u64 sent_bcast_pkts;
};
struct ecore_ll2_comp_rx_data {
u8 connection_handle;
void *cookie;
dma_addr_t rx_buf_addr;
u16 parse_flags;
u16 err_flags;
u16 vlan;
bool b_last_packet;
union {
u8 placement_offset;
u8 data_length_error;
} u;
union {
u16 packet_length;
u16 data_length;
} length;
u32 opaque_data_0; /* src_mac_addr_hi */
u32 opaque_data_1; /* src_mac_addr_lo */
/* GSI only */
u32 gid_dst[4];
u16 qp_id;
};
typedef
void (*ecore_ll2_complete_rx_packet_cb)(void *cxt,
struct ecore_ll2_comp_rx_data *data);
typedef
void (*ecore_ll2_release_rx_packet_cb)(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t rx_buf_addr,
bool b_last_packet);
typedef
void (*ecore_ll2_complete_tx_packet_cb)(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet);
typedef
void (*ecore_ll2_release_tx_packet_cb)(void *cxt,
u8 connection_handle,
void *cookie,
dma_addr_t first_frag_addr,
bool b_last_fragment,
bool b_last_packet);
struct ecore_ll2_cbs {
ecore_ll2_complete_rx_packet_cb rx_comp_cb;
ecore_ll2_release_rx_packet_cb rx_release_cb;
ecore_ll2_complete_tx_packet_cb tx_comp_cb;
ecore_ll2_release_tx_packet_cb tx_release_cb;
void *cookie;
};
struct ecore_ll2_acquire_data {
enum ecore_ll2_conn_type conn_type;
u16 mtu; /* Maximum bytes that can be placed on a BD*/
u16 rx_num_desc;
/* Relevant only for OOO connections; if 0, OOO rx buffers = 2 * rx_num_desc */
u16 rx_num_ooo_buffers;
u8 rx_drop_ttl0_flg;
/* if set, 802.1q tags will be removed and copied to CQE */
u8 rx_vlan_removal_en;
u16 tx_num_desc;
u8 tx_max_bds_per_packet;
u8 tx_tc;
enum ecore_ll2_tx_dest tx_dest;
enum ecore_ll2_error_handle ai_err_packet_too_big;
enum ecore_ll2_error_handle ai_err_no_buf;
u8 secondary_queue;
u8 gsi_enable;
/* Output container for LL2 connection's handle */
u8 *p_connection_handle;
const struct ecore_ll2_cbs *cbs;
};
/**
 * @brief ecore_ll2_acquire_connection - allocates resources and
 * starts the rx & tx (if relevant) queue pair. Provides the
 * connection handle as an output parameter.
*
*
* @param p_hwfn
* @param data - describes connection parameters
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_ll2_acquire_connection(void *cxt,
struct ecore_ll2_acquire_data *data);
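/* Illustrative usage sketch (not part of the original header): the typical LL2
 * bring-up order implied by this API - acquire a connection, then establish
 * it; RX buffers are posted afterwards with ecore_ll2_post_rx_buffer(),
 * declared later in this header.  The void *cxt argument is assumed to be the
 * hwfn pointer, and ECORE_SUCCESS to come from ecore_status.h; the parameter
 * values are arbitrary for the example.
 */
#if 0
static enum _ecore_status_t
example_ll2_start(struct ecore_hwfn *p_hwfn, const struct ecore_ll2_cbs *cbs,
		  u8 *p_handle)
{
	struct ecore_ll2_acquire_data data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&data, 0, sizeof(data));
	data.conn_type = ECORE_LL2_TYPE_TEST;
	data.mtu = 1500;
	data.rx_num_desc = 32;
	data.tx_num_desc = 32;
	data.tx_max_bds_per_packet = 1;
	data.tx_dest = ECORE_LL2_TX_DEST_NW;
	data.ai_err_packet_too_big = ECORE_LL2_DROP_PACKET;
	data.ai_err_no_buf = ECORE_LL2_DROP_PACKET;
	data.p_connection_handle = p_handle;
	data.cbs = cbs;

	rc = ecore_ll2_acquire_connection(p_hwfn, &data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Start the queue pair; RX buffers are posted afterwards. */
	return ecore_ll2_establish_connection(p_hwfn, *p_handle);
}
#endif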
/**
* @brief ecore_ll2_establish_connection - start previously
* allocated LL2 queues pair
*
* @param p_hwfn
* @param p_ptt
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_establish_connection(void *cxt,
u8 connection_handle);
/**
 * @brief ecore_ll2_post_rx_buffer - submit a buffer to the LL2 RxQ.
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
* @param addr rx (physical address) buffers to
* submit
* @param cookie
* @param notify_fw produce corresponding Rx BD
* immediately
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 buf_len,
void *cookie,
u8 notify_fw);
struct ecore_ll2_tx_pkt_info {
u8 num_of_bds;
u16 vlan;
u8 bd_flags;
u16 l4_hdr_offset_w; /* from start of packet */
enum ecore_ll2_tx_dest tx_dest;
enum ecore_ll2_roce_flavor_type ecore_roce_flavor;
dma_addr_t first_frag;
u16 first_frag_len;
bool enable_ip_cksum;
bool enable_l4_cksum;
bool calc_ip_len;
void *cookie;
};
/**
 * @brief ecore_ll2_prepare_tx_packet - requests the start Tx BD
 * to prepare a Tx packet for submission to the FW.
*
*
* @param p_hwfn
* @param pkt - info regarding the tx packet
* @param notify_fw - issue doorbell to fw for this packet
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_prepare_tx_packet(
void *cxt,
u8 connection_handle,
struct ecore_ll2_tx_pkt_info *pkt,
bool notify_fw);
/**
* @brief ecore_ll2_release_connection - releases resources
* allocated for LL2 connection
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
*/
void ecore_ll2_release_connection(void *cxt,
u8 connection_handle);
/**
 * @brief ecore_ll2_set_fragment_of_tx_packet - provides
 * fragments to fill the Tx BDs requested by
 * ecore_ll2_prepare_tx_packet.
*
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
* @param addr
* @param nbytes
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr,
u16 nbytes);
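/* Illustrative usage sketch (not part of the original header): transmitting a
 * packet split over two DMA fragments - the first fragment is described in
 * ecore_ll2_prepare_tx_packet(), the remaining one is attached with
 * ecore_ll2_set_fragment_of_tx_packet().  ECORE_SUCCESS is assumed from
 * ecore_status.h; error handling is elided.
 */
#if 0
static enum _ecore_status_t
example_ll2_xmit(struct ecore_hwfn *p_hwfn, u8 handle,
		 dma_addr_t frag0, u16 len0, dma_addr_t frag1, u16 len1,
		 void *cookie)
{
	struct ecore_ll2_tx_pkt_info pkt;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&pkt, 0, sizeof(pkt));
	pkt.num_of_bds = 2;
	pkt.tx_dest = ECORE_LL2_TX_DEST_NW;
	pkt.first_frag = frag0;
	pkt.first_frag_len = len0;
	pkt.cookie = cookie;

	rc = ecore_ll2_prepare_tx_packet(p_hwfn, handle, &pkt,
					 true /* notify_fw */);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_ll2_set_fragment_of_tx_packet(p_hwfn, handle,
						   frag1, len1);
}
#endif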
/**
* @brief ecore_ll2_terminate_connection - stops Tx/Rx queues
*
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_terminate_connection(void *cxt,
u8 connection_handle);
/**
* @brief ecore_ll2_get_stats - get LL2 queue's statistics
*
*
* @param p_hwfn
* @param connection_handle LL2 connection's handle
* obtained from
 * ecore_ll2_acquire_connection
* @param p_stats
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_ll2_get_stats(void *cxt,
u8 connection_handle,
struct ecore_ll2_stats *p_stats);
#endif

File diff suppressed because it is too large.

@@ -0,0 +1,614 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_MCP_H__
#define __ECORE_MCP_H__
#include "bcm_osal.h"
#include "mcp_public.h"
#include "ecore.h"
#include "ecore_mcp_api.h"
#include "ecore_dev_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
 * the same pf_num may be used by two different hwfns.
 * TODO - this shouldn't really be in a .h file, but until all fields
 * required during hw-init are placed in their correct place in shmem
 * we need it in ecore_dev.c [for reading the nvram reflection in shmem].
*/
#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \
((rel_pfid) | \
((p_hwfn)->abs_pf_id & 1) << 3) : \
rel_pfid)
#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
((_p_hwfn)->p_dev->num_ports_in_engines * \
ecore_device_num_engines((_p_hwfn)->p_dev)))
struct ecore_mcp_info {
/* Spinlock used for protecting the access to the MFW mailbox */
osal_spinlock_t lock;
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
osal_spinlock_t link_lock;
/* Flag to indicate whether sending a MFW mailbox is forbidden */
bool block_mb_sending;
/* Address of the MCP public area */
u32 public_base;
/* Address of the driver mailbox */
u32 drv_mb_addr;
/* Address of the MFW mailbox */
u32 mfw_mb_addr;
/* Address of the port configuration (link) */
u32 port_addr;
/* Current driver mailbox sequence */
u16 drv_mb_seq;
/* Current driver pulse sequence */
u16 drv_pulse_seq;
struct ecore_mcp_link_params link_input;
struct ecore_mcp_link_state link_output;
struct ecore_mcp_link_capabilities link_capabilities;
struct ecore_mcp_function_info func_info;
u8 *mfw_mb_cur;
u8 *mfw_mb_shadow;
u16 mfw_mb_length;
u16 mcp_hist;
/* Capabilities negotiated with the MFW */
u32 capabilities;
};
struct ecore_mcp_mb_params {
u32 cmd;
u32 param;
void *p_data_src;
u8 data_src_size;
void *p_data_dst;
u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
enum ecore_ov_eswitch {
ECORE_OV_ESWITCH_NONE,
ECORE_OV_ESWITCH_VEB,
ECORE_OV_ESWITCH_VEPA
};
struct ecore_drv_tlv_hdr {
u8 tlv_type; /* According to the enum below */
u8 tlv_length; /* In dwords - not including this header */
u8 tlv_reserved;
#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
u8 tlv_flags;
};
/**
* @brief Initialize the interface with the MCP
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
 * @brief Initialize the port interface with the MCP
*
* @param p_hwfn
* @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Releases resources allocated during the init process.
*
* @param p_hwfn - HW func
* @param p_ptt - PTT required for register access
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
/**
* @brief This function is called from the DPC context. After
* pointing PTT to the mfw mb, check for events sent by the MCP
 * to the driver and ack them. In case a critical event is
 * detected, it will be handled here, otherwise the work will be
* queued to a sleepable work-queue.
*
* @param p_hwfn - HW function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
 * was successful.
*/
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
 * @brief When the MFW doesn't get a driver pulse for a couple of seconds, at
 * some threshold before the timeout expires, it will generate an interrupt
 * through a dedicated status block (DPSB - Driver Pulse Status
 * Block), to which the driver should respond immediately by
* providing keepalive indication after setting the PTT to the
* driver-MFW mailbox. This function is called directly from the
* DPC upon receiving the DPSB attention.
*
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
* was successful.
*/
enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
enum ecore_drv_role {
ECORE_DRV_ROLE_OS,
ECORE_DRV_ROLE_KDUMP,
};
struct ecore_load_req_params {
/* Input params */
enum ecore_drv_role drv_role;
u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
bool avoid_eng_reset;
enum ecore_override_force_load override_force_load;
/* Output params */
u32 load_code;
};
/**
* @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
* returns whether this PF is the first on the engine/port or function.
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_load_req_params *p_params);
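/* Illustrative usage sketch (not part of the original header): issuing a
 * LOAD_REQ as a regular OS driver and returning the load_code reported by the
 * MFW.  Only the fields declared above are used; the load-code constants that
 * the caller would compare against live in mcp_public.h and are not spelled
 * out here.
 */
#if 0
static enum _ecore_status_t
example_mcp_load(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		 u32 *p_load_code)
{
	struct ecore_load_req_params params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.drv_role = ECORE_DRV_ROLE_OS;
	params.timeout_val = 0;		/* use the MFW default timeout */
	params.avoid_eng_reset = false;

	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
	if (rc == ECORE_SUCCESS)
		*p_load_code = params.load_code;
	return rc;
}
#endif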
/**
* @brief Sends a UNLOAD_REQ message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Sends a UNLOAD_DONE message to the MFW
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Ack to mfw that driver finished FLR process for VFs
*
* @param p_hwfn
* @param p_ptt
* @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
*
* @param return enum _ecore_status_t - ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 *vfs_to_ack);
/**
 * @brief - Called during init to read shmem of all function-related info.
*
* @param p_hwfn
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Reset the MCP using mailbox command.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Sends an NVM write command request to the MFW with
* payload.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or
* DRV_MSG_CODE_NVM_PUT_FILE_DATA
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param i_txn_size - Buffer size
* @param i_buf - Pointer to the buffer
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 i_txn_size,
u32 *i_buf);
/**
* @brief - Sends an NVM read command request to the MFW to get
* a buffer.
*
* @param p_hwfn
* @param p_ptt
* @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
* DRV_MSG_CODE_NVM_READ_NVRAM commands
* @param param - [0:23] - Offset [24:31] - Size
* @param o_mcp_resp - MCP response
* @param o_mcp_param - MCP response param
* @param o_txn_size - Buffer size output
* @param o_buf - Pointer to the buffer returned by the MFW.
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
u32 *o_mcp_param,
u32 *o_txn_size,
u32 *o_buf);
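/* Illustrative usage sketch (not part of the original header): packing the
 * 'param' word for the NVM read/write commands above, where bits [0:23] carry
 * the offset and bits [24:31] carry the size.  DRV_MSG_CODE_NVM_READ_NVRAM is
 * the command named in the comment above and is defined in mcp_public.h.
 */
#if 0
static enum _ecore_status_t
example_nvm_read(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
		 u32 offset, u8 size, u32 *o_resp, u32 *o_param,
		 u32 *o_len, u32 *o_buf)
{
	u32 param = (offset & 0x00ffffff) | ((u32)size << 24);

	return ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
				    param, o_resp, o_param, o_len, o_buf);
}
#endif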
/**
* @brief indicates whether the MFW objects [under mcp_info] are accessible
*
* @param p_hwfn
*
* @return true iff MFW is running and mcp_info is initialized
*/
bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn);
/**
* @brief request MFW to configure MSI-X for a VF
*
* @param p_hwfn
* @param p_ptt
* @param vf_id - absolute inside engine
* @param num_sbs - number of entries to request
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vf_id, u8 num);
/**
* @brief - Halt the MCP.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Wake up the MCP.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link,
u8 max_bw);
int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_link_state *p_link,
u8 min_bw);
enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 mask_parities);
#if 0
enum _ecore_status_t ecore_hw_init_first_eth(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 *p_pf);
#endif
/**
* @brief - Sends crash mdump related info to the MFW.
*
* @param p_hwfn
* @param p_ptt
* @param epoch
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 epoch);
/**
* @brief - Triggers a MFW crash dump procedure.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
struct ecore_mdump_retain_data {
u32 valid;
u32 epoch;
u32 pf;
u32 status;
};
/**
* @brief - Gets the mdump retained data from the MFW.
*
* @param p_hwfn
* @param p_ptt
* @param p_mdump_retain
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t
ecore_mcp_mdump_get_retain(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mdump_retain_data *p_mdump_retain);
/**
* @brief - Clear the mdump retained data.
*
* @param p_hwfn
* @param p_ptt
*
* @param return ECORE_SUCCESS upon success.
*/
enum _ecore_status_t ecore_mcp_mdump_clr_retain(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief - Sets the MFW's max value for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param resc_max_val
* @param p_mcp_resp
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_resources res_id, u32 resc_max_val,
u32 *p_mcp_resp);
/**
* @brief - Gets the MFW allocation info for the given resource
*
* @param p_hwfn
* @param p_ptt
* @param res_id
* @param p_mcp_resp
* @param p_resc_num
* @param p_resc_start
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_resources res_id, u32 *p_mcp_resp,
u32 *p_resc_num, u32 *p_resc_start);
/**
* @brief - Initiates PF FLR
*
* @param p_hwfn
* @param p_ptt
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Send eswitch mode to MFW
*
* @param p_hwfn
* @param p_ptt
* @param eswitch - eswitch mode
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_ov_update_eswitch(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum ecore_ov_eswitch eswitch);
#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
enum ecore_resc_lock {
ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
/* Locks that the MFW is aware of should be added here downwards */
/* Ecore only locks should be added here upwards */
ECORE_RESC_LOCK_PTP_PORT0,
ECORE_RESC_LOCK_PTP_PORT1,
ECORE_RESC_LOCK_PTP_PORT2,
ECORE_RESC_LOCK_PTP_PORT3,
ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
};
struct ecore_resc_lock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Lock timeout value in seconds [default, none or 1..254] */
u8 timeout;
#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
#define ECORE_MCP_RESC_LOCK_TO_NONE 255
/* Number of times to retry locking */
u8 retry_num;
/* The interval in usec between retries */
u16 retry_interval;
/* Use sleep or delay between retries */
bool sleep_b4_retry;
/* Will be set as true if the resource is free and granted */
bool b_granted;
/* Will be filled with the resource owner.
* [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
*/
u8 owner;
};
/**
* @brief Acquires MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_lock_params *p_params);
struct ecore_resc_unlock_params {
/* Resource number [valid values are 0..31] */
u8 resource;
/* Allow to release a resource even if belongs to another PF */
bool b_force;
/* Will be set as true if the resource is released */
bool b_released;
};
/**
* @brief Releases MFW generic resource lock
*
* @param p_hwfn
* @param p_ptt
* @param p_params
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
enum _ecore_status_t
ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_resc_unlock_params *p_params);
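/* Illustrative usage sketch (not part of the original header): taking and
 * releasing an MFW resource lock around a debug dump, using the parameter
 * structures declared above.  Retry values are arbitrary for the example, and
 * ECORE_BUSY is assumed to come from ecore_status.h.
 */
#if 0
static enum _ecore_status_t
example_resc_lock_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_lock_params lock;
	struct ecore_resc_unlock_params unlock;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&lock, 0, sizeof(lock));
	lock.resource = ECORE_RESC_LOCK_DBG_DUMP;
	lock.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
	lock.retry_num = 10;
	lock.retry_interval = 100;	/* usec between retries */
	lock.sleep_b4_retry = true;

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock);
	if (rc != ECORE_SUCCESS || !lock.b_granted)
		return ECORE_BUSY;

	/* ... critical section ... */

	OSAL_MEMSET(&unlock, 0, sizeof(unlock));
	unlock.resource = ECORE_RESC_LOCK_DBG_DUMP;
	unlock.b_force = false;
	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
}
#endif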
void ecore_mcp_wol_wr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
u32 offset, u32 val);
/**
* @brief Learn of supported MFW features; To be done during early init
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_mcp_get_capabilities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Inform MFW of set of features supported by driver. Should be done
 * inside the context of the LOAD_REQ.
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_mcp_set_capabilities(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief Initialize MFW mailbox and sequence values for driver interaction.
*
* @param p_hwfn
* @param p_ptt
*/
enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
#endif /* __ECORE_MCP_H__ */

File diff suppressed because it is too large.

@@ -0,0 +1,145 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_OOO_H__
#define __ECORE_OOO_H__
#include "ecore.h"
#define ECORE_MAX_NUM_ISLES 256
#define ECORE_MAX_NUM_OOO_HISTORY_ENTRIES 512
#define ECORE_OOO_LEFT_BUF 0
#define ECORE_OOO_RIGHT_BUF 1
struct ecore_ooo_buffer {
osal_list_entry_t list_entry;
void *rx_buffer_virt_addr;
dma_addr_t rx_buffer_phys_addr;
u32 rx_buffer_size;
u16 packet_length;
u16 parse_flags;
u16 vlan;
u8 placement_offset;
};
struct ecore_ooo_isle {
osal_list_entry_t list_entry;
osal_list_t buffers_list;
};
struct ecore_ooo_archipelago {
osal_list_t isles_list;
};
struct ecore_ooo_history {
struct ooo_opaque *p_cqes;
u32 head_idx;
u32 num_of_cqes;
};
struct ecore_ooo_info {
osal_list_t free_buffers_list;
osal_list_t ready_buffers_list;
osal_list_t free_isles_list;
struct ecore_ooo_archipelago *p_archipelagos_mem;
struct ecore_ooo_isle *p_isles_mem;
struct ecore_ooo_history ooo_history;
u32 cur_isles_number;
u32 max_isles_number;
u32 gen_isles_number;
u16 max_num_archipelagos;
u16 cid_base;
};
enum _ecore_status_t ecore_ooo_alloc(struct ecore_hwfn *p_hwfn);
void ecore_ooo_setup(struct ecore_hwfn *p_hwfn);
void ecore_ooo_free(struct ecore_hwfn *p_hwfn);
void ecore_ooo_save_history_entry(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ooo_opaque *p_cqe);
void ecore_ooo_release_connection_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid);
void ecore_ooo_release_all_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_put_free_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer);
struct ecore_ooo_buffer *
ecore_ooo_get_free_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_put_ready_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
struct ecore_ooo_buffer *p_buffer,
u8 on_tail);
struct ecore_ooo_buffer *
ecore_ooo_get_ready_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info);
void ecore_ooo_delete_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 drop_isle,
u8 drop_size);
void ecore_ooo_add_new_isle(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct ecore_ooo_buffer *p_buffer);
void ecore_ooo_add_new_buffer(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 ooo_isle,
struct ecore_ooo_buffer *p_buffer,
u8 buffer_side);
void ecore_ooo_join_isles(struct ecore_hwfn *p_hwfn,
struct ecore_ooo_info *p_ooo_info,
u32 cid,
u8 left_isle);
void ecore_ooo_dump_rx_event(struct ecore_hwfn *p_hwfn,
struct ooo_opaque *iscsi_ooo,
struct ecore_ooo_buffer *p_buffer);
#endif /*__ECORE_OOO_H__*/

@@ -0,0 +1,173 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_PROTO_IF_H__
#define __ECORE_PROTO_IF_H__
/*
* PF parameters (according to personality/protocol)
*/
#define ECORE_ROCE_PROTOCOL_INDEX (3)
struct ecore_eth_pf_params {
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
/* per-VF number of CIDs */
u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
/* To enable arfs, previous to HW-init a positive number needs to be
* set [as filters require allocated searcher ILT memory].
* This will set the maximal number of configured steering-filters.
*/
u32 num_arfs_filters;
};
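/* Illustrative usage sketch (not part of the original header): filling the
 * Ethernet PF parameters before slowpath start, including a positive
 * num_arfs_filters so that searcher ILT memory is reserved for RFS filters, as
 * required by the comment above.  OSAL_MEMSET is assumed from bcm_osal.h; the
 * driver code that passes the result to the update_pf_params routine is not
 * shown.
 */
#if 0
static void
example_set_eth_pf_params(struct ecore_eth_pf_params *p_eth)
{
	OSAL_MEMSET(p_eth, 0, sizeof(*p_eth));
	p_eth->num_cons = 64;
	p_eth->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
	p_eth->num_arfs_filters = 64;	/* a positive value reserves ILT for RFS */
}
#endif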
/* Most of the parameters below are described in the FW FCoE HSI */
struct ecore_fcoe_pf_params {
/* The following parameters are used during protocol-init */
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[2];
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
u16 num_tasks;
/* The following parameters are used during protocol-init */
u16 sq_num_pbl_pages;
u16 cq_num_entries;
u16 cmdq_num_entries;
u16 rq_buffer_log_size;
u16 mtu;
u16 dummy_icid;
u16 bdq_xoff_threshold[2];
u16 bdq_xon_threshold[2];
u16 rq_buffer_size;
u8 num_cqs; /* num of global CQs */
u8 log_page_size;
u8 gl_rq_pi;
u8 gl_cmd_pi;
u8 debug_mode;
u8 is_target;
u8 bdq_pbl_num_entries[2];
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct ecore_iscsi_pf_params {
u64 glbl_q_params_addr;
u64 bdq_pbl_base_addr[2];
u16 cq_num_entries;
u16 cmdq_num_entries;
u32 two_msl_timer;
u16 tx_sws_timer;
/* The following parameters are used during HW-init
* and these parameters need to be passed as arguments
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
u16 num_tasks;
/* The following parameters are used during protocol-init */
u16 half_way_close_timeout;
u16 bdq_xoff_threshold[2];
u16 bdq_xon_threshold[2];
u16 cmdq_xoff_threshold;
u16 cmdq_xon_threshold;
u16 rq_buffer_size;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
u8 num_uhq_pages_in_ring;
u8 num_queues;
u8 log_page_size;
u8 rqe_log_size;
u8 max_fin_rt;
u8 gl_rq_pi;
u8 gl_cmd_pi;
u8 debug_mode;
u8 ll2_ooo_queue_id;
u8 ooo_enable;
u8 is_target;
u8 bdq_pbl_num_entries[2];
};
enum ecore_rdma_protocol {
ECORE_RDMA_PROTOCOL_DEFAULT,
ECORE_RDMA_PROTOCOL_NONE,
ECORE_RDMA_PROTOCOL_ROCE,
ECORE_RDMA_PROTOCOL_IWARP,
};
struct ecore_rdma_pf_params {
/* Supplied to ECORE during resource allocation (may affect the ILT and
* the doorbell BAR).
*/
u32 min_dpis; /* number of requested DPIs */
u32 num_qps; /* number of requested Queue Pairs */
u32 num_srqs; /* number of requested SRQ */
u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
u8 gl_pi; /* protocol index */
/* Will allocate rate limiters to be used with QPs */
u8 enable_dcqcn;
/* Max number of CNQs - limits the number of the ECORE_RDMA_CNQ feature,
 * allowing an increase in ECORE_PF_L2_QUE.
 * To disable CNQs, use the dedicated value instead of `0'.
*/
#define ECORE_RDMA_PF_PARAMS_CNQS_NONE (0xffff)
u16 max_cnqs;
/* TCP port number used for the iwarp traffic */
u16 iwarp_port;
enum ecore_rdma_protocol rdma_protocol;
};
struct ecore_pf_params {
struct ecore_eth_pf_params eth_pf_params;
struct ecore_fcoe_pf_params fcoe_pf_params;
struct ecore_iscsi_pf_params iscsi_pf_params;
struct ecore_rdma_pf_params rdma_pf_params;
};
#endif

@@ -0,0 +1,387 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_RDMA_H__
#define __ECORE_RDMA_H__
#include "ecore_status.h"
#include "ecore.h"
#include "ecore_hsi_common.h"
#include "ecore_proto_if.h"
#include "ecore_roce_api.h"
#include "ecore_dev_api.h"
/* Constants */
/* HW/FW RoCE Limitations (internal. For external see ecore_rdma_api.h) */
#define ECORE_RDMA_MAX_FMR (RDMA_MAX_TIDS) /* 2^17 - 1 */
#define ECORE_RDMA_MAX_P_KEY (1)
#define ECORE_RDMA_MAX_WQE (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) /* 2^15 -1 */
#define ECORE_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) /* TODO: > 4k?! */
#define ECORE_RDMA_ACK_DELAY (15) /* 131 milliseconds */
#define ECORE_RDMA_MAX_MR_SIZE (0x10000000000ULL) /* 2^40 */
#define ECORE_RDMA_MAX_CQS (RDMA_MAX_CQS) /* 64k */
#define ECORE_RDMA_MAX_MRS (RDMA_MAX_TIDS) /* 2^17 - 1 */
/* Add 1 for header element */
#define ECORE_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
#define ECORE_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
#define ECORE_RDMA_SRQ_WQE_ELEM_SIZE (16)
#define ECORE_RDMA_MAX_SRQS (32 * 1024) /* 32k */
/* Configurable */
/* Max CQE is derived from u16/32 size, halved and decremented by 1 to handle
* wrap properly and then decremented by 1 again. The latter decrement comes
* from a requirement to create a chain that is bigger than what the user
* requested by one:
* The CQE size is 32 bytes but the FW writes in chunks of 64
* bytes, for performance purposes. Allocating an extra entry and telling the
* FW we have less prevents overwriting the first entry in case of a wrap i.e.
* when the FW writes the last entry and the application hasn't read the first
* one.
*/
#define ECORE_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
#define ECORE_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
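/* Illustrative sketch (not part of the original header): a compile-time check
 * spelling out the derivation in the comment above - take half of the index
 * space, subtract one for wrap handling, then one more for the extra (hidden)
 * entry.  The negative-array-size trick is used only because this is plain C89
 * with no static_assert.
 */
#if 0
typedef char ecore_rdma_max_cqe_16_check[
	(ECORE_RDMA_MAX_CQE_16_BIT == (0x10000 / 2) - 1 - 1) ? 1 : -1];
typedef char ecore_rdma_max_cqe_32_check[
	(ECORE_RDMA_MAX_CQE_32_BIT == 0x80000000U - 1 - 1) ? 1 : -1];
#endif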
enum ecore_rdma_toggle_bit {
ECORE_RDMA_TOGGLE_BIT_CLEAR = 0,
ECORE_RDMA_TOGGLE_BIT_SET = 1
};
/* @@@TBD Currently we support only affiliated events
* enum ecore_rdma_unaffiliated_event_code {
* ECORE_RDMA_PORT_ACTIVE, // Link Up
* ECORE_RDMA_PORT_CHANGED, // SGID table has changed
* ECORE_RDMA_LOCAL_CATASTROPHIC_ERR, // Fatal device error
* ECORE_RDMA_PORT_ERR, // Link down
* };
*/
#define QEDR_MAX_BMAP_NAME (10)
struct ecore_bmap {
u32 max_count;
unsigned long *bitmap;
char name[QEDR_MAX_BMAP_NAME];
};
/* functions for enabling/disabling edpm in rdma PFs according to existence of
* qps during DCBx update or bar size
*/
void ecore_roce_dpm_dcbx(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
void ecore_rdma_dpm_bar(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
#ifdef CONFIG_ECORE_IWARP
#define ECORE_IWARP_LL2_SYN_TX_SIZE (128)
#define ECORE_IWARP_LL2_SYN_RX_SIZE (256)
#define ECORE_IWARP_LL2_OOO_DEF_TX_SIZE (256)
#define ECORE_IWARP_LL2_OOO_DEF_RX_SIZE (4096)
#define ECORE_IWARP_MAX_SYN_PKT_SIZE (128)
#define ECORE_IWARP_HANDLE_INVAL (0xff)
struct ecore_iwarp_ll2_buff {
struct ecore_iwarp_ll2_buff *piggy_buf;
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
struct ecore_iwarp_ll2_mpa_buf {
osal_list_entry_t list_entry;
struct ecore_iwarp_ll2_buff *ll2_buf;
struct unaligned_opaque_data data;
u16 tcp_payload_len;
u8 placement_offset;
};
/* In some cases an fpdu will arrive with only one byte of the header; in this
 * case fpdu_length will be partial (containing only the higher byte) and
 * incomplete_bytes will contain the invalid value below. */
#define ECORE_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
struct ecore_iwarp_fpdu {
struct ecore_iwarp_ll2_buff *mpa_buf;
dma_addr_t pkt_hdr;
u8 pkt_hdr_size;
dma_addr_t mpa_frag;
void *mpa_frag_virt;
u16 mpa_frag_len;
u16 fpdu_length;
u16 incomplete_bytes;
};
struct ecore_iwarp_info {
osal_list_t listen_list; /* ecore_iwarp_listener */
osal_list_t ep_list; /* ecore_iwarp_ep */
osal_list_t ep_free_list;/* pre-allocated ep's */
osal_list_t mpa_buf_list;/* list of mpa_bufs */
osal_list_t mpa_buf_pending_list;
osal_spinlock_t iw_lock;
osal_spinlock_t qp_lock; /* for teardown races */
struct iwarp_rxmit_stats_drv stats;
u32 rcv_wnd_scale;
u16 max_mtu;
u16 num_ooo_rx_bufs;
u8 mac_addr[ETH_ALEN];
u8 crc_needed;
u8 tcp_flags;
u8 ll2_syn_handle;
u8 ll2_ooo_handle;
u8 ll2_mpa_handle;
u8 peer2peer;
u8 _pad;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
struct ecore_iwarp_fpdu *partial_fpdus;
struct ecore_iwarp_ll2_mpa_buf *mpa_bufs;
u8 *mpa_intermediate_buf;
u16 max_num_partial_fpdus;
/* MPA statistics */
u64 unalign_rx_comp;
};
#endif
#define IS_ECORE_DCQCN(p_hwfn) \
(!!(p_hwfn->pf_params.rdma_pf_params.enable_dcqcn))
struct ecore_roce_info {
struct roce_events_stats event_stats;
u8 dcqcn_enabled;
u8 dcqcn_reaction_point;
};
struct ecore_rdma_info {
osal_spinlock_t lock;
struct ecore_bmap cq_map;
struct ecore_bmap pd_map;
struct ecore_bmap tid_map;
struct ecore_bmap srq_map;
struct ecore_bmap cid_map;
struct ecore_bmap tcp_cid_map;
struct ecore_bmap real_cid_map;
struct ecore_bmap dpi_map;
struct ecore_bmap toggle_bits;
struct ecore_rdma_events events;
struct ecore_rdma_device *dev;
struct ecore_rdma_port *port;
u32 last_tid;
u8 num_cnqs;
struct rdma_sent_stats rdma_sent_pstats;
struct rdma_rcv_stats rdma_rcv_tstats;
u32 num_qps;
u32 num_mrs;
u32 num_srqs;
u16 queue_zone_base;
u16 max_queue_zones;
enum protocol_type proto;
struct ecore_roce_info roce;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_info iwarp;
#endif
};
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_qp_state {
ECORE_IWARP_QP_STATE_IDLE,
ECORE_IWARP_QP_STATE_RTS,
ECORE_IWARP_QP_STATE_TERMINATE,
ECORE_IWARP_QP_STATE_CLOSING,
ECORE_IWARP_QP_STATE_ERROR,
};
#endif
struct ecore_rdma_qp {
struct regpair qp_handle;
struct regpair qp_handle_async;
u32 qpid; /* iwarp: may differ from icid */
u16 icid;
enum ecore_roce_qp_state cur_state;
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_qp_state iwarp_state;
#endif
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u16 pd; /* Protection domain */
u16 pkey; /* Primary P_key index */
u32 dest_qp;
u16 mtu;
u16 srq_id;
u8 traffic_class_tos; /* IPv6/GRH traffic class; IPv4 TOS */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u16 dpi;
u32 flow_label; /* ignored in IPv4 */
u16 vlan_id;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
enum roce_mode roce_mode;
u16 udp_src_port; /* RoCEv2 only */
u8 stats_queue;
/* requester */
u8 max_rd_atomic_req;
u32 sq_psn;
u16 sq_cq_id; /* The cq to be associated with the send queue*/
u16 sq_num_pages;
dma_addr_t sq_pbl_ptr;
void *orq;
dma_addr_t orq_phys_addr;
u8 orq_num_pages;
bool req_offloaded;
/* responder */
u8 max_rd_atomic_resp;
u32 rq_psn;
u16 rq_cq_id; /* The cq to be associated with the receive queue */
u16 rq_num_pages;
dma_addr_t rq_pbl_ptr;
void *irq;
dma_addr_t irq_phys_addr;
u8 irq_num_pages;
bool resp_offloaded;
u32 cq_prod;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
void *shared_queue;
dma_addr_t shared_queue_phys_addr;
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_ep *ep;
#endif
};
#ifdef CONFIG_ECORE_IWARP
enum ecore_iwarp_ep_state {
ECORE_IWARP_EP_INIT,
ECORE_IWARP_EP_MPA_REQ_RCVD,
ECORE_IWARP_EP_ESTABLISHED,
ECORE_IWARP_EP_CLOSED
};
union async_output {
struct iwarp_eqe_data_mpa_async_completion mpa_response;
struct iwarp_eqe_data_tcp_async_completion mpa_request;
};
/* Endpoint structure represents a TCP connection. This connection can be
* associated with a QP or not (in which case QP==NULL)
*/
struct ecore_iwarp_ep {
osal_list_entry_t list_entry;
int sig;
struct ecore_rdma_qp *qp;
enum ecore_iwarp_ep_state state;
/* This contains entire buffer required for ep memories. This is the
* only one actually allocated and freed. The rest are pointers into
* this buffer
*/
void *ep_buffer_virt;
dma_addr_t ep_buffer_phys;
/* Async EQE events contain only the ep pointer on the completion. The
* rest of the data is written to an output buffer pre-allocated by
* the driver. This buffer points to a location in the ep_buffer.
*/
union async_output *async_output_virt;
dma_addr_t async_output_phys;
struct ecore_iwarp_cm_info cm_info;
enum tcp_connect_mode connect_mode;
enum mpa_rtr_type rtr_type;
enum mpa_negotiation_mode mpa_rev;
u32 tcp_cid;
u32 cid;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
u16 mss;
bool mpa_reply_processed;
/* The event_cb function is called for asynchronous events associated
* with the ep. It is initialized at different entry points depending
* on whether the ep is the tcp connection active side or passive side
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
/* For Passive side - syn packet related data */
struct ecore_iwarp_ll2_buff *syn;
u16 syn_ip_payload_length;
dma_addr_t syn_phy_addr;
};
struct ecore_iwarp_listener {
osal_list_entry_t list_entry;
/* The event_cb function is called for connection requests.
* The cb_context is passed to the event_cb function.
*/
iwarp_event_handler event_cb;
void *cb_context;
u32 max_backlog;
u8 ip_version;
u32 ip_addr[4];
u16 port;
u16 vlan;
};
void ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
u8 fw_event_code,
struct regpair *fw_handle,
u8 fw_return_code);
#endif /* CONFIG_ECORE_IWARP */
void ecore_roce_async_event(struct ecore_hwfn *p_hwfn,
u8 fw_event_code,
union rdma_eqe_data *rdma_data);
#endif /*__ECORE_RDMA_H__*/

@@ -0,0 +1,865 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__
#define ETH_ALEN 6
enum ecore_roce_ll2_tx_dest
{
ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
ECORE_ROCE_LL2_TX_DEST_MAX
};
/* HW/FW RoCE Limitations (external. For internal see ecore_roce.h) */
/* CNQ size Limitation
* The CNQ size should be set as twice the amount of CQs, since for each CQ one
* element may be inserted into the CNQ and another element is used per CQ to
* accommodate for a possible race in the arm mechanism.
* The FW supports a CNQ of 64k-1 and this apparently causes an issue - notice
* that the number of QPs can reach 32k giving 64k CQs and 128k CNQ elements.
* Luckily the FW can buffer CNQ elements avoiding an overflow, on the expense
* of performance.
*/
#define ECORE_RDMA_MAX_CNQ_SIZE (0xFFFF) /* 2^16 - 1 */
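/* Illustrative sketch (not part of the original header): sizing a CNQ as twice
 * the number of CQs, clamped to the FW limit above, following the rationale in
 * the preceding comment.
 */
#if 0
static u32
example_cnq_size(u32 num_cqs)
{
	u32 size = 2 * num_cqs;

	return (size > ECORE_RDMA_MAX_CNQ_SIZE) ? ECORE_RDMA_MAX_CNQ_SIZE
						: size;
}
#endif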
/* rdma interface */
enum ecore_rdma_tid_type
{
ECORE_RDMA_TID_REGISTERED_MR,
ECORE_RDMA_TID_FMR,
ECORE_RDMA_TID_MW_TYPE1,
ECORE_RDMA_TID_MW_TYPE2A
};
enum ecore_roce_qp_state {
ECORE_ROCE_QP_STATE_RESET, /* Reset */
ECORE_ROCE_QP_STATE_INIT, /* Initialized */
ECORE_ROCE_QP_STATE_RTR, /* Ready to Receive */
ECORE_ROCE_QP_STATE_RTS, /* Ready to Send */
ECORE_ROCE_QP_STATE_SQD, /* Send Queue Draining */
ECORE_ROCE_QP_STATE_ERR, /* Error */
ECORE_ROCE_QP_STATE_SQE /* Send Queue Error */
};
typedef
void (*affiliated_event_t)(void *context,
u8 fw_event_code,
void *fw_handle);
typedef
void (*unaffiliated_event_t)(void *context,
u8 event_code);
struct ecore_rdma_events {
void *context;
affiliated_event_t affiliated_event;
unaffiliated_event_t unaffiliated_event;
};
struct ecore_rdma_device {
/* Vendor specific information */
u32 vendor_id;
u32 vendor_part_id;
u32 hw_ver;
u64 fw_ver;
u64 node_guid; /* node GUID */
u64 sys_image_guid; /* System image GUID */
u8 max_cnq;
u8 max_sge; /* The maximum number of scatter/gather entries
* per Work Request supported
*/
u8 max_srq_sge; /* The maximum number of scatter/gather entries
* per Work Request supported for SRQ
*/
u16 max_inline;
u32 max_wqe; /* The maximum number of outstanding work
* requests on any Work Queue supported
*/
u32 max_srq_wqe; /* The maximum number of outstanding work
* requests on any Work Queue supported for SRQ
*/
u8 max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
* & atomic operation that can be
* outstanding per QP
*/
u8 max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
* initiation of RDMA Read
* & atomic operations
*/
u64 max_dev_resp_rd_atomic_resc;
u32 max_cq;
u32 max_qp;
u32 max_srq; /* Maximum number of SRQs */
u32 max_mr; /* Maximum number of MRs supported by this device */
u64 max_mr_size; /* Size (in bytes) of the largest contiguous memory
* block that can be registered by this device
*/
u32 max_cqe;
u32 max_mw; /* The maximum number of memory windows supported */
u32 max_fmr;
u32 max_mr_mw_fmr_pbl;
u64 max_mr_mw_fmr_size;
u32 max_pd; /* The maximum number of protection domains supported */
u32 max_ah;
u8 max_pkey;
u16 max_srq_wr; /* Maximum number of WRs per SRQ */
u8 max_stats_queues; /* Maximum number of statistics queues */
u32 dev_caps;
/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
/* Ability to support modifying the maximum number of
* outstanding work requests per QP
*/
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
/* Ability to support block list physical buffer list */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
/* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK 0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT 14
/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
/* Ability to support Loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
u64 page_size_caps;
u8 dev_ack_delay;
u32 reserved_lkey; /* Value of reserved L_key */
u32 bad_pkey_counter; /* Bad P_key counter support indicator */
struct ecore_rdma_events events;
};
enum ecore_port_state {
ECORE_RDMA_PORT_UP,
ECORE_RDMA_PORT_DOWN,
};
enum ecore_roce_capability {
ECORE_ROCE_V1 = 1 << 0,
ECORE_ROCE_V2 = 1 << 1,
};
struct ecore_rdma_port {
enum ecore_port_state port_state;
int link_speed;
u64 max_msg_size;
u8 source_gid_table_len;
void *source_gid_table_ptr;
u8 pkey_table_len;
void *pkey_table_ptr;
u32 pkey_bad_counter;
enum ecore_roce_capability capability;
};
struct ecore_rdma_cnq_params
{
u8 num_pbl_pages; /* Number of pages in the PBL allocated
* for this queue
*/
u64 pbl_ptr; /* Address to the first entry of the queue PBL */
};
/* The CQ Mode affects the CQ doorbell transaction size.
* 64/32 bit machines should configure to 32/16 bits respectively.
*/
enum ecore_rdma_cq_mode {
ECORE_RDMA_CQ_MODE_16_BITS,
ECORE_RDMA_CQ_MODE_32_BITS,
};
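/* Illustrative sketch (not part of the original header): choosing the CQ
 * doorbell mode from the machine word size, per the comment above (64-bit
 * machines use 32-bit doorbells, 32-bit machines use 16-bit doorbells).
 */
#if 0
static enum ecore_rdma_cq_mode
example_pick_cq_mode(void)
{
	return (sizeof(void *) == 8) ? ECORE_RDMA_CQ_MODE_32_BITS
				     : ECORE_RDMA_CQ_MODE_16_BITS;
}
#endif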
struct ecore_roce_dcqcn_params {
u8 notification_point;
u8 reaction_point;
/* fields for notification point */
u32 cnp_send_timeout;
/* fields for reaction point */
u32 rl_bc_rate; /* Byte Counter Limit. */
u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
u16 rl_r_ai; /* Active increase rate */
u16 rl_r_hai; /* Hyper active increase rate */
u16 dcqcn_g; /* Alpha update gain in 1/64K resolution */
u32 dcqcn_k_us; /* Alpha update interval */
u32 dcqcn_timeout_us;
};
#ifdef CONFIG_ECORE_IWARP
#define ECORE_MPA_RTR_TYPE_NONE 0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND (1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE (1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ (1 << 2)
enum ecore_mpa_rev {
ECORE_MPA_REV1,
ECORE_MPA_REV2,
};
struct ecore_iwarp_params {
u32 rcv_wnd_size;
u16 ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
u8 flags;
u8 crc_needed;
enum ecore_mpa_rev mpa_rev;
u8 mpa_rtr;
u8 mpa_peer2peer;
};
#endif
struct ecore_roce_params {
enum ecore_rdma_cq_mode cq_mode;
struct ecore_roce_dcqcn_params dcqcn_params;
u8 ll2_handle; /* required for UD QPs */
};
struct ecore_rdma_start_in_params {
struct ecore_rdma_events *events;
struct ecore_rdma_cnq_params cnq_pbl_list[128];
u8 desired_cnq;
u16 max_mtu;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
struct ecore_iwarp_params iwarp;
#endif
struct ecore_roce_params roce;
};
struct ecore_rdma_add_user_out_params {
/* output variables (given to miniport) */
u16 dpi;
u64 dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
u16 wid_count;
};
/*Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
/* input variables (given by miniport) */
u32 cq_handle_lo; /* CQ handle to be written in CNQ */
u32 cq_handle_hi;
u32 cq_size;
u16 dpi;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log; /* for the pages that contain the
* pointers to the CQ pages
*/
u8 cnq_id;
u16 int_timeout;
};
struct ecore_rdma_resize_cq_in_params {
/* input variables (given by miniport) */
u16 icid;
u32 cq_size;
bool pbl_two_level;
u64 pbl_ptr;
u16 pbl_num_pages;
u8 pbl_page_size_log; /* for the pages that contain the
* pointers to the CQ pages
*/
};
enum roce_mode
{
ROCE_V1,
ROCE_V2_IPV4,
ROCE_V2_IPV6,
MAX_ROCE_MODE
};
struct ecore_rdma_create_qp_in_params {
/* input variables (given by miniport) */
u32 qp_handle_lo; /* QP handle to be written in CQE */
u32 qp_handle_hi;
u32 qp_handle_async_lo; /* QP handle to be written in async event */
u32 qp_handle_async_hi;
bool use_srq;
bool signal_all;
bool fmr_and_reserved_lkey;
u16 pd;
u16 dpi;
u16 sq_cq_id;
u16 sq_num_pages;
u64 sq_pbl_ptr; /* Not relevant for iWARP */
u8 max_sq_sges;
u16 rq_cq_id;
u16 rq_num_pages;
u64 rq_pbl_ptr; /* Not relevant for iWARP */
u16 srq_id;
u8 stats_queue;
};
struct ecore_rdma_create_qp_out_params {
/* output variables (given to miniport) */
u32 qp_id;
u16 icid;
void *rq_pbl_virt;
dma_addr_t rq_pbl_phys;
void *sq_pbl_virt;
dma_addr_t sq_pbl_phys;
};
struct ecore_rdma_destroy_cq_in_params {
/* input variables (given by miniport) */
u16 icid;
};
struct ecore_rdma_destroy_cq_out_params {
/* output variables, provided to the upper layer */
/* Sequence number of completion notification sent for the CQ on
* the associated CNQ
*/
u16 num_cq_notif;
};
/* ECORE GID can be used as IPv4/6 address in RoCE v2 */
union ecore_gid {
u8 bytes[16];
u16 words[8];
u32 dwords[4];
u64 qwords[2];
u32 ipv4_addr;
};
struct ecore_rdma_modify_qp_in_params {
/* input variables (given by miniport) */
u32 modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
enum ecore_roce_qp_state new_state;
u16 pkey;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
u32 dest_qp;
u16 mtu;
u8 traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u32 flow_label; /* ignored in IPv4 */
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
u16 udp_src_port; /* RoCEv2 only */
u16 vlan_id;
u32 rq_psn;
u32 sq_psn;
u8 max_rd_atomic_resp;
u8 max_rd_atomic_req;
u32 ack_timeout;
u8 retry_cnt;
u8 rnr_retry_cnt;
u8 min_rnr_nak_timer;
bool sqd_async;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
bool use_local_mac;
enum roce_mode roce_mode;
};
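/* Illustrative sketch, not part of the driver: each VALID_* MASK/SHIFT pair
 * above gates one field of this structure.  A caller marks the fields it
 * wants applied by setting the matching valid bits; for example, to update
 * only the QP state and the RQ PSN (new_state and rq_psn supplied by the
 * caller):
 *
 *	struct ecore_rdma_modify_qp_in_params qp_params = {0};
 *
 *	qp_params.new_state = new_state;
 *	qp_params.rq_psn = rq_psn;
 *	qp_params.modify_flags =
 *	    (1 << ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT) |
 *	    (1 << ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT);
 */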
struct ecore_rdma_query_qp_out_params {
/* output variables (given to miniport) */
enum ecore_roce_qp_state state;
u32 rq_psn; /* responder */
u32 sq_psn; /* requester */
bool draining; /* send queue is draining */
u16 mtu;
u32 dest_qp;
bool incoming_rdma_read_en;
bool incoming_rdma_write_en;
bool incoming_atomic_en;
bool e2e_flow_control_en;
union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
u32 flow_label; /* ignored in IPv4 */
u8 hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
u8 traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
u32 timeout;
u8 rnr_retry;
u8 retry_cnt;
u8 min_rnr_nak_timer;
u16 pkey_index;
u8 max_rd_atomic;
u8 max_dest_rd_atomic;
bool sqd_async;
};
struct ecore_rdma_register_tid_in_params {
/* input variables (given by miniport) */
u32 itid; /* index only, 18 bit long, lkey = itid << 8 | key */
enum ecore_rdma_tid_type tid_type;
u8 key;
u16 pd;
bool local_read;
bool local_write;
bool remote_read;
bool remote_write;
bool remote_atomic;
bool mw_bind;
u64 pbl_ptr;
bool pbl_two_level;
u8 pbl_page_size_log; /* for the pages that contain the pointers
* to the MR pages
*/
u8 page_size_log; /* for the MR pages */
u32 fbo;
u64 length; /* only lower 40 bits are valid */
u64 vaddr;
bool zbva;
bool phy_mr;
bool dma_mr;
/* DIF related fields */
bool dif_enabled;
u64 dif_error_addr;
u64 dif_runt_addr;
};
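/* Illustrative sketch, not part of the driver: per the itid comment above,
 * the local key handed back to the stack is the 18-bit itid shifted left by
 * eight bits OR'd with the 8-bit key:
 *
 *	u32 lkey = (params->itid << 8) | params->key;
 */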
struct ecore_rdma_create_srq_in_params {
u64 pbl_base_addr;
u64 prod_pair_addr;
u16 num_pages;
u16 pd_id;
u16 page_size;
};
struct ecore_rdma_create_srq_out_params {
u16 srq_id;
};
struct ecore_rdma_destroy_srq_in_params {
u16 srq_id;
};
struct ecore_rdma_modify_srq_in_params {
u32 wqe_limit;
u16 srq_id;
};
struct ecore_rdma_resize_cq_out_params {
/* output variables, provided to the upper layer */
u32 prod; /* CQ producer value on old PBL */
u32 cons; /* CQ consumer value on old PBL */
};
struct ecore_rdma_resize_cnq_in_params {
/* input variables (given by miniport) */
u32 cnq_id;
u32 pbl_page_size_log; /* for the pages that contain the
* pointers to the cnq pages
*/
u64 pbl_ptr;
};
struct ecore_rdma_stats_out_params {
u64 sent_bytes;
u64 sent_pkts;
u64 rcv_bytes;
u64 rcv_pkts;
/* RoCE only */
u64 icrc_errors; /* wraps at 32 bits */
u64 retransmit_events; /* wraps at 32 bits */
u64 silent_drops; /* wraps at 16 bits */
u64 rnr_nacks_sent; /* wraps at 16 bits */
/* iWARP only */
u64 iwarp_tx_fast_rxmit_cnt;
u64 iwarp_tx_slow_start_cnt;
u64 unalign_rx_comp;
};
struct ecore_rdma_counters_out_params {
u64 pd_count;
u64 max_pd;
u64 dpi_count;
u64 max_dpi;
u64 cq_count;
u64 max_cq;
u64 qp_count;
u64 max_qp;
u64 tid_count;
u64 max_tid;
};
enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
struct ecore_rdma_add_user_out_params *out_params);
enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
u16 *pd);
enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
u32 *tid);
enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
struct ecore_rdma_create_cq_in_params *params,
u16 *icid);
/* Returns a pointer to the responder's CID, which is also a pointer to the
* ecore_qp_params struct. Returns NULL in case of failure.
*/
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_in_params *in_params,
struct ecore_rdma_create_qp_out_params *out_params);
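/* Illustrative sketch, not part of the driver: a minimal create-QP call,
 * assuming the caller has already allocated a PD and DPI and built the
 * SQ/RQ PBLs (the local variable names below are hypothetical):
 *
 *	struct ecore_rdma_create_qp_in_params in = {0};
 *	struct ecore_rdma_create_qp_out_params out = {0};
 *	struct ecore_rdma_qp *qp;
 *
 *	in.pd = pd;
 *	in.dpi = dpi;
 *	in.sq_cq_id = sq_cq_id;
 *	in.sq_num_pages = sq_num_pages;
 *	in.sq_pbl_ptr = sq_pbl_phys;
 *	in.rq_cq_id = rq_cq_id;
 *	in.rq_num_pages = rq_num_pages;
 *	in.rq_pbl_ptr = rq_pbl_phys;
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in, &out);
 *
 * A NULL return indicates failure; on success out.qp_id and out.icid
 * identify the new QP.
 */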
enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
struct ecore_rdma_create_qp_out_params *out_params);
enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
u32 tid);
enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
struct ecore_rdma_destroy_cq_in_params *in_params,
struct ecore_rdma_destroy_cq_out_params *out_params);
enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp);
enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);
void
ecore_rdma_free_pd(void *rdma_cxt,
u16 pd);
void
ecore_rdma_free_tid(void *rdma_cxt,
u32 tid);
enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_modify_qp_in_params *params);
struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);
struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);
enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
struct ecore_rdma_qp *qp,
struct ecore_rdma_query_qp_out_params *out_params);
enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
struct ecore_rdma_register_tid_in_params *params);
void ecore_rdma_remove_user(void *rdma_cxt,
u16 dpi);
enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
struct ecore_rdma_resize_cnq_in_params *in_params);
/*Returns the CQ CID or zero in case of failure */
enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
struct ecore_rdma_resize_cq_in_params *in_params,
struct ecore_rdma_resize_cq_out_params *out_params);
/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
struct ecore_rdma_start_in_params *params);
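/* Illustrative sketch, not part of the driver: a minimal start sequence with
 * a single CNQ whose PBL has already been allocated (cnq_pbl_phys and
 * port_mac are hypothetical names):
 *
 *	enum _ecore_status_t rc;
 *	struct ecore_rdma_start_in_params start = {0};
 *
 *	start.desired_cnq = 1;
 *	start.max_mtu = 1500;
 *	memcpy(start.mac_addr, port_mac, ETH_ALEN);
 *	start.cnq_pbl_list[0].num_pbl_pages = 1;
 *	start.cnq_pbl_list[0].pbl_ptr = cnq_pbl_phys;
 *	rc = ecore_rdma_start(p_hwfn, &start);
 *
 * rc is ECORE_SUCCESS once the RDMA engine has started.
 */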
enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);
enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
struct ecore_rdma_stats_out_params *out_parms);
enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
struct ecore_rdma_counters_out_params *out_parms);
u32 ecore_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id);
u32 ecore_rdma_query_cau_timer_res(void *p_hwfn);
void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);
void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);
#ifdef CONFIG_ECORE_IWARP
/* iWARP API */
enum ecore_iwarp_event_type {
ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
* ( ack on mpa response )
*/
ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
ECORE_IWARP_EVENT_DISCONNECT,
ECORE_IWARP_EVENT_CLOSE,
ECORE_IWARP_EVENT_IRQ_FULL,
ECORE_IWARP_EVENT_RQ_EMPTY,
ECORE_IWARP_EVENT_LLP_TIMEOUT,
ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
ECORE_IWARP_EVENT_CQ_OVERFLOW,
ECORE_IWARP_EVENT_QP_CATASTROPHIC,
ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};
enum ecore_tcp_ip_version
{
ECORE_TCP_IPV4,
ECORE_TCP_IPV6,
};
struct ecore_iwarp_cm_info {
enum ecore_tcp_ip_version ip_version;
u32 remote_ip[4];
u32 local_ip[4];
u16 remote_port;
u16 local_port;
u16 vlan;
const void *private_data;
u16 private_data_len;
u8 ord;
u8 ird;
};
struct ecore_iwarp_cm_event_params {
enum ecore_iwarp_event_type event;
const struct ecore_iwarp_cm_info *cm_info;
void *ep_context; /* To be passed to accept call */
int status;
};
typedef int (*iwarp_event_handler)(void *context,
struct ecore_iwarp_cm_event_params *event);
/* Active Side Connect Flow:
 * The upper layer driver calls ecore_iwarp_connect.  The call is blocking,
 * i.e. it returns only after the TCP connection has been established.  Once
 * the MPA connection is established, the ECORE_IWARP_EVENT_ACTIVE_COMPLETE
 * event is passed to the upper layer driver through the event_cb supplied in
 * ecore_iwarp_connect_in, with information about the established connection
 * carried in the event data.
 */
struct ecore_iwarp_connect_in {
iwarp_event_handler event_cb;
void *cb_context;
struct ecore_rdma_qp *qp;
struct ecore_iwarp_cm_info cm_info;
u16 mss;
u8 remote_mac_addr[6];
u8 local_mac_addr[6];
};
struct ecore_iwarp_connect_out {
void *ep_context;
};
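/* Illustrative sketch, not part of the driver: an active-side connect as
 * described above.  my_event_cb, my_ctx, qp and the address variables are
 * hypothetical upper layer objects:
 *
 *	struct ecore_iwarp_connect_in in = {0};
 *	struct ecore_iwarp_connect_out out = {0};
 *	enum _ecore_status_t rc;
 *
 *	in.event_cb = my_event_cb;
 *	in.cb_context = my_ctx;
 *	in.qp = qp;
 *	in.mss = 1460;
 *	in.cm_info.ip_version = ECORE_TCP_IPV4;
 *	in.cm_info.remote_ip[0] = remote_ip;
 *	in.cm_info.remote_port = remote_port;
 *	in.cm_info.local_ip[0] = local_ip;
 *	in.cm_info.local_port = local_port;
 *	rc = ecore_iwarp_connect(rdma_cxt, &in, &out);
 *
 * On ECORE_SUCCESS, out.ep_context identifies the new endpoint and
 * ECORE_IWARP_EVENT_ACTIVE_COMPLETE is later delivered through my_event_cb.
 */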
/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen.  Once a SYN packet
 * arrives that matches an IP/port being listened on, ecore offloads the TCP
 * connection.  After an MPA Request is received on the offloaded connection,
 * the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to the upper layer driver
 * through the event_cb passed below, with the event data placed in the event
 * parameter.  After the upper layer driver processes the event, it should
 * call ecore_iwarp_accept or ecore_iwarp_reject to continue MPA negotiation.
 * Once negotiation is complete, the ECORE_IWARP_EVENT_PASSIVE_COMPLETE event
 * is passed to the event_cb originally supplied in the ecore_iwarp_listen_in
 * structure.
 */
struct ecore_iwarp_listen_in {
iwarp_event_handler event_cb; /* Callback func for delivering events */
void *cb_context; /* passed to event_cb */
u32 max_backlog; /* Max num of pending incoming connection requests */
enum ecore_tcp_ip_version ip_version;
u32 ip_addr[4];
u16 port;
u16 vlan;
};
struct ecore_iwarp_listen_out {
void *handle; /* to be sent to destroy */
};
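/* Illustrative sketch, not part of the driver: setting up a passive-side
 * listener as described above (my_event_cb, my_ctx, local_ip and local_port
 * are hypothetical):
 *
 *	struct ecore_iwarp_listen_in in = {0};
 *	struct ecore_iwarp_listen_out out = {0};
 *	enum _ecore_status_t rc;
 *
 *	in.event_cb = my_event_cb;
 *	in.cb_context = my_ctx;
 *	in.max_backlog = 8;
 *	in.ip_version = ECORE_TCP_IPV4;
 *	in.ip_addr[0] = local_ip;
 *	in.port = local_port;
 *	rc = ecore_iwarp_create_listen(rdma_cxt, &in, &out);
 *
 * On ECORE_SUCCESS, out.handle is later passed to ecore_iwarp_destroy_listen.
 * Inside my_event_cb an ECORE_IWARP_EVENT_MPA_REQUEST is completed with
 * ecore_iwarp_accept (using the ep_context from the event data) or declined
 * with ecore_iwarp_reject.
 */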
struct ecore_iwarp_accept_in {
void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
void *cb_context; /* context to be passed to event_cb */
struct ecore_rdma_qp *qp;
const void *private_data;
u16 private_data_len;
u8 ord;
u8 ird;
};
struct ecore_iwarp_reject_in {
void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
void *cb_context; /* context to be passed to event_cb */
const void *private_data;
u16 private_data_len;
};
struct ecore_iwarp_send_rtr_in {
void *ep_context;
};
struct ecore_iwarp_tcp_abort_in {
void *ep_context;
};
enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
struct ecore_iwarp_connect_in *iparams,
struct ecore_iwarp_connect_out *oparams);
enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
struct ecore_iwarp_listen_in *iparams,
struct ecore_iwarp_listen_out *oparams);
enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
struct ecore_iwarp_accept_in *iparams);
enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
struct ecore_iwarp_reject_in *iparams);
enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);
enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);
enum _ecore_status_t
ecore_iwarp_tcp_abort(void *rdma_cxt, struct ecore_iwarp_tcp_abort_in *iparams);
#endif /* CONFIG_ECORE_IWARP */
#endif


@ -0,0 +1,473 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __RT_DEFS_H__
#define __RT_DEFS_H__
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
#define CAU_REG_PI_MEMORY_RT_SIZE 4416
#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
#define SRC_REG_FIRSTFREE_RT_SIZE 2
#define SRC_REG_LASTFREE_RT_OFFSET 6667
#define SRC_REG_LASTFREE_RT_SIZE 2
#define SRC_REG_COUNTFREE_RT_OFFSET 6669
#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6700
#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6701
#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6702
#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28702
#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 28703
#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 28704
#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29738
#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29739
#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29740
#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29741
#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29742
#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29743
#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29744
#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29745
#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29746
#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29747
#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29748
#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29749
#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29750
#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29751
#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29752
#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29753
#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29754
#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29755
#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29756
#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29757
#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29758
#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29759
#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29760
#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29761
#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29762
#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29763
#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29764
#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29765
#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29766
#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29767
#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29768
#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29769
#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29770
#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29771
#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29772
#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29773
#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29774
#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29775
#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29776
#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29777
#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29778
#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29779
#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29780
#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29781
#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29782
#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29783
#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29784
#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29785
#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29786
#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29787
#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29788
#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29789
#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29790
#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29791
#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29792
#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29793
#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29794
#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29795
#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29796
#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29797
#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29798
#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29799
#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29800
#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29801
#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29802
#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29803
#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29804
#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29805
#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29933
#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29934
#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29935
#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29936
#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29937
#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29938
#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29939
#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29940
#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29941
#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29942
#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29943
#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29944
#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29945
#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29946
#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29947
#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29948
#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29949
#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29950
#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29951
#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29952
#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29953
#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29954
#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29955
#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29956
#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29957
#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29958
#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29959
#define QM_REG_PQTX2PF_0_RT_OFFSET 29960
#define QM_REG_PQTX2PF_1_RT_OFFSET 29961
#define QM_REG_PQTX2PF_2_RT_OFFSET 29962
#define QM_REG_PQTX2PF_3_RT_OFFSET 29963
#define QM_REG_PQTX2PF_4_RT_OFFSET 29964
#define QM_REG_PQTX2PF_5_RT_OFFSET 29965
#define QM_REG_PQTX2PF_6_RT_OFFSET 29966
#define QM_REG_PQTX2PF_7_RT_OFFSET 29967
#define QM_REG_PQTX2PF_8_RT_OFFSET 29968
#define QM_REG_PQTX2PF_9_RT_OFFSET 29969
#define QM_REG_PQTX2PF_10_RT_OFFSET 29970
#define QM_REG_PQTX2PF_11_RT_OFFSET 29971
#define QM_REG_PQTX2PF_12_RT_OFFSET 29972
#define QM_REG_PQTX2PF_13_RT_OFFSET 29973
#define QM_REG_PQTX2PF_14_RT_OFFSET 29974
#define QM_REG_PQTX2PF_15_RT_OFFSET 29975
#define QM_REG_PQTX2PF_16_RT_OFFSET 29976
#define QM_REG_PQTX2PF_17_RT_OFFSET 29977
#define QM_REG_PQTX2PF_18_RT_OFFSET 29978
#define QM_REG_PQTX2PF_19_RT_OFFSET 29979
#define QM_REG_PQTX2PF_20_RT_OFFSET 29980
#define QM_REG_PQTX2PF_21_RT_OFFSET 29981
#define QM_REG_PQTX2PF_22_RT_OFFSET 29982
#define QM_REG_PQTX2PF_23_RT_OFFSET 29983
#define QM_REG_PQTX2PF_24_RT_OFFSET 29984
#define QM_REG_PQTX2PF_25_RT_OFFSET 29985
#define QM_REG_PQTX2PF_26_RT_OFFSET 29986
#define QM_REG_PQTX2PF_27_RT_OFFSET 29987
#define QM_REG_PQTX2PF_28_RT_OFFSET 29988
#define QM_REG_PQTX2PF_29_RT_OFFSET 29989
#define QM_REG_PQTX2PF_30_RT_OFFSET 29990
#define QM_REG_PQTX2PF_31_RT_OFFSET 29991
#define QM_REG_PQTX2PF_32_RT_OFFSET 29992
#define QM_REG_PQTX2PF_33_RT_OFFSET 29993
#define QM_REG_PQTX2PF_34_RT_OFFSET 29994
#define QM_REG_PQTX2PF_35_RT_OFFSET 29995
#define QM_REG_PQTX2PF_36_RT_OFFSET 29996
#define QM_REG_PQTX2PF_37_RT_OFFSET 29997
#define QM_REG_PQTX2PF_38_RT_OFFSET 29998
#define QM_REG_PQTX2PF_39_RT_OFFSET 29999
#define QM_REG_PQTX2PF_40_RT_OFFSET 30000
#define QM_REG_PQTX2PF_41_RT_OFFSET 30001
#define QM_REG_PQTX2PF_42_RT_OFFSET 30002
#define QM_REG_PQTX2PF_43_RT_OFFSET 30003
#define QM_REG_PQTX2PF_44_RT_OFFSET 30004
#define QM_REG_PQTX2PF_45_RT_OFFSET 30005
#define QM_REG_PQTX2PF_46_RT_OFFSET 30006
#define QM_REG_PQTX2PF_47_RT_OFFSET 30007
#define QM_REG_PQTX2PF_48_RT_OFFSET 30008
#define QM_REG_PQTX2PF_49_RT_OFFSET 30009
#define QM_REG_PQTX2PF_50_RT_OFFSET 30010
#define QM_REG_PQTX2PF_51_RT_OFFSET 30011
#define QM_REG_PQTX2PF_52_RT_OFFSET 30012
#define QM_REG_PQTX2PF_53_RT_OFFSET 30013
#define QM_REG_PQTX2PF_54_RT_OFFSET 30014
#define QM_REG_PQTX2PF_55_RT_OFFSET 30015
#define QM_REG_PQTX2PF_56_RT_OFFSET 30016
#define QM_REG_PQTX2PF_57_RT_OFFSET 30017
#define QM_REG_PQTX2PF_58_RT_OFFSET 30018
#define QM_REG_PQTX2PF_59_RT_OFFSET 30019
#define QM_REG_PQTX2PF_60_RT_OFFSET 30020
#define QM_REG_PQTX2PF_61_RT_OFFSET 30021
#define QM_REG_PQTX2PF_62_RT_OFFSET 30022
#define QM_REG_PQTX2PF_63_RT_OFFSET 30023
#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30024
#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30025
#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30026
#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30027
#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30028
#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30029
#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30030
#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30031
#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30032
#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30033
#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30034
#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30035
#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30036
#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30037
#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30038
#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30039
#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30040
#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30041
#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30042
#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30043
#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30044
#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30045
#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30046
#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30047
#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30048
#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30049
#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30050
#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30051
#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30052
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30308
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
#define QM_REG_RLGLBLCRD_RT_OFFSET 30564
#define QM_REG_RLGLBLCRD_RT_SIZE 256
#define QM_REG_RLGLBLENABLE_RT_OFFSET 30820
#define QM_REG_RLPFPERIOD_RT_OFFSET 30821
#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30822
#define QM_REG_RLPFINCVAL_RT_OFFSET 30823
#define QM_REG_RLPFINCVAL_RT_SIZE 16
#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30839
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
#define QM_REG_RLPFCRD_RT_OFFSET 30855
#define QM_REG_RLPFCRD_RT_SIZE 16
#define QM_REG_RLPFENABLE_RT_OFFSET 30871
#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30872
#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30873
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30889
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
#define QM_REG_WFQPFCRD_RT_OFFSET 30905
#define QM_REG_WFQPFCRD_RT_SIZE 256
#define QM_REG_WFQPFENABLE_RT_OFFSET 31161
#define QM_REG_WFQVPENABLE_RT_OFFSET 31162
#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31163
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
#define QM_REG_TXPQMAP_RT_OFFSET 31675
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32187
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
#define QM_REG_WFQVPCRD_RT_OFFSET 32699
#define QM_REG_WFQVPCRD_RT_SIZE 512
#define QM_REG_WFQVPMAP_RT_OFFSET 33211
#define QM_REG_WFQVPMAP_RT_SIZE 512
#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33723
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
#define QM_REG_VOQCRDLINE_RT_OFFSET 34043
#define QM_REG_VOQCRDLINE_RT_SIZE 36
#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34079
#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34115
#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34116
#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34117
#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34118
#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34119
#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34120
#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34121
#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34122
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34126
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34130
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34134
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34135
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34167
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34183
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34199
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34215
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34231
#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34232
#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34233
#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34234
#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34235
#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34236
#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34237
#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34238
#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34239
#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34240
#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34241
#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34242
#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34243
#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34244
#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34245
#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34246
#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34247
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34248
#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34249
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34250
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34251
#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34252
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34253
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34254
#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34255
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34256
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34257
#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34258
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34259
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34260
#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34261
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34262
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34263
#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34264
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34265
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34266
#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34267
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34268
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34269
#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34270
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34271
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34272
#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34273
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34274
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34275
#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34276
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34277
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34278
#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34279
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34280
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34281
#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34282
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34283
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34284
#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34285
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34286
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34287
#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34288
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34289
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34290
#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34291
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34292
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34293
#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34294
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34295
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34296
#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34297
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34298
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34299
#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34300
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34301
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34302
#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34303
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34304
#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34305
#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34306
#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34307
#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34308
#define RUNTIME_ARRAY_SIZE 34309
#endif /* __RT_DEFS_H__ */


@ -0,0 +1,86 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_SP_API_H__
#define __ECORE_SP_API_H__
#include "ecore_status.h"
enum spq_mode {
ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
ECORE_SPQ_MODE_CB, /* Client supplies a callback */
ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */
};
struct ecore_hwfn;
union event_ring_data;
struct eth_slow_path_rx_cqe;
struct ecore_spq_comp_cb {
void (*function)(struct ecore_hwfn *,
void *,
union event_ring_data *,
u8 fw_return_code);
void *cookie;
};
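/* Illustrative sketch, not part of the driver: with ECORE_SPQ_MODE_CB the
 * caller supplies a completion callback matching the function pointer above
 * (my_ramrod_done and my_ctx are hypothetical names):
 *
 *	static void my_ramrod_done(struct ecore_hwfn *p_hwfn, void *cookie,
 *				   union event_ring_data *data,
 *				   u8 fw_return_code)
 *	{
 *	}
 *
 *	struct ecore_spq_comp_cb cb = {
 *		.function = my_ramrod_done,
 *		.cookie = my_ctx,
 *	};
 *
 * When the ramrod completes, my_ramrod_done is invoked with the cookie set
 * above and the firmware return code.
 */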
/**
* @brief ecore_eth_cqe_completion - handles the completion of a
* ramrod on the cqe ring
*
* @param p_hwfn
* @param cqe
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
/**
* @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
* update Ramrod
*
* This ramrod is sent to update a tunneling configuration
* for a physical function (PF).
*
* @param p_hwfn
* @param p_tunn - pf update tunneling parameters
* @param comp_mode - completion mode
* @param p_comp_data - callback function
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data);
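/* Illustrative sketch, not part of the driver: updating the VXLAN UDP port
 * through this ramrod and blocking until the firmware completes it:
 *
 *	struct ecore_tunnel_info tunn = {0};
 *
 *	tunn.vxlan_port.b_update_port = true;
 *	tunn.vxlan_port.port = 4789;
 *	ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
 *	    ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */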
#endif


@ -0,0 +1,589 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File : ecore_sp_commands.c
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
u8 cmd,
u8 protocol,
struct ecore_sp_init_data *p_data)
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc;
if (!pp_ent)
return ECORE_INVAL;
/* Get an SPQ entry */
rc = ecore_spq_get_entry(p_hwfn, pp_ent);
if (rc != ECORE_SUCCESS)
return rc;
/* Fill the SPQ entry */
p_ent = *pp_ent;
p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
p_ent->elem.hdr.cmd_id = cmd;
p_ent->elem.hdr.protocol_id = protocol;
p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
p_ent->comp_mode = p_data->comp_mode;
p_ent->comp_done.done = 0;
switch (p_ent->comp_mode) {
case ECORE_SPQ_MODE_EBLOCK:
p_ent->comp_cb.cookie = &p_ent->comp_done;
break;
case ECORE_SPQ_MODE_BLOCK:
if (!p_data->p_comp_data)
return ECORE_INVAL;
p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
break;
case ECORE_SPQ_MODE_CB:
if (!p_data->p_comp_data)
p_ent->comp_cb.function = OSAL_NULL;
else
p_ent->comp_cb = *p_data->p_comp_data;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
p_ent->comp_mode);
return ECORE_INVAL;
}
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
opaque_cid, cmd, protocol,
(unsigned long)&p_ent->ramrod,
D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
"MODE_CB"));
OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
return ECORE_SUCCESS;
}
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case ECORE_TUNN_CLSS_MAC_VLAN:
return TUNNEL_CLSS_MAC_VLAN;
case ECORE_TUNN_CLSS_MAC_VNI:
return TUNNEL_CLSS_MAC_VNI;
case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
return TUNNEL_CLSS_INNER_MAC_VLAN;
case ECORE_TUNN_CLSS_INNER_MAC_VNI:
return TUNNEL_CLSS_INNER_MAC_VNI;
case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
default:
return TUNNEL_CLSS_MAC_VLAN;
}
}
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
struct ecore_tunnel_info *p_src,
bool b_pf_start)
{
if (p_src->vxlan.b_update_mode || b_pf_start)
p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
if (p_src->l2_gre.b_update_mode || b_pf_start)
p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
if (p_src->ip_gre.b_update_mode || b_pf_start)
p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
if (p_src->l2_geneve.b_update_mode || b_pf_start)
p_tun->l2_geneve.b_mode_enabled =
p_src->l2_geneve.b_mode_enabled;
if (p_src->ip_geneve.b_update_mode || b_pf_start)
p_tun->ip_geneve.b_mode_enabled =
p_src->ip_geneve.b_mode_enabled;
}
static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
struct ecore_tunnel_info *p_src)
{
enum tunnel_clss type;
p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}
static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
struct ecore_tunnel_info *p_src)
{
p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
if (p_src->geneve_port.b_update_port)
p_tun->geneve_port.port = p_src->geneve_port.port;
if (p_src->vxlan_port.b_update_port)
p_tun->vxlan_port.port = p_src->vxlan_port.port;
}
static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type)
{
*p_tunn_cls = tun_type->tun_cls;
}
static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
struct ecore_tunn_update_type *tun_type,
u8 *p_update_port, __le16 *p_port,
struct ecore_tunn_update_udp_port *p_udp_port)
{
__ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
if (p_udp_port->b_update_port) {
*p_update_port = 1;
*p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
}
}
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_src,
struct pf_update_tunnel_config *p_tunn_cfg)
{
struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
ecore_set_tunn_cls_info(p_tun, p_src);
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
&p_tun->ip_gre);
p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_tunnel_info *p_tun)
{
ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
p_tun->ip_gre.b_mode_enabled);
ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
p_tun->ip_geneve.b_mode_enabled);
}
static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn)
{
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, true,
"A0 chip: tunnel hw config is not supported\n");
return;
}
if (p_tunn->vxlan_port.b_update_port)
ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->vxlan_port.port);
if (p_tunn->geneve_port.b_update_port)
ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
p_tunn->geneve_port.port);
ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, true,
"A0 chip: tunnel pf start config is not supported\n");
return;
}
if (!p_src)
return;
ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
ecore_set_tunn_cls_info(p_tun, p_src);
ecore_set_tunn_ports(p_tun, p_src);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
&p_tun->vxlan,
&p_tunn_cfg->set_vxlan_udp_port_flg,
&p_tunn_cfg->vxlan_udp_port,
&p_tun->vxlan_port);
ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
&p_tun->l2_geneve,
&p_tunn_cfg->set_geneve_udp_port_flg,
&p_tunn_cfg->geneve_udp_port,
&p_tun->geneve_port);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
&p_tun->ip_geneve);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
&p_tun->l2_gre);
__ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
&p_tun->ip_gre);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
u8 sb_index = p_hwfn->p_eq->eq_sb_index;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 page_cnt;
/* update initial eq producer */
ecore_eq_prod_update(p_hwfn,
ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));
/* Initialize the SPQ entry for the ramrod */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_START,
PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
/* Fill the ramrod data */
p_ramrod = &p_ent->ramrod.pf_start;
p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
p_ramrod->event_ring_sb_index = sb_index;
p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
/* For easier debugging */
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
switch (mode) {
case ECORE_MF_DEFAULT:
case ECORE_MF_NPAR:
p_ramrod->mf_mode = MF_NPAR;
break;
case ECORE_MF_OVLAN:
p_ramrod->mf_mode = MF_OVLAN;
break;
default:
DP_NOTICE(p_hwfn, true, "Unsupported MF mode, init as DEFAULT\n");
p_ramrod->mf_mode = MF_NPAR;
}
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
switch (p_hwfn->hw_info.personality) {
case ECORE_PCI_ETH:
p_ramrod->personality = PERSONALITY_ETH;
break;
case ECORE_PCI_FCOE:
p_ramrod->personality = PERSONALITY_FCOE;
break;
case ECORE_PCI_ISCSI:
p_ramrod->personality = PERSONALITY_ISCSI;
break;
case ECORE_PCI_ETH_IWARP:
case ECORE_PCI_ETH_ROCE:
case ECORE_PCI_ETH_RDMA:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
p_hwfn->hw_info.personality);
p_ramrod->personality = PERSONALITY_ETH;
}
if (p_hwfn->p_dev->p_iov_info) {
struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8)p_iov->total_vfs;
}
/* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
* version is available.
*/
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, p_ramrod->outer_tag);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (p_tunn)
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_CB;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
&p_ent->ramrod.pf_update);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
enum _ecore_status_t rc = ECORE_NOTIMPL;
struct rl_update_ramrod_data *rl_update;
struct ecore_sp_init_data init_data;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
rl_update = &p_ent->ramrod.rl_update;
rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
rl_update->rl_init_flg = params->rl_init_flg;
rl_update->rl_start_flg = params->rl_start_flg;
rl_update->rl_stop_flg = params->rl_stop_flg;
rl_update->rl_id_first = params->rl_id_first;
rl_update->rl_id_last = params->rl_id_last;
rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
params->dcqcn_timeuot_us);
rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
DP_NOTICE(p_hwfn, true,
"A0 chip: tunnel pf update config is not supported\n");
return rc;
}
if (!p_tunn)
return ECORE_INVAL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
&p_ent->ramrod.pf_update.tunnel_config);
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
if (rc != ECORE_SUCCESS)
return rc;
ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
&init_data);
if (rc != ECORE_SUCCESS)
return rc;
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

View File

@ -0,0 +1,168 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_SP_COMMANDS_H__
#define __ECORE_SP_COMMANDS_H__
#include "ecore.h"
#include "ecore_spq.h"
#include "ecore_sp_api.h"
#define ECORE_SP_EQ_COMPLETION 0x01
#define ECORE_SP_CQE_COMPLETION 0x02
struct ecore_sp_init_data {
/* The CID and FID aren't necessarily derived from hwfn,
* e.g., in IOV scenarios. CID might differ between SPQ and
* other elements.
*/
u32 cid;
u16 opaque_fid;
/* Information regarding operation upon sending & completion */
enum spq_mode comp_mode;
struct ecore_spq_comp_cb *p_comp_data;
};
/**
* @brief Acquire and initialize an SPQ entry for a given ramrod.
*
* @param p_hwfn
* @param pp_ent - will be filled with a pointer to an entry upon success
* @param cmd - dependent upon protocol
* @param protocol
* @param p_data - various configuration required for ramrod
*
* @return ECORE_SUCCESS upon success, otherwise failure.
*/
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
u8 cmd,
u8 protocol,
struct ecore_sp_init_data *p_data);
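/* Illustrative sketch (not part of the API): the usual ramrod flow is to
 * zero an ecore_sp_init_data, acquire an entry with ecore_sp_init_request()
 * and hand it to ecore_spq_post(), as the heartbeat/PF-stop flows above do:
 *
 *    struct ecore_spq_entry *p_ent = OSAL_NULL;
 *    struct ecore_sp_init_data init_data;
 *    enum _ecore_status_t rc;
 *
 *    OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *    init_data.cid = ecore_spq_get_cid(p_hwfn);
 *    init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *    init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *
 *    rc = ecore_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
 *                               PROTOCOLID_COMMON, &init_data);
 *    if (rc == ECORE_SUCCESS)
 *        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 */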
/**
* @brief ecore_sp_pf_start - PF Function Start Ramrod
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
* event ring specified in the parameters.
*
* Ramrods complete on the common event ring for the PF. This ring is
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
* @param p_tunn - pf start tunneling configuration
* @param mode
* @param allow_npar_tx_switch - npar tx switching to be used
* for vports configured for tx-switching.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
* @brief ecore_sp_pf_update - PF Function Update Ramrod
*
* This ramrod updates function-related parameters. Every parameter can be
* updated independently, according to configuration flags.
*
* @note Final phase API.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_pf_stop - PF Function Stop Ramrod
*
* This ramrod is sent to close a Physical Function (PF). It is the last ramrod
* sent and the last completion written to the PF's Event Ring. This ramrod also
* deletes the context for the Slow hwfn connection on this PF.
*
* @note Not required for first packet.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn);
struct ecore_rl_update_params {
u8 qcn_update_param_flg;
u8 dcqcn_update_param_flg;
u8 rl_init_flg;
u8 rl_start_flg;
u8 rl_stop_flg;
u8 rl_id_first;
u8 rl_id_last;
u8 rl_dc_qcn_flg; /* If set, RL will used for DCQCN */
u32 rl_bc_rate; /* Byte Counter Limit */
u16 rl_max_rate; /* Maximum rate in 1.6 Mbps resolution */
u16 rl_r_ai; /* Active increase rate */
u16 rl_r_hai; /* Hyper active increase rate */
u16 dcqcn_g; /* DCQCN Alpha update gain in 1/64K resolution */
u32 dcqcn_k_us; /* DCQCN Alpha update interval */
u32 dcqcn_timeuot_us;
u32 qcn_timeuot_us;
};
/**
* @brief ecore_sp_rl_update - Update rate limiters
*
* @param p_hwfn
* @param params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
struct ecore_rl_update_params *params);
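/* Illustrative sketch (field values are hypothetical): a caller zeroes the
 * params, sets only the fields relevant to the requested operation and passes
 * them to ecore_sp_rl_update(), which copies them field-by-field into the
 * RL_UPDATE ramrod:
 *
 *    struct ecore_rl_update_params params;
 *
 *    OSAL_MEMSET(&params, 0, sizeof(params));
 *    params.rl_init_flg = 1;
 *    params.rl_start_flg = 1;
 *    params.rl_id_first = 0;
 *    params.rl_id_last = 0;
 *    params.rl_max_rate = 100;    (1.6 Mbps resolution, see above)
 *    rc = ecore_sp_rl_update(p_hwfn, &params);
 */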
#endif /*__ECORE_SP_COMMANDS_H__*/

File diff suppressed because it is too large

View File

@ -0,0 +1,352 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_SPQ_H__
#define __ECORE_SPQ_H__
#include "ecore_hsi_common.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_hsi_fcoe.h"
#include "tcp_common.h"
#include "ecore_hsi_iscsi.h"
#include "ecore_hsi_roce.h"
#include "ecore_hsi_iwarp.h"
#include "ecore_chain.h"
#include "ecore_sp_api.h"
union ramrod_data
{
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
struct rl_update_ramrod_data rl_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
struct tx_queue_start_ramrod_data tx_queue_start;
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
struct core_tx_start_ramrod_data core_tx_queue_start;
struct core_tx_stop_ramrod_data core_tx_queue_stop;
struct vport_filter_update_ramrod_data vport_filter_update;
struct rdma_init_func_ramrod_data rdma_init_func;
struct rdma_close_func_ramrod_data rdma_close_func;
struct rdma_register_tid_ramrod_data rdma_register_tid;
struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
struct roce_create_qp_req_ramrod_data roce_create_qp_req;
struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
struct roce_query_qp_req_ramrod_data roce_query_qp_req;
struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
struct roce_init_func_ramrod_data roce_init_func;
struct rdma_create_cq_ramrod_data rdma_create_cq;
struct rdma_resize_cq_ramrod_data rdma_resize_cq;
struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
struct rdma_srq_create_ramrod_data rdma_create_srq;
struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
struct rdma_srq_modify_ramrod_data rdma_modify_srq;
struct iwarp_create_qp_ramrod_data iwarp_create_qp;
struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
struct iwarp_query_qp_ramrod_data iwarp_query_qp;
struct iwarp_init_func_ramrod_data iwarp_init_func;
struct fcoe_init_ramrod_params fcoe_init;
struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
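/* Illustrative note: each struct ecore_spq_entry (below) embeds one instance
 * of this union; the caller fills only the member matching the ramrod being
 * posted, e.g. the RL_UPDATE flow writes p_ent->ramrod.rl_update before
 * calling ecore_spq_post().
 */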
#define EQ_MAX_CREDIT 0xffffffff
enum spq_priority {
ECORE_SPQ_PRIORITY_NORMAL,
ECORE_SPQ_PRIORITY_HIGH,
};
union ecore_spq_req_comp {
struct ecore_spq_comp_cb cb;
u64 *done_addr;
};
/* SPQ_MODE_EBLOCK */
struct ecore_spq_comp_done {
u64 done;
u8 fw_return_code;
};
struct ecore_spq_entry {
osal_list_entry_t list;
u8 flags;
/* HSI slow path element */
struct slow_path_element elem;
union ramrod_data ramrod;
enum spq_priority priority;
/* pending queue for this entry */
osal_list_t *queue;
enum spq_mode comp_mode;
struct ecore_spq_comp_cb comp_cb;
struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
};
struct ecore_eq {
struct ecore_chain chain;
u8 eq_sb_index; /* index within the SB */
__le16 *p_fw_cons; /* ptr to index value */
};
struct ecore_consq {
struct ecore_chain chain;
};
struct ecore_spq {
osal_spinlock_t lock;
osal_list_t unlimited_pending;
osal_list_t pending;
osal_list_t completion_pending;
osal_list_t free_pool;
struct ecore_chain chain;
/* allocated dma-able memory for spq entries (+ramrod data) */
dma_addr_t p_phys;
struct ecore_spq_entry *p_virt;
/* Bitmap for handling out-of-order completions */
#define SPQ_RING_SIZE \
(CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
#define SPQ_COMP_BMAP_SIZE (SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */))
unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE];
u8 comp_bitmap_idx;
#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \
do { \
OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap); \
} while (0)
#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \
do { \
OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap); \
} while (0)
#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \
(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap))
/* Statistics */
u32 unlimited_pending_count;
u32 normal_count;
u32 high_count;
u32 comp_sent_count;
u32 comp_count;
u32 cid;
};
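/* Illustrative sketch of how p_comp_bitmap above is meant to be used
 * (a simplified sequence, not the exact implementation): a completion for
 * 'echo' may arrive out of order, so it is first marked in the bitmap, and
 * the consumer side then advances over the contiguous run of completed
 * entries:
 *
 *    SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
 *    while (SPQ_COMP_BMAP_TEST_BIT(p_spq, p_spq->comp_bitmap_idx)) {
 *        SPQ_COMP_BMAP_CLEAR_BIT(p_spq, p_spq->comp_bitmap_idx);
 *        p_spq->comp_bitmap_idx++;
 *        (advance the chain consumer here)
 *    }
 */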
struct ecore_port;
struct ecore_hwfn;
/**
* @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that
* pends it to the future list.
*
* @param p_hwfn
* @param p_ent
* @param fw_return_code
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent,
u8 *fw_return_code);
/**
* @brief ecore_spq_alloc - Allocates & initializes the SPQ and EQ.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_setup - Reset the SPQ to its start state.
*
* @param p_hwfn
*/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_free - Deallocates the given SPQ struct.
*
* @param p_hwfn
*/
void ecore_spq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_spq_get_entry - Obtain an entry from the spq
* free pool list.
*
* @param p_hwfn
* @param pp_ent
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent);
/**
* @brief ecore_spq_return_entry - Return an entry to spq free
* pool list
*
* @param p_hwfn
* @param p_ent
*/
void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry *p_ent);
/**
* @brief ecore_eq_alloc - Allocates & initializes an EQ struct
*
* @param p_hwfn
* @param num_elem number of elements in the eq
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
/**
* @brief ecore_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
*/
void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
*/
void ecore_eq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
*
* @param p_hwfn
* @param prod
*/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn,
u16 prod);
/**
* @brief ecore_eq_completion - Completes currently pending EQ elements
*
* @param p_hwfn
* @param cookie
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
void *cookie);
/**
* @brief ecore_spq_completion - Completes a single event
*
* @param p_hwfn
* @param echo - echo value from cookie (used for determining completion)
* @param p_data - data from cookie (used in callback function if applicable)
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
__le16 echo,
u8 fw_return_code,
union event_ring_data *p_data);
/**
* @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
*
* @param p_hwfn
*
* @return u32 - SPQ CID
*/
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_alloc - Allocates & initializes a ConsQ struct
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
*/
void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
*/
void ecore_consq_free(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_SPQ_H__ */

View File

@ -0,0 +1,349 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_SRIOV_H__
#define __ECORE_SRIOV_H__
#include "ecore_status.h"
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
(E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message: both the request filled by the VF
* and the response filled by the PF. The VF needs one copy
* of this message; it fills the request part and sends it to
* the PF. The PF copies the response into the response part for
* the VF to read later. The PF needs to hold one such message
* per VF; the request copied from the VF is placed in the
* request part, and the response is filled by the PF before it
* is read back by the VF.
*/
struct ecore_vf_mbx_msg {
union vfpf_tlvs req;
union pfvf_tlvs resp;
};
/* This mailbox is maintained per VF in its PF and
* contains all information required for sending / receiving
* a message
*/
struct ecore_iov_vf_mbx {
union vfpf_tlvs *req_virt;
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
/* Message from VF awaits handling */
bool b_pending_msg;
u8 *offset;
#ifdef CONFIG_ECORE_SW_CHANNEL
struct ecore_iov_sw_mbx sw_mbx;
#endif
/* VF GPA address */
u32 vf_addr_lo;
u32 vf_addr_hi;
struct vfpf_first_tlv first_tlv; /* saved VF request header */
u8 flags;
#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent
* more than one pending msg
*/
};
#define ECORE_IOV_LEGACY_QID_RX (0)
#define ECORE_IOV_LEGACY_QID_TX (1)
#define ECORE_IOV_QID_INVALID (0xFE)
struct ecore_vf_queue_cid {
bool b_is_tx;
struct ecore_queue_cid *p_cid;
};
/* Describes a qzone associated with the VF */
struct ecore_vf_queue {
/* Input from upper-layer, mapping relative queue to queue-zone */
u16 fw_rx_qid;
u16 fw_tx_qid;
struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
VF_ACQUIRED = 1, /* VF, acquired, but not initialized */
VF_ENABLED = 2, /* VF, Enabled */
VF_RESET = 3, /* VF, FLR'd, pending cleanup */
VF_STOPPED = 4 /* VF, Stopped */
};
struct ecore_vf_vlan_shadow {
bool used;
u16 vid;
};
struct ecore_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1];
/* Shadow copy of all configured MACs; Empty if forcing MACs */
u8 macs[ECORE_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
u8 inner_vlan_removal;
};
/* PFs maintain an array of this structure, per VF */
struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
bool b_malicious;
u8 to_disable;
struct ecore_bulletin bulletin;
dma_addr_t vf_bulletin;
/* PF saves a copy of the last VF acquire message */
struct vfpf_acquire_tlv acquire;
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
u8 vport_id;
u8 rss_eng_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
u8 vport_instance; /* Number of active vports */
u8 num_rxqs;
u8 num_txqs;
u16 rx_coal;
u16 tx_coal;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
u8 was_malicious;
u8 num_active_rxqs;
void *ctx;
struct ecore_public_vf_info p_vf_info;
bool spoof_chk; /* Current configured on HW */
bool req_spoofchk_val; /* Requested value */
/* Stores the configuration requested by VF */
struct ecore_vf_shadow_config shadow_config;
/* A bitfield using bulletin's valid-map bits, used to indicate
* which of the bulletin board features have been configured.
*/
u64 configured_features;
#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
(1 << VLAN_ADDR_FORCED))
};
/* This structure is part of ecore_hwfn and used only for PFs that have sriov
* capability enabled.
*/
struct ecore_pf_iov {
struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
#ifndef REMOVE_DBG
/* This doesn't serve anything functionally, but it makes windows
* debugging of IOV related issues easier.
*/
u64 active_vfs[ECORE_VF_ARRAY_LENGTH];
#endif
/* Allocate message address continuously and split to each VF */
void *mbx_msg_virt_addr;
dma_addr_t mbx_msg_phys_addr;
u32 mbx_msg_size;
void *mbx_reply_virt_addr;
dma_addr_t mbx_reply_phys_addr;
u32 mbx_reply_size;
void *p_bulletins;
dma_addr_t bulletins_phys;
u32 bulletins_size;
};
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief Read sriov related information and allocated resources
* reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
u8 **offset,
u16 type,
u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn,
void *tlvs_list);
/**
* @brief ecore_iov_alloc - allocate sriov related resources
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void ecore_iov_setup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
* @brief ecore_iov_free - free sriov related resources
*
* @param p_hwfn
*/
void ecore_iov_free(struct ecore_hwfn *p_hwfn);
/**
* @brief free sriov related memory that was allocated during hw_prepare
*
* @param p_dev
*/
void ecore_iov_free_hw_info(struct ecore_dev *p_dev);
/**
* @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*/
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
union event_ring_data *data);
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
*
* @param p_hwfn
* @param p_tlvs_list - Pointer to tlvs list
* @param req_type - Type of TLV
*
* @return pointer to tlv type if found, otherwise returns NULL.
*/
void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
/**
* @brief ecore_iov_get_vf_info - return the database of a
* specific VF
*
* @param p_hwfn
* @param relative_vf_id - relative id of the VF for which info
* is requested
* @param b_enabled_only - pass false to access the info even if the vf is disabled
*
* @return struct ecore_vf_info*
*/
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
u16 relative_vf_id,
bool b_enabled_only);
#else
static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) {return OSAL_NULL;}
static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list) {}
static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) {return ECORE_SUCCESS;}
static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) {}
static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn) {}
static OSAL_INLINE void ecore_iov_free_hw_info(struct ecore_dev *p_dev) {}
static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo, union event_ring_data *data) {return ECORE_INVAL;}
static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length) {return 0;}
static OSAL_INLINE bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs) {return 0;}
static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type) {return OSAL_NULL;}
static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, u16 relative_vf_id, bool b_enabled_only) {return OSAL_NULL;}
#endif
#endif /* __ECORE_SRIOV_H__ */

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_STATUS_H__
#define __ECORE_STATUS_H__
enum _ecore_status_t {
ECORE_CONN_REFUSED = -14,
ECORE_CONN_RESET = -13,
ECORE_UNKNOWN_ERROR = -12,
ECORE_NORESOURCES = -11,
ECORE_NODEV = -10,
ECORE_ABORTED = -9,
ECORE_AGAIN = -8,
ECORE_NOTIMPL = -7,
ECORE_EXISTS = -6,
ECORE_IO = -5,
ECORE_TIMEOUT = -4,
ECORE_INVAL = -3,
ECORE_BUSY = -2,
ECORE_NOMEM = -1,
ECORE_SUCCESS = 0,
/* PENDING is not an error and should be positive */
ECORE_PENDING = 1,
};
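/* Typical usage, as in the ramrod flows elsewhere in the driver: functions
 * return ECORE_SUCCESS (0) on success and a negative ECORE_* value on
 * failure, so callers check
 *
 *    rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 *    if (rc != ECORE_SUCCESS)
 *        return rc;
 */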
#endif /* __ECORE_STATUS_H__ */

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_UTILS_H__
#define __ECORE_UTILS_H__
/* dma_addr_t manip */
/* Suppress the "right shift count >= width of type" warning when the quantity
* is 32 bits wide; this requires the ((x >> 16) >> 16) construct instead of a
* single >> 32.
*/
#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x))
#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x))
/* It's assumed that whoever includes this has previously included an hsi
* file defining the regpair.
*/
#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \
(x).lo = DMA_LO_LE((val))
#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo))
#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t)
#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64)
#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo))
#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo))
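/* Illustrative sketch (phys is an assumed example address): a 64-bit DMA
 * address is split into 32-bit halves for the HSI and recombined with the
 * HILO helpers:
 *
 *    dma_addr_t phys = ...;
 *    u32 lo = DMA_LO(phys);
 *    u32 hi = DMA_HI(phys);
 *    dma_addr_t back = HILO_DMA(hi, lo);    (back == phys)
 *
 * DMA_REGPAIR_LE() stores the same halves, already converted to
 * little-endian, into an HSI regpair.
 */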
#endif

View File

@ -0,0 +1,355 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_VF_H__
#define __ECORE_VF_H__
#include "ecore_status.h"
#include "ecore_vf_api.h"
#include "ecore_l2_api.h"
#include "ecore_vfpf_if.h"
/* Default number of CIDs [total of both Rx and Tx] to be requested
* by a VF.
*/
#define ECORE_ETH_VF_DEFAULT_NUM_CIDS (32)
/* This data is held in the ecore_hwfn structure for VFs only. */
struct ecore_vf_iov {
union vfpf_tlvs *vf2pf_request;
dma_addr_t vf2pf_request_phys;
union pfvf_tlvs *pf2vf_reply;
dma_addr_t pf2vf_reply_phys;
/* Should be taken whenever the mailbox buffers are accessed */
osal_mutex_t mutex;
u8 *offset;
/* Bulletin Board */
struct ecore_bulletin bulletin;
struct ecore_bulletin_content bulletin_shadow;
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
/* In case PF originates prior to the fp-hsi version comparison,
* this has to be propagated as it affects the fastpath.
*/
bool b_pre_fp_hsi;
/* Current day VFs are passing the SBs physical address on vport
* start, and as they lack an IGU mapping they need to store the
* addresses of previously registered SBs.
* Even if we were to change configuration flow, due to backward
* compatibility [with older PFs] we'd still need to store these.
*/
struct ecore_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
};
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 coalesce,
struct ecore_queue_cid *p_cid);
/**
* @brief VF - Set Rx/Tx coalesce per VF's relative queue.
* Coalesce value '0' will omit the configuration.
*
* @param p_hwfn
* @param rx_coal - coalesce value in micro second for rx queue
* @param tx_coal - coalesce value in micro second for tx queue
* @param queue_cid
*
**/
enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
u16 rx_coal, u16 tx_coal,
struct ecore_queue_cid *p_cid);
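/* Illustrative sketch (64 is an example value in micro seconds): update only
 * the Rx coalescing of a queue and leave Tx untouched by passing 0, which
 * omits that side of the configuration:
 *
 *    rc = ecore_vf_pf_set_coalesce(p_hwfn, 64, 0, p_cid);
 */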
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief hw preparation for VF
* sends ACQUIRE message
*
* @param p_hwfn
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
/**
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
* @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
* @param cqe_pbl_size - pbl size
* @param pp_prod - pointer to the producer to be
* used in fastpath
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM **pp_prod);
/**
* @brief VF - start the TX queue by sending a message to the
* PF.
*
* @param p_hwfn
* @param p_cid
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
* write the doorbell to.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
* @param p_cid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid,
bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
* @param p_cid
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid *p_cid);
/* TODO - fix all the !SRIOV prototypes */
/**
* @brief VF - update the RX queue by sending a message to the
* PF
*
* @param p_hwfn
* @param pp_cid - list of queue-cids which we want to update
* @param num_rxqs
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
struct ecore_queue_cid **pp_cid,
u8 num_rxqs,
u8 comp_cqe_flg,
u8 comp_event_flg);
/**
* @brief VF - send a vport update command
*
* @param p_hwfn
* @param params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_update_params *p_params);
/**
* @brief VF - send a close message to PF
*
* @param p_hwfn
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn);
/**
* @brief VF - free the VF's memories
*
* @param p_hwfn
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs, IGU SBs don't have to be contiguous
*
* @param p_hwfn
* @param sb_id
*
* @return INLINE u16
*/
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id);
/**
* @brief Stores [or removes] a configured sb_info.
*
* @param p_hwfn
* @param sb_id - zero-based SB index [for fastpath]
* @param p_sb - may be OSAL_NULL [during removal].
*/
void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn,
u16 sb_id, struct ecore_sb_info *p_sb);
/**
* @brief ecore_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
* @param vport_id
* @param mtu
* @param inner_vlan_removal
* @param tpa_mode
* @param max_buffers_per_cqe
* @param only_untagged - default behavior regarding vlan acceptance
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_pf_vport_start(
struct ecore_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum ecore_tpa_mode tpa_mode,
u8 max_buffers_per_cqe,
u8 only_untagged);
/**
* @brief ecore_vf_pf_vport_stop - stop the VF's vport
*
* @param p_hwfn
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_vf_pf_filter_ucast(
struct ecore_hwfn *p_hwfn,
struct ecore_filter_ucast *p_param);
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
struct ecore_filter_mcast *p_filter_cmd);
/**
* @brief ecore_vf_pf_int_cleanup - clean the SB of the VF
*
* @param p_hwfn
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
* @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *p_params,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *p_link,
struct ecore_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
enum _ecore_status_t
ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
struct ecore_tunnel_info *p_tunn);
void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void OSAL_IOMEM **pp_prod) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, dma_addr_t pbl_addr, u16 pbl_size, void OSAL_IOMEM **pp_doorbell) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid, bool cqe_completion) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid *p_cid) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, struct ecore_queue_cid **pp_cid, u8 num_rxqs, u8 comp_cqe_flg, u8 comp_event_flg) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, struct ecore_sp_vport_update_params *p_params) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) {return 0;}
static OSAL_INLINE void ecore_vf_set_sb_info(struct ecore_hwfn *p_hwfn, u16 sb_id, struct ecore_sb_info *p_sb) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu, u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe, u8 only_untagged) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, struct ecore_filter_mcast *p_filter_cmd) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) {return ECORE_INVAL;}
static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_params *p_params, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_state *p_link, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_capabilities *p_link_caps, struct ecore_bulletin_content *p_bulletin) {}
static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn, struct ecore_tunnel_info *p_tunn) { return ECORE_INVAL; }
static OSAL_INLINE void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun) { return; }
#endif
#endif /* __ECORE_VF_H__ */

View File

@ -0,0 +1,198 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_VF_API_H__
#define __ECORE_VF_API_H__
#include "ecore_sp_api.h"
#include "ecore_mcp_api.h"
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
*
* @param p_hwfn
* @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise.
*
* @return enum _ecore_status
*/
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
u8 *p_change);
/**
* @brief Get link parameters for VF from ecore
*
* @param p_hwfn
* @param params - the link params structure to be filled for the VF
*/
void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_params *params);
/**
* @brief Get link state for VF from ecore
*
* @param p_hwfn
* @param link - the link state structure to be filled for the VF
*/
void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_state *link);
/**
* @brief Get link capabilities for VF from ecore
*
* @param p_hwfn
* @param p_link_caps - the link capabilities structure to be filled for the VF
*/
void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps);
/**
* @brief Get number of Rx queues allocated for VF by ecore
*
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
u8 *num_rxqs);
/**
* @brief Get number of Tx queues allocated for VF by ecore
*
* @param p_hwfn
* @param num_txqs - allocated TX queues
*/
void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
u8 *num_txqs);
/**
* @brief Get port mac address for VF
*
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by ecore
*
* @param p_hwfn
* @param num_vlan_filters - allocated VLAN filters
*/
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
* @param p_hwfn
* @param num_mac_filters - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u8 *num_mac_filters);
/**
* @brief Check if VF can set a MAC address
*
* @param p_hwfn
* @param mac
*
* @return bool
*/
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac);
/**
* @brief Copy forced MAC address from bulletin board
*
* @param hwfn
* @param dst_mac
* @param p_is_forced - out param indicating whether the mac,
* if it exists, is forced or not.
*
* @return bool - returns true if a mac exists and false if
* not.
*/
bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
u8 *p_is_forced);
/**
* @brief Check if force vlan is set and copy the forced vlan
* from bulletin board
*
* @param hwfn
* @param dst_pvid
* @return bool
*/
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid);
/**
* @brief Check if VF is based on PF whose driver is pre-fp-hsi version;
* This affects the fastpath implementation of the driver.
*
* @param p_hwfn
*
* @return bool - true iff PF is pre-fp-hsi version.
*/
bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn);
/**
* @brief Set firmware version information in dev_info from the VF's acquire response tlv
*
* @param p_hwfn
* @param fw_major
* @param fw_minor
* @param fw_rev
* @param fw_eng
*/
void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_major,
u16 *fw_minor,
u16 *fw_rev,
u16 *fw_eng);
void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
u16 *p_vxlan_port, u16 *p_geneve_port);
#else
static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, u8 *p_change) {return ECORE_INVAL;}
static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_params *params) {}
static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_state *link) {}
static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, struct ecore_mcp_link_capabilities *p_link_caps) {}
static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs) {}
static OSAL_INLINE void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn, u8 *num_txqs) {}
static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac) {}
static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, u8 *num_vlan_filters) {}
static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, u8 *num_mac_filters) {}
static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) {return false;}
static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, u8 *p_is_forced) {return false;}
static OSAL_INLINE bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn) {return false; }
static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng) {}
static OSAL_INLINE void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn, u16 *p_vxlan_port, u16 *p_geneve_port) { return; }
#endif
#endif

View File

@ -0,0 +1,672 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ECORE_VF_PF_IF_H__
#define __ECORE_VF_PF_IF_H__
#define T_ETH_INDIRECTION_TABLE_SIZE 128 /* @@@ TBD MichalK this should be HSI? */
#define T_ETH_RSS_KEY_SIZE 10 /* @@@ TBD this should be HSI? */
#define ETH_ALEN 6 /* @@@ TBD MichalK - should this be defined here?*/
/***********************************************
*
* Common definitions for all HVs
*
**/
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters; /* No limit so superfluous */
u8 num_cids;
u8 padding;
};
struct hw_sb_info {
u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */
u8 sb_qid; /* used to update DHC for sb */
u8 padding[5];
};
/***********************************************
*
* HW VF-PF channel definitions
*
* A.K.A VF-PF mailbox
*
**/
#define TLV_BUFFER_SIZE 1024
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
u16 type;
u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 padding;
u64 reply_address;
};
/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
struct channel_tlv tl;
u8 status;
u8 padding[3];
};
/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
struct pfvf_tlv hdr;
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
struct channel_tlv tl;
u8 padding[4];
};
/* Acquire */
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
/* First bit was used on 8.7.x and 8.8.x versions, which had different
* FWs used but with the same fastpath HSI. As this was prior to the
* fastpath versioning, we wanted the ability to override fw matching
* and allow them to interact.
*/
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
/* A requirement for supporting multi-Tx queues on a single queue-zone,
* VF would pass qids as additional information whenever passing queue
* references.
* TODO - due to the CID limitations in Bar0, VFs currently don't pass
* this, and use the legacy CID scheme.
*/
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS (1 << 2)
u64 capabilities;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 eth_fp_hsi_major;
u8 eth_fp_hsi_minor;
u8 padding[3];
} vfdev_info;
struct vf_pf_resc_request resc_request;
u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
struct channel_tlv tl;
u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0)
#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2)
#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3)
u8 rss_enable;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
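/* Example derived from the sizes above: rss_table_size_log == 7 describes a
 * 2^7 == 128 entry indirection table, i.e. a full T_ETH_INDIRECTION_TABLE_SIZE
 * worth of rss_ind_table[] entries.
 */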
struct pfvf_storm_stats {
u32 address;
u32 len;
};
struct pfvf_stats_info {
struct pfvf_storm_stats mstats;
struct pfvf_storm_stats pstats;
struct pfvf_storm_stats tstats;
struct pfvf_storm_stats ustats;
};
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 mfw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0)
#define PFVF_ACQUIRE_CAP_100G (1 << 1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
* mechanism [version-based] and allow a VF that can't be supported to pass
* the acquisition phase.
* To overcome this, PFs now indicate that they're past that point and the new
* VFs would fail probe on the older PFs that fail to do so.
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE (1 << 2)
/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS (1 << 3)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
/* These should match the PF's ecore_dev values */
u16 chip_rev;
u8 dev_type;
u8 padding;
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
/* It's possible PF had to configure an older fastpath HSI
* [in case VF is newer than PF]. This is communicated back
* to the VF. It can also be used in case of error due to
* non-matching versions to shed light in VF about failure.
*/
u8 major_fp_hsi;
u8 minor_fp_hsi;
} pfdev_info;
struct pf_vf_resc {
/* in case of status NO_RESOURCE in message hdr, pf will fill
* this struct with suggested amount of resources for next
* acquire request
*/
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 num_cids;
u8 padding;
} resc;
u32 bulletin_size;
u32 padding;
};
struct pfvf_start_queue_resp_tlv {
struct pfvf_tlv hdr;
u32 offset; /* offset to consumer/producer of queue */
u8 padding[4];
};
/* Extended queue information - additional index for reference inside qzone.
* If communicated between VF/PF, each TLV relating to queues should be
* extended by one such [or have a future base TLV that already contains info].
*/
struct vfpf_qid_tlv {
struct channel_tlv tl;
u8 qid;
u8 padding[3];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 rxq_addr;
u64 deprecated_sge_addr;
u64 cqe_pbl_addr;
u16 cqe_pbl_size;
u16 hw_sb;
u16 rx_qid;
u16 hc_rate; /* desired interrupts per sec. */
u16 bd_max_bytes;
u16 stat_id;
u8 sb_index;
u8 padding[3];
};
struct vfpf_start_txq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 pbl_addr;
u16 pbl_size;
u16 stat_id;
u16 tx_qid;
u16 hw_sb;
u32 flags; /* VFPF_QUEUE_FLG_X flags */
u16 hc_rate; /* desired interrupts per sec. */
u8 sb_index;
u8 padding[3];
};
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
/* While the API supports multiple Rx-queues on a single TLV
* message, in practice older VFs always used it as one [ecore].
* And there are PFs [starting with the CHANNEL_TLV_QID] which
* would start assuming this is always a '1'. So in practice this
* field should be considered deprecated and *Always* set to '1'.
*/
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
};
/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
/* While the API supports multiple Tx-queues on a single TLV
* message, in practice older VFs always used it as one [ecore].
* And there are PFs [starting with the CHANNEL_TLV_QID] which
* would start assuming this is always a '1'. So in practice this
* field should be considered deprecated and *Always* set to '1'.
*/
u8 num_txqs;
u8 padding[5];
};
struct vfpf_update_rxq_tlv {
struct vfpf_first_tlv first_tlv;
u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
u16 rx_qid;
u8 num_rxqs;
u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2)
u8 padding[4];
};
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
u8 padding[4];
};
/* Start a vport */
struct vfpf_vport_start_tlv {
struct vfpf_first_tlv first_tlv;
u64 sb_addr[PFVF_MAX_SBS_PER_VF];
u32 tpa_mode;
u16 dep1;
u16 mtu;
u8 vport_id;
u8 inner_vlan_removal;
u8 only_untagged;
u8 max_buffers_per_cqe;
u8 padding[4];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
struct channel_tlv tl;
u8 update_rx;
u8 update_tx;
u8 active_rx;
u8 active_tx;
};
struct vfpf_vport_update_tx_switch_tlv {
struct channel_tlv tl;
u8 tx_switching;
u8 padding[3];
};
struct vfpf_vport_update_vlan_strip_tlv {
struct channel_tlv tl;
u8 remove_vlan;
u8 padding[3];
};
struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
u64 bins[8];
};
struct vfpf_vport_update_accept_param_tlv {
struct channel_tlv tl;
u8 update_rx_mode;
u8 update_tx_mode;
u8 rx_accept_filter;
u8 tx_accept_filter;
};
struct vfpf_vport_update_accept_any_vlan_tlv {
struct channel_tlv tl;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 padding[2];
};
struct vfpf_vport_update_sge_tpa_tlv {
struct channel_tlv tl;
u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG (1 << 0)
#define VFPF_TPA_IPV6_EN_FLAG (1 << 1)
#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3)
#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0)
#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1)
#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2)
u8 max_buffers_per_cqe;
u16 deprecated_sge_buff_size;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
u8 tpa_max_aggs_num;
u8 padding[7];
};
/* Primary tlv as a header for various extended tlvs for
* various functionalities in vport update ramrod.
*/
struct vfpf_vport_update_tlv {
struct vfpf_first_tlv first_tlv;
};
struct vfpf_ucast_filter_tlv {
struct vfpf_first_tlv first_tlv;
u8 opcode;
u8 type;
u8 mac[ETH_ALEN];
u16 vlan;
u16 padding[3];
};
/* tunnel update param tlv */
struct vfpf_update_tunn_param_tlv {
struct vfpf_first_tlv first_tlv;
u8 tun_mode_update_mask;
u8 tunn_mode;
u8 update_tun_cls;
u8 vxlan_clss;
u8 l2gre_clss;
u8 ipgre_clss;
u8 l2geneve_clss;
u8 ipgeneve_clss;
u8 update_geneve_port;
u8 update_vxlan_port;
u16 geneve_port;
u16 vxlan_port;
u8 padding[2];
};
struct pfvf_update_tunn_param_tlv {
struct pfvf_tlv hdr;
u16 tunn_feature_mask;
u8 vxlan_mode;
u8 l2geneve_mode;
u8 ipgeneve_mode;
u8 l2gre_mode;
u8 ipgre_mode;
u8 vxlan_clss;
u8 l2gre_clss;
u8 ipgre_clss;
u8 l2geneve_clss;
u8 ipgeneve_clss;
u16 vxlan_udp_port;
u16 geneve_udp_port;
};
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
struct vfpf_update_coalesce {
struct vfpf_first_tlv first_tlv;
u16 rx_coal;
u16 tx_coal;
u16 qid;
u8 padding[2];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct vfpf_start_rxq_tlv start_rxq;
struct vfpf_start_txq_tlv start_txq;
struct vfpf_stop_rxqs_tlv stop_rxqs;
struct vfpf_stop_txqs_tlv stop_txqs;
struct vfpf_update_rxq_tlv update_rxq;
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
struct vfpf_update_tunn_param_tlv tunn_param_update;
struct vfpf_update_coalesce update_coalesce;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
struct pfvf_def_resp_tlv default_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
* when it deems it necessary to do so. The bulletin board is sampled
* periodically by the VF. A copy per VF is maintained in the PF (to prevent
* loss of data upon multiple updates (or the need for read modify write)).
*/
enum ecore_bulletin_bit {
/* Alert the VF that a forced MAC was set by the PF */
MAC_ADDR_FORCED = 0,
/* The VF should not access the vfpf channel */
VFPF_CHANNEL_INVALID = 1,
/* Alert the VF that a forced VLAN was set by the PF */
VLAN_ADDR_FORCED = 2,
/* Indicate that `default_only_untagged' contains actual data */
VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
/* Alert the VF that suggested mac was sent by the PF.
* MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set
*/
VFPF_BULLETIN_MAC_ADDR = 5
};
struct ecore_bulletin_content {
/* crc of structure to ensure is not in mid-update */
u32 crc;
u32 version;
/* bitmap indicating which fields hold valid values */
u64 valid_bitmap;
/* used for MAC_ADDR or MAC_ADDR_FORCED */
u8 mac[ETH_ALEN];
/* If valid, 1 => only untagged Rx if no vlan is configured */
u8 default_only_untagged;
u8 padding;
/* The following is a 'copy' of ecore_mcp_link_state,
* ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's
* possible the structs will increase further along the road we cannot
* have it here; Instead we need to have all of its fields.
*/
u8 req_autoneg;
u8 req_autoneg_pause;
u8 req_forced_rx;
u8 req_forced_tx;
u8 padding2[4];
u32 req_adv_speed;
u32 req_forced_speed;
u32 req_loopback;
u32 padding3;
u8 link_up;
u8 full_duplex;
u8 autoneg;
u8 autoneg_complete;
u8 parallel_detection;
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
u16 vxlan_udp_port;
u16 geneve_udp_port;
u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
u32 capability_speed;
/* Forced vlan */
u16 pvid;
u16 padding5;
};
struct ecore_bulletin {
dma_addr_t phys;
struct ecore_bulletin_content *p_virt;
u32 size;
};
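/* Illustrative sketch, not part of the driver sources: the VF periodically
 * samples its bulletin copy and should only act on it when the version has
 * advanced and the crc field matches a CRC computed over the rest of the
 * structure (using whatever CRC the PF used when posting it), so that a
 * half-written update is ignored. crc_fn and the helper name are hypothetical
 * stand-ins.
 */
static inline int
example_bulletin_is_valid(const struct ecore_bulletin_content *p_bulletin,
			  u32 last_seen_version,
			  u32 (*crc_fn)(const void *buf, u32 len))
{
	u32 crc;

	if (p_bulletin->version == last_seen_version)
		return 0;	/* nothing new since the last sample */

	crc = crc_fn((const u8 *)p_bulletin + sizeof(p_bulletin->crc),
		     sizeof(*p_bulletin) - sizeof(p_bulletin->crc));

	return crc == p_bulletin->crc;
}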
enum {
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
CHANNEL_TLV_VPORT_TEARDOWN,
CHANNEL_TLV_START_RXQ,
CHANNEL_TLV_START_TXQ,
CHANNEL_TLV_STOP_RXQS,
CHANNEL_TLV_STOP_TXQS,
CHANNEL_TLV_UPDATE_RXQ,
CHANNEL_TLV_INT_CLEANUP,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_UCAST_FILTER,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_UPDATE_TUNN_PARAM,
CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_QID,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
* Will break in case of non-sequential vport-update tlvs.
*/
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/
};
extern const char *ecore_channel_tlvs_string[];
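/* Illustrative sketch, not part of the driver sources: the enum above also
 * indexes ecore_channel_tlvs_string[] (hence the "update STRINGS" reminders),
 * so a bounds check against CHANNEL_TLV_MAX is enough before using a TLV
 * number as a debug string index. The helper name is hypothetical.
 */
static inline const char *example_channel_tlv_name(int tlv)
{
	if (tlv < CHANNEL_TLV_NONE || tlv >= CHANNEL_TLV_MAX)
		return "UNKNOWN_TLV";

	return ecore_channel_tlvs_string[tlv];
}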
#endif /* __ECORE_VF_PF_IF_H__ */

View File

@ -0,0 +1,588 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ETH_COMMON__
#define __ETH_COMMON__
/********************/
/* ETH FW CONSTANTS */
/********************/
/* FP HSI version. FP HSI is compatible if (fwVer.major == drvVer.major && fwVer.minor >= drvVer.minor) */
#define ETH_HSI_VER_MAJOR 3 /* ETH FP HSI Major version */
#define ETH_HSI_VER_MINOR 10 /* ETH FP HSI Minor version */
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 /* Alias for 8.7.x.x/8.8.x.x ETH FP HSI MINOR version. In this version driver is not required to set pkt_len field in eth_tx_1st_bd struct, and tunneling offload is not supported. */
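/* Illustrative sketch, not part of the original header: the compatibility
 * rule quoted in the comment above, written out as a hypothetical helper.
 * fw_major/fw_minor are the firmware's FP HSI version and drv_major/drv_minor
 * the version the driver was built against.
 */
static inline int example_eth_hsi_compatible(u8 fw_major, u8 fw_minor,
					     u8 drv_major, u8 drv_minor)
{
	return (fw_major == drv_major) && (fw_minor >= drv_minor);
}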
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
#define ETH_MAX_RAMROD_PER_CON 8
#define ETH_TX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_BD_PAGE_SIZE_BYTES 4096
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP header (relevant to LSO for tunneled packet): */
#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 /* Offset is limited to 253 bytes (inclusive). */
#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 /* Offset is limited to 251 bytes (inclusive). */
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
#define ETH_TX_MAX_LSO_HDR_NBD 4
#define ETH_TX_MIN_BDS_PER_LSO_PKT 3
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) /* (QM_REG_TASKBYTECRDCOST_0, QM_VOQ_BYTE_CRD_TASK_COST) - (VLAN-TAG + CRC + IPG + PREAMBLE) */
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) /* Number of BDs to consider for LSO sliding window restriction is (ETH_TX_LSO_WINDOW_BDS_NUM - hdr_nbd) */
#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 /* Minimum data length (in bytes) in LSO sliding window */
#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 /* Maximum LSO packet TCP payload length (in bytes) */
#define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 /* Number of same-as-last resources in tx switching */
#define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF /* Value for a connection for which same as last feature is disabled */
#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS /* Maximum number of statistics counters */
#define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS/2) /* Maximum number of statistics counters when doubled VF zone used */
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE (ETH_NUM_STATISTIC_COUNTERS - 3*MAX_NUM_VFS/4) /* Maximum number of statistics counters when quad VF zone used */
#define ETH_RX_MAX_BUFF_PER_PKT 5 /* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_BD_THRESHOLD 12 /* Minimum number of free BDs in RX ring, that guarantee receiving of at least one RX packet. */
/* num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512
/* approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 /* CRC seed for multicast bin calculation */
#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
/* ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 /* number of RSS indirection table entries, per Vport */
#define ETH_RSS_KEY_SIZE_REGS 10 /* Length of RSS key (in regs) */
#define ETH_RSS_ENGINE_NUM_K2 207 /* number of available RSS engines in K2 */
#define ETH_RSS_ENGINE_NUM_BB 127 /* number of available RSS engines in BB */
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64 /* Maximum number of open TPA aggregations */
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT /* Maximum number of additional buffers, reported by TPA-start CQE */
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 /* Maximum number of buffers, reported by TPA-continue CQE */
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Maximum number of buffers, reported by TPA-end CQE */
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4 /* Number of etherType values configured by the driver for control frame check */
/*
* Destination port mode
*/
enum dest_port_mode
{
DEST_PORT_PHY /* Send to physical port. */,
DEST_PORT_LOOPBACK /* Send to loopback port. */,
DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */,
DEST_PORT_DROP /* Drop the packet in PBF. */,
MAX_DEST_PORT_MODE
};
/*
* Ethernet address type
*/
enum eth_addr_type
{
BROADCAST_ADDRESS,
MULTICAST_ADDRESS,
UNICAST_ADDRESS,
UNKNOWN_ADDRESS,
MAX_ETH_ADDR_TYPE
};
struct eth_tx_1st_bd_flags
{
u8 bitfields;
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 /* Set to 1 in the first BD. (for debug) */
#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 /* Do not allow additional VLAN manipulations on this packet. */
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 /* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 /* Recalculate TCP/UDP checksum. For tunneled packet - relevant to inner header. */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 /* If set, insert VLAN tag from vlan field to the packet. For tunneled packet - relevant to outer header. */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 /* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively) bytes, inclusive. */
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 /* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 /* Recalculate Tunnel UDP/GRE Checksum (Depending on Tunnel Type) */
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
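/* Illustrative sketch, not part of the original header: the bitfields byte
 * above is assembled with the MASK/SHIFT pairs, e.g. to ask the firmware to
 * recalculate the (inner) IP and L4 checksums on a packet's first BD. The
 * helper name is hypothetical.
 */
static inline void example_request_csum_offload(struct eth_tx_1st_bd_flags *f)
{
	f->bitfields |= (ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK <<
			 ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT);
	f->bitfields |= (ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK <<
			 ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT);
}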
/*
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd
{
__le16 vlan /* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */;
u8 nbds /* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet. */;
struct eth_tx_1st_bd_flags bd_flags;
__le16 bitfields;
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 /* Indicates a tunneled packet. Must be set for encapsulated packet. */
#define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0
#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1
#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1
#define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF /* Total packet length - must be filled for non-LSO packets. */
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
/*
* The parsing information data for the second tx bd of a given packet.
*/
struct eth_tx_data_2nd_bd
{
__le16 tunn_ip_size /* For tunnel with IPv6+ext - Tunnel header IP datagram length (in BYTEs) */;
__le16 bitfields1;
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF /* For Tunnel header with IPv6 ext. - Inner L2 Header Size (in 2-byte WORDs) */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 /* For Tunnel header with IPv6 ext. - Inner L2 Header MAC DA Type (use enum eth_addr_type) */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 /* Destination port mode. (use enum dest_port_mode) */
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 /* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 /* For Tunnel header with IPv6 ext. - Tunnel Type (use enum eth_tx_tunn_type) */
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 /* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 /* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension. Otherwise set to 1 if the header is IPv6 with extension. */
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 /* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext) */
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 /* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with inner or outer Ipv6+ext) and l4_csum is set) */
#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 /* The pseudo header checksum type in the L4 checksum field. Required when IPv6+ext and l4_csum is set. (use enum eth_l4_pseudo_checksum_mode) */
#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15
__le16 bitfields2;
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF /* For inner/outer header IPv6+ext - (inner) L4 header offset (in 2-byte WORDs). For regular packet - offset from the beginning of the packet. For tunneled packet - offset from the beginning of the inner header */
#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
/*
* Firmware data for L2-EDPM packet.
*/
struct eth_edpm_fw_data
{
struct eth_tx_data_1st_bd data_1st_bd /* Parsing information data from the 1st BD. */;
struct eth_tx_data_2nd_bd data_2nd_bd /* Parsing information data from the 2nd BD. */;
__le32 reserved;
};
/*
* FW debug.
*/
struct eth_fast_path_cqe_fw_debug
{
__le16 reserved2 /* FW reserved. */;
};
/*
* tunneling parsing flags
*/
struct eth_tunnel_parsing_flags
{
u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 /* 0 - no tunneling, 1 - GENEVE, 2 - GRE, 3 - VXLAN (use enum eth_rx_tunn_type) */
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0
#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 /* If it's not an encapsulated packet then put 0x0. If it's an encapsulated packet but the tenant-id doesn't exist then put 0x0. Else put 0x1 */
#define ETH_TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2
#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 /* Type of the next header above the tunneling: 0 - unknown, 1 - L2, 2 - Ipv4, 3 - IPv6 (use enum tunnel_next_protocol) */
#define ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3
#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 /* The result of comparing the DA-ip of the tunnel header. */
#define ETH_TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1
#define ETH_TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7
};
/*
* PMD flow control bits
*/
struct eth_pmd_flow_flags
{
u8 flags;
#define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 /* CQE valid bit */
#define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0
#define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 /* CQE ring toggle bit */
#define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1
#define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
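/* Illustrative sketch, not part of the original header: a consumer polling a
 * PMD-style CQE ring can use the flags byte above to decide whether a CQE is
 * new, by checking the valid bit and comparing the toggle bit against the
 * phase expected for the current pass over the ring. The helper name and the
 * expected_toggle convention are hypothetical.
 */
static inline int example_pmd_cqe_is_new(const struct eth_pmd_flow_flags *f,
					 u8 expected_toggle)
{
	u8 valid = (f->flags >> ETH_PMD_FLOW_FLAGS_VALID_SHIFT) &
		   ETH_PMD_FLOW_FLAGS_VALID_MASK;
	u8 toggle = (f->flags >> ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT) &
		    ETH_PMD_FLOW_FLAGS_TOGGLE_MASK;

	return valid && (toggle == expected_toggle);
}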
/*
* Regular ETH Rx FP CQE.
*/
struct eth_fast_path_rx_reg_cqe
{
u8 type /* CQE type */;
u8 bitfields;
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 /* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF /* Traffic Class */
#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7
__le16 pkt_len /* Total packet length (from the parser) */;
struct parsing_and_err_flags pars_flags /* Parsing and error flags from the parser */;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
struct eth_tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */;
u8 bd_num /* Number of BDs, used for packet */;
u8 reserved[9];
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
u8 reserved1[3];
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-continue ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_cont_cqe
{
u8 type /* CQE type */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* List of the segment sizes */;
u8 reserved;
u8 reserved1 /* FW reserved. */;
__le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */;
u8 reserved3[3];
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-end ETH Rx FP CQE .
*/
struct eth_fast_path_rx_tpa_end_cqe
{
u8 type /* CQE type */;
u8 tpa_agg_index /* TPA aggregation index */;
__le16 total_packet_len /* Total aggregated packet length */;
u8 num_of_bds /* Total number of BDs comprising the packet */;
u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */;
__le16 num_of_coalesced_segs /* Number of coalesced TCP segments */;
__le32 ts_delta /* TCP timestamp delta */;
__le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* List of the segment sizes */;
__le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */;
__le16 reserved1;
u8 reserved2 /* FW reserved. */;
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* TPA-start ETH Rx FP CQE.
*/
struct eth_fast_path_rx_tpa_start_cqe
{
u8 type /* CQE type */;
u8 bitfields;
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 /* Type of calculated RSS hash (use enum rss_hash_type) */
#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF /* Traffic Class */
#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1
#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7
__le16 seg_len /* Segment length (packetLen from the parser) */;
struct parsing_and_err_flags pars_flags /* Parsing and error flags from the parser */;
__le16 vlan_tag /* 802.1q VLAN tag */;
__le32 rss_hash /* RSS hash result */;
__le16 len_on_first_bd /* Number of bytes placed on first BD */;
u8 placement_offset /* Offset of placement from BD start */;
struct eth_tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */;
u8 tpa_agg_index /* TPA aggregation index */;
u8 header_len /* Packet L2+L3+L4 header length */;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE] /* Additional BDs length list. */;
struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */;
u8 reserved;
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* The L4 pseudo checksum mode for Ethernet
*/
enum eth_l4_pseudo_checksum_mode
{
ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH /* Pseudo Header checksum on packet is calculated with the correct packet length field. */,
ETH_L4_PSEUDO_CSUM_ZERO_LENGTH /* Pseudo Header checksum on packet is calculated with zero length field. */,
MAX_ETH_L4_PSEUDO_CHECKSUM_MODE
};
struct eth_rx_bd
{
struct regpair addr /* Single continuous buffer */;
};
/*
* regular ETH Rx SP CQE
*/
struct eth_slow_path_rx_cqe
{
u8 type /* CQE type */;
u8 ramrod_cmd_id;
u8 error_flag;
u8 reserved[25];
__le16 echo;
u8 reserved1;
struct eth_pmd_flow_flags pmd_flags /* CQE valid and toggle bits */;
};
/*
* union for all ETH Rx CQE types
*/
union eth_rx_cqe
{
struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */;
struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start /* TPA-start CQE */;
struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont /* TPA-continue CQE */;
struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */;
struct eth_slow_path_rx_cqe slow_path /* SP CQE */;
};
/*
* ETH Rx CQE type
*/
enum eth_rx_cqe_type
{
ETH_RX_CQE_TYPE_UNUSED,
ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */,
ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */,
ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */,
MAX_ETH_RX_CQE_TYPE
};
/*
* Wrapper for PD RX CQE - used in order to cover full cache line when writing CQE
*/
struct eth_rx_pmd_cqe
{
union eth_rx_cqe cqe /* CQE data itself */;
u8 reserved[ETH_RX_CQE_GAP];
};
/*
* Eth RX Tunnel Type
*/
enum eth_rx_tunn_type
{
ETH_RX_NO_TUNN /* No Tunnel. */,
ETH_RX_TUNN_GENEVE /* GENEVE Tunnel. */,
ETH_RX_TUNN_GRE /* GRE Tunnel. */,
ETH_RX_TUNN_VXLAN /* VXLAN Tunnel. */,
MAX_ETH_RX_TUNN_TYPE
};
/*
* Aggregation end reason.
*/
enum eth_tpa_end_reason
{
ETH_AGG_END_UNUSED,
ETH_AGG_END_SP_UPDATE /* SP configuration update */,
ETH_AGG_END_MAX_LEN /* Maximum aggregation length or maximum buffer number used. */,
ETH_AGG_END_LAST_SEG /* TCP PSH flag or TCP payload length below continue threshold. */,
ETH_AGG_END_TIMEOUT /* Timeout expiration. */,
ETH_AGG_END_NOT_CONSISTENT /* Packet header not consistent: different IPv4 TOS, TTL or flags, IPv6 TC, Hop limit or Flow label, TCP header length or TS options. In GRO different TS value, SMAC, DMAC, ackNum, windowSize or VLAN */,
ETH_AGG_END_OUT_OF_ORDER /* Out of order or retransmission packet: sequence, ack or timestamp not consistent with previous segment. */,
ETH_AGG_END_NON_TPA_SEG /* Next segment can't be aggregated due to LLC/SNAP, IP error, IP fragment, IPv4 options, IPv6 extension, IP ECN = CE, TCP errors, TCP options, zero TCP payload length, TCP flags or unsupported tunnel header options. */,
MAX_ETH_TPA_END_REASON
};
/*
* The first tx bd of a given packet
*/
struct eth_tx_1st_bd
{
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_1st_bd data /* Parsing information data. */;
};
/*
* The second tx bd of a given packet
*/
struct eth_tx_2nd_bd
{
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_2nd_bd data /* Parsing information data. */;
};
/*
* The parsing information data for the third tx bd of a given packet.
*/
struct eth_tx_data_3rd_bd
{
__le16 lso_mss /* For LSO packet - the MSS in bytes. */;
__le16 bitfields;
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF /* For LSO with inner/outer IPv6+ext - TCP header length (in 4-byte WORDs) */
#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0
#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF /* LSO - number of BDs which contain headers. value should be in range (1..ETH_TX_MAX_LSO_HDR_NBD). */
#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4
#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 /* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F
#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9
u8 tunn_l4_hdr_start_offset_w /* For tunnel with IPv6+ext - Pointer to the tunnel L4 Header (in 2-byte WORDs) */;
u8 tunn_hdr_size_w /* For tunnel with IPv6+ext - Total size of the Tunnel Header (in 2-byte WORDs) */;
};
/*
* The third tx bd of a given packet
*/
struct eth_tx_3rd_bd
{
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_3rd_bd data /* Parsing information data. */;
};
/*
* Complementary information for the regular tx bd of a given packet.
*/
struct eth_tx_data_bd
{
__le16 reserved0;
__le16 bitfields;
#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF
#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0
#define ETH_TX_DATA_BD_START_BD_MASK 0x1 /* Should be 0 in all the BDs, except the first one. (for debug) */
#define ETH_TX_DATA_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/*
* The common regular TX BD ring element
*/
struct eth_tx_bd
{
struct regpair addr /* Single continuous buffer */;
__le16 nbytes /* Number of bytes in this BD. */;
struct eth_tx_data_bd data /* Complementary information. */;
};
union eth_tx_bd_types
{
struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */;
struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */;
struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */;
struct eth_tx_bd reg_bd /* The common non-special bd */;
};
/*
* Eth Tx Tunnel Type
*/
enum eth_tx_tunn_type
{
ETH_TX_TUNN_GENEVE /* GENEVE Tunnel. */,
ETH_TX_TUNN_TTAG /* T-Tag Tunnel. */,
ETH_TX_TUNN_GRE /* GRE Tunnel. */,
ETH_TX_TUNN_VXLAN /* VXLAN Tunnel. */,
MAX_ETH_TX_TUNN_TYPE
};
/*
* Xstorm Queue Zone
*/
struct xstorm_eth_queue_zone
{
struct coalescing_timeset int_coalescing_timeset /* Tx interrupt coalescing TimeSet */;
u8 reserved[7];
};
/*
* ETH doorbell data
*/
struct eth_db_data
{
u8 params;
#define ETH_DB_DATA_DEST_MASK 0x3 /* destination of doorbell (use enum db_dest) */
#define ETH_DB_DATA_DEST_SHIFT 0
#define ETH_DB_DATA_AGG_CMD_MASK 0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define ETH_DB_DATA_AGG_CMD_SHIFT 2
#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 /* enable QM bypass */
#define ETH_DB_DATA_BYPASS_EN_SHIFT 4
#define ETH_DB_DATA_RESERVED_MASK 0x1
#define ETH_DB_DATA_RESERVED_SHIFT 5
#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 /* aggregative value selection */
#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6
u8 agg_flags /* bit for every DQ counter flags in CM context that DQ can increment */;
__le16 bd_prod;
};
#endif /* __ETH_COMMON__ */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,55 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
/************************************************************************/
/* Add include to common rdma target for both eCore and protocol rdma driver */
/************************************************************************/
#include "rdma_common.h"
/************************/
/* IWARP FW CONSTANTS */
/************************/
#define IWARP_ACTIVE_MODE 0
#define IWARP_PASSIVE_MODE 1
#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) //32KB page for Shared Queue Page
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) //First 12KB of Shared Queue Page is reserved for FW
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) //Max RQ PBL Size is 4KB
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) //Max SQ PBL Size is 12KB
#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) //max size of inline data in single request
#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) //Maximum size of single SQ WQE (rdma wqe and inline data)
#define IWARP_MAX_QPS (64*1024)
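/* Illustrative sketch, not part of the original header: the offsets above lay
 * out the 32KB shared queue page, with the RQ PBL region at 0x4000 and the SQ
 * PBL region at 0x5000 from its base. Hypothetical helpers that locate those
 * regions given the page's virtual address:
 */
static inline void *example_iwarp_rq_pbl_base(void *shared_queue_page)
{
	return (u8 *)shared_queue_page + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
}

static inline void *example_iwarp_sq_pbl_base(void *shared_queue_page)
{
	return (u8 *)shared_queue_page + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
}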
#endif /* __IWARP_COMMON__ */

View File

@ -0,0 +1,382 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/****************************************************************************
*
* Name: mcp_private.h
*
* Description: MCP private data. Located in HSI only to provide debug access
* for diag.
*
****************************************************************************/
#ifndef MCP_PRIVATE_H
#define MCP_PRIVATE_H
#if (!defined MFW_SIM) && (!defined RECOVERY)
#include "eth.h"
#include "pmm.h"
#include "ah_eth.h"
#endif
#include "mcp_public.h"
typedef enum active_mf_mode {
MF_MODE_SF = 0,
MF_MODE_MF_ALLOWED,
MF_MODE_MF_SWITCH_INDEPENDENT,
MF_MODE_NIV
} active_mf_mode_t;
enum ov_current_cfg {
CURR_CFG_NONE = 0,
CURR_CFG_OS,
CURR_CFG_VENDOR_SPEC,
CURR_CFG_OTHER,
CURR_CFG_VC_CLP,
CURR_CFG_CNU,
CURR_CFG_DCI,
CURR_CFG_HII,
};
struct dci_info_global {
enum ov_current_cfg current_cfg;
u8 pci_bus_num;
u8 boot_progress;
};
/* Resource allocation information of one resource */
struct resource_info_private {
u16 size; /* number of allocated resources */
u16 offset; /* Offset of the 1st resource */
u8 flags;
};
/* Cache for resource allocation of one PF */
struct res_alloc_cache {
u8 pf_num;
struct resource_info_private res[RESOURCE_MAX_NUM];
};
struct pf_sb_t {
u8 sb_for_pf_size;
u8 sb_for_pf_offset;
u8 sb_for_vf_size;
u8 sb_for_vf_offset;
};
/**************************************/
/* */
/* P R I V A T E G L O B A L */
/* */
/**************************************/
struct private_global {
active_mf_mode_t mf_mode; /* TBD - require initialization */
u32 exp_rom_nvm_addr;
/* The pmm_config structure holds all active phy/link configuration */
#ifndef RECOVERY
#ifdef b900
struct pmm_config eth_cfg;
#else
struct ah_eth eth_cfg;
#endif
#endif
u32 lldp_counter;
u32 avs_init_timestamp;
u32 seconds_since_mcp_reset;
u32 last_malloc_dir_used_timestamp;
#define MAX_USED_DIR_ALLOWED_TIME (3) /* Seconds */
u32 drv_nvm_state;
/* Per PF bitmask */
#define DRV_NVM_STATE_IN_PROGRESS_MASK (0x0000ffff)
#define DRV_NVM_STATE_IN_PROGRESS_OFFSET (0)
u32 storm_fw_ver;
/* OneView data*/
struct dci_info_global dci_global;
/* Resource allocation cached data */
struct res_alloc_cache res_alloc;
#define G_RES_ALLOC_P (&g_spad.private_data.global.res_alloc)
u32 resource_max_values[RESOURCE_MAX_NUM];
};
/**************************************/
/* */
/* P R I V A T E P A T H */
/* */
/**************************************/
struct private_path {
u32 recovery_countdown; /* Counting down 2 seconds, using TMR3 */
#define RECOVERY_MAX_COUNTDOWN_SECONDS 2
u32 drv_load_vars; /* When the seconds_since_mcp_reset gets here */
#define DRV_LOAD_TIMEOUT_MASK 0x0000ffff
#define DRV_LOAD_TIMEOUT_SHIFT 0
#define DRV_LOAD_NEED_FORCE_MASK 0xffff0000
#define DRV_LOAD_NEED_FORCE_SHIFT 16
struct load_rsp_stc drv_load_params;
};
/**************************************/
/* */
/* P R I V A T E P O R T */
/* */
/**************************************/
struct drv_port_info_t {
u32_t port_state;
#define DRV_STATE_LINK_LOCK_FLAG 0x00000001
#define DRV_WAIT_DBG_PRN 0x00000002
/* There are maximum 8 PFs per port */
#define DRV_STATE_LOADED_MASK 0x0000ff00
#define DRV_STATE_LOADED_SHIFT 8
#define DRV_STATE_PF_TRANSITION_MASK 0x00ff0000
#define DRV_STATE_PF_TRANSITION_SHIFT 16
#define DRV_STATE_PF_PHY_INIT_MASK 0xff000000
#define DRV_STATE_PF_PHY_INIT_SHIFT 24
};
typedef enum _lldp_subscriber_e {
LLDP_SUBSCRIBER_MANDATORY = 0,
LLDP_SUBSCRIBER_DCBX_IEEE,
LLDP_SUBSCRIBER_DCBX_CEE,
LLDP_SUBSCRIBER_EEE,
LLDP_SUBSCRIBER_DCI,
MAX_SUBSCRIBERS
} lldp_subscriber_e;
typedef struct {
u16 valid;
u16 type_len;
#define LLDP_LEN_MASK (0x01ff)
#define LLDP_LEN_SHIFT (0)
#define LLDP_TYPE_MASK (0xfe00)
#define LLDP_TYPE_SHIFT (9)
u8 *value_p;
} tlv_s;
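/* Illustrative sketch, not part of the MFW sources: type_len above packs the
 * standard LLDP TLV header, a 7-bit type in the upper bits and a 9-bit length
 * in the lower bits, per the masks defined in the struct. Helper names are
 * hypothetical.
 */
static inline u16 example_lldp_tlv_type(u16 type_len)
{
	return (type_len & LLDP_TYPE_MASK) >> LLDP_TYPE_SHIFT;
}

static inline u16 example_lldp_tlv_len(u16 type_len)
{
	return (type_len & LLDP_LEN_MASK) >> LLDP_LEN_SHIFT;
}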
typedef u16(*lldp_prepare_tlv_func)(u8 port, lldp_agent_e lldp_agent, u8 *buffer);
typedef struct {
u16 valid;
lldp_prepare_tlv_func func;
} subscriber_callback_send_s;
typedef u8(*lldp_process_func)(u8 port, u8 num, u8 **tlvs);
#define MAX_NUM_SUBTYPES 4
typedef struct {
u8 valid;
u8 oui[3];
u8 subtype_list[MAX_NUM_SUBTYPES];
u8 num_subtypes;
lldp_process_func func;
} subscriber_callback_receive_s;
#define MAX_ETH_HEADER 14 /* TODO: to be extended per requirements */
#define MAX_PACKET_SIZE (1516) /* So it is divisible by 4 */
#define LLDP_CHASSIS_ID_TLV_LEN 7
#define LLDP_PORT_ID_TLV_LEN 7
#define MAX_TLV_BUFFER 128 /* In dwords. 512 in bytes*/
typedef struct {
u16 len;
u8 header[MAX_ETH_HEADER];
} lldp_eth_header_s;
typedef struct {
struct lldp_config_params_s lldp_config_params;
u16 lldp_ttl;
u8 lldp_cur_credit;
subscriber_callback_send_s subscriber_callback_send[MAX_SUBSCRIBERS];
lldp_eth_header_s lldp_eth_header;
u32 lldp_time_to_send;
u32 lldp_ttl_expired;
u32 lldp_sent;
u8 first_lldp;
subscriber_callback_receive_s subscriber_callback_receive[MAX_SUBSCRIBERS];
} lldp_params_s;
#define MAX_TLVS 20
typedef struct {
u8 current_received_tlv_index;
u8 *received_tlvs[MAX_TLVS];
} lldp_receive_data_s;
#define MAX_REGISTERED_TLVS 6
typedef struct {
u32 config; /* Uses same defines as local config plus some more below*/
#define DCBX_MODE_MASK 0x00000010
#define DCBX_MODE_SHIFT 4
#define DCBX_MODE_DRIVER 0
#define DCBX_MODE_DEFAULT 1
#define DCBX_CHANGED_MASK 0x00000f00
#define DCBX_CHANGED_SHIFT 8
#define DCBX_CONTROL_CHANGED_MASK 0x00000100
#define DCBX_CONTROL_CHANGED_SHIFT 8
#define DCBX_PFC_CHANGED_MASK 0x00000200
#define DCBX_PFC_CHANGED_SHIFT 9
#define DCBX_ETS_CHANGED_MASK 0x00000400
#define DCBX_ETS_CHANGED_SHIFT 10
#define DCBX_APP_CHANGED_MASK 0x00000800
#define DCBX_APP_CHANGED_SHIFT 11
u32 seq_no;
u32 ack_no;
u32 received_seq_no;
u8 tc_map[8];
u8 num_used_tcs;
} dcbx_state_s;
#ifdef CONFIG_HP_DCI_SUPPORT
struct dci_info_port {
u32 config;
#define DCI_PORT_CFG_ENABLE_SHIFT (0)
#define DCI_PORT_CFG_ENABLE_MASK (1 << DCI_PORT_CFG_ENABLE_SHIFT)
#define DCI_PORT_CFG_ENABLE_DIAG_SHIFT (1)
#define DCI_PORT_CFG_ENABLE_DIAG_MASK (1 << DCI_PORT_CFG_ENABLE_DIAG_SHIFT)
#define DCI_PORT_CFG_DIAG_L_LOOP_SHIFT (2)
#define DCI_PORT_CFG_DIAG_L_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_L_LOOP_SHIFT)
#define DCI_PORT_CFG_DIAG_R_LOOP_SHIFT (3)
#define DCI_PORT_CFG_DIAG_R_LOOP_MASK (1 << DCI_PORT_CFG_DIAG_R_LOOP_SHIFT)
};
#endif
struct private_port {
struct drv_port_info_t port_info;
active_mf_mode_t mf_mode;
u32 prev_link_change_count;
/* LLDP structures */
lldp_params_s lldp_params[LLDP_MAX_LLDP_AGENTS];
lldp_receive_data_s lldp_receive_data[MAX_SUBSCRIBERS];
/* DCBX */
dcbx_state_s dcbx_state;
u32 net_buffer[MAX_PACKET_SIZE / 4]; /* Buffer to send any packet to network */
/* time stamp of the end of NIG drain time for the TX drain */
u32 nig_drain_end_ts;
/* time stamp of the end of NIG drain time for the TC pause drain, this timer is used together for all TCs */
u32 nig_drain_tc_end_ts;
u32 tc_drain_en_bitmap;
u32 recv_lldp_tlvs[LLDP_MAX_LLDP_AGENTS][MAX_TLV_BUFFER];
tlv_s lldp_core_tlv_desc[LLDP_MAX_LLDP_AGENTS][MAX_REGISTERED_TLVS];
u8 current_core_tlv_num[LLDP_MAX_LLDP_AGENTS];
struct mcp_mac lldp_mac;
#ifdef CONFIG_HP_DCI_SUPPORT
struct dci_info_port dci_port;
#endif
u32 temperature;
};
/**************************************/
/* */
/* P R I V A T E F U N C */
/* */
/**************************************/
struct drv_func_info_t {
u32_t func_state;
#define DRV_STATE_UNKNOWN 0x00000000
#define DRV_STATE_UNLOADED 0x00000001
#define DRV_STATE_D3 0x00000004
#define DRV_STATE_PRESENT_FLAG 0x00000100
#define DRV_STATE_RUNNING (0x00000002 | DRV_STATE_PRESENT_FLAG)
#define DRV_STATE_NOT_RESPONDING 0x00000003 /* Will result in a non-zero value when compared with DRV_STATE_RUNNING or with DRV_STATE_UNLOADED */
#define DRV_STATE_BACK_AFTER_TO (DRV_STATE_NOT_RESPONDING | DRV_STATE_PRESENT_FLAG)
#define DRV_STATE_DIAG (0x00000010 | DRV_STATE_PRESENT_FLAG)
#define DRV_STATE_TRANSITION_FLAG 0x00001000
#define DRV_STATE_LOADING_TRANSITION (DRV_STATE_TRANSITION_FLAG | DRV_STATE_PRESENT_FLAG)
#define DRV_STATE_UNLOADING_TRANSITION (DRV_STATE_TRANSITION_FLAG | DRV_STATE_PRESENT_FLAG | DRV_STATE_UNLOADED)
u32_t driver_last_activity;
u32_t wol_mac_addr[2];
u32_t drv_feature_support; /* See DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_* */
u8_t unload_wol_param; /* See drv_mb_param */
u8_t eswitch_mode;
};
struct dci_info_func {
u8 config;
#define DCI_FUNC_CFG_FNIC_ENABLE_SHIFT (0)
#define DCI_FUNC_CFG_FNIC_ENABLE_MASK (1 << DCI_FUNC_CFG_FNIC_ENABLE_SHIFT)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_SHIFT (1)
#define DCI_FUNC_CFG_OS_MTU_OVERRIDE_MASK (1 << DCI_FUNC_CFG_OS_MTU_OVERRIDE_SHIFT)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_SHIFT (2)
#define DCI_FUNC_CFG_DIAG_WOL_ENABLE_MASK (1 << DCI_FUNC_CFG_DIAG_WOL_ENABLE_SHIFT)
u8 drv_state;
u16 fcoe_cvid;
u8 fcoe_fabric_name[8];
};
struct private_func {
struct drv_func_info_t func_info;
u32 init_hw_page;
u32 num_of_msix;
struct pf_sb_t sb;
struct dci_info_func dci_func;
};
/**************************************/
/* */
/* P R I V A T E D A T A */
/* */
/**************************************/
struct mcp_private_data {
/* Basically no need for section offsets here, since this is private data.
* TBD - should consider adding section offsets if we want diag to parse this correctly !!
*/
struct private_global global;
struct private_path path[MCP_GLOB_PATH_MAX];
struct private_port port[MCP_GLOB_PORT_MAX];
struct private_func func[MCP_GLOB_FUNC_MAX];
};
#endif /* MCP_PRIVATE_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/****************************************************************************
*
* Name: mfw_hsi.h
*
* Description: Global definitions
*
****************************************************************************/
#ifndef MFW_HSI_H
#define MFW_HSI_H
#define MFW_TRACE_SIGNATURE 0x25071946
/* The trace in the buffer */
#define MFW_TRACE_EVENTID_MASK 0x00ffff
#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
#define MFW_TRACE_PRM_SIZE_SHIFT 16
#define MFW_TRACE_ENTRY_SIZE 3
struct mcp_trace {
u32 signature; /* Help to identify that the trace is valid */
u32 size; /* the size of the trace buffer in bytes*/
u32 curr_level; /* 2 - all will be written to the buffer
* 1 - debug trace will not be written
* 0 - just errors will be written to the buffer
*/
u32 modules_mask[2];/* a bit per module, 1 means write it, 0 means mask it */
/* Warning: the following pointers are assumed to be 32bits as they are used only in the MFW */
u32 trace_prod; /* The next trace will be written to this offset */
u32 trace_oldest; /* The oldest valid trace starts at this offset (usually very close after the current producer) */
};
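/* Illustrative sketch, not part of the MFW sources: each trace entry starts
 * with a header whose low bits carry the event id and whose PRM_SIZE bits
 * give the size of its parameters, per the masks defined above. Helper names
 * are hypothetical.
 */
static inline u32 example_mcp_trace_event_id(u32 header)
{
	return header & MFW_TRACE_EVENTID_MASK;
}

static inline u32 example_mcp_trace_num_params(u32 header)
{
	return (header & MFW_TRACE_PRM_SIZE_MASK) >> MFW_TRACE_PRM_SIZE_SHIFT;
}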
#endif /* MFW_HSI_H */

1947 sys/dev/qlnx/qlnxe/nvm_cfg.h Normal file

File diff suppressed because it is too large

View File

@ -0,0 +1,302 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/****************************************************************************
* Name: nvm_map.h
*
* Description: Everest NVRAM map
*
****************************************************************************/
#ifndef NVM_MAP_H
#define NVM_MAP_H
#define CRC_MAGIC_VALUE 0xDEBB20E3
#define CRC32_POLYNOMIAL 0xEDB88320
#define NVM_CRC_SIZE (sizeof(u32))
enum nvm_sw_arbitrator {
NVM_SW_ARB_HOST,
NVM_SW_ARB_MCP,
NVM_SW_ARB_UART,
NVM_SW_ARB_RESERVED
};
/****************************************************************************
* Boot Strap Region *
****************************************************************************/
struct legacy_bootstrap_region {
u32 magic_value; /* a pattern not likely to occur randomly */
#define NVM_MAGIC_VALUE 0x669955aa
u32 sram_start_addr; /* where to locate LIM code (byte addr) */
u32 code_len; /* boot code length (in dwords) */
u32 code_start_addr; /* location of code on media (media byte addr) */
u32 crc; /* 32-bit CRC */
};
/****************************************************************************
* Directories Region *
****************************************************************************/
struct nvm_code_entry {
u32 image_type; /* Image type */
u32 nvm_start_addr; /* NVM address of the image */
u32 len; /* Include CRC */
u32 sram_start_addr; /* Where to load the image on the scratchpad */
u32 sram_run_addr; /* Relevant in case of MIM only */
};
enum nvm_image_type {
NVM_TYPE_TIM1 = 0x01,
NVM_TYPE_TIM2 = 0x02,
NVM_TYPE_MIM1 = 0x03,
NVM_TYPE_MIM2 = 0x04,
NVM_TYPE_MBA = 0x05,
NVM_TYPE_MODULES_PN = 0x06,
NVM_TYPE_VPD = 0x07,
NVM_TYPE_MFW_TRACE1 = 0x08,
NVM_TYPE_MFW_TRACE2 = 0x09,
NVM_TYPE_NVM_CFG1 = 0x0a,
NVM_TYPE_L2B = 0x0b,
NVM_TYPE_DIR1 = 0x0c,
NVM_TYPE_EAGLE_FW1 = 0x0d,
NVM_TYPE_FALCON_FW1 = 0x0e,
NVM_TYPE_PCIE_FW1 = 0x0f,
NVM_TYPE_HW_SET = 0x10,
NVM_TYPE_LIM = 0x11,
NVM_TYPE_AVS_FW1 = 0x12,
NVM_TYPE_DIR2 = 0x13,
NVM_TYPE_CCM = 0x14,
NVM_TYPE_EAGLE_FW2 = 0x15,
NVM_TYPE_FALCON_FW2 = 0x16,
NVM_TYPE_PCIE_FW2 = 0x17,
NVM_TYPE_AVS_FW2 = 0x18,
NVM_TYPE_INIT_HW = 0x19,
NVM_TYPE_DEFAULT_CFG= 0x1a,
NVM_TYPE_MDUMP = 0x1b,
NVM_TYPE_NVM_META = 0x1c,
NVM_TYPE_ISCSI_CFG = 0x1d,
NVM_TYPE_FCOE_CFG = 0x1f,
NVM_TYPE_ETH_PHY_FW1 = 0x20,
NVM_TYPE_ETH_PHY_FW2 = 0x21,
NVM_TYPE_BDN = 0x22,
NVM_TYPE_8485X_PHY_FW = 0x23,
NVM_TYPE_PUB_KEY = 0x24,
NVM_TYPE_RECOVERY = 0x25,
NVM_TYPE_MAX,
};
#ifdef DEFINE_IMAGE_TABLE
struct image_map {
char name[32];
char option[32];
u32 image_type;
};
struct image_map g_image_table[] = {
{"TIM1", "-tim1", NVM_TYPE_TIM1},
{"TIM2", "-tim2", NVM_TYPE_TIM2},
{"MIM1", "-mim1", NVM_TYPE_MIM1},
{"MIM2", "-mim2", NVM_TYPE_MIM2},
{"MBA", "-mba", NVM_TYPE_MBA},
{"OPT_MODULES", "-optm", NVM_TYPE_MODULES_PN},
{"VPD", "-vpd", NVM_TYPE_VPD},
{"MFW_TRACE1", "-mfwt1", NVM_TYPE_MFW_TRACE1},
{"MFW_TRACE2", "-mfwt2", NVM_TYPE_MFW_TRACE2},
{"NVM_CFG1", "-cfg", NVM_TYPE_NVM_CFG1},
{"L2B", "-l2b", NVM_TYPE_L2B},
{"DIR1", "-dir1", NVM_TYPE_DIR1},
{"EAGLE_FW1", "-eagle1", NVM_TYPE_EAGLE_FW1},
{"FALCON_FW1", "-falcon1", NVM_TYPE_FALCON_FW1},
{"PCIE_FW1", "-pcie1", NVM_TYPE_PCIE_FW1},
{"HW_SET", "-hw_set", NVM_TYPE_HW_SET},
{"LIM", "-lim", NVM_TYPE_LIM},
{"AVS_FW1", "-avs1", NVM_TYPE_AVS_FW1},
{"DIR2", "-dir2", NVM_TYPE_DIR2},
{"CCM", "-ccm", NVM_TYPE_CCM},
{"EAGLE_FW2", "-eagle2", NVM_TYPE_EAGLE_FW2},
{"FALCON_FW2", "-falcon2", NVM_TYPE_FALCON_FW2},
{"PCIE_FW2", "-pcie2", NVM_TYPE_PCIE_FW2},
{"AVS_FW2", "-avs2", NVM_TYPE_AVS_FW2},
{"INIT_HW", "-init_hw", NVM_TYPE_INIT_HW},
{"DEFAULT_CFG", "-def_cfg", NVM_TYPE_DEFAULT_CFG},
{"CRASH_DUMP", "-mdump", NVM_TYPE_MDUMP},
{"META", "-meta", NVM_TYPE_NVM_META},
{"ISCSI_CFG", "-iscsi_cfg", NVM_TYPE_ISCSI_CFG},
{"FCOE_CFG", "-fcoe_cfg",NVM_TYPE_FCOE_CFG},
{"ETH_PHY_FW1", "-ethphy1", NVM_TYPE_ETH_PHY_FW1},
{"ETH_PHY_FW2", "-ethphy2", NVM_TYPE_ETH_PHY_FW2},
{"BDN", "-bdn", NVM_TYPE_BDN},
{"PK", "-pk", NVM_TYPE_PUB_KEY},
{"RECOVERY", "-recovery",NVM_TYPE_RECOVERY}
};
#define IMAGE_TABLE_SIZE (sizeof(g_image_table) / sizeof(struct image_map))
#endif /* #ifdef DEFINE_IMAGE_TABLE */
#define MAX_NVM_DIR_ENTRIES 150
/* Note: 150 entries are allowed since each file occupies at least one flash page anyway. */
struct nvm_dir {
s32 seq; /* This dword is used to indicate whether this dir is valid, and whether it is more updated than the other dir */
#define NVM_DIR_NEXT_MFW_MASK 0x00000001
#define NVM_DIR_SEQ_MASK 0xfffffffe
#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw) \
do { \
_seq = (((_seq + 2) & NVM_DIR_SEQ_MASK) | (NVM_DIR_NEXT_MFW(_seq ^ swap_mfw))); \
} while (0)
#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
u32 num_images;
u32 rsrv;
struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
};
#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + (_num_images - 1) * sizeof(struct nvm_code_entry) + NVM_CRC_SIZE)
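/* Illustrative sketch, not part of the original header: a hypothetical update
 * path would first check that the on-flash sequence word is valid and then
 * advance it with NVM_DIR_UPDATE_SEQ(), optionally flipping the "next MFW
 * bundle" bit, before writing the refreshed directory back. Names below are
 * made up for the example.
 */
static inline s32 example_nvm_dir_advance_seq(s32 seq, u32 swap_mfw)
{
	if (!IS_DIR_SEQ_VALID(seq))
		seq = 0;		/* fall back to a known-good value */

	NVM_DIR_UPDATE_SEQ(seq, swap_mfw);

	return seq;
}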
struct nvm_vpd_image {
u32 format_revision;
#define VPD_IMAGE_VERSION 1
/* This array length depends on the number of VPD fields */
u8 vpd_data[1];
};
/****************************************************************************
* NVRAM FULL MAP *
****************************************************************************/
#define DIR_ID_1 (0)
#define DIR_ID_2 (1)
#define MAX_DIR_IDS (2)
#define MFW_BUNDLE_1 (0)
#define MFW_BUNDLE_2 (1)
#define MAX_MFW_BUNDLES (2)
#define FLASH_PAGE_SIZE 0x1000
#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */
#define ASIC_MIM_MAX_SIZE (300*FLASH_PAGE_SIZE) /* 1.2Mb */
#define FPGA_MIM_MAX_SIZE (62*FLASH_PAGE_SIZE) /* 250Kb */
/* Each image must start on its own page. Bootstrap and LIM are bound together, so they can share the same page.
* The LIM itself should be very small, so it is limited to 8KB, but in order to start the next image on a new page, the bootstrap and reserved sizes are subtracted from it.
*/
#define LIM_MAX_SIZE ((2*FLASH_PAGE_SIZE) - sizeof(struct legacy_bootstrap_region) - NVM_RSV_SIZE)
#define LIM_OFFSET (NVM_OFFSET(lim_image))
#define NVM_RSV_SIZE (44)
#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : FPGA_MIM_MAX_SIZE )
#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + ((idx == NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + MIM_MAX_SIZE(is_asic)*2)
union nvm_dir_union {
struct nvm_dir dir;
u8 page[FLASH_PAGE_SIZE];
};
/* Address
* +-------------------+ 0x000000
* | Bootstrap: |
* | magic_number |
* | sram_start_addr |
* | code_len |
* | code_start_addr |
* | crc |
* +-------------------+ 0x000014
* | rsrv |
* +-------------------+ 0x000040
* | LIM |
* +-------------------+ 0x002000
* | Dir1 |
* +-------------------+ 0x003000
* | Dir2 |
* +-------------------+ 0x004000
* | MIM1 |
* +-------------------+ 0x130000
* | MIM2 |
* +-------------------+ 0x25C000
* | Rest Images: |
* | TIM1/2 |
* | MFW_TRACE1/2 |
* | Eagle/Falcon FW |
* | PCIE/AVS FW |
* | MBA/CCM/L2B |
* | VPD |
* | optic_modules |
* | ... |
* +-------------------+ 0x400000
*/
struct nvm_image {
/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
/* NVM Offset (size) */
struct legacy_bootstrap_region bootstrap; /* 0x000000 (0x000014) */
u8 rsrv[NVM_RSV_SIZE]; /* 0x000014 (0x00002c) */
u8 lim_image[LIM_MAX_SIZE]; /* 0x000040 (0x001fc0) */
union nvm_dir_union dir[MAX_MFW_BUNDLES]; /* 0x002000 (0x001000)x2 */
/* MIM1_IMAGE 0x004000 (0x12c000) */
/* MIM2_IMAGE 0x130000 (0x12c000) */
/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
}; /* 0x134 */
#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image*)0)->f))))
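/* Illustrative note, not part of the original header: NVM_OFFSET() is the
 * classic offsetof construction over struct nvm_image, so offsets in the map
 * drawn above can be derived directly from the structure layout. A
 * hypothetical example for the LIM image:
 */
static inline u32_t example_lim_flash_offset(void)
{
	return NVM_OFFSET(lim_image);	/* 0x000040 per the map above */
}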
struct hw_set_info {
u32 reg_type;
#define GRC_REG_TYPE 1
#define PHY_REG_TYPE 2
#define PCI_REG_TYPE 4
u32 bank_num;
u32 pf_num;
u32 operation;
#define READ_OP 1
#define WRITE_OP 2
#define RMW_SET_OP 3
#define RMW_CLR_OP 4
u32 reg_addr;
u32 reg_data;
u32 reset_type;
#define POR_RESET_TYPE (1 << 0)
#define HARD_RESET_TYPE (1 << 1)
#define CORE_RESET_TYPE (1 << 2)
#define MCP_RESET_TYPE (1 << 3)
#define PERSET_ASSERT (1 << 4)
#define PERSET_DEASSERT (1 << 5)
};
struct hw_set_image {
u32 format_version;
#define HW_SET_IMAGE_VERSION 1
u32 no_hw_sets;
/* This array length depends on the no_hw_sets */
struct hw_set_info hw_sets[1];
};
#endif //NVM_MAP_H

View File

@ -0,0 +1,667 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _PCICS_REG_DRIVER_H
#define _PCICS_REG_DRIVER_H
/* offset of configuration space in the pci core register */
#define PCICFG_OFFSET 0x2000
#define PCICFG_VENDOR_ID_OFFSET 0x00
#define PCICFG_DEVICE_ID_OFFSET 0x02
#define PCICFG_COMMAND_OFFSET 0x04
#define PCICFG_COMMAND_IO_SPACE (1<<0)
#define PCICFG_COMMAND_MEM_SPACE (1<<1)
#define PCICFG_COMMAND_BUS_MASTER (1<<2)
#define PCICFG_COMMAND_SPECIAL_CYCLES (1<<3)
#define PCICFG_COMMAND_MWI_CYCLES (1<<4)
#define PCICFG_COMMAND_VGA_SNOOP (1<<5)
#define PCICFG_COMMAND_PERR_ENA (1<<6)
#define PCICFG_COMMAND_STEPPING (1<<7)
#define PCICFG_COMMAND_SERR_ENA (1<<8)
#define PCICFG_COMMAND_FAST_B2B (1<<9)
#define PCICFG_COMMAND_INT_DISABLE (1<<10)
#define PCICFG_COMMAND_RESERVED (0x1f<<11)
#define PCICFG_STATUS_OFFSET 0x06
#define PCICFG_REVISION_ID_OFFSET 0x08
#define PCICFG_REVESION_ID_MASK 0xff
#define PCICFG_REVESION_ID_ERROR_VAL 0xff
#define PCICFG_CACHE_LINE_SIZE 0x0c
#define PCICFG_LATENCY_TIMER 0x0d
#define PCICFG_HEADER_TYPE 0x0e
#define PCICFG_HEADER_TYPE_NORMAL 0
#define PCICFG_HEADER_TYPE_BRIDGE 1
#define PCICFG_HEADER_TYPE_CARDBUS 2
#define PCICFG_BAR_1_LOW 0x10
#define PCICFG_BAR_1_HIGH 0x14
#define PCICFG_BAR_2_LOW 0x18
#define PCICFG_BAR_2_HIGH 0x1c
#define PCICFG_BAR_3_LOW 0x20
#define PCICFG_BAR_3_HIGH 0x24
#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET 0x2c
#define PCICFG_SUBSYSTEM_ID_OFFSET 0x2e
#define PCICFG_INT_LINE 0x3c
#define PCICFG_INT_PIN 0x3d
#define PCICFG_PM_CAPABILITY 0x48
#define PCICFG_PM_CAPABILITY_VERSION (0x3<<16)
#define PCICFG_PM_CAPABILITY_CLOCK (1<<19)
#define PCICFG_PM_CAPABILITY_RESERVED (1<<20)
#define PCICFG_PM_CAPABILITY_DSI (1<<21)
#define PCICFG_PM_CAPABILITY_AUX_CURRENT (0x7<<22)
#define PCICFG_PM_CAPABILITY_D1_SUPPORT (1<<25)
#define PCICFG_PM_CAPABILITY_D2_SUPPORT (1<<26)
#define PCICFG_PM_CAPABILITY_PME_IN_D0 (1<<27)
#define PCICFG_PM_CAPABILITY_PME_IN_D1 (1<<28)
#define PCICFG_PM_CAPABILITY_PME_IN_D2 (1<<29)
#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT (1<<30)
#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD (1<<31)
#define PCICFG_PM_CSR_OFFSET 0x4c
#define PCICFG_PM_CSR_STATE (0x3<<0)
#define PCICFG_PM_CSR_PME_ENABLE (1<<8)
#define PCICFG_PM_CSR_PME_STATUS (1<<15)
#define PCICFG_MSI_CAP_ID_OFFSET 0x58
#define PCICFG_MSI_CONTROL_ENABLE (0x1<<16)
#define PCICFG_MSI_CONTROL_MCAP (0x7<<17)
#define PCICFG_MSI_CONTROL_MENA (0x7<<20)
#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
#define PCICFG_GRC_ADDRESS 0x78
#define PCICFG_GRC_DATA 0x80
#define PCICFG_ME_REGISTER 0x98
#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
#define PCICFG_MSIX_CONTROL_FUNC_MASK (0x1<<30)
#define PCICFG_MSIX_CONTROL_MSIX_ENABLE (0x1<<31)
#define PCICFG_DEVICE_CONTROL 0xb4
#define PCICFG_DEVICE_CONTROL_NP_TRANSACTION_PEND (1<<21)
#define PCICFG_DEVICE_STATUS 0xb6
#define PCICFG_DEVICE_STATUS_CORR_ERR_DET (1<<0)
#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET (1<<1)
#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET (1<<2)
#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET (1<<3)
#define PCICFG_DEVICE_STATUS_AUX_PWR_DET (1<<4)
#define PCICFG_DEVICE_STATUS_NO_PEND (1<<5)
#define PCICFG_LINK_CONTROL 0xbc
#define PCICFG_DEVICE_STATUS_CONTROL_2 (0xd4)
#define PCICFG_DEVICE_STATUS_CONTROL_2_ATOMIC_REQ_ENABLE (1<<6)
/* config_2 offset */
#define GRC_CONFIG_2_SIZE_REG 0x408
#define PCI_CONFIG_2_BAR1_SIZE (0xfL<<0)
#define PCI_CONFIG_2_BAR1_SIZE_DISABLED (0L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_64K (1L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_128K (2L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_256K (3L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_512K (4L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_1M (5L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_2M (6L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_4M (7L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_8M (8L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_16M (9L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_32M (10L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_64M (11L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_128M (12L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_256M (13L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_512M (14L<<0)
#define PCI_CONFIG_2_BAR1_SIZE_1G (15L<<0)
#define PCI_CONFIG_2_BAR1_64ENA (1L<<4)
#define PCI_CONFIG_2_EXP_ROM_RETRY (1L<<5)
#define PCI_CONFIG_2_CFG_CYCLE_RETRY (1L<<6)
#define PCI_CONFIG_2_FIRST_CFG_DONE (1L<<7)
#define PCI_CONFIG_2_EXP_ROM_SIZE (0xffL<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED (0L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_2K (1L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_4K (2L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_8K (3L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_16K (4L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_32K (5L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_64K (6L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_128K (7L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_256K (8L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_512K (9L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_1M (10L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_2M (11L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_4M (12L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_8M (13L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_16M (14L<<8)
#define PCI_CONFIG_2_EXP_ROM_SIZE_32M (15L<<8)
#define PCI_CONFIG_2_BAR_PREFETCH (1L<<16)
#define PCI_CONFIG_2_RESERVED0 (0x7fffL<<17)
/* config_3 offset */
#define GRC_CONFIG_3_SIZE_REG 0x40c
#define PCI_CONFIG_3_STICKY_BYTE (0xffL<<0)
#define PCI_CONFIG_3_FORCE_PME (1L<<24)
#define PCI_CONFIG_3_PME_STATUS (1L<<25)
#define PCI_CONFIG_3_PME_ENABLE (1L<<26)
#define PCI_CONFIG_3_PM_STATE (0x3L<<27)
#define PCI_CONFIG_3_VAUX_PRESET (1L<<30)
#define PCI_CONFIG_3_PCI_POWER (1L<<31)
#define GRC_REG_DEVICE_CONTROL 0x4d8
/* When VF Enable is cleared (after it was previously set),
* this register will read a value of 1, indicating that all the
* VFs that belong to this PF should be flushed.
* Software should clear this bit within 1 second of VF Enable
* being set by writing a 1 to it, so that VFs are visible to the system
* again. WC
*/
#define PCIE_SRIOV_DISABLE_IN_PROGRESS (1 << 29)
/* When FLR is initiated, this register will read a value of 1 indicating
* that the Function is in FLR state. Func can be brought out of FLR state
* either by writing 1 to this register (at least 50 ms after FLR was
* initiated), or it can be cleared automatically after 55 ms if the
* auto_clear bit in private reg space is set. This bit also exists in
* VF register space. WC
*/
#define PCIE_FLR_IN_PROGRESS (1 << 27)
#define GRC_BAR2_CONFIG 0x4e0
#define PCI_CONFIG_2_BAR2_SIZE (0xfL<<0)
#define PCI_CONFIG_2_BAR2_SIZE_DISABLED (0L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_64K (1L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_128K (2L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_256K (3L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_512K (4L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_1M (5L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_2M (6L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_4M (7L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_8M (8L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_16M (9L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_32M (10L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_64M (11L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_128M (12L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_256M (13L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_512M (14L<<0)
#define PCI_CONFIG_2_BAR2_SIZE_1G (15L<<0)
#define PCI_CONFIG_2_BAR2_64ENA (1L<<4)
#define GRC_BAR3_CONFIG 0x4f4
#define PCI_CONFIG_2_BAR3_SIZE (0xfL<<0)
#define PCI_CONFIG_2_BAR3_SIZE_DISABLED (0L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_64K (1L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_128K (2L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_256K (3L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_512K (4L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_1M (5L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_2M (6L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_4M (7L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_8M (8L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_16M (9L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_32M (10L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_64M (11L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_128M (12L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_256M (13L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_512M (14L<<0)
#define PCI_CONFIG_2_BAR3_SIZE_1G (15L<<0)
#define PCI_CONFIG_2_BAR3_64ENA (1L<<4)
#define PCI_PM_DATA_A 0x410
#define PCI_PM_DATA_B 0x414
#define PCI_ID_VAL1 0x434
#define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c
#define PCI_ID_VAL3_REVISION_ID_ERROR (0xffL<<24)
#define GRC_CONFIG_REG_VF_BAR_REG_1 0x608
#define GRC_CONFIG_REG_VF_BAR_REG_BAR0_SIZE 0xf
#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
/* This field resides in VF only and does not exist in PF.
* This register controls the read value of the MSIX_CONTROL[10:0] register
* in the VF configuration space. A value of "00000000011" indicates
* a table size of 4. The value is controlled by IOV_MSIX_TBL_SIZ
* define in version.v
*/
#define GRC_CR_VF_MSIX_CTRL_VF_MSIX_TBL_SIZE_MASK 0x3F
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
/* First VF_NUM for PF is encoded in this register.
* The number of VFs assigned to a PF is assumed to be a multiple of 8.
* Software should program these bits based on Total Number of VFs programmed
* for each PF.
* Since registers from 0x000-0x7ff are split across functions, each PF will
* have the same location for the same 4 bits.
*/
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
#define PXPCS_TL_CONTROL_5 0x814
#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN (1 << 29) /*WC*/
#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN (1 << 28) /*WC*/
#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN (1 << 27) /*WC*/
#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN (1 << 26) /*WC*/
#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR (1 << 25) /*WC*/
#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW (1 << 24) /*WC*/
#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN (1 << 23) /*RO*/
#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN (1 << 22) /*RO*/
#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE (1 << 21) /*WC*/
#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG (1 << 20) /*WC*/
#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1 (1 << 19) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 (1 << 18) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_ECRC1 (1 << 17) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1 (1 << 16) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1 (1 << 15) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1 (1 << 14) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1 (1 << 13) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1 (1 << 12) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1 (1 << 11) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1 (1 << 10) /*WC*/
#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT (1 << 9) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT (1 << 8) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_ECRC (1 << 7) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP (1 << 6) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW (1 << 5) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL (1 << 4) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT (1 << 3) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT (1 << 2) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL (1 << 1) /*WC*/
#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP (1 << 0) /*WC*/
#define PXPCS_TL_FUNC345_STAT 0x854
#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4 (1 << 29) /* WC */
/* Unsupported Request Error Status in function 4, if set, generate
*pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 (1 << 28)
/*ECRC Error TLP Status Status in function 4, if set,
*generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4 (1 << 27)
/*Malformed TLP Status Status in function 4, if set,
*generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4 (1 << 26)
/*Receiver Overflow Status Status in function 4, if set,
*generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4 (1 << 25)
/*Unexpected Completion Status Status in function 4, if set,
*generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4 (1 << 24)
/* Receive UR Status in function 4. If set, generate pcie_err_attn output
* when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4 (1 << 23)
/* Completer Timeout Status Status in function 4, if set,
* generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4 (1 << 22)
/* Flow Control Protocol Error Status Status in function 4,
* if set, generate pcie_err_attn output when this error is seen.
* WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4 (1 << 21)
/* Poisoned Error Status Status in function 4, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4 (1 << 20)
#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3 (1 << 19) /* WC */
/* Unsupported Request Error Status in function 3, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 (1 << 18)
/* ECRC Error TLP Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3 (1 << 17)
/* Malformed TLP Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3 (1 << 16)
/* Receiver Overflow Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3 (1 << 15)
/* Unexpected Completion Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3 (1 << 14)
/* Receive UR Status in function 3. If set, generate pcie_err_attn output
* when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3 (1 << 13)
/* Completer Timeout Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3 (1 << 12)
/* Flow Control Protocol Error Status Status in function 3, if set,
* generate pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3 (1 << 11)
/* Poisoned Error Status Status in function 3, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3 (1 << 10)
#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2 (1 << 9) /* WC */
/* Unsupported Request Error Status for Function 2, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2 (1 << 8)
/* ECRC Error TLP Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen..WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2 (1 << 7)
/* Malformed TLP Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2 (1 << 6)
/* Receiver Overflow Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2 (1 << 5)
/* Unexpected Completion Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2 (1 << 4)
/* Receive UR Status for Function 2. If set, generate pcie_err_attn output
* when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 (1 << 3)
/* Completer Timeout Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 (1 << 2)
/* Flow Control Protocol Error Status Status for Function 2, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2 (1 << 1)
/* Poisoned Error Status Status for Function 2, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2 (1 << 0)
#define PXPCS_TL_FUNC678_STAT 0x85C
#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7 (1 << 29) /* WC */
/* Unsupported Request Error Status in function 7, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 (1 << 28)
/* ECRC Error TLP Status Status in function 7, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7 (1 << 27)
/* Malformed TLP Status Status in function 7, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7 (1 << 26)
/* Receiver Overflow Status Status in function 7, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7 (1 << 25)
/* Unexpected Completion Status Status in function 7, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7 (1 << 24)
/* Receive UR Status in function 7. If set, generate pcie_err_attn
* output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7 (1 << 23)
/* Completer Timeout Status Status in function 7, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7 (1 << 22)
/* Flow Control Protocol Error Status Status in function 7, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7 (1 << 21)
/* Poisoned Error Status Status in function 7, if set,
* generate pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7 (1 << 20)
#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6 (1 << 19) /* WC */
/* Unsupported Request Error Status in function 6, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 (1 << 18)
/* ECRC Error TLP Status Status in function 6, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6 (1 << 17)
/* Malformed TLP Status Status in function 6, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6 (1 << 16)
/* Receiver Overflow Status Status in function 6, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6 (1 << 15)
/* Unexpected Completion Status Status in function 6, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6 (1 << 14)
/* Receive UR Status in function 6. If set, generate pcie_err_attn
* output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6 (1 << 13)
/* Completer Timeout Status Status in function 6, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6 (1 << 12)
/* Flow Control Protocol Error Status Status in function 6, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6 (1 << 11)
/* Poisoned Error Status Status in function 6, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6 (1 << 10)
#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5 (1 << 9) /* WC */
/* Unsupported Request Error Status for Function 5, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5 (1 << 8)
/* ECRC Error TLP Status Status for Function 5, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5 (1 << 7)
/* Malformed TLP Status Status for Function 5, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5 (1 << 6)
/* Receiver Overflow Status Status for Function 5, if set, generate
* pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5 (1 << 5)
/* Unexpected Completion Status Status for Function 5, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5 (1 << 4)
/* Receive UR Status for Function 5. If set, generate pcie_err_attn output
* when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 (1 << 3)
/* Completer Timeout Status Status for Function 5, if set, generate
* pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 (1 << 2)
/* Flow Control Protocol Error Status Status for Function 5, if set,
* generate pcie_err_attn output when this error is seen. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5 (1 << 1)
/* Poisoned Error Status Status for Function 5, if set,
* generate pcie_err_attn output when this error is seen.. WC
*/
#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5 (1 << 0)
/* PCI CAPABILITIES
*/
#define PCI_CAP_PCIE 0x10 /*PCIe capability ID*/
#define PCIE_DEV_CAPS 0x04
#ifndef PCIE_DEV_CAPS_FLR_CAPABILITY
#define PCIE_DEV_CAPS_FLR_CAPABILITY (1 << 28)
#endif
#define PCIE_DEV_CTRL 0x08
#define PCIE_DEV_CTRL_FLR 0x8000
#define PCIE_DEV_STATUS 0x0A
#ifndef PCIE_DEV_STATUS_PENDING_TRANSACTION
#define PCIE_DEV_STATUS_PENDING_TRANSACTION (1 << 5)
#endif
#ifndef PCI_CAPABILITY_LIST
/* Offset of first capability list entry */
#define PCI_CAPABILITY_LIST 0x34
#endif
#define PCI_CAPABILITY_LIST_MASK 0xff
#ifndef PCI_CB_CAPABILITY_LIST
#define PCI_CB_CAPABILITY_LIST 0x14
#endif
#if (defined(__LINUX)) || (defined(PCI_CAP_LIST_ID))
#define PCI_CAP_LIST_ID_DEF
#endif
#if (defined(__LINUX)) || (defined(PCI_CAP_LIST_NEXT))
#define PCI_CAP_LIST_NEXT_DEF
#endif
#if (defined(__LINUX)) || (defined(PCI_STATUS))
#define PCI_STATUS_DEF
#endif
#if (defined(__LINUX)) || (defined(PCI_STATUS_CAP_LIST))
#define PCI_STATUS_CAP_LIST_DEF
#endif
#ifndef PCI_CAP_LIST_ID_DEF
#define PCI_CAP_LIST_ID 0x0 /* Capability ID */
#endif
#define PCI_CAP_LIST_ID_MASK 0xff
#ifndef PCI_CAP_LIST_NEXT_DEF
/* Next capability in the list */
#define PCI_CAP_LIST_NEXT 0x1
#endif
#define PCI_CAP_LIST_NEXT_MASK 0xff
#ifndef PCI_STATUS_DEF
#define PCI_STATUS 0x6 /* 16 bits */
#endif
#ifndef PCI_STATUS_CAP_LIST_DEF
/* Support Capability List */
#define PCI_STATUS_CAP_LIST 0x10
#endif
#ifndef PCI_SRIOV_CAP
/* Some PCI Config defines... need to put this in a better location... */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
#endif
#ifndef PCI_CAP_ID_EXP
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#endif
#ifndef PCI_EXP_DEVCTL
#define PCI_EXP_DEVCTL 8 /* Device Control */
#endif
#ifndef PCI_EXP_DEVCTL_RELAX_EN
#define PCI_EXP_DEVCTL_RELAX_EN 0x0010 /* Enable relaxed ordering */
#endif
#endif

View File

@@ -0,0 +1,599 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/*
* File: qlnx_def.h
* Author : David C Somayajulu, Cavium Inc., San Jose, CA 95131.
*/
#ifndef _QLNX_DEF_H_
#define _QLNX_DEF_H_
#define VER_SIZE 16
struct qlnx_ivec {
uint32_t rss_idx;
void *ha;
struct resource *irq;
void *handle;
int irq_rid;
};
typedef struct qlnx_ivec qlnx_ivec_t;
//#define QLNX_MAX_RSS 30
#define QLNX_MAX_RSS 16
#define QLNX_MAX_TC 1
enum QLNX_STATE {
QLNX_STATE_CLOSED,
QLNX_STATE_OPEN,
};
#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
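/*
 * Illustrative example (editorial addition): HILO_U64() reassembles a 64-bit
 * value from its two 32-bit halves, e.g.
 * HILO_U64(0x00000001, 0x00000002) == 0x0000000100000002ULL.
 */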
#define MAX_NUM_TC 8
#define MAX_NUM_PRI 8
#ifndef BITS_PER_BYTE
#define BITS_PER_BYTE 8
#endif /* #ifndef BITS_PER_BYTE */
/*
* RX ring buffer contains pointer to kmalloc() data only.
*/
struct sw_rx_data {
void *data;
bus_dmamap_t map;
dma_addr_t dma_addr;
};
enum qlnx_agg_state {
QLNX_AGG_STATE_NONE = 0,
QLNX_AGG_STATE_START = 1,
QLNX_AGG_STATE_ERROR = 2
};
struct qlnx_agg_info {
/* rx_buf is a data buffer that can be placed/consumed from the rx bd
* chain. It has two purposes: We will preallocate the data buffer
* for each aggregation when we open the interface and will place this
* buffer on the rx-bd-ring when we receive TPA_START. We don't want
* to be in a state where allocation fails, as we can't reuse the
* consumer buffer in the rx-chain since FW may still be writing to it
* (since the header needs to be modified for TPA).
* The second purpose is to keep a pointer to the bd buffer during
* aggregation.
*/
struct sw_rx_data rx_buf;
enum qlnx_agg_state agg_state;
uint16_t placement_offset;
struct mbuf *mpf; /* first mbuf in chain */
struct mbuf *mpl; /* last mbuf in chain */
};
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE (1 << RX_RING_SIZE_POW)
#define TX_RING_SIZE_POW 14
#define TX_RING_SIZE (1 << TX_RING_SIZE_POW)
struct qlnx_rx_queue {
volatile __le16 *hw_cons_ptr;
struct sw_rx_data sw_rx_ring[RX_RING_SIZE];
uint16_t sw_rx_cons;
uint16_t sw_rx_prod;
struct ecore_chain rx_bd_ring;
struct ecore_chain rx_comp_ring;
void __iomem *hw_rxq_prod_addr;
void *handle;
/* LRO */
struct qlnx_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
uint32_t rx_buf_size;
uint16_t num_rx_buffers;
uint16_t rxq_id;
#ifdef QLNX_SOFT_LRO
struct lro_ctrl lro;
#endif
};
union db_prod {
struct eth_db_data data;
uint32_t raw;
};
struct sw_tx_bd {
struct mbuf *mp;
bus_dmamap_t map;
uint8_t flags;
int nsegs;
/* Set on the first BD descriptor when there is a split BD */
#define QLNX_TSO_SPLIT_BD (1<<0)
};
#define QLNX_MAX_SEGMENTS 255
struct qlnx_tx_queue {
int index; /* Queue index */
volatile __le16 *hw_cons_ptr;
struct sw_tx_bd sw_tx_ring[TX_RING_SIZE];
uint16_t sw_tx_cons;
uint16_t sw_tx_prod;
struct ecore_chain tx_pbl;
void __iomem *doorbell_addr;
void *handle;
union db_prod tx_db;
bus_dma_segment_t segs[QLNX_MAX_SEGMENTS];
uint16_t num_tx_buffers;
};
#define BD_UNMAP_ADDR(bd) HILO_U64(le32toh((bd)->addr.hi), \
le32toh((bd)->addr.lo))
#define BD_UNMAP_LEN(bd) (le16toh((bd)->nbytes))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
do { \
(bd)->addr.hi = htole32(U64_HI(maddr)); \
(bd)->addr.lo = htole32(U64_LO(maddr)); \
(bd)->nbytes = htole16(len); \
} while (0)
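/*
 * Usage sketch (editorial addition; 'bd', 'paddr' and 'len' are hypothetical
 * names): for a buffer descriptor 'bd' covering a DMA segment at bus address
 * 'paddr' of 'len' bytes, the transmit path would typically program and later
 * read back the BD as
 *
 *	BD_SET_UNMAP_ADDR_LEN(bd, paddr, len);
 *	...
 *	bus_addr = BD_UNMAP_ADDR(bd);	recovers paddr
 *	nbytes   = BD_UNMAP_LEN(bd);	recovers len
 */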
#define QLNX_FP_MAX_SEGS 24
struct qlnx_fastpath {
void *edev;
uint8_t rss_id;
struct ecore_sb_info *sb_info;
struct qlnx_rx_queue *rxq;
struct qlnx_tx_queue *txq[MAX_NUM_TC];
char name[64];
struct mtx tx_mtx;
char tx_mtx_name[32];
struct buf_ring *tx_br;
struct task fp_task;
struct taskqueue *fp_taskqueue;
/* transmit statistics */
uint64_t tx_pkts_processed;
uint64_t tx_pkts_freed;
uint64_t tx_pkts_transmitted;
uint64_t tx_pkts_completed;
uint64_t tx_lso_wnd_min_len;
uint64_t tx_defrag;
uint64_t tx_nsegs_gt_elem_left;
uint32_t tx_tso_max_nsegs;
uint32_t tx_tso_min_nsegs;
uint32_t tx_tso_max_pkt_len;
uint32_t tx_tso_min_pkt_len;
uint64_t tx_pkts[QLNX_FP_MAX_SEGS];
uint64_t err_tx_nsegs_gt_elem_left;
uint64_t err_tx_dmamap_create;
uint64_t err_tx_defrag_dmamap_load;
uint64_t err_tx_non_tso_max_seg;
uint64_t err_tx_dmamap_load;
uint64_t err_tx_defrag;
uint64_t err_tx_free_pkt_null;
uint64_t err_tx_cons_idx_conflict;
uint64_t lro_cnt_64;
uint64_t lro_cnt_128;
uint64_t lro_cnt_256;
uint64_t lro_cnt_512;
uint64_t lro_cnt_1024;
/* receive statistics */
uint64_t rx_pkts;
uint64_t tpa_start;
uint64_t tpa_cont;
uint64_t tpa_end;
uint64_t err_m_getcl;
uint64_t err_m_getjcl;
uint64_t err_rx_hw_errors;
uint64_t err_rx_alloc_errors;
uint64_t err_rx_jumbo_chain_pkts;
uint64_t err_rx_mp_null;
uint64_t err_rx_tpa_invalid_agg_num;
};
struct qlnx_update_vport_params {
uint8_t vport_id;
uint8_t update_vport_active_rx_flg;
uint8_t vport_active_rx_flg;
uint8_t update_vport_active_tx_flg;
uint8_t vport_active_tx_flg;
uint8_t update_inner_vlan_removal_flg;
uint8_t inner_vlan_removal_flg;
struct ecore_rss_params *rss_params;
struct ecore_sge_tpa_params *sge_tpa_params;
};
/*
* link related
*/
struct qlnx_link_output {
bool link_up;
uint32_t supported_caps;
uint32_t advertised_caps;
uint32_t link_partner_caps;
uint32_t speed; /* In Mb/s */
bool autoneg;
uint32_t media_type;
uint32_t duplex;
};
typedef struct qlnx_link_output qlnx_link_output_t;
#define QLNX_LINK_DUPLEX 0x0001
#define QLNX_LINK_CAP_FIBRE 0x0001
#define QLNX_LINK_CAP_Autoneg 0x0002
#define QLNX_LINK_CAP_Pause 0x0004
#define QLNX_LINK_CAP_Asym_Pause 0x0008
#define QLNX_LINK_CAP_1000baseT_Half 0x0010
#define QLNX_LINK_CAP_1000baseT_Full 0x0020
#define QLNX_LINK_CAP_10000baseKR_Full 0x0040
#define QLNX_LINK_CAP_25000baseKR_Full 0x0080
#define QLNX_LINK_CAP_40000baseLR4_Full 0x0100
#define QLNX_LINK_CAP_50000baseKR2_Full 0x0200
#define QLNX_LINK_CAP_100000baseKR4_Full 0x0400
/* Functions definition */
#define XMIT_PLAIN 0
#define XMIT_L4_CSUM (1 << 0)
#define XMIT_LSO (1 << 1)
#define CQE_FLAGS_ERR (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << \
PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT | \
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << \
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT | \
PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << \
PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | \
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << \
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT)
#define RX_COPY_THRESH 92
#define ETH_MAX_PACKET_SIZE 1500
#define QLNX_MFW_VERSION_LENGTH 32
#define QLNX_STORMFW_VERSION_LENGTH 32
#define QLNX_TX_ELEM_RESERVE 2
#define QLNX_TPA_MAX_AGG_BUFFERS (20)
#define QLNX_MAX_NUM_MULTICAST_ADDRS ECORE_MAX_MC_ADDRS
typedef struct _qlnx_mcast {
uint16_t rsrvd;
uint8_t addr[6];
} __packed qlnx_mcast_t;
/*
* Adapter structure contains the hardware independent information of the
* pci function.
*/
struct qlnx_host {
/* interface to ecore */
struct ecore_dev cdev;
uint32_t state;
/* some flags */
volatile struct {
volatile uint32_t
callout_init :1,
slowpath_start :1,
parent_tag :1,
lock_init :1;
} flags;
/* interface to o.s */
device_t pci_dev;
uint8_t pci_func;
uint8_t dev_unit;
struct ifnet *ifp;
int if_flags;
volatile int link_up;
struct ifmedia media;
uint16_t max_frame_size;
struct cdev *ioctl_dev;
/* resources */
struct resource *pci_reg;
int reg_rid;
struct resource *pci_dbells;
int dbells_rid;
uint64_t dbells_phys_addr;
uint32_t dbells_size;
struct resource *msix_bar;
int msix_rid;
int msix_count;
struct mtx hw_lock;
/* debug */
uint32_t dbg_level;
uint32_t dp_level;
uint32_t dp_module;
/* misc */
uint8_t mfw_ver[QLNX_MFW_VERSION_LENGTH];
uint8_t stormfw_ver[QLNX_STORMFW_VERSION_LENGTH];
uint32_t flash_size;
/* dma related */
bus_dma_tag_t parent_tag;
bus_dma_tag_t tx_tag;
bus_dma_tag_t rx_tag;
struct ecore_sb_info sb_array[QLNX_MAX_RSS];
struct qlnx_rx_queue rxq_array[QLNX_MAX_RSS];
struct qlnx_tx_queue txq_array[(QLNX_MAX_RSS * MAX_NUM_TC)];
struct qlnx_fastpath fp_array[QLNX_MAX_RSS];
/* tx related */
struct callout tx_callout;
struct mtx tx_lock;
uint32_t txr_idx;
/* rx related */
uint32_t rx_pkt_threshold;
uint32_t rx_jumbo_buf_eq_mtu;
/* slow path related */
struct resource *sp_irq[MAX_HWFNS_PER_DEVICE];
void *sp_handle[MAX_HWFNS_PER_DEVICE];
int sp_irq_rid[MAX_HWFNS_PER_DEVICE];
struct task sp_task[MAX_HWFNS_PER_DEVICE];
struct taskqueue *sp_taskqueue[MAX_HWFNS_PER_DEVICE];
struct callout qlnx_callout;
/* fast path related */
int num_rss;
int num_tc;
#define QLNX_MAX_TSS_CNT(ha) ((ha->num_rss) * (ha->num_tc))
qlnx_ivec_t irq_vec[QLNX_MAX_RSS];
uint8_t filter;
uint32_t nmcast;
qlnx_mcast_t mcast[QLNX_MAX_NUM_MULTICAST_ADDRS];
struct ecore_filter_mcast ecore_mcast;
uint8_t primary_mac[ETH_ALEN];
uint8_t prio_to_tc[MAX_NUM_PRI];
struct ecore_eth_stats hw_stats;
struct ecore_rss_params rss_params;
uint32_t rx_buf_size;
bool rx_csum_offload;
uint32_t rx_coalesce_usecs;
uint32_t tx_coalesce_usecs;
/* link related */
qlnx_link_output_t if_link;
/* global counters */
uint64_t sp_interrupts;
uint64_t err_illegal_intr;
uint64_t err_fp_null;
uint64_t err_get_proto_invalid_type;
/* grcdump related */
uint32_t err_inject;
uint32_t grcdump_taken;
uint32_t grcdump_dwords[QLNX_MAX_HW_FUNCS];
uint32_t grcdump_size[QLNX_MAX_HW_FUNCS];
void *grcdump[QLNX_MAX_HW_FUNCS];
uint32_t idle_chk_taken;
uint32_t idle_chk_dwords[QLNX_MAX_HW_FUNCS];
uint32_t idle_chk_size[QLNX_MAX_HW_FUNCS];
void *idle_chk[QLNX_MAX_HW_FUNCS];
/* storm stats related */
#define QLNX_STORM_STATS_TOTAL \
(QLNX_MAX_HW_FUNCS * QLNX_STORM_STATS_SAMPLES_PER_HWFN)
qlnx_storm_stats_t storm_stats[QLNX_STORM_STATS_TOTAL];
uint32_t storm_stats_index;
uint32_t storm_stats_enable;
uint32_t personality;
};
typedef struct qlnx_host qlnx_host_t;
/* note that align has to be a power of 2 */
#define QL_ALIGN(size, align) (((size) + ((align) - 1)) & ~((align) - 1))
#define QL_MIN(x, y) ((x < y) ? x : y)
#define QL_RUNNING(ifp) \
((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) == \
IFF_DRV_RUNNING)
#define QLNX_MAX_MTU 9000
#define QLNX_MAX_SEGMENTS_NON_TSO (ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1)
#define QLNX_MAX_TSO_FRAME_SIZE ((64 * 1024 - 1) + 22)
#define QL_MAC_CMP(mac1, mac2) \
((((*(uint32_t *) mac1) == (*(uint32_t *) mac2) && \
(*(uint16_t *)(mac1 + 4)) == (*(uint16_t *)(mac2 + 4)))) ? 0 : 1)
#define for_each_rss(i) for (i = 0; i < ha->num_rss; i++)
/*
* Debug Related
*/
#ifdef QLNX_DEBUG
#define QL_DPRINT1(ha, x) if (ha->dbg_level & 0x0001) device_printf x
#define QL_DPRINT2(ha, x) if (ha->dbg_level & 0x0002) device_printf x
#define QL_DPRINT3(ha, x) if (ha->dbg_level & 0x0004) device_printf x
#define QL_DPRINT4(ha, x) if (ha->dbg_level & 0x0008) device_printf x
#define QL_DPRINT5(ha, x) if (ha->dbg_level & 0x0010) device_printf x
#define QL_DPRINT6(ha, x) if (ha->dbg_level & 0x0020) device_printf x
#define QL_DPRINT7(ha, x) if (ha->dbg_level & 0x0040) device_printf x
#define QL_DPRINT8(ha, x) if (ha->dbg_level & 0x0080) device_printf x
#define QL_DPRINT9(ha, x) if (ha->dbg_level & 0x0100) device_printf x
#define QL_DPRINT11(ha, x) if (ha->dbg_level & 0x0400) device_printf x
#define QL_DPRINT12(ha, x) if (ha->dbg_level & 0x0800) device_printf x
#define QL_DPRINT13(ha, x) if (ha->dbg_level & 0x1000) device_printf x
#define QL_DPRINT14(ha, x) if (ha->dbg_level & 0x2000) device_printf x
#else
#define QL_DPRINT1(ha, x)
#define QL_DPRINT2(ha, x)
#define QL_DPRINT3(ha, x)
#define QL_DPRINT4(ha, x)
#define QL_DPRINT5(ha, x)
#define QL_DPRINT6(ha, x)
#define QL_DPRINT7(ha, x)
#define QL_DPRINT8(ha, x)
#define QL_DPRINT9(ha, x)
#define QL_DPRINT11(ha, x)
#define QL_DPRINT12(ha, x)
#define QL_DPRINT13(ha, x)
#define QL_DPRINT14(ha, x)
#endif /* #ifdef QLNX_DEBUG */
#define QL_ASSERT(ha, x, y) if (!x) panic y
#define QL_ERR_INJECT(ha, val) (ha->err_inject == val)
#define QL_RESET_ERR_INJECT(ha, val) {if (ha->err_inject == val) ha->err_inject = 0;}
#define QL_ERR_INJCT_TX_INT_DIFF 0x0001
#define QL_ERR_INJCT_TX_INT_MBUF_NULL 0x0002
/*
* exported functions
*/
extern int qlnx_make_cdev(qlnx_host_t *ha);
extern void qlnx_del_cdev(qlnx_host_t *ha);
extern int qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
int hwfn_index);
extern int qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords,
int hwfn_index);
extern uint8_t *qlnx_get_mac_addr(qlnx_host_t *ha);
extern void qlnx_fill_link(struct ecore_hwfn *hwfn,
struct qlnx_link_output *if_link);
/*
* Some OS specific stuff
*/
#if (defined IFM_100G_SR4)
#define QLNX_IFM_100G_SR4 IFM_100G_SR4
#define QLNX_IFM_100G_LR4 IFM_100G_LR4
#define QLNX_IFM_100G_CR4 IFM_100G_CR4
#else
#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
#define QLNX_IFM_100G_CR4 IFM_UNKNOWN
#endif
#if (defined IFM_25G_SR)
#define QLNX_IFM_25G_SR IFM_25G_SR
#define QLNX_IFM_25G_CR IFM_25G_CR
#else
#define QLNX_IFM_25G_SR IFM_UNKNOWN
#define QLNX_IFM_25G_CR IFM_UNKNOWN
#endif
#if __FreeBSD_version < 1100000
#define QLNX_INC_IERRORS(ifp) ifp->if_ierrors++
#define QLNX_INC_IQDROPS(ifp) ifp->if_iqdrops++
#define QLNX_INC_IPACKETS(ifp) ifp->if_ipackets++
#define QLNX_INC_OPACKETS(ifp) ifp->if_opackets++
#define QLNX_INC_OBYTES(ifp, len) ifp->if_obytes += len
#define QLNX_INC_IBYTES(ifp, len) ifp->if_ibytes += len
#else
#define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
#define QLNX_INC_IPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1)
#define QLNX_INC_OPACKETS(ifp) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1)
#define QLNX_INC_OBYTES(ifp, len) \
if_inc_counter(ifp, IFCOUNTER_OBYTES, len)
#define QLNX_INC_IBYTES(ifp, len) \
if_inc_counter(ifp, IFCOUNTER_IBYTES, len)
#endif /* #if __FreeBSD_version < 1100000 */
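/*
 * Usage sketch (editorial addition): these wrappers let the data path update
 * interface counters the same way on either side of the __FreeBSD_version
 * 1100000 boundary, e.g. on a completed receive of 'len' bytes (hypothetical
 * variable):
 *
 *	QLNX_INC_IPACKETS(ifp);
 *	QLNX_INC_IBYTES(ifp, len);
 */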
#define CQE_L3_PACKET(flags) \
((((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv4) || \
(((flags) & PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == e_l3Type_ipv6))
#define CQE_IP_HDR_ERR(flags) \
((flags) & (PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK \
<< PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT))
#define CQE_L4_HAS_CSUM(flags) \
((flags) & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK \
<< PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT))
#define CQE_HAS_VLAN(flags) \
((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT))
#endif /* #ifndef _QLNX_DEF_H_ */

View File

@@ -0,0 +1,882 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* File: qlnx_ioctl.c
* Author : David C Somayajulu, Qlogic Corporation, Aliso Viejo, CA 92656.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"
#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>
static int qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
struct thread *td);
static struct cdevsw qlnx_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = qlnx_eioctl,
.d_name = "qlnxioctl",
};
int
qlnx_make_cdev(qlnx_host_t *ha)
{
ha->ioctl_dev = make_dev(&qlnx_cdevsw,
ha->ifp->if_dunit,
UID_ROOT,
GID_WHEEL,
0600,
"%s",
if_name(ha->ifp));
if (ha->ioctl_dev == NULL)
return (-1);
ha->ioctl_dev->si_drv1 = ha;
return (0);
}
void
qlnx_del_cdev(qlnx_host_t *ha)
{
if (ha->ioctl_dev != NULL)
destroy_dev(ha->ioctl_dev);
return;
}
int
qlnx_grc_dump(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
int rval = EINVAL;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
if (ha->grcdump_dwords[hwfn_index]) {
/* the grcdump is already available */
*num_dumped_dwords = ha->grcdump_dwords[hwfn_index];
return (0);
}
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
p_hwfn = &ha->cdev.hwfns[hwfn_index];
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
QL_DPRINT1(ha, (ha->pci_dev, "%s : ecore_ptt_acquire failed\n",
__func__));
return (rval);
}
if ((rval = ecore_dbg_grc_dump(p_hwfn, p_ptt,
ha->grcdump[hwfn_index],
(ha->grcdump_size[hwfn_index] >> 2),
num_dumped_dwords)) == DBG_STATUS_OK) {
rval = 0;
ha->grcdump_taken = 1;
} else
QL_DPRINT1(ha, (ha->pci_dev,
"%s : ecore_dbg_grc_dump failed [%d, 0x%x]\n",
__func__, hwfn_index, rval));
ecore_ptt_release(p_hwfn, p_ptt);
return (rval);
}
static void
qlnx_get_grc_dump_size(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
int i;
grcdump->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++)
grcdump->grcdump_size[i] = ha->grcdump_size[i];
return;
}
static int
qlnx_get_grc_dump(qlnx_host_t *ha, qlnx_grcdump_t *grcdump)
{
int i;
int rval = 0;
uint32_t dwords = 0;
grcdump->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if ((ha->grcdump[i] == NULL) || (grcdump->grcdump[i] == NULL) ||
(grcdump->grcdump_size[i] < ha->grcdump_size[i]))
return (EINVAL);
rval = qlnx_grc_dump(ha, &dwords, i);
if (rval)
break;
grcdump->grcdump_dwords[i] = dwords;
QL_DPRINT1(ha, (ha->pci_dev, "%s: grcdump_dwords[%d] = 0x%x\n",
__func__, i, dwords));
rval = copyout(ha->grcdump[i], grcdump->grcdump[i],
ha->grcdump_size[i]);
if (rval)
break;
ha->grcdump_dwords[i] = 0;
}
ha->grcdump_taken = 0;
return (rval);
}
int
qlnx_idle_chk(qlnx_host_t *ha, uint32_t *num_dumped_dwords, int hwfn_index)
{
int rval = EINVAL;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
if (ha->idle_chk_dwords[hwfn_index]) {
/* the idle check is already available */
*num_dumped_dwords = ha->idle_chk_dwords[hwfn_index];
return (0);
}
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
p_hwfn = &ha->cdev.hwfns[hwfn_index];
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
QL_DPRINT1(ha, (ha->pci_dev,
"%s : ecore_ptt_acquire failed\n", __func__));
return (rval);
}
if ((rval = ecore_dbg_idle_chk_dump(p_hwfn, p_ptt,
ha->idle_chk[hwfn_index],
(ha->idle_chk_size[hwfn_index] >> 2),
num_dumped_dwords)) == DBG_STATUS_OK) {
rval = 0;
ha->idle_chk_taken = 1;
} else
QL_DPRINT1(ha, (ha->pci_dev,
"%s : ecore_dbg_idle_chk_dump failed [%d, 0x%x]\n",
__func__, hwfn_index, rval));
ecore_ptt_release(p_hwfn, p_ptt);
return (rval);
}
static void
qlnx_get_idle_chk_size(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
int i;
idle_chk->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++)
idle_chk->idle_chk_size[i] = ha->idle_chk_size[i];
return;
}
static int
qlnx_get_idle_chk(qlnx_host_t *ha, qlnx_idle_chk_t *idle_chk)
{
int i;
int rval = 0;
uint32_t dwords = 0;
idle_chk->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if ((ha->idle_chk[i] == NULL) ||
(idle_chk->idle_chk[i] == NULL) ||
(idle_chk->idle_chk_size[i] <
ha->idle_chk_size[i]))
return (EINVAL);
rval = qlnx_idle_chk(ha, &dwords, i);
if (rval)
break;
idle_chk->idle_chk_dwords[i] = dwords;
QL_DPRINT1(ha, (ha->pci_dev, "%s: idle_chk_dwords[%d] = 0x%x\n",
__func__, i, dwords));
rval = copyout(ha->idle_chk[i], idle_chk->idle_chk[i],
ha->idle_chk_size[i]);
if (rval)
break;
ha->idle_chk_dwords[i] = 0;
}
ha->idle_chk_taken = 0;
return (rval);
}
static uint32_t
qlnx_get_trace_cmd_size(qlnx_host_t *ha, int hwfn_index, uint16_t cmd)
{
int rval = -1;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
uint32_t num_dwords = 0;
p_hwfn = &ha->cdev.hwfns[hwfn_index];
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
QL_DPRINT1(ha, (ha->pci_dev,
"%s: ecore_ptt_acquire [%d, 0x%x]failed\n",
__func__, hwfn_index, cmd));
return (0);
}
switch (cmd) {
case QLNX_MCP_TRACE:
rval = ecore_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
break;
case QLNX_REG_FIFO:
rval = ecore_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
break;
case QLNX_IGU_FIFO:
rval = ecore_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
break;
case QLNX_PROTECTION_OVERRIDE:
rval = ecore_dbg_protection_override_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
break;
case QLNX_FW_ASSERTS:
rval = ecore_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
p_ptt, &num_dwords);
break;
}
if (rval != DBG_STATUS_OK) {
QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
__func__, cmd, rval));
num_dwords = 0;
}
ecore_ptt_release(p_hwfn, p_ptt);
return ((num_dwords * sizeof (uint32_t)));
}
static void
qlnx_get_trace_size(qlnx_host_t *ha, qlnx_trace_t *trace)
{
int i;
trace->pci_func = ha->pci_func;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
trace->size[i] = qlnx_get_trace_cmd_size(ha, i, trace->cmd);
}
return;
}
static int
qlnx_get_trace(qlnx_host_t *ha, int hwfn_index, qlnx_trace_t *trace)
{
int rval = -1;
struct ecore_hwfn *p_hwfn;
struct ecore_ptt *p_ptt;
uint32_t num_dwords = 0;
void *buffer;
buffer = qlnx_zalloc(trace->size[hwfn_index]);
if (buffer == NULL) {
QL_DPRINT1(ha, (ha->pci_dev,
"%s: qlnx_zalloc [%d, 0x%x]failed\n",
__func__, hwfn_index, trace->cmd));
return (ENXIO);
}
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());
p_hwfn = &ha->cdev.hwfns[hwfn_index];
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
QL_DPRINT1(ha, (ha->pci_dev,
"%s: ecore_ptt_acquire [%d, 0x%x] failed\n",
__func__, hwfn_index, trace->cmd));
free(buffer, M_QLNXBUF);
return (ENXIO);
}
switch (trace->cmd) {
case QLNX_MCP_TRACE:
rval = ecore_dbg_mcp_trace_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
&num_dwords);
break;
case QLNX_REG_FIFO:
rval = ecore_dbg_reg_fifo_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
&num_dwords);
break;
case QLNX_IGU_FIFO:
rval = ecore_dbg_igu_fifo_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
&num_dwords);
break;
case QLNX_PROTECTION_OVERRIDE:
rval = ecore_dbg_protection_override_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
&num_dwords);
break;
case QLNX_FW_ASSERTS:
rval = ecore_dbg_fw_asserts_dump(p_hwfn, p_ptt,
buffer, (trace->size[hwfn_index] >> 2),
&num_dwords);
break;
}
if (rval != DBG_STATUS_OK) {
QL_DPRINT1(ha, (ha->pci_dev, "%s : cmd = 0x%x failed [0x%x]\n",
__func__, trace->cmd, rval));
num_dwords = 0;
}
ecore_ptt_release(p_hwfn, p_ptt);
trace->dwords[hwfn_index] = num_dwords;
if (num_dwords) {
rval = copyout(buffer, trace->buffer[hwfn_index],
(num_dwords << 2));
}
free(buffer, M_QLNXBUF);
return (rval);
}
static int
qlnx_reg_rd_wr(qlnx_host_t *ha, qlnx_reg_rd_wr_t *reg_rd_wr)
{
int rval = 0;
struct ecore_hwfn *p_hwfn;
if (reg_rd_wr->hwfn_index >= QLNX_MAX_HW_FUNCS) {
return (EINVAL);
}
p_hwfn = &ha->cdev.hwfns[reg_rd_wr->hwfn_index];
switch (reg_rd_wr->cmd) {
case QLNX_REG_READ_CMD:
if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
reg_rd_wr->val = qlnx_reg_rd32(p_hwfn,
reg_rd_wr->addr);
}
break;
case QLNX_REG_WRITE_CMD:
if (reg_rd_wr->access_type == QLNX_REG_ACCESS_DIRECT) {
qlnx_reg_wr32(p_hwfn, reg_rd_wr->addr,
reg_rd_wr->val);
}
break;
default:
rval = EINVAL;
break;
}
return (rval);
}
static int
qlnx_rd_wr_pci_config(qlnx_host_t *ha, qlnx_pcicfg_rd_wr_t *pci_cfg_rd_wr)
{
int rval = 0;
switch (pci_cfg_rd_wr->cmd) {
case QLNX_PCICFG_READ:
pci_cfg_rd_wr->val = pci_read_config(ha->pci_dev,
pci_cfg_rd_wr->reg,
pci_cfg_rd_wr->width);
break;
case QLNX_PCICFG_WRITE:
pci_write_config(ha->pci_dev, pci_cfg_rd_wr->reg,
pci_cfg_rd_wr->val, pci_cfg_rd_wr->width);
break;
default:
rval = EINVAL;
break;
}
return (rval);
}
static void
qlnx_mac_addr(qlnx_host_t *ha, qlnx_perm_mac_addr_t *mac_addr)
{
bzero(mac_addr->addr, sizeof(mac_addr->addr));
snprintf(mac_addr->addr, sizeof(mac_addr->addr),
"%02x:%02x:%02x:%02x:%02x:%02x",
ha->primary_mac[0], ha->primary_mac[1], ha->primary_mac[2],
ha->primary_mac[3], ha->primary_mac[4], ha->primary_mac[5]);
return;
}
static int
qlnx_get_regs(qlnx_host_t *ha, qlnx_get_regs_t *regs)
{
int i;
int rval = 0;
uint32_t dwords = 0;
uint8_t *outb;
regs->reg_buf_len = 0;
outb = regs->reg_buf;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
rval = qlnx_grc_dump(ha, &dwords, i);
if (rval)
break;
regs->reg_buf_len += (dwords << 2);
rval = copyout(ha->grcdump[i], outb, ha->grcdump_size[i]);
if (rval)
break;
ha->grcdump_dwords[i] = 0;
outb += regs->reg_buf_len;
}
ha->grcdump_taken = 0;
return (rval);
}
static int
qlnx_drv_info(qlnx_host_t *ha, qlnx_drvinfo_t *drv_info)
{
int i;
extern char qlnx_name_str[];
extern char qlnx_ver_str[];
bzero(drv_info, sizeof(qlnx_drvinfo_t));
snprintf(drv_info->drv_name, sizeof(drv_info->drv_name), "%s",
qlnx_name_str);
snprintf(drv_info->drv_version, sizeof(drv_info->drv_version), "%s",
qlnx_ver_str);
snprintf(drv_info->mfw_version, sizeof(drv_info->mfw_version), "%s",
ha->mfw_ver);
snprintf(drv_info->stormfw_version, sizeof(drv_info->stormfw_version),
"%s", ha->stormfw_ver);
drv_info->eeprom_dump_len = ha->flash_size;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
drv_info->reg_dump_len += ha->grcdump_size[i];
}
snprintf(drv_info->bus_info, sizeof(drv_info->bus_info),
"%d:%d:%d", pci_get_bus(ha->pci_dev),
pci_get_slot(ha->pci_dev), ha->pci_func);
return (0);
}
static int
qlnx_dev_settings(qlnx_host_t *ha, qlnx_dev_setting_t *dev_info)
{
struct ecore_hwfn *p_hwfn;
struct qlnx_link_output if_link;
p_hwfn = &ha->cdev.hwfns[0];
qlnx_fill_link(p_hwfn, &if_link);
dev_info->supported = if_link.supported_caps;
dev_info->advertising = if_link.advertised_caps;
dev_info->speed = if_link.speed;
dev_info->duplex = if_link.duplex;
dev_info->port = ha->pci_func & 0x1;
dev_info->autoneg = if_link.autoneg;
return (0);
}
static int
qlnx_write_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram, uint32_t cmd)
{
uint8_t *buf;
int ret = 0;
if ((nvram->data == NULL) || (nvram->data_len == 0))
return (EINVAL);
buf = qlnx_zalloc(nvram->data_len);
ret = copyin(nvram->data, buf, nvram->data_len);
QL_DPRINT9(ha,
(ha->pci_dev, "%s: issue cmd = 0x%x data = %p "
" data_len = 0x%x ret = 0x%x exit\n", __func__,
cmd, nvram->data, nvram->data_len, ret));
if (ret == 0) {
ret = ecore_mcp_nvm_write(&ha->cdev, cmd,
nvram->offset, buf, nvram->data_len);
}
QL_DPRINT9(ha,
(ha->pci_dev, "%s: cmd = 0x%x data = %p "
" data_len = 0x%x resp = 0x%x ret = 0x%x exit\n",
__func__, cmd, nvram->data, nvram->data_len,
ha->cdev.mcp_nvm_resp, ret));
free(buf, M_QLNXBUF);
return (ret);
}
static int
qlnx_read_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
uint8_t *buf;
int ret = 0;
if ((nvram->data == NULL) || (nvram->data_len == 0))
return (EINVAL);
buf = qlnx_zalloc(nvram->data_len);
ret = ecore_mcp_nvm_read(&ha->cdev, nvram->offset, buf,
nvram->data_len);
QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
" resp = 0x%x ret = 0x%x exit\n", __func__,
nvram->data, nvram->data_len,
ha->cdev.mcp_nvm_resp, ret));
if (ret == 0) {
ret = copyout(buf, nvram->data, nvram->data_len);
}
free(buf, M_QLNXBUF);
return (ret);
}
static int
qlnx_get_nvram_resp(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
uint8_t *buf;
int ret = 0;
if ((nvram->data == NULL) || (nvram->data_len == 0))
return (EINVAL);
buf = qlnx_zalloc(nvram->data_len);
ret = ecore_mcp_nvm_resp(&ha->cdev, buf);
QL_DPRINT9(ha, (ha->pci_dev, "%s: data = %p data_len = 0x%x "
" resp = 0x%x ret = 0x%x exit\n", __func__,
nvram->data, nvram->data_len,
ha->cdev.mcp_nvm_resp, ret));
if (ret == 0) {
ret = copyout(buf, nvram->data, nvram->data_len);
}
free(buf, M_QLNXBUF);
return (ret);
}
static int
qlnx_nvram(qlnx_host_t *ha, qlnx_nvram_t *nvram)
{
int ret = 0;
switch (nvram->cmd) {
case QLNX_NVRAM_CMD_WRITE_NVRAM:
ret = qlnx_write_nvram(ha, nvram, ECORE_NVM_WRITE_NVRAM);
break;
case QLNX_NVRAM_CMD_PUT_FILE_DATA:
ret = qlnx_write_nvram(ha, nvram, ECORE_PUT_FILE_DATA);
break;
case QLNX_NVRAM_CMD_READ_NVRAM:
ret = qlnx_read_nvram(ha, nvram);
break;
case QLNX_NVRAM_CMD_SET_SECURE_MODE:
ret = ecore_mcp_nvm_set_secure_mode(&ha->cdev, nvram->offset);
QL_DPRINT9(ha, (ha->pci_dev,
"%s: QLNX_NVRAM_CMD_SET_SECURE_MODE "
" resp = 0x%x ret = 0x%x exit\n", __func__,
ha->cdev.mcp_nvm_resp, ret));
break;
case QLNX_NVRAM_CMD_DEL_FILE:
ret = ecore_mcp_nvm_del_file(&ha->cdev, nvram->offset);
QL_DPRINT9(ha, (ha->pci_dev,
"%s: QLNX_NVRAM_CMD_DEL_FILE "
" resp = 0x%x ret = 0x%x exit\n", __func__,
ha->cdev.mcp_nvm_resp, ret));
break;
case QLNX_NVRAM_CMD_PUT_FILE_BEGIN:
ret = ecore_mcp_nvm_put_file_begin(&ha->cdev, nvram->offset);
QL_DPRINT9(ha, (ha->pci_dev,
"%s: QLNX_NVRAM_CMD_PUT_FILE_BEGIN "
" resp = 0x%x ret = 0x%x exit\n", __func__,
ha->cdev.mcp_nvm_resp, ret));
break;
case QLNX_NVRAM_CMD_GET_NVRAM_RESP:
ret = qlnx_get_nvram_resp(ha, nvram);
break;
default:
ret = EINVAL;
break;
}
return (ret);
}
static void
qlnx_storm_stats(qlnx_host_t *ha, qlnx_storm_stats_dump_t *s_stats)
{
int i;
int index;
int ret;
int stats_copied = 0;
s_stats->num_hwfns = ha->cdev.num_hwfns;
// if (ha->storm_stats_index < QLNX_STORM_STATS_SAMPLES_PER_HWFN)
// return;
s_stats->num_samples = ha->storm_stats_index;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
index = (QLNX_STORM_STATS_SAMPLES_PER_HWFN * i);
if (s_stats->buffer[i]) {
ret = copyout(&ha->storm_stats[index],
s_stats->buffer[i],
QLNX_STORM_STATS_BYTES_PER_HWFN);
if (ret) {
printf("%s [%d]: failed\n", __func__, i);
}
if (s_stats->num_samples ==
QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
bzero((void *)&ha->storm_stats[index],
QLNX_STORM_STATS_BYTES_PER_HWFN);
stats_copied = 1;
}
}
}
if (stats_copied)
ha->storm_stats_index = 0;
return;
}
static int
qlnx_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
struct thread *td)
{
qlnx_host_t *ha;
int rval = 0;
struct ifnet *ifp;
qlnx_trace_t *trace;
int i;
if ((ha = (qlnx_host_t *)dev->si_drv1) == NULL)
return ENXIO;
ifp = ha->ifp;
switch (cmd) {
case QLNX_GRC_DUMP_SIZE:
qlnx_get_grc_dump_size(ha, (qlnx_grcdump_t *)data);
break;
case QLNX_GRC_DUMP:
rval = qlnx_get_grc_dump(ha, (qlnx_grcdump_t *)data);
break;
case QLNX_IDLE_CHK_SIZE:
qlnx_get_idle_chk_size(ha, (qlnx_idle_chk_t *)data);
break;
case QLNX_IDLE_CHK:
rval = qlnx_get_idle_chk(ha, (qlnx_idle_chk_t *)data);
break;
case QLNX_DRV_INFO:
rval = qlnx_drv_info(ha, (qlnx_drvinfo_t *)data);
break;
case QLNX_DEV_SETTING:
rval = qlnx_dev_settings(ha, (qlnx_dev_setting_t *)data);
break;
case QLNX_GET_REGS:
rval = qlnx_get_regs(ha, (qlnx_get_regs_t *)data);
break;
case QLNX_NVRAM:
rval = qlnx_nvram(ha, (qlnx_nvram_t *)data);
break;
case QLNX_RD_WR_REG:
rval = qlnx_reg_rd_wr(ha, (qlnx_reg_rd_wr_t *)data);
break;
case QLNX_RD_WR_PCICFG:
rval = qlnx_rd_wr_pci_config(ha, (qlnx_pcicfg_rd_wr_t *)data);
break;
case QLNX_MAC_ADDR:
qlnx_mac_addr(ha, (qlnx_perm_mac_addr_t *)data);
break;
case QLNX_STORM_STATS:
qlnx_storm_stats(ha, (qlnx_storm_stats_dump_t *)data);
break;
case QLNX_TRACE_SIZE:
qlnx_get_trace_size(ha, (qlnx_trace_t *)data);
break;
case QLNX_TRACE:
trace = (qlnx_trace_t *)data;
for (i = 0; i < ha->cdev.num_hwfns; i++) {
if (trace->size[i] && trace->cmd && trace->buffer[i])
rval = qlnx_get_trace(ha, i, trace);
if (rval)
break;
}
break;
default:
rval = EINVAL;
break;
}
return (rval);
}
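/*
 * Illustrative user-space sketch (editorial addition, not part of the
 * driver): the control node is created above as /dev/<ifname>, so a
 * diagnostic tool could query the GRC dump sizes roughly as follows
 * (the device path shown and the error handling are hypothetical):
 *
 *	qlnx_grcdump_t gd;
 *	int fd;
 *
 *	memset(&gd, 0, sizeof(gd));
 *	fd = open("/dev/<ifname>", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, QLNX_GRC_DUMP_SIZE, &gd) == 0)
 *		printf("hwfn 0 grcdump size: %u bytes\n",
 *		    gd.grcdump_size[0]);
 */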

View File

@@ -0,0 +1,316 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _QLNX_IOCTL_H_
#define _QLNX_IOCTL_H_
#include <sys/ioccom.h>
#define QLNX_MAX_HW_FUNCS 2
/*
* Read grcdump and grcdump size
*/
struct qlnx_grcdump {
uint16_t pci_func;
uint32_t grcdump_size[QLNX_MAX_HW_FUNCS];
void *grcdump[QLNX_MAX_HW_FUNCS];
uint32_t grcdump_dwords[QLNX_MAX_HW_FUNCS];
};
typedef struct qlnx_grcdump qlnx_grcdump_t;
/*
* Read idle_chk and idle_chk size
*/
struct qlnx_idle_chk {
uint16_t pci_func;
uint32_t idle_chk_size[QLNX_MAX_HW_FUNCS];
void *idle_chk[QLNX_MAX_HW_FUNCS];
uint32_t idle_chk_dwords[QLNX_MAX_HW_FUNCS];
};
typedef struct qlnx_idle_chk qlnx_idle_chk_t;
/*
* Retrieve traces
*/
struct qlnx_trace {
uint16_t pci_func;
uint16_t cmd;
#define QLNX_MCP_TRACE 0x01
#define QLNX_REG_FIFO 0x02
#define QLNX_IGU_FIFO 0x03
#define QLNX_PROTECTION_OVERRIDE 0x04
#define QLNX_FW_ASSERTS 0x05
uint32_t size[QLNX_MAX_HW_FUNCS];
void *buffer[QLNX_MAX_HW_FUNCS];
uint32_t dwords[QLNX_MAX_HW_FUNCS];
};
typedef struct qlnx_trace qlnx_trace_t;
/*
* Read driver info
*/
#define QLNX_DRV_INFO_NAME_LENGTH 32
#define QLNX_DRV_INFO_VERSION_LENGTH 32
#define QLNX_DRV_INFO_MFW_VERSION_LENGTH 32
#define QLNX_DRV_INFO_STORMFW_VERSION_LENGTH 32
#define QLNX_DRV_INFO_BUS_INFO_LENGTH 32
struct qlnx_drvinfo {
char drv_name[QLNX_DRV_INFO_NAME_LENGTH];
char drv_version[QLNX_DRV_INFO_VERSION_LENGTH];
char mfw_version[QLNX_DRV_INFO_MFW_VERSION_LENGTH];
char stormfw_version[QLNX_DRV_INFO_STORMFW_VERSION_LENGTH];
uint32_t eeprom_dump_len; /* in bytes */
uint32_t reg_dump_len; /* in bytes */
char bus_info[QLNX_DRV_INFO_BUS_INFO_LENGTH];
};
typedef struct qlnx_drvinfo qlnx_drvinfo_t;
/*
* Read Device Setting
*/
struct qlnx_dev_setting {
uint32_t supported; /* Features this interface supports */
uint32_t advertising; /* Features this interface advertises */
uint32_t speed; /* The forced speed, 10Mb, 100Mb, gigabit */
uint32_t duplex; /* Duplex, half or full */
uint32_t port; /* Which connector port */
uint32_t phy_address; /* port number*/
uint32_t autoneg; /* Enable or disable autonegotiation */
};
typedef struct qlnx_dev_setting qlnx_dev_setting_t;
/*
* Get Registers
*/
struct qlnx_get_regs {
void *reg_buf;
uint32_t reg_buf_len;
};
typedef struct qlnx_get_regs qlnx_get_regs_t;
/*
* Get/Set NVRAM
*/
struct qlnx_nvram {
uint32_t cmd;
#define QLNX_NVRAM_CMD_WRITE_NVRAM 0x01
#define QLNX_NVRAM_CMD_READ_NVRAM 0x02
#define QLNX_NVRAM_CMD_SET_SECURE_MODE 0x03
#define QLNX_NVRAM_CMD_DEL_FILE 0x04
#define QLNX_NVRAM_CMD_PUT_FILE_BEGIN 0x05
#define QLNX_NVRAM_CMD_GET_NVRAM_RESP 0x06
#define QLNX_NVRAM_CMD_PUT_FILE_DATA 0x07
void *data;
uint32_t offset;
uint32_t data_len;
uint32_t magic;
};
typedef struct qlnx_nvram qlnx_nvram_t;
/*
* Get/Set Device registers
*/
struct qlnx_reg_rd_wr {
uint32_t cmd;
#define QLNX_REG_READ_CMD 0x01
#define QLNX_REG_WRITE_CMD 0x02
uint32_t addr;
uint32_t val;
uint32_t access_type;
#define QLNX_REG_ACCESS_DIRECT 0x01
#define QLNX_REG_ACCESS_INDIRECT 0x02
uint32_t hwfn_index;
};
typedef struct qlnx_reg_rd_wr qlnx_reg_rd_wr_t;
/*
* Read/Write PCI Configuration
*/
struct qlnx_pcicfg_rd_wr {
uint32_t cmd;
#define QLNX_PCICFG_READ 0x01
#define QLNX_PCICFG_WRITE 0x02
uint32_t reg;
uint32_t val;
uint32_t width;
};
typedef struct qlnx_pcicfg_rd_wr qlnx_pcicfg_rd_wr_t;
/*
* Read MAC address
*/
struct qlnx_perm_mac_addr {
char addr[32];
};
typedef struct qlnx_perm_mac_addr qlnx_perm_mac_addr_t;
/*
* Read STORM statistics registers
*/
struct qlnx_storm_stats {
/* xstorm */
uint32_t xstorm_active_cycles;
uint32_t xstorm_stall_cycles;
uint32_t xstorm_sleeping_cycles;
uint32_t xstorm_inactive_cycles;
/* ystorm */
uint32_t ystorm_active_cycles;
uint32_t ystorm_stall_cycles;
uint32_t ystorm_sleeping_cycles;
uint32_t ystorm_inactive_cycles;
/* pstorm */
uint32_t pstorm_active_cycles;
uint32_t pstorm_stall_cycles;
uint32_t pstorm_sleeping_cycles;
uint32_t pstorm_inactive_cycles;
/* tstorm */
uint32_t tstorm_active_cycles;
uint32_t tstorm_stall_cycles;
uint32_t tstorm_sleeping_cycles;
uint32_t tstorm_inactive_cycles;
/* mstorm */
uint32_t mstorm_active_cycles;
uint32_t mstorm_stall_cycles;
uint32_t mstorm_sleeping_cycles;
uint32_t mstorm_inactive_cycles;
/* ustorm */
uint32_t ustorm_active_cycles;
uint32_t ustorm_stall_cycles;
uint32_t ustorm_sleeping_cycles;
uint32_t ustorm_inactive_cycles;
};
typedef struct qlnx_storm_stats qlnx_storm_stats_t;
#define QLNX_STORM_STATS_SAMPLES_PER_HWFN (10000)
#define QLNX_STORM_STATS_BYTES_PER_HWFN (sizeof(qlnx_storm_stats_t) * \
QLNX_STORM_STATS_SAMPLES_PER_HWFN)
struct qlnx_storm_stats_dump {
int num_hwfns;
int num_samples;
void *buffer[QLNX_MAX_HW_FUNCS];
};
typedef struct qlnx_storm_stats_dump qlnx_storm_stats_dump_t;
/*
* Read grcdump size
*/
#define QLNX_GRC_DUMP_SIZE _IOWR('q', 1, qlnx_grcdump_t)
/*
* Read grcdump
*/
#define QLNX_GRC_DUMP _IOWR('q', 2, qlnx_grcdump_t)
/*
* Read idle_chk size
*/
#define QLNX_IDLE_CHK_SIZE _IOWR('q', 3, qlnx_idle_chk_t)
/*
* Read idle_chk
*/
#define QLNX_IDLE_CHK _IOWR('q', 4, qlnx_idle_chk_t)
/*
* Read driver info
*/
#define QLNX_DRV_INFO _IOWR('q', 5, qlnx_drvinfo_t)
/*
* Read Device Setting
*/
#define QLNX_DEV_SETTING _IOR('q', 6, qlnx_dev_setting_t)
/*
* Get Registers
*/
#define QLNX_GET_REGS _IOR('q', 7, qlnx_get_regs_t)
/*
* Get/Set NVRAM
*/
#define QLNX_NVRAM _IOWR('q', 8, qlnx_nvram_t)
/*
* Get/Set Device registers
*/
#define QLNX_RD_WR_REG _IOWR('q', 9, qlnx_reg_rd_wr_t)
/*
* Read/Write PCI Configuration
*/
#define QLNX_RD_WR_PCICFG _IOWR('q', 10, qlnx_pcicfg_rd_wr_t)
/*
* Read MAC address
*/
#define QLNX_MAC_ADDR _IOWR('q', 11, qlnx_perm_mac_addr_t)
/*
* Read STORM statistics
*/
#define QLNX_STORM_STATS _IOWR('q', 12, qlnx_storm_stats_dump_t)
/*
* Read trace size
*/
#define QLNX_TRACE_SIZE _IOWR('q', 13, qlnx_trace_t)
/*
* Read trace
*/
#define QLNX_TRACE _IOWR('q', 14, qlnx_trace_t)
#endif /* #ifndef _QLNX_IOCTL_H_ */
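The ioctl commands above are meant to be driven from a userland management tool. A minimal hedged sketch, assuming a /dev/qlnxe0 device node and omitting most error handling (none of this is part of the commit), queries the GRC dump size and collects STORM statistics:

/* Hypothetical userland sketch; device node name is an assumption. */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "qlnx_ioctl.h"

int
main(void)
{
        qlnx_grcdump_t grcdump;
        qlnx_storm_stats_dump_t stats;
        int fd, i;

        fd = open("/dev/qlnxe0", O_RDWR);   /* name assumed for illustration */
        if (fd < 0)
                return (1);

        memset(&grcdump, 0, sizeof(grcdump));
        if (ioctl(fd, QLNX_GRC_DUMP_SIZE, &grcdump) == 0) {
                for (i = 0; i < QLNX_MAX_HW_FUNCS; i++)
                        printf("hwfn %d: grcdump size %u bytes\n", i,
                            grcdump.grcdump_size[i]);
        }

        /* The kernel copies QLNX_STORM_STATS_BYTES_PER_HWFN into each buffer. */
        memset(&stats, 0, sizeof(stats));
        for (i = 0; i < QLNX_MAX_HW_FUNCS; i++)
                stats.buffer[i] = malloc(QLNX_STORM_STATS_BYTES_PER_HWFN);

        if (ioctl(fd, QLNX_STORM_STATS, &stats) == 0)
                printf("%d hwfns, %d samples per hwfn\n",
                    stats.num_hwfns, stats.num_samples);

        for (i = 0; i < QLNX_MAX_HW_FUNCS; i++)
                free(stats.buffer[i]);
        close(fd);
        return (0);
}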

7040
sys/dev/qlnx/qlnxe/qlnx_os.c Normal file

File diff suppressed because it is too large

159
sys/dev/qlnx/qlnxe/qlnx_os.h Normal file
View File

@@ -0,0 +1,159 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/*
* File: qlnx_os.h
* Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
*/
#ifndef _QLNX_OS_H_
#define _QLNX_OS_H_
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/_inttypes.h>
#include <sys/conf.h>
#if __FreeBSD_version < 1000000
#error FreeBSD Version not supported - use version >= 1000000
#endif
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/in_var.h>
#include <netinet/tcp_lro.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
#include <sys/pcpu.h>
#include <sys/unistd.h>
#include <sys/kthread.h>
#include <sys/libkern.h>
#include <sys/smp.h>
static __inline int qlnx_ms_to_hz(int ms)
{
int qlnx_hz;
struct timeval t;
t.tv_sec = ms / 1000;
t.tv_usec = (ms % 1000) * 1000;
qlnx_hz = tvtohz(&t);
if (qlnx_hz < 0)
qlnx_hz = 0x7fffffff;
if (!qlnx_hz)
qlnx_hz = 1;
return (qlnx_hz);
}
static __inline int qlnx_sec_to_hz(int sec)
{
struct timeval t;
t.tv_sec = sec;
t.tv_usec = 0;
return (tvtohz(&t));
}
MALLOC_DECLARE(M_QLNXBUF);
#define qlnx_mdelay(fn, msecs) \
{\
if (cold) \
DELAY((msecs * 1000)); \
else \
pause(fn, qlnx_ms_to_hz(msecs)); \
}
/*
* Locks
*/
#define QLNX_LOCK(ha) mtx_lock(&ha->hw_lock)
#define QLNX_UNLOCK(ha) mtx_unlock(&ha->hw_lock)
#define QLNX_TX_LOCK(ha) mtx_lock(&ha->tx_lock);
#define QLNX_TX_UNLOCK(ha) mtx_unlock(&ha->tx_lock);
/*
* structure encapsulating a DMA buffer
*/
struct qlnx_dma {
bus_size_t alignment;
uint32_t size;
void *dma_b;
bus_addr_t dma_addr;
bus_dmamap_t dma_map;
bus_dma_tag_t dma_tag;
};
typedef struct qlnx_dma qlnx_dma_t;
#endif /* #ifndef _QLNX_OS_H_ */
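As a hedged illustration of how the helpers above are meant to be used (the function below and its softc argument are assumptions, not code from the commit; qlnx_host_t is the driver softc declared elsewhere in the driver sources):

/* Hedged illustration only; assumes hw_lock has already been initialized. */
static void
qlnx_example_wait(qlnx_host_t *ha)
{
        /* Sleep roughly 10 ms; qlnx_mdelay() falls back to DELAY() when cold. */
        qlnx_mdelay(__func__, 10);

        /* pause()/msleep() timeouts are in ticks, hence the conversion. */
        pause("qlnxw", qlnx_ms_to_hz(100));

        QLNX_LOCK(ha);
        /* ... touch state protected by hw_lock ... */
        QLNX_UNLOCK(ha);
}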

43
sys/dev/qlnx/qlnxe/qlnx_ver.h Normal file
View File

@@ -0,0 +1,43 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/*
* File: qlnx_ver.h
* Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
*/
/*
* version numbers
*/
#define QLNX_VERSION_MAJOR 1
#define QLNX_VERSION_MINOR 3
#define QLNX_VERSION_BUILD 0
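These values correspond to the v1.3.0 driver version noted in the commit message; a hedged sketch of turning them into a printable string (the helper and format string are assumptions for illustration, not the driver's own code):

/* Hypothetical helper; not part of the commit. */
#include <stdio.h>

static void
qlnx_format_version(char *buf, size_t len)
{
        snprintf(buf, len, "v%d.%d.%d", QLNX_VERSION_MAJOR,
            QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
}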

73
sys/dev/qlnx/qlnxe/rdma_common.h Normal file
View File

@@ -0,0 +1,73 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************/
/* RDMA FW CONSTANTS */
/************************/
#define RDMA_RESERVED_LKEY (0) //Reserved lkey
#define RDMA_RING_PAGE_SIZE (0x1000) //4KB pages
#define RDMA_MAX_SGE_PER_SQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE (4) //max number of SGEs in a single request
#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) //max size of data in single request
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)
#define RDMA_MAX_CQS (64*1024)
#define RDMA_MAX_TIDS (128*1024-1)
#define RDMA_MAX_PDS (64*1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
struct rdma_srq_id
{
__le16 srq_idx /* SRQ index */;
__le16 opaque_fid;
};
struct rdma_srq_producers
{
__le32 sge_prod /* Current produced sge in SRQ */;
__le32 wqe_prod /* Current produced WQE to SRQ */;
};
#endif /* __RDMA_COMMON__ */

84438
sys/dev/qlnx/qlnxe/reg_addr.h Normal file

File diff suppressed because one or more lines are too long

70
sys/dev/qlnx/qlnxe/roce_common.h Normal file
View File

@@ -0,0 +1,70 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
/************************************************************************/
/* Add include to common rdma target for both eCore and protocol rdma driver */
/************************************************************************/
#include "rdma_common.h"
/************************/
/* ROCE FW CONSTANTS */
/************************/
#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) //max size of inline data in single request
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) //Maximum size of single SQ WQE (rdma wqe and inline data)
#define ROCE_MAX_QPS (32*1024)
#define ROCE_DCQCN_NP_MAX_QPS (64) /* notification point max QPs*/
#define ROCE_DCQCN_RP_MAX_QPS (64) /* reaction point max QPs*/
/*
* Affiliated asynchronous events / errors enumeration
*/
enum roce_async_events_type
{
ROCE_ASYNC_EVENT_NONE=0,
ROCE_ASYNC_EVENT_COMM_EST=1,
ROCE_ASYNC_EVENT_SQ_DRAINED,
ROCE_ASYNC_EVENT_SRQ_LIMIT,
ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
ROCE_ASYNC_EVENT_CQ_ERR,
ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
ROCE_ASYNC_EVENT_SRQ_EMPTY,
ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
MAX_ROCE_ASYNC_EVENTS_TYPE
};
#endif /* __ROCE_COMMON__ */

199
sys/dev/qlnx/qlnxe/spad_layout.h Normal file
View File

@@ -0,0 +1,199 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
/****************************************************************************
* Name: spad_layout.h
*
* Description: Global definitions
*
* Created: 01/09/2013
*
****************************************************************************/
/*
* Spad Layout NVM CFG MCP public
*==========================================================================================================
* MCP_REG_SCRATCH REG_RD(MISC_REG_GEN_PURP_CR0) REG_RD(MISC_REG_SHARED_MEM_ADDR)
* +------------------+ +-------------------------+ +-------------------+
* | Num Sections(4B)|Currently 4 | Num Sections(4B) | | Num Sections(4B)|Currently 6
* +------------------+ +-------------------------+ +-------------------+
* | Offsize(Trace) |4B -+ +-- | Offset(NVM_CFG1) | | Offsize(drv_mb) |
* +-| Offsize(NVM_CFG) |4B | | | (Size is fixed) | | Offsize(mfw_mb) |
*+-|-| Offsize(Public) |4B | +-> +-------------------------+ | Offsize(global) |
*| | | Offsize(Private) |4B | | | | Offsize(path) |
*| | +------------------+ <--+ | nvm_cfg1_glob | | Offsize(port) |
*| | | | +-------------------------+ | Offsize(func) |
*| | | Trace | | nvm_cfg1_path 0 | +-------------------+
*| +>+------------------+ | nvm_cfg1_path 1 | | drv_mb PF0/2/4..|8 Funcs of engine0
*| | | +-------------------------+ | drv_mb PF1/3/5..|8 Funcs of engine1
*| | NVM_CFG | | nvm_cfg1_port 0 | +-------------------+
*+-> +------------------+ | .... | | mfw_mb PF0/2/4..|8 Funcs of engine0
* | | | nvm_cfg1_port 3 | | mfw_mb PF1/3/5..|8 Funcs of engine1
* | Public Data | +-------------------------+ +-------------------+
* +------------------+ 8 Funcs of Engine 0| nvm_cfg1_func PF0/2/4/..| | |
* | | 8 Funcs of Engine 1| nvm_cfg1_func PF1/3/5/..| | public_global |
* | Private Data | +-------------------------+ +-------------------+
* +------------------+ | public_path 0 |
* | Code | | public_path 1 |
* | Static Area | +-------------------+
* +--- ---+ | public_port 0 |
* | Code | | .... |
* | PIM Area | | public_port 3 |
* +------------------+ +-------------------+
* | public_func 0/2/4.|8 Funcs of engine0
* | public_func 1/3/5.|8 Funcs of engine1
* +-------------------+
*/
#ifndef SPAD_LAYOUT_H
#define SPAD_LAYOUT_H
#ifndef MDUMP_PARSE_TOOL
#define PORT_0 0
#define PORT_1 1
#define PORT_2 2
#define PORT_3 3
#include "mcp_public.h"
#include "mfw_hsi.h"
#include "nvm_cfg.h"
#ifdef MFW
#include "mcp_private.h"
#endif
extern struct spad_layout g_spad;
/* TBD - Consider renaming to MCP_STATIC_SPAD_SIZE, since the real size includes another 64kb */
#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
#endif /* MDUMP_PARSE_TOOL */
#define TO_OFFSIZE(_offset, _size) \
(u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
(((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
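/*
 * Illustration (assuming OFFSIZE_OFFSET_SHIFT == 0 and OFFSIZE_SIZE_SHIFT == 16,
 * as defined in mcp_public.h): both fields are packed in dwords, so
 * TO_OFFSIZE(0x100, 0x40) evaluates to ((0x40 >> 2) << 16) | (0x100 >> 2),
 * i.e. 0x00100040.
 */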
enum spad_sections {
SPAD_SECTION_TRACE,
SPAD_SECTION_NVM_CFG,
SPAD_SECTION_PUBLIC,
SPAD_SECTION_PRIVATE,
SPAD_SECTION_MAX
};
#ifndef MDUMP_PARSE_TOOL
struct spad_layout {
struct nvm_cfg nvm_cfg;
struct mcp_public_data public_data;
#ifdef MFW /* Drivers will not be compiled with this flag. */
/* Linux should remove this occurrence entirely. */
struct mcp_private_data private_data;
#endif
};
#endif /* MDUMP_PARSE_TOOL */
#define MCP_TRACE_SIZE 2048 /* 2kb */
#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + __builtin_offsetof(struct static_init, f))
/* This section is located at a fixed location in the beginning of the scratchpad,
* to ensure that the MCP trace is not run over during MFW upgrade.
* All the rest of the data has a floating location which differs from version to version,
* and is pointed to by the mcp_meta_data below.
* Moreover, the spad_layout section is part of the MFW firmware, and is loaded with it
* from nvram in order to clear this portion.
*/
struct static_init {
u32 num_sections; /* 0xe20000 */
offsize_t sections[SPAD_SECTION_MAX]; /* 0xe20004 */
#define SECTION(_sec_) *((offsize_t*)(STRUCT_OFFSET(sections[_sec_])))
struct mcp_trace trace; /* 0xe20014 */
#define MCP_TRACE_P ((struct mcp_trace*)(STRUCT_OFFSET(trace)))
u8 trace_buffer[MCP_TRACE_SIZE]; /* 0xe20030 */
#define MCP_TRACE_BUF ((u8*)(STRUCT_OFFSET(trace_buffer)))
/* running_mfw has the same definition as in nvm_map.h.
* This bit indicates both the running dir and the running bundle.
* It is set once when the LIM is loaded.
*/
u32 running_mfw; /* 0xe20830 */
#define RUNNING_MFW *((u32*)(STRUCT_OFFSET(running_mfw)))
u32 build_time; /* 0xe20834 */
#define MFW_BUILD_TIME *((u32*)(STRUCT_OFFSET(build_time)))
u32 reset_type; /* 0xe20838 */
#define RESET_TYPE *((u32*)(STRUCT_OFFSET(reset_type)))
u32 mfw_secure_mode; /* 0xe2083c */
#define MFW_SECURE_MODE *((u32*)(STRUCT_OFFSET(mfw_secure_mode)))
u16 pme_status_pf_bitmap; /* 0xe20840 */
#define PME_STATUS_PF_BITMAP *((u16*)(STRUCT_OFFSET(pme_status_pf_bitmap)))
u16 pme_enable_pf_bitmap;
#define PME_ENABLE_PF_BITMAP *((u16*)(STRUCT_OFFSET(pme_enable_pf_bitmap)))
u32 mim_nvm_addr; /* 0xe20844 */
u32 mim_start_addr; /* 0xe20848 */
u32 ah_pcie_link_params; /* 0xe20850 Stores the PCIe link configuration at start, so it can also be used later for Hot-Reset, without the need to re-read it from nvm cfg. */
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
#define AH_PCIE_LINK_PARAMS *((u32*)(STRUCT_OFFSET(ah_pcie_link_params)))
u32 flags; /* 0xe20850 */
#define M_GLOB_FLAGS *((u32*)(STRUCT_OFFSET(flags)))
#define FLAGS_VAUX_REQUIRED (1 << 0)
#define FLAGS_WAIT_AVS_READY (1 << 1)
#define FLAGS_FAILURE_ISSUED (1 << 2)
#define FLAGS_FAILURE_DETECTED (1 << 3)
#define FLAGS_VAUX (1 << 4)
#define FLAGS_PERST_ASSERT_OCCURED (1 << 5)
#define FLAGS_HOT_RESET_STEP2 (1 << 6)
#define FLAGS_MSIX_SYNC_ALLOWED (1 << 7)
#define FLAGS_PROGRAM_PCI_COMPLETED (1 << 8)
#define FLAGS_SMBUS_AUX_MODE (1 << 9)
#define FLAGS_PEND_SMBUS_VMAIN_TO_AUX (1 << 10)
#define FLAGS_NVM_CFG_EFUSE_FAILURE (1 << 11)
#define FLAGS_OS_DRV_LOADED (1 << 29)
#define FLAGS_OVER_TEMP_OCCUR (1 << 30)
#define FLAGS_FAN_FAIL_OCCUR (1 << 31)
u32 rsrv_persist[4]; /* Persist reserved for MFW upgrades */ /* 0xe20854 */
};
#ifndef MDUMP_PARSE_TOOL
#define NVM_CFG1(x) g_spad.nvm_cfg.cfg1.x
#define NVM_GLOB(x) NVM_CFG1(glob).x
#define NVM_GLOB_VAL(n, m, o) ((NVM_GLOB(n) & m) >> o)
#endif /* MDUMP_PARSE_TOOL */
#endif /* SPAD_LAYOUT_H */

197
sys/dev/qlnx/qlnxe/storage_common.h Normal file
View File

@@ -0,0 +1,197 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
/*********************/
/* SCSI CONSTANTS */
/*********************/
#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
// Each Resource ID is mapped one-to-one by the driver to a BDQ Resource ID (for instance, per port)
#define BDQ_NUM_RESOURCES (4)
// ID 0 : RQ, ID 1 : IMMEDIATE_DATA:
#define BDQ_ID_RQ (0)
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
#define BDQ_MAX_EXTERNAL_RING_SIZE (1<<15)
/*
* SCSI buffer descriptor
*/
struct scsi_bd
{
struct regpair address /* Physical Address of buffer */;
struct regpair opaque /* Driver Metadata (preferably Virtual Address of buffer) */;
};
/*
* Scsi Drv BDQ struct
*/
struct scsi_bdq_ram_drv_data
{
__le16 external_producer /* BDQ External Producer; updated by driver when it loads BDs to External Ring */;
__le16 reserved0[3];
};
/*
* SCSI SGE entry
*/
struct scsi_sge
{
struct regpair sge_addr /* SGE address */;
__le32 sge_len /* SGE length */;
__le32 reserved;
};
/*
* Cached SGEs section
*/
struct scsi_cached_sges
{
struct scsi_sge sge[4] /* Cached SGEs section */;
};
/*
* Scsi Drv CMDQ struct
*/
struct scsi_drv_cmdq
{
__le16 cmdq_cons /* CMDQ consumer - updated by driver when CMDQ is consumed */;
__le16 reserved0;
__le32 reserved1;
};
/*
* Common SCSI init params passed by driver to FW in function init ramrod
*/
struct scsi_init_func_params
{
__le16 num_tasks /* Number of tasks in global task list */;
u8 log_page_size /* log of page size value */;
u8 debug_mode /* Use iscsi_debug_mode enum */;
u8 reserved2[12];
};
/*
* SCSI RQ/CQ/CMDQ firmware function init parameters
*/
struct scsi_init_func_queues
{
struct regpair glbl_q_params_addr /* Global Qs (CQ/RQ/CMDQ) params host address */;
__le16 rq_buffer_size /* The buffer size of RQ BDQ */;
__le16 cq_num_entries /* CQ num entries */;
__le16 cmdq_num_entries /* CMDQ num entries */;
u8 bdq_resource_id /* Each function-init Ramrod maps its function ID to a BDQ function ID, each BDQ function ID contains per-BDQ-ID BDQs */;
u8 q_validity;
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
u8 num_queues /* Number of continuous global queues used */;
u8 queue_relative_offset /* offset of continuous global queues used */;
u8 cq_sb_pi /* Protocol Index of CQ in status block (CQ consumer) */;
u8 cmdq_sb_pi /* Protocol Index of CMDQ in status block (CMDQ consumer) */;
__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS] /* CQ/CMDQ status block number array */;
__le16 reserved0 /* reserved */;
u8 bdq_pbl_num_entries[BDQ_NUM_IDS] /* Per BDQ ID, the PBL page size (number of entries in PBL) */;
struct regpair bdq_pbl_base_address[BDQ_NUM_IDS] /* Per BDQ ID, the PBL page Base Address */;
__le16 bdq_xoff_threshold[BDQ_NUM_IDS] /* BDQ XOFF threshold - when the number of entries drops below this threshold, XOFF is sent */;
__le16 bdq_xon_threshold[BDQ_NUM_IDS] /* BDQ XON threshold - when the number of entries rises above this threshold, XON is sent */;
__le16 cmdq_xoff_threshold /* CMDQ XOFF threshold - when the number of entries drops below this threshold, XOFF is sent */;
__le16 cmdq_xon_threshold /* CMDQ XON threshold - when the number of entries rises above this threshold, XON is sent */;
__le32 reserved1 /* reserved */;
};
/*
* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data)
*/
struct scsi_ram_per_bdq_resource_drv_data
{
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS] /* External ring data */;
};
/*
* SCSI SGL types
*/
enum scsi_sgl_mode
{
SCSI_TX_SLOW_SGL /* Slow-SGL: More than SCSI_NUM_SGES_SLOW_SGL_THR SGEs and there is at least 1 middle SGE that is smaller than a page size. May be only at TX */,
SCSI_FAST_SGL /* Fast SGL: Less than SCSI_NUM_SGES_SLOW_SGL_THR SGEs or all middle SGEs are at least a page size */,
MAX_SCSI_SGL_MODE
};
/*
* SCSI SGL parameters
*/
struct scsi_sgl_params
{
struct regpair sgl_addr /* SGL base address */;
__le32 sgl_total_length /* SGL total length (bytes) */;
__le32 sge_offset /* Offset in SGE (bytes) */;
__le16 sgl_num_sges /* Number of SGEs in the SGL */;
u8 sgl_index /* SGL index */;
u8 reserved;
};
/*
* SCSI terminate connection params
*/
struct scsi_terminate_extra_params
{
__le16 unsolicited_cq_count /* Counts number of CQ placements done due to arrival of unsolicited packets on this connection */;
__le16 cmdq_count /* Counts number of CMDQ placements on this connection */;
u8 reserved[4];
};
#endif /* __STORAGE_COMMON__ */

300
sys/dev/qlnx/qlnxe/tcp_common.h Normal file
View File

@@ -0,0 +1,300 @@
/*
* Copyright (c) 2017-2018 Cavium, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
/********************/
/* TCP FW CONSTANTS */
/********************/
#define TCP_INVALID_TIMEOUT_VAL -1
/*
* OOO opaque data received from LL2
*/
struct ooo_opaque
{
__le32 cid /* connection ID */;
u8 drop_isle /* isle number of the first isle to drop */;
u8 drop_size /* number of isles to drop */;
u8 ooo_opcode /* (use enum tcp_seg_placement_event) */;
u8 ooo_isle /* OOO isle number to add the packet to */;
};
/*
* tcp connect mode enum
*/
enum tcp_connect_mode
{
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
/*
* tcp function init parameters
*/
struct tcp_init_params
{
__le32 two_msl_timer /* 2MSL (used for TIME_WAIT state) timeout value */;
__le16 tx_sws_timer /* Transmission silly window syndrome timeout value */;
u8 maxFinRT /* Minimum Fin RT */;
u8 reserved[9];
};
/*
* tcp IPv4/IPv6 enum
*/
enum tcp_ip_version
{
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
/*
* tcp offload parameters
*/
struct tcp_offload_params
{
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
__le16 local_mac_addr_hi;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
u8 flags;
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 /* timestamp enable */
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 /* delayed ack enabled */
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 /* keep alive enabled */
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 /* nagle algorithm enabled */
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 /* delayed ack counter enabled */
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 /* fin already sent to far end */
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 /* fin received */
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
u8 ip_version;
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
u8 ttl;
u8 tos_or_tc;
__le16 remote_port;
__le16 local_port;
__le16 mss /* the mss derived from remote mss and local mtu, ipVersion options and tags */;
u8 rcv_wnd_scale;
u8 connect_mode /* TCP connect mode: use enum tcp_connect_mode */;
__le16 srtt /* in ms */;
__le32 cwnd /* absolute congestion window */;
__le32 ss_thresh;
__le16 reserved1;
u8 ka_max_probe_cnt;
u8 dup_ack_theshold;
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd /* absolute send window (not scaled) */;
__le32 rcv_wnd /* absolute receive window (not scaled) */;
__le32 snd_wl1 /* the segment sequence number used for the last window update */;
__le32 ts_recent /* The timestamp value to send in the next ACK */;
__le32 ts_recent_age /* The length of time, in ms, since the most recent timestamp was received */;
__le32 total_rt /* The total time, in ms, that has been spent retransmitting the current TCP segment */;
__le32 ka_timeout_delta /* The time remaining, in clock ticks, until the next keepalive timeout. A value of -1 indicates that the keepalive timer was not running when the connection was offloaded. */;
__le32 rt_timeout_delta /* The time remaining, in clock ticks, until the next retransmit timeout. A value of -1 indicates that the retransmit timer was not running when the connection was offloaded. */;
u8 dup_ack_cnt /* The number of ACKs that have been accepted for the same sequence number */;
u8 snd_wnd_probe_cnt /* The current send window probe round */;
u8 ka_probe_cnt /* the number of keepalive probes that have been sent that have not received a response */;
u8 rt_cnt /* The number of retransmits that have been sent */;
__le16 rtt_var /* in ms */;
__le16 fw_internal /* fw internal use - initialize value = 0 */;
__le32 ka_timeout /* This member specifies, in ms, the timeout interval for inactivity before sending a keepalive probe */;
__le32 ka_interval /* This member specifies, in ms, the timeout after which to retransmit a keepalive frame if no response is received to a keepalive probe */;
__le32 max_rt_time /* This member specifies, in ms, the maximum time that the offload target should spend retransmitting a segment */;
__le32 initial_rcv_wnd /* Initial receive window */;
u8 snd_wnd_scale;
u8 ack_frequency /* delayed ack counter threshold */;
__le16 da_timeout_value /* delayed ack timeout value in ms */;
__le32 reserved3[2];
};
/*
* tcp offload parameters
*/
struct tcp_offload_params_opt2
{
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
__le16 local_mac_addr_hi;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 vlan_id;
u8 flags;
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 /* timestamp enable */
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 /* delayed ack enabled */
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 /* keep alive enabled */
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
u8 ip_version;
__le32 remote_ip[4];
__le32 local_ip[4];
__le32 flow_label;
u8 ttl;
u8 tos_or_tc;
__le16 remote_port;
__le16 local_port;
__le16 mss /* the mss derived from remote mss and local mtu, ipVersion options and tags */;
u8 rcv_wnd_scale;
u8 connect_mode /* TCP connect mode: use enum tcp_connect_mode */;
__le16 syn_ip_payload_length /* length of TCP header in SYN packet - relevant for passive mode */;
__le32 syn_phy_addr_lo /* physical address (low) of SYN buffer - relevant for passive mode */;
__le32 syn_phy_addr_hi /* physical address (high) of SYN buffer - relevant for passive mode */;
__le32 reserved1[22];
};
/*
* tcp IPv4/IPv6 enum
*/
enum tcp_seg_placement_event
{
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
TCP_EVENT_ADD_ISLE_RIGHT,
TCP_EVENT_ADD_ISLE_LEFT,
TCP_EVENT_JOIN,
TCP_EVENT_DELETE_ISLES,
TCP_EVENT_NOP,
MAX_TCP_SEG_PLACEMENT_EVENT
};
/*
* tcp init parameters
*/
struct tcp_update_params
{
__le16 flags;
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0
#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1
#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11
#define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12
#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1
#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13
#define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1
#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
__le16 mss;
u8 ttl;
u8 tos_or_tc;
__le32 ka_timeout;
__le32 ka_interval;
__le32 max_rt_time;
__le32 flow_label;
__le32 initial_rcv_wnd;
u8 ka_max_probe_cnt;
u8 reserved1[7];
};
/*
* toe upload parameters
*/
struct tcp_upload_params
{
__le32 rcv_next;
__le32 snd_una;
__le32 snd_next;
__le32 snd_max;
__le32 snd_wnd /* absolute send window (not scaled) */;
__le32 rcv_wnd /* absolute receive window (not scaled) */;
__le32 snd_wl1 /* the segment sequence number used for the last window update */;
__le32 cwnd /* absolute congestion window */;
__le32 ss_thresh;
__le16 srtt /* in ms */;
__le16 rtt_var /* in ms */;
__le32 ts_time /* The current value of the adjusted timestamp */;
__le32 ts_recent /* The timestamp value to send in the next ACK */;
__le32 ts_recent_age /* The length of time, in ms, since the most recent timestamp was received */;
__le32 total_rt /* The total time, in ms, that has been spent retransmitting the current TCP segment */;
__le32 ka_timeout_delta /* The time remaining, in clock ticks, until the next keepalive timeout. A value of -1 indicates that the keepalive timer was not running when the connection was offloaded. */;
__le32 rt_timeout_delta /* The time remaining, in clock ticks, until the next retransmit timeout. A value of -1 indicates that the retransmit timer was not running when the connection was offloaded. */;
u8 dup_ack_cnt /* The number of ACKs that have been accepted for the same sequence number */;
u8 snd_wnd_probe_cnt /* The current send window probe round */;
u8 ka_probe_cnt /* the number of keepalive probes that have been sent that have not received a response */;
u8 rt_cnt /* The number of retransmits that have been sent */;
__le32 reserved;
};
#endif /* __TCP_COMMON__ */
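The _MASK/_SHIFT pairs above follow the usual HSI convention of composing a bitfield by shifting each value into place. A hedged sketch of that convention (the helper name is made up for illustration; the TCP_OFFLOAD_PARAMS_* macros are the ones defined in the header above):

/* Hedged illustration of the MASK/SHIFT convention; not part of the commit. */
static inline unsigned char
qlnx_tcp_offload_flags(int ts_en, int da_en, int ka_en)
{
        unsigned char flags = 0;

        flags |= (ts_en & TCP_OFFLOAD_PARAMS_TS_EN_MASK) <<
            TCP_OFFLOAD_PARAMS_TS_EN_SHIFT;
        flags |= (da_en & TCP_OFFLOAD_PARAMS_DA_EN_MASK) <<
            TCP_OFFLOAD_PARAMS_DA_EN_SHIFT;
        flags |= (ka_en & TCP_OFFLOAD_PARAMS_KA_EN_MASK) <<
            TCP_OFFLOAD_PARAMS_KA_EN_SHIFT;

        return (flags);
}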

View File

@@ -310,6 +310,7 @@ SUBDIR= \
${_qlxge} \
${_qlxgb} \
${_qlxgbe} \
${_qlnx} \
ral \
${_ralfw} \
${_random_fortuna} \
@@ -697,6 +698,7 @@ _pms= pms
_qlxge= qlxge
_qlxgb= qlxgb
_qlxgbe= qlxgbe
_qlnx= qlnx
_sfxge= sfxge
.if ${MK_BHYVE} != "no" || defined(ALL_MODULES)

38
sys/modules/qlnx/Makefile Normal file
View File

@@ -0,0 +1,38 @@
#/*
# * Copyright (c) 2017-2018 Cavium, Inc.
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# */
#/*
# * File : Makefile
# * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
# */
#
# $FreeBSD$
#
SUBDIR=qlnxe
.include <bsd.subdir.mk>

71
sys/modules/qlnx/qlnxe/Makefile Normal file
View File

@@ -0,0 +1,71 @@
#/*
# * Copyright (c) 2017-2018 Cavium, Inc.
# * All rights reserved.
# *
# * Redistribution and use in source and binary forms, with or without
# * modification, are permitted provided that the following conditions
# * are met:
# *
# * 1. Redistributions of source code must retain the above copyright
# * notice, this list of conditions and the following disclaimer.
# * 2. Redistributions in binary form must reproduce the above copyright
# * notice, this list of conditions and the following disclaimer in the
# * documentation and/or other materials provided with the distribution.
# *
# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# * POSSIBILITY OF SUCH DAMAGE.
# */
#/*
# * File : Makefile
# * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
# */
#
# $FreeBSD$
#
.PATH: ${SRCTOP}/sys/dev/qlnx/qlnxe
#.PATH: ${.CURDIR}
KMOD=if_qlnxe
SRCS=ecore_cxt.c ecore_dcbx.c ecore_dev.c ecore_hw.c
SRCS+=ecore_init_fw_funcs.c ecore_int.c ecore_mcp.c
SRCS+=ecore_sp_commands.c ecore_spq.c ecore_l2.c
SRCS+=ecore_init_ops.c ecore_dbg_fw_funcs.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
SRCS+= device_if.h
SRCS+= bus_if.h
SRCS+= pci_if.h
CWARNEXTRA += -Wno-cast-qual
CFLAGS += -DQLNX_DEBUG
CFLAGS += -DECORE_PACKAGE
CFLAGS += -DCONFIG_ECORE_L2
CFLAGS += -DECORE_CONFIG_DIRECT_HWFN
#CFLAGS += -g
#CFLAGS += -fno-inline
#CFLAGS += -DQLNX_SOFT_LRO
#CFLAGS += -DQLNX_QSORT_LRO
#CFLAGS += -DQLNX_MAX_COALESCE
#CFLAGS += -DQLNX_TRACE_LRO_CNT
#CFLAGS += -DQLNX_TRACE_TSO_PKT_LEN
.include <bsd.kmod.mk>