Add the sfxge(4) device driver, providing support for 10Gb Ethernet adapters

based on Solarflare SFC9000 family controllers.  The driver supports jumbo
frames, transmit/receive checksum offload, TCP Segmentation Offload (TSO),
Large Receive Offload (LRO), VLAN checksum offload, VLAN TSO, and Receive Side
Scaling (RSS) using MSI-X interrupts.

This work was sponsored by Solarflare Communications, Inc.

My sincere thanks to Ben Hutchings for doing a lot of the hard work!

Sponsored by:	Solarflare Communications, Inc.
MFC after:	3 weeks
This commit is contained in:
Philip Paeps 2011-11-16 17:11:13 +00:00
parent fbd80bd047
commit e948693ed7
50 changed files with 38419 additions and 0 deletions

View File

@ -377,6 +377,7 @@ MAN= aac.4 \
send.4 \
ses.4 \
sf.4 \
sfxge.4 \
sge.4 \
si.4 \
siba.4 \

92
share/man/man4/sfxge.4 Normal file
View File

@ -0,0 +1,92 @@
.\" Copyright (c) 2011 Solarflare Communications, Inc.
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd November 16, 2011
.Dt SFXGE 4
.Os
.Sh NAME
.Nm sfxge
.Nd "Solarflare 10Gb Ethernet adapter driver"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following lines in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device sfxge"
.Ed
.Pp
To load the driver as a
module at boot time, place the following line in
.Xr loader.conf 5 :
.Bd -literal -offset indent
sfxge_load="YES"
.Ed
.Sh DESCRIPTION
The
.Nm
driver provides support for 10Gb Ethernet adapters based on
Solarflare SFC9000 family controllers. The driver supports jumbo
frames, transmit/receive checksum offload, TCP Segmentation Offload
(TSO), Large Receive Offload (LRO), VLAN checksum offload, VLAN TSO,
and Receive Side Scaling (RSS) using MSI-X interrupts.
.Pp
The driver allocates 1 receive queue, transmit queue, event queue and
IRQ per CPU up to a maximum of 64. IRQ affinities should be spread
out using
.Xr cpuset 8 .
Interrupt moderation may be controlled through the sysctl
dev.sfxge.\fIindex\fR.int_mod (units are microseconds).
.Pp
For more information on configuring this device, see
.Xr ifconfig 8 .
.Pp
A large number of MAC, PHY and data path statistics are available
under the sysctl dev.sfxge.\fIindex\fR.stats. The adapter's VPD
fields including its serial number are available under the sysctl
dev.sfxge.\fIindex\fR.vpd.
.Sh HARDWARE
The
.Nm
driver supports all 10Gb Ethernet adapters based on Solarflare SFC9000
family controllers.
.Sh SUPPORT
For general information and support,
go to the Solarflare support website at:
.Pa https://support.solarflare.com .
.Sh SEE ALSO
.Xr arp 4 ,
.Xr netintro 4 ,
.Xr ng_ether 4 ,
.Xr vlan 4 ,
.Xr cpuset 8 ,
.Xr ifconfig 8
.Sh AUTHORS
The
.Nm
driver was written by
.An Philip Paeps
and
.An Solarflare Communications, Inc.

View File

@ -1671,6 +1671,37 @@ dev/scd/scd.c optional scd isa
dev/scd/scd_isa.c optional scd isa
dev/sdhci/sdhci.c optional sdhci pci
dev/sf/if_sf.c optional sf pci
dev/sfxge/common/efx_bootcfg.c optional sfxge inet pci
dev/sfxge/common/efx_ev.c optional sfxge inet pci
dev/sfxge/common/efx_filter.c optional sfxge inet pci
dev/sfxge/common/efx_intr.c optional sfxge inet pci
dev/sfxge/common/efx_mac.c optional sfxge inet pci
dev/sfxge/common/efx_mcdi.c optional sfxge inet pci
dev/sfxge/common/efx_mon.c optional sfxge inet pci
dev/sfxge/common/efx_nic.c optional sfxge inet pci
dev/sfxge/common/efx_nvram.c optional sfxge inet pci
dev/sfxge/common/efx_phy.c optional sfxge inet pci
dev/sfxge/common/efx_port.c optional sfxge inet pci
dev/sfxge/common/efx_rx.c optional sfxge inet pci
dev/sfxge/common/efx_sram.c optional sfxge inet pci
dev/sfxge/common/efx_tx.c optional sfxge inet pci
dev/sfxge/common/efx_vpd.c optional sfxge inet pci
dev/sfxge/common/efx_wol.c optional sfxge inet pci
dev/sfxge/common/siena_mac.c optional sfxge inet pci
dev/sfxge/common/siena_mon.c optional sfxge inet pci
dev/sfxge/common/siena_nic.c optional sfxge inet pci
dev/sfxge/common/siena_nvram.c optional sfxge inet pci
dev/sfxge/common/siena_phy.c optional sfxge inet pci
dev/sfxge/common/siena_sram.c optional sfxge inet pci
dev/sfxge/common/siena_vpd.c optional sfxge inet pci
dev/sfxge/sfxge.c optional sfxge inet pci
dev/sfxge/sfxge_dma.c optional sfxge inet pci
dev/sfxge/sfxge_ev.c optional sfxge inet pci
dev/sfxge/sfxge_intr.c optional sfxge inet pci
dev/sfxge/sfxge_mcdi.c optional sfxge inet pci
dev/sfxge/sfxge_port.c optional sfxge inet pci
dev/sfxge/sfxge_rx.c optional sfxge inet pci
dev/sfxge/sfxge_tx.c optional sfxge inet pci
dev/sge/if_sge.c optional sge pci
dev/si/si.c optional si
dev/si/si2_z280.c optional si

View File

@ -0,0 +1,800 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_EFSYS_H
#define _SYS_EFSYS_H
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifdef __cplusplus
extern "C" {
#endif
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/systm.h>
#include <machine/bus.h>
#include <machine/endian.h>
#define EFSYS_HAS_UINT64 1
#define EFSYS_USE_UINT64 0
#if _BYTE_ORDER == _BIG_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 1
#define EFSYS_IS_LITTLE_ENDIAN 0
#elif _BYTE_ORDER == _LITTLE_ENDIAN
#define EFSYS_IS_BIG_ENDIAN 0
#define EFSYS_IS_LITTLE_ENDIAN 1
#endif
#include "efx_types.h"
/* Common code requires this */
#if __FreeBSD_version < 800068
#define memmove(d, s, l) bcopy(s, d, l)
#endif
/* FreeBSD equivalents of Solaris things */
#ifndef _NOTE
#define _NOTE(s)
#endif
#ifndef B_FALSE
#define B_FALSE FALSE
#endif
#ifndef B_TRUE
#define B_TRUE TRUE
#endif
#ifndef IS_P2ALIGNED
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif
#ifndef P2ROUNDUP
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
#endif
/*
 * ISP2(x): true when x is a power of two (also true for 0).
 * Fix: the include guard previously tested "IS2P" (a typo), so it never
 * guarded the "ISP2" macro actually being defined.
 */
#ifndef ISP2
#define ISP2(x) (((x) & ((x) - 1)) == 0)
#endif
#define ENOTACTIVE EINVAL
/* Memory type to use on FreeBSD */
MALLOC_DECLARE(M_SFXGE);
/* Machine-dependent prefetch wrappers */

/*
 * Fix: use the always-defined __i386__/__amd64__ predefines, matching the
 * test at the pmap include below.  The single-underscore forms (__i386,
 * __amd64) are not defined in strict ISO modes, which would make these
 * wrappers silently disappear while the pmap fast path remained enabled.
 */
#if defined(__i386__) || defined(__amd64__)

/* Prefetch into all cache levels: data expected to be read repeatedly. */
static __inline void
prefetch_read_many(void *addr)
{
	__asm__(
	    "prefetcht0 (%0)"
	    :
	    : "r" (addr));
}

/* Non-temporal prefetch: data expected to be read once; avoids polluting caches. */
static __inline void
prefetch_read_once(void *addr)
{
	__asm__(
	    "prefetchnta (%0)"
	    :
	    : "r" (addr));
}

#endif
#if defined(__i386__) || defined(__amd64__)
#include <vm/vm.h>
#include <vm/pmap.h>
#endif
/*
 * Map a single contiguous mbuf to one DMA segment.
 * On i386/amd64 the bus address is derived directly with pmap_kextract(),
 * bypassing a full busdma load for speed; other architectures fall back to
 * bus_dmamap_load_mbuf_sg().
 *
 * NOTE(review): the fallback path ignores the bus_dmamap_load_mbuf_sg()
 * return value and assumes exactly one resulting segment — confirm that
 * callers only pass mbufs for which the load cannot fail or split.
 */
static __inline void
sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
		    struct mbuf *m, bus_dma_segment_t *seg)
{
#if defined(__i386__) || defined(__amd64__)
	seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
	seg->ds_len = m->m_len;
#else
	int nsegstmp;

	bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
#endif
}
/* Modifiers used for DOS builds */
#define __cs
#define __far
/* Modifiers used for Windows builds */
#define __in
#define __in_opt
#define __in_ecount(_n)
#define __in_ecount_opt(_n)
#define __in_bcount(_n)
#define __in_bcount_opt(_n)
#define __out
#define __out_opt
#define __out_ecount(_n)
#define __out_ecount_opt(_n)
#define __out_bcount(_n)
#define __out_bcount_opt(_n)
#define __deref_out
#define __inout
#define __inout_opt
#define __inout_ecount(_n)
#define __inout_ecount_opt(_n)
#define __inout_bcount(_n)
#define __inout_bcount_opt(_n)
#define __inout_bcount_full_opt(_n)
#define __deref_out_bcount_opt(n)
#define __checkReturn
#define __drv_when(_p, _c)
/* Code inclusion options */
#define EFSYS_OPT_NAMES 1
#define EFSYS_OPT_FALCON 0
#define EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE 0
#define EFSYS_OPT_SIENA 1
#ifdef DEBUG
#define EFSYS_OPT_CHECK_REG 1
#else
#define EFSYS_OPT_CHECK_REG 0
#endif
#define EFSYS_OPT_MCDI 1
#define EFSYS_OPT_MAC_FALCON_GMAC 0
#define EFSYS_OPT_MAC_FALCON_XMAC 0
#define EFSYS_OPT_MAC_STATS 1
#define EFSYS_OPT_LOOPBACK 0
#define EFSYS_OPT_MON_NULL 0
#define EFSYS_OPT_MON_LM87 0
#define EFSYS_OPT_MON_MAX6647 0
#define EFSYS_OPT_MON_SIENA 0
#define EFSYS_OPT_MON_STATS 0
#define EFSYS_OPT_PHY_NULL 0
#define EFSYS_OPT_PHY_QT2022C2 0
#define EFSYS_OPT_PHY_SFX7101 0
#define EFSYS_OPT_PHY_TXC43128 0
#define EFSYS_OPT_PHY_PM8358 0
#define EFSYS_OPT_PHY_SFT9001 0
#define EFSYS_OPT_PHY_QT2025C 0
#define EFSYS_OPT_PHY_STATS 1
#define EFSYS_OPT_PHY_PROPS 0
#define EFSYS_OPT_PHY_BIST 1
#define EFSYS_OPT_PHY_LED_CONTROL 1
#define EFSYS_OPT_PHY_FLAGS 0
#define EFSYS_OPT_VPD 1
#define EFSYS_OPT_NVRAM 1
#define EFSYS_OPT_NVRAM_FALCON_BOOTROM 0
#define EFSYS_OPT_NVRAM_SFT9001 0
#define EFSYS_OPT_NVRAM_SFX7101 0
#define EFSYS_OPT_BOOTCFG 0
#define EFSYS_OPT_PCIE_TUNE 0
#define EFSYS_OPT_DIAG 0
#define EFSYS_OPT_WOL 1
#define EFSYS_OPT_RX_SCALE 1
#define EFSYS_OPT_QSTATS 1
#define EFSYS_OPT_FILTER 0
#define EFSYS_OPT_RX_SCATTER 0
#define EFSYS_OPT_RX_HDR_SPLIT 0
#define EFSYS_OPT_EV_PREFETCH 0
#define EFSYS_OPT_DECODE_INTR_FATAL 1
/* ID */
typedef struct __efsys_identifier_s efsys_identifier_t;
/* PROBE */
#ifndef KDTRACE_HOOKS
#define EFSYS_PROBE(_name)
#define EFSYS_PROBE1(_name, _type1, _arg1)
#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3)
#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4)
#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5)
#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6)
#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6, _type7, _arg7)
#else /* KDTRACE_HOOKS */
#define EFSYS_PROBE(_name) \
DTRACE_PROBE(_name)
#define EFSYS_PROBE1(_name, _type1, _arg1) \
DTRACE_PROBE1(_name, _type1, _arg1)
#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3) \
DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3)
#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4) \
DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4)
#ifdef DTRACE_PROBE5
#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5) \
DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5)
#else
#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5) \
DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4)
#endif
#ifdef DTRACE_PROBE6
#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6) \
DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6)
#else
#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6) \
EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5)
#endif
#ifdef DTRACE_PROBE7
#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6, _type7, _arg7) \
DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6, _type7, _arg7)
#else
#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6, _type7, _arg7) \
EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
_type3, _arg3, _type4, _arg4, _type5, _arg5, \
_type6, _arg6)
#endif
#endif /* KDTRACE_HOOKS */
/* DMA */

/* Device-visible DMA (bus) address. */
typedef uint64_t efsys_dma_addr_t;

/*
 * One DMA-coherent memory region shared with the controller.
 * esm_base is the kernel virtual mapping and esm_addr the bus address
 * of the same esm_size bytes.
 */
typedef struct efsys_mem_s {
	bus_dma_tag_t		esm_tag;	/* busdma tag the map belongs to */
	bus_dmamap_t		esm_map;	/* busdma map backing the region */
	caddr_t			esm_base;	/* kernel virtual address */
	efsys_dma_addr_t	esm_addr;	/* bus (device-visible) address */
	size_t			esm_size;	/* length of the region in bytes */
} efsys_mem_t;
#define EFSYS_MEM_ZERO(_esmp, _size) \
do { \
(void) memset((_esmp)->esm_base, 0, (_size)); \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
("not power of 2 aligned")); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
(_edp)->ed_u32[0] = *addr; \
\
EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \
uint32_t, (_edp)->ed_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
("not power of 2 aligned")); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
(_eqp)->eq_u32[0] = *addr++; \
(_eqp)->eq_u32[1] = *addr; \
\
EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
uint32_t, (_eqp)->eq_u32[1], \
uint32_t, (_eqp)->eq_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
("not power of 2 aligned")); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
(_eop)->eo_u32[0] = *addr++; \
(_eop)->eo_u32[1] = *addr++; \
(_eop)->eo_u32[2] = *addr++; \
(_eop)->eo_u32[3] = *addr; \
\
EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
uint32_t, (_eop)->eo_u32[3], \
uint32_t, (_eop)->eo_u32[2], \
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
("not power of 2 aligned")); \
\
EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
uint32_t, (_edp)->ed_u32[0]); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
*addr = (_edp)->ed_u32[0]; \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
("not power of 2 aligned")); \
\
EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
uint32_t, (_eqp)->eq_u32[1], \
uint32_t, (_eqp)->eq_u32[0]); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
*addr++ = (_eqp)->eq_u32[0]; \
*addr = (_eqp)->eq_u32[1]; \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
do { \
uint32_t *addr; \
\
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
("not power of 2 aligned")); \
\
EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
uint32_t, (_eop)->eo_u32[3], \
uint32_t, (_eop)->eo_u32[2], \
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
\
addr = (void *)((_esmp)->esm_base + (_offset)); \
\
*addr++ = (_eop)->eo_u32[0]; \
*addr++ = (_eop)->eo_u32[1]; \
*addr++ = (_eop)->eo_u32[2]; \
*addr = (_eop)->eo_u32[3]; \
\
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_MEM_ADDR(_esmp) \
((_esmp)->esm_addr)
/* BAR */

/*
 * A mapped PCI BAR together with the mutex used (by the EFSYS_BAR_*
 * accessors) to serialize multi-word register accesses to it.
 */
typedef struct efsys_bar_s {
	struct mtx		esb_lock;	/* serializes non-atomic accesses */
	bus_space_tag_t		esb_tag;	/* bus space tag of the mapping */
	bus_space_handle_t	esb_handle;	/* bus space handle of the mapping */
	int			esb_rid;	/* resource ID of the PCI BAR */
	struct resource		*esb_res;	/* allocated PCI memory resource */
} efsys_bar_t;
#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
("not power of 2 aligned")); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_lock(&((_esbp)->esb_lock)); \
\
(_edp)->ed_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset)); \
\
EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
uint32_t, (_edp)->ed_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
("not power of 2 aligned")); \
\
mtx_lock(&((_esbp)->esb_lock)); \
\
(_eqp)->eq_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset)); \
(_eqp)->eq_u32[1] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset+4)); \
\
EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
uint32_t, (_eqp)->eq_u32[1], \
uint32_t, (_eqp)->eq_u32[0]); \
\
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
("not power of 2 aligned")); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_lock(&((_esbp)->esb_lock)); \
\
(_eop)->eo_u32[0] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset)); \
(_eop)->eo_u32[1] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset+4)); \
(_eop)->eo_u32[2] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset+8)); \
(_eop)->eo_u32[3] = bus_space_read_4((_esbp)->esb_tag, \
(_esbp)->esb_handle, (_offset+12)); \
\
EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
uint32_t, (_eop)->eo_u32[3], \
uint32_t, (_eop)->eo_u32[2], \
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
("not power of 2 aligned")); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_lock(&((_esbp)->esb_lock)); \
\
EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
uint32_t, (_edp)->ed_u32[0]); \
\
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset), (_edp)->ed_u32[0]); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
("not power of 2 aligned")); \
\
mtx_lock(&((_esbp)->esb_lock)); \
\
EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
uint32_t, (_eqp)->eq_u32[1], \
uint32_t, (_eqp)->eq_u32[0]); \
\
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset), (_eqp)->eq_u32[0]); \
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset+4), (_eqp)->eq_u32[1]); \
\
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
do { \
_NOTE(CONSTANTCONDITION) \
KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
("not power of 2 aligned")); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_lock(&((_esbp)->esb_lock)); \
\
EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
uint32_t, (_eop)->eo_u32[3], \
uint32_t, (_eop)->eo_u32[2], \
uint32_t, (_eop)->eo_u32[1], \
uint32_t, (_eop)->eo_u32[0]); \
\
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset), (_eop)->eo_u32[0]); \
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset+4), (_eop)->eo_u32[1]); \
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset+8), (_eop)->eo_u32[2]); \
bus_space_write_4((_esbp)->esb_tag, (_esbp)->esb_handle,\
(_offset+12), (_eop)->eo_u32[3]); \
\
_NOTE(CONSTANTCONDITION) \
if (_lock) \
mtx_unlock(&((_esbp)->esb_lock)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
/* SPIN */
#define EFSYS_SPIN(_us) \
do { \
DELAY(_us); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_SLEEP EFSYS_SPIN
/* BARRIERS */
/* Strict ordering guaranteed by devacc.devacc_attr_dataorder */
#define EFSYS_MEM_READ_BARRIER()
#define EFSYS_PIO_WRITE_BARRIER()
/* TIMESTAMP */

/* Timestamps are expressed in microseconds. */
typedef clock_t efsys_timestamp_t;

/*
 * Store the current uptime, in microseconds, in *_usp.
 * Fix: ticks are converted to microseconds as (ticks * 1000000) / hz;
 * the previous expression (ticks * hz / 1000000) had the scale factor
 * inverted and yielded wildly wrong timestamps for common hz values.
 */
#define EFSYS_TIMESTAMP(_usp) \
	do { \
		clock_t now; \
		\
		now = ticks; \
		*(_usp) = now * 1000000 / hz; \
	_NOTE(CONSTANTCONDITION) \
	} while (B_FALSE)
/* KMEM */
#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
do { \
(_esip) = (_esip); \
(_p) = malloc((_size), M_SFXGE, M_WAITOK|M_ZERO); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_KMEM_FREE(_esip, _size, _p) \
do { \
(void) (_esip); \
(void) (_size); \
free((_p), M_SFXGE); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
/* LOCK */
typedef struct mtx efsys_lock_t;
#define EFSYS_LOCK_MAGIC 0x000010c4
#define EFSYS_LOCK(_lockp, _state) \
do { \
mtx_lock(_lockp); \
(_state) = EFSYS_LOCK_MAGIC; \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_UNLOCK(_lockp, _state) \
do { \
if ((_state) != EFSYS_LOCK_MAGIC) \
KASSERT(B_FALSE, ("not locked")); \
mtx_unlock(_lockp); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
/* PREEMPT */

/*
 * Disable/re-enable kernel preemption around a short critical region.
 * The (_state) = (_state) self-assignment only silences "set but unused"
 * warnings in the common code; FreeBSD's critical sections carry no state.
 * Fix: critical_exit() takes no argument (see critical_exit(9)); the
 * previous expansion passed _state, which could not compile at a use site.
 */
#define EFSYS_PREEMPT_DISABLE(_state) \
	do { \
		(_state) = (_state); \
		critical_enter(); \
	_NOTE(CONSTANTCONDITION) \
	} while (B_FALSE)

#define EFSYS_PREEMPT_ENABLE(_state) \
	do { \
		(_state) = (_state); \
		critical_exit(); \
	_NOTE(CONSTANTCONDITION) \
	} while (B_FALSE)
/* STAT */
typedef uint64_t efsys_stat_t;
#define EFSYS_STAT_INCR(_knp, _delta) \
do { \
*(_knp) += (_delta); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_DECR(_knp, _delta) \
do { \
*(_knp) -= (_delta); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_SET(_knp, _val) \
do { \
*(_knp) = (_val); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
do { \
*(_knp) = le64toh((_valp)->eq_u64[0]); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
do { \
*(_knp) = le32toh((_valp)->ed_u32[0]); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
do { \
*(_knp) += le64toh((_valp)->eq_u64[0]); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
do { \
*(_knp) -= le64toh((_valp)->eq_u64[0]); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
/* ERR */
extern void sfxge_err(efsys_identifier_t *, unsigned int,
uint32_t, uint32_t);
#if EFSYS_OPT_DECODE_INTR_FATAL
#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
do { \
sfxge_err((_esip), (_code), (_dword0), (_dword1)); \
_NOTE(CONSTANTCONDITION) \
} while (B_FALSE)
#endif
/* ASSERT */
#define EFSYS_ASSERT(_exp) do { \
if (!(_exp)) \
panic(#_exp); \
} while (0)
#define EFSYS_ASSERT3(_x, _op, _y, _t) do { \
const _t __x = (_t)(_x); \
const _t __y = (_t)(_y); \
if (!(__x _op __y)) \
panic("assertion failed at %s:%u", __FILE__, __LINE__); \
} while(0)
#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
#ifdef __cplusplus
}
#endif
#endif /* _SYS_EFSYS_H */

1893
sys/dev/sfxge/common/efx.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,342 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_impl.h"
#if EFSYS_OPT_BOOTCFG
/*
* Maximum size of BOOTCFG block across all nics as understood by SFCgPXE.
* A multiple of 0x100 so trailing 0xff characters don't contribute to the
* checksum.
*/
#define BOOTCFG_MAX_SIZE 0x1000
#define DHCP_END (uint8_t)0xff
#define DHCP_PAD (uint8_t)0
/*
 * Additive byte checksum over a bootcfg buffer.
 * A correctly checksummed sector sums to zero.
 */
static	__checkReturn		uint8_t
efx_bootcfg_csum(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	_NOTE(ARGUNUSED(enp))

	const uint8_t *p = (const uint8_t *)data;
	const uint8_t *limit = p + size;
	uint8_t sum = 0;

	while (p < limit)
		sum += *p++;

	return (sum);
}
/*
 * Walk and validate the DHCP-style option stream in a bootcfg sector.
 *
 * data[0] is the checksum byte; (tag, length, value) options follow.
 * On success, *usedp (if non-NULL) receives the byte count consumed up
 * to and including the DHCP_END tag, or 0 if no DHCP_END was found.
 * Returns 0, ENOSPC if an option overruns the buffer, or EINVAL if the
 * whole-sector checksum is non-zero.
 */
static	__checkReturn		int
efx_bootcfg_verify(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__out			size_t *usedp)
{
	size_t offset = 0;
	size_t used = 0;
	int rc;

	/* Start parsing tags immediately after the checksum */
	for (offset = 1; offset < size; ) {
		uint8_t tag;
		uint8_t length;

		/* Consume tag */
		tag = data[offset];
		if (tag == DHCP_END) {
			offset++;
			used = offset;
			break;
		}
		if (tag == DHCP_PAD) {
			/* PAD options carry no length byte */
			offset++;
			continue;
		}

		/* Consume length */
		if (offset + 1 >= size) {
			rc = ENOSPC;
			goto fail1;
		}
		length = data[offset + 1];

		/* Consume *length */
		if (offset + 1 + length >= size) {
			rc = ENOSPC;
			goto fail2;
		}
		offset += 2 + length;
		used = offset;
	}

	/* Checksum the entire sector, including bytes after any DHCP_END */
	if (efx_bootcfg_csum(enp, data, size) != 0) {
		rc = EINVAL;
		goto fail3;
	}

	if (usedp != NULL)
		*usedp = used;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read the boot configuration sector into the caller's buffer.
 *
 * The whole sector is always read — via a temporary bounce buffer when
 * the caller's buffer is smaller than the sector — because legacy
 * sectors need not end in DHCP_END.  The returned data is normalized:
 * it ends with DHCP_END, unused trailing bytes are zeroed, and the
 * checksum byte data[0] is adjusted so the full "size" bytes sum to
 * zero.
 *
 * Returns 0 on success; ENOMEM, ENOSPC or an NVRAM error otherwise.
 */
	int
efx_bootcfg_read(
	__in			efx_nic_t *enp,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	uint8_t *payload = NULL;
	size_t used_bytes;
	size_t sector_length;
	int rc;

	rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
	if (rc != 0)
		goto fail1;

	/*
	 * We need to read the entire BOOTCFG area to ensure we read all the
	 * tags, because legacy bootcfg sectors are not guaranteed to end with
	 * a DHCP_END character.  If the user hasn't supplied a sufficiently
	 * large buffer then use our own buffer.
	 */
	if (sector_length > BOOTCFG_MAX_SIZE)
		sector_length = BOOTCFG_MAX_SIZE;
	if (sector_length > size) {
		EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
		if (payload == NULL) {
			rc = ENOMEM;
			goto fail2;
		}
	} else
		payload = (uint8_t *)data;

	if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
		goto fail3;

	rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
	    (caddr_t)payload, sector_length);

	efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);

	if (rc != 0)
		goto fail4;

	/* Verify that the area is correctly formatted and checksummed */
	rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
	    &used_bytes);
	if (rc != 0 || used_bytes == 0) {
		/* Invalid or empty sector: synthesize a minimal valid one */
		payload[0] = (uint8_t)~DHCP_END;
		payload[1] = DHCP_END;
		used_bytes = 2;
	}

	EFSYS_ASSERT(used_bytes >= 2);	/* checksum and DHCP_END */
	EFSYS_ASSERT(used_bytes <= sector_length);

	/*
	 * Legacy bootcfg sectors don't terminate with a DHCP_END character.
	 * Modify the returned payload so it does.  BOOTCFG_MAX_SIZE is by
	 * definition large enough for any valid (per-port) bootcfg sector,
	 * so reinitialise the sector if there isn't room for the character.
	 */
	if (payload[used_bytes - 1] != DHCP_END) {
		if (used_bytes + 1 > sector_length) {
			payload[0] = 0;
			used_bytes = 1;
		}

		payload[used_bytes] = DHCP_END;
		++used_bytes;
	}

	/*
	 * Verify that the user supplied buffer is large enough for the
	 * entire used bootcfg area, then copy into the user supplied buffer.
	 */
	if (used_bytes > size) {
		rc = ENOSPC;
		goto fail5;
	}
	if (sector_length > size) {
		/* We used a bounce buffer; copy back and release it */
		memcpy(data, payload, used_bytes);
		EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
	}

	/* Zero out the unused portion of the user buffer */
	if (used_bytes < size)
		(void) memset(data + used_bytes, 0, size - used_bytes);

	/*
	 * The checksum includes trailing data after any DHCP_END character,
	 * which we've just modified (by truncation or appending DHCP_END).
	 */
	data[0] -= efx_bootcfg_csum(enp, data, size);

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);

	/* Release the bounce buffer, if one was allocated */
	if (sector_length > size)
		EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Write a boot configuration sector.
 *
 * The caller's data must already be a valid option stream terminated
 * by DHCP_END (verified here).  The sector is erased and rewritten in
 * chunk_length pieces; bytes beyond the caller's data are zeroed, and
 * the checksum byte in the first chunk is adjusted so that the stored
 * sector sums to zero.
 *
 * Returns 0 on success; ENOENT if data lacks a DHCP_END terminator,
 * ENOSPC if it exceeds the hardware sector, ENOMEM, or an NVRAM error.
 */
	int
efx_bootcfg_write(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	uint8_t *chunk;
	uint8_t checksum;
	size_t sector_length;
	size_t chunk_length;
	size_t used_bytes;
	size_t offset;
	size_t remaining;
	int rc;

	rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &sector_length);
	if (rc != 0)
		goto fail1;

	if (sector_length > BOOTCFG_MAX_SIZE)
		sector_length = BOOTCFG_MAX_SIZE;

	if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
		goto fail2;

	/* The caller *must* terminate their block with a DHCP_END character */
	EFSYS_ASSERT(used_bytes >= 2);	/* checksum and DHCP_END */
	if ((uint8_t)data[used_bytes - 1] != DHCP_END) {
		rc = ENOENT;
		goto fail3;
	}

	/* Check that the hardware has support for this much data */
	if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
		rc = ENOSPC;
		goto fail4;
	}

	rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, &chunk_length);
	if (rc != 0)
		goto fail5;

	/* Scratch buffer for one NVRAM write chunk */
	EFSYS_KMEM_ALLOC(enp->en_esip, chunk_length, chunk);
	if (chunk == NULL) {
		rc = ENOMEM;
		goto fail6;
	}

	if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
		goto fail7;

	/*
	 * Write the entire sector_length bytes of data in chunks. Zero out
	 * all data following the DHCP_END, and adjust the checksum
	 */
	checksum = efx_bootcfg_csum(enp, data, used_bytes);
	for (offset = 0; offset < sector_length; offset += remaining) {
		remaining = MIN(chunk_length, sector_length - offset);

		/* Fill chunk */
		(void) memset(chunk, 0x0, chunk_length);
		if (offset < used_bytes)
			memcpy(chunk, data + offset,
			    MIN(remaining, used_bytes - offset));

		/* Adjust checksum */
		if (offset == 0)
			chunk[0] -= checksum;

		if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
		    offset, (caddr_t)chunk, remaining)) != 0)
			goto fail8;
	}

	efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);

	EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);

	return (0);

fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);

	EFSYS_KMEM_FREE(enp->en_esip, chunk_length, chunk);
fail6:
	EFSYS_PROBE(fail6);

	efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_BOOTCFG */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,734 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_EFX_IMPL_H
#define _SYS_EFX_IMPL_H
#include "efsys.h"
#include "efx.h"
#include "efx_regs.h"
#if EFSYS_OPT_FALCON
#include "falcon_impl.h"
#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
#include "siena_impl.h"
#endif /* EFSYS_OPT_SIENA */
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Module initialisation state flags, kept in efx_nic_s.en_mod_flags.
 * Each bit records that the corresponding module has been initialised
 * (e.g. efx_intr_init() sets EFX_MOD_INTR and efx_intr_fini() clears it).
 */
#define EFX_MOD_MCDI 0x00000001
#define EFX_MOD_PROBE 0x00000002
#define EFX_MOD_NVRAM 0x00000004
#define EFX_MOD_VPD 0x00000008
#define EFX_MOD_NIC 0x00000010
#define EFX_MOD_INTR 0x00000020
#define EFX_MOD_EV 0x00000040
#define EFX_MOD_RX 0x00000080
#define EFX_MOD_TX 0x00000100
#define EFX_MOD_PORT 0x00000200
#define EFX_MOD_MON 0x00000400
#define EFX_MOD_WOL 0x00000800
#define EFX_MOD_FILTER 0x00001000

/* Pending reset work, kept in efx_nic_s.en_reset_flags */
#define EFX_RESET_MAC 0x00000001
#define EFX_RESET_PHY 0x00000002

/* Flavours of MAC block supported across the controller families */
typedef enum efx_mac_type_e {
    EFX_MAC_INVALID = 0,
    EFX_MAC_FALCON_GMAC,
    EFX_MAC_FALCON_XMAC,
    EFX_MAC_SIENA,
    EFX_MAC_NTYPES
} efx_mac_type_t;
/*
 * MAC method table; one instance exists per efx_mac_type_t.
 * Entries marked optional may be NULL.
 */
typedef struct efx_mac_ops_s {
    int (*emo_reset)(efx_nic_t *); /* optional */
    int (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
    int (*emo_up)(efx_nic_t *, boolean_t *);
    int (*emo_reconfigure)(efx_nic_t *);
#if EFSYS_OPT_LOOPBACK
    int (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
        efx_loopback_type_t);
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
    int (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
    int (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
        uint16_t, boolean_t);
    int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
        efsys_stat_t *, uint32_t *);
#endif /* EFSYS_OPT_MAC_STATS */
} efx_mac_ops_t;
/* PHY method table; entries marked optional may be NULL */
typedef struct efx_phy_ops_s {
    int (*epo_power)(efx_nic_t *, boolean_t); /* optional */
    int (*epo_reset)(efx_nic_t *);
    int (*epo_reconfigure)(efx_nic_t *);
    int (*epo_verify)(efx_nic_t *);
    int (*epo_uplink_check)(efx_nic_t *,
        boolean_t *); /* optional */
    int (*epo_downlink_check)(efx_nic_t *, efx_link_mode_t *,
        unsigned int *, uint32_t *);
    int (*epo_oui_get)(efx_nic_t *, uint32_t *);
#if EFSYS_OPT_PHY_STATS
    int (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
        uint32_t *);
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
    const char __cs *(*epo_prop_name)(efx_nic_t *, unsigned int);
#endif /* EFSYS_OPT_NAMES */
    int (*epo_prop_get)(efx_nic_t *, unsigned int, uint32_t,
        uint32_t *);
    int (*epo_prop_set)(efx_nic_t *, unsigned int, uint32_t);
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
    int (*epo_bist_start)(efx_nic_t *, efx_phy_bist_type_t);
    int (*epo_bist_poll)(efx_nic_t *, efx_phy_bist_type_t,
        efx_phy_bist_result_t *, uint32_t *,
        unsigned long *, size_t);
    void (*epo_bist_stop)(efx_nic_t *, efx_phy_bist_type_t);
#endif /* EFSYS_OPT_PHY_BIST */
} efx_phy_ops_t;
/* Per-port state: MAC/PHY configuration, link state and method tables */
typedef struct efx_port_s {
    efx_mac_type_t ep_mac_type;
    uint32_t ep_phy_type;
    uint8_t ep_port;
    uint32_t ep_mac_pdu;
    uint8_t ep_mac_addr[6];
    efx_link_mode_t ep_link_mode;
    boolean_t ep_unicst;
    boolean_t ep_brdcst;
    unsigned int ep_fcntl;
    boolean_t ep_fcntl_autoneg;
    efx_oword_t ep_multicst_hash[2];
#if EFSYS_OPT_LOOPBACK
    efx_loopback_type_t ep_loopback_type;
    efx_link_mode_t ep_loopback_link_mode;
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_PHY_FLAGS
    uint32_t ep_phy_flags;
#endif /* EFSYS_OPT_PHY_FLAGS */
#if EFSYS_OPT_PHY_LED_CONTROL
    efx_phy_led_mode_t ep_phy_led_mode;
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
    efx_phy_media_type_t ep_fixed_port_type;
    efx_phy_media_type_t ep_module_type;
    uint32_t ep_adv_cap_mask;
    uint32_t ep_lp_cap_mask;
    uint32_t ep_default_adv_cap_mask;
    uint32_t ep_phy_cap_mask;
    /* Per-PHY workaround state (anonymous union keyed by PHY type) */
#if EFSYS_OPT_PHY_TXC43128 || EFSYS_OPT_PHY_QT2025C
    union {
        struct {
            unsigned int bug10934_count;
        } ep_txc43128;
        struct {
            unsigned int bug17190_count;
        } ep_qt2025c;
    };
#endif
    boolean_t ep_mac_poll_needed; /* falcon only */
    boolean_t ep_mac_up; /* falcon only */
    uint32_t ep_fwver; /* falcon only */
    boolean_t ep_mac_drain;
    boolean_t ep_mac_stats_pending;
#if EFSYS_OPT_PHY_BIST
    efx_phy_bist_type_t ep_current_bist;
#endif
    efx_mac_ops_t *ep_emop;
    efx_phy_ops_t *ep_epop;
} efx_port_t;
/* Monitor (sensor) method table */
typedef struct efx_mon_ops_s {
    int (*emo_reset)(efx_nic_t *);
    int (*emo_reconfigure)(efx_nic_t *);
#if EFSYS_OPT_MON_STATS
    int (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
        efx_mon_stat_value_t *);
#endif /* EFSYS_OPT_MON_STATS */
} efx_mon_ops_t;

/* Monitor state */
typedef struct efx_mon_s {
    efx_mon_type_t em_type;
    efx_mon_ops_t *em_emop;
} efx_mon_t;

/* Interrupt module state (see efx_intr_init()) */
typedef struct efx_intr_s {
    efx_intr_type_t ei_type;
    efsys_mem_t *ei_esmp; /* interrupt status vector memory */
    unsigned int ei_level; /* level used for fatal interrupts */
} efx_intr_t;
/* Per-family NIC method table */
typedef struct efx_nic_ops_s {
    int (*eno_probe)(efx_nic_t *);
    int (*eno_reset)(efx_nic_t *);
    int (*eno_init)(efx_nic_t *);
#if EFSYS_OPT_DIAG
    int (*eno_sram_test)(efx_nic_t *, efx_sram_pattern_fn_t);
    int (*eno_register_test)(efx_nic_t *);
#endif /* EFSYS_OPT_DIAG */
    void (*eno_fini)(efx_nic_t *);
    void (*eno_unprobe)(efx_nic_t *);
} efx_nic_ops_t;

/* Target queue counts */
#define EFX_TXQ_LIMIT_TARGET 259
#define EFX_RXQ_LIMIT_TARGET 768
#if EFSYS_OPT_FILTER

/* Hardware receive/transmit filter types */
typedef enum efx_filter_type_e {
    EFX_FILTER_RX_TCP_FULL, /* TCP/IPv4 4-tuple {dIP,dTCP,sIP,sTCP} */
    EFX_FILTER_RX_TCP_WILD, /* TCP/IPv4 dest {dIP,dTCP, -, -} */
    EFX_FILTER_RX_UDP_FULL, /* UDP/IPv4 4-tuple {dIP,dUDP,sIP,sUDP} */
    EFX_FILTER_RX_UDP_WILD, /* UDP/IPv4 dest {dIP,dUDP, -, -} */

#if EFSYS_OPT_SIENA
    EFX_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
    EFX_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */

    EFX_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
    EFX_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
    EFX_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dTCP,sIP,sTCP} */
    EFX_FILTER_TX_UDP_WILD, /* UDP/IPv4 source (host, port) */

    EFX_FILTER_TX_MAC_FULL, /* Ethernet source (MAC address, VLAN ID) */
    EFX_FILTER_TX_MAC_WILD, /* Ethernet source (MAC address) */
#endif /* EFSYS_OPT_SIENA */

    EFX_FILTER_NTYPES
} efx_filter_type_t;

/* Hardware filter tables */
typedef enum efx_filter_tbl_id_e {
    EFX_FILTER_TBL_RX_IP = 0,
    EFX_FILTER_TBL_RX_MAC,
    EFX_FILTER_TBL_TX_IP,
    EFX_FILTER_TBL_TX_MAC,
    EFX_FILTER_NTBLS
} efx_filter_tbl_id_t;

/* Software shadow of one hardware filter table */
typedef struct efx_filter_tbl_s {
    int eft_size; /* number of entries */
    int eft_used; /* active count */
    uint32_t *eft_bitmap; /* active bitmap */
    efx_filter_spec_t *eft_spec; /* array of saved specs */
} efx_filter_tbl_t;

/* Filter module state */
typedef struct efx_filter_s {
    efx_filter_tbl_t ef_tbl[EFX_FILTER_NTBLS];
    unsigned int ef_depth[EFX_FILTER_NTYPES];
} efx_filter_t;

extern __checkReturn int
efx_filter_insert_filter(
    __in efx_nic_t *enp,
    __in efx_filter_spec_t *spec,
    __in boolean_t replace);

extern __checkReturn int
efx_filter_remove_filter(
    __in efx_nic_t *enp,
    __in efx_filter_spec_t *spec);

extern void
efx_filter_remove_index(
    __inout efx_nic_t *enp,
    __in efx_filter_type_t type,
    __in int filter_idx);

extern void
efx_filter_redirect_index(
    __inout efx_nic_t *enp,
    __in efx_filter_type_t type,
    __in int filter_index,
    __in int rxq_index);

extern __checkReturn int
efx_filter_clear_tbl(
    __in efx_nic_t *enp,
    __in efx_filter_tbl_id_t tbl);

#endif /* EFSYS_OPT_FILTER */
#if EFSYS_OPT_NVRAM

/* NVRAM method table (per controller family) */
typedef struct efx_nvram_ops_s {
#if EFSYS_OPT_DIAG
    int (*envo_test)(efx_nic_t *);
#endif /* EFSYS_OPT_DIAG */
    int (*envo_size)(efx_nic_t *, efx_nvram_type_t, size_t *);
    int (*envo_get_version)(efx_nic_t *, efx_nvram_type_t,
        uint32_t *, uint16_t *);
    int (*envo_rw_start)(efx_nic_t *, efx_nvram_type_t, size_t *);
    int (*envo_read_chunk)(efx_nic_t *, efx_nvram_type_t,
        unsigned int, caddr_t, size_t);
    int (*envo_erase)(efx_nic_t *, efx_nvram_type_t);
    int (*envo_write_chunk)(efx_nic_t *, efx_nvram_type_t,
        unsigned int, caddr_t, size_t);
    void (*envo_rw_finish)(efx_nic_t *, efx_nvram_type_t);
    int (*envo_set_version)(efx_nic_t *, efx_nvram_type_t, uint16_t *);
} efx_nvram_ops_t;
#endif /* EFSYS_OPT_NVRAM */

#if EFSYS_OPT_VPD

/* Vital Product Data method table (per controller family) */
typedef struct efx_vpd_ops_s {
    int (*evpdo_init)(efx_nic_t *);
    int (*evpdo_size)(efx_nic_t *, size_t *);
    int (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
    int (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
    int (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
    int (*evpdo_get)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *);
    int (*evpdo_set)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *);
    int (*evpdo_next)(efx_nic_t *, caddr_t, size_t, efx_vpd_value_t *,
        unsigned int *);
    int (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
    void (*evpdo_fini)(efx_nic_t *);
} efx_vpd_ops_t;
#endif /* EFSYS_OPT_VPD */
/*
 * Central per-NIC state.  en_magic must equal EFX_NIC_MAGIC for a valid
 * instance; en_mod_flags tracks which EFX_MOD_* modules are initialised.
 */
struct efx_nic_s {
    uint32_t en_magic;
    efx_family_t en_family;
    uint32_t en_features;
    efsys_identifier_t *en_esip;
    efsys_lock_t *en_eslp;
    efsys_bar_t *en_esbp;
    unsigned int en_mod_flags; /* EFX_MOD_* */
    unsigned int en_reset_flags; /* EFX_RESET_* */
    efx_nic_cfg_t en_nic_cfg;
    efx_port_t en_port;
    efx_mon_t en_mon;
    efx_intr_t en_intr;
    uint32_t en_ev_qcount;
    uint32_t en_rx_qcount;
    uint32_t en_tx_qcount;
    efx_nic_ops_t *en_enop;
#if EFSYS_OPT_FILTER
    efx_filter_t en_filter;
#endif /* EFSYS_OPT_FILTER */
#if EFSYS_OPT_NVRAM
    efx_nvram_type_t en_nvram_locked;
    efx_nvram_ops_t *en_envop;
#endif /* EFSYS_OPT_NVRAM */
#if EFSYS_OPT_VPD
    efx_vpd_ops_t *en_evpdop;
#endif /* EFSYS_OPT_VPD */
    /* Family-specific state, keyed by en_family */
    union {
#if EFSYS_OPT_FALCON
        struct {
            falcon_spi_dev_t enu_fsd[FALCON_SPI_NTYPES];
            falcon_i2c_t enu_fip;
            boolean_t enu_i2c_locked;
#if EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
            const uint8_t *enu_forced_cfg;
#endif /* EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE */
            uint8_t enu_mon_devid;
#if EFSYS_OPT_PCIE_TUNE
            unsigned int enu_nlanes;
#endif /* EFSYS_OPT_PCIE_TUNE */
            uint16_t enu_board_rev;
            boolean_t enu_internal_sram;
            uint8_t enu_sram_num_bank;
            uint8_t enu_sram_bank_size;
        } falcon;
#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
        struct {
#if EFSYS_OPT_MCDI
            efx_mcdi_iface_t enu_mip;
#endif /* EFSYS_OPT_MCDI */
#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
            unsigned int enu_partn_mask;
#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
#if EFSYS_OPT_VPD
            caddr_t enu_svpd;
            size_t enu_svpd_length;
#endif /* EFSYS_OPT_VPD */
        } siena;
#endif /* EFSYS_OPT_SIENA */
    } en_u;
};

#define EFX_NIC_MAGIC 0x02121996
/* Event decode callback; returns whether event processing should stop */
typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
    const efx_ev_callbacks_t *, void *);

/* Event queue state; ee_magic must equal EFX_EVQ_MAGIC */
struct efx_evq_s {
    uint32_t ee_magic;
    efx_nic_t *ee_enp;
    unsigned int ee_index;
    unsigned int ee_mask;
    efsys_mem_t *ee_esmp;
#if EFSYS_OPT_QSTATS
    uint32_t ee_stat[EV_NQSTATS];
#endif /* EFSYS_OPT_QSTATS */
    /* One decode handler per hardware event code */
    efx_ev_handler_t ee_handler[1 << FSF_AZ_EV_CODE_WIDTH];
};

#define EFX_EVQ_MAGIC 0x08081997

#define EFX_EV_TIMER_QUANTUM 5

/* Receive queue state; er_magic must equal EFX_RXQ_MAGIC */
struct efx_rxq_s {
    uint32_t er_magic;
    efx_nic_t *er_enp;
    unsigned int er_index;
    unsigned int er_mask;
    efsys_mem_t *er_esmp;
};

#define EFX_RXQ_MAGIC 0x15022005

/* Transmit queue state; et_magic must equal EFX_TXQ_MAGIC */
struct efx_txq_s {
    uint32_t et_magic;
    efx_nic_t *et_enp;
    unsigned int et_index;
    unsigned int et_mask;
    efsys_mem_t *et_esmp;
#if EFSYS_OPT_QSTATS
    uint32_t et_stat[TX_NQSTATS];
#endif /* EFSYS_OPT_QSTATS */
};

#define EFX_TXQ_MAGIC 0x05092005
/* Copy a six-byte Ethernet address */
#define EFX_MAC_ADDR_COPY(_dst, _src) \
    do { \
        (_dst)[0] = (_src)[0]; \
        (_dst)[1] = (_src)[1]; \
        (_dst)[2] = (_src)[2]; \
        (_dst)[3] = (_src)[3]; \
        (_dst)[4] = (_src)[4]; \
        (_dst)[5] = (_src)[5]; \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#if EFSYS_OPT_CHECK_REG
/*
 * Assert that a register exists on the current controller revision.
 * Register names encode their supported revision range in characters
 * 4 and 5 (e.g. FR_AZ_..., FR_BZ_..., FR_CZ_...).
 */
#define EFX_CHECK_REG(_enp, _reg) \
    do { \
        const char __cs *name = #_reg; \
        char min = name[4]; \
        char max = name[5]; \
        char rev; \
        \
        switch ((_enp)->en_family) { \
        case EFX_FAMILY_FALCON: \
            rev = 'B'; \
            break; \
        \
        case EFX_FAMILY_SIENA: \
            rev = 'C'; \
            break; \
        \
        default: \
            rev = '?'; \
            break; \
        } \
        \
        EFSYS_ASSERT3S(rev, >=, min); \
        EFSYS_ASSERT3S(rev, <=, max); \
        \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)
#else
#define EFX_CHECK_REG(_enp, _reg) do { \
    _NOTE(CONSTANTCONDITION) \
    } while(B_FALSE)
#endif
/*
 * BAR register access wrappers.  Each macro validates that the register
 * exists on the current controller revision (EFX_CHECK_REG), performs
 * the access through the EFSYS_BAR_* primitives, and emits a probe with
 * the register name, offset and value(s).  Reads probe after the access;
 * writes probe before it.  The _TBL_ variants index into a register
 * table using the register's _STEP stride.
 */
#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
            (_edp), (_lock)); \
        EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_edp)->ed_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_edp)->ed_u32[0]); \
        EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
            (_edp), (_lock)); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_READQ(_enp, _reg, _eqp) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
            (_eqp)); \
        EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eqp)->eq_u32[1], \
            uint32_t, (_eqp)->eq_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eqp)->eq_u32[1], \
            uint32_t, (_eqp)->eq_u32[0]); \
        EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
            (_eqp)); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_READO(_enp, _reg, _eop) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
            (_eop), B_TRUE); \
        EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eop)->eo_u32[3], \
            uint32_t, (_eop)->eo_u32[2], \
            uint32_t, (_eop)->eo_u32[1], \
            uint32_t, (_eop)->eo_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eop)->eo_u32[3], \
            uint32_t, (_eop)->eo_u32[2], \
            uint32_t, (_eop)->eo_u32[1], \
            uint32_t, (_eop)->eo_u32[0]); \
        EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
            (_eop), B_TRUE); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READD((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_edp), (_lock)); \
        EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_edp)->ed_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_edp)->ed_u32[0]); \
        EFSYS_BAR_WRITED((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_edp), (_lock)); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

/* As EFX_BAR_TBL_WRITED, but writes the fourth dword of each row */
#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_edp)->ed_u32[0]); \
        EFSYS_BAR_WRITED((_enp)->en_esbp, \
            (_reg ## _OFST + \
            (3 * sizeof (efx_dword_t)) + \
            ((_index) * _reg ## _STEP)), \
            (_edp), (_lock)); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READQ((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_eqp)); \
        EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eqp)->eq_u32[1], \
            uint32_t, (_eqp)->eq_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eqp)->eq_u32[1], \
            uint32_t, (_eqp)->eq_u32[0]); \
        EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_eqp)); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_BAR_READO((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_eop), B_TRUE); \
        EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eop)->eo_u32[3], \
            uint32_t, (_eop)->eo_u32[2], \
            uint32_t, (_eop)->eo_u32[1], \
            uint32_t, (_eop)->eo_u32[0]); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)

#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop) \
    do { \
        EFX_CHECK_REG((_enp), (_reg)); \
        EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
            uint32_t, (_index), \
            uint32_t, _reg ## _OFST, \
            uint32_t, (_eop)->eo_u32[3], \
            uint32_t, (_eop)->eo_u32[2], \
            uint32_t, (_eop)->eo_u32[1], \
            uint32_t, (_eop)->eo_u32[0]); \
        EFSYS_BAR_WRITEO((_enp)->en_esbp, \
            (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
            (_eop), B_TRUE); \
    _NOTE(CONSTANTCONDITION) \
    } while (B_FALSE)
/* Internal MAC/PHY helpers */
extern __checkReturn int
efx_mac_select(
    __in efx_nic_t *enp);

extern __checkReturn int
efx_phy_probe(
    __in efx_nic_t *enp);

extern void
efx_phy_unprobe(
    __in efx_nic_t *enp);

#if EFSYS_OPT_VPD

/* VPD utility functions */

extern __checkReturn int
efx_vpd_hunk_length(
    __in_bcount(size) caddr_t data,
    __in size_t size,
    __out size_t *lengthp);

extern __checkReturn int
efx_vpd_hunk_verify(
    __in_bcount(size) caddr_t data,
    __in size_t size,
    __out_opt boolean_t *cksummedp);

extern __checkReturn int
efx_vpd_hunk_reinit(
    __in caddr_t data,
    __in size_t size,
    __in boolean_t wantpid);

extern __checkReturn int
efx_vpd_hunk_get(
    __in_bcount(size) caddr_t data,
    __in size_t size,
    __in efx_vpd_tag_t tag,
    __in efx_vpd_keyword_t keyword,
    __out unsigned int *payloadp,
    __out uint8_t *paylenp);

extern __checkReturn int
efx_vpd_hunk_next(
    __in_bcount(size) caddr_t data,
    __in size_t size,
    __out efx_vpd_tag_t *tagp,
    __out efx_vpd_keyword_t *keyword,
    __out_bcount_opt(*paylenp) unsigned int *payloadp,
    __out_opt uint8_t *paylenp,
    __inout unsigned int *contp);

extern __checkReturn int
efx_vpd_hunk_set(
    __in_bcount(size) caddr_t data,
    __in size_t size,
    __in efx_vpd_value_t *evvp);

#endif /* EFSYS_OPT_VPD */

#if EFSYS_OPT_DIAG

extern efx_sram_pattern_fn_t __cs __efx_sram_pattern_fns[];

/* Register/table specification used by the diagnostic tests */
typedef struct efx_register_set_s {
    unsigned int address;
    unsigned int step;
    unsigned int rows;
    efx_oword_t mask;
} efx_register_set_t;

extern __checkReturn int
efx_nic_test_registers(
    __in efx_nic_t *enp,
    __in efx_register_set_t *rsp,
    __in size_t count);

extern __checkReturn int
efx_nic_test_tables(
    __in efx_nic_t *enp,
    __in efx_register_set_t *rsp,
    __in efx_pattern_type_t pattern,
    __in size_t count);

#endif /* EFSYS_OPT_DIAG */

#ifdef __cplusplus
}
#endif

#endif /* _SYS_EFX_IMPL_H */

View File

@ -0,0 +1,354 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
/*
 * Initialise the interrupt module: record the interrupt type and the
 * host memory block used for the interrupt status vector, program the
 * fatal interrupt enables and the interrupt address register.
 *
 * Returns 0 on success, or EINVAL if the module is already initialised.
 */
	__checkReturn	int
efx_intr_init(
	__in		efx_nic_t *enp,
	__in		efx_intr_type_t type,
	__in		efsys_mem_t *esmp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_oword_t oword;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	/* Refuse double initialisation */
	if (enp->en_mod_flags & EFX_MOD_INTR) {
		rc = EINVAL;
		goto fail1;
	}

	enp->en_mod_flags |= EFX_MOD_INTR;

	eip->ei_type = type;
	eip->ei_esmp = esmp;

	/*
	 * bug17213 workaround.
	 *
	 * Under legacy interrupts, don't share a level between fatal
	 * interrupts and event queue interrupts. Under MSI-X, they
	 * must share, or we won't get an interrupt.
	 */
	if (enp->en_family == EFX_FAMILY_SIENA &&
	    eip->ei_type == EFX_INTR_LINE)
		eip->ei_level = 0x1f;
	else
		eip->ei_level = 0;

	/* Enable all the genuinely fatal interrupts */
	EFX_SET_OWORD(oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
	if (enp->en_family >= EFX_FAMILY_SIENA)
		EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);

	/* Set up the interrupt address register */
	EFX_POPULATE_OWORD_3(oword,
	    FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
	    FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
	    FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Enable driver interrupts: select the fatal-interrupt level computed
 * by efx_intr_init() and set the driver interrupt enable bit.
 */
void
efx_intr_enable(
	__in		efx_nic_t *enp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_oword_t oword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);

	EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
}
/* Disable driver interrupts (locked BAR access) */
void
efx_intr_disable(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);

	/* Short delay after clearing the enable — presumably to let an
	 * in-flight interrupt settle; confirm against hardware docs. */
	EFSYS_SPIN(10);
}
/*
 * Disable driver interrupts without taking the BAR lock: uses the raw
 * EFSYS_BAR_* primitives with the lock flag B_FALSE, for contexts where
 * the lock cannot be acquired.
 */
void
efx_intr_disable_unlocked(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
	    &oword, B_FALSE);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
	EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
	    &oword, B_FALSE);
}
/*
 * Raise a test interrupt at the given level, waiting up to 100ms for
 * the hardware to acknowledge it before restoring KER_INT_LEVE_SEL.
 *
 * Returns 0 on success, EINVAL if the level is out of range for the
 * controller family, or ENOTSUP if it does not fit the register field.
 * Must not be called while any event queue is initialised (bug16757).
 */
	__checkReturn	int
efx_intr_trigger(
	__in		efx_nic_t *enp,
	__in		unsigned int level)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_oword_t oword;
	unsigned int count;
	uint32_t sel;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	/* bug16757: No event queues can be initialized */
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));

	switch (enp->en_family) {
	case EFX_FAMILY_FALCON:
		if (level > EFX_NINTR_FALCON) {
			rc = EINVAL;
			goto fail1;
		}
		break;

	case EFX_FAMILY_SIENA:
		if (level > EFX_NINTR_SIENA) {
			rc = EINVAL;
			goto fail1;
		}
		break;

	default:
		EFSYS_ASSERT(B_FALSE);
		break;
	}

	if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
		return (ENOTSUP); /* avoid EFSYS_PROBE() */

	sel = level;

	/* Trigger a test interrupt */
	EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);

	/*
	 * Wait up to 100ms for the interrupt to be raised before restoring
	 * KER_INT_LEVE_SEL. Ignore a failure to raise (the caller will
	 * observe this soon enough anyway), but always reset KER_INT_LEVE_SEL
	 */
	count = 0;
	do {
		EFSYS_SPIN(100); /* 100us */
		EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
	} while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);

	EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Inspect the interrupt status vector in host memory for a fatal
 * interrupt indication.  If one is latched, acknowledge it by clearing
 * the flag in the vector and report B_TRUE; otherwise report B_FALSE.
 */
static	__checkReturn	boolean_t
efx_intr_check_fatal(
	__in		efx_nic_t *enp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efsys_mem_t *esmp = eip->ei_esmp;
	efx_oword_t oword;

	/* Read the syndrome */
	EFSYS_MEM_READO(esmp, 0, &oword);

	if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) == 0)
		return (B_FALSE);

	EFSYS_PROBE(fatal);

	/* Clear the fatal interrupt condition */
	EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
	EFSYS_MEM_WRITEO(esmp, 0, &oword);

	return (B_TRUE);
}
/*
 * Legacy (line) interrupt status: read the queue mask (which also
 * acknowledges the interrupt) and, if the fatal-interrupt level is
 * flagged, check and report the fatal condition.
 */
void
efx_intr_status_line(
	__in		efx_nic_t *enp,
	__out		boolean_t *fatalp,
	__out		uint32_t *qmaskp)
{
	efx_intr_t *eip = &(enp->en_intr);
	efx_dword_t dword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	/*
	 * Read the queue mask and implicitly acknowledge the
	 * interrupt.
	 */
	EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
	*qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);

	EFSYS_PROBE1(qmask, uint32_t, *qmaskp);

	if (*qmaskp & (1U << eip->ei_level))
		*fatalp = efx_intr_check_fatal(enp);
	else
		*fatalp = B_FALSE;
}
/*
 * Message (MSI/MSI-X) interrupt status: only the vector matching the
 * fatal-interrupt level can carry a fatal condition; any other vector
 * reports B_FALSE.
 */
void
efx_intr_status_message(
	__in		efx_nic_t *enp,
	__in		unsigned int message,
	__out		boolean_t *fatalp)
{
	efx_intr_t *eip = &(enp->en_intr);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	*fatalp = (message == eip->ei_level) ?
	    efx_intr_check_fatal(enp) : B_FALSE;
}
/*
 * Decode and log the sources of a fatal interrupt from
 * FR_AZ_FATAL_INTR_REG_KER (and FR_AZ_MEM_STAT_REG for parity errors).
 * Compiled out to an assertion when EFSYS_OPT_DECODE_INTR_FATAL is unset.
 */
void
efx_intr_fatal(
	__in		efx_nic_t *enp)
{
#if EFSYS_OPT_DECODE_INTR_FATAL
	efx_oword_t fatal;
	efx_oword_t mem_per;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
	EFX_ZERO_OWORD(mem_per);

	/* Memory status is only meaningful for parity errors */
	if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
	    EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
		EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
		    EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
		    EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);

	if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
		EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
		    EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
		    EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
#else
	EFSYS_ASSERT(0);
#endif
}
/*
 * Tear down the interrupt module: clear the interrupt address register
 * and mark the module uninitialised.
 */
void
efx_intr_fini(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

	/* Clear the interrupt address register */
	EFX_ZERO_OWORD(oword);
	EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);

	enp->en_mod_flags &= ~EFX_MOD_INTR;
}

View File

@ -0,0 +1,684 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_impl.h"
#if EFSYS_OPT_MAC_FALCON_GMAC
#include "falcon_gmac.h"
#endif
#if EFSYS_OPT_MAC_FALCON_XMAC
#include "falcon_xmac.h"
#endif
#if EFSYS_OPT_MAC_FALCON_GMAC
/*
 * Method table for the Falcon 1G MAC.  Slot order is positional and
 * must match efx_mac_ops_t; each slot is annotated with its field name.
 */
static efx_mac_ops_t __cs __efx_falcon_gmac_ops = {
        falcon_gmac_reset, /* emo_reset */
        falcon_mac_poll, /* emo_poll */
        falcon_mac_up, /* emo_up */
        falcon_gmac_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_LOOPBACK
        falcon_mac_loopback_set, /* emo_loopback_set */
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
        falcon_mac_stats_upload, /* emo_stats_upload */
        NULL, /* emo_stats_periodic */
        falcon_gmac_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MAC_STATS */
};
#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
#if EFSYS_OPT_MAC_FALCON_XMAC
/*
 * Method table for the Falcon 10G (X)MAC.  Slot order is positional and
 * must match efx_mac_ops_t.
 */
static efx_mac_ops_t __cs __efx_falcon_xmac_ops = {
        falcon_xmac_reset, /* emo_reset */
        falcon_mac_poll, /* emo_poll */
        falcon_mac_up, /* emo_up */
        falcon_xmac_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_LOOPBACK
        falcon_mac_loopback_set, /* emo_loopback_set */
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
        falcon_mac_stats_upload, /* emo_stats_upload */
        NULL, /* emo_stats_periodic */
        falcon_xmac_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MAC_STATS */
};
#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
#if EFSYS_OPT_SIENA
/*
 * Method table for the Siena MAC.  Siena has no host-driven MAC reset
 * (emo_reset is NULL) and supports periodic MC-driven statistics.
 */
static efx_mac_ops_t __cs __efx_siena_mac_ops = {
        NULL, /* emo_reset */
        siena_mac_poll, /* emo_poll */
        siena_mac_up, /* emo_up */
        siena_mac_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_LOOPBACK
        siena_mac_loopback_set, /* emo_loopback_set */
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
        siena_mac_stats_upload, /* emo_stats_upload */
        siena_mac_stats_periodic, /* emo_stats_periodic */
        siena_mac_stats_update /* emo_stats_update */
#endif /* EFSYS_OPT_MAC_STATS */
};
#endif /* EFSYS_OPT_SIENA */
/*
 * Method table lookup, indexed by MAC type (see efx_mac_select()).
 * Slot 0 corresponds to the invalid type; entries for MACs whose
 * support is compiled out are NULL.
 */
static efx_mac_ops_t __cs * __cs __efx_mac_ops[] = {
        NULL,
#if EFSYS_OPT_MAC_FALCON_GMAC
        &__efx_falcon_gmac_ops,
#else
        NULL,
#endif /* EFSYS_OPT_MAC_FALCON_GMAC */
#if EFSYS_OPT_MAC_FALCON_XMAC
        &__efx_falcon_xmac_ops,
#else
        NULL,
#endif /* EFSYS_OPT_MAC_FALCON_XMAC */
#if EFSYS_OPT_SIENA
        &__efx_siena_mac_ops,
#else
        NULL,
#endif /* EFSYS_OPT_SIENA */
};
/*
 * Set the MAC maximum frame length (PDU) and push the new value to the
 * hardware via emo_reconfigure().  The PDU must lie within
 * [EFX_MAC_PDU_MIN, EFX_MAC_PDU_MAX]; on reconfigure failure the
 * previous PDU is restored.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_pdu_set(
        __in efx_nic_t *enp,
        __in size_t pdu)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        uint32_t old_pdu;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
        EFSYS_ASSERT(emop != NULL);

        /* Range-check the requested PDU */
        if (pdu < EFX_MAC_PDU_MIN) {
                rc = EINVAL;
                goto fail1;
        }
        if (pdu > EFX_MAC_PDU_MAX) {
                rc = EINVAL;
                goto fail2;
        }

        /* Apply the new PDU, keeping the old value for rollback */
        old_pdu = epp->ep_mac_pdu;
        epp->ep_mac_pdu = (uint32_t)pdu;
        if ((rc = emop->emo_reconfigure(enp)) != 0)
                goto fail3;

        return (0);

fail3:
        EFSYS_PROBE(fail3);
        epp->ep_mac_pdu = old_pdu;
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Set the port's unicast MAC address and push it to the hardware.
 * Rejects multicast addresses (I/G bit set in addr[0]) and addresses
 * with an all-zero OUI.  On reconfigure failure the previous address
 * is restored.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_addr_set(
        __in efx_nic_t *enp,
        __in uint8_t *addr)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        uint8_t old_addr[6];
        uint32_t oui;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        /* A set I/G bit would make this a multicast address */
        if (addr[0] & 0x01) {
                rc = EINVAL;
                goto fail1;
        }

        /* The first three octets form the OUI; all-zero is invalid */
        oui = addr[0] << 16 | addr[1] << 8 | addr[2];
        if (oui == 0x000000) {
                rc = EINVAL;
                goto fail2;
        }

        /* Apply the new address, keeping the old one for rollback */
        EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
        EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
        if ((rc = emop->emo_reconfigure(enp)) != 0)
                goto fail3;

        return (0);

fail3:
        EFSYS_PROBE(fail3);
        EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Set the port's unicast/broadcast filter flags and push the new
 * configuration to the MAC via emo_reconfigure().  On failure the
 * previous settings are restored.  Returns 0 on success or an errno
 * value from emo_reconfigure().
 */
__checkReturn int
efx_mac_filter_set(
        __in efx_nic_t *enp,
        __in boolean_t unicst,
        __in boolean_t brdcst)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        boolean_t old_unicst;
        boolean_t old_brdcst;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        /*
         * Save the *current* port settings (not the requested ones) so
         * that the rollback on reconfigure failure actually restores
         * the pre-call state.  Previously the new values were saved,
         * which made the rollback a no-op.
         */
        old_unicst = epp->ep_unicst;
        old_brdcst = epp->ep_brdcst;

        epp->ep_unicst = unicst;
        epp->ep_brdcst = brdcst;

        if ((rc = emop->emo_reconfigure(enp)) != 0)
                goto fail1;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        /* Restore the previous filter settings */
        epp->ep_unicst = old_unicst;
        epp->ep_brdcst = old_brdcst;

        return (rc);
}
/*
 * Enable or disable MAC drain mode.  When enabling, the MAC is reset
 * first (if the method table provides a reset op) and then
 * reconfigured; when the drain state is unchanged this is a no-op.
 * Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_drain(
        __in efx_nic_t *enp,
        __in boolean_t enabled)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
        EFSYS_ASSERT(emop != NULL);

        /* Nothing to do if the drain state is already as requested */
        if (epp->ep_mac_drain == enabled)
                return (0);

        epp->ep_mac_drain = enabled;

        if (enabled && emop->emo_reset != NULL) {
                if ((rc = emop->emo_reset(enp)) != 0)
                        goto fail1;

                EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
                /*
                 * NOTE(review): the assertion above checks EFX_RESET_MAC
                 * but the line below clears EFX_RESET_PHY; confirm whether
                 * EFX_RESET_MAC was intended here (cf. efx_mac_select(),
                 * which clears EFX_RESET_MAC after a reset).
                 */
                enp->en_reset_flags &= ~EFX_RESET_PHY;
        }

        if ((rc = emop->emo_reconfigure(enp)) != 0)
                goto fail2;

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Query whether the MAC considers the link up, via the emo_up method.
 * On success *mac_upp is set by the method; returns 0 on success or
 * an errno value.
 */
__checkReturn int
efx_mac_up(
        __in efx_nic_t *enp,
        __out boolean_t *mac_upp)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        if ((rc = emop->emo_up(enp, mac_upp)) != 0)
                goto fail1;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Set the wanted flow-control mode (a mask of EFX_FCNTL_RESPOND and
 * EFX_FCNTL_GENERATE) and whether it should be autonegotiated.  If the
 * PHY supports autonegotiation the settings are encoded into the
 * advertised capabilities (PAUSE/ASYM bits) and AN is restarted via the
 * PHY; otherwise they are pushed directly to the MAC.  All state is
 * restored on failure.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_fcntl_set(
        __in efx_nic_t *enp,
        __in unsigned int fcntl,
        __in boolean_t autoneg)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        efx_phy_ops_t *epop = epp->ep_epop;
        unsigned int old_fcntl;
        boolean_t old_autoneg;
        unsigned int old_adv_cap;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        /* Reject any bits other than RESPOND and GENERATE */
        if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
                rc = EINVAL;
                goto fail1;
        }

        /*
         * Ignore a request to set flow control autonegotiation
         * if the PHY doesn't support it.
         */
        if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
                autoneg = B_FALSE;

        /*
         * Save the current port state (not the incoming arguments) so
         * the rollback on failure restores the pre-call configuration.
         * Previously the clamped new autoneg value was saved, so the
         * fail2 path could not restore the old ep_fcntl_autoneg.
         */
        old_fcntl = epp->ep_fcntl;
        old_autoneg = epp->ep_fcntl_autoneg;
        old_adv_cap = epp->ep_adv_cap_mask;

        epp->ep_fcntl = fcntl;
        epp->ep_fcntl_autoneg = autoneg;

        /*
         * If the PHY supports autonegotiation, then encode the flow control
         * settings in the advertised capabilities, and restart AN. Otherwise,
         * just push the new settings directly to the MAC.
         */
        if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) {
                /*
                 * RESPOND implies symmetric pause (PAUSE|ASYM advertised);
                 * GENERATE toggles ASYM to select the asymmetric direction.
                 */
                if (fcntl & EFX_FCNTL_RESPOND)
                        epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
                                                1 << EFX_PHY_CAP_ASYM);
                else
                        epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
                                                1 << EFX_PHY_CAP_ASYM);

                if (fcntl & EFX_FCNTL_GENERATE)
                        epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);

                if ((rc = epop->epo_reconfigure(enp)) != 0)
                        goto fail2;
        } else {
                if ((rc = emop->emo_reconfigure(enp)) != 0)
                        goto fail2;
        }

        return (0);

fail2:
        EFSYS_PROBE(fail2);
        /* Restore the previous flow-control configuration */
        epp->ep_fcntl = old_fcntl;
        epp->ep_fcntl_autoneg = old_autoneg;
        epp->ep_adv_cap_mask = old_adv_cap;

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Report the wanted flow-control settings (*fcntl_wantedp) and the
 * settings last pushed to the link (*fcntl_linkp).  When the PHY
 * supports autonegotiation the wanted settings are decoded from the
 * advertised PAUSE/ASYM capability bits; otherwise they are simply the
 * stored ep_fcntl value.
 */
void
efx_mac_fcntl_get(
        __in efx_nic_t *enp,
        __out unsigned int *fcntl_wantedp,
        __out unsigned int *fcntl_linkp)
{
        efx_port_t *epp = &(enp->en_port);
        unsigned int wanted;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        /*
         * If the PHY supports auto negotiation, then the requested flow
         * control settings are encoded in the advertised capabilities.
         */
        if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN)) {
                wanted = 0;

                /* PAUSE means symmetric; ASYM flips the GENERATE bit */
                if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
                        wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
                if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
                        wanted ^= EFX_FCNTL_GENERATE;
        } else
                wanted = epp->ep_fcntl;

        *fcntl_linkp = epp->ep_fcntl;
        *fcntl_wantedp = wanted;
}
/*
 * Program the 256-bit multicast hash filter from bucket[], where a
 * non-zero entry enables the corresponding hash bucket.  The 256 bits
 * are stored as two 128-bit owords; on reconfigure failure the old
 * hash is restored.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_hash_set(
        __in efx_nic_t *enp,
        __in_ecount(EFX_MAC_HASH_BITS) unsigned int const *bucket)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        efx_oword_t old_hash[2];
        unsigned int index;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

        /* Keep a copy of the current hash for rollback */
        memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));

        /* Set the lower 128 bits of the hash */
        EFX_ZERO_OWORD(epp->ep_multicst_hash[0]);
        for (index = 0; index < 128; index++) {
                if (bucket[index] != 0)
                        EFX_SET_OWORD_BIT(epp->ep_multicst_hash[0], index);
        }

        /* Set the upper 128 bits of the hash */
        EFX_ZERO_OWORD(epp->ep_multicst_hash[1]);
        for (index = 0; index < 128; index++) {
                if (bucket[index + 128] != 0)
                        EFX_SET_OWORD_BIT(epp->ep_multicst_hash[1], index);
        }

        if ((rc = emop->emo_reconfigure(enp)) != 0)
                goto fail1;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        /* Restore the previous hash */
        memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));

        return (rc);
}
#if EFSYS_OPT_MAC_STATS
#if EFSYS_OPT_NAMES

/* START MKCONFIG GENERATED EfxMacStatNamesBlock adf707adba80813e */
static const char __cs * __cs __efx_mac_stat_name[] = {
        "rx_octets",
        "rx_pkts",
        "rx_unicst_pkts",
        "rx_multicst_pkts",
        "rx_brdcst_pkts",
        "rx_pause_pkts",
        "rx_le_64_pkts",
        "rx_65_to_127_pkts",
        "rx_128_to_255_pkts",
        "rx_256_to_511_pkts",
        "rx_512_to_1023_pkts",
        "rx_1024_to_15xx_pkts",
        "rx_ge_15xx_pkts",
        "rx_errors",
        "rx_fcs_errors",
        "rx_drop_events",
        "rx_false_carrier_errors",
        "rx_symbol_errors",
        "rx_align_errors",
        "rx_internal_errors",
        "rx_jabber_pkts",
        "rx_lane0_char_err",
        "rx_lane1_char_err",
        "rx_lane2_char_err",
        "rx_lane3_char_err",
        "rx_lane0_disp_err",
        "rx_lane1_disp_err",
        "rx_lane2_disp_err",
        "rx_lane3_disp_err",
        "rx_match_fault",
        "rx_nodesc_drop_cnt",
        "tx_octets",
        "tx_pkts",
        "tx_unicst_pkts",
        "tx_multicst_pkts",
        "tx_brdcst_pkts",
        "tx_pause_pkts",
        "tx_le_64_pkts",
        "tx_65_to_127_pkts",
        "tx_128_to_255_pkts",
        "tx_256_to_511_pkts",
        "tx_512_to_1023_pkts",
        "tx_1024_to_15xx_pkts",
        "tx_ge_15xx_pkts",
        "tx_errors",
        "tx_sgl_col_pkts",
        "tx_mult_col_pkts",
        "tx_ex_col_pkts",
        "tx_late_col_pkts",
        "tx_def_pkts",
        "tx_ex_def_pkts",
};

/* END MKCONFIG GENERATED EfxMacStatNamesBlock */

/*
 * Return the name of MAC statistic 'id'.  The id must be below
 * EFX_MAC_NSTATS; the returned string is a static constant and must
 * not be freed.  The enp argument is used only for assertions.
 */
__checkReturn const char __cs *
efx_mac_stat_name(
        __in efx_nic_t *enp,
        __in unsigned int id)
{
        _NOTE(ARGUNUSED(enp))

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);

        return (__efx_mac_stat_name[id]);
}

#endif /* EFSYS_OPT_NAMES */
/*
 * Request an upload of MAC statistics into the DMA buffer *esmp, via
 * the emo_stats_upload method.  Marks statistics as pending on
 * success.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_stats_upload(
        __in efx_nic_t *enp,
        __in efsys_mem_t *esmp)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
        EFSYS_ASSERT(emop != NULL);

        /*
         * Don't assert !ep_mac_stats_pending, because the client might
         * have failed to finalise statistics when previously stopping
         * the port.
         */
        if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
                goto fail1;

        epp->ep_mac_stats_pending = B_TRUE;

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Enable or disable periodic MAC statistics DMA into *esmp every
 * period_ms milliseconds, optionally raising an event per update.
 * Fails with EINVAL if the selected MAC has no emo_stats_periodic
 * method.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_stats_periodic(
        __in efx_nic_t *enp,
        __in efsys_mem_t *esmp,
        __in uint16_t period_ms,
        __in boolean_t events)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
        EFSYS_ASSERT(emop != NULL);

        /* Not all MACs support periodic statistics */
        if (emop->emo_stats_periodic == NULL) {
                rc = EINVAL;
                goto fail1;
        }

        if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
                goto fail2;

        return (0);

fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}
/*
 * Decode the statistics DMA buffer *esmp into the EFX_MAC_NSTATS-entry
 * array *essp, returning the buffer generation count via *generationp.
 * Clears the stats-pending flag on success.  Returns 0 on success or
 * the errno from emo_stats_update.
 */
__checkReturn int
efx_mac_stats_update(
        __in efx_nic_t *enp,
        __in efsys_mem_t *esmp,
        __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
        __in uint32_t *generationp)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_ops_t *emop = epp->ep_emop;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
        EFSYS_ASSERT(emop != NULL);

        rc = emop->emo_stats_update(enp, esmp, essp, generationp);
        if (rc == 0)
                epp->ep_mac_stats_pending = B_FALSE;

        return (rc);
}
#endif /* EFSYS_OPT_MAC_STATS */
/*
 * Select the MAC appropriate for the NIC family and current link mode,
 * install its method table in the port, and reset it if the table
 * provides a reset op.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mac_select(
        __in efx_nic_t *enp)
{
        efx_port_t *epp = &(enp->en_port);
        efx_mac_type_t type = EFX_MAC_INVALID;
        efx_mac_ops_t *emop;
        int rc = EINVAL;

#if EFSYS_OPT_SIENA
        /* Siena has a single MAC regardless of link mode */
        if (enp->en_family == EFX_FAMILY_SIENA) {
                type = EFX_MAC_SIENA;
                goto chosen;
        }
#endif

#if EFSYS_OPT_FALCON
        switch (epp->ep_link_mode) {
#if EFSYS_OPT_MAC_FALCON_GMAC
        case EFX_LINK_100HDX:
        case EFX_LINK_100FDX:
        case EFX_LINK_1000HDX:
        case EFX_LINK_1000FDX:
                type = EFX_MAC_FALCON_GMAC;
                goto chosen;
#endif /* EFSYS_OPT_MAC_FALCON_GMAC */

#if EFSYS_OPT_MAC_FALCON_XMAC
        case EFX_LINK_10000FDX:
                type = EFX_MAC_FALCON_XMAC;
                goto chosen;
#endif /* EFSYS_OPT_MAC_FALCON_XMAC */

        default:
#if EFSYS_OPT_MAC_FALCON_GMAC && EFSYS_OPT_MAC_FALCON_XMAC
                /*
                 * Only initialise a MAC supported by the PHY: prefer
                 * the GMAC if the PHY advertises any 10/100/1000 mode.
                 * (The last test was previously a duplicated
                 * EFX_PHY_CAP_10FDX; it should cover 10HDX.)
                 */
                if (epp->ep_phy_cap_mask &
                    ((1 << EFX_PHY_CAP_1000FDX) |
                    (1 << EFX_PHY_CAP_1000HDX) |
                    (1 << EFX_PHY_CAP_100FDX) |
                    (1 << EFX_PHY_CAP_100HDX) |
                    (1 << EFX_PHY_CAP_10FDX) |
                    (1 << EFX_PHY_CAP_10HDX)))
                        type = EFX_MAC_FALCON_GMAC;
                else
                        type = EFX_MAC_FALCON_XMAC;
#elif EFSYS_OPT_MAC_FALCON_GMAC
                type = EFX_MAC_FALCON_GMAC;
#else
                type = EFX_MAC_FALCON_XMAC;
#endif
                goto chosen;
        }
#endif /* EFSYS_OPT_FALCON */

chosen:
        EFSYS_ASSERT(type != EFX_MAC_INVALID);
        EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
        emop = epp->ep_emop = (efx_mac_ops_t *)__efx_mac_ops[type];
        EFSYS_ASSERT(emop != NULL);

        epp->ep_mac_type = type;

        /* Reset the newly-selected MAC, if it has a reset op */
        if (emop->emo_reset != NULL) {
                if ((rc = emop->emo_reset(enp)) != 0)
                        goto fail1;

                EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_MAC);
                enp->en_reset_flags &= ~EFX_RESET_MAC;
        }

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        return (rc);
}

View File

@ -0,0 +1,733 @@
/*-
* Copyright 2008-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_regs_mcdi.h"
#include "efx_impl.h"
#if EFSYS_OPT_MCDI
/* Shared memory layout */
#define MCDI_P1_DBL_OFST 0x0
#define MCDI_P2_DBL_OFST 0x1
#define MCDI_P1_PDU_OFST 0x2
#define MCDI_P2_PDU_OFST 0x42
#define MCDI_P1_REBOOT_OFST 0x1fe
#define MCDI_P2_REBOOT_OFST 0x1ff
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
* via these mechanisms then wait 10ms for the status word to be set.
*/
#define MCDI_STATUS_SLEEP_US 10000
/*
 * Start the MCDI request *emrp: write its header and payload into this
 * port's shared-memory PDU area and ring the doorbell.  If ev_cpl is
 * set, the MC is asked to signal completion with an event; otherwise
 * the caller must poll with efx_mcdi_request_poll().
 */
void
efx_mcdi_request_start(
        __in efx_nic_t *enp,
        __in efx_mcdi_req_t *emrp,
        __in boolean_t ev_cpl)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        efx_dword_t dword;
        unsigned int seq;
        unsigned int xflags;
        unsigned int pdur;
        unsigned int dbr;
        unsigned int pos;
        int state;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        /* Select the shared-memory PDU and doorbell offsets for this port */
        switch (emip->emi_port) {
        case 1:
                pdur = MCDI_P1_PDU_OFST;
                dbr = MCDI_P1_DBL_OFST;
                break;
        case 2:
                pdur = MCDI_P2_PDU_OFST;
                dbr = MCDI_P2_DBL_OFST;
                break;
        default:
                EFSYS_ASSERT(0);
                pdur = dbr = 0;
        };

        /*
         * efx_mcdi_request_start() is naturally serialised against both
         * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
         * by virtue of there only being one outstanding MCDI request.
         * Unfortunately, upper layers may also call efx_mcdi_request_abort()
         * at any time, to timeout a pending mcdi request.  That request may
         * then subsequently complete, meaning efx_mcdi_ev_cpl() or
         * efx_mcdi_ev_death() may end up running in parallel with
         * efx_mcdi_request_start(). This race is handled by ensuring that
         * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
         * en_eslp lock.
         */
        EFSYS_LOCK(enp->en_eslp, state);
        EFSYS_ASSERT(emip->emi_pending_req == NULL);
        emip->emi_pending_req = emrp;
        emip->emi_ev_cpl = ev_cpl;
        emip->emi_poll_cnt = 0;
        seq = emip->emi_seq++ & 0xf; /* 4-bit sequence number */
        EFSYS_UNLOCK(enp->en_eslp, state);

        xflags = 0;
        if (ev_cpl)
                xflags |= MCDI_HEADER_XFLAGS_EVREQ;

        /* Construct the header in shared memory */
        EFX_POPULATE_DWORD_6(dword,
            MCDI_HEADER_CODE, emrp->emr_cmd,
            MCDI_HEADER_RESYNC, 1,
            MCDI_HEADER_DATALEN, emrp->emr_in_length,
            MCDI_HEADER_SEQ, seq,
            MCDI_HEADER_RESPONSE, 0,
            MCDI_HEADER_XFLAGS, xflags);
        EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);

        /* Copy the payload, one dword at a time, after the header */
        for (pos = 0; pos < emrp->emr_in_length; pos += sizeof (efx_dword_t)) {
                memcpy(&dword, MCDI_IN(*emrp, efx_dword_t, pos),
                    MIN(sizeof (dword), emrp->emr_in_length - pos));
                EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
                    pdur + 1 + (pos >> 2), &dword, B_FALSE);
        }

        /* Ring the doorbell */
        EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
        EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
}
/*
 * Copy the response payload of a completed request out of the port's
 * shared-memory PDU area into emrp->emr_out_buf (if the caller supplied
 * one), one dword at a time, truncating to emr_out_length.
 */
static void
efx_mcdi_request_copyout(
        __in efx_nic_t *enp,
        __in efx_mcdi_req_t *emrp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        unsigned int pos;
        unsigned int pdur;
        efx_dword_t data;

        pdur = (emip->emi_port == 1) ? MCDI_P1_PDU_OFST : MCDI_P2_PDU_OFST;

        /* Copy payload out if caller supplied buffer */
        if (emrp->emr_out_buf != NULL) {
                size_t bytes = MIN(emrp->emr_out_length_used,
                    emrp->emr_out_length);
                for (pos = 0; pos < bytes; pos += sizeof (efx_dword_t)) {
                        EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
                            pdur + 1 + (pos >> 2), &data, B_FALSE);
                        memcpy(MCDI_OUT(*emrp, efx_dword_t, pos), &data,
                            MIN(sizeof (data), bytes - pos));
                }
        }
}
/*
 * Translate an MCDI protocol error number into a host errno value.
 * Unknown protocol errors are noted via a probe and reported as EIO.
 */
static int
efx_mcdi_request_errcode(
        __in unsigned int err)
{
        if (err == MC_CMD_ERR_ENOENT)
                return (ENOENT);
        if (err == MC_CMD_ERR_EINTR)
                return (EINTR);
        if (err == MC_CMD_ERR_EACCES)
                return (EACCES);
        if (err == MC_CMD_ERR_EBUSY)
                return (EBUSY);
        if (err == MC_CMD_ERR_EINVAL)
                return (EINVAL);
        if (err == MC_CMD_ERR_EDEADLK)
                return (EDEADLK);
        if (err == MC_CMD_ERR_ENOSYS)
                return (ENOTSUP);
        if (err == MC_CMD_ERR_ETIME)
                return (ETIMEDOUT);

#ifdef WITH_MCDI_V2
        if (err == MC_CMD_ERR_EAGAIN)
                return (EAGAIN);
        if (err == MC_CMD_ERR_ENOSPC)
                return (ENOSPC);
#endif

        /* Unrecognised protocol error */
        EFSYS_PROBE1(mc_pcol_error, int, err);
        return (EIO);
}
/*
 * Notify the transport of an MC reboot (rc == EIO) or assertion
 * failure (rc == EINTR) via its exception callback.  A REBOOT command
 * that succeeds in rebooting the MC is expected to fail with EIO and
 * does not raise an exception.
 */
static void
efx_mcdi_raise_exception(
        __in efx_nic_t *enp,
        __in_opt efx_mcdi_req_t *emrp,
        __in int rc)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        const efx_mcdi_transport_t *emtp = emip->emi_mtp;
        efx_mcdi_exception_t exception;

        /* Reboot or Assertion failure only */
        EFSYS_ASSERT(rc == EIO || rc == EINTR);

        /*
         * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
         * then the EIO is not worthy of an exception.
         */
        if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
                return;

        exception = (rc == EIO)
                ? EFX_MCDI_EXCEPTION_MC_REBOOT
                : EFX_MCDI_EXCEPTION_MC_BADASSERT;

        emtp->emt_exception(emtp->emt_context, exception);
}
/*
 * Read (and clear) this port's MC reboot status word from shared
 * memory.  Returns 0 if no reboot occurred, EINTR if the MC stopped on
 * an assertion, or EIO for any other reboot.
 */
static int
efx_mcdi_poll_reboot(
        __in efx_nic_t *enp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        unsigned int rebootr;
        efx_dword_t dword;
        uint32_t value;

        EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
        rebootr = ((emip->emi_port == 1)
                ? MCDI_P1_REBOOT_OFST
                : MCDI_P2_REBOOT_OFST);

        EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
        value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);

        if (value == 0)
                return (0);

        /* Consume (clear) the status word so it is reported only once */
        EFX_ZERO_DWORD(dword);
        EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);

        if (value == MC_STATUS_DWORD_ASSERT)
                return (EINTR);
        else
                return (EIO);
}
/*
 * Poll for completion of the pending (poll-completed) MCDI request.
 * Returns B_FALSE if the request is still in flight; B_TRUE once it
 * has completed (successfully or with emr_rc set to an errno).  An MC
 * reboot or assertion failure is reported via the transport's
 * exception callback.
 */
__checkReturn boolean_t
efx_mcdi_request_poll(
        __in efx_nic_t *enp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        efx_mcdi_req_t *emrp;
        efx_dword_t dword;
        unsigned int pdur;
        unsigned int seq;
        unsigned int length;
        int state;
        int rc;

        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
        EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        /* Serialise against post-watchdog efx_mcdi_ev* */
        EFSYS_LOCK(enp->en_eslp, state);

        EFSYS_ASSERT(emip->emi_pending_req != NULL);
        EFSYS_ASSERT(!emip->emi_ev_cpl);
        emrp = emip->emi_pending_req;

        /* Check for reboot atomically w.r.t efx_mcdi_request_start */
        if (emip->emi_poll_cnt++ == 0) {
                if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
                        emip->emi_pending_req = NULL;
                        EFSYS_UNLOCK(enp->en_eslp, state);
                        goto fail1;
                }
        }

        EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
        pdur = (emip->emi_port == 1) ? MCDI_P1_PDU_OFST : MCDI_P2_PDU_OFST;

        /* Read the command header */
        EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_FALSE);
        if (EFX_DWORD_FIELD(dword, MCDI_HEADER_RESPONSE) == 0) {
                /* Still pending */
                EFSYS_UNLOCK(enp->en_eslp, state);
                return (B_FALSE);
        }

        /* Request complete */
        emip->emi_pending_req = NULL;
        seq = (emip->emi_seq - 1) & 0xf; /* sequence the request was sent with */

        /* Check for synchronous reboot */
        if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR) != 0 &&
            EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN) == 0) {
                /* Consume status word */
                EFSYS_SPIN(MCDI_STATUS_SLEEP_US);
                efx_mcdi_poll_reboot(enp);
                EFSYS_UNLOCK(enp->en_eslp, state);
                rc = EIO;
                goto fail2;
        }

        EFSYS_UNLOCK(enp->en_eslp, state);

        /* Check that the returned data is consistent */
        if (EFX_DWORD_FIELD(dword, MCDI_HEADER_CODE) != emrp->emr_cmd ||
            EFX_DWORD_FIELD(dword, MCDI_HEADER_SEQ) != seq) {
                /* Response is for a different request */
                rc = EIO;
                goto fail3;
        }

        length = EFX_DWORD_FIELD(dword, MCDI_HEADER_DATALEN);
        if (EFX_DWORD_FIELD(dword, MCDI_HEADER_ERROR)) {
                /* The payload is a single dword holding the MCDI errno */
                efx_dword_t errdword;
                int errcode;

                EFSYS_ASSERT3U(length, ==, 4);
                EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
                    pdur + 1 + (MC_CMD_ERR_CODE_OFST >> 2),
                    &errdword, B_FALSE);
                errcode = EFX_DWORD_FIELD(errdword, EFX_DWORD_0);
                rc = efx_mcdi_request_errcode(errcode);
                EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, int, errcode);
                goto fail4;

        } else {
                emrp->emr_out_length_used = length;
                emrp->emr_rc = 0;
                efx_mcdi_request_copyout(enp, emrp);
        }

        goto out;

fail4:
        EFSYS_PROBE(fail4);
fail3:
        EFSYS_PROBE(fail3);
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);

        /* Fill out error state */
        emrp->emr_rc = rc;
        emrp->emr_out_length_used = 0;

        /* Reboot/Assertion */
        if (rc == EIO || rc == EINTR)
                efx_mcdi_raise_exception(enp, emrp, rc);

out:
        return (B_TRUE);
}
/*
 * Execute the MCDI request *emrp synchronously via the transport's
 * emt_execute callback; on return emrp->emr_rc holds the result.
 */
void
efx_mcdi_execute(
        __in efx_nic_t *enp,
        __in efx_mcdi_req_t *emrp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        const efx_mcdi_transport_t *emtp = emip->emi_mtp;

        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
        EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        emtp->emt_execute(emtp->emt_context, emrp);
}
/*
 * Handle an MCDI completion event for an event-completed request with
 * sequence number 'seq', response length 'outlen' and MCDI error
 * 'errcode'.  Spurious completions (no pending request, wrong sequence,
 * or a poll-completed request) consume an abort "credit" instead (see
 * efx_mcdi_request_abort()).
 */
void
efx_mcdi_ev_cpl(
        __in efx_nic_t *enp,
        __in unsigned int seq,
        __in unsigned int outlen,
        __in int errcode)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        const efx_mcdi_transport_t *emtp = emip->emi_mtp;
        efx_mcdi_req_t *emrp;
        int state;

        EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
        EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        /*
         * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
         * when we're completing an aborted request.
         */
        EFSYS_LOCK(enp->en_eslp, state);
        if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
            (seq != ((emip->emi_seq - 1) & 0xf))) {
                EFSYS_ASSERT(emip->emi_aborted > 0);
                if (emip->emi_aborted > 0)
                        --emip->emi_aborted;
                EFSYS_UNLOCK(enp->en_eslp, state);
                return;
        }

        emrp = emip->emi_pending_req;
        emip->emi_pending_req = NULL;
        EFSYS_UNLOCK(enp->en_eslp, state);

        /*
         * Fill out the remaining hdr fields, and copyout the payload
         * if the user supplied an output buffer.
         */
        if (errcode != 0) {
                EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
                    int, errcode);
                emrp->emr_out_length_used = 0;
                emrp->emr_rc = efx_mcdi_request_errcode(errcode);
        } else {
                emrp->emr_out_length_used = outlen;
                emrp->emr_rc = 0;
                efx_mcdi_request_copyout(enp, emrp);
        }

        emtp->emt_ev_cpl(emtp->emt_context);
}
/*
 * Handle an MC death (REBOOT or BADASSERT event): complete any pending
 * event-completed request with 'rc', consume the reboot status word,
 * and raise an exception through the transport.
 */
void
efx_mcdi_ev_death(
        __in efx_nic_t *enp,
        __in int rc)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        const efx_mcdi_transport_t *emtp = emip->emi_mtp;
        efx_mcdi_req_t *emrp = NULL;
        boolean_t ev_cpl;
        int state;

        /*
         * The MCDI request (if there is one) has been terminated, either
         * by a BADASSERT or REBOOT event.
         *
         * If there is an outstanding event-completed MCDI operation, then we
         * will never receive the completion event (because both MCDI
         * completions and BADASSERT events are sent to the same evq). So
         * complete this MCDI op.
         *
         * This function might run in parallel with efx_mcdi_request_poll()
         * for poll completed mcdi requests, and also with
         * efx_mcdi_request_start() for post-watchdog completions.
         */
        EFSYS_LOCK(enp->en_eslp, state);
        emrp = emip->emi_pending_req;
        ev_cpl = emip->emi_ev_cpl;
        if (emrp != NULL && emip->emi_ev_cpl) {
                emip->emi_pending_req = NULL;

                emrp->emr_out_length_used = 0;
                emrp->emr_rc = rc;
                ++emip->emi_aborted;
        }

        /*
         * Since we're running in parallel with a request, consume the
         * status word before dropping the lock.
         */
        if (rc == EIO || rc == EINTR) {
                EFSYS_SPIN(MCDI_STATUS_SLEEP_US);
                (void) efx_mcdi_poll_reboot(enp);
        }

        EFSYS_UNLOCK(enp->en_eslp, state);

        efx_mcdi_raise_exception(enp, emrp, rc);

        if (emrp != NULL && ev_cpl)
                emtp->emt_ev_cpl(emtp->emt_context);
}
/*
 * Query the MC firmware version (versionp[4]), build number (*buildp)
 * and boot status (*statusp) via MC_CMD_GET_VERSION and
 * MC_CMD_GET_BOOT_STATUS.  All output pointers are optional.  Handles
 * the bootrom, which returns a short GET_VERSION response and does not
 * understand GET_BOOT_STATUS.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mcdi_version(
        __in efx_nic_t *enp,
        __out_ecount_opt(4) uint16_t versionp[4],
        __out_opt uint32_t *buildp,
        __out_opt efx_mcdi_boot_t *statusp)
{
        /* The buffer is reused for both commands, so size for the larger */
        uint8_t outbuf[MAX(MC_CMD_GET_VERSION_OUT_LEN,
                    MC_CMD_GET_BOOT_STATUS_OUT_LEN)];
        efx_mcdi_req_t req;
        efx_word_t *ver_words;
        uint16_t version[4];
        uint32_t build;
        efx_mcdi_boot_t status;
        int rc;

        EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        EFX_STATIC_ASSERT(MC_CMD_GET_VERSION_IN_LEN == 0);
        req.emr_cmd = MC_CMD_GET_VERSION;
        req.emr_in_buf = NULL;
        req.emr_in_length = 0;
        req.emr_out_buf = outbuf;
        req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {
                rc = req.emr_rc;
                goto fail1;
        }

        /* bootrom support */
        if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
                /* Bootrom reports no version, only a firmware/build id */
                version[0] = version[1] = version[2] = version[3] = 0;
                build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);

                goto version;
        }

        if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
                rc = EMSGSIZE;
                goto fail2;
        }

        ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
        version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
        version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
        version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
        version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
        build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);

version:
        /* The bootrom doesn't understand BOOT_STATUS */
        if (build == MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM) {
                status = EFX_MCDI_BOOT_ROM;
                goto out;
        }

        req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
        EFX_STATIC_ASSERT(MC_CMD_GET_BOOT_STATUS_IN_LEN == 0);
        req.emr_in_buf = NULL;
        req.emr_in_length = 0;
        req.emr_out_buf = outbuf;
        req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;

        efx_mcdi_execute(enp, &req);

        if (req.emr_rc != 0) {
                rc = req.emr_rc;
                goto fail3;
        }

        if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
                rc = EMSGSIZE;
                goto fail4;
        }

        if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
            GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
                status = EFX_MCDI_BOOT_PRIMARY;
        else
                status = EFX_MCDI_BOOT_SECONDARY;

out:
        /* Copy out only the values the caller asked for */
        if (versionp != NULL)
                memcpy(versionp, version, sizeof (version));
        if (buildp != NULL)
                *buildp = build;
        if (statusp != NULL)
                *statusp = status;

        return (0);

fail4:
        EFSYS_PROBE(fail4);
fail3:
        EFSYS_PROBE(fail3);
fail2:
        EFSYS_PROBE(fail2);
fail1:
        EFSYS_PROBE1(fail1, int, rc);

        return (rc);
}
/*
 * Initialise the MCDI module: record the transport, determine which MC
 * port this function uses (from FR_AZ_CS_DEBUG_REG) and clear any
 * stale reboot status.  A no-op (after setting the module flag) on
 * Falcon, which has no MC.  Returns 0 on success or an errno value.
 */
__checkReturn int
efx_mcdi_init(
        __in efx_nic_t *enp,
        __in const efx_mcdi_transport_t *mtp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        efx_oword_t oword;
        unsigned int portnum;
        int rc;

        /* MCDI must be initialised before any other module */
        EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
        enp->en_mod_flags |= EFX_MOD_MCDI;

        if (enp->en_family == EFX_FAMILY_FALCON)
                return (0);

        emip->emi_mtp = mtp;

        /* Determine the port number to use for MCDI */
        EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
        portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);

        if (portnum == 0) {
                /* Presumably booted from ROM; only MCDI port 1 will work */
                emip->emi_port = 1;
        } else if (portnum <= 2) {
                emip->emi_port = portnum;
        } else {
                rc = EINVAL;
                goto fail1;
        }

        /*
         * Wipe the atomic reboot status so subsequent MCDI requests succeed.
         * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
         * assertion handler.
         */
        (void) efx_mcdi_poll_reboot(enp);

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);
        enp->en_mod_flags &= ~EFX_MOD_MCDI;

        return (rc);
}
/*
 * Ask the MC to reboot via MC_CMD_REBOOT.  A successful reboot kills
 * the command in flight, so EIO is the expected outcome and is treated
 * as success; anything else is reported as EIO failure.
 */
__checkReturn int
efx_mcdi_reboot(
        __in efx_nic_t *enp)
{
        uint8_t payload[MC_CMD_REBOOT_IN_LEN];
        efx_mcdi_req_t req;
        int rc;

        /*
         * We could require the caller to have caused en_mod_flags=0 to
         * call this function. This doesn't help the other port though,
         * who's about to get the MC ripped out from underneath them.
         * Since they have to cope with the subsequent fallout of MCDI
         * failures, we should as well.
         */
        EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

        req.emr_cmd = MC_CMD_REBOOT;
        req.emr_in_buf = payload;
        req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
        req.emr_out_buf = NULL;
        req.emr_out_length = 0;

        MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, 0);

        efx_mcdi_execute(enp, &req);

        /* Invert EIO */
        if (req.emr_rc != EIO) {
                rc = EIO;
                goto fail1;
        }

        return (0);

fail1:
        EFSYS_PROBE1(fail1, int, rc);

        return (rc);
}
/*
 * Abort (time out) the pending MCDI request, if any, marking it failed
 * with ETIMEDOUT.  Returns B_TRUE if a request was aborted, B_FALSE if
 * none was pending (e.g. it had already been completed by an event).
 */
__checkReturn boolean_t
efx_mcdi_request_abort(
        __in efx_nic_t *enp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
        efx_mcdi_req_t *emrp;
        boolean_t aborted;
        int state;

        EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
        EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

        /*
         * efx_mcdi_ev_* may have already completed this event, and be
         * spinning/blocked on the upper layer lock. So it *is* legitimate
         * for emi_pending_req to be NULL. If there is a pending event
         * completed request, then provide a "credit" to allow
         * efx_mcdi_ev_cpl() to accept a single spurious completion.
         */
        EFSYS_LOCK(enp->en_eslp, state);
        emrp = emip->emi_pending_req;
        aborted = (emrp != NULL);
        if (aborted) {
                emip->emi_pending_req = NULL;

                /* Error the request */
                emrp->emr_out_length_used = 0;
                emrp->emr_rc = ETIMEDOUT;

                /* Provide a credit for seqno/emr_pending_req mismatches */
                if (emip->emi_ev_cpl)
                        ++emip->emi_aborted;

                /*
                 * The upper layer has called us, so we don't
                 * need to complete the request.
                 */
        }
        EFSYS_UNLOCK(enp->en_eslp, state);

        return (aborted);
}
/*
 * Tear down the MCDI module.  MCDI must be the only remaining
 * initialised module.  On NICs without the MCDI feature (Falcon) only
 * the module flag is cleared.
 */
void
efx_mcdi_fini(
        __in efx_nic_t *enp)
{
        efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);

        EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
        enp->en_mod_flags &= ~EFX_MOD_MCDI;

        if (~(enp->en_features) & EFX_FEATURE_MCDI)
                return;

        /* Drop the transport and reset the interface state */
        emip->emi_mtp = NULL;
        emip->emi_port = 0;
        emip->emi_aborted = 0;
}
#endif /* EFSYS_OPT_MCDI */

View File

@ -0,0 +1,238 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_EFX_MCDI_H
#define _SYS_EFX_MCDI_H
#include "efx.h"
#include "efx_regs.h"
#include "efx_regs_mcdi.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Number of retries attempted for init code */
#define EFX_MCDI_REQ_RETRY_INIT 2
/* State for one MCDI request: caller-supplied buffers plus the result. */
struct efx_mcdi_req_s {
	/* Inputs: Command #, input buffer and length */
	unsigned int emr_cmd;
	uint8_t *emr_in_buf;
	size_t emr_in_length;
	/* Outputs: retcode, buffer, length, and length used */
	int emr_rc;
	uint8_t *emr_out_buf;
	size_t emr_out_length;
	size_t emr_out_length_used;
};
/* Per-NIC MCDI interface state. */
typedef struct efx_mcdi_iface_s {
	const efx_mcdi_transport_t *emi_mtp;	/* transport callbacks */
	unsigned int emi_port;		/* MC port in use */
	unsigned int emi_seq;		/* presumably next request seqno -- confirm */
	efx_mcdi_req_t *emi_pending_req; /* request in flight, if any */
	boolean_t emi_ev_cpl;		/* pending request completes via event */
	int emi_aborted;	/* credits letting efx_mcdi_ev_cpl() accept
				 * spurious completions after an abort */
	uint32_t emi_poll_cnt;	/* NOTE(review): poll iteration counter? confirm */
} efx_mcdi_iface_t;
extern void
efx_mcdi_execute(
__in efx_nic_t *enp,
__in efx_mcdi_req_t *emrp);
extern void
efx_mcdi_ev_cpl(
__in efx_nic_t *enp,
__in unsigned int seq,
__in unsigned int outlen,
__in int errcode);
extern void
efx_mcdi_ev_death(
__in efx_nic_t *enp,
__in int rc);
typedef enum efx_mcdi_boot_e {
EFX_MCDI_BOOT_PRIMARY,
EFX_MCDI_BOOT_SECONDARY,
EFX_MCDI_BOOT_ROM,
} efx_mcdi_boot_t;
extern __checkReturn int
efx_mcdi_version(
__in efx_nic_t *enp,
__out_ecount_opt(4) uint16_t versionp[4],
__out_opt uint32_t *buildp,
__out_opt efx_mcdi_boot_t *statusp);
#define MCDI_IN(_emr, _type, _ofst) \
((_type *)((_emr).emr_in_buf + (_ofst)))
#define MCDI_IN2(_emr, _type, _ofst) \
MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
EFX_BYTE_0, _value)
#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
EFX_DWORD_0, _value)
#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1)
#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
_field2, _value2) \
EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2)
#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3) \
EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3)
#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4) \
EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4)
#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4, \
_field5, _value5) \
EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4, \
MC_CMD_ ## _field5, _value5)
#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4, \
_field5, _value5, _field6, _value6) \
EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4, \
MC_CMD_ ## _field5, _value5, \
MC_CMD_ ## _field6, _value6)
/*
 * Populate seven fields of the request dword at MC_CMD_<_ofst>_OFST.
 * Bug fix: MCDI_IN2() yields a pointer, but the EFX_POPULATE_DWORD_*
 * macros take an efx_dword_t lvalue, so it must be dereferenced --
 * matching every sibling MCDI_IN_POPULATE_DWORD_N macro above/below.
 */
#define	MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1,		\
	    _field2, _value2, _field3, _value3, _field4, _value4,	\
	    _field5, _value5, _field6, _value6, _field7, _value7)	\
	EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst),	\
	    MC_CMD_ ## _field1, _value1,				\
	    MC_CMD_ ## _field2, _value2,				\
	    MC_CMD_ ## _field3, _value3,				\
	    MC_CMD_ ## _field4, _value4,				\
	    MC_CMD_ ## _field5, _value5,				\
	    MC_CMD_ ## _field6, _value6,				\
	    MC_CMD_ ## _field7, _value7)
#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4, \
_field5, _value5, _field6, _value6, _field7, _value7, \
_field8, _value8) \
EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4, \
MC_CMD_ ## _field5, _value5, \
MC_CMD_ ## _field6, _value6, \
MC_CMD_ ## _field7, _value7, \
MC_CMD_ ## _field8, _value8)
#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4, \
_field5, _value5, _field6, _value6, _field7, _value7, \
_field8, _value8, _field9, _value9) \
EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4, \
MC_CMD_ ## _field5, _value5, \
MC_CMD_ ## _field6, _value6, \
MC_CMD_ ## _field7, _value7, \
MC_CMD_ ## _field8, _value8, \
MC_CMD_ ## _field9, _value9)
#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
_field2, _value2, _field3, _value3, _field4, _value4, \
_field5, _value5, _field6, _value6, _field7, _value7, \
_field8, _value8, _field9, _value9, _field10, _value10) \
EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field1, _value1, \
MC_CMD_ ## _field2, _value2, \
MC_CMD_ ## _field3, _value3, \
MC_CMD_ ## _field4, _value4, \
MC_CMD_ ## _field5, _value5, \
MC_CMD_ ## _field6, _value6, \
MC_CMD_ ## _field7, _value7, \
MC_CMD_ ## _field8, _value8, \
MC_CMD_ ## _field9, _value9, \
MC_CMD_ ## _field10, _value10)
#define MCDI_OUT(_emr, _type, _ofst) \
((_type *)((_emr).emr_out_buf + (_ofst)))
#define MCDI_OUT2(_emr, _type, _ofst) \
MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
#define MCDI_OUT_BYTE(_emr, _ofst) \
EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
EFX_BYTE_0)
#define MCDI_OUT_WORD(_emr, _ofst) \
EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
EFX_WORD_0)
#define MCDI_OUT_DWORD(_emr, _ofst) \
EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
EFX_DWORD_0)
#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
MC_CMD_ ## _field)
#define MCDI_EV_FIELD(_eqp, _field) \
EFX_QWORD_FIELD(*eqp, MCDI_EVENT_ ## _field)
#ifdef __cplusplus
}
#endif
#endif /* _SYS_EFX_MCDI_H */

View File

@ -0,0 +1,269 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_NULL
#include "nullmon.h"
#endif
#if EFSYS_OPT_MON_LM87
#include "lm87.h"
#endif
#if EFSYS_OPT_MON_MAX6647
#include "max6647.h"
#endif
#if EFSYS_OPT_NAMES
static const char __cs * __cs __efx_mon_name[] = {
"",
"nullmon",
"lm87",
"max6647",
"sfx90x0"
};
/* Return the printable name of this NIC's hardware monitor. */
const char __cs *
efx_mon_name(
	__in	efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	/* enc_mon_type indexes __efx_mon_name[]; bounds-check it first */
	EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
	EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
	return (__efx_mon_name[encp->enc_mon_type]);
}
#endif /* EFSYS_OPT_NAMES */
#if EFSYS_OPT_MON_NULL
static efx_mon_ops_t __cs __efx_mon_null_ops = {
nullmon_reset, /* emo_reset */
nullmon_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_MON_STATS
nullmon_stats_update /* emo_stat_update */
#endif /* EFSYS_OPT_MON_STATS */
};
#endif
#if EFSYS_OPT_MON_LM87
static efx_mon_ops_t __cs __efx_mon_lm87_ops = {
lm87_reset, /* emo_reset */
lm87_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_MON_STATS
lm87_stats_update /* emo_stat_update */
#endif /* EFSYS_OPT_MON_STATS */
};
#endif
#if EFSYS_OPT_MON_MAX6647
static efx_mon_ops_t __cs __efx_mon_max6647_ops = {
max6647_reset, /* emo_reset */
max6647_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_MON_STATS
max6647_stats_update /* emo_stat_update */
#endif /* EFSYS_OPT_MON_STATS */
};
#endif
#if EFSYS_OPT_MON_SIENA
static efx_mon_ops_t __cs __efx_mon_siena_ops = {
siena_mon_reset, /* emo_reset */
siena_mon_reconfigure, /* emo_reconfigure */
#if EFSYS_OPT_MON_STATS
siena_mon_stats_update /* emo_stat_update */
#endif /* EFSYS_OPT_MON_STATS */
};
#endif
static efx_mon_ops_t __cs * __cs __efx_mon_ops[] = {
NULL,
#if EFSYS_OPT_MON_NULL
&__efx_mon_null_ops,
#else
NULL,
#endif
#if EFSYS_OPT_MON_LM87
&__efx_mon_lm87_ops,
#else
NULL,
#endif
#if EFSYS_OPT_MON_MAX6647
&__efx_mon_max6647_ops,
#else
NULL,
#endif
#if EFSYS_OPT_MON_SIENA
&__efx_mon_siena_ops
#else
NULL
#endif
};
/*
 * Initialise the hardware monitor: select the ops for the configured
 * monitor type, reset and reconfigure it.  Fails with EINVAL if the
 * module is already initialised, ENOTSUP if the monitor type was
 * compiled out.
 *
 * Bug fix: the EFSYS_PROBE labels on the failure path were off by one
 * (fail4 probed "fail5", fail3 probed "fail4", fail2 probed "fail3"),
 * which would mislead anyone tracing failures; each probe now matches
 * its label.
 */
	__checkReturn	int
efx_mon_init(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mon_t *emp = &(enp->en_mon);
	efx_mon_ops_t *emop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);

	if (enp->en_mod_flags & EFX_MOD_MON) {
		rc = EINVAL;
		goto fail1;
	}

	enp->en_mod_flags |= EFX_MOD_MON;

	emp->em_type = encp->enc_mon_type;

	EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
	EFSYS_ASSERT3U(emp->em_type, <, EFX_MON_NTYPES);
	if ((emop = (efx_mon_ops_t *)__efx_mon_ops[emp->em_type]) == NULL) {
		rc = ENOTSUP;
		goto fail2;
	}

	if ((rc = emop->emo_reset(enp)) != 0)
		goto fail3;

	if ((rc = emop->emo_reconfigure(enp)) != 0)
		goto fail4;

	emp->em_emop = emop;
	return (0);

fail4:
	EFSYS_PROBE(fail4);

	/* Best-effort: put the monitor back into its reset state */
	(void) emop->emo_reset(enp);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);

	emp->em_type = EFX_MON_INVALID;

	enp->en_mod_flags &= ~EFX_MOD_MON;

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_MON_STATS
#if EFSYS_OPT_NAMES
/* START MKCONFIG GENERATED MonitorStatNamesBlock 08518fd1fb4e2612 */
static const char __cs * __cs __mon_stat_name[] = {
"value_2_5v",
"value_vccp1",
"value_vcc",
"value_5v",
"value_12v",
"value_vccp2",
"value_ext_temp",
"value_int_temp",
"value_ain1",
"value_ain2",
"controller_cooling",
"ext_cooling",
"1v",
"1_2v",
"1_8v",
"3_3v",
};
/* END MKCONFIG GENERATED MonitorStatNamesBlock */
/* Return the printable name of monitor statistic 'id'. */
extern	const char __cs *
efx_mon_stat_name(
	__in	efx_nic_t *enp,
	__in	efx_mon_stat_t id)
{
	_NOTE(ARGUNUSED(enp))
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	/* 'id' indexes the MKCONFIG-generated __mon_stat_name[] table */
	EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
	return (__mon_stat_name[id]);
}
#endif /* EFSYS_OPT_NAMES */
/*
 * Read the current monitor statistics into 'values' via the selected
 * monitor's update method.  'esmp' is passed through to the monitor
 * implementation (presumably DMA-able scratch memory -- confirm against
 * the per-monitor emo_stats_update implementations).
 */
	__checkReturn			int
efx_mon_stats_update(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp,
	__out_ecount(EFX_MON_NSTATS)	efx_mon_stat_value_t *values)
{
	efx_mon_t *emp = &(enp->en_mon);
	efx_mon_ops_t *emop = emp->em_emop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);

	return (emop->emo_stats_update(enp, esmp, values));
}
#endif /* EFSYS_OPT_MON_STATS */
/*
 * Shut down the hardware monitor.  The ops pointer is captured before
 * em_emop is cleared so the final reset can still be issued; a reset
 * failure is only reported via a probe (nothing the caller can do).
 */
void
efx_mon_fini(
	__in	efx_nic_t *enp)
{
	efx_mon_t *emp = &(enp->en_mon);
	efx_mon_ops_t *emop = emp->em_emop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);

	emp->em_emop = NULL;

	rc = emop->emo_reset(enp);
	if (rc != 0)
		EFSYS_PROBE1(fail1, int, rc);

	emp->em_type = EFX_MON_INVALID;

	enp->en_mod_flags &= ~EFX_MOD_MON;
}

View File

@ -0,0 +1,674 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
/*
 * Map a PCI vendor/device id pair to the controller family it belongs
 * to.  Returns ENOTSUP for devices this build does not support.
 */
	__checkReturn	int
efx_family(
	__in		uint16_t venid,
	__in		uint16_t devid,
	__out		efx_family_t *efp)
{
	if (venid == EFX_PCI_VENID_SFC) {
		switch (devid) {
#if EFSYS_OPT_FALCON
		case EFX_PCI_DEVID_FALCON:
			*efp = EFX_FAMILY_FALCON;
			return (0);
#endif
#if EFSYS_OPT_SIENA
		case EFX_PCI_DEVID_BETHPAGE:
		case EFX_PCI_DEVID_SIENA:
		case EFX_PCI_DEVID_SIENA_F1_UNINIT:
			*efp = EFX_FAMILY_SIENA;
			return (0);
#endif
		default:
			break;
		}
	}

	return (ENOTSUP);
}
/*
 * To support clients which aren't provided with any PCI context infer
 * the hardware family by inspecting the hardware. Obviously the caller
 * must be damn sure they're really talking to a supported device.
 */
	__checkReturn	int
efx_infer_family(
	__in		efsys_bar_t *esbp,
	__out		efx_family_t *efp)
{
	efx_family_t family;
	efx_oword_t oword;
	unsigned int portnum;
	int rc;

	EFSYS_BAR_READO(esbp, FR_AZ_CS_DEBUG_REG_OFST, &oword, B_TRUE);
	portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
	/* CS_PORT_NUM distinguishes families: 0 => Falcon, 1/2 => Siena */
	switch (portnum) {
#if EFSYS_OPT_FALCON
	case 0:
		family = EFX_FAMILY_FALCON;
		break;
#endif
#if EFSYS_OPT_SIENA
	case 1:
	case 2:
		family = EFX_FAMILY_SIENA;
		break;
#endif
	default:
		rc = ENOTSUP;
		goto fail1;
	}

	/* efp is optional: callers may only want the probe side effect */
	if (efp != NULL)
		*efp = family;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
* The built-in default value device id for port 1 of Siena is 0x0810.
* manftest needs to be able to cope with that.
*/
/* Scratch patterns used to exercise the Bus Interface Unit */
#define	EFX_BIU_MAGIC0	0x01234567
#define	EFX_BIU_MAGIC1	0xfedcba98

/*
 * Sanity-check the Bus Interface Unit by writing magic values to the
 * two driver scratch registers and reading them back, twice with the
 * values swapped.  Returns EIO on any mismatch.
 */
static	__checkReturn	int
efx_nic_biu_test(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;
	int rc;

	/*
	 * Write magic values to scratch registers 0 and 1, then
	 * verify that the values were written correctly.  Interleave
	 * the accesses to ensure that the BIU is not just reading
	 * back the cached value that was last written.
	 */
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
	EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword);

	EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
	EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword);

	EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword);
	if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
		rc = EIO;
		goto fail1;
	}

	EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword);
	if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
		rc = EIO;
		goto fail2;
	}

	/*
	 * Perform the same test, with the values swapped.  This
	 * ensures that subsequent tests don't start with the correct
	 * values already written into the scratch registers.
	 */
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
	EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword);

	EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
	EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword);

	EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword);
	if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
		rc = EIO;
		goto fail3;
	}

	EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword);
	if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
		rc = EIO;
		goto fail4;
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_FALCON
static efx_nic_ops_t __cs __efx_nic_falcon_ops = {
falcon_nic_probe, /* eno_probe */
falcon_nic_reset, /* eno_reset */
falcon_nic_init, /* eno_init */
#if EFSYS_OPT_DIAG
falcon_sram_test, /* eno_sram_test */
falcon_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
falcon_nic_fini, /* eno_fini */
falcon_nic_unprobe, /* eno_unprobe */
};
#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
static efx_nic_ops_t __cs __efx_nic_siena_ops = {
siena_nic_probe, /* eno_probe */
siena_nic_reset, /* eno_reset */
siena_nic_init, /* eno_init */
#if EFSYS_OPT_DIAG
siena_sram_test, /* eno_sram_test */
siena_nic_register_test, /* eno_register_test */
#endif /* EFSYS_OPT_DIAG */
siena_nic_fini, /* eno_fini */
siena_nic_unprobe, /* eno_unprobe */
};
#endif /* EFSYS_OPT_SIENA */
/*
 * Allocate and initialise an efx_nic_t handle for the given controller
 * family, recording the caller's identifier, BAR and lock handles.  The
 * object is returned through *enpp and must be released with
 * efx_nic_destroy().  Returns ENOMEM on allocation failure, ENOTSUP for
 * a family this build does not support.
 *
 * Bug fix: the fail2 label probed "fail3"; the probe now matches its
 * label so failure traces identify the correct exit point.
 */
	__checkReturn	int
efx_nic_create(
	__in		efx_family_t family,
	__in		efsys_identifier_t *esip,
	__in		efsys_bar_t *esbp,
	__in		efsys_lock_t *eslp,
	__deref_out	efx_nic_t **enpp)
{
	efx_nic_t *enp;
	int rc;

	EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
	EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);

	/* Allocate a NIC object */
	EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
	if (enp == NULL) {
		rc = ENOMEM;
		goto fail1;
	}

	enp->en_magic = EFX_NIC_MAGIC;

	/* Select the ops and feature set for this family */
	switch (family) {
#if EFSYS_OPT_FALCON
	case EFX_FAMILY_FALCON:
		enp->en_enop = (efx_nic_ops_t *)&__efx_nic_falcon_ops;
		enp->en_features = 0;
		break;
#endif	/* EFSYS_OPT_FALCON */

#if EFSYS_OPT_SIENA
	case EFX_FAMILY_SIENA:
		enp->en_enop = (efx_nic_ops_t *)&__efx_nic_siena_ops;
		enp->en_features = EFX_FEATURE_IPV6 |
		    EFX_FEATURE_LFSR_HASH_INSERT |
		    EFX_FEATURE_LINK_EVENTS | EFX_FEATURE_PERIODIC_MAC_STATS |
		    EFX_FEATURE_WOL | EFX_FEATURE_MCDI |
		    EFX_FEATURE_LOOKAHEAD_SPLIT | EFX_FEATURE_MAC_HEADER_FILTERS;
		break;
#endif	/* EFSYS_OPT_SIENA */

	default:
		rc = ENOTSUP;
		goto fail2;
	}

	enp->en_family = family;
	enp->en_esip = esip;
	enp->en_esbp = esbp;
	enp->en_eslp = eslp;

	*enpp = enp;

	return (0);

fail2:
	EFSYS_PROBE(fail2);

	enp->en_magic = 0;

	/* Free the NIC object */
	EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Probe the NIC: verify the BIU works, program the BAR address-region
 * register, then run the family-specific and PHY probe routines.
 */
	__checkReturn	int
efx_nic_probe(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop;
	efx_oword_t oword;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
#if EFSYS_OPT_MCDI
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
#endif	/* EFSYS_OPT_MCDI */
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));

	/* Test BIU */
	if ((rc = efx_nic_biu_test(enp)) != 0)
		goto fail1;

	/* Clear the region register */
	EFX_POPULATE_OWORD_4(oword,
	    FRF_AZ_ADR_REGION0, 0,
	    FRF_AZ_ADR_REGION1, (1 << 16),
	    FRF_AZ_ADR_REGION2, (2 << 16),
	    FRF_AZ_ADR_REGION3, (3 << 16));
	EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);

	enop = enp->en_enop;
	if ((rc = enop->eno_probe(enp)) != 0)
		goto fail2;

	if ((rc = efx_phy_probe(enp)) != 0)
		goto fail3;

	enp->en_mod_flags |= EFX_MOD_PROBE;

	return (0);

fail3:
	EFSYS_PROBE(fail3);

	/* Undo the family-specific probe */
	enop->eno_unprobe(enp);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_PCIE_TUNE
/*
 * Tune the PCIe link to use 'nlanes' lanes.  Only implemented for
 * Falcon; other families return ENOTSUP.  Must be called after probe
 * but before NIC init.
 *
 * Consistency fix: 'nlanes' was missing the __in annotation carried by
 * every other parameter in this file.
 */
	__checkReturn	int
efx_nic_pcie_tune(
	__in		efx_nic_t *enp,
	__in		unsigned int nlanes)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));

#if EFSYS_OPT_FALCON
	if (enp->en_family == EFX_FAMILY_FALCON)
		return (falcon_nic_pcie_tune(enp, nlanes));
#endif
	return (ENOTSUP);
}
/*
 * Enable PCIe extended sync on the link.  Supported on Siena only;
 * every other family gets ENOTSUP.  Must be called after probe but
 * before NIC init.
 */
	__checkReturn	int
efx_nic_pcie_extended_sync(
	__in		efx_nic_t *enp)
{
	int rc = ENOTSUP;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));

#if EFSYS_OPT_SIENA
	if (enp->en_family == EFX_FAMILY_SIENA)
		rc = siena_nic_pcie_extended_sync(enp);
#endif

	return (rc);
}
#endif /* EFSYS_OPT_PCIE_TUNE */
/*
 * Initialise the NIC hardware via the family-specific eno_init method
 * and mark the NIC module active.  Fails with EINVAL if the module is
 * already initialised.
 */
	__checkReturn	int
efx_nic_init(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop = enp->en_enop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);

	if (enp->en_mod_flags & EFX_MOD_NIC) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = enop->eno_init(enp)) != 0)
		goto fail2;

	enp->en_mod_flags |= EFX_MOD_NIC;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Shut the NIC hardware down.  All dependent modules (interrupts,
 * events, RX, TX) must already have been finalised, which the
 * assertions below enforce.
 */
void
efx_nic_fini(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop = enp->en_enop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));

	enop->eno_fini(enp);

	enp->en_mod_flags &= ~EFX_MOD_NIC;
}
/*
 * Reverse efx_nic_probe(): unprobe the PHY and the family-specific
 * state.  The NIC and all dependent modules must already be down.
 */
void
efx_nic_unprobe(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop = enp->en_enop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
#if EFSYS_OPT_MCDI
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
#endif	/* EFSYS_OPT_MCDI */
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));

	efx_phy_unprobe(enp);

	enop->eno_unprobe(enp);

	enp->en_mod_flags &= ~EFX_MOD_PROBE;
}
/*
 * Release an efx_nic_t allocated by efx_nic_create().  Every module
 * must already have been shut down (en_mod_flags == 0).
 */
void
efx_nic_destroy(
	__in	efx_nic_t *enp)
{
	efsys_identifier_t *esip = enp->en_esip;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);

	/* Scrub the handle before returning it to the allocator */
	enp->en_magic = 0;
	enp->en_enop = NULL;
	enp->en_eslp = NULL;
	enp->en_esbp = NULL;
	enp->en_esip = NULL;
	enp->en_family = 0;

	/* Free the NIC object */
	EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
}
/*
 * Reset the controller via the family-specific eno_reset method and
 * flag that the MAC configuration must be reapplied.
 */
	__checkReturn	int
efx_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop = enp->en_enop;
	unsigned int mod_flags;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
	/*
	 * All modules except the MCDI, PROBE, NVRAM, VPD, MON (which we
	 * do not reset here) must have been shut down or never initialized.
	 *
	 * A rule of thumb here is: if the controller or MC reboots, *any*
	 * state it held is lost.  If that state is lost and needs
	 * reapplying, then the module *must* not be initialised during
	 * the reset.
	 */
	mod_flags = enp->en_mod_flags;
	mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
		    EFX_MOD_VPD | EFX_MOD_MON);
	EFSYS_ASSERT3U(mod_flags, ==, 0);
	if (mod_flags != 0) {
		rc = EINVAL;
		goto fail1;
	}

	if ((rc = enop->eno_reset(enp)) != 0)
		goto fail2;

	/* The MAC must be reconfigured after any reset */
	enp->en_reset_flags |= EFX_RESET_MAC;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/* Return a pointer to the NIC's configuration structure. */
const efx_nic_cfg_t *
efx_nic_cfg_get(
	__in		efx_nic_t *enp)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	return (&(enp->en_nic_cfg));
}
#if EFSYS_OPT_DIAG
/*
 * Run the family-specific register self-test.  The NIC module must not
 * be initialised, since the test clobbers register contents.
 */
	__checkReturn	int
efx_nic_register_test(
	__in		efx_nic_t *enp)
{
	efx_nic_ops_t *enop = enp->en_enop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));

	if ((rc = enop->eno_register_test(enp)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Walk the 'count' single-row registers described by 'rsp', checking
 * that every writable bit (per rsp->mask) can be set and cleared in
 * isolation.  Each register's original value is restored afterwards,
 * including on the failure path.  Returns EIO on the first mismatch.
 *
 * Bug fix: the mask test used (1 << bit) with bit ranging up to 127;
 * shifting an int by >= 32 is undefined behaviour (it only appeared to
 * work because x86 masks shift counts mod 32).  eo_u32[bit >> 5]
 * already selects the 32-bit word, so the shift must be by the
 * in-word bit position, (bit & 0x1f), using an unsigned constant.
 */
	__checkReturn	int
efx_nic_test_registers(
	__in		efx_nic_t *enp,
	__in		efx_register_set_t *rsp,
	__in		size_t count)
{
	unsigned int bit;
	efx_oword_t original;
	efx_oword_t reg;
	efx_oword_t buf;
	int rc;

	while (count > 0) {
		/* This function is only suitable for registers */
		EFSYS_ASSERT(rsp->rows == 1);

		/* bit sweep on and off */
		EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
			    B_TRUE);
		for (bit = 0; bit < 128; bit++) {
			/* Is this bit in the mask? */
			if (~(rsp->mask.eo_u32[bit >> 5]) &
			    (1U << (bit & 0x1f)))
				continue;

			/* Test this bit can be set in isolation */
			reg = original;
			EFX_AND_OWORD(reg, rsp->mask);
			EFX_SET_OWORD_BIT(reg, bit);

			EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
				    B_TRUE);
			EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
				    B_TRUE);

			EFX_AND_OWORD(buf, rsp->mask);
			if (memcmp(&reg, &buf, sizeof (reg))) {
				rc = EIO;
				goto fail1;
			}

			/* Test this bit can be cleared in isolation */
			EFX_OR_OWORD(reg, rsp->mask);
			EFX_CLEAR_OWORD_BIT(reg, bit);

			EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
				    B_TRUE);
			EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
				    B_TRUE);

			EFX_AND_OWORD(buf, rsp->mask);
			if (memcmp(&reg, &buf, sizeof (reg))) {
				rc = EIO;
				goto fail2;
			}
		}

		/* Restore the old value */
		EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
			    B_TRUE);

		--count;
		++rsp;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	/* Restore the old value */
	EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);

	return (rc);
}
/*
 * Fill each register table described by 'rsp' with the generated
 * pattern, then read every row back and verify it.  Two qwords are
 * generated per row (one oword); rows are rsp->step bytes apart.
 * Returns EIO on the first mismatch.  Table contents are NOT restored.
 */
	__checkReturn	int
efx_nic_test_tables(
	__in		efx_nic_t *enp,
	__in		efx_register_set_t *rsp,
	__in		efx_pattern_type_t pattern,
	__in		size_t count)
{
	efx_sram_pattern_fn_t func;
	unsigned int index;
	unsigned int address;
	efx_oword_t reg;
	efx_oword_t buf;
	int rc;

	EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
	func = __efx_sram_pattern_fns[pattern];

	while (count > 0) {
		/* Write */
		address = rsp->address;
		for (index = 0; index < rsp->rows; ++index) {
			func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
			func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
			EFX_AND_OWORD(reg, rsp->mask);
			EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);

			address += rsp->step;
		}

		/* Read */
		address = rsp->address;
		for (index = 0; index < rsp->rows; ++index) {
			/* Regenerate the expected pattern for comparison */
			func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
			func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
			EFX_AND_OWORD(reg, rsp->mask);
			EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
			if (memcmp(&reg, &buf, sizeof (reg))) {
				rc = EIO;
				goto fail1;
			}

			address += rsp->step;
		}

		++rsp;
		--count;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_DIAG */

View File

@ -0,0 +1,372 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_NVRAM
#if EFSYS_OPT_FALCON
static efx_nvram_ops_t __cs __efx_nvram_falcon_ops = {
#if EFSYS_OPT_DIAG
falcon_nvram_test, /* envo_test */
#endif /* EFSYS_OPT_DIAG */
falcon_nvram_size, /* envo_size */
falcon_nvram_get_version, /* envo_get_version */
falcon_nvram_rw_start, /* envo_rw_start */
falcon_nvram_read_chunk, /* envo_read_chunk */
falcon_nvram_erase, /* envo_erase */
falcon_nvram_write_chunk, /* envo_write_chunk */
falcon_nvram_rw_finish, /* envo_rw_finish */
falcon_nvram_set_version, /* envo_set_version */
};
#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
static efx_nvram_ops_t __cs __efx_nvram_siena_ops = {
#if EFSYS_OPT_DIAG
siena_nvram_test, /* envo_test */
#endif /* EFSYS_OPT_DIAG */
siena_nvram_size, /* envo_size */
siena_nvram_get_version, /* envo_get_version */
siena_nvram_rw_start, /* envo_rw_start */
siena_nvram_read_chunk, /* envo_read_chunk */
siena_nvram_erase, /* envo_erase */
siena_nvram_write_chunk, /* envo_write_chunk */
siena_nvram_rw_finish, /* envo_rw_finish */
siena_nvram_set_version, /* envo_set_version */
};
#endif /* EFSYS_OPT_SIENA */
/*
 * Select the NVRAM ops for this controller family and mark the NVRAM
 * module initialised.  Must be called after probe, at most once.
 */
	__checkReturn	int
efx_nvram_init(
	__in		efx_nic_t *enp)
{
	efx_nvram_ops_t *envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));

	switch (enp->en_family) {
#if EFSYS_OPT_FALCON
	case EFX_FAMILY_FALCON:
		envop = (efx_nvram_ops_t *)&__efx_nvram_falcon_ops;
		break;
#endif	/* EFSYS_OPT_FALCON */

#if EFSYS_OPT_SIENA
	case EFX_FAMILY_SIENA:
		envop = (efx_nvram_ops_t *)&__efx_nvram_siena_ops;
		break;
#endif	/* EFSYS_OPT_SIENA */

	default:
		/* Unreachable: en_family was validated at create time */
		EFSYS_ASSERT(0);
		rc = ENOTSUP;
		goto fail1;
	}

	enp->en_envop = envop;
	enp->en_mod_flags |= EFX_MOD_NVRAM;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_DIAG
/* Run the family-specific NVRAM self-test (diagnostics builds only). */
	__checkReturn	int
efx_nvram_test(
	__in		efx_nic_t *enp)
{
	efx_nvram_ops_t *envop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);

	if ((rc = envop->envo_test(enp)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_DIAG */
/* Report the size in bytes of NVRAM partition 'type' through *sizep. */
	__checkReturn		int
efx_nvram_size(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__out			size_t *sizep)
{
	efx_nvram_ops_t *envop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);

	rc = envop->envo_size(enp, type, sizep);
	if (rc != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read the firmware subtype and four-part version number stored in the
 * given NVRAM partition.  Requires both the probe and NVRAM modules.
 */
	__checkReturn	int
efx_nvram_get_version(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__out		uint32_t *subtypep,
	__out_ecount(4)	uint16_t version[4])
{
	efx_nvram_ops_t *nvop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);

	rc = nvop->envo_get_version(enp, type, subtypep, version);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Begin a read/write transaction on an NVRAM partition, optionally
 * returning the preferred chunk size.  Only one partition may be locked
 * at a time; on success the lock is recorded in en_nvram_locked and
 * must be released with efx_nvram_rw_finish().
 */
	__checkReturn	int
efx_nvram_rw_start(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__out_opt	size_t *chunk_sizep)
{
	efx_nvram_ops_t *nvop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
	EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
	/* No other partition may be mid-transaction. */
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);

	rc = nvop->envo_rw_start(enp, type, chunk_sizep);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	/* Record the holder of the NVRAM lock. */
	enp->en_nvram_locked = type;

	return (0);
}
/*
 * Read a chunk of data from the NVRAM partition currently locked by
 * efx_nvram_rw_start().  The partition must match the active lock.
 */
	__checkReturn	int
efx_nvram_read_chunk(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__in			unsigned int offset,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_nvram_ops_t *nvop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
	EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);

	rc = nvop->envo_read_chunk(enp, type, offset, data, size);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Erase the NVRAM partition currently locked by efx_nvram_rw_start().
 * The partition must match the active lock.
 */
	__checkReturn	int
efx_nvram_erase(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type)
{
	efx_nvram_ops_t *nvop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
	EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);

	rc = nvop->envo_erase(enp, type);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Write a chunk of data to the NVRAM partition currently locked by
 * efx_nvram_rw_start().  The partition must match the active lock.
 */
	__checkReturn	int
efx_nvram_write_chunk(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__in			unsigned int offset,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_nvram_ops_t *nvop = enp->en_envop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
	EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);

	rc = nvop->envo_write_chunk(enp, type, offset, data, size);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * End the read/write transaction on the given NVRAM partition and
 * release the NVRAM lock taken by efx_nvram_rw_start().
 */
			void
efx_nvram_rw_finish(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type)
{
	efx_nvram_ops_t *nvop = enp->en_envop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
	EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);

	nvop->envo_rw_finish(enp, type);

	/* Drop the lock recorded by efx_nvram_rw_start(). */
	enp->en_nvram_locked = EFX_NVRAM_INVALID;
}
__checkReturn int
efx_nvram_set_version(
__in efx_nic_t *enp,
__in efx_nvram_type_t type,
__out uint16_t version[4])
{
efx_nvram_ops_t *envop = enp->en_envop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
/*
* The Siena implementation of envo_set_version() will attempt to
* acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
* Therefore, you can't have already acquired the NVRAM_UPDATE lock.
*/
EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
if ((rc = envop->envo_set_version(enp, type, version)) != 0)
goto fail1;
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
/*
 * Tear down the NVRAM module: clear the method table pointer and the
 * module flag.  No partition may still hold the NVRAM lock.
 */
			void
efx_nvram_fini(
	__in		efx_nic_t *enp)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
	/* All transactions must have been finished before teardown. */
	EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);

	enp->en_mod_flags &= ~EFX_MOD_NVRAM;
	enp->en_envop = NULL;
}
#endif /* EFSYS_OPT_NVRAM */

View File

@ -0,0 +1,752 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_FALCON
#include "falcon_nvram.h"
#endif
#if EFSYS_OPT_MAC_FALCON_XMAC
#include "falcon_xmac.h"
#endif
#if EFSYS_OPT_MAC_FALCON_GMAC
#include "falcon_gmac.h"
#endif
#if EFSYS_OPT_PHY_NULL
#include "nullphy.h"
#endif
#if EFSYS_OPT_PHY_QT2022C2
#include "qt2022c2.h"
#endif
#if EFSYS_OPT_PHY_SFX7101
#include "sfx7101.h"
#endif
#if EFSYS_OPT_PHY_TXC43128
#include "txc43128.h"
#endif
#if EFSYS_OPT_PHY_SFT9001
#include "sft9001.h"
#endif
#if EFSYS_OPT_PHY_QT2025C
#include "qt2025c.h"
#endif
#if EFSYS_OPT_PHY_NULL
/*
 * Per-PHY method tables.  These are positional initialisers: entry order
 * must match the member order of efx_phy_ops_t exactly (see the per-entry
 * comments).  A NULL entry means the operation is not supported by that
 * PHY and callers must check before dispatching.
 */
static efx_phy_ops_t __cs __efx_phy_null_ops = {
NULL, /* epo_power */
nullphy_reset, /* epo_reset */
nullphy_reconfigure, /* epo_reconfigure */
nullphy_verify, /* epo_verify */
NULL, /* epo_uplink_check */
nullphy_downlink_check, /* epo_downlink_check */
nullphy_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
nullphy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
nullphy_prop_name, /* epo_prop_name */
#endif
nullphy_prop_get, /* epo_prop_get */
nullphy_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
NULL, /* epo_bist_start */
NULL, /* epo_bist_poll */
NULL, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_NULL */
#if EFSYS_OPT_PHY_QT2022C2
/* Ops for the QT2022C2 PHY (Falcon boards). */
static efx_phy_ops_t __cs __efx_phy_qt2022c2_ops = {
NULL, /* epo_power */
qt2022c2_reset, /* epo_reset */
qt2022c2_reconfigure, /* epo_reconfigure */
qt2022c2_verify, /* epo_verify */
qt2022c2_uplink_check, /* epo_uplink_check */
qt2022c2_downlink_check, /* epo_downlink_check */
qt2022c2_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
qt2022c2_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
qt2022c2_prop_name, /* epo_prop_name */
#endif
qt2022c2_prop_get, /* epo_prop_get */
qt2022c2_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
NULL, /* epo_bist_start */
NULL, /* epo_bist_poll */
NULL, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_QT2022C2 */
#if EFSYS_OPT_PHY_SFX7101
/* Ops for the SFX7101 10GBASE-T PHY (has explicit power control). */
static efx_phy_ops_t __cs __efx_phy_sfx7101_ops = {
sfx7101_power, /* epo_power */
sfx7101_reset, /* epo_reset */
sfx7101_reconfigure, /* epo_reconfigure */
sfx7101_verify, /* epo_verify */
sfx7101_uplink_check, /* epo_uplink_check */
sfx7101_downlink_check, /* epo_downlink_check */
sfx7101_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
sfx7101_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
sfx7101_prop_name, /* epo_prop_name */
#endif
sfx7101_prop_get, /* epo_prop_get */
sfx7101_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
NULL, /* epo_bist_start */
NULL, /* epo_bist_poll */
NULL, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_SFX7101 */
#if EFSYS_OPT_PHY_TXC43128
/* Ops for the TXC43128 PHY. */
static efx_phy_ops_t __cs __efx_phy_txc43128_ops = {
NULL, /* epo_power */
txc43128_reset, /* epo_reset */
txc43128_reconfigure, /* epo_reconfigure */
txc43128_verify, /* epo_verify */
txc43128_uplink_check, /* epo_uplink_check */
txc43128_downlink_check, /* epo_downlink_check */
txc43128_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
txc43128_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
txc43128_prop_name, /* epo_prop_name */
#endif
txc43128_prop_get, /* epo_prop_get */
txc43128_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
NULL, /* epo_bist_start */
NULL, /* epo_bist_poll */
NULL, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_TXC43128 */
#if EFSYS_OPT_PHY_SFT9001
/* Ops for the SFT9001 PHY (only table here with BIST support). */
static efx_phy_ops_t __cs __efx_phy_sft9001_ops = {
NULL, /* epo_power */
sft9001_reset, /* epo_reset */
sft9001_reconfigure, /* epo_reconfigure */
sft9001_verify, /* epo_verify */
sft9001_uplink_check, /* epo_uplink_check */
sft9001_downlink_check, /* epo_downlink_check */
sft9001_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
sft9001_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
sft9001_prop_name, /* epo_prop_name */
#endif
sft9001_prop_get, /* epo_prop_get */
sft9001_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
sft9001_bist_start, /* epo_bist_start */
sft9001_bist_poll, /* epo_bist_poll */
sft9001_bist_stop, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_SFT9001 */
#if EFSYS_OPT_PHY_QT2025C
/* Ops for the QT2025C PHY. */
static efx_phy_ops_t __cs __efx_phy_qt2025c_ops = {
NULL, /* epo_power */
qt2025c_reset, /* epo_reset */
qt2025c_reconfigure, /* epo_reconfigure */
qt2025c_verify, /* epo_verify */
qt2025c_uplink_check, /* epo_uplink_check */
qt2025c_downlink_check, /* epo_downlink_check */
qt2025c_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
qt2025c_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
qt2025c_prop_name, /* epo_prop_name */
#endif
qt2025c_prop_get, /* epo_prop_get */
qt2025c_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
NULL, /* epo_bist_start */
NULL, /* epo_bist_poll */
NULL, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_PHY_QT2025C */
#if EFSYS_OPT_SIENA
/* Ops for Siena, where PHY management is mediated by the MC firmware. */
static efx_phy_ops_t __cs __efx_phy_siena_ops = {
siena_phy_power, /* epo_power */
NULL, /* epo_reset */
siena_phy_reconfigure, /* epo_reconfigure */
siena_phy_verify, /* epo_verify */
NULL, /* epo_uplink_check */
NULL, /* epo_downlink_check */
siena_phy_oui_get, /* epo_oui_get */
#if EFSYS_OPT_PHY_STATS
siena_phy_stats_update, /* epo_stats_update */
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
siena_phy_prop_name, /* epo_prop_name */
#endif
siena_phy_prop_get, /* epo_prop_get */
siena_phy_prop_set, /* epo_prop_set */
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
siena_phy_bist_start, /* epo_bist_start */
siena_phy_bist_poll, /* epo_bist_poll */
siena_phy_bist_stop, /* epo_bist_stop */
#endif /* EFSYS_OPT_PHY_BIST */
};
#endif /* EFSYS_OPT_SIENA */
/*
 * Record the port number and PHY type from the NIC configuration and hook
 * in the method table matching the controller family and fitted PHY.
 * Returns ENOTSUP if the family or PHY type was not compiled in; on
 * failure the recorded port/PHY type are cleared again.
 */
__checkReturn int
efx_phy_probe(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_phy_ops_t *epop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
epp->ep_port = encp->enc_port;
epp->ep_phy_type = encp->enc_phy_type;
/* Hook in operations structure */
switch (enp->en_family) {
#if EFSYS_OPT_FALCON
case EFX_FAMILY_FALCON:
/* Falcon boards carry one of several external PHYs. */
switch (epp->ep_phy_type) {
#if EFSYS_OPT_PHY_NULL
case PHY_TYPE_NONE_DECODE:
epop = (efx_phy_ops_t *)&__efx_phy_null_ops;
break;
#endif
#if EFSYS_OPT_PHY_QT2022C2
case PHY_TYPE_QT2022C2_DECODE:
epop = (efx_phy_ops_t *)&__efx_phy_qt2022c2_ops;
break;
#endif
#if EFSYS_OPT_PHY_SFX7101
case PHY_TYPE_SFX7101_DECODE:
epop = (efx_phy_ops_t *)&__efx_phy_sfx7101_ops;
break;
#endif
#if EFSYS_OPT_PHY_TXC43128
case PHY_TYPE_TXC43128_DECODE:
epop = (efx_phy_ops_t *)&__efx_phy_txc43128_ops;
break;
#endif
#if EFSYS_OPT_PHY_SFT9001
case PHY_TYPE_SFT9001A_DECODE:
case PHY_TYPE_SFT9001B_DECODE:
epop = (efx_phy_ops_t *)&__efx_phy_sft9001_ops;
break;
#endif
#if EFSYS_OPT_PHY_QT2025C
/*
 * NOTE(review): sibling cases use PHY_TYPE_*_DECODE constants;
 * confirm EFX_PHY_QT2025C is the intended decode value here.
 */
case EFX_PHY_QT2025C:
epop = (efx_phy_ops_t *)&__efx_phy_qt2025c_ops;
break;
#endif
default:
rc = ENOTSUP;
goto fail1;
}
break;
#endif /* EFSYS_OPT_FALCON */
#if EFSYS_OPT_SIENA
case EFX_FAMILY_SIENA:
/* Siena uses a single MC-mediated PHY interface. */
epop = (efx_phy_ops_t *)&__efx_phy_siena_ops;
break;
#endif /* EFSYS_OPT_SIENA */
default:
rc = ENOTSUP;
goto fail1;
}
epp->ep_epop = epop;
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
/* Undo the partial initialisation performed above. */
epp->ep_port = 0;
epp->ep_phy_type = 0;
return (rc);
}
/*
 * Ask the PHY to verify that it is operating correctly.  The port module
 * must be initialised.  Returns the PHY method's errno directly.
 */
	__checkReturn	int
efx_phy_verify(
	__in		efx_nic_t *enp)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	return (phyop->epo_verify(enp));
}
#if EFSYS_OPT_PHY_LED_CONTROL
/*
 * Set the PHY LED mode, reconfiguring the PHY if the mode changes.
 * EFX_PHY_LED_DEFAULT is always accepted; other modes must be present in
 * the NIC configuration's LED mask.  Returns ENOTSUP for an unsupported
 * mode, 0 on success or if the mode is already in effect.
 */
__checkReturn int
efx_phy_led_set(
__in efx_nic_t *enp,
__in efx_phy_led_mode_t mode)
{
efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
uint32_t mask;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
/* No-op if the requested mode is already set. */
if (epp->ep_phy_led_mode == mode)
goto done;
/* The default mode is always allowed; others come from the LED mask. */
mask = (1 << EFX_PHY_LED_DEFAULT);
mask |= encp->enc_led_mask;
if (!((1 << mode) & mask)) {
rc = ENOTSUP;
goto fail1;
}
EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
epp->ep_phy_led_mode = mode;
/* Push the new LED mode to the hardware. */
if ((rc = epop->epo_reconfigure(enp)) != 0)
goto fail2;
done:
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#endif /* EFSYS_OPT_PHY_LED_CONTROL */
/*
 * Return one of the PHY capability masks: the currently advertised set,
 * the default advertised set, or the permanently supported set,
 * selected by `flag'.  An unknown flag trips an assertion and leaves
 * *maskp unwritten.
 */
			void
efx_phy_adv_cap_get(
	__in		efx_nic_t *enp,
	__in		uint32_t flag,
	__out		uint32_t *maskp)
{
	efx_port_t *portp = &(enp->en_port);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);

	if (flag == EFX_PHY_CAP_CURRENT)
		*maskp = portp->ep_adv_cap_mask;
	else if (flag == EFX_PHY_CAP_DEFAULT)
		*maskp = portp->ep_default_adv_cap_mask;
	else if (flag == EFX_PHY_CAP_PERM)
		*maskp = portp->ep_phy_cap_mask;
	else
		EFSYS_ASSERT(B_FALSE);
}
/*
 * Change the set of capabilities the PHY advertises and reconfigure it.
 * Bits outside the permanently supported mask yield ENOTSUP; setting the
 * current mask again is a no-op.
 */
	__checkReturn	int
efx_phy_adv_cap_set(
	__in		efx_nic_t *enp,
	__in		uint32_t mask)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	/* Reject bits outside the permanently supported capability set. */
	if ((mask & ~portp->ep_phy_cap_mask) != 0) {
		rc = ENOTSUP;
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	/* Nothing to do if the advertised set is unchanged. */
	if (portp->ep_adv_cap_mask == mask)
		return (0);

	portp->ep_adv_cap_mask = mask;

	rc = phyop->epo_reconfigure(enp);
	if (rc != 0) {
		/*
		 * NOTE(review): ep_adv_cap_mask is not rolled back if the
		 * reconfigure fails — this matches the original behaviour.
		 */
		EFSYS_PROBE(fail2);
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Return the capability mask advertised by the link partner.
 * The port module must be initialised.
 */
			void
efx_phy_lp_cap_get(
	__in		efx_nic_t *enp,
	__out		uint32_t *maskp)
{
	efx_port_t *portp = &(enp->en_port);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	*maskp = portp->ep_lp_cap_mask;
}
/*
 * Read the PHY's IEEE OUI.  Dispatches to the PHY method and returns
 * its errno directly.
 */
	__checkReturn	int
efx_phy_oui_get(
	__in		efx_nic_t *enp,
	__out		uint32_t *ouip)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	return (phyop->epo_oui_get(enp, ouip));
}
/*
 * Report the media type in use: the type of any fitted module if one is
 * present, otherwise the port's fixed media type.
 */
			void
efx_phy_media_type_get(
	__in		efx_nic_t *enp,
	__out		efx_phy_media_type_t *typep)
{
	efx_port_t *portp = &(enp->en_port);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	/* Prefer the fitted module's type when one has been detected. */
	*typep = (portp->ep_module_type != EFX_PHY_MEDIA_INVALID) ?
	    portp->ep_module_type : portp->ep_fixed_port_type;
}
#if EFSYS_OPT_PHY_STATS
#if EFSYS_OPT_NAMES
/*
 * PHY statistic names, indexed by efx_phy_stat_t.  This table is
 * machine-generated (mkconfig) — do not edit the entries by hand.
 */
/* START MKCONFIG GENERATED PhyStatNamesBlock 271268f3da0e804f */
static const char __cs * __cs __efx_phy_stat_name[] = {
"oui",
"pma_pmd_link_up",
"pma_pmd_rx_fault",
"pma_pmd_tx_fault",
"pma_pmd_rev_a",
"pma_pmd_rev_b",
"pma_pmd_rev_c",
"pma_pmd_rev_d",
"pcs_link_up",
"pcs_rx_fault",
"pcs_tx_fault",
"pcs_ber",
"pcs_block_errors",
"phy_xs_link_up",
"phy_xs_rx_fault",
"phy_xs_tx_fault",
"phy_xs_align",
"phy_xs_sync_a",
"phy_xs_sync_b",
"phy_xs_sync_c",
"phy_xs_sync_d",
"an_link_up",
"an_master",
"an_local_rx_ok",
"an_remote_rx_ok",
"cl22ext_link_up",
"snr_a",
"snr_b",
"snr_c",
"snr_d",
"pma_pmd_signal_a",
"pma_pmd_signal_b",
"pma_pmd_signal_c",
"pma_pmd_signal_d",
"an_complete",
"pma_pmd_rev_major",
"pma_pmd_rev_minor",
"pma_pmd_rev_micro",
"pcs_fw_version_0",
"pcs_fw_version_1",
"pcs_fw_version_2",
"pcs_fw_version_3",
"pcs_fw_build_yy",
"pcs_fw_build_mm",
"pcs_fw_build_dd",
"pcs_op_mode",
};
/* END MKCONFIG GENERATED PhyStatNamesBlock */
/*
 * Map a PHY statistic index to its human-readable name.  The index must
 * be a valid efx_phy_stat_t; names come from the generated table above.
 */
const char __cs *
efx_phy_stat_name(
__in efx_nic_t *enp,
__in efx_phy_stat_t type)
{
_NOTE(ARGUNUSED(enp))
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
return (__efx_phy_stat_name[type]);
}
#endif /* EFSYS_OPT_NAMES */
/*
 * Refresh the PHY statistics into the caller-supplied buffer of
 * EFX_PHY_NSTATS entries, using `esmp' as DMA-able scratch memory.
 * Dispatches to the PHY method and returns its errno directly.
 */
	__checkReturn			int
efx_phy_stats_update(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp,
	__out_ecount(EFX_PHY_NSTATS)	uint32_t *stat)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	return (phyop->epo_stats_update(enp, esmp, stat));
}
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
/*
 * Map a PHY property id to its name via the PHY-specific method.
 * Requires the NIC to have been probed.
 */
const char __cs *
efx_phy_prop_name(
__in efx_nic_t *enp,
__in unsigned int id)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
return (epop->epo_prop_name(enp, id));
}
#endif /* EFSYS_OPT_NAMES */
/*
 * Read the value of PHY property `id' into *valp, subject to `flags'.
 * Dispatches to the PHY method and returns its errno directly.
 */
	__checkReturn	int
efx_phy_prop_get(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t flags,
	__out		uint32_t *valp)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	return (phyop->epo_prop_get(enp, id, flags, valp));
}
/*
 * Set PHY property `id' to `val'.  Dispatches to the PHY method and
 * returns its errno directly.
 */
	__checkReturn	int
efx_phy_prop_set(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t val)
{
	efx_port_t *portp = &(enp->en_port);
	efx_phy_ops_t *phyop = portp->ep_epop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);

	return (phyop->epo_prop_set(enp, id, val));
}
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
/*
 * Start a PHY built-in self test of the given type.  Only one BIST may
 * run at a time (ep_current_bist tracks it); PHYs without a bist_start
 * method yield ENOTSUP.  On success the type is recorded and must later
 * be passed to efx_phy_bist_poll()/efx_phy_bist_stop().
 */
__checkReturn int
efx_phy_bist_start(
__in efx_nic_t *enp,
__in efx_phy_bist_type_t type)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
/* No other BIST may already be in progress. */
EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_PHY_BIST_TYPE_UNKNOWN);
/* A NULL entry in the ops table means BIST is unsupported. */
if (epop->epo_bist_start == NULL) {
rc = ENOTSUP;
goto fail1;
}
if ((rc = epop->epo_bist_start(enp, type)) != 0)
goto fail2;
/* Remember which BIST is running for poll/stop validation. */
epp->ep_current_bist = type;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
/*
 * Poll a running PHY BIST of the given type for completion, returning
 * the result in *resultp and, optionally, up to `count' diagnostic
 * values (with *value_maskp saying which are valid).  The type must
 * match the BIST started by efx_phy_bist_start().
 */
__checkReturn int
efx_phy_bist_poll(
__in efx_nic_t *enp,
__in efx_phy_bist_type_t type,
__out efx_phy_bist_result_t *resultp,
__out_opt uint32_t *value_maskp,
__out_ecount_opt(count) unsigned long *valuesp,
__in size_t count)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
/*
 * A BIST can only be running if epo_bist_start succeeded, so the
 * method should exist; the runtime check below is belt-and-braces
 * for non-DEBUG builds where the assertion compiles away.
 */
EFSYS_ASSERT(epop->epo_bist_poll != NULL);
if (epop->epo_bist_poll == NULL) {
rc = ENOTSUP;
goto fail1;
}
if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
valuesp, count)) != 0)
goto fail2;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
/*
 * Stop the running PHY BIST of the given type and clear the in-progress
 * record so another BIST may be started.  The type must match the BIST
 * started by efx_phy_bist_start().
 */
void
efx_phy_bist_stop(
__in efx_nic_t *enp,
__in efx_phy_bist_type_t type)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT3U(type, !=, EFX_PHY_BIST_TYPE_UNKNOWN);
EFSYS_ASSERT3U(type, <, EFX_PHY_BIST_TYPE_NTYPES);
EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
/* Defensive NULL check mirrors efx_phy_bist_poll() above. */
EFSYS_ASSERT(epop->epo_bist_stop != NULL);
if (epop->epo_bist_stop != NULL)
epop->epo_bist_stop(enp, type);
epp->ep_current_bist = EFX_PHY_BIST_TYPE_UNKNOWN;
}
#endif /* EFSYS_OPT_PHY_BIST */
/*
 * Undo efx_phy_probe(): detach the PHY method table and clear the
 * recorded port number, PHY type and advertised capability mask.
 */
			void
efx_phy_unprobe(
	__in		efx_nic_t *enp)
{
	efx_port_t *portp = &(enp->en_port);

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	portp->ep_adv_cap_mask = 0;
	portp->ep_phy_type = 0;
	portp->ep_port = 0;
	portp->ep_epop = NULL;
}

View File

@ -0,0 +1,226 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_impl.h"
/*
 * Initialise the port module: select and reconfigure the MAC, then power
 * up (or reset) and reconfigure the PHY.  Requires the probe and NIC
 * modules; returns EINVAL if the port is already initialised.  On any
 * failure the EFX_MOD_PORT flag is cleared again.
 */
__checkReturn int
efx_port_init(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
if (enp->en_mod_flags & EFX_MOD_PORT) {
rc = EINVAL;
goto fail1;
}
/* Set the flag early; the fail path below clears it again. */
enp->en_mod_flags |= EFX_MOD_PORT;
epp->ep_mac_type = EFX_MAC_INVALID;
epp->ep_link_mode = EFX_LINK_UNKNOWN;
epp->ep_mac_poll_needed = B_TRUE;
/* Keep the MAC in drain until efx_mac_drain() is told otherwise. */
epp->ep_mac_drain = B_TRUE;
/* Configure the MAC */
if ((rc = efx_mac_select(enp)) != 0)
goto fail1;
/* NOTE(review): emo_reconfigure result is ignored here — confirm. */
epp->ep_emop->emo_reconfigure(enp);
/*
 * Turn on the PHY if available, otherwise reset it, and
 * reconfigure it with the current configuration.
 */
if (epop->epo_power != NULL) {
if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
goto fail2;
} else {
if ((rc = epop->epo_reset(enp)) != 0)
goto fail2;
}
/* The PHY has now been reset; clear the pending reset flag. */
EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
enp->en_reset_flags &= ~EFX_RESET_PHY;
if ((rc = epop->epo_reconfigure(enp)) != 0)
goto fail3;
return (0);
fail3:
EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
enp->en_mod_flags &= ~EFX_MOD_PORT;
return (rc);
}
/*
 * Poll the MAC for the current link mode.  Callers that do not care
 * about the link mode may pass a NULL link_modep.  Returns 0 on
 * success or the MAC method's errno on failure.
 */
	__checkReturn	int
efx_port_poll(
	__in		efx_nic_t *enp,
	__out		efx_link_mode_t	*link_modep)
{
	efx_port_t *portp = &(enp->en_port);
	efx_mac_ops_t *macop = portp->ep_emop;
	efx_link_mode_t discard_mode;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
	EFSYS_ASSERT(macop != NULL);
	EFSYS_ASSERT(!portp->ep_mac_stats_pending);

	/* Point at a local dummy when the caller ignores the link mode. */
	if (link_modep == NULL)
		link_modep = &discard_mode;

	rc = macop->emo_poll(enp, link_modep);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
#if EFSYS_OPT_LOOPBACK
/*
 * Put the port into the requested loopback mode at the given link mode.
 * The loopback type must be supported at that link mode per the NIC
 * configuration; requesting the mode already in effect is a no-op.
 */
__checkReturn int
efx_port_loopback_set(
__in efx_nic_t *enp,
__in efx_link_mode_t link_mode,
__in efx_loopback_type_t loopback_type)
{
efx_port_t *epp = &(enp->en_port);
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mac_ops_t *emop = epp->ep_emop;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
EFSYS_ASSERT(emop != NULL);
EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
/* The loopback type must be valid for this link mode. */
if ((1 << loopback_type) & ~encp->enc_loopback_types[link_mode]) {
rc = ENOTSUP;
goto fail1;
}
/* Short-circuit when the requested loopback is already configured. */
if (epp->ep_loopback_type == loopback_type &&
epp->ep_loopback_link_mode == link_mode)
return (0);
if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
goto fail2;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#if EFSYS_OPT_NAMES
/* Loopback type names; order must match efx_loopback_type_t. */
static const char __cs * __cs __efx_loopback_type_name[] = {
"OFF",
"DATA",
"GMAC",
"XGMII",
"XGXS",
"XAUI",
"GMII",
"SGMII",
"XGBR",
"XFI",
"XAUI_FAR",
"GMII_FAR",
"SGMII_FAR",
"XFI_FAR",
"GPHY",
"PHY_XS",
"PCS",
"PMA_PMD",
};
/*
 * Map a loopback type to its human-readable name.  The type must be a
 * valid efx_loopback_type_t index into the table above.
 */
__checkReturn const char __cs *
efx_loopback_type_name(
__in efx_nic_t *enp,
__in efx_loopback_type_t type)
{
_NOTE(ARGUNUSED(enp))
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
return (__efx_loopback_type_name[type]);
}
#endif /* EFSYS_OPT_NAMES */
#endif /* EFSYS_OPT_LOOPBACK */
/*
 * Tear down the port module: clear the MAC state, power down the PHY if
 * it supports power control, and clear the module flag.  The MAC must
 * already have been put into drain.
 */
void
efx_port_fini(
__in efx_nic_t *enp)
{
efx_port_t *epp = &(enp->en_port);
efx_phy_ops_t *epop = epp->ep_epop;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
/* The caller must have drained the MAC before finishing the port. */
EFSYS_ASSERT(epp->ep_mac_drain);
epp->ep_emop = NULL;
epp->ep_mac_type = EFX_MAC_INVALID;
epp->ep_mac_drain = B_FALSE;
epp->ep_mac_poll_needed = B_FALSE;
/* Turn off the PHY */
if (epop->epo_power != NULL)
(void) epop->epo_power(enp, B_FALSE);
enp->en_mod_flags &= ~EFX_MOD_PORT;
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,816 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
/*
 * Initialise the RX module: program default RX configuration (descriptor
 * push and hashing disabled, default 0x3000-byte user buffer size, in
 * 32-byte units as the register requires) and zero the RSS indirection
 * table.  Requires the NIC and event modules; returns EINVAL if events
 * are not initialised or RX already is.
 */
__checkReturn int
efx_rx_init(
__in efx_nic_t *enp)
{
efx_oword_t oword;
unsigned int index;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
if (!(enp->en_mod_flags & EFX_MOD_EV)) {
rc = EINVAL;
goto fail1;
}
if (enp->en_mod_flags & EFX_MOD_RX) {
rc = EINVAL;
goto fail2;
}
/* Read-modify-write the RX configuration register to safe defaults. */
EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
/* Zero the RSS table */
for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
index++) {
EFX_ZERO_OWORD(oword);
EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
index, &oword);
}
enp->en_mod_flags |= EFX_MOD_RX;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#if EFSYS_OPT_RX_HDR_SPLIT
/*
 * Enable RX header/payload splitting with the given header and payload
 * buffer sizes.  Both sizes must be non-zero multiples of 32 bytes that
 * fit in the respective register field widths, and no RX queues may be
 * in use yet (EBUSY otherwise).  Siena or later only.
 */
__checkReturn int
efx_rx_hdr_split_enable(
__in efx_nic_t *enp,
__in unsigned int hdr_buf_size,
__in unsigned int pld_buf_size)
{
unsigned int nhdr32;
unsigned int npld32;
efx_oword_t oword;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_SIENA);
/* The register encodes the header buffer size in 32-byte units. */
nhdr32 = hdr_buf_size / 32;
if ((nhdr32 == 0) ||
(nhdr32 >= (1 << FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH)) ||
((hdr_buf_size % 32) != 0)) {
rc = EINVAL;
goto fail1;
}
/* Likewise for the payload buffer size. */
npld32 = pld_buf_size / 32;
if ((npld32 == 0) ||
(npld32 >= (1 << FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH)) ||
((pld_buf_size % 32) != 0)) {
rc = EINVAL;
goto fail2;
}
/* Cannot change split configuration while RX queues exist. */
if (enp->en_rx_qcount > 0) {
rc = EBUSY;
goto fail3;
}
EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_EN, 1);
EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE, nhdr32);
EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE, npld32);
EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
return (0);
fail3:
EFSYS_PROBE(fail3);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#endif /* EFSYS_OPT_RX_HDR_SPLIT */
#if EFSYS_OPT_RX_SCATTER
/*
 * Enable RX scatter with the given per-buffer size.  The size must be a
 * non-zero multiple of 32 bytes that fits in the register field width,
 * and no RX queues may be in use yet (EBUSY otherwise).  Also enables
 * scatter for packets that match no filter.
 */
__checkReturn int
efx_rx_scatter_enable(
__in efx_nic_t *enp,
__in unsigned int buf_size)
{
unsigned int nbuf32;
efx_oword_t oword;
int rc;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_FALCON);
/* The register encodes the buffer size in 32-byte units. */
nbuf32 = buf_size / 32;
if ((nbuf32 == 0) ||
(nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
((buf_size % 32) != 0)) {
rc = EINVAL;
goto fail1;
}
/* Cannot change scatter configuration while RX queues exist. */
if (enp->en_rx_qcount > 0) {
rc = EBUSY;
goto fail2;
}
/* Set scatter buffer size */
EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
/* Enable scatter for packets not matching a filter */
EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#endif /* EFSYS_OPT_RX_SCATTER */
/*
 * Select the LFSR hash algorithm: clear FRF_BZ_RX_HASH_ALG and the
 * IP/TCP hash controls, optionally inserting the hash into the RX
 * packet prefix.  On Siena the IPv6 Toeplitz hash enable is also
 * cleared.
 */
#define	EFX_RX_LFSR_HASH(_enp, _insert)					\
	do {								\
		efx_oword_t oword;					\
									\
		EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR,	\
		    (_insert) ? 1 : 0);					\
		EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword);	\
									\
		if ((_enp)->en_family == EFX_FAMILY_SIENA) {		\
			EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3,	\
			    &oword);					\
			EFX_SET_OWORD_FIELD(oword,			\
			    FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0);	\
			EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3,	\
			    &oword);					\
		}							\
									\
		_NOTE(CONSTANTCONDITION)				\
	} while (B_FALSE)
/*
 * Select the Toeplitz hash algorithm (FRF_BZ_RX_HASH_ALG = 1) for IPv4,
 * enabling IP and/or TCP hashing as requested.  Note FRF_BZ_RX_TCP_SUP
 * is a *suppress* bit, hence the inverted (_tcp) ? 0 : 1.
 */
#define	EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp)		\
	do {								\
		efx_oword_t oword;					\
									\
		EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1);	\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH,		\
		    (_ip) ? 1 : 0);					\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP,		\
		    (_tcp) ? 0 : 1);					\
		EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR,	\
		    (_insert) ? 1 : 0);					\
		EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword);	\
									\
		_NOTE(CONSTANTCONDITION)				\
	} while (B_FALSE)
/*
 * Configure IPv6 Toeplitz hashing and report the result in (_rc).
 *
 * On Falcon there is no IPv6 hash support: if either hash was
 * requested, (_rc) is set to ENOTSUP and the "break" leaves the
 * enclosing do/while early.  FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS is a
 * suppress bit, hence the inverted (_tcp) ? 0 : 1.
 */
#define	EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc)			\
	do {								\
		efx_oword_t oword;					\
									\
		if ((_enp)->en_family == EFX_FAMILY_FALCON) {		\
			(_rc) = ((_ip) || (_tcp)) ? ENOTSUP : 0;	\
			break;						\
		}							\
									\
		EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword);	\
		EFX_SET_OWORD_FIELD(oword,				\
		    FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1);		\
		EFX_SET_OWORD_FIELD(oword,				\
		    FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0);	\
		EFX_SET_OWORD_FIELD(oword,				\
		    FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1);	\
		EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword);	\
									\
		(_rc) = 0;						\
									\
		_NOTE(CONSTANTCONDITION)				\
	} while (B_FALSE)
#if EFSYS_OPT_RX_SCALE
/*
 * Select the RSS hash algorithm and which packet types to hash.
 *
 * Returns EINVAL for an unknown algorithm, or whatever the IPv6
 * Toeplitz macro reports (ENOTSUP on Falcon when IPv6 hashing was
 * requested).  On failure the hardware is reverted to LFSR hashing
 * without prefix insertion.
 */
	__checkReturn	int
efx_rx_scale_mode_set(
	__in		efx_nic_t *enp,
	__in		efx_rx_hash_alg_t alg,
	__in		efx_rx_hash_type_t type,
	__in		boolean_t insert)
{
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
	EFSYS_ASSERT3U(enp->en_family, >=, EFX_FAMILY_FALCON);

	switch (alg) {
	case EFX_RX_HASHALG_LFSR:
		EFX_RX_LFSR_HASH(enp, insert);
		break;

	case EFX_RX_HASHALG_TOEPLITZ:
		EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
		    type & (1 << EFX_RX_HASH_IPV4),
		    type & (1 << EFX_RX_HASH_TCPIPV4));

		EFX_RX_TOEPLITZ_IPV6_HASH(enp,
		    type & (1 << EFX_RX_HASH_IPV6),
		    type & (1 << EFX_RX_HASH_TCPIPV6),
		    rc);
		if (rc != 0)
			goto fail1;

		break;

	default:
		rc = EINVAL;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	/* Fall back to a safe configuration on failure */
	EFX_RX_LFSR_HASH(enp, B_FALSE);

	return (rc);
}
#endif
#if EFSYS_OPT_RX_SCALE
/*
 * Program the Toeplitz hash key used for IPv4 RSS.
 *
 * Key bytes are packed into the register from the top byte of the
 * FRF_BZ_RX_RSS_TKEY field downwards; at most n bytes are consumed.
 * The register is then read back and compared byte-for-byte; a
 * mismatch returns EFAULT.
 */
	__checkReturn	int
efx_rx_scale_toeplitz_ipv4_key_set(
	__in		efx_nic_t *enp,
	__in_ecount(n)	uint8_t *key,
	__in		size_t n)
{
	efx_oword_t oword;
	unsigned int byte;
	unsigned int offset;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);

	byte = 0;

	/* Write toeplitz hash key */
	EFX_ZERO_OWORD(oword);
	for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset)
		oword.eo_u8[offset - 1] = key[byte++];

	EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);

	byte = 0;

	/* Verify toeplitz hash key */
	EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
	for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset) {
		if (oword.eo_u8[offset - 1] != key[byte++]) {
			rc = EFAULT;
			goto fail1;
		}
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif
#if EFSYS_OPT_RX_SCALE
/*
 * Program the Toeplitz hash key used for IPv6 RSS.
 *
 * The key is split across three registers (REG3 holds the high part,
 * REG2 the middle, REG1 the low); "byte" carries over from one write
 * loop to the next so consecutive key bytes land in consecutive
 * register fields.  REG3 is read-modify-written because it also holds
 * the IPv6 hash enable bits.  All three registers are then read back
 * and verified; a mismatch returns EFAULT.
 */
	__checkReturn	int
efx_rx_scale_toeplitz_ipv6_key_set(
	__in		efx_nic_t *enp,
	__in_ecount(n)	uint8_t *key,
	__in		size_t n)
{
	efx_oword_t oword;
	unsigned int byte;
	int offset;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);

	byte = 0;

	/* Write toeplitz hash key 3 */
	EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset)
		oword.eo_u8[offset - 1] = key[byte++];

	EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);

	/* Write toeplitz hash key 2 */
	EFX_ZERO_OWORD(oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset)
		oword.eo_u8[offset - 1] = key[byte++];

	EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);

	/* Write toeplitz hash key 1 */
	EFX_ZERO_OWORD(oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset)
		oword.eo_u8[offset - 1] = key[byte++];

	EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);

	byte = 0;

	/* Verify toeplitz hash key 3 */
	EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset) {
		if (oword.eo_u8[offset - 1] != key[byte++]) {
			rc = EFAULT;
			goto fail1;
		}
	}

	/* Verify toeplitz hash key 2 */
	EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset) {
		if (oword.eo_u8[offset - 1] != key[byte++]) {
			rc = EFAULT;
			goto fail2;
		}
	}

	/* Verify toeplitz hash key 1 */
	EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
	for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
	    FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
	    offset > 0 && byte < n;
	    --offset) {
		if (oword.eo_u8[offset - 1] != key[byte++]) {
			rc = EFAULT;
			goto fail3;
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif
#if EFSYS_OPT_RX_SCALE
/*
 * Program the RSS indirection table.
 *
 * The caller's table of queue indices is replicated modulo n across
 * all FR_BZ_RX_INDIRECTION_TBL_ROWS rows, then read back row by row
 * and verified.
 *
 * Returns EINVAL if the table is empty or larger than the hardware
 * table, EFAULT if the read-back verification fails.
 */
	__checkReturn	int
efx_rx_scale_tbl_set(
	__in		efx_nic_t *enp,
	__in_ecount(n)	unsigned int *table,
	__in		size_t n)
{
	efx_oword_t oword;
	int index;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);

	EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
	EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));

	/*
	 * Reject an empty table as well as an oversized one: the
	 * "index % n" replication below would otherwise divide by zero.
	 */
	if (n == 0 || n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
		rc = EINVAL;
		goto fail1;
	}

	for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
		uint32_t byte;

		/* Calculate the entry to place in the table */
		byte = (uint32_t)table[index % n];

		EFSYS_PROBE2(table, int, index, uint32_t, byte);

		EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);

		/* Write the table */
		EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
		    index, &oword);
	}

	for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
		uint32_t byte;

		/* Determine if we're starting a new batch */
		byte = (uint32_t)table[index % n];

		/* Read the table */
		EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
		    index, &oword);

		/* Verify the entry */
		if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
			rc = EFAULT;
			goto fail2;
		}
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif
#if EFSYS_OPT_FILTER
/*
 * Insert a filter directing matching packets to this RX queue.
 *
 * The spec's DMA queue id is overwritten with the queue's index before
 * delegating to the generic filter code.
 *
 * Note: the original had "extern" on this function *definition*, which
 * is meaningless and inconsistent with the rest of the file; removed.
 */
	__checkReturn	int
efx_rx_filter_insert(
	__in		efx_rxq_t *erp,
	__inout		efx_filter_spec_t *spec)
{
	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
	EFSYS_ASSERT3P(spec, !=, NULL);

	spec->efs_dmaq_id = (uint16_t)erp->er_index;
	return (efx_filter_insert_filter(erp->er_enp, spec, B_FALSE));
}
#endif
#if EFSYS_OPT_FILTER
/*
 * Remove a previously inserted RX filter for this queue.
 *
 * The spec's DMA queue id is overwritten with the queue's index before
 * delegating to the generic filter code.
 *
 * Note: dropped the spurious "extern" that appeared on this function
 * definition, and parenthesized the return per file style.
 */
	__checkReturn	int
efx_rx_filter_remove(
	__in		efx_rxq_t *erp,
	__inout		efx_filter_spec_t *spec)
{
	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
	EFSYS_ASSERT3P(spec, !=, NULL);

	spec->efs_dmaq_id = (uint16_t)erp->er_index;
	return (efx_filter_remove_filter(erp->er_enp, spec));
}
#endif
/*
 * Post n receive buffers of the given size to the descriptor ring.
 *
 * "completed" and "added" are the caller's ring counters; overfilling
 * beyond EFX_RXQ_LIMIT is asserted against.  Descriptors are written
 * to host memory only - the doorbell is rung separately by
 * efx_rx_qpush().
 *
 * Note: dropped the spurious "extern" that appeared on this function
 * definition.
 */
			void
efx_rx_qpost(
	__in		efx_rxq_t *erp,
	__in_ecount(n)	efsys_dma_addr_t *addrp,
	__in		size_t size,
	__in		unsigned int n,
	__in		unsigned int completed,
	__in		unsigned int added)
{
	efx_qword_t qword;
	unsigned int i;
	unsigned int offset;
	unsigned int id;

	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);

	/* The client driver must not overfill the queue */
	EFSYS_ASSERT3U(added - completed + n, <=,
	    EFX_RXQ_LIMIT(erp->er_mask + 1));

	id = added & (erp->er_mask);
	for (i = 0; i < n; i++) {
		EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
		    unsigned int, id, efsys_dma_addr_t, addrp[i],
		    size_t, size);

		/*
		 * Build the 64-bit descriptor: buffer size plus the DMA
		 * address split across two 32-bit fields.
		 */
		EFX_POPULATE_QWORD_3(qword,
		    FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
		    FSF_AZ_RX_KER_BUF_ADDR_DW0,
		    (uint32_t)(addrp[i] & 0xffffffff),
		    FSF_AZ_RX_KER_BUF_ADDR_DW1,
		    (uint32_t)(addrp[i] >> 32));

		offset = id * sizeof (efx_qword_t);
		EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);

		id = (id + 1) & (erp->er_mask);
	}
}
/*
 * Ring the RX doorbell: publish descriptors previously written by
 * efx_rx_qpost() by writing the new write pointer to the hardware.
 */
		void
efx_rx_qpush(
	__in	efx_rxq_t *erp,
	__in	unsigned int added)
{
	efx_nic_t *enp = erp->er_enp;
	uint32_t wptr;
	efx_oword_t oword;
	efx_dword_t dword;

	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);

	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
	EFSYS_PIO_WRITE_BARRIER();

	/* Push the populated descriptors out */
	wptr = added & erp->er_mask;

	EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);

	/* Only write the third DWORD */
	EFX_POPULATE_DWORD_1(dword,
	    EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
	EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
	    erp->er_index, &dword, B_FALSE);
}
/*
 * Request a hardware flush of this RX descriptor queue by writing the
 * flush command register with the queue's index.
 */
		void
efx_rx_qflush(
	__in	efx_rxq_t *erp)
{
	efx_nic_t *enp = erp->er_enp;
	efx_oword_t oword;
	uint32_t label;

	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);

	label = erp->er_index;

	/* Flush the queue */
	EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
	    FRF_AZ_RX_FLUSH_DESCQ, label);
	EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
}
/*
 * Enable an RX descriptor queue: reset the hardware read pointers and
 * set the enable bit in the queue's descriptor pointer table entry.
 */
		void
efx_rx_qenable(
	__in	efx_rxq_t *erp)
{
	efx_nic_t *enp = erp->er_enp;
	efx_oword_t oword;

	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);

	EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
	    erp->er_index, &oword);

	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
	    erp->er_index, &oword);
}
/*
 * Create an RX descriptor queue.
 *
 * n is the ring size in descriptors (validated as a power of two in
 * the supported range); id is the base buffer-table entry backing the
 * ring; esmp describes the ring memory.  Header-split queue types are
 * Siena-only: header queues must use an even index and payload queues
 * an odd index.  On success the new queue object is returned through
 * erpp and en_rx_qcount is incremented.
 */
	__checkReturn	int
efx_rx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efx_rxq_type_t type,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		efx_evq_t *eep,
	__deref_out	efx_rxq_t **erpp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_rxq_t *erp;
	efx_oword_t oword;
	uint32_t size;
	boolean_t split;
	boolean_t jumbo;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);

	EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
	EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
	EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);

	/* Ring size must be a power of two in the supported range */
	if (!ISP2(n) || !(n & EFX_RXQ_NDESCS_MASK)) {
		rc = EINVAL;
		goto fail1;
	}
	if (index >= encp->enc_rxq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	/* Encode the ring size as a power-of-two multiple of the minimum */
	for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
	    size++)
		if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
			break;
	if (id + (1 << size) >= encp->enc_buftbl_limit) {
		rc = EINVAL;
		goto fail3;
	}

	switch (type) {
	case EFX_RXQ_TYPE_DEFAULT:
		split = B_FALSE;
		jumbo = B_FALSE;
		break;

#if EFSYS_OPT_RX_HDR_SPLIT
	case EFX_RXQ_TYPE_SPLIT_HEADER:
		/* Header queues are Siena-only and must use an even index */
		if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) != 0)) {
			rc = EINVAL;
			goto fail4;
		}
		split = B_TRUE;
		jumbo = B_TRUE;
		break;

	case EFX_RXQ_TYPE_SPLIT_PAYLOAD:
		/* Payload queues are Siena-only and must use an odd index */
		if ((enp->en_family < EFX_FAMILY_SIENA) || ((index & 1) == 0)) {
			rc = EINVAL;
			goto fail4;
		}
		split = B_FALSE;
		jumbo = B_TRUE;
		break;
#endif	/* EFSYS_OPT_RX_HDR_SPLIT */

#if EFSYS_OPT_RX_SCATTER
	case EFX_RXQ_TYPE_SCATTER:
		if (enp->en_family < EFX_FAMILY_SIENA) {
			rc = EINVAL;
			goto fail4;
		}
		split = B_FALSE;
		jumbo = B_TRUE;
		break;
#endif	/* EFSYS_OPT_RX_SCATTER */

	default:
		rc = EINVAL;
		goto fail4;
	}

	/* Allocate an RXQ object */
	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);

	if (erp == NULL) {
		rc = ENOMEM;
		goto fail5;
	}

	erp->er_magic = EFX_RXQ_MAGIC;
	erp->er_enp = enp;
	erp->er_index = index;
	erp->er_mask = n - 1;
	erp->er_esmp = esmp;

	/* Set up the new descriptor queue */
	EFX_POPULATE_OWORD_10(oword,
	    FRF_CZ_RX_HDR_SPLIT, split,
	    FRF_AZ_RX_ISCSI_DDIG_EN, 0,
	    FRF_AZ_RX_ISCSI_HDIG_EN, 0,
	    FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
	    FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
	    FRF_AZ_RX_DESCQ_OWNER_ID, 0,
	    FRF_AZ_RX_DESCQ_LABEL, label,
	    FRF_AZ_RX_DESCQ_SIZE, size,
	    FRF_AZ_RX_DESCQ_TYPE, 0,
	    FRF_AZ_RX_DESCQ_JUMBO, jumbo);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
	    erp->er_index, &oword);

	enp->en_rx_qcount++;
	*erpp = erp;
	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Destroy an RX descriptor queue: zero its descriptor pointer table
 * entry, decrement en_rx_qcount and free the queue object.
 */
		void
efx_rx_qdestroy(
	__in	efx_rxq_t *erp)
{
	efx_nic_t *enp = erp->er_enp;
	efx_oword_t oword;

	EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);

	EFSYS_ASSERT(enp->en_rx_qcount != 0);
	--enp->en_rx_qcount;

	/* Purge descriptor queue */
	EFX_ZERO_OWORD(oword);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
	    erp->er_index, &oword);

	/* Free the RXQ object */
	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
}
/*
 * Tear down the common RX module.  All RX queues must already have
 * been destroyed (asserted); clears EFX_MOD_RX in en_mod_flags.
 */
		void
efx_rx_fini(
	__in	efx_nic_t *enp)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
	EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);

	enp->en_mod_flags &= ~EFX_MOD_RX;
}

View File

@ -0,0 +1,294 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
/*
 * Load n buffer table entries starting at index id, mapping the DMA
 * region described by esmp in EFX_BUF_SIZE chunks.
 *
 * After writing the entries and flushing via FR_AZ_BUF_TBL_UPD_REG,
 * the function polls (up to 100 x 1ms) for the *last* entry to read
 * back correctly, then verifies the remaining entries.  On timeout or
 * verification failure the whole written range is cleared again.
 *
 * Returns EFBIG if the range exceeds EFX_BUF_TBL_SIZE, ETIMEDOUT if
 * the last entry never appears, EFAULT on a read-back mismatch.
 */
	__checkReturn	int
efx_sram_buf_tbl_set(
	__in		efx_nic_t *enp,
	__in		uint32_t id,
	__in		efsys_mem_t *esmp,
	__in		size_t n)
{
	efx_qword_t qword;
	uint32_t start = id;
	uint32_t stop = start + n;
	efsys_dma_addr_t addr;
	efx_oword_t oword;
	unsigned int count;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	if (stop >= EFX_BUF_TBL_SIZE) {
		rc = EFBIG;
		goto fail1;
	}

	/* Add the entries into the buffer table */
	addr = EFSYS_MEM_ADDR(esmp);
	for (id = start; id != stop; id++) {
		/* Entries hold the 4k-page frame number (addr >> 12) */
		EFX_POPULATE_QWORD_5(qword,
		    FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
		    FRF_AZ_BUF_ADR_FBUF_DW0,
		    (uint32_t)((addr >> 12) & 0xffffffff),
		    FRF_AZ_BUF_ADR_FBUF_DW1,
		    (uint32_t)((addr >> 12) >> 32),
		    FRF_AZ_BUF_OWNER_ID_FBUF, 0);

		EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
		    id, &qword);

		addr += EFX_BUF_SIZE;
	}

	EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);

	/* Flush the write buffer */
	EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
	    FRF_AZ_BUF_CLR_CMD, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);

	/* Poll for the last entry being written to the buffer table */
	EFSYS_ASSERT3U(id, ==, stop);
	addr -= EFX_BUF_SIZE;

	count = 0;
	do {
		EFSYS_PROBE1(wait, unsigned int, count);

		/* Spin for 1 ms */
		EFSYS_SPIN(1000);

		EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
		    id - 1, &qword);

		if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
		    (uint32_t)((addr >> 12) & 0xffffffff) &&
		    EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
		    (uint32_t)((addr >> 12) >> 32))
			goto verify;

	} while (++count < 100);

	rc = ETIMEDOUT;
	goto fail2;

verify:
	/* Verify the rest of the entries in the buffer table */
	while (--id != start) {
		addr -= EFX_BUF_SIZE;

		/* Read the buffer table entry */
		EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
		    id - 1, &qword);

		if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
		    (uint32_t)((addr >> 12) & 0xffffffff) ||
		    EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
		    (uint32_t)((addr >> 12) >> 32)) {
			rc = EFAULT;
			goto fail3;
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);

	/* Restore id so the clear below covers the whole written range */
	id = stop;
fail2:
	EFSYS_PROBE(fail2);

	/* Undo: clear the entries we wrote */
	EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
	    FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
	    FRF_AZ_BUF_CLR_START_ID, start);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Clear n buffer table entries starting at index id by issuing a
 * single clear command covering the range.
 */
	void
efx_sram_buf_tbl_clear(
	__in	efx_nic_t *enp,
	__in	uint32_t id,
	__in	size_t n)
{
	efx_oword_t oword;
	uint32_t start = id;
	uint32_t stop = start + n;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);

	EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);

	EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
	    FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
	    FRF_AZ_BUF_CLR_START_ID, start);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
}
#if EFSYS_OPT_DIAG
/*
 * SRAM test pattern: each byte holds the low 8 bits of its absolute
 * byte offset (row offset + byte index).  "negate" is unused for this
 * pattern.
 */
static			void
efx_sram_byte_increment_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
	unsigned int index;

	_NOTE(ARGUNUSED(negate))

	for (index = 0; index < sizeof (efx_qword_t); index++)
		eqp->eq_u8[index] = offset + index;
}
/*
 * SRAM test pattern: uniform fill - all zeroes, or all ones when
 * negated.  The row number does not affect this pattern.
 */
static			void
efx_sram_all_the_same_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	_NOTE(ARGUNUSED(row))

	if (!negate)
		EFX_ZERO_QWORD(*eqp);
	else
		EFX_SET_QWORD(*eqp);
}
/*
 * SRAM test pattern: alternating bits - 0xaaaaaaaa in both dwords, or
 * the complement 0x55555555 when negated.  Row is unused.
 */
static			void
efx_sram_bit_alternate_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	_NOTE(ARGUNUSED(row))

	EFX_POPULATE_QWORD_2(*eqp,
	    EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
	    EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
}
/*
 * SRAM test pattern: alternating bytes - 0xff00ff00 in both dwords, or
 * the complement 0x00ff00ff when negated.  Row is unused.
 */
static			void
efx_sram_byte_alternate_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	_NOTE(ARGUNUSED(row))

	EFX_POPULATE_QWORD_2(*eqp,
	    EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
	    EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
}
/*
 * SRAM test pattern: a walking byte sequence derived from the byte
 * offset modulo 257 (inverted past the first 256-byte region), with
 * the whole byte complemented when "negate" is set.
 */
static			void
efx_sram_byte_changing_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
	unsigned int index;

	for (index = 0; index < sizeof (efx_qword_t); index++) {
		uint8_t byte;

		if (offset / 256 == 0)
			byte = (uint8_t)((offset % 257) % 256);
		else
			byte = (uint8_t)(~((offset - 8) % 257) % 256);

		eqp->eq_u8[index] = (negate) ? ~byte : byte;
	}
}
/*
 * SRAM test pattern: a single set bit (or a single clear bit within
 * all-ones, when negated) whose position advances one place per qword
 * of offset, wrapping every 64 rows' worth of qwords.
 */
static			void
efx_sram_bit_sweep_set(
	__in		size_t row,
	__in		boolean_t negate,
	__out		efx_qword_t *eqp)
{
	size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;

	if (negate) {
		EFX_SET_QWORD(*eqp);
		EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
	} else {
		EFX_ZERO_QWORD(*eqp);
		EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
	}
}
/*
 * Pattern generators indexed by efx_pattern_type_t - the order here
 * must match that enum (see efx_sram_test(), which indexes this array
 * by the type argument).
 */
efx_sram_pattern_fn_t	__cs __efx_sram_pattern_fns[] = {
	efx_sram_byte_increment_set,
	efx_sram_all_the_same_set,
	efx_sram_bit_alternate_set,
	efx_sram_byte_alternate_set,
	efx_sram_byte_changing_set,
	efx_sram_bit_sweep_set
};
/*
 * Run an SRAM test with the selected pattern generator.
 *
 * Must be called while the RX, TX and EV modules are torn down
 * (asserted), since the test is delegated to the NIC-specific
 * eno_sram_test method.
 */
	__checkReturn	int
efx_sram_test(
	__in		efx_nic_t *enp,
	__in		efx_pattern_type_t type)
{
	efx_nic_ops_t *enop = enp->en_enop;
	efx_sram_pattern_fn_t func;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));

	/* Select pattern generator */
	EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
	func = __efx_sram_pattern_fns[type];

	return (enop->eno_sram_test(enp, func));
}
#endif /* EFSYS_OPT_DIAG */

View File

@ -0,0 +1,430 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_QSTATS
/* Bump a per-queue TX statistic; compiles to nothing without QSTATS */
#define	EFX_TX_QSTAT_INCR(_etp, _stat)					\
	do {								\
		(_etp)->et_stat[_stat]++;				\
		_NOTE(CONSTANTCONDITION)				\
	} while (B_FALSE)
#else
#define	EFX_TX_QSTAT_INCR(_etp, _stat)
#endif
/*
 * Initialize the common TX module.
 *
 * The EV module must have been initialized first and TX must not
 * already be initialized (EINVAL otherwise).  Configures the TX
 * reserved and config registers and sets EFX_MOD_TX in en_mod_flags.
 */
	__checkReturn	int
efx_tx_init(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);

	/* The EV module must be initialized before TX */
	if (!(enp->en_mod_flags & EFX_MOD_EV)) {
		rc = EINVAL;
		goto fail1;
	}

	/* Refuse double initialization */
	if (enp->en_mod_flags & EFX_MOD_TX) {
		rc = EINVAL;
		goto fail2;
	}

	EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);

	/*
	 * Disable the timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level (although always allow a
	 * minimal trickle).
	 */
	EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);

	/*
	 * Filter all packets less than 14 bytes to avoid parsing
	 * errors.
	 */
	EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);

	/*
	 * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
	 * descriptors (which is bad).
	 */
	EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);

	enp->en_mod_flags |= EFX_MOD_TX;
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_FILTER
/*
 * Insert a filter associated with this TX queue.
 *
 * The spec's DMA queue id is overwritten with the queue's index before
 * delegating to the generic filter code.
 *
 * Note: dropped the spurious "extern" that appeared on this function
 * definition, and parenthesized the return per file style.
 */
	__checkReturn	int
efx_tx_filter_insert(
	__in		efx_txq_t *etp,
	__inout		efx_filter_spec_t *spec)
{
	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
	EFSYS_ASSERT3P(spec, !=, NULL);

	spec->efs_dmaq_id = (uint16_t)etp->et_index;
	return (efx_filter_insert_filter(etp->et_enp, spec, B_FALSE));
}
#endif
#if EFSYS_OPT_FILTER
/*
 * Remove a previously inserted TX filter for this queue.
 *
 * The spec's DMA queue id is overwritten with the queue's index before
 * delegating to the generic filter code.
 *
 * Note: dropped the spurious "extern" that appeared on this function
 * definition, and parenthesized the return per file style.
 */
	__checkReturn	int
efx_tx_filter_remove(
	__in		efx_txq_t *etp,
	__inout		efx_filter_spec_t *spec)
{
	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
	EFSYS_ASSERT3P(spec, !=, NULL);

	spec->efs_dmaq_id = (uint16_t)etp->et_index;
	return (efx_filter_remove_filter(etp->et_enp, spec));
}
#endif
/*
 * Write one TX descriptor at the current ring position.
 *
 * NOTE: (_added) is evaluated with "++" - the macro post-increments
 * the caller's counter.  FSF_AZ_TX_KER_CONT is set on all but the
 * last fragment of a packet (inverse of _eop).
 */
#define	EFX_TX_DESC(_etp, _addr, _size, _eop, _added)			\
	do {								\
		unsigned int id;					\
		size_t offset;						\
		efx_qword_t qword;					\
									\
		id = (_added)++ & (_etp)->et_mask;			\
		offset = id * sizeof (efx_qword_t);			\
									\
		EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index,	\
		    unsigned int, id, efsys_dma_addr_t, (_addr),	\
		    size_t, (_size), boolean_t, (_eop));		\
									\
		EFX_POPULATE_QWORD_4(qword,				\
		    FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1,			\
		    FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size),	\
		    FSF_AZ_TX_KER_BUF_ADDR_DW0,				\
		    (uint32_t)((_addr) & 0xffffffff),			\
		    FSF_AZ_TX_KER_BUF_ADDR_DW1,				\
		    (uint32_t)((_addr) >> 32));				\
		EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword);	\
									\
		_NOTE(CONSTANTCONDITION)				\
	} while (B_FALSE)
/*
 * Post n transmit buffer fragments to the descriptor ring.
 *
 * Returns ENOSPC (without writing anything) if the request would
 * overfill the ring; on success *addedp is advanced past the new
 * descriptors.  Fragments must not cross 4 KiB boundaries (asserted).
 * The doorbell is rung separately by efx_tx_qpush().
 */
	__checkReturn	int
efx_tx_qpost(
	__in		efx_txq_t *etp,
	__in_ecount(n)	efx_buffer_t *eb,
	__in		unsigned int n,
	__in		unsigned int completed,
	__inout		unsigned int *addedp)
{
	unsigned int added = *addedp;
	unsigned int i;
	int rc = ENOSPC;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
		goto fail1;

	for (i = 0; i < n; i++) {
		efx_buffer_t *ebp = &eb[i];
		efsys_dma_addr_t start = ebp->eb_addr;
		size_t size = ebp->eb_size;
		efsys_dma_addr_t end = start + size;

		/* Fragments must not span 4k boundaries. */
		EFSYS_ASSERT(P2ROUNDUP(start + 1, 4096) >= end);

		EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
	}

	EFX_TX_QSTAT_INCR(etp, TX_POST);

	*addedp = added;
	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Ring the TX doorbell: publish descriptors previously written by
 * efx_tx_qpost() by writing the new write pointer to the hardware.
 */
		void
efx_tx_qpush(
	__in	efx_txq_t *etp,
	__in	unsigned int added)
{
	efx_nic_t *enp = etp->et_enp;
	uint32_t wptr;
	efx_dword_t dword;
	efx_oword_t oword;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	/* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
	EFSYS_PIO_WRITE_BARRIER();

	/* Push the populated descriptors out */
	wptr = added & etp->et_mask;

	EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);

	/* Only write the third DWORD */
	EFX_POPULATE_DWORD_1(dword,
	    EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
	EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
	    etp->et_index, &dword, B_FALSE);
}
/*
 * Request a hardware flush of this TX descriptor queue by writing the
 * flush command register with the queue's index.
 */
		void
efx_tx_qflush(
	__in	efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_oword_t oword;
	uint32_t label;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	label = etp->et_index;

	/* Flush the queue */
	EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
	    FRF_AZ_TX_FLUSH_DESCQ, label);
	EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
}
/*
 * Enable a TX descriptor queue: reset the hardware read pointers and
 * set the enable bit in the queue's descriptor pointer table entry.
 */
		void
efx_tx_qenable(
	__in	efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_oword_t oword;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
	    etp->et_index, &oword);

	EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
	    uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
	    uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
	    uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
	    uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));

	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
	EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
	    etp->et_index, &oword);
}
/*
 * Create a TX descriptor queue.
 *
 * n is the ring size in descriptors (validated as a power of two in
 * the supported range); id is the base buffer-table entry backing the
 * ring.  The flags argument selects IPv4 and/or TCP/UDP checksum
 * offload (the hardware fields are *disable* bits, hence the inverted
 * tests).  On success the new queue object is returned through etpp
 * and en_tx_qcount is incremented.
 */
	__checkReturn	int
efx_tx_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		unsigned int label,
	__in		efsys_mem_t *esmp,
	__in		size_t n,
	__in		uint32_t id,
	__in		uint16_t flags,
	__in		efx_evq_t *eep,
	__deref_out	efx_txq_t **etpp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_txq_t *etp;
	efx_oword_t oword;
	uint32_t size;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);

	EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS == (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
	EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
	EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);

	/* Ring size must be a power of two in the supported range */
	if (!ISP2(n) || !(n & EFX_TXQ_NDESCS_MASK)) {
		rc = EINVAL;
		goto fail1;
	}
	if (index >= encp->enc_txq_limit) {
		rc = EINVAL;
		goto fail2;
	}

	/* Encode the ring size as a power-of-two multiple of the minimum */
	for (size = 0; (1 << size) <= (EFX_TXQ_MAXNDESCS / EFX_TXQ_MINNDESCS);
	    size++)
		if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
			break;
	if (id + (1 << size) >= encp->enc_buftbl_limit) {
		rc = EINVAL;
		goto fail3;
	}

	/* Allocate a TXQ object */
	EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);

	if (etp == NULL) {
		rc = ENOMEM;
		goto fail4;
	}

	etp->et_magic = EFX_TXQ_MAGIC;
	etp->et_enp = enp;
	etp->et_index = index;
	etp->et_mask = n - 1;
	etp->et_esmp = esmp;

	/* Set up the new descriptor queue */
	EFX_POPULATE_OWORD_6(oword,
	    FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
	    FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
	    FRF_AZ_TX_DESCQ_OWNER_ID, 0,
	    FRF_AZ_TX_DESCQ_LABEL, label,
	    FRF_AZ_TX_DESCQ_SIZE, size,
	    FRF_AZ_TX_DESCQ_TYPE, 0);

	EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
	EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
	    (flags & EFX_CKSUM_IPV4) ? 0 : 1);
	EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
	    (flags & EFX_CKSUM_TCPUDP) ? 0 : 1);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
	    etp->et_index, &oword);

	enp->en_tx_qcount++;
	*etpp = etp;
	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_NAMES
/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 78ca9ab00287fffb */
static const char __cs * __cs __efx_tx_qstat_name[] = {
"post",
"unaligned_split",
};
/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
/*
 * Return the human-readable name of a TX queue statistic.  The id must
 * be a valid TX_NQSTATS index (asserted).
 */
	const char __cs *
efx_tx_qstat_name(
	__in	efx_nic_t *enp,
	__in	unsigned int id)
{
	_NOTE(ARGUNUSED(enp))
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(id, <, TX_NQSTATS);

	return (__efx_tx_qstat_name[id]);
}
#if EFSYS_OPT_QSTATS
/*
 * Accumulate the queue's per-statistic counters into the caller's
 * array and reset them to zero (read-and-clear semantics).
 */
			void
efx_tx_qstats_update(
	__in				efx_txq_t *etp,
	__inout_ecount(TX_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	for (id = 0; id < TX_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, etp->et_stat[id]);
		etp->et_stat[id] = 0;
	}
}
#endif	/* EFSYS_OPT_QSTATS */
/*
 * Destroy a TX descriptor queue: zero its descriptor pointer table
 * entry, decrement en_tx_qcount and free the queue object.
 */
		void
efx_tx_qdestroy(
	__in	efx_txq_t *etp)
{
	efx_nic_t *enp = etp->et_enp;
	efx_oword_t oword;

	EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);

	EFSYS_ASSERT(enp->en_tx_qcount != 0);
	--enp->en_tx_qcount;

	/* Purge descriptor queue */
	EFX_ZERO_OWORD(oword);

	EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
	    etp->et_index, &oword);

	/* Free the TXQ object */
	EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
}
/*
 * Tear down the common TX module.  All TX queues must already have
 * been destroyed (asserted); clears EFX_MOD_TX in en_mod_flags.
 */
		void
efx_tx_fini(
	__in	efx_nic_t *enp)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
	EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);

	enp->en_mod_flags &= ~EFX_MOD_TX;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,999 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_VPD

/*
 * PCI VPD resource-data tag header field definitions.
 * LBN = low bit number within the header byte, WIDTH in bits.
 * Small items carry a 1-byte header; large items a 3-byte header
 * with a 16-bit little-endian length.
 */
#define	TAG_TYPE_LBN 7
#define	TAG_TYPE_WIDTH 1
#define	TAG_TYPE_LARGE_ITEM_DECODE 1
#define	TAG_TYPE_SMALL_ITEM_DECODE 0

#define	TAG_SMALL_ITEM_NAME_LBN 3
#define	TAG_SMALL_ITEM_NAME_WIDTH 4
#define	TAG_SMALL_ITEM_SIZE_LBN 0
#define	TAG_SMALL_ITEM_SIZE_WIDTH 3

#define	TAG_LARGE_ITEM_NAME_LBN 0
#define	TAG_LARGE_ITEM_NAME_WIDTH 7

/* Tag names (chosen to match the efx_vpd_tag_t values; see static asserts) */
#define	TAG_NAME_END_DECODE 0x0f
#define	TAG_NAME_ID_STRING_DECODE 0x02
#define	TAG_NAME_VPD_R_DECODE 0x10
#define	TAG_NAME_VPD_W_DECODE 0x11
#if EFSYS_OPT_FALCON

/* VPD method table for Falcon-family NICs (no init/reinit/fini hooks) */
static efx_vpd_ops_t	__cs	__efx_vpd_falcon_ops = {
	NULL,			/* evpdo_init */
	falcon_vpd_size,	/* evpdo_size */
	falcon_vpd_read,	/* evpdo_read */
	falcon_vpd_verify,	/* evpdo_verify */
	NULL,			/* evpdo_reinit */
	falcon_vpd_get,		/* evpdo_get */
	falcon_vpd_set,		/* evpdo_set */
	falcon_vpd_next,	/* evpdo_next */
	falcon_vpd_write,	/* evpdo_write */
	NULL,			/* evpdo_fini */
};

#endif	/* EFSYS_OPT_FALCON */

#if EFSYS_OPT_SIENA

/* VPD method table for Siena-family NICs (all hooks implemented) */
static efx_vpd_ops_t	__cs	__efx_vpd_siena_ops = {
	siena_vpd_init,		/* evpdo_init */
	siena_vpd_size,		/* evpdo_size */
	siena_vpd_read,		/* evpdo_read */
	siena_vpd_verify,	/* evpdo_verify */
	siena_vpd_reinit,	/* evpdo_reinit */
	siena_vpd_get,		/* evpdo_get */
	siena_vpd_set,		/* evpdo_set */
	siena_vpd_next,		/* evpdo_next */
	siena_vpd_write,	/* evpdo_write */
	siena_vpd_fini,		/* evpdo_fini */
};

#endif	/* EFSYS_OPT_SIENA */
/*
 * Initialise the VPD module: bind the method table matching the
 * controller family and run any family-specific initialisation.
 * Returns ENOTSUP for an unknown family.
 */
	__checkReturn		int
efx_vpd_init(
	__in			efx_nic_t *enp)
{
	efx_vpd_ops_t *evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));

	/* Select the method table for this controller family */
	switch (enp->en_family) {
#if EFSYS_OPT_FALCON
	case EFX_FAMILY_FALCON:
		evpdop = (efx_vpd_ops_t *)&__efx_vpd_falcon_ops;
		break;
#endif	/* EFSYS_OPT_FALCON */

#if EFSYS_OPT_SIENA
	case EFX_FAMILY_SIENA:
		evpdop = (efx_vpd_ops_t *)&__efx_vpd_siena_ops;
		break;
#endif	/* EFSYS_OPT_SIENA */

	default:
		EFSYS_ASSERT(0);
		rc = ENOTSUP;
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	/* Run the optional family-specific initialisation hook */
	if (evpdop->evpdo_init != NULL) {
		rc = evpdop->evpdo_init(enp);
		if (rc != 0) {
			EFSYS_PROBE(fail2);
			EFSYS_PROBE1(fail1, int, rc);
			return (rc);
		}
	}

	enp->en_evpdop = evpdop;
	enp->en_mod_flags |= EFX_MOD_VPD;

	return (0);
}
/*
 * Report the size in bytes of the VPD area via *sizep.
 */
	__checkReturn		int
efx_vpd_size(
	__in			efx_nic_t *enp,
	__out			size_t *sizep)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_size(enp, sizep);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Read the whole VPD area into the caller's buffer (size bytes).
 */
	__checkReturn		int
efx_vpd_read(
	__in			efx_nic_t *enp,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_read(enp, data, size);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Verify that the buffered VPD image is well formed and checksummed.
 */
	__checkReturn		int
efx_vpd_verify(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_verify(enp, data, size);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Rewrite the buffered VPD image from scratch.  Not all families
 * implement this; returns ENOTSUP when the hook is absent.
 */
	__checkReturn		int
efx_vpd_reinit(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* The reinit hook is optional in the method table */
	if (evpdop->evpdo_reinit == NULL) {
		rc = ENOTSUP;
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	rc = evpdop->evpdo_reinit(enp, data, size);
	if (rc != 0) {
		EFSYS_PROBE(fail2);
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Look up a single (tag, keyword) value from the buffered VPD image.
 */
	__checkReturn		int
efx_vpd_get(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__inout			efx_vpd_value_t *evvp)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_get(enp, data, size, evvp);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Set (or delete) a single (tag, keyword) value in the buffered VPD
 * image in place.
 */
	__checkReturn		int
efx_vpd_set(
	__in			efx_nic_t *enp,
	__inout_bcount(size)	caddr_t data,
	__in			size_t size,
	__in			efx_vpd_value_t *evvp)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_set(enp, data, size, evvp);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Iterate over the values in the buffered VPD image; *contp is the
 * iteration cursor maintained across calls.
 */
	__checkReturn		int
efx_vpd_next(
	__in			efx_nic_t *enp,
	__inout_bcount(size)	caddr_t data,
	__in			size_t size,
	__out			efx_vpd_value_t *evvp,
	__inout			unsigned int *contp)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_next(enp, data, size, evvp, contp);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Write the buffered VPD image back to the adapter.
 */
	__checkReturn		int
efx_vpd_write(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	/* Delegate to the family-specific method table */
	rc = evpdop->evpdo_write(enp, data, size);
	if (rc != 0) {
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	return (0);
}
/*
 * Decode the VPD resource tag whose header starts at *offsetp.
 *
 * Small items have a one-byte header (3-bit payload length in the same
 * byte); large items have a three-byte header with a little-endian
 * 16-bit payload length.  On success *tagp/*lengthp describe the
 * payload and *offsetp is advanced past the header (not the payload).
 * Returns EFAULT for truncated or unrecognised data.
 */
static	__checkReturn		int
efx_vpd_next_tag(
	__in			caddr_t data,
	__in			size_t size,
	__inout			unsigned int *offsetp,
	__out			efx_vpd_tag_t *tagp,
	__out			uint16_t *lengthp)
{
	efx_byte_t byte;
	efx_word_t word;
	uint8_t name;
	uint16_t length;
	size_t headlen;
	int rc;

	if (*offsetp >= size) {
		rc = EFAULT;
		goto fail1;
	}

	EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);

	switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
	case TAG_TYPE_SMALL_ITEM_DECODE:
		headlen = 1;
		name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
		length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
		break;

	case TAG_TYPE_LARGE_ITEM_DECODE:
		headlen = 3;
		/* Ensure the 16-bit length field lies within the buffer */
		if (*offsetp + headlen > size) {
			rc = EFAULT;
			goto fail2;
		}
		name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
		EFX_POPULATE_WORD_2(word,
		    EFX_BYTE_0, data[*offsetp + 1],
		    EFX_BYTE_1, data[*offsetp + 2]);
		length = EFX_WORD_FIELD(word, EFX_WORD_0);
		break;

	default:
		rc = EFAULT;
		goto fail2;
	}

	/* The whole payload must also lie within the buffer */
	if (*offsetp + headlen + length > size) {
		rc = EFAULT;
		goto fail3;
	}

	/* efx_vpd_tag_t values are chosen to match the wire encoding */
	EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
	EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
	EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
	EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);

	/* Only END, ID and VPD-R tags are accepted (VPD-W is rejected) */
	if (name != EFX_VPD_END && name != EFX_VPD_ID &&
	    name != EFX_VPD_RO) {
		rc = EFAULT;
		goto fail4;
	}

	*tagp = name;
	*lengthp = length;
	*offsetp += headlen;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Decode the keyword header at byte offset pos within a tag payload:
 * two name characters followed by a one-byte payload length.  Returns
 * EFAULT when the header or the payload would overrun the tag.
 */
static	__checkReturn		int
efx_vpd_next_keyword(
	__in_bcount(size)	caddr_t tag,
	__in			size_t size,
	__in			unsigned int pos,
	__out			efx_vpd_keyword_t *keywordp,
	__out			uint8_t *lengthp)
{
	efx_vpd_keyword_t name;
	uint8_t paylen;
	int rc;

	/* Need at least the 3-byte keyword header */
	if (pos + 3U > size) {
		rc = EFAULT;
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	name = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
	paylen = tag[pos + 2];

	/* The payload must be non-empty and fit within the tag */
	if (paylen == 0 || pos + 3U + paylen > size) {
		rc = EFAULT;
		EFSYS_PROBE(fail2);
		EFSYS_PROBE1(fail1, int, rc);
		return (rc);
	}

	*keywordp = name;
	*lengthp = paylen;

	return (0);
}
/*
 * Compute the used length of a VPD hunk: walk the tag chain until the
 * END tag and report the offset of the first byte past it.
 */
	__checkReturn		int
efx_vpd_hunk_length(
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__out			size_t *lengthp)
{
	efx_vpd_tag_t tag;
	unsigned int cursor = 0;
	uint16_t taglen;
	int rc;

	for (;;) {
		rc = efx_vpd_next_tag(data, size, &cursor, &tag, &taglen);
		if (rc != 0) {
			EFSYS_PROBE1(fail1, int, rc);
			return (rc);
		}
		cursor += taglen;
		if (tag == EFX_VPD_END)
			break;
	}

	*lengthp = cursor;

	return (0);
}
/*
 * Verify the structure of a VPD hunk: every tag and keyword must parse,
 * the RV checksum keyword (when present) must be the last keyword in
 * the VPD-R block and its checksum over all preceding bytes must be
 * zero.  Fails with EFAULT if no valid checksum is found.
 */
	__checkReturn		int
efx_vpd_hunk_verify(
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__out_opt		boolean_t *cksummedp)
{
	efx_vpd_tag_t tag;
	efx_vpd_keyword_t keyword;
	unsigned int offset;
	unsigned int pos;
	unsigned int i;
	uint16_t taglen;
	uint8_t keylen;
	uint8_t cksum;
	boolean_t cksummed = B_FALSE;
	int rc;

	/*
	 * Parse every tag,keyword in the existing VPD. If the checksum is
	 * present, then assert that it is correct, and that it is the final
	 * keyword in the RO block.
	 */
	offset = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_next_tag(data, size, &offset,
		    &tag, &taglen)) != 0)
			goto fail1;
		if (tag == EFX_VPD_END)
			break;
		else if (tag == EFX_VPD_ID)
			goto done;	/* ID tags carry no keywords */

		for (pos = 0; pos != taglen; pos += 3 + keylen) {
			/* RV keyword must be the last in the block */
			if (cksummed)
				goto fail2;

			if ((rc = efx_vpd_next_keyword(data + offset,
			    taglen, pos, &keyword, &keylen)) != 0)
				goto fail3;

			if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
				/*
				 * Sum every byte up to and including the
				 * RV payload byte; a valid image sums to 0.
				 */
				cksum = 0;
				for (i = 0; i < offset + pos + 4; i++)
					cksum += data[i];

				if (cksum != 0) {
					rc = EFAULT;
					goto fail4;
				}

				cksummed = B_TRUE;
			}
		}

	done:
		offset += taglen;
	}

	if (!cksummed) {
		rc = EFAULT;
		goto fail5;
	}

	if (cksummedp != NULL)
		*cksummedp = cksummed;

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Template for a minimal ID-string tag: large resource 0x82,
 * little-endian 16-bit length 0x0001, then the single payload byte.
 * NOTE(review): the comment below says the product name is ' ' (0x20)
 * but the payload byte is 0x32 ('2') -- confirm which is intended.
 */
static uint8_t	__cs	__efx_vpd_blank_pid[] = {
	/* Large resource type ID length 1 */
	0x82, 0x01, 0x00,
	/* Product name ' ' */
	0x32,
};

/* Template for a minimal VPD-R block containing only the RV checksum */
static uint8_t __cs __efx_vpd_blank_r[] = {
	/* Large resource type VPD-R length 4 */
	0x90, 0x04, 0x00,
	/* RV keyword length 1 */
	'R', 'V', 0x01,
	/* RV payload checksum */
	0x00,
};
/*
 * Rewrite a VPD hunk as a blank image: an optional empty product-ID
 * tag, a minimal VPD-R block with a correct RV checksum, and a
 * trailing END tag.  Requires at least 0x100 bytes of space.
 */
	__checkReturn		int
efx_vpd_hunk_reinit(
	__in			caddr_t data,
	__in			size_t size,
	__in			boolean_t wantpid)
{
	unsigned int offset = 0;
	unsigned int pos;
	efx_byte_t byte;
	uint8_t cksum;
	int rc;

	if (size < 0x100) {
		rc = ENOSPC;
		goto fail1;
	}

	if (wantpid) {
		memcpy(data + offset, __efx_vpd_blank_pid,
		    sizeof (__efx_vpd_blank_pid));
		offset += sizeof (__efx_vpd_blank_pid);
	}

	memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
	offset += sizeof (__efx_vpd_blank_r);

	/*
	 * Update checksum: the RV payload byte (the last byte of the
	 * blank VPD-R template, currently 0) is adjusted so the whole
	 * image sums to zero.
	 */
	cksum = 0;
	for (pos = 0; pos < offset; pos++)
		cksum += data[pos];
	data[offset - 1] -= cksum;

	/* Append trailing tag (small item, END, zero length) */
	EFX_POPULATE_BYTE_3(byte,
	    TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
	    TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
	    TAG_SMALL_ITEM_SIZE, 0);
	data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
	offset++;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Iterate over the values in a VPD hunk.  *contp is a zero-based
 * iteration index: each call returns the (*contp)'th value (the ID
 * string counts as one value, then each keyword) and increments
 * *contp.  When iteration is complete *contp is reset to 0 and the
 * output parameters are left unset.
 */
	__checkReturn			int
efx_vpd_hunk_next(
	__in_bcount(size)		caddr_t data,
	__in				size_t size,
	__out				efx_vpd_tag_t *tagp,
	__out				efx_vpd_keyword_t *keywordp,
	__out_bcount_opt(*paylenp)	unsigned int *payloadp,
	__out_opt			uint8_t *paylenp,
	__inout				unsigned int *contp)
{
	efx_vpd_tag_t tag;
	efx_vpd_keyword_t keyword = 0;
	unsigned int offset;
	unsigned int pos;
	unsigned int index;
	uint16_t taglen;
	uint8_t keylen;
	uint8_t paylen;
	int rc;

	offset = index = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_next_tag(data, size, &offset,
		    &tag, &taglen)) != 0)
			goto fail1;
		if (tag == EFX_VPD_END)
			break;

		if (tag == EFX_VPD_ID) {
			if (index == *contp) {
				/* ID payload is the whole tag body */
				EFSYS_ASSERT3U(taglen, <, 0x100);
				paylen = (uint8_t)MIN(taglen, 0xff);

				goto done;
			}
		} else {
			for (pos = 0; pos != taglen; pos += 3 + keylen) {
				if ((rc = efx_vpd_next_keyword(data + offset,
				    taglen, pos, &keyword, &keylen)) != 0)
					goto fail2;

				if (index == *contp) {
					/* Skip the 3-byte keyword header */
					offset += pos + 3;
					paylen = keylen;

					goto done;
				}
			}
		}

		offset += taglen;
	}

	*contp = 0;
	return (0);

done:
	*tagp = tag;
	*keywordp = keyword;
	if (payloadp != NULL)
		*payloadp = offset;
	if (paylenp != NULL)
		*paylenp = paylen;

	++(*contp);
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Locate a (tag, keyword) value in a VPD hunk.  On success *payloadp
 * is the byte offset of the payload within data and *paylenp its
 * length.  Returns ENOENT (without a probe -- not an error) when the
 * value does not exist.
 */
	__checkReturn		int
efx_vpd_hunk_get(
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__in			efx_vpd_tag_t tag,
	__in			efx_vpd_keyword_t keyword,
	__out			unsigned int *payloadp,
	__out			uint8_t *paylenp)
{
	efx_vpd_tag_t itag;
	efx_vpd_keyword_t ikeyword;
	unsigned int offset;
	unsigned int pos;
	uint16_t taglen;
	uint8_t keylen;
	int rc;

	offset = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_next_tag(data, size, &offset,
		    &itag, &taglen)) != 0)
			goto fail1;
		if (itag == EFX_VPD_END)
			break;

		if (itag == tag) {
			if (itag == EFX_VPD_ID) {
				/* ID payload is the whole tag body */
				EFSYS_ASSERT3U(taglen, <, 0x100);

				*paylenp = (uint8_t)MIN(taglen, 0xff);
				*payloadp = offset;
				return (0);
			}

			for (pos = 0; pos != taglen; pos += 3 + keylen) {
				if ((rc = efx_vpd_next_keyword(data + offset,
				    taglen, pos, &ikeyword, &keylen)) != 0)
					goto fail2;

				if (ikeyword == keyword) {
					*paylenp = keylen;
					*payloadp = offset + pos + 3;
					return (0);
				}
			}
		}

		offset += taglen;
	}

	/* Not an error */
	return (ENOENT);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Insert, modify or delete (evv_length == 0) a single value in a VPD
 * hunk in place: trailing data is shifted, the large resource tag
 * length is rewritten, the RV checksum is recomputed, and the unused
 * trailing space is filled.  Only the ID string and RO-block keywords
 * may be set; the RV (checksum) keyword is maintained internally.
 */
	__checkReturn		int
efx_vpd_hunk_set(
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__in			efx_vpd_value_t *evvp)
{
	efx_word_t word;
	efx_vpd_tag_t tag;
	efx_vpd_keyword_t keyword;
	unsigned int offset;
	unsigned int pos;
	unsigned int taghead;
	unsigned int source;
	unsigned int dest;
	unsigned int i;
	uint16_t taglen;
	uint8_t keylen;
	uint8_t cksum;
	size_t used;
	int rc;

	/* Validate the (tag, keyword) pair being set */
	switch (evvp->evv_tag) {
	case EFX_VPD_ID:
		if (evvp->evv_keyword != 0) {
			rc = EINVAL;
			goto fail1;
		}

		/* Can't delete the ID keyword */
		if (evvp->evv_length == 0) {
			rc = EINVAL;
			goto fail1;
		}
		break;

	case EFX_VPD_RO:
		/* The RV checksum keyword is managed by this function */
		if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
			rc = EINVAL;
			goto fail1;
		}
		break;

	default:
		rc = EINVAL;
		goto fail1;
	}

	/* Determine total size of all current tags */
	if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
		goto fail2;

	offset = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		taghead = offset;
		if ((rc = efx_vpd_next_tag(data, size, &offset,
		    &tag, &taglen)) != 0)
			goto fail3;
		if (tag == EFX_VPD_END)
			break;
		else if (tag != evvp->evv_tag) {
			offset += taglen;
			continue;
		}

		/* We only support modifying large resource tags */
		if (offset - taghead != 3) {
			rc = EINVAL;
			goto fail4;
		}

		/*
		 * Work out the offset of the byte immediately after the
		 * old (=source) and new (=dest) keyword/tag
		 */
		pos = 0;
		if (tag == EFX_VPD_ID) {
			source = offset + taglen;
			dest = offset + evvp->evv_length;
			goto check_space;
		}

		EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
		source = dest = 0;
		for (pos = 0; pos != taglen; pos += 3 + keylen) {
			if ((rc = efx_vpd_next_keyword(data + offset,
			    taglen, pos, &keyword, &keylen)) != 0)
				goto fail5;

			if (keyword == evvp->evv_keyword &&
			    evvp->evv_length == 0) {
				/* Deleting this keyword */
				source = offset + pos + 3 + keylen;
				dest = offset + pos;
				break;

			} else if (keyword == evvp->evv_keyword) {
				/* Adjusting this keyword */
				source = offset + pos + 3 + keylen;
				dest = offset + pos + 3 + evvp->evv_length;
				break;

			} else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
				/* The RV keyword must be at the end */
				EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);

				/*
				 * The keyword doesn't already exist. If the
				 * user is deleting a non-existent keyword
				 * then this is a no-op.
				 */
				if (evvp->evv_length == 0)
					return (0);

				/* Insert this keyword before the RV keyword */
				source = offset + pos;
				dest = offset + pos + 3 + evvp->evv_length;
				break;
			}
		}

	check_space:
		/* Rearranged from used - source + dest > size (no underflow) */
		if (used + dest > size + source) {
			rc = ENOSPC;
			goto fail6;
		}

		/* Move trailing data */
		(void) memmove(data + dest, data + source, used - source);

		/* Copy contents */
		memcpy(data + dest - evvp->evv_length, evvp->evv_value,
		    evvp->evv_length);

		/* Insert new keyword header if required */
		if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
			EFX_POPULATE_WORD_1(word, EFX_WORD_0,
			    evvp->evv_keyword);
			data[offset + pos + 0] =
			    EFX_WORD_FIELD(word, EFX_BYTE_0);
			data[offset + pos + 1] =
			    EFX_WORD_FIELD(word, EFX_BYTE_1);
			data[offset + pos + 2] = evvp->evv_length;
		}

		/* Modify tag length (large resource type) */
		taglen += (dest - source);
		EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
		data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
		data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);

		goto checksum;
	}

	/* Unable to find the matching tag */
	rc = ENOENT;
	goto fail7;

checksum:
	/* Find the RV tag, and update the checksum */
	offset = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_next_tag(data, size, &offset,
		    &tag, &taglen)) != 0)
			goto fail8;
		if (tag == EFX_VPD_END)
			break;
		if (tag == EFX_VPD_RO) {
			for (pos = 0; pos != taglen; pos += 3 + keylen) {
				if ((rc = efx_vpd_next_keyword(data + offset,
				    taglen, pos, &keyword, &keylen)) != 0)
					goto fail9;

				if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
					cksum = 0;
					for (i = 0; i < offset + pos + 3; i++)
						cksum += data[i];
					/*
					 * Here i == offset + pos + 3: the
					 * RV payload (checksum) byte.
					 */
					data[i] = -cksum;
					break;
				}
			}
		}

		offset += taglen;
	}

	/* Fill the unused trailing space with 0xff */
	(void) memset(data + offset + taglen, 0xff, size - offset - taglen);

	return (0);

fail9:
	EFSYS_PROBE(fail9);
fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Tear down the VPD module: run the optional family-specific fini
 * hook and unbind the method table.
 */
		void
efx_vpd_fini(
	__in	efx_nic_t *enp)
{
	efx_vpd_ops_t *evpdop = enp->en_evpdop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);

	if (evpdop->evpdo_fini != NULL)
		evpdop->evpdo_fini(enp);

	enp->en_evpdop = NULL;
	enp->en_mod_flags &= ~EFX_MOD_VPD;
}
#endif /* EFSYS_OPT_VPD */

View File

@ -0,0 +1,396 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_impl.h"
#if EFSYS_OPT_WOL
/*
 * Initialise the wake-on-LAN module.  Fails with ENOTSUP when the
 * NIC configuration does not advertise the WOL feature.
 */
	__checkReturn		int
efx_wol_init(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_WOL));

	/* The WOL feature bit must be set in the capability mask */
	if (~(encp->enc_features) & EFX_FEATURE_WOL) {
		rc = ENOTSUP;
		goto fail1;
	}

	/* Current implementation is Siena specific */
	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	enp->en_mod_flags |= EFX_MOD_WOL;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Remove all wake and lights-out filters via the MCDI
 * WOL_FILTER_RESET command (no response payload expected).
 */
	__checkReturn		int
efx_wol_filter_clear(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_WOL_FILTER_RESET_IN_LEN];
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	req.emr_cmd = MC_CMD_WOL_FILTER_RESET;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_WOL_FILTER_RESET_IN_LEN;
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	/* MCDI_IN_* macros write into emr_in_buf, so it is set first */
	MCDI_IN_SET_DWORD(req, WOL_FILTER_RESET_IN_MASK,
	    MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS |
	    MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Install a wake-on-LAN filter (magic packet, bitmap match or link
 * change) via the MCDI WOL_FILTER_SET command and return the firmware
 * filter id via *filter_idp.
 */
	__checkReturn		int
efx_wol_filter_add(
	__in		efx_nic_t *enp,
	__in		efx_wol_type_t type,
	__in		efx_wol_param_t *paramp,
	__out		uint32_t *filter_idp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_WOL_FILTER_SET_IN_LEN,
	    MC_CMD_WOL_FILTER_SET_OUT_LEN)];
	efx_byte_t link_mask;
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	req.emr_cmd = MC_CMD_WOL_FILTER_SET;
	(void) memset(payload, '\0', sizeof (payload));
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_WOL_FILTER_SET_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_WOL_FILTER_SET_OUT_LEN;

	switch (type) {
	case EFX_WOL_TYPE_MAGIC:
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
		    MC_CMD_FILTER_MODE_SIMPLE);
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
		    MC_CMD_WOL_TYPE_MAGIC);
		EFX_MAC_ADDR_COPY(
		    MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_MAGIC_MAC),
		    paramp->ewp_magic.mac_addr);
		break;

	case EFX_WOL_TYPE_BITMAP: {
		uint32_t swapped = 0;
		efx_dword_t *dwordp;
		unsigned int pos, bit;

		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
		    MC_CMD_FILTER_MODE_SIMPLE);
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
		    MC_CMD_WOL_TYPE_BITMAP);

		/*
		 * MC bitmask is supposed to be bit swapped
		 * amongst 32 bit words(!)
		 */

		dwordp = MCDI_IN2(req, efx_dword_t,
		    WOL_FILTER_SET_IN_BITMAP_MASK);

		EFSYS_ASSERT3U(EFX_WOL_BITMAP_MASK_SIZE % 4, ==, 0);

		/* Reverse the bit order within each 32-bit word */
		for (pos = 0; pos < EFX_WOL_BITMAP_MASK_SIZE; ++pos) {
			uint8_t native = paramp->ewp_bitmap.mask[pos];

			for (bit = 0; bit < 8; ++bit) {
				swapped <<= 1;
				swapped |= (native & 0x1);
				native >>= 1;
			}

			if ((pos & 3) == 3) {
				EFX_POPULATE_DWORD_1(dwordp[pos >> 2],
				    EFX_DWORD_0, swapped);
				swapped = 0;
			}
		}

		/*
		 * NOTE(review): the value is copied before the debug-only
		 * length assert below -- confirm value_len is validated
		 * by the caller on non-debug builds.
		 */
		memcpy(MCDI_IN2(req, uint8_t, WOL_FILTER_SET_IN_BITMAP_BITMAP),
		    paramp->ewp_bitmap.value,
		    sizeof (paramp->ewp_bitmap.value));

		EFSYS_ASSERT3U(paramp->ewp_bitmap.value_len, <=,
		    sizeof (paramp->ewp_bitmap.value));
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_BITMAP_LEN,
		    paramp->ewp_bitmap.value_len);
	}
		break;

	case EFX_WOL_TYPE_LINK:
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_FILTER_MODE,
		    MC_CMD_FILTER_MODE_SIMPLE);
		MCDI_IN_SET_DWORD(req, WOL_FILTER_SET_IN_WOL_TYPE,
		    MC_CMD_WOL_TYPE_LINK);

		/* Wake on link-up only (link-down bit left clear) */
		EFX_ZERO_BYTE(link_mask);
		EFX_SET_BYTE_FIELD(link_mask, MC_CMD_WOL_FILTER_SET_IN_LINK_UP,
		    1);
		MCDI_IN_SET_BYTE(req, WOL_FILTER_SET_IN_LINK_MASK,
		    link_mask.eb_u8[0]);
		break;

	default:
		/*
		 * Always-false assert (fires on debug builds).
		 * NOTE(review): on non-debug builds execution continues
		 * and issues the request with a zeroed payload -- confirm
		 * callers never pass an unknown type.
		 */
		EFSYS_ASSERT3U(type, !=, type);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*filter_idp = MCDI_OUT_DWORD(req, WOL_FILTER_SET_OUT_FILTER_ID);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Remove a single wake filter, identified by the id returned from
 * efx_wol_filter_add(), via the MCDI WOL_FILTER_REMOVE command.
 */
	__checkReturn		int
efx_wol_filter_remove(
	__in		efx_nic_t *enp,
	__in		uint32_t filter_id)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	req.emr_cmd = MC_CMD_WOL_FILTER_REMOVE;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_WOL_FILTER_REMOVE_IN_LEN;
	/* This command has no response payload */
	EFX_STATIC_ASSERT(MC_CMD_WOL_FILTER_REMOVE_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, WOL_FILTER_REMOVE_IN_FILTER_ID, filter_id);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Install a lights-out protocol offload (ARP or IPv6 neighbour
 * solicitation responder) via the MCDI ADD_LIGHTSOUT_OFFLOAD command
 * and return the firmware filter id via *filter_idp.
 */
	__checkReturn		int
efx_lightsout_offload_add(
	__in		efx_nic_t *enp,
	__in		efx_lightsout_offload_type_t type,
	__in		efx_lightsout_offload_param_t *paramp,
	__out		uint32_t *filter_idp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MAX(MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN,
	    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN),
	    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN)];
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	req.emr_cmd = MC_CMD_ADD_LIGHTSOUT_OFFLOAD;
	req.emr_in_buf = payload;
	/* Placeholder; overwritten with the protocol-specific length below */
	req.emr_in_length = sizeof (type);
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN;

	switch (type) {
	case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
		req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN;
		MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
		    MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
		EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
		    ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC),
		    paramp->elop_arp.mac_addr);
		MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP,
		    paramp->elop_arp.ip);
		break;
	case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
		req.emr_in_length = MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN;
		MCDI_IN_SET_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
		    MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
		EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t,
		    ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC),
		    paramp->elop_ns.mac_addr);
		memcpy(MCDI_IN2(req, uint8_t,
		    ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6),
		    paramp->elop_ns.solicited_node,
		    sizeof (paramp->elop_ns.solicited_node));
		memcpy(MCDI_IN2(req, uint8_t, ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6),
		    paramp->elop_ns.ip, sizeof (paramp->elop_ns.ip));
		break;
	default:
		/* Always-false assert (fires on debug builds only) */
		EFSYS_ASSERT3U(type, !=, type);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*filter_idp = MCDI_OUT_DWORD(req, ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Remove a lights-out protocol offload, identified by its type and the
 * filter id returned from efx_lightsout_offload_add(), via the MCDI
 * REMOVE_LIGHTSOUT_OFFLOAD command.
 */
	__checkReturn		int
efx_lightsout_offload_remove(
	__in		efx_nic_t *enp,
	__in		efx_lightsout_offload_type_t type,
	__in		uint32_t filter_id)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN];
	int rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	req.emr_cmd = MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	/* This command has no response payload */
	EFX_STATIC_ASSERT(MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	switch (type) {
	case EFX_LIGHTSOUT_OFFLOAD_TYPE_ARP:
		MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
		    MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
		break;
	case EFX_LIGHTSOUT_OFFLOAD_TYPE_NS:
		MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
		    MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS);
		break;
	default:
		/* Always-false assert (fires on debug builds only) */
		EFSYS_ASSERT3U(type, !=, type);
	}

	MCDI_IN_SET_DWORD(req, REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID,
	    filter_id);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Tear down the wake-on-LAN module (no hardware state to release).
 */
		void
efx_wol_fini(
	__in	efx_nic_t *enp)
{
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_WOL);

	enp->en_mod_flags &= ~EFX_MOD_WOL;
}
#endif /* EFSYS_OPT_WOL */

View File

@ -0,0 +1,132 @@
/*-
* Copyright 2007-2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef	_SYS_SIENA_FLASH_H
#define	_SYS_SIENA_FLASH_H

/*
 * On-flash layout structures shared with the MC firmware; everything
 * here is byte-packed to match the flash image exactly.
 */
#pragma pack(1)

/* Fixed locations near the start of flash (which may be in the internal PHY
 * firmware header) point to the boot header.
 *
 * - parsed by MC boot ROM and firmware
 * - reserved (but not parsed) by PHY firmware
 * - opaque to driver
 */

#define	SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
#define	SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
#define	SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */

#define	SIENA_MC_BOOT_HDR_LEN (0x200)

#define	SIENA_MC_BOOT_MAGIC (0x51E4A001)
#define	SIENA_MC_BOOT_VERSION (1)

typedef struct siena_mc_boot_hdr_s {	/* GENERATED BY scripts/genfwdef */
	efx_dword_t magic;		/* = SIENA_MC_BOOT_MAGIC */
	efx_word_t hdr_version;		/* this structure definition is version 1 */
	efx_byte_t board_type;
	efx_byte_t firmware_version_a;
	efx_byte_t firmware_version_b;
	efx_byte_t firmware_version_c;
	efx_word_t checksum;		/* of whole header area + firmware image */
	efx_word_t firmware_version_d;
	efx_word_t reserved_a[1];	/* (set to 0) */
	efx_dword_t firmware_text_offset;	/* offset to firmware .text */
	efx_dword_t firmware_text_size;	/* length of firmware .text, in bytes */
	efx_dword_t firmware_data_offset;	/* offset to firmware .data */
	efx_dword_t firmware_data_size;	/* length of firmware .data, in bytes */
	efx_dword_t reserved_b[8];	/* (set to 0) */
} siena_mc_boot_hdr_t;

#define	SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
#define	SIENA_MC_STATIC_CONFIG_VERSION (0)

/* Factory-programmed configuration (MAC addresses, capabilities, VPD) */
typedef struct siena_mc_static_config_hdr_s {	/* GENERATED BY scripts/genfwdef */
	efx_dword_t magic;		/* = SIENA_MC_STATIC_CONFIG_MAGIC */
	efx_word_t length;		/* of header area (i.e. not including VPD) */
	efx_byte_t version;
	efx_byte_t csum;		/* over header area (i.e. not including VPD) */
	efx_dword_t static_vpd_offset;
	efx_dword_t static_vpd_length;
	efx_dword_t capabilities;
	efx_byte_t mac_addr_base[6];
	efx_byte_t green_mode_cal;	/* Green mode calibration result */
	efx_byte_t green_mode_valid;	/* Whether cal holds a valid value */
	efx_word_t mac_addr_count;
	efx_word_t mac_addr_stride;
	efx_dword_t reserved2[2];	/* (write as zero) */
	efx_dword_t num_dbi_items;
	struct {
		efx_word_t addr;
		efx_word_t byte_enables;
		efx_dword_t value;
	} dbi[];			/* flexible array: num_dbi_items entries */
} siena_mc_static_config_hdr_t;

#define	SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
#define	SIENA_MC_DYNAMIC_CONFIG_VERSION (0)

typedef struct siena_mc_fw_version_s {	/* GENERATED BY scripts/genfwdef */
	efx_dword_t fw_subtype;
	efx_word_t version_w;
	efx_word_t version_x;
	efx_word_t version_y;
	efx_word_t version_z;
} siena_mc_fw_version_t;

/* Field-updatable configuration (firmware versions, dynamic VPD) */
typedef struct siena_mc_dynamic_config_hdr_s {	/* GENERATED BY scripts/genfwdef */
	efx_dword_t magic;		/* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
	efx_word_t length;		/* of header area (i.e. not including VPD) */
	efx_byte_t version;
	efx_byte_t csum;		/* over header area (i.e. not including VPD) */
	efx_dword_t dynamic_vpd_offset;
	efx_dword_t dynamic_vpd_length;
	efx_dword_t num_fw_version_items;
	siena_mc_fw_version_t fw_version[];	/* flexible array */
} siena_mc_dynamic_config_hdr_t;

#define	SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55)	/* little-endian uint16_t */

#define	SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */

typedef struct siena_mc_combo_rom_hdr_s {	/* GENERATED BY scripts/genfwdef */
	efx_dword_t magic;		/* = SIENA_MC_EXPROM_COMBO_MAGIC */
	efx_dword_t len1;		/* length of first image */
	efx_dword_t len2;		/* length of second image */
	efx_dword_t off1;		/* offset of first byte to edit to combine images */
	efx_dword_t off2;		/* offset of second byte to edit to combine images */
	efx_word_t infoblk0_off;	/* infoblk offset */
	efx_word_t infoblk1_off;	/* infoblk offset */
	efx_byte_t infoblk_len;		/* length of space reserved for infoblk structures */
	efx_byte_t reserved[7];		/* (set to 0) */
} siena_mc_combo_rom_hdr_t;

#pragma pack()

#endif	/* _SYS_SIENA_FLASH_H */

View File

@ -0,0 +1,477 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _SYS_SIENA_IMPL_H
#define _SYS_SIENA_IMPL_H
#include "efx.h"
#include "efx_regs.h"
#include "efx_mcdi.h"
#include "siena_flash.h"
#ifdef __cplusplus
extern "C" {
#endif
#if EFSYS_OPT_PHY_PROPS

/* START MKCONFIG GENERATED SienaPhyHeaderPropsBlock a8db1f8eb5106efd */
/* Siena defines no PHY-specific properties; only the count terminator. */
typedef enum siena_phy_prop_e {
	SIENA_PHY_NPROPS
} siena_phy_prop_t;
/* END MKCONFIG GENERATED SienaPhyHeaderPropsBlock */

#endif	/* EFSYS_OPT_PHY_PROPS */
/* Chunk size (bytes) used by the siena_nvram_* access routines */
#define	SIENA_NVRAM_CHUNK 0x80

/* NIC lifecycle: probe/unprobe bracket reset/init/fini */
extern	__checkReturn	int
siena_nic_probe(
	__in		efx_nic_t *enp);

#if EFSYS_OPT_PCIE_TUNE

extern	__checkReturn	int
siena_nic_pcie_extended_sync(
	__in		efx_nic_t *enp);

#endif

extern	__checkReturn	int
siena_nic_reset(
	__in		efx_nic_t *enp);

extern	__checkReturn	int
siena_nic_init(
	__in		efx_nic_t *enp);

#if EFSYS_OPT_DIAG

extern	__checkReturn	int
siena_nic_register_test(
	__in		efx_nic_t *enp);

#endif	/* EFSYS_OPT_DIAG */

extern			void
siena_nic_fini(
	__in		efx_nic_t *enp);

extern			void
siena_nic_unprobe(
	__in		efx_nic_t *enp);

/* Size of the Siena SRAM buffer table, in rows */
#define	SIENA_SRAM_ROWS	0x12000

extern			void
siena_sram_init(
	__in		efx_nic_t *enp);

#if EFSYS_OPT_DIAG

extern	__checkReturn	int
siena_sram_test(
	__in		efx_nic_t *enp,
	__in		efx_sram_pattern_fn_t func);

#endif	/* EFSYS_OPT_DIAG */
#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD

/* Low-level, per-partition NVRAM access (shared by NVRAM and VPD code) */

extern	__checkReturn	int
siena_nvram_partn_size(
	__in		efx_nic_t *enp,
	__in		unsigned int partn,
	__out		size_t *sizep);

extern	__checkReturn	int
siena_nvram_partn_lock(
	__in		efx_nic_t *enp,
	__in		unsigned int partn);

extern	__checkReturn	int
siena_nvram_partn_read(
	__in		efx_nic_t *enp,
	__in		unsigned int partn,
	__in		unsigned int offset,
	__out_bcount(size)	caddr_t data,
	__in		size_t size);
extern __checkReturn int
siena_nvram_partn_erase(
__in efx_nic_t *enp,
__in unsigned int partn,
__in unsigned int offset,
__in size_t size);
extern __checkReturn int
siena_nvram_partn_write(
__in efx_nic_t *enp,
__in unsigned int partn,
__in unsigned int offset,
__out_bcount(size) caddr_t data,
__in size_t size);
extern			void
siena_nvram_partn_unlock(
	__in		efx_nic_t *enp,
	__in		unsigned int partn);

/*
 * Fetch the dynamic configuration header (or its VPD variant when
 * "vpd" is set) for the given port index.
 * NOTE(review): *dcfgp is returned via __out — presumably allocated by
 * the callee; confirm ownership/free semantics in the implementation.
 */
extern	__checkReturn	int
siena_nvram_get_dynamic_cfg(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		boolean_t vpd,
	__out		siena_mc_dynamic_config_hdr_t **dcfgp,
	__out		size_t *sizep);

#endif	/* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
#if EFSYS_OPT_NVRAM

/* Typed (efx_nvram_type_t) NVRAM interface built on the partition routines */

#if EFSYS_OPT_DIAG

extern	__checkReturn	int
siena_nvram_test(
	__in		efx_nic_t *enp);

#endif	/* EFSYS_OPT_DIAG */

extern	__checkReturn	int
siena_nvram_size(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__out		size_t *sizep);

extern	__checkReturn	int
siena_nvram_get_version(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__out		uint32_t *subtypep,
	__out_ecount(4)	uint16_t version[4]);

/* Begin a read/write transaction; *pref_chunkp is the preferred chunk size */
extern	__checkReturn	int
siena_nvram_rw_start(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__out		size_t *pref_chunkp);

extern	__checkReturn	int
siena_nvram_read_chunk(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__in		unsigned int offset,
	__out_bcount(size)	caddr_t data,
	__in		size_t size);

extern	__checkReturn	int
siena_nvram_erase(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type);

extern	__checkReturn	int
siena_nvram_write_chunk(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__in		unsigned int offset,
	__in_bcount(size)	caddr_t data,
	__in		size_t size);

extern			void
siena_nvram_rw_finish(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type);
/*
 * Record new firmware version numbers for the given partition type.
 * "version" is read, not written, so it is annotated __in here
 * (previously __out, which was wrong; compare the __out_ecount(4)
 * output parameter of siena_nvram_get_version above).
 */
extern	__checkReturn	int
siena_nvram_set_version(
	__in		efx_nic_t *enp,
	__in		efx_nvram_type_t type,
	__in		uint16_t version[4]);

#endif	/* EFSYS_OPT_NVRAM */
#if EFSYS_OPT_VPD
extern __checkReturn int
siena_vpd_init(
__in efx_nic_t *enp);
extern __checkReturn int
siena_vpd_size(
__in efx_nic_t *enp,
__out size_t *sizep);
extern __checkReturn int
siena_vpd_read(
__in efx_nic_t *enp,
__out_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn int
siena_vpd_verify(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn int
siena_vpd_reinit(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern __checkReturn int
siena_vpd_get(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__inout efx_vpd_value_t *evvp);
extern __checkReturn int
siena_vpd_set(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__in efx_vpd_value_t *evvp);
extern __checkReturn int
siena_vpd_next(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__out efx_vpd_value_t *evvp,
__inout unsigned int *contp);
extern __checkReturn int
siena_vpd_write(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size);
extern void
siena_vpd_fini(
__in efx_nic_t *enp);
#endif /* EFSYS_OPT_VPD */
/* Link state snapshot decoded from the MC (see siena_phy_get_link()) */
typedef struct siena_link_state_s {
	uint32_t		sls_adv_cap_mask;	/* cached into ep_adv_cap_mask */
	uint32_t		sls_lp_cap_mask;
	unsigned int		sls_fcntl;		/* cached into ep_fcntl */
	efx_link_mode_t		sls_link_mode;
#if EFSYS_OPT_LOOPBACK
	efx_loopback_type_t	sls_loopback;
#endif
	boolean_t		sls_mac_up;		/* reported by siena_mac_up() */
} siena_link_state_t;
/* PHY support */

/* Decode a link-change event into a link mode */
extern			void
siena_phy_link_ev(
	__in		efx_nic_t *enp,
	__in		efx_qword_t *eqp,
	__out		efx_link_mode_t *link_modep);

extern	__checkReturn	int
siena_phy_get_link(
	__in		efx_nic_t *enp,
	__out		siena_link_state_t *slsp);

extern	__checkReturn	int
siena_phy_power(
	__in		efx_nic_t *enp,
	__in		boolean_t on);

extern	__checkReturn	int
siena_phy_reconfigure(
	__in		efx_nic_t *enp);

extern	__checkReturn	int
siena_phy_verify(
	__in		efx_nic_t *enp);

extern	__checkReturn	int
siena_phy_oui_get(
	__in		efx_nic_t *enp,
	__out		uint32_t *ouip);

#if EFSYS_OPT_PHY_STATS

extern			void
siena_phy_decode_stats(
	__in		efx_nic_t *enp,
	__in		uint32_t vmask,
	__in_opt	efsys_mem_t *esmp,
	__out_opt	uint64_t *smaskp,
	__out_ecount_opt(EFX_PHY_NSTATS)	uint32_t *stat);

extern	__checkReturn	int
siena_phy_stats_update(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__out_ecount(EFX_PHY_NSTATS)	uint32_t *stat);

#endif	/* EFSYS_OPT_PHY_STATS */

#if EFSYS_OPT_PHY_PROPS

#if EFSYS_OPT_NAMES

extern		const char __cs *
siena_phy_prop_name(
	__in		efx_nic_t *enp,
	__in		unsigned int id);

#endif	/* EFSYS_OPT_NAMES */

extern	__checkReturn	int
siena_phy_prop_get(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t flags,
	__out		uint32_t *valp);

extern	__checkReturn	int
siena_phy_prop_set(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t val);

#endif	/* EFSYS_OPT_PHY_PROPS */

#if EFSYS_OPT_PHY_BIST

extern	__checkReturn	int
siena_phy_bist_start(
	__in		efx_nic_t *enp,
	__in		efx_phy_bist_type_t type);

extern	__checkReturn	int
siena_phy_bist_poll(
	__in		efx_nic_t *enp,
	__in		efx_phy_bist_type_t type,
	__out		efx_phy_bist_result_t *resultp,
	__out_opt __drv_when(count > 0, __notnull)
	uint32_t *value_maskp,
	__out_ecount_opt(count)	__drv_when(count > 0, __notnull)
	unsigned long *valuesp,
	__in		size_t count);

extern			void
siena_phy_bist_stop(
	__in		efx_nic_t *enp,
	__in		efx_phy_bist_type_t type);

#endif	/* EFSYS_OPT_PHY_BIST */
/* MAC support */

extern	__checkReturn	int
siena_mac_poll(
	__in		efx_nic_t *enp,
	__out		efx_link_mode_t *link_modep);

extern	__checkReturn	int
siena_mac_up(
	__in		efx_nic_t *enp,
	__out		boolean_t *mac_upp);

extern	__checkReturn	int
siena_mac_reconfigure(
	__in		efx_nic_t *enp);

#if EFSYS_OPT_LOOPBACK

extern	__checkReturn	int
siena_mac_loopback_set(
	__in		efx_nic_t *enp,
	__in		efx_link_mode_t link_mode,
	__in		efx_loopback_type_t loopback_type);

#endif	/* EFSYS_OPT_LOOPBACK */

#if EFSYS_OPT_MAC_STATS

extern	__checkReturn	int
siena_mac_stats_clear(
	__in		efx_nic_t *enp);

extern	__checkReturn	int
siena_mac_stats_upload(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp);

extern	__checkReturn	int
siena_mac_stats_periodic(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__in		uint16_t period_ms,
	__in		boolean_t events);

extern	__checkReturn	int
siena_mac_stats_update(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__out_ecount(EFX_MAC_NSTATS)	efsys_stat_t *stat,
	__out_opt	uint32_t *generationp);

#endif	/* EFSYS_OPT_MAC_STATS */

/* Monitor (sensor) support */

extern	__checkReturn	int
siena_mon_reset(
	__in		efx_nic_t *enp);

extern	__checkReturn	int
siena_mon_reconfigure(
	__in		efx_nic_t *enp);

#if EFSYS_OPT_MON_STATS

extern			void
siena_mon_decode_stats(
	__in		efx_nic_t *enp,
	__in		uint32_t dmask,
	__in_opt	efsys_mem_t *esmp,
	__out_opt	uint32_t *vmaskp,
	__out_ecount_opt(EFX_MON_NSTATS)	efx_mon_stat_value_t *value);

extern	__checkReturn	int
siena_mon_ev(
	__in		efx_nic_t *enp,
	__in		efx_qword_t *eqp,
	__out		efx_mon_stat_t *idp,
	__out		efx_mon_stat_value_t *valuep);

extern	__checkReturn	int
siena_mon_stats_update(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__out_ecount(EFX_MON_NSTATS)	efx_mon_stat_value_t *values);

#endif	/* EFSYS_OPT_MON_STATS */

#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_SIENA_IMPL_H */

View File

@ -0,0 +1,545 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_SIENA
__checkReturn int
siena_mac_poll(
__in efx_nic_t *enp,
__out efx_link_mode_t *link_modep)
{
efx_port_t *epp = &(enp->en_port);
siena_link_state_t sls;
int rc;
if ((rc = siena_phy_get_link(enp, &sls)) != 0)
goto fail1;
epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
epp->ep_fcntl = sls.sls_fcntl;
*link_modep = sls.sls_link_mode;
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
*link_modep = EFX_LINK_UNKNOWN;
return (rc);
}
__checkReturn int
siena_mac_up(
__in efx_nic_t *enp,
__out boolean_t *mac_upp)
{
siena_link_state_t sls;
int rc;
/*
* Because Siena doesn't *require* polling, we can't rely on
* siena_mac_poll() being executed to populate epp->ep_mac_up.
*/
if ((rc = siena_phy_get_link(enp, &sls)) != 0)
goto fail1;
*mac_upp = sls.sls_mac_up;
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
/*
 * Push the current port configuration to the MC in two MCDI commands:
 * SET_MAC (MTU, drain, station address, reject filters, flow control)
 * followed by SET_MCAST_HASH (multicast hash filter).  Returns 0 or the
 * MCDI error code of whichever command failed.
 */
	__checkReturn	int
siena_mac_reconfigure(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	/* One buffer serves both requests; sized for the larger of the two */
	uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
		    MC_CMD_SET_MCAST_HASH_IN_LEN)];
	efx_mcdi_req_t req;
	unsigned int fcntl;
	int rc;

	req.emr_cmd = MC_CMD_SET_MAC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_MAC_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
	MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
	EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
	    epp->ep_mac_addr);
	/* Reject flags are the inverse of the accept flags cached in the port */
	MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
	    SET_MAC_IN_REJECT_UNCST, !epp->ep_unicst,
	    SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);

	/* Map the cached flow control state onto the MCDI encoding */
	if (epp->ep_fcntl_autoneg)
		/* efx_fcntl_set() has already set the phy capabilities */
		fcntl = MC_CMD_FCNTL_AUTO;
	else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
		fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
			? MC_CMD_FCNTL_BIDIR
			: MC_CMD_FCNTL_RESPOND;
	else
		fcntl = MC_CMD_FCNTL_OFF;

	MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Push multicast hash. Set the broadcast bit (0xff) appropriately */
	req.emr_cmd = MC_CMD_SET_MCAST_HASH;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_MCAST_HASH_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
	    epp->ep_multicst_hash, sizeof (epp->ep_multicst_hash));
	if (epp->ep_brdcst)
		/* Bit 0x7f of HASH1 corresponds to overall hash bin 0xff */
		EFX_SET_OWORD_BIT(*MCDI_IN2(req, efx_oword_t,
		    SET_MCAST_HASH_IN_HASH1), 0x7f);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_LOOPBACK
/*
 * Select a new loopback mode for the given link mode.  On Siena the PHY
 * object applies the setting, so cache the new values in the port,
 * delegate to epo_reconfigure(), and roll the cache back on failure.
 */
	__checkReturn	int
siena_mac_loopback_set(
	__in		efx_nic_t *enp,
	__in		efx_link_mode_t link_mode,
	__in		efx_loopback_type_t loopback_type)
{
	efx_port_t *epp = &(enp->en_port);
	efx_phy_ops_t *epop = epp->ep_epop;
	efx_loopback_type_t old_loopback_type;
	efx_link_mode_t old_loopback_link_mode;
	int rc;

	/* The PHY object handles this on Siena */
	old_loopback_type = epp->ep_loopback_type;
	old_loopback_link_mode = epp->ep_loopback_link_mode;

	epp->ep_loopback_type = loopback_type;
	epp->ep_loopback_link_mode = link_mode;

	if ((rc = epop->epo_reconfigure(enp)) != 0)
		goto fail1;

	return (0);

fail1:
	/*
	 * Probe the fail1 site with rc, consistent with every other
	 * failure path in this file (was EFSYS_PROBE(fail2): wrong
	 * label and it dropped rc).
	 */
	EFSYS_PROBE1(fail1, int, rc);

	/* Restore the previous loopback configuration */
	epp->ep_loopback_type = old_loopback_type;
	epp->ep_loopback_link_mode = old_loopback_link_mode;

	return (rc);
}
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MAC_STATS
/* Ask the MC to zero its MAC statistics counters */
	__checkReturn	int
siena_mac_stats_clear(
	__in		efx_nic_t *enp)
{
	uint8_t inbuf[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t mreq;
	int rc;

	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);

	mreq.emr_cmd = MC_CMD_MAC_STATS;
	mreq.emr_in_buf = inbuf;
	mreq.emr_in_length = sizeof (inbuf);
	mreq.emr_out_buf = NULL;
	mreq.emr_out_length = 0;

	/* MAC_STATS with CLEAR set and neither DMA nor a periodic change */
	MCDI_IN_POPULATE_DWORD_3(mreq, MAC_STATS_IN_CMD,
	    MAC_STATS_IN_DMA, 0,
	    MAC_STATS_IN_CLEAR, 1,
	    MAC_STATS_IN_PERIODIC_CHANGE, 0);

	efx_mcdi_execute(enp, &mreq);

	rc = mreq.emr_rc;
	if (rc != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Request a one-shot DMA of the MC's MAC statistics into the buffer
 * described by "esmp".  Returns 0 or the MCDI error code.
 */
	__checkReturn	int
siena_mac_stats_upload(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp)
{
	uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t req;
	size_t bytes;
	int rc;

	/* The DMA buffer must be large enough to hold every statistic */
	EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
	    EFX_MAC_STATS_SIZE);

	bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
	    EFSYS_MEM_ADDR(esmp) >> 32);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 */
	MCDI_IN_POPULATE_DWORD_3(req, MAC_STATS_IN_CMD,
	    MAC_STATS_IN_DMA, 1,
	    MAC_STATS_IN_CLEAR, 0,
	    MAC_STATS_IN_PERIODIC_CHANGE, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Enable or disable periodic DMA of MAC statistics into "esmp".
 *
 * period_ms - reporting interval in milliseconds; 0 disables periodic
 *	       reporting.  (Renamed from "period" to match the
 *	       siena_mac_stats_periodic() prototype in siena_impl.h.)
 * events    - if B_TRUE, the MC also raises an event per update.
 *
 * Returns 0 or the MCDI error code.
 */
	__checkReturn	int
siena_mac_stats_periodic(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__in		uint16_t period_ms,
	__in		boolean_t events)
{
	uint8_t payload[MC_CMD_MAC_STATS_IN_LEN];
	efx_mcdi_req_t req;
	size_t bytes;
	int rc;

	bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_MAC_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
	    EFSYS_MEM_ADDR(esmp) >> 32);
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 */
	MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
	    MAC_STATS_IN_DMA, 0,
	    MAC_STATS_IN_CLEAR, 0,
	    MAC_STATS_IN_PERIODIC_CHANGE, 1,
	    MAC_STATS_IN_PERIODIC_ENABLE, period_ms ? 1 : 0,
	    MAC_STATS_IN_PERIODIC_NOEVENT, events ? 0 : 1,
	    MAC_STATS_IN_PERIOD_MS, period_ms);
	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/* Read one 64-bit statistic from the DMAed statistics buffer */
#define	SIENA_MAC_STAT_READ(_esmp, _field, _eqp)			\
	EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)

/*
 * Decode the MC statistics DMA buffer "esmp" into the EFX statistics
 * array "stat".  The MC brackets each DMA with matching generation
 * counts; GENERATION_END is read first and GENERATION_START last, and a
 * mismatch (buffer updated mid-read) returns EAGAIN so the caller can
 * retry.  On success the generation count is optionally returned via
 * "generationp".
 */
	__checkReturn	int
siena_mac_stats_update(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__out_ecount(EFX_MAC_NSTATS)	efsys_stat_t *stat,
	__out_opt	uint32_t *generationp)
{
	/* NOTE(review): rx_pkts actually holds MC_CMD_MAC_RX_BYTES below —
	 * the name is misleading; consider renaming. */
	efx_qword_t rx_pkts;
	efx_qword_t value;
	efx_qword_t generation_start;
	efx_qword_t generation_end;

	_NOTE(ARGUNUSED(enp))

	/* Read END first so we don't race with the MC */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
	    &generation_end);
	EFSYS_MEM_READ_BARRIER();

	/* TX */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
	/* Control packets are excluded from the TX packet count */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
	EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);

	/* LT64 and 64 are accumulated into the single LE_64 bucket */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);

	/* 15XX_TO_JUMBO and GTJUMBO are merged into GE_15XX */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
	EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
	    &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
	    &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
	    &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);

	/* RX */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &rx_pkts);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &rx_pkts);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);

	/* UNDERSIZE and 64 are accumulated into the single LE_64 bucket */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
	EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);

	/* 15XX_TO_JUMBO and GTJUMBO are merged into GE_15XX */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
	EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);

	/* Per-lane error counters are packed two 32-bit lanes per qword */
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
	    &(value.eq_dword[0]));
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
	    &(value.eq_dword[1]));

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
	    &(value.eq_dword[0]));
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
	    &(value.eq_dword[1]));

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
	    &(value.eq_dword[0]));
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
	    &(value.eq_dword[1]));

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
	    &(value.eq_dword[0]));
	EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
	    &(value.eq_dword[1]));

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);

	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
	EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);

	EFSYS_MEM_READ_BARRIER();
	SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
	    &generation_start);

	/* Check that we didn't read the stats in the middle of a DMA */
	if (memcmp(&generation_start, &generation_end,
	    sizeof (generation_start)))
		return (EAGAIN);

	if (generationp)
		*generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);

	return (0);
}
#endif /* EFSYS_OPT_MAC_STATS */
#endif /* EFSYS_OPT_SIENA */

View File

@ -0,0 +1,248 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_SIENA
/*
 * No-op on Siena: sensor state lives in the MC, so there is nothing
 * for the driver to reset.
 */
	__checkReturn	int
siena_mon_reset(
	__in		efx_nic_t *enp)
{
	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	_NOTE(ARGUNUSED(enp))

	return (0);
}
/*
 * No-op on Siena: sensor configuration is owned by the MC, so there is
 * nothing for the driver to reconfigure.
 */
	__checkReturn	int
siena_mon_reconfigure(
	__in		efx_nic_t *enp)
{
	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	_NOTE(ARGUNUSED(enp))

	return (0);
}
#if EFSYS_OPT_MON_STATS
/* Sentinel for sensors that belong to the other port */
#define	SIENA_MON_WRONG_PORT (uint16_t)0xffff

/*
 * Per-port translation tables indexed by MC_CMD_SENSOR_* id, yielding
 * the corresponding EFX_MON_STAT_* id (or SIENA_MON_WRONG_PORT).  The
 * entry order mirrors the MC sensor numbering, so do not reorder.
 */
static	__cs uint16_t	__siena_mon_port0_map[] = {
	EFX_MON_STAT_INT_TEMP,		/* MC_CMD_SENSOR_CONTROLLER_TEMP */
	EFX_MON_STAT_EXT_TEMP,		/* MC_CMD_SENSOR_PHY_COMMON_TEMP */
	EFX_MON_STAT_INT_COOLING,	/* MC_CMD_SENSOR_CONTROLLER_COOLING */
	EFX_MON_STAT_EXT_TEMP,		/* MC_CMD_SENSOR_PHY0_TEMP */
	EFX_MON_STAT_EXT_COOLING,	/* MC_CMD_SENSOR_PHY0_COOLING */
	SIENA_MON_WRONG_PORT,		/* MC_CMD_SENSOR_PHY1_TEMP */
	SIENA_MON_WRONG_PORT,		/* MC_CMD_SENSOR_PHY1_COOLING */
	EFX_MON_STAT_1V,		/* MC_CMD_SENSOR_IN_1V0 */
	EFX_MON_STAT_1_2V,		/* MC_CMD_SENSOR_IN_1V2 */
	EFX_MON_STAT_1_8V,		/* MC_CMD_SENSOR_IN_1V8 */
	EFX_MON_STAT_2_5V,		/* MC_CMD_SENSOR_IN_2V5 */
	EFX_MON_STAT_3_3V,		/* MC_CMD_SENSOR_IN_3V3 */
	EFX_MON_STAT_12V,		/* MC_CMD_SENSOR_IN_12V0 */
};

static	__cs uint16_t	__siena_mon_port1_map[] = {
	EFX_MON_STAT_INT_TEMP,		/* MC_CMD_SENSOR_CONTROLLER_TEMP */
	EFX_MON_STAT_EXT_TEMP,		/* MC_CMD_SENSOR_PHY_COMMON_TEMP */
	EFX_MON_STAT_INT_COOLING,	/* MC_CMD_SENSOR_CONTROLLER_COOLING */
	SIENA_MON_WRONG_PORT,		/* MC_CMD_SENSOR_PHY0_TEMP */
	SIENA_MON_WRONG_PORT,		/* MC_CMD_SENSOR_PHY0_COOLING */
	EFX_MON_STAT_EXT_TEMP,		/* MC_CMD_SENSOR_PHY1_TEMP */
	EFX_MON_STAT_EXT_COOLING,	/* MC_CMD_SENSOR_PHY1_COOLING */
	EFX_MON_STAT_1V,		/* MC_CMD_SENSOR_IN_1V0 */
	EFX_MON_STAT_1_2V,		/* MC_CMD_SENSOR_IN_1V2 */
	EFX_MON_STAT_1_8V,		/* MC_CMD_SENSOR_IN_1V8 */
	EFX_MON_STAT_2_5V,		/* MC_CMD_SENSOR_IN_2V5 */
	EFX_MON_STAT_3_3V,		/* MC_CMD_SENSOR_IN_3V3 */
	EFX_MON_STAT_12V,		/* MC_CMD_SENSOR_IN_12V0 */
};
/* Compile-time check that an MC sensor state matches its EFX equivalent */
#define	SIENA_STATIC_SENSOR_ASSERT(_field)				\
	EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field		\
	    == EFX_MON_STAT_STATE_ ## _field)

/*
 * Translate the MC sensor mask "dmask" (and, if provided, the DMAed
 * sensor readings in "esmp") into EFX sensor ids/values for this port.
 * *vmaskp receives the mask of EFX sensors actually decoded.
 */
			void
siena_mon_decode_stats(
	__in		efx_nic_t *enp,
	__in		uint32_t dmask,
	__in_opt	efsys_mem_t *esmp,
	__out_opt	uint32_t *vmaskp,
	__out_ecount_opt(EFX_MON_NSTATS)	efx_mon_stat_value_t *value)
{
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	uint16_t *sensor_map;
	uint16_t mc_sensor;
	size_t mc_sensor_max;
	uint32_t vmask = 0;

	/* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
	SIENA_STATIC_SENSOR_ASSERT(OK);
	SIENA_STATIC_SENSOR_ASSERT(WARNING);
	SIENA_STATIC_SENSOR_ASSERT(FATAL);
	SIENA_STATIC_SENSOR_ASSERT(BROKEN);

	EFX_STATIC_ASSERT(sizeof (__siena_mon_port1_map)
	    == sizeof (__siena_mon_port0_map));
	mc_sensor_max = EFX_ARRAY_SIZE(__siena_mon_port0_map);

	/* emi_port is 1-based; port 1 uses the port0 map */
	sensor_map = (emip->emi_port == 1)
		? __siena_mon_port0_map
		: __siena_mon_port1_map;

	/*
	 * dmask may legitimately contain sensors not understood by the driver
	 */
	for (mc_sensor = 0; mc_sensor < mc_sensor_max; ++mc_sensor) {
		uint16_t efx_sensor = sensor_map[mc_sensor];

		if (efx_sensor == SIENA_MON_WRONG_PORT)
			continue;
		EFSYS_ASSERT(efx_sensor < EFX_MON_NSTATS);

		/* Skip sensors not present in the supplied mask */
		if (~dmask & (1 << mc_sensor))
			continue;

		vmask |= (1 << efx_sensor);
		if (value != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
			efx_mon_stat_value_t *emsvp = value + efx_sensor;
			efx_dword_t dword;

			/* Each sensor reading is a packed 32-bit entry */
			EFSYS_MEM_READD(esmp, 4 * mc_sensor, &dword);
			emsvp->emsv_value =
				(uint16_t)EFX_DWORD_FIELD(
					dword,
					MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
			emsvp->emsv_state =
				(uint16_t)EFX_DWORD_FIELD(
					dword,
					MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
		}
	}

	if (vmaskp != NULL)
		*vmaskp = vmask;
}
/*
 * Decode a sensor event from the MC into an EFX sensor id and value.
 * Returns ENOTSUP for sensors this driver does not understand, and
 * ENODEV for sensors belonging to the other port.
 */
	__checkReturn	int
siena_mon_ev(
	__in		efx_nic_t *enp,
	__in		efx_qword_t *eqp,
	__out		efx_mon_stat_t *idp,
	__out		efx_mon_stat_value_t *valuep)
{
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint16_t ev_monitor;
	uint16_t ev_state;
	uint16_t ev_value;
	uint16_t *sensor_map;
	efx_mon_stat_t id;
	int rc;

	/* emi_port is 1-based; port 1 uses the port0 map */
	sensor_map = (emip->emi_port == 1)
		? __siena_mon_port0_map
		: __siena_mon_port1_map;

	ev_monitor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
	ev_state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
	ev_value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);

	/* Hardware must support this statistic */
	EFSYS_ASSERT((1 << ev_monitor) & encp->enc_siena_mon_stat_mask);

	/* But we don't have to understand it */
	if (ev_monitor >= EFX_ARRAY_SIZE(__siena_mon_port0_map)) {
		rc = ENOTSUP;
		goto fail1;
	}
	id = sensor_map[ev_monitor];
	if (id == SIENA_MON_WRONG_PORT)
		return (ENODEV);
	EFSYS_ASSERT(id < EFX_MON_NSTATS);

	*idp = id;
	valuep->emsv_value = ev_value;
	valuep->emsv_state = ev_state;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Request a DMA of all sensor readings into "esmp" via
 * MC_CMD_READ_SENSORS, then decode them into "values".  Returns 0 or
 * the MCDI error code.
 */
	__checkReturn	int
siena_mon_stats_update(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__out_ecount(EFX_MON_NSTATS)	efx_mon_stat_value_t *values)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t dmask = encp->enc_siena_mon_stat_mask;
	uint32_t vmask;
	uint8_t payload[MC_CMD_READ_SENSORS_IN_LEN];
	efx_mcdi_req_t req;
	int rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	req.emr_cmd = MC_CMD_READ_SENSORS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_READ_SENSORS_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, READ_SENSORS_IN_DMA_ADDR_LO,
	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, READ_SENSORS_IN_DMA_ADDR_HI,
	    EFSYS_MEM_ADDR(esmp) >> 32);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	siena_mon_decode_stats(enp, dmask, esmp, &vmask, values);
	EFSYS_ASSERT(vmask == encp->enc_mon_stat_mask);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_MON_STATS */
#endif /* EFSYS_OPT_MON_SIENA */

View File

@ -0,0 +1,964 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_SIENA
/*
 * Read the bitmask of NVRAM partition types implemented on this board
 * (MC_CMD_NVRAM_TYPES).
 */
static	__checkReturn		int
siena_nic_get_partn_mask(
	__in			efx_nic_t *enp,
	__out			unsigned int *maskp)
{
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_TYPES;
	EFX_STATIC_ASSERT(MC_CMD_NVRAM_TYPES_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Guard against a short (truncated) response */
	if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Ask the MC to reboot out of its assertion handler so that normal
 * MCDI processing can resume (MC_CMD_REBOOT with AFTER_ASSERTION).
 */
static	__checkReturn		int
siena_nic_exit_assertion_handler(
	__in			efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_REBOOT_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_REBOOT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_REBOOT_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
	    MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);

	efx_mcdi_execute(enp, &req);

	/*
	 * NOTE(review): EIO is tolerated here — presumably the MC cannot
	 * complete the MCDI exchange while it reboots; confirm against the
	 * MCDI specification.
	 */
	if (req.emr_rc != 0 && req.emr_rc != EIO) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read out (and clear) any assertion state recorded by the MC,
 * reporting the failure reason and saved GP registers via EFSYS_PROBE.
 * Returns 0 whether or not an assertion was recorded.
 */
static	__checkReturn		int
siena_nic_read_assertion(
	__in			efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
			    MC_CMD_GET_ASSERTS_OUT_LEN)];
	const char *reason;
	unsigned int flags;
	unsigned int index;
	unsigned int ofst;
	int retry;
	int rc;

	/*
	 * Before we attempt to chat to the MC, we should verify that the MC
	 * isn't in its assertion handler, either due to a previous reboot,
	 * or because we're reinitializing due to an eec_exception().
	 *
	 * Use GET_ASSERTS to read any assertion state that may be present.
	 * Retry this command twice. Once because a boot-time assertion failure
	 * might cause the 1st MCDI request to fail. And once again because
	 * we might race with siena_nic_exit_assertion_handler() running on the
	 * other port.
	 */
	retry = 2;
	do {
		req.emr_cmd = MC_CMD_GET_ASSERTS;
		req.emr_in_buf = payload;
		req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
		req.emr_out_buf = payload;
		req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;

		MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
		efx_mcdi_execute(enp, &req);
	} while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	/* Print out any assertion state recorded */
	flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return (0);

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: "unknown assertion";
	EFSYS_PROBE3(mcpu_assertion,
	    const char *, reason, unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
	    unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (index 0 is skipped) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1; index < 32; index++) {
		EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
			EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
					EFX_DWORD_0));
		ofst += sizeof (efx_dword_t);
	}
	EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Inform the MC that the driver is attaching to (attach == B_TRUE) or
 * detaching from (B_FALSE) this port.
 */
static	__checkReturn		int
siena_nic_attach(
	__in			efx_nic_t *enp,
	__in			boolean_t attach)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_DRV_ATTACH_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_DRV_ATTACH;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/*
	 * The response payload is discarded (emr_out_buf is NULL); only
	 * its length is checked to validate that the MC sent a complete
	 * DRV_ATTACH response.
	 */
	if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_PCIE_TUNE

/*
 * Enable the bug17230 workaround (extended PCIe sync) in the MC via
 * MC_CMD_WORKAROUND.
 */
	__checkReturn	int
siena_nic_pcie_extended_sync(
	__in		efx_nic_t *enp)
{
	uint8_t inbuf[MC_CMD_WORKAROUND_IN_LEN];
	efx_mcdi_req_t req;
	int rc;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	req.emr_cmd = MC_CMD_WORKAROUND;
	req.emr_in_buf = inbuf;
	req.emr_in_length = sizeof (inbuf);
	EFX_STATIC_ASSERT(MC_CMD_WORKAROUND_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, MC_CMD_WORKAROUND_BUG17230);
	MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, 1);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_PCIE_TUNE */
/*
 * Read board configuration (MAC address, board type) and resource
 * limits (event/TX/RX queue counts) from the MC into the NIC config,
 * then derive the buffer table limit.
 */
static	__checkReturn	int
siena_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	uint8_t outbuf[MAX(MC_CMD_GET_BOARD_CFG_OUT_LEN,
		    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
	efx_mcdi_req_t req;
	uint8_t *src;
	int rc;

	/* Board configuration */
	req.emr_cmd = MC_CMD_GET_BOARD_CFG;
	EFX_STATIC_ASSERT(MC_CMD_GET_BOARD_CFG_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	/* MCDI port 1 uses the PORT0 MAC address base, port 2 uses PORT1 */
	if (emip->emi_port == 1)
		src = MCDI_OUT2(req, uint8_t,
		    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
	else
		src = MCDI_OUT2(req, uint8_t,
		    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
	EFX_MAC_ADDR_COPY(encp->enc_mac_addr, src);

	encp->enc_board_type = MCDI_OUT_DWORD(req,
	    GET_BOARD_CFG_OUT_BOARD_TYPE);

	/* Resource limits */
	req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
	EFX_STATIC_ASSERT(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc == 0) {
		if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
			rc = EMSGSIZE;
			goto fail3;
		}

		encp->enc_evq_limit = MCDI_OUT_DWORD(req,
		    GET_RESOURCE_LIMITS_OUT_EVQ);
		encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET,
		    MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ));
		encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET,
		    MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ));
	} else if (req.emr_rc == ENOTSUP) {
		/* Firmware without GET_RESOURCE_LIMITS: fall back to defaults */
		encp->enc_evq_limit = 1024;
		encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
		encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
	} else {
		rc = req.emr_rc;
		goto fail4;
	}

	/*
	 * NOTE(review): the per-queue multipliers (16 rows/TXQ, 64 rows/RXQ)
	 * presumably reflect SRAM consumed by descriptor caches — confirm
	 * against the Siena SRAM layout.
	 */
	encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
	    (encp->enc_txq_limit * 16) - (encp->enc_rxq_limit * 64);

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read the PHY configuration (MC_CMD_GET_PHY_CFG) into the NIC and
 * port configuration: PHY type/name/revision, LED and property masks,
 * fixed-port media type, capability mask, and BIST support.
 */
static	__checkReturn	int
siena_phy_cfg(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
	int rc;

	req.emr_cmd = MC_CMD_GET_PHY_CFG;
	EFX_STATIC_ASSERT(MC_CMD_GET_PHY_CFG_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
#if EFSYS_OPT_NAMES
	/*
	 * NOTE(review): strncpy() with size-1 does not itself guarantee NUL
	 * termination of enc_phy_name; presumably the field is zeroed when
	 * the config structure is initialized — confirm.
	 */
	(void) strncpy(encp->enc_phy_name,
		MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
		MIN(sizeof (encp->enc_phy_name) - 1,
		    MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
#endif	/* EFSYS_OPT_NAMES */
	/* Zero-fill first so the revision string is always NUL-terminated */
	(void) memset(encp->enc_phy_revision, 0,
	    sizeof (encp->enc_phy_revision));
	memcpy(encp->enc_phy_revision,
		MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
		MIN(sizeof (encp->enc_phy_revision) - 1,
		    MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
#if EFSYS_OPT_PHY_LED_CONTROL
	encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
			      (1 << EFX_PHY_LED_OFF) |
			      (1 << EFX_PHY_LED_ON));
#endif	/* EFSYS_OPT_PHY_LED_CONTROL */
#if EFSYS_OPT_PHY_PROPS
	encp->enc_phy_nprops = 0;
#endif	/* EFSYS_OPT_PHY_PROPS */

	/* Get the media type of the fixed port, if recognised. */
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
	epp->ep_fixed_port_type =
		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
	if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
		epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;

	epp->ep_phy_cap_mask =
		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
#if EFSYS_OPT_PHY_FLAGS
	encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
#endif	/* EFSYS_OPT_PHY_FLAGS */

	encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);

	/* Populate internal state */
	encp->enc_siena_channel =
		(uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);

#if EFSYS_OPT_PHY_STATS
	encp->enc_siena_phy_stat_mask =
		MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);

	/* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
	siena_phy_decode_stats(enp, encp->enc_siena_phy_stat_mask,
	    NULL, &encp->enc_phy_stat_mask, NULL);
#endif	/* EFSYS_OPT_PHY_STATS */

#if EFSYS_OPT_PHY_BIST
	encp->enc_bist_mask = 0;
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
		encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_CABLE_SHORT);
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST_CABLE_LONG))
		encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_CABLE_LONG);
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST))
		encp->enc_bist_mask |= (1 << EFX_PHY_BIST_TYPE_NORMAL);
#endif	/* EFSYS_OPT_PHY_BIST */

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#if EFSYS_OPT_LOOPBACK
static __checkReturn int
siena_loopback_cfg(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_mcdi_req_t req;
uint8_t outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
int rc;
req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
EFX_STATIC_ASSERT(MC_CMD_GET_LOOPBACK_MODES_IN_LEN == 0);
req.emr_in_buf = NULL;
req.emr_in_length = 0;
req.emr_out_buf = outbuf;
req.emr_out_length = sizeof (outbuf);
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
if (req.emr_out_length_used < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
rc = EMSGSIZE;
goto fail2;
}
/*
* We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
* in siena_phy.c:siena_phy_get_link()
*/
encp->enc_loopback_types[EFX_LINK_100FDX] = EFX_LOOPBACK_MASK &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_100M) &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
encp->enc_loopback_types[EFX_LINK_1000FDX] = EFX_LOOPBACK_MASK &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_1G) &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
encp->enc_loopback_types[EFX_LINK_10000FDX] = EFX_LOOPBACK_MASK &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_10G) &
MCDI_OUT_DWORD(req, GET_LOOPBACK_MODES_OUT_SUGGESTED);
encp->enc_loopback_types[EFX_LINK_UNKNOWN] =
(1 << EFX_LOOPBACK_OFF) |
encp->enc_loopback_types[EFX_LINK_100FDX] |
encp->enc_loopback_types[EFX_LINK_1000FDX] |
encp->enc_loopback_types[EFX_LINK_10000FDX];
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
#endif /* EFSYS_OPT_LOOPBACK */
#if EFSYS_OPT_MON_STATS

/*
 * Query the set of hardware sensors (MC_CMD_SENSOR_INFO) and record
 * both the raw MCDI sensor mask and the decoded EFX_MON_STAT mask in
 * the NIC config.
 */
static	__checkReturn	int
siena_monitor_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	uint8_t outbuf[MCDI_CTL_SDU_LEN_MAX];
	int rc;

	req.emr_cmd = MC_CMD_SENSOR_INFO;
	EFX_STATIC_ASSERT(MC_CMD_SENSOR_INFO_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Response must contain at least the sensor mask dword */
	if (req.emr_out_length_used < MC_CMD_SENSOR_INFO_OUT_MASK_OFST + 4) {
		rc = EMSGSIZE;
		goto fail2;
	}

	encp->enc_siena_mon_stat_mask =
	    MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
	encp->enc_mon_type = EFX_MON_SFC90X0;

	/* Decode only the mask here; no values are read (esmp == NULL) */
	siena_mon_decode_stats(enp, encp->enc_siena_mon_stat_mask,
	    NULL, &(encp->enc_mon_stat_mask), NULL);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_MON_STATS */
/*
 * Probe-time NIC initialization: clear any MC assertion state, attach
 * the driver to the port, then read board, PHY, (optionally) NVRAM
 * partition, loopback and monitor configuration into the NIC config.
 */
	__checkReturn	int
siena_nic_probe(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	siena_link_state_t sls;
	unsigned int mask;
	int rc;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	/* Read clear any assertion state */
	if ((rc = siena_nic_read_assertion(enp)) != 0)
		goto fail1;

	/* Exit the assertion handler */
	if ((rc = siena_nic_exit_assertion_handler(enp)) != 0)
		goto fail2;

	/* Wrestle control from the BMC */
	if ((rc = siena_nic_attach(enp, B_TRUE)) != 0)
		goto fail3;

	if ((rc = siena_board_cfg(enp)) != 0)
		goto fail4;

	encp->enc_evq_moderation_max =
		EFX_EV_TIMER_QUANTUM << FRF_CZ_TIMER_VAL_WIDTH;

	if ((rc = siena_phy_cfg(enp)) != 0)
		goto fail5;

	/* Obtain the default PHY advertised capabilities */
	if ((rc = siena_nic_reset(enp)) != 0)
		goto fail6;
	if ((rc = siena_phy_get_link(enp, &sls)) != 0)
		goto fail7;
	epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
	epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;

#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
	if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
		goto fail8;
	enp->en_u.siena.enu_partn_mask = mask;
#endif

#if EFSYS_OPT_MAC_STATS
	/* Wipe the MAC statistics */
	if ((rc = siena_mac_stats_clear(enp)) != 0)
		goto fail9;
#endif

#if EFSYS_OPT_LOOPBACK
	if ((rc = siena_loopback_cfg(enp)) != 0)
		goto fail10;
#endif

#if EFSYS_OPT_MON_STATS
	if ((rc = siena_monitor_cfg(enp)) != 0)
		goto fail11;
#endif

	encp->enc_features = enp->en_features;

	return (0);

#if EFSYS_OPT_MON_STATS
fail11:
	EFSYS_PROBE(fail11);
#endif
#if EFSYS_OPT_LOOPBACK
fail10:
	EFSYS_PROBE(fail10);
#endif
#if EFSYS_OPT_MAC_STATS
fail9:
	EFSYS_PROBE(fail9);
#endif
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
fail8:
	EFSYS_PROBE(fail8);
#endif
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Reset the port via MC_CMD_PORT_RESET, first reading out and exiting
 * any MC assertion state (siena_nic_reset() is called to recover from
 * BADASSERT failures).
 *
 * Returns 0 on success, or an errno value on failure.
 */
	__checkReturn	int
siena_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	int rc;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	/* siena_nic_reset() is called to recover from BADASSERT failures. */
	if ((rc = siena_nic_read_assertion(enp)) != 0)
		goto fail1;
	if ((rc = siena_nic_exit_assertion_handler(enp)) != 0)
		goto fail2;

	req.emr_cmd = MC_CMD_PORT_RESET;
	EFX_STATIC_ASSERT(MC_CMD_PORT_RESET_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	EFX_STATIC_ASSERT(MC_CMD_PORT_RESET_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	/*
	 * Bug fix: the failure paths previously returned 0, silently
	 * masking reset failures from callers (e.g. siena_nic_probe())
	 * which test this function's return code.
	 */
	return (rc);
}
/*
 * Configure MC log output delivery (MC_CMD_LOG_CTRL) so that firmware
 * log messages are delivered via an event queue.
 */
static	__checkReturn	int
siena_nic_logging(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_LOG_CTRL_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_LOG_CTRL;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_LOG_CTRL_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
	    MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
	/* NOTE(review): presumably selects event queue 0 as the
	 * destination — confirm against the MCDI specification. */
	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Configure Siena's RX block: enable ingress and restrict VLAN
 * ethertype parsing.
 */
static			void
siena_nic_rx_cfg(
	__in		efx_nic_t *enp)
{
	efx_oword_t oword;

	/*
	 * RX_INGR_EN is always enabled on Siena, because we rely on
	 * the RX parser to be resilient to missing SOP/EOP.
	 */
	EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);

	/* Disable parsing of additional 802.1Q in Q packets */
	EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
	EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
}
/* Disable user-level event (USR_EV) delivery by setting USREV_DIS. */
static			void
siena_nic_usrev_dis(
	__in		efx_nic_t *enp)
{
	efx_oword_t	oword;

	EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
	EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
}
/*
 * Per-start NIC initialization: set up MC logging, SRAM, the RX block,
 * disable USR_EV delivery, and force a PHY reconfigure (bug17057).
 */
	__checkReturn	int
siena_nic_init(
	__in		efx_nic_t *enp)
{
	int rc;

	EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

	if ((rc = siena_nic_logging(enp)) != 0)
		goto fail1;

	siena_sram_init(enp);

	/* Configure Siena's RX block */
	siena_nic_rx_cfg(enp);

	/* Disable USR_EVents for now */
	siena_nic_usrev_dis(enp);

	/* bug17057: Ensure set_link is called */
	if ((rc = siena_phy_reconfigure(enp)) != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/* No per-stop teardown is required on Siena. */
			void
siena_nic_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}
/* Detach the driver from the port (best-effort; result ignored). */
			void
siena_nic_unprobe(
	__in		efx_nic_t *enp)
{
	(void) siena_nic_attach(enp, B_FALSE);
}
#if EFSYS_OPT_DIAG

/*
 * Registers exercised by siena_nic_register_test().  Entries are
 * { offset, step, rows }; single registers use step 0, rows 1.
 * The mask field of each entry is filled in at run time from
 * __siena_register_masks.
 */
static efx_register_set_t __cs	__siena_registers[] = {
	{ FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
	{ FR_CZ_USR_EV_CFG_OFST, 0, 1 },
	{ FR_AZ_RX_CFG_REG_OFST, 0, 1 },
	{ FR_AZ_TX_CFG_REG_OFST, 0, 1 },
	{ FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
	{ FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
	{ FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
	{ FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
	{ FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
	{ FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
	{ FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
	{ FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
	{ FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
};
/*
 * 128-bit test masks, four 32-bit dwords per entry of
 * __siena_registers (same order); presumably each set bit marks a
 * bit safe to exercise — see efx_nic_test_registers().
 */
static const uint32_t __cs	__siena_register_masks[] = {
	0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
	0x000103FF, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
	0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
	0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
	0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
	0x00000003, 0x00000000, 0x00000000, 0x00000000,
	0x000003FF, 0x00000000, 0x00000000, 0x00000000,
	0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
};
/*
 * Register tables exercised by siena_nic_register_test().  Entries
 * are { offset, step, rows }; the mask field is filled in at run time
 * from __siena_table_masks.
 */
static efx_register_set_t __cs	__siena_tables[] = {
	{ FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
	    FR_AZ_RX_FILTER_TBL0_ROWS },
	{ FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
	    FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
	{ FR_AZ_RX_DESC_PTR_TBL_OFST,
	    FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
	{ FR_AZ_TX_DESC_PTR_TBL_OFST,
	    FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
	{ FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
	{ FR_CZ_TX_FILTER_TBL0_OFST,
	    FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
	{ FR_CZ_TX_MAC_FILTER_TBL0_OFST,
	    FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
};
/*
 * 128-bit test masks, four 32-bit dwords per entry of __siena_tables
 * (same order).
 */
static const uint32_t __cs	__siena_table_masks[] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
	0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
	0xFFFFFFFF, 0x0FFFFFFF, 0x01800000, 0x00000000,
	0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
	0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
	0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
};
/*
 * Diagnostic register test: populate the mask fields of the static
 * register and table descriptors, then exercise each register and
 * table with several bit patterns via efx_nic_test_registers() /
 * efx_nic_test_tables().
 */
	__checkReturn	int
siena_nic_register_test(
	__in		efx_nic_t *enp)
{
	efx_register_set_t *rsp;
	const uint32_t *dwordp;
	unsigned int nitems;
	unsigned int count;
	int rc;

	/* Fill out the register mask entries */
	EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
		    == EFX_ARRAY_SIZE(__siena_registers) * 4);

	nitems = EFX_ARRAY_SIZE(__siena_registers);
	dwordp = __siena_register_masks;
	for (count = 0; count < nitems; ++count) {
		rsp = __siena_registers + count;
		rsp->mask.eo_u32[0] = *dwordp++;
		rsp->mask.eo_u32[1] = *dwordp++;
		rsp->mask.eo_u32[2] = *dwordp++;
		rsp->mask.eo_u32[3] = *dwordp++;
	}

	/* Fill out the register table entries */
	EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
		    == EFX_ARRAY_SIZE(__siena_tables) * 4);

	nitems = EFX_ARRAY_SIZE(__siena_tables);
	dwordp = __siena_table_masks;
	for (count = 0; count < nitems; ++count) {
		rsp = __siena_tables + count;
		rsp->mask.eo_u32[0] = *dwordp++;
		rsp->mask.eo_u32[1] = *dwordp++;
		rsp->mask.eo_u32[2] = *dwordp++;
		rsp->mask.eo_u32[3] = *dwordp++;
	}

	if ((rc = efx_nic_test_registers(enp, __siena_registers,
	    EFX_ARRAY_SIZE(__siena_registers))) != 0)
		goto fail1;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BYTE_ALTERNATE,
	    EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail2;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BYTE_CHANGING,
	    EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail3;

	if ((rc = efx_nic_test_tables(enp, __siena_tables,
	    EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
		goto fail4;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_DIAG */
#endif /* EFSYS_OPT_SIENA */

View File

@ -0,0 +1,985 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_SIENA
#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM

/*
 * Report the size in bytes of an NVRAM partition
 * (MC_CMD_NVRAM_INFO).  Returns ENOTSUP if the partition is not in
 * the board's partition mask.
 */
	__checkReturn		int
siena_nvram_partn_size(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__out			size_t *sizep)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
			    MC_CMD_NVRAM_INFO_OUT_LEN)];
	int rc;

	/* Partition must be present on this board */
	if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
		rc = ENOTSUP;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_NVRAM_INFO;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_NVRAM_INFO_OUT_LEN;

	MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	*sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Begin an NVRAM partition update (MC_CMD_NVRAM_UPDATE_START).
 * Must be paired with siena_nvram_partn_unlock().
 */
	__checkReturn		int
siena_nvram_partn_lock(
	__in			efx_nic_t *enp,
	__in			unsigned int partn)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_NVRAM_UPDATE_START_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_IN_TYPE, partn);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read a byte range from an NVRAM partition into data[], issuing
 * MC_CMD_NVRAM_READ requests of at most SIENA_NVRAM_CHUNK bytes.
 */
	__checkReturn		int
siena_nvram_partn_read(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__in			unsigned int offset,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_LEN,
			    MC_CMD_NVRAM_READ_OUT_LEN(SIENA_NVRAM_CHUNK))];
	size_t chunk;
	int rc;

	while (size > 0) {
		chunk = MIN(size, SIENA_NVRAM_CHUNK);

		req.emr_cmd = MC_CMD_NVRAM_READ;
		req.emr_in_buf = payload;
		req.emr_in_length = MC_CMD_NVRAM_READ_IN_LEN;
		req.emr_out_buf = payload;
		req.emr_out_length =
		    MC_CMD_NVRAM_READ_OUT_LEN(SIENA_NVRAM_CHUNK);

		MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_TYPE, partn);
		MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_OFFSET, offset);
		MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_LENGTH, chunk);

		efx_mcdi_execute(enp, &req);

		if (req.emr_rc != 0) {
			rc = req.emr_rc;
			goto fail1;
		}

		/* The MC must have returned at least the chunk we asked for */
		if (req.emr_out_length_used <
		    MC_CMD_NVRAM_READ_OUT_LEN(chunk)) {
			rc = EMSGSIZE;
			goto fail2;
		}

		memcpy(data,
		    MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
		    chunk);

		size -= chunk;
		data += chunk;
		offset += chunk;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Erase a byte range of an NVRAM partition (MC_CMD_NVRAM_ERASE).
 */
	__checkReturn		int
siena_nvram_partn_erase(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__in			unsigned int offset,
	__in			size_t size)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_NVRAM_ERASE_IN_LEN];
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_ERASE;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_NVRAM_ERASE_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
	MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
	MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
__checkReturn int
siena_nvram_partn_write(
__in efx_nic_t *enp,
__in unsigned int partn,
__in unsigned int offset,
__out_bcount(size) caddr_t data,
__in size_t size)
{
efx_mcdi_req_t req;
uint8_t payload[MC_CMD_NVRAM_WRITE_IN_LEN(SIENA_NVRAM_CHUNK)];
size_t chunk;
int rc;
while (size > 0) {
chunk = MIN(size, SIENA_NVRAM_CHUNK);
req.emr_cmd = MC_CMD_NVRAM_WRITE;
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(chunk);
EFX_STATIC_ASSERT(MC_CMD_NVRAM_WRITE_OUT_LEN == 0);
req.emr_out_buf = NULL;
req.emr_out_length = 0;
MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, chunk);
memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
data, chunk);
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
size -= chunk;
data += chunk;
offset += chunk;
}
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
/*
 * Finish an NVRAM partition update (MC_CMD_NVRAM_UPDATE_FINISH).
 * Failure is only logged via EFSYS_PROBE; the function returns void.
 */
			void
siena_nvram_partn_unlock(
	__in		efx_nic_t *enp,
	__in		unsigned int partn)
{
	efx_mcdi_req_t req;
	uint8_t payload[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
	uint32_t reboot;
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	/*
	 * Reboot into the new image only for PHYs. The driver has to
	 * explicitly cope with an MC reboot after a firmware update.
	 */
	reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
		    partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
		    partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);

	MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_TYPE, partn);
	MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_IN_REBOOT, reboot);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return;

fail1:
	EFSYS_PROBE1(fail1, int, rc);
}
#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
#if EFSYS_OPT_NVRAM
/*
 * Mapping between an MCDI NVRAM partition number, the MCDI port that
 * may access it, and the corresponding external EFX_NVRAM_* type.
 */
typedef struct siena_parttbl_entry_s {
	unsigned int		partn;	/* MC_CMD_NVRAM_TYPE_* partition */
	unsigned int		port;	/* MCDI port number (1 or 2) */
	efx_nvram_type_t	nvtype;	/* external EFX_NVRAM_* type */
} siena_parttbl_entry_t;
/*
 * Partition lookup table, terminated by an all-zero sentinel entry
 * (port == 0) — see siena_parttbl_entry().
 */
static siena_parttbl_entry_t siena_parttbl[] = {
	{MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO,	1, EFX_NVRAM_NULLPHY},
	{MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO,	2, EFX_NVRAM_NULLPHY},
	{MC_CMD_NVRAM_TYPE_MC_FW,		1, EFX_NVRAM_MC_FIRMWARE},
	{MC_CMD_NVRAM_TYPE_MC_FW,		2, EFX_NVRAM_MC_FIRMWARE},
	{MC_CMD_NVRAM_TYPE_MC_FW_BACKUP,	1, EFX_NVRAM_MC_GOLDEN},
	{MC_CMD_NVRAM_TYPE_MC_FW_BACKUP,	2, EFX_NVRAM_MC_GOLDEN},
	{MC_CMD_NVRAM_TYPE_EXP_ROM,		1, EFX_NVRAM_BOOTROM},
	{MC_CMD_NVRAM_TYPE_EXP_ROM,		2, EFX_NVRAM_BOOTROM},
	{MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0,	1, EFX_NVRAM_BOOTROM_CFG},
	{MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1,	2, EFX_NVRAM_BOOTROM_CFG},
	{MC_CMD_NVRAM_TYPE_PHY_PORT0,		1, EFX_NVRAM_PHY},
	{MC_CMD_NVRAM_TYPE_PHY_PORT1,		2, EFX_NVRAM_PHY},
	{0, 0, 0},
};
/*
 * Look up the partition table entry matching the given external NVRAM
 * type on this port, or NULL if there is no such partition.
 */
static	__checkReturn		siena_parttbl_entry_t *
siena_parttbl_entry(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type)
{
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	siena_parttbl_entry_t *etp = siena_parttbl;

	EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);

	/* Scan up to the all-zero sentinel (port == 0) */
	while (etp->port > 0) {
		if (etp->nvtype == type && etp->port == emip->emi_port)
			break;
		++etp;
	}

	return ((etp->port > 0) ? etp : NULL);
}
#if EFSYS_OPT_DIAG

/*
 * Run the MC's NVRAM self-test (MC_CMD_NVRAM_TEST) on every partition
 * accessible from this port.  Returns EINVAL if any partition fails.
 */
	__checkReturn		int
siena_nvram_test(
	__in			efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	siena_parttbl_entry_t *entry;
	efx_mcdi_req_t req;
	uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
			    MC_CMD_NVRAM_TEST_OUT_LEN)];
	int result;
	int rc;

	req.emr_cmd = MC_CMD_NVRAM_TEST;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;

	/*
	 * Iterate over the list of supported partition types
	 * applicable to *this* port
	 */
	for (entry = siena_parttbl; entry->port > 0; ++entry) {
		if (entry->port != emip->emi_port ||
		    !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
			continue;

		MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, entry->partn);

		efx_mcdi_execute(enp, &req);

		if (req.emr_rc != 0) {
			rc = req.emr_rc;
			goto fail1;
		}

		if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
			rc = EMSGSIZE;
			goto fail2;
		}

		result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
		if (result == MC_CMD_NVRAM_TEST_FAIL) {
			EFSYS_PROBE1(nvram_test_failure, int, entry->partn);
			rc = (EINVAL);
			goto fail3;
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_DIAG */
/*
 * Report the size in bytes of the partition backing the given logical
 * NVRAM type.  On failure *sizep is set to zero and an errno value is
 * returned (ENOTSUP if the type has no partition on this port).
 */
	__checkReturn		int
siena_nvram_size(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__out			size_t *sizep)
{
	siena_parttbl_entry_t *etp;
	int rc;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	rc = siena_nvram_partn_size(enp, etp->partn, sizep);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	*sizep = 0;

	return (rc);
}
/*
 * Size in bytes of a dynamic config header containing (_nitems)
 * firmware version entries: the fixed header plus the trailing
 * fw_version[] vector.
 */
#define	SIENA_DYNAMIC_CFG_SIZE(_nitems)				\
	(sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) *	\
	sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
/*
 * Read and validate the dynamic config partition <partn>, returning
 * a freshly allocated copy in *dcfgp and the full partition size in
 * *sizep.  When <vpd> is true the trailing VPD region is read in as
 * well; otherwise only the header is fetched.  If the flash contents
 * fail validation, a blank but correctly formed header is returned
 * instead so that callers may rewrite the partition.
 *
 * The caller owns *dcfgp and must release it with
 * EFSYS_KMEM_FREE(enp->en_esip, *sizep, *dcfgp).
 */
	__checkReturn		int
siena_nvram_get_dynamic_cfg(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__in			boolean_t vpd,
	__out			siena_mc_dynamic_config_hdr_t **dcfgp,
	__out			size_t *sizep)
{
	siena_mc_dynamic_config_hdr_t *dcfg;
	size_t size;
	uint8_t cksum;
	unsigned int vpd_offset;
	unsigned int vpd_length;
	unsigned int hdr_length;
	unsigned int nversions;
	unsigned int pos;
	unsigned int region;
	int rc;

	EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
		    partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);

	/*
	 * Allocate sufficient memory for the entire dynamic cfg area, even
	 * if we're not actually going to read in the VPD.
	 */
	if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
		goto fail1;

	EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
	if (dcfg == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	/* The first chunk is guaranteed to cover the whole header */
	if ((rc = siena_nvram_partn_read(enp, partn, 0,
	    (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
		goto fail3;

	/* Verify the magic */
	if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
	    != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
		goto invalid1;

	/* All future versions of the structure must be backwards compatible */
	EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);

	hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
	nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
	vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
	vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);

	/* Verify the hdr doesn't overflow the partn size */
	if (hdr_length > size || vpd_offset > size || vpd_length > size ||
	    vpd_length + vpd_offset > size)
		goto invalid2;

	/* Verify the header has room for all its versions */
	if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
	    hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
		goto invalid3;

	/*
	 * Read the remaining portion of the dcfg, either including
	 * the whole of VPD (there is no vpd length in this structure,
	 * so we have to parse each tag), or just the dcfg header itself
	 */
	region = vpd ? vpd_offset + vpd_length : hdr_length;
	if (region > SIENA_NVRAM_CHUNK) {
		if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
		    (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
		    region - SIENA_NVRAM_CHUNK)) != 0)
			goto fail4;
	}

	/* Verify checksum: the header bytes must sum to zero modulo 256 */
	cksum = 0;
	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)dcfg)[pos];
	if (cksum != 0)
		goto invalid4;

	goto done;

invalid4:
	EFSYS_PROBE(invalid4);
invalid3:
	EFSYS_PROBE(invalid3);
invalid2:
	EFSYS_PROBE(invalid2);
invalid1:
	EFSYS_PROBE(invalid1);

	/*
	 * Construct a new "null" dcfg, with an empty version vector,
	 * and an empty VPD chunk trailing. This has the neat side effect
	 * of testing the exception paths in the write path.
	 */
	EFX_POPULATE_DWORD_1(dcfg->magic,
	    EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
	EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
	EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
	    SIENA_MC_DYNAMIC_CONFIG_VERSION);
	EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
	    EFX_DWORD_0, sizeof (*dcfg));
	EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
	EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);

done:
	*dcfgp = dcfg;
	*sizep = size;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);

	/*
	 * The buffer is only valid on the fail3/fail4 paths.  Previously
	 * the free sat below the fail2 label, so the allocation-failure
	 * path called EFSYS_KMEM_FREE() with dcfg == NULL.
	 */
	EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Fetch the firmware subtype for partition <partn> from the
 * FW_SUBTYPE_LIST in the MC's GET_BOARD_CFG response.
 *
 * NOTE(review): partn indexes fw_list without an explicit bound
 * check against the response length beyond the fixed minimum -
 * assumed in range for all partition numbers in siena_parttbl;
 * confirm against MC_CMD_GET_BOARD_CFG_OUT_LEN.
 */
static	__checkReturn		int
siena_nvram_get_subtype(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__out			uint32_t *subtypep)
{
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN];
	efx_word_t *fw_list;
	int rc;

	req.emr_cmd = MC_CMD_GET_BOARD_CFG;
	EFX_STATIC_ASSERT(MC_CMD_GET_BOARD_CFG_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Guard against a truncated MCDI response */
	if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	fw_list = MCDI_OUT2(req, efx_word_t,
			GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
	*subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Return the firmware subtype and 4-part version number of the
 * partition backing the given logical NVRAM type.
 *
 * Some partitions are visible from both ports, so the version
 * reported is the highest one found in any port's dynamic config
 * structure with access to the partition.  version[] is zeroed if
 * no dcfg records a version for it.
 */
	__checkReturn		int
siena_nvram_get_version(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__out			uint32_t *subtypep,
	__out_ecount(4)		uint16_t version[4])
{
	siena_mc_dynamic_config_hdr_t *dcfg;
	siena_parttbl_entry_t *entry;
	unsigned int dcfg_partn;
	unsigned int partn;
	int rc;

	if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}
	partn = entry->partn;

	/* The partition must actually be present on this part */
	if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
		rc = ENOTSUP;
		goto fail2;
	}

	if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
		goto fail3;

	/*
	 * Some partitions are accessible from both ports (for instance BOOTROM)
	 * Find the highest version reported by all dcfg structures on ports
	 * that have access to this partition.
	 */
	version[0] = version[1] = version[2] = version[3] = 0;
	for (entry = siena_parttbl; entry->port > 0; ++entry) {
		unsigned int nitems;
		uint16_t temp[4];
		size_t length;

		if (entry->partn != partn)
			continue;

		/* Each port records versions in its own dcfg partition */
		dcfg_partn = (entry->port == 1)
			? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
			: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
		/*
		 * Ignore missing partitions on port 2, assuming they're due
		 * to running on a single port part.
		 */
		if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
			if (entry->port == 2)
				continue;
		}

		if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
		    B_FALSE, &dcfg, &length)) != 0)
			goto fail4;

		nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
			    EFX_DWORD_0);
		/*
		 * Valid items are fw_version[0 .. nitems-1], so the entry
		 * for this partition is only present when nitems > partn.
		 * (The previous "nitems < entry->partn" test read one item
		 * beyond the populated vector when nitems == partn.)
		 */
		if (nitems <= entry->partn)
			goto done;

		temp[0] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_w,
			    EFX_WORD_0);
		temp[1] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_x,
			    EFX_WORD_0);
		temp[2] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_y,
			    EFX_WORD_0);
		temp[3] = EFX_WORD_FIELD(dcfg->fw_version[partn].version_z,
			    EFX_WORD_0);

		/*
		 * Keep the highest version seen so far.
		 * NOTE(review): memcmp compares bytewise, which is not a
		 * numeric comparison of uint16_t values on little-endian
		 * hosts - behaviour preserved from the original.
		 */
		if (memcmp(version, temp, sizeof (temp)) < 0)
			memcpy(version, temp, sizeof (temp));

done:
		EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Begin a read/write transaction against the partition backing the
 * given NVRAM type: take the MC's partition lock and report the
 * preferred transfer chunk size.  Balanced by siena_nvram_rw_finish().
 */
	__checkReturn		int
siena_nvram_rw_start(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__out			size_t *chunk_sizep)
{
	siena_parttbl_entry_t *etp;
	int rc;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	rc = siena_nvram_partn_lock(enp, etp->partn);
	if (rc != 0)
		goto fail2;

	/* The caller may not care about the chunk size */
	if (chunk_sizep != NULL)
		*chunk_sizep = SIENA_NVRAM_CHUNK;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Read <size> bytes at <offset> from the partition backing the given
 * NVRAM type into <data>.  The partition must already be locked via
 * siena_nvram_rw_start().
 */
	__checkReturn		int
siena_nvram_read_chunk(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__in			unsigned int offset,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	siena_parttbl_entry_t *etp;
	int rc;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	rc = siena_nvram_partn_read(enp, etp->partn, offset, data, size);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Erase the entire partition backing the given NVRAM type.  The
 * partition must already be locked via siena_nvram_rw_start().
 */
	__checkReturn		int
siena_nvram_erase(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type)
{
	siena_parttbl_entry_t *etp;
	size_t partn_size;
	int rc;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	/* Erasing covers the whole partition, so look up its size first */
	rc = siena_nvram_partn_size(enp, etp->partn, &partn_size);
	if (rc != 0)
		goto fail2;

	rc = siena_nvram_partn_erase(enp, etp->partn, 0, partn_size);
	if (rc != 0)
		goto fail3;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Write <size> bytes from <data> at <offset> into the partition
 * backing the given NVRAM type.  The partition must already be
 * locked via siena_nvram_rw_start().
 */
	__checkReturn		int
siena_nvram_write_chunk(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__in			unsigned int offset,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	siena_parttbl_entry_t *etp;
	int rc;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	rc = siena_nvram_partn_write(enp, etp->partn, offset, data, size);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * End a read/write transaction started with siena_nvram_rw_start()
 * by dropping the MC's partition lock.  A type with no partition on
 * this port is silently ignored.
 */
			void
siena_nvram_rw_finish(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type)
{
	siena_parttbl_entry_t *etp;

	etp = siena_parttbl_entry(enp, type);
	if (etp == NULL)
		return;

	siena_nvram_partn_unlock(enp, etp->partn);
}
/*
 * Record a new firmware version (and subtype) for the partition
 * backing the given NVRAM type in this port's dynamic config
 * partition, then erase and rewrite that partition in flash.
 *
 * NOTE: This function will blatt any fields trailing the version
 * vector, or the VPD chunk.
 */
	__checkReturn		int
siena_nvram_set_version(
	__in			efx_nic_t *enp,
	__in			efx_nvram_type_t type,
	__out			uint16_t version[4])
{
	siena_mc_dynamic_config_hdr_t *dcfg = NULL;
	siena_parttbl_entry_t *entry;
	unsigned int dcfg_partn;
	size_t partn_size;
	unsigned int hdr_length;
	unsigned int vpd_length;
	unsigned int vpd_offset;
	unsigned int nitems;
	unsigned int required_hdr_length;
	unsigned int pos;
	uint8_t cksum;
	uint32_t subtype;
	size_t length;
	int rc;

	if ((entry = siena_parttbl_entry(enp, type)) == NULL) {
		rc = ENOTSUP;
		goto fail1;
	}

	/* Versions are recorded in the per-port dynamic config partition */
	dcfg_partn = (entry->port == 1)
		? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
		: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;

	if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
		goto fail2;

	if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
		goto fail2;

	/* Read the whole partition, including the trailing VPD chunk */
	if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
	    B_TRUE, &dcfg, &length)) != 0)
		goto fail3;

	hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
	nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
	vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
	vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);

	/* The version vector must be able to index this partition */
	required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(entry->partn + 1);
	if (required_hdr_length + vpd_length > length) {
		rc = ENOSPC;
		goto fail4;
	}

	/* Move the VPD up to make room for the grown version vector */
	if (vpd_offset < required_hdr_length) {
		(void) memmove((caddr_t)dcfg + required_hdr_length,
			(caddr_t)dcfg + vpd_offset, vpd_length);
		vpd_offset = required_hdr_length;
		EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
				    EFX_DWORD_0, vpd_offset);
	}

	/* Zero-fill any newly exposed header space */
	if (hdr_length < required_hdr_length) {
		(void) memset((caddr_t)dcfg + hdr_length, 0,
			required_hdr_length - hdr_length);
		hdr_length = required_hdr_length;
		EFX_POPULATE_WORD_1(dcfg->length,
				    EFX_WORD_0, hdr_length);
	}

	/* Get the subtype to insert into the fw_subtype array */
	if ((rc = siena_nvram_get_subtype(enp, entry->partn, &subtype)) != 0)
		goto fail5;

	/* Fill out the new version */
	EFX_POPULATE_DWORD_1(dcfg->fw_version[entry->partn].fw_subtype,
			    EFX_DWORD_0, subtype);
	EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_w,
			    EFX_WORD_0, version[0]);
	EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_x,
			    EFX_WORD_0, version[1]);
	EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_y,
			    EFX_WORD_0, version[2]);
	EFX_POPULATE_WORD_1(dcfg->fw_version[entry->partn].version_z,
			    EFX_WORD_0, version[3]);

	/* Update the version count */
	if (nitems < entry->partn + 1) {
		nitems = entry->partn + 1;
		EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
				    EFX_DWORD_0, nitems);
	}

	/* Update the checksum so the header bytes sum to zero again */
	cksum = 0;
	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)dcfg)[pos];
	dcfg->csum.eb_u8[0] -= cksum;

	/* Erase and write the new partition */
	if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
		goto fail6;

	/* Write out the new structure to nvram */
	if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
	    (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
		goto fail7;

	EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);

	siena_nvram_partn_unlock(enp, dcfg_partn);

	return (0);

fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);

	EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
fail3:
	EFSYS_PROBE(fail3);

	/*
	 * The partition lock was taken above; release it so a failed
	 * update does not leave the dynamic config partition locked
	 * (previously the error paths returned with the lock held).
	 */
	siena_nvram_partn_unlock(enp, dcfg_partn);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
#endif /* EFSYS_OPT_NVRAM */
#endif /* EFSYS_OPT_SIENA */

View File

@ -0,0 +1,857 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_SIENA
/*
 * Translate an MCDI PHY capability bitmask (MC_CMD_PHY_CAP_*_LBN bit
 * positions) into the corresponding EFX_PHY_CAP_* bitmask.
 */
static			void
siena_phy_decode_cap(
	__in		uint32_t mcdi_cap,
	__out		uint32_t *maskp)
{
	/* One row per capability bit; both columns are bit positions */
	static const struct {
		unsigned int	mcdi_lbn;
		unsigned int	efx_cap;
	} cap_map[] = {
		{ MC_CMD_PHY_CAP_10HDX_LBN,	EFX_PHY_CAP_10HDX },
		{ MC_CMD_PHY_CAP_10FDX_LBN,	EFX_PHY_CAP_10FDX },
		{ MC_CMD_PHY_CAP_100HDX_LBN,	EFX_PHY_CAP_100HDX },
		{ MC_CMD_PHY_CAP_100FDX_LBN,	EFX_PHY_CAP_100FDX },
		{ MC_CMD_PHY_CAP_1000HDX_LBN,	EFX_PHY_CAP_1000HDX },
		{ MC_CMD_PHY_CAP_1000FDX_LBN,	EFX_PHY_CAP_1000FDX },
		{ MC_CMD_PHY_CAP_10000FDX_LBN,	EFX_PHY_CAP_10000FDX },
		{ MC_CMD_PHY_CAP_PAUSE_LBN,	EFX_PHY_CAP_PAUSE },
		{ MC_CMD_PHY_CAP_ASYM_LBN,	EFX_PHY_CAP_ASYM },
		{ MC_CMD_PHY_CAP_AN_LBN,	EFX_PHY_CAP_AN },
	};
	uint32_t mask = 0;
	unsigned int i;

	for (i = 0; i < sizeof (cap_map) / sizeof (cap_map[0]); i++) {
		if (mcdi_cap & (1 << cap_map[i].mcdi_lbn))
			mask |= (1 << cap_map[i].efx_cap);
	}

	*maskp = mask;
}
/*
 * Decode the GET_LINK flags/speed/fcntl triple into an EFX link mode
 * and flow control mask.  An up link at an unrecognised speed (or
 * 10G half duplex) decodes to EFX_LINK_UNKNOWN; an unrecognised
 * fcntl value is reported via a probe and treated as "off".
 */
static			void
siena_phy_decode_link_mode(
	__in		efx_nic_t *enp,
	__in		uint32_t link_flags,
	__in		unsigned int speed,
	__in		unsigned int fcntl,
	__out		efx_link_mode_t *link_modep,
	__out		unsigned int *fcntlp)
{
	boolean_t fd = !!(link_flags &
		    (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
	boolean_t up = !!(link_flags &
		    (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));

	_NOTE(ARGUNUSED(enp))

	if (!up) {
		*link_modep = EFX_LINK_DOWN;
	} else {
		switch (speed) {
		case 10000:
			/* 10G is only defined at full duplex */
			*link_modep = fd ?
			    EFX_LINK_10000FDX : EFX_LINK_UNKNOWN;
			break;
		case 1000:
			*link_modep = fd ?
			    EFX_LINK_1000FDX : EFX_LINK_1000HDX;
			break;
		case 100:
			*link_modep = fd ?
			    EFX_LINK_100FDX : EFX_LINK_100HDX;
			break;
		case 10:
			*link_modep = fd ?
			    EFX_LINK_10FDX : EFX_LINK_10HDX;
			break;
		default:
			*link_modep = EFX_LINK_UNKNOWN;
			break;
		}
	}

	switch (fcntl) {
	case MC_CMD_FCNTL_OFF:
		*fcntlp = 0;
		break;
	case MC_CMD_FCNTL_RESPOND:
		*fcntlp = EFX_FCNTL_RESPOND;
		break;
	case MC_CMD_FCNTL_BIDIR:
		*fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		EFSYS_PROBE1(mc_pcol_error, int, fcntl);
		*fcntlp = 0;
		break;
	}
}
/*
 * Decode an MCDI LINKCHANGE event into a link mode, updating the
 * port's link partner capability mask (and, when the PHY advertises
 * autonegotiation, the resolved flow control) as a side effect.
 */
			void
siena_phy_link_ev(
	__in		efx_nic_t *enp,
	__in		efx_qword_t *eqp,
	__out		efx_link_mode_t *link_modep)
{
	efx_port_t *epp = &(enp->en_port);
	unsigned int link_flags;
	unsigned int speed;
	unsigned int fcntl;
	efx_link_mode_t link_mode;
	uint32_t lp_cap_mask;

	/*
	 * Convert the LINKCHANGE speed enumeration into mbit/s, in the
	 * same way as GET_LINK encodes the speed
	 */
	switch (MCDI_EV_FIELD(*eqp, LINKCHANGE_SPEED)) {
	case MCDI_EVENT_LINKCHANGE_SPEED_100M:
		speed = 100;
		break;
	case MCDI_EVENT_LINKCHANGE_SPEED_1G:
		speed = 1000;
		break;
	case MCDI_EVENT_LINKCHANGE_SPEED_10G:
		speed = 10000;
		break;
	default:
		/* Unknown encoding: decodes to EFX_LINK_UNKNOWN below */
		speed = 0;
		break;
	}

	link_flags = MCDI_EV_FIELD(*eqp, LINKCHANGE_LINK_FLAGS);
	siena_phy_decode_link_mode(enp, link_flags, speed,
				    MCDI_EV_FIELD(*eqp, LINKCHANGE_FCNTL),
				    &link_mode, &fcntl);
	siena_phy_decode_cap(MCDI_EV_FIELD(*eqp, LINKCHANGE_LP_CAP),
			    &lp_cap_mask);

	/*
	 * It's safe to update ep_lp_cap_mask without the driver's port lock
	 * because presumably any concurrently running efx_port_poll() is
	 * only going to arrive at the same value.
	 *
	 * ep_fcntl has two meanings. It's either the link common fcntl
	 * (if the PHY supports AN), or it's the forced link state. If
	 * the former, it's safe to update the value for the same reason as
	 * for ep_lp_cap_mask. If the latter, then just ignore the value,
	 * because we can race with efx_mac_fcntl_set().
	 */
	epp->ep_lp_cap_mask = lp_cap_mask;
	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
		epp->ep_fcntl = fcntl;

	*link_modep = link_mode;
}
/*
 * Power the PHY up or down.  Powering down requires no action on
 * Siena; powering up verifies that the PHY is responsive and flags
 * a PHY reset for the caller.
 */
	__checkReturn		int
siena_phy_power(
	__in			efx_nic_t *enp,
	__in			boolean_t power)
{
	int rc;

	/* Nothing to do for power-off */
	if (!power)
		return (0);

	/* Check if the PHY is a zombie */
	rc = siena_phy_verify(enp);
	if (rc != 0)
		goto fail1;

	enp->en_reset_flags |= EFX_RESET_PHY;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Query the MC for the current link state and decode the advertised
 * and link-partner capability masks, link mode, flow control, MAC
 * fault status and (optionally) loopback mode into *slsp.
 */
	__checkReturn		int
siena_phy_get_link(
	__in			efx_nic_t *enp,
	__out			siena_link_state_t *slsp)
{
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_GET_LINK_OUT_LEN];
	int rc;

	req.emr_cmd = MC_CMD_GET_LINK;
	EFX_STATIC_ASSERT(MC_CMD_GET_LINK_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Guard against a truncated MCDI response */
	if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
			    &slsp->sls_adv_cap_mask);
	siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
			    &slsp->sls_lp_cap_mask);

	siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
			    MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
			    &slsp->sls_link_mode, &slsp->sls_fcntl);

#if EFSYS_OPT_LOOPBACK
	/* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespace agree */
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
	EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);

	slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
#endif	/* EFSYS_OPT_LOOPBACK */

	/* The MAC is considered up iff the MC reports no MAC fault */
	slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Push the port's PHY configuration to the MC: first SET_LINK with
 * the advertised capabilities, loopback and PHY flags, then
 * SET_ID_LED with the identification LED mode.  The two requests
 * share the payload buffer, so their order matters.
 */
	__checkReturn		int
siena_phy_reconfigure(
	__in			efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_mcdi_req_t req;
	/* Shared input buffer sized for the larger of the two requests */
	uint8_t payload[MAX(MC_CMD_SET_ID_LED_IN_LEN,
			    MC_CMD_SET_LINK_IN_LEN)];
	uint32_t cap_mask;
	unsigned int led_mode;
	unsigned int speed;
	int rc;

	req.emr_cmd = MC_CMD_SET_LINK;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_LINK_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	/* Repack the EFX_PHY_CAP_* mask into individual MCDI fields */
	cap_mask = epp->ep_adv_cap_mask;
	MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
	    PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
	    PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
	    PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
	    PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
	    PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
	    PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
	    PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
	    PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
	    PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
	    PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);

#if EFSYS_OPT_LOOPBACK
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
		    epp->ep_loopback_type);
	/* The loopback speed is fixed by the requested loopback link mode */
	switch (epp->ep_loopback_link_mode) {
	case EFX_LINK_100FDX:
		speed = 100;
		break;
	case EFX_LINK_1000FDX:
		speed = 1000;
		break;
	case EFX_LINK_10000FDX:
		speed = 10000;
		break;
	default:
		speed = 0;
	}
#else
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
	speed = 0;
#endif	/* EFSYS_OPT_LOOPBACK */
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);

#if EFSYS_OPT_PHY_FLAGS
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
#else
	MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
#endif	/* EFSYS_OPT_PHY_FLAGS */

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* And set the blink mode */
	req.emr_cmd = MC_CMD_SET_ID_LED;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
	EFX_STATIC_ASSERT(MC_CMD_SET_ID_LED_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

#if EFSYS_OPT_PHY_LED_CONTROL
	switch (epp->ep_phy_led_mode) {
	case EFX_PHY_LED_DEFAULT:
		led_mode = MC_CMD_LED_DEFAULT;
		break;
	case EFX_PHY_LED_OFF:
		led_mode = MC_CMD_LED_OFF;
		break;
	case EFX_PHY_LED_ON:
		led_mode = MC_CMD_LED_ON;
		break;
	default:
		/* Unreachable for valid led modes; fall back to default */
		EFSYS_ASSERT(0);
		led_mode = MC_CMD_LED_DEFAULT;
	}

	MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
#else
	MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
#endif	/* EFSYS_OPT_PHY_LED_CONTROL */

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Query the MC's view of the PHY state.  Returns 0 when the PHY is
 * OK, and ENOTACTIVE for any other state (a "zombie" PHY, e.g. one
 * awaiting a firmware update, is expected and not probed; other
 * unexpected states fire the mc_pcol_error probe).
 */
	__checkReturn		int
siena_phy_verify(
	__in			efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	uint8_t outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
	uint32_t state;
	int rc;

	req.emr_cmd = MC_CMD_GET_PHY_STATE;
	EFX_STATIC_ASSERT(MC_CMD_GET_PHY_STATE_IN_LEN == 0);
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = outbuf;
	req.emr_out_length = sizeof (outbuf);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Guard against a truncated MCDI response */
	if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
	if (state != MC_CMD_PHY_STATE_OK) {
		if (state != MC_CMD_PHY_STATE_ZOMBIE)
			EFSYS_PROBE1(mc_pcol_error, int, state);
		rc = ENOTACTIVE;
		goto fail3;
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Reading the PHY OUI is not supported on Siena (the PHY is managed
 * by the MC), so this is a stub that always fails with ENOTSUP.
 */
	__checkReturn		int
siena_phy_oui_get(
	__in			efx_nic_t *enp,
	__out			uint32_t *ouip)
{
	_NOTE(ARGUNUSED(enp, ouip))

	return (ENOTSUP);
}
#if EFSYS_OPT_PHY_STATS
/*
 * Helper macros for decoding the PHY statistics DMA buffer.
 *
 * SIENA_SIMPLE_STAT_SET: if statistic <_mc_record> is present in the
 * MC's validity mask <_vmask>, mark <_efx_record> as supported in
 * <_smask> and, when an output array and DMA buffer are supplied,
 * copy the 32-bit value at dword index <_mc_record> into the array.
 *
 * SIENA_SIMPLE_STAT_SET2: shorthand for statistics whose MC_CMD_ and
 * EFX_PHY_STAT_ names share the same suffix.
 */
#define	SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat,	\
			    _mc_record, _efx_record)		\
	if ((_vmask) & (1ULL << (_mc_record))) {		\
		(_smask) |= (1ULL << (_efx_record));		\
		if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) {	\
			efx_dword_t dword;			\
			EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
			(_stat)[_efx_record] =			\
				EFX_DWORD_FIELD(dword, EFX_DWORD_0);	\
		}						\
	}

#define	SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record)	\
	SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat,	\
			    MC_CMD_ ## _record,			\
			    EFX_PHY_STAT_ ## _record)
/*
 * Decode the PHY statistics DMA'd by the MC (one 32-bit value per
 * MC_CMD_* statistic index) into the EFX statistic array <stat>,
 * accumulating the mask of supported statistics into *smaskp.
 * <esmp>/<stat> may be absent, in which case only the support mask
 * is computed.  Multi-lane signal/sync words are unpacked into one
 * EFX statistic per lane.
 */
			void
siena_phy_decode_stats(
	__in					efx_nic_t *enp,
	__in					uint32_t vmask,
	__in_opt				efsys_mem_t *esmp,
	__out_opt				uint64_t *smaskp,
	__out_ecount_opt(EFX_PHY_NSTATS)	uint32_t *stat)
{
	uint64_t smask = 0;

	_NOTE(ARGUNUSED(enp))

	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);

	/* The PMA/PMD signal word packs one bit per lane (bits 1..4) */
	if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
		smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
			    (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
			    (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
			    (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
		if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
			efx_dword_t dword;
			uint32_t sig;
			EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
					    &dword);
			sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
			stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
			stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
			stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
			stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
		}
	}

	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
	    EFX_PHY_STAT_SNR_A);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
	    EFX_PHY_STAT_SNR_B);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
	    EFX_PHY_STAT_SNR_C);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
	    EFX_PHY_STAT_SNR_D);

	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);

	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
	    EFX_PHY_STAT_PHY_XS_LINK_UP);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
	    EFX_PHY_STAT_PHY_XS_RX_FAULT);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
	    EFX_PHY_STAT_PHY_XS_TX_FAULT);
	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
	    EFX_PHY_STAT_PHY_XS_ALIGN);

	/* The PHY XS sync word packs one bit per lane (bits 0..3) */
	if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
		/*
		 * NOTE(review): these mask bits use "1 <<" rather than the
		 * "1ULL <<" used elsewhere for the 64-bit smask; assumed
		 * safe because the EFX_PHY_STAT_PHY_XS_SYNC_* values are
		 * below 32 - confirm against the efx_phy_stat_t enum.
		 */
		smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
			    (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
			    (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
			    (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
		if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
			efx_dword_t dword;
			uint32_t sync;
			EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
			sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
			stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
			stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
			stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
			stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
		}
	}

	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);

	SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
	    EFX_PHY_STAT_CL22EXT_LINK_UP);

	if (smaskp != NULL)
		*smaskp = smask;
}
/*
 * Ask the MC to DMA the current PHY statistics into <esmp>, then
 * decode them into <stat>.  Returns 0 on success or the MCDI errno
 * on failure.
 */
	__checkReturn			int
siena_phy_stats_update(
	__in				efx_nic_t *enp,
	__in				efsys_mem_t *esmp,
	__out_ecount(EFX_PHY_NSTATS)	uint32_t *stat)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t vmask = encp->enc_siena_phy_stat_mask;
	uint8_t payload[MC_CMD_PHY_STATS_IN_LEN];
	uint64_t smask;
	efx_mcdi_req_t req;
	int rc;

	req.emr_cmd = MC_CMD_PHY_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_PHY_STATS_OUT_DMA_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	/* The MC DMAs the statistics directly into the caller's buffer */
	MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
			    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
	MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
			    EFSYS_MEM_ADDR(esmp) >> 32);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}
	EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);

	siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
	EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	/* Propagate the MCDI failure (this path previously returned 0) */
	return (rc);
}
#endif /* EFSYS_OPT_PHY_STATS */
#if EFSYS_OPT_PHY_PROPS
#if EFSYS_OPT_NAMES
/*
 * Siena PHYs expose no named properties, so there is no name to
 * return; this stub always yields NULL.
 */
extern	const char __cs *
siena_phy_prop_name(
	__in	efx_nic_t *enp,
	__in	unsigned int id)
{
	_NOTE(ARGUNUSED(enp, id))

	return (NULL);
}
#endif /* EFSYS_OPT_NAMES */
/*
 * Siena PHYs expose no properties to get; this stub always fails
 * with ENOTSUP.
 */
extern	__checkReturn	int
siena_phy_prop_get(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t flags,
	__out		uint32_t *valp)
{
	_NOTE(ARGUNUSED(enp, id, flags, valp))

	return (ENOTSUP);
}
/*
 * Siena PHYs expose no properties to set; this stub always fails
 * with ENOTSUP.
 */
extern	__checkReturn	int
siena_phy_prop_set(
	__in		efx_nic_t *enp,
	__in		unsigned int id,
	__in		uint32_t val)
{
	_NOTE(ARGUNUSED(enp, id, val))

	return (ENOTSUP);
}
#endif /* EFSYS_OPT_PHY_PROPS */
#if EFSYS_OPT_PHY_BIST
/*
 * Ask the MC to start a PHY BIST of the requested type.  An
 * unrecognised type asserts and fails with EINVAL.
 */
	__checkReturn		int
siena_phy_bist_start(
	__in			efx_nic_t *enp,
	__in			efx_phy_bist_type_t type)
{
	uint8_t payload[MC_CMD_START_BIST_IN_LEN];
	efx_mcdi_req_t req;
	int rc;

	req.emr_cmd = MC_CMD_START_BIST;
	req.emr_in_buf = payload;
	req.emr_in_length = sizeof (payload);
	EFX_STATIC_ASSERT(MC_CMD_START_BIST_OUT_LEN == 0);
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	switch (type) {
	case EFX_PHY_BIST_TYPE_NORMAL:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
		break;
	case EFX_PHY_BIST_TYPE_CABLE_SHORT:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_PHY_BIST_CABLE_SHORT);
		break;
	case EFX_PHY_BIST_TYPE_CABLE_LONG:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_PHY_BIST_CABLE_LONG);
		break;
	default:
		EFSYS_ASSERT(0);
		/*
		 * Previously this fell through and sent an uninitialized
		 * BIST type dword to the MC when assertions are compiled
		 * out; fail explicitly instead.
		 */
		rc = EINVAL;
		goto fail1;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
/*
 * Translate an MCDI SFT9001 cable-pair BIST status code into the
 * corresponding EFX_PHY_CABLE_STATUS_* value.  Unrecognised codes
 * map to EFX_PHY_CABLE_STATUS_INVALID.
 */
static	__checkReturn		unsigned long
siena_phy_sft9001_bist_status(
	__in			uint16_t code)
{
	unsigned long status;

	if (code == MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY)
		status = EFX_PHY_CABLE_STATUS_BUSY;
	else if (code == MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT)
		status = EFX_PHY_CABLE_STATUS_INTERPAIRSHORT;
	else if (code == MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT)
		status = EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT;
	else if (code == MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN)
		status = EFX_PHY_CABLE_STATUS_OPEN;
	else if (code == MC_CMD_POLL_BIST_SFT9001_PAIR_OK)
		status = EFX_PHY_CABLE_STATUS_OK;
	else
		status = EFX_PHY_CABLE_STATUS_INVALID;

	return (status);
}
__checkReturn int
siena_phy_bist_poll(
__in efx_nic_t *enp,
__in efx_phy_bist_type_t type,
__out efx_phy_bist_result_t *resultp,
__out_opt __drv_when(count > 0, __notnull)
uint32_t *value_maskp,
__out_ecount_opt(count) __drv_when(count > 0, __notnull)
unsigned long *valuesp,
__in size_t count)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
uint8_t payload[MCDI_CTL_SDU_LEN_MAX];
uint32_t value_mask = 0;
efx_mcdi_req_t req;
uint32_t result;
int rc;
req.emr_cmd = MC_CMD_POLL_BIST;
_NOTE(CONSTANTCONDITION)
EFSYS_ASSERT(MC_CMD_POLL_BIST_IN_LEN == 0);
req.emr_in_buf = NULL;
req.emr_in_length = 0;
req.emr_out_buf = payload;
req.emr_out_length = sizeof (payload);
efx_mcdi_execute(enp, &req);
if (req.emr_rc != 0) {
rc = req.emr_rc;
goto fail1;
}
if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
rc = EMSGSIZE;
goto fail2;
}
if (count > 0)
(void) memset(valuesp, '\0', count * sizeof (unsigned long));
result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
/* Extract PHY specific results */
if (result == MC_CMD_POLL_BIST_PASSED &&
encp->enc_phy_type == EFX_PHY_SFT9001B &&
req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
(type == EFX_PHY_BIST_TYPE_CABLE_SHORT ||
type == EFX_PHY_BIST_TYPE_CABLE_LONG)) {
uint16_t word;
if (count > EFX_PHY_BIST_CABLE_LENGTH_A) {
if (valuesp != NULL)
valuesp[EFX_PHY_BIST_CABLE_LENGTH_A] =
MCDI_OUT_DWORD(req,
POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_A);
}
if (count > EFX_PHY_BIST_CABLE_LENGTH_B) {
if (valuesp != NULL)
valuesp[EFX_PHY_BIST_CABLE_LENGTH_B] =
MCDI_OUT_DWORD(req,
POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_B);
}
if (count > EFX_PHY_BIST_CABLE_LENGTH_C) {
if (valuesp != NULL)
valuesp[EFX_PHY_BIST_CABLE_LENGTH_C] =
MCDI_OUT_DWORD(req,
POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_C);
}
if (count > EFX_PHY_BIST_CABLE_LENGTH_D) {
if (valuesp != NULL)
valuesp[EFX_PHY_BIST_CABLE_LENGTH_D] =
MCDI_OUT_DWORD(req,
POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
value_mask |= (1 << EFX_PHY_BIST_CABLE_LENGTH_D);
}
if (count > EFX_PHY_BIST_CABLE_STATUS_A) {
if (valuesp != NULL) {
word = MCDI_OUT_WORD(req,
POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
valuesp[EFX_PHY_BIST_CABLE_STATUS_A] =
siena_phy_sft9001_bist_status(word);
}
value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_A);
}
if (count > EFX_PHY_BIST_CABLE_STATUS_B) {
if (valuesp != NULL) {
word = MCDI_OUT_WORD(req,
POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
valuesp[EFX_PHY_BIST_CABLE_STATUS_B] =
siena_phy_sft9001_bist_status(word);
}
value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_B);
}
if (count > EFX_PHY_BIST_CABLE_STATUS_C) {
if (valuesp != NULL) {
word = MCDI_OUT_WORD(req,
POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
valuesp[EFX_PHY_BIST_CABLE_STATUS_C] =
siena_phy_sft9001_bist_status(word);
}
value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_C);
}
if (count > EFX_PHY_BIST_CABLE_STATUS_D) {
if (valuesp != NULL) {
word = MCDI_OUT_WORD(req,
POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
valuesp[EFX_PHY_BIST_CABLE_STATUS_D] =
siena_phy_sft9001_bist_status(word);
}
value_mask |= (1 << EFX_PHY_BIST_CABLE_STATUS_D);
}
} else if (result == MC_CMD_POLL_BIST_FAILED &&
encp->enc_phy_type == EFX_PHY_QLX111V &&
req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
count > EFX_PHY_BIST_FAULT_CODE) {
if (valuesp != NULL)
valuesp[EFX_PHY_BIST_FAULT_CODE] =
MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
value_mask |= 1 << EFX_PHY_BIST_FAULT_CODE;
}
if (value_maskp != NULL)
*value_maskp = value_mask;
EFSYS_ASSERT(resultp != NULL);
if (result == MC_CMD_POLL_BIST_RUNNING)
*resultp = EFX_PHY_BIST_RESULT_RUNNING;
else if (result == MC_CMD_POLL_BIST_PASSED)
*resultp = EFX_PHY_BIST_RESULT_PASSED;
else
*resultp = EFX_PHY_BIST_RESULT_FAILED;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
			void
siena_phy_bist_stop(
	__in		efx_nic_t *enp,
	__in		efx_phy_bist_type_t type)
{
	/*
	 * Siena firmware provides no MCDI request to abort a BIST once it
	 * has been started; callers simply stop polling for the result.
	 * The parameters are retained for interface symmetry with other
	 * PHY implementations.
	 */
	/* There is no way to stop BIST on Siena */
	_NOTE(ARGUNUSED(enp, type))
}
#endif /* EFSYS_OPT_PHY_BIST */
#endif /* EFSYS_OPT_SIENA */

View File

@ -0,0 +1,172 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_SIENA
void
siena_sram_init(
__in efx_nic_t *enp)
{
efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
efx_oword_t oword;
uint32_t rx_base, tx_base;
EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
rx_base = encp->enc_buftbl_limit;
tx_base = rx_base + (encp->enc_rxq_limit * 64);
/* Initialize the transmit descriptor cache */
EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, 1); /* 16 descriptors */
EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
/* Initialize the receive descriptor cache */
EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, 3); /* 64 descriptors */
EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
/* Set receive descriptor pre-fetch low water mark */
EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
/* Set the event queue to use for SRAM updates */
EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
}
#if EFSYS_OPT_DIAG
	__checkReturn	int
siena_sram_test(
	__in		efx_nic_t *enp,
	__in		efx_sram_pattern_fn_t func)
{
	/*
	 * Destructively test SRAM by writing a caller-supplied pattern
	 * through the half-width buffer table and reading it back, first
	 * as generated and then negated.  Returns 0 on success or EFAULT
	 * on the first mismatch.  The NIC must be reinitialized afterwards
	 * (the API requires efx_nic_fini() following an SRAM test).
	 */
	efx_oword_t oword;
	efx_qword_t qword;
	efx_qword_t verify;
	size_t rows;
	unsigned int wptr;
	unsigned int rptr;
	int rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/* Reconfigure into HALF buffer table mode */
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);

	/*
	 * Move the descriptor caches up to the top of SRAM, and test
	 * all of SRAM below them. We only miss out one row here.
	 */
	rows = SIENA_SRAM_ROWS - 1;
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
	EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
	EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);

	/*
	 * Write the pattern through BUF_HALF_TBL. Write
	 * in 64 entry batches, waiting 1us in between each batch
	 * to guarantee not to overflow the SRAM fifo
	 */
	for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
		func(wptr, B_FALSE, &qword);
		EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);

		/* Defer verification until a full batch (or the last row) */
		if ((wptr - rptr) < 64 && wptr < rows - 1)
			continue;

		EFSYS_SPIN(1);

		/* Regenerate the pattern and compare against readback */
		for (; rptr <= wptr; ++rptr) {
			func(rptr, B_FALSE, &qword);
			EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
			    &verify);

			if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
				rc = EFAULT;
				goto fail1;
			}
		}
	}

	/* And do the same negated */
	for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
		func(wptr, B_TRUE, &qword);
		EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);

		if ((wptr - rptr) < 64 && wptr < rows - 1)
			continue;

		EFSYS_SPIN(1);

		for (; rptr <= wptr; ++rptr) {
			func(rptr, B_TRUE, &qword);
			EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
			    &verify);

			if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
				rc = EFAULT;
				goto fail2;
			}
		}
	}

	/* Restore back to FULL buffer table mode */
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);

	/*
	 * We don't need to reconfigure SRAM again because the API
	 * requires efx_nic_fini() to be called after an sram test.
	 */
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	/* Restore back to FULL buffer table mode */
	EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
	EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);

	return (rc);
}
#endif /* EFSYS_OPT_DIAG */
#endif /* EFSYS_OPT_SIENA */

View File

@ -0,0 +1,603 @@
/*-
* Copyright 2009 Solarflare Communications Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "efsys.h"
#include "efx.h"
#include "efx_types.h"
#include "efx_regs.h"
#include "efx_impl.h"
#if EFSYS_OPT_VPD
#if EFSYS_OPT_SIENA
static	__checkReturn		int
siena_vpd_get_static(
	__in			efx_nic_t *enp,
	__in			unsigned int partn,
	__deref_out_bcount_opt(*sizep)	caddr_t *svpdp,
	__out			size_t *sizep)
{
	/*
	 * Read and validate the static VPD area from the given static
	 * config NVRAM partition.  On success *svpdp points to a freshly
	 * allocated copy of the VPD data (NULL when the area is empty)
	 * and *sizep is its length; the caller owns and must free the
	 * allocation.
	 */
	siena_mc_static_config_hdr_t *scfg;
	caddr_t svpd;
	size_t size;
	uint8_t cksum;
	unsigned int vpd_offset;
	unsigned int vpd_length;
	unsigned int hdr_length;
	unsigned int pos;
	unsigned int region;
	int rc;

	EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
		    partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);

	/* Allocate sufficient memory for the entire static cfg area */
	if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
		goto fail1;

	EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
	if (scfg == NULL) {
		rc = ENOMEM;
		goto fail2;
	}

	/* First chunk is enough to find the header and VPD location */
	if ((rc = siena_nvram_partn_read(enp, partn, 0,
	    (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
		goto fail3;

	/* Verify the magic number */
	if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
	    SIENA_MC_STATIC_CONFIG_MAGIC) {
		rc = EINVAL;
		goto fail4;
	}

	/* All future versions of the structure must be backwards compatable */
	EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);

	hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
	vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
	vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);

	/* Verify the hdr doesn't overflow the sector size */
	if (hdr_length > size || vpd_offset > size || vpd_length > size ||
	    vpd_length + vpd_offset > size) {
		rc = EINVAL;
		goto fail5;
	}

	/* Read the remainder of scfg + static vpd */
	region = vpd_offset + vpd_length;
	if (region > SIENA_NVRAM_CHUNK) {
		if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
		    (caddr_t)scfg + SIENA_NVRAM_CHUNK,
		    region - SIENA_NVRAM_CHUNK)) != 0)
			goto fail6;
	}

	/* Verify checksum: the header bytes must sum to zero modulo 256 */
	cksum = 0;
	for (pos = 0; pos < hdr_length; pos++)
		cksum += ((uint8_t *)scfg)[pos];
	if (cksum != 0) {
		rc = EINVAL;
		goto fail7;
	}

	if (vpd_length == 0)
		svpd = NULL;
	else {
		/* Copy the vpd data out */
		EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
		if (svpd == NULL) {
			rc = ENOMEM;
			goto fail8;
		}
		memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
	}

	/* Sector buffer no longer needed once the VPD has been copied out */
	EFSYS_KMEM_FREE(enp->en_esip, size, scfg);

	*svpdp = svpd;
	*sizep = vpd_length;

	return (0);

	/* fail8..fail3 fall through to fail2, which frees the sector buffer */
fail8:
	EFSYS_PROBE(fail8);
fail7:
	EFSYS_PROBE(fail7);
fail6:
	EFSYS_PROBE(fail6);
fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);

	EFSYS_KMEM_FREE(enp->en_esip, size, scfg);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
__checkReturn int
siena_vpd_init(
__in efx_nic_t *enp)
{
efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
caddr_t svpd = NULL;
unsigned partn;
size_t size = 0;
int rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
partn = (emip->emi_port == 1)
? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
: MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
/*
* We need the static VPD sector to present a unified static+dynamic
* VPD, that is, basically on every read, write, verify cycle. Since
* it should *never* change we can just cache it here.
*/
if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
goto fail1;
if (svpd != NULL && size > 0) {
if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
goto fail2;
}
enp->en_u.siena.enu_svpd = svpd;
enp->en_u.siena.enu_svpd_length = size;
return (0);
fail2:
EFSYS_PROBE(fail2);
EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
__checkReturn int
siena_vpd_size(
__in efx_nic_t *enp,
__out size_t *sizep)
{
efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
unsigned int partn;
int rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
/*
* This function returns the total size the user should allocate
* for all VPD operations. We've already cached the static vpd,
* so we just need to return an upper bound on the dynamic vpd.
* Since the dynamic_config structure can change under our feet,
* (as version numbers are inserted), just be safe and return the
* total size of the dynamic_config *sector*
*/
partn = (emip->emi_port == 1)
? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
goto fail1;
return (0);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
	__checkReturn		int
siena_vpd_read(
	__in			efx_nic_t *enp,
	__out_bcount(size)	caddr_t data,
	__in			size_t size)
{
	/*
	 * Read the dynamic VPD area into the caller's buffer.  Any space
	 * beyond the VPD data is padded with 0xff, matching what update
	 * operations write.  The caller's buffer should be sized via
	 * siena_vpd_size().
	 */
	efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
	siena_mc_dynamic_config_hdr_t *dcfg;
	unsigned int vpd_length;
	unsigned int vpd_offset;
	unsigned int dcfg_partn;
	size_t dcfg_size;
	int rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	dcfg_partn = (emip->emi_port == 1)
	    ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
	    : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;

	/* B_TRUE: accept a default (empty) dynamic cfg if none is present */
	if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
	    B_TRUE, &dcfg, &dcfg_size)) != 0)
		goto fail1;

	vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
	vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);

	if (vpd_length > size) {
		rc = EFAULT;	/* VPD area larger than the caller's buffer */
		goto fail2;
	}

	/* Redundant with the check above; documents the invariant */
	EFSYS_ASSERT3U(vpd_length, <=, size);
	memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);

	/* Pad data with all-1s, consistent with update operations */
	memset(data + vpd_length, 0xff, size - vpd_length);

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

	return (0);

fail2:
	EFSYS_PROBE(fail2);

	EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);

fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
	__checkReturn		int
siena_vpd_verify(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size)
{
	/*
	 * Validate a candidate dynamic VPD image: it must be structurally
	 * well-formed, and no (tag, keyword) pair it contains may duplicate
	 * one already present in the cached static VPD (static entries are
	 * read-only).  Returns 0 if acceptable, EEXIST on duplication, or
	 * the underlying parse error.
	 */
	efx_vpd_tag_t stag;
	efx_vpd_tag_t dtag;
	efx_vpd_keyword_t skey;
	efx_vpd_keyword_t dkey;
	unsigned int scont;
	unsigned int dcont;

	int rc;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);

	/*
	 * Strictly you could take the view that dynamic vpd is optional.
	 * Instead, to conform more closely to the read/verify/reinit()
	 * paradigm, we require dynamic vpd. siena_vpd_reinit() will
	 * reinitialize it as required.
	 */
	if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
		goto fail1;

	/*
	 * Verify that there is no duplication between the static and
	 * dynamic cfg sectors.
	 */
	if (enp->en_u.siena.enu_svpd_length == 0)
		goto done;

	/* Outer cursor walks the dynamic image; dcont == 0 marks the end */
	dcont = 0;
	_NOTE(CONSTANTCONDITION)
	while (1) {
		if ((rc = efx_vpd_hunk_next(data, size, &dtag,
		    &dkey, NULL, NULL, &dcont)) != 0)
			goto fail2;
		if (dcont == 0)
			break;

		/* Inner cursor rescans the static VPD for each dynamic entry */
		scont = 0;
		_NOTE(CONSTANTCONDITION)
		while (1) {
			if ((rc = efx_vpd_hunk_next(
			    enp->en_u.siena.enu_svpd,
			    enp->en_u.siena.enu_svpd_length, &stag, &skey,
			    NULL, NULL, &scont)) != 0)
				goto fail3;
			if (scont == 0)
				break;

			if (stag == dtag && skey == dkey) {
				rc = EEXIST;
				goto fail4;
			}
		}
	}

done:
	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, int, rc);

	return (rc);
}
__checkReturn int
siena_vpd_reinit(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size)
{
boolean_t wantpid;
int rc;
/*
* Only create a PID if the dynamic cfg doesn't have one
*/
if (enp->en_u.siena.enu_svpd_length == 0)
wantpid = B_TRUE;
else {
unsigned int offset;
uint8_t length;
rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
enp->en_u.siena.enu_svpd_length,
EFX_VPD_ID, 0, &offset, &length);
if (rc == 0)
wantpid = B_FALSE;
else if (rc == ENOENT)
wantpid = B_TRUE;
else
goto fail1;
}
if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
goto fail2;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
__checkReturn int
siena_vpd_get(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__inout efx_vpd_value_t *evvp)
{
unsigned int offset;
uint8_t length;
int rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
/* Attempt to satisfy the request from svpd first */
if (enp->en_u.siena.enu_svpd_length > 0) {
if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
evvp->evv_keyword, &offset, &length)) == 0) {
evvp->evv_length = length;
memcpy(evvp->evv_value,
enp->en_u.siena.enu_svpd + offset, length);
return (0);
} else if (rc != ENOENT)
goto fail1;
}
/* And then from the provided data buffer */
if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
evvp->evv_keyword, &offset, &length)) != 0)
goto fail2;
evvp->evv_length = length;
memcpy(evvp->evv_value, data + offset, length);
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
__checkReturn int
siena_vpd_set(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size,
__in efx_vpd_value_t *evvp)
{
int rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
/* If the provided (tag,keyword) exists in svpd, then it is readonly */
if (enp->en_u.siena.enu_svpd_length > 0) {
unsigned int offset;
uint8_t length;
if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
evvp->evv_keyword, &offset, &length)) == 0) {
rc = EACCES;
goto fail1;
}
}
if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
goto fail2;
return (0);
fail2:
EFSYS_PROBE(fail2);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
	__checkReturn		int
siena_vpd_next(
	__in			efx_nic_t *enp,
	__in_bcount(size)	caddr_t data,
	__in			size_t size,
	__out			efx_vpd_value_t *evvp,
	__inout			unsigned int *contp)
{
	/* Iteration over Siena VPD keywords is not implemented */
	_NOTE(ARGUNUSED(enp, data, size, evvp, contp))

	return (ENOTSUP);
}
__checkReturn int
siena_vpd_write(
__in efx_nic_t *enp,
__in_bcount(size) caddr_t data,
__in size_t size)
{
efx_mcdi_iface_t *emip = &(enp->en_u.siena.enu_mip);
siena_mc_dynamic_config_hdr_t *dcfg;
unsigned int vpd_offset;
unsigned int dcfg_partn;
unsigned int hdr_length;
unsigned int pos;
uint8_t cksum;
size_t partn_size, dcfg_size;
size_t vpd_length;
int rc;
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
/* Determine total length of all tags */
if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
goto fail1;
/* Lock dynamic config sector for write, and read structure only */
dcfg_partn = (emip->emi_port == 1)
? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
: MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
goto fail2;
if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
goto fail2;
if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
B_FALSE, &dcfg, &dcfg_size)) != 0)
goto fail3;
hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
/* Allocated memory should have room for the new VPD */
if (hdr_length + vpd_length > dcfg_size) {
rc = ENOSPC;
goto fail3;
}
/* Copy in new vpd and update header */
vpd_offset = dcfg_size - vpd_length;
EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
EFX_DWORD_0, vpd_offset);
memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length,
EFX_DWORD_0, vpd_length);
/* Update the checksum */
cksum = 0;
for (pos = 0; pos < hdr_length; pos++)
cksum += ((uint8_t *)dcfg)[pos];
dcfg->csum.eb_u8[0] -= cksum;
/* Erase and write the new sector */
if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
goto fail4;
/* Write out the new structure to nvram */
if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
vpd_offset + vpd_length)) != 0)
goto fail5;
EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
siena_nvram_partn_unlock(enp, dcfg_partn);
return (0);
fail5:
EFSYS_PROBE(fail5);
fail4:
EFSYS_PROBE(fail4);
fail3:
EFSYS_PROBE(fail3);
EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
fail2:
EFSYS_PROBE(fail2);
siena_nvram_partn_unlock(enp, dcfg_partn);
fail1:
EFSYS_PROBE1(fail1, int, rc);
return (rc);
}
void
siena_vpd_fini(
__in efx_nic_t *enp)
{
EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
if (enp->en_u.siena.enu_svpd_length > 0) {
EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
enp->en_u.siena.enu_svpd);
enp->en_u.siena.enu_svpd = NULL;
enp->en_u.siena.enu_svpd_length = 0;
}
}
#endif /* EFSYS_OPT_SIENA */
#endif /* EFSYS_OPT_VPD */

775
sys/dev/sfxge/sfxge.c Normal file
View File

@ -0,0 +1,775 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/taskqueue.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include "common/efx.h"
#include "sfxge.h"
#include "sfxge_rx.h"
#define SFXGE_CAP (IFCAP_VLAN_MTU | \
IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | IFCAP_TSO | \
IFCAP_JUMBO_MTU | IFCAP_LRO | \
IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE)
#define SFXGE_CAP_ENABLE SFXGE_CAP
#define SFXGE_CAP_FIXED (IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM | \
IFCAP_JUMBO_MTU | IFCAP_LINKSTATE)
MALLOC_DEFINE(M_SFXGE, "sfxge", "Solarflare 10GigE driver");
static void
sfxge_reset(void *arg, int npending);
static int
sfxge_start(struct sfxge_softc *sc)
{
	int rc;

	/*
	 * Bring the NIC up: initialize the hardware, then start interrupt,
	 * event, receive, transmit and port processing in dependency order.
	 * Must be called with the softc lock exclusively held; calling it
	 * when already started is a harmless no-op.
	 */
	sx_assert(&sc->softc_lock, LA_XLOCKED);

	if (sc->init_state == SFXGE_STARTED)
		return (0);	/* style(9): parenthesized, as elsewhere */

	if (sc->init_state != SFXGE_REGISTERED) {
		rc = EINVAL;
		goto fail;
	}

	if ((rc = efx_nic_init(sc->enp)) != 0)
		goto fail;

	/* Start processing interrupts. */
	if ((rc = sfxge_intr_start(sc)) != 0)
		goto fail2;

	/* Start processing events. */
	if ((rc = sfxge_ev_start(sc)) != 0)
		goto fail3;

	/* Start the receiver side. */
	if ((rc = sfxge_rx_start(sc)) != 0)
		goto fail4;

	/* Start the transmitter side. */
	if ((rc = sfxge_tx_start(sc)) != 0)
		goto fail5;

	/* Fire up the port. */
	if ((rc = sfxge_port_start(sc)) != 0)
		goto fail6;

	sc->init_state = SFXGE_STARTED;

	/* Tell the stack we're running. */
	sc->ifnet->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifnet->if_drv_flags &= ~IFF_DRV_OACTIVE;

	return (0);

	/* Unwind in reverse order of the start sequence above */
fail6:
	sfxge_tx_stop(sc);

fail5:
	sfxge_rx_stop(sc);

fail4:
	sfxge_ev_stop(sc);

fail3:
	sfxge_intr_stop(sc);

fail2:
	efx_nic_fini(sc->enp);

fail:
	device_printf(sc->dev, "sfxge_start: %d\n", rc);

	return (rc);
}
static void
sfxge_if_init(void *arg)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;

	/* ifnet init callback: bring the hardware up under the softc lock */
	sx_xlock(&sc->softc_lock);
	(void)sfxge_start(sc);
	sx_xunlock(&sc->softc_lock);
}
static void
sfxge_stop(struct sfxge_softc *sc)
{
	/*
	 * Quiesce the NIC: tear down port, transmit, receive, event and
	 * interrupt processing in the reverse of the order used by
	 * sfxge_start(), then shut the hardware down.  Must be called
	 * with the softc lock exclusively held; a no-op unless the device
	 * is currently started.
	 */
	sx_assert(&sc->softc_lock, LA_XLOCKED);

	if (sc->init_state != SFXGE_STARTED)
		return;

	/* Flip the state first so re-entrant calls become no-ops */
	sc->init_state = SFXGE_REGISTERED;

	/* Stop the port. */
	sfxge_port_stop(sc);

	/* Stop the transmitter. */
	sfxge_tx_stop(sc);

	/* Stop the receiver. */
	sfxge_rx_stop(sc);

	/* Stop processing events. */
	sfxge_ev_stop(sc);

	/* Stop processing interrupts. */
	sfxge_intr_stop(sc);

	efx_nic_fini(sc->enp);

	sc->ifnet->if_drv_flags &= ~IFF_DRV_RUNNING;
}
static int
sfxge_if_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
{
	/*
	 * ifnet ioctl handler: interface flags, MTU, multicast membership,
	 * capabilities and media requests.  Anything unrecognized is passed
	 * through to ether_ioctl().
	 */
	struct sfxge_softc *sc;
	struct ifreq *ifr;
	int error;

	ifr = (struct ifreq *)data;
	sc = ifp->if_softc;
	error = 0;

	switch (command) {
	case SIOCSIFFLAGS:
		/*
		 * If only the promiscuous/allmulti bits changed while
		 * running, just repush the MAC filter; otherwise start or
		 * stop the port as IFF_UP dictates.
		 */
		sx_xlock(&sc->softc_lock);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				if ((ifp->if_flags ^ sc->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					sfxge_mac_filter_set(sc);
				}
			} else
				sfxge_start(sc);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				sfxge_stop(sc);
		sc->if_flags = ifp->if_flags;
		sx_xunlock(&sc->softc_lock);
		break;
	case SIOCSIFMTU:
		if (ifr->ifr_mtu == ifp->if_mtu) {
			/* Nothing to do */
			error = 0;
		} else if (ifr->ifr_mtu > SFXGE_MAX_MTU) {
			error = EINVAL;
		} else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
			/*
			 * NOTE(review): if_mtu is updated here without
			 * taking the softc lock — confirm there is no race
			 * with a concurrent sfxge_start().
			 */
			ifp->if_mtu = ifr->ifr_mtu;
			error = 0;
		} else {
			/* Restart required */
			sx_xlock(&sc->softc_lock);
			sfxge_stop(sc);
			ifp->if_mtu = ifr->ifr_mtu;
			error = sfxge_start(sc);
			sx_xunlock(&sc->softc_lock);
			if (error) {
				/* Could not restart: force the port down */
				ifp->if_flags &= ~IFF_UP;
				ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
				if_down(ifp);
			}
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
			sfxge_mac_filter_set(sc);
		break;
	case SIOCSIFCAP:
		sx_xlock(&sc->softc_lock);

		/*
		 * The networking core already rejects attempts to
		 * enable capabilities we don't have. We still have
		 * to reject attempts to disable capabilities that we
		 * can't (yet) disable.
		 */
		if (~ifr->ifr_reqcap & SFXGE_CAP_FIXED) {
			error = EINVAL;
			sx_xunlock(&sc->softc_lock);
			break;
		}

		ifp->if_capenable = ifr->ifr_reqcap;

		/* Keep if_hwassist in sync with the checksum/TSO caps */
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP);
		else
			ifp->if_hwassist &= ~(CSUM_IP | CSUM_TCP | CSUM_UDP);
		if (ifp->if_capenable & IFCAP_TSO)
			ifp->if_hwassist |= CSUM_TSO;
		else
			ifp->if_hwassist &= ~CSUM_TSO;

		sx_xunlock(&sc->softc_lock);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
	}

	return (error);
}
static void
sfxge_ifnet_fini(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;

	/* Quiesce the hardware under the softc lock before detaching */
	sx_xlock(&sc->softc_lock);
	sfxge_stop(sc);
	sx_xunlock(&sc->softc_lock);

	/* Unhook media, detach from the stack and release the ifnet */
	ifmedia_removeall(&sc->media);
	ether_ifdetach(ifp);
	if_free(ifp);
}
static int
sfxge_ifnet_init(struct ifnet *ifp, struct sfxge_softc *sc)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sc->enp);
	device_t dev;
	int rc;

	/*
	 * Populate the ifnet, attach it to the ethernet layer and hook up
	 * the transmit entry points (multiqueue or legacy if_start).
	 * On failure the ifnet is detached again before returning an errno.
	 * Return values are parenthesized per style(9), consistent with
	 * the rest of this file.
	 */
	dev = sc->dev;
	sc->ifnet = ifp;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = sfxge_if_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sfxge_if_ioctl;

	ifp->if_capabilities = SFXGE_CAP;
	ifp->if_capenable = SFXGE_CAP_ENABLE;
	ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO;

	ether_ifattach(ifp, encp->enc_mac_addr);

#ifdef SFXGE_HAVE_MQ
	ifp->if_transmit = sfxge_if_transmit;
	ifp->if_qflush = sfxge_if_qflush;
#else
	ifp->if_start = sfxge_if_start;
	IFQ_SET_MAXLEN(&ifp->if_snd, SFXGE_NDESCS - 1);
	ifp->if_snd.ifq_drv_maxlen = SFXGE_NDESCS - 1;
	IFQ_SET_READY(&ifp->if_snd);

	mtx_init(&sc->tx_lock, "txq", NULL, MTX_DEF);
#endif

	if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
		goto fail;

	return (0);

fail:
	ether_ifdetach(sc->ifnet);
	return (rc);
}
void
sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n, uint32_t *idp)
{
	uint32_t id;

	/*
	 * Hand out the next n buffer table entries; allocations are
	 * monotonic and never freed individually.
	 */
	KASSERT(sc->buffer_table_next + n <=
		efx_nic_cfg_get(sc->enp)->enc_buftbl_limit,
		("buffer table full"));

	id = sc->buffer_table_next;
	sc->buffer_table_next += n;
	*idp = id;
}
static int
sfxge_bar_init(struct sfxge_softc *sc)
{
	efsys_bar_t *esbp = &sc->bar;

	/* Map the EFX memory BAR and fill in the efsys bar descriptor */
	esbp->esb_rid = PCIR_BAR(EFX_MEM_BAR);
	esbp->esb_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
	    &esbp->esb_rid, RF_ACTIVE);
	if (esbp->esb_res == NULL) {
		device_printf(sc->dev, "Cannot allocate BAR region %d\n",
		    EFX_MEM_BAR);
		return (ENXIO);
	}
	esbp->esb_tag = rman_get_bustag(esbp->esb_res);
	esbp->esb_handle = rman_get_bushandle(esbp->esb_res);

	mtx_init(&esbp->esb_lock, "sfxge_efsys_bar", NULL, MTX_DEF);

	return (0);
}
static void
sfxge_bar_fini(struct sfxge_softc *sc)
{
	efsys_bar_t *esbp = &sc->bar;

	/* Undo sfxge_bar_init(): release the mapping, then its lock */
	bus_release_resource(sc->dev, SYS_RES_MEMORY, esbp->esb_rid,
	    esbp->esb_res);
	mtx_destroy(&esbp->esb_lock);
}
static int
sfxge_create(struct sfxge_softc *sc)
{
	/*
	 * One-time softc construction: sysctl nodes, DMA, BAR mapping,
	 * common-code NIC object, MCDI, NVRAM, VPD, interrupts and the
	 * ev/rx/tx/port subsystems, in dependency order.  On success the
	 * softc is left in SFXGE_INITIALIZED state.
	 *
	 * NOTE(review): fail-label numbering has gaps (no fail1/2/9/10,
	 * `fail8` is targeted by two gotos) and the early `goto fail`
	 * paths do not undo pci_enable_busmaster()/sfxge_dma_init() —
	 * confirm intended.
	 */
	device_t dev;
	efx_nic_t *enp;
	int error;

	dev = sc->dev;

	sx_init(&sc->softc_lock, "sfxge_softc");

	sc->stats_node = SYSCTL_ADD_NODE(
		device_get_sysctl_ctx(dev),
		SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
		OID_AUTO, "stats", CTLFLAG_RD, NULL, "Statistics");
	if (!sc->stats_node) {
		error = ENOMEM;
		goto fail;
	}

	TASK_INIT(&sc->task_reset, 0, sfxge_reset, sc);

	(void) pci_enable_busmaster(dev);

	/* Initialize DMA mappings. */
	if ((error = sfxge_dma_init(sc)) != 0)
		goto fail;

	/* Map the device registers. */
	if ((error = sfxge_bar_init(sc)) != 0)
		goto fail;

	error = efx_family(pci_get_vendor(dev), pci_get_device(dev),
	    &sc->family);
	KASSERT(error == 0, ("Family should be filtered by sfxge_probe()"));

	/* Create the common code nic object. */
	mtx_init(&sc->enp_lock, "sfxge_nic", NULL, MTX_DEF);
	if ((error = efx_nic_create(sc->family, (efsys_identifier_t *)sc,
	    &sc->bar, &sc->enp_lock, &enp)) != 0)
		goto fail3;
	sc->enp = enp;

	/* Initialize MCDI to talk to the microcontroller. */
	if ((error = sfxge_mcdi_init(sc)) != 0)
		goto fail4;

	/* Probe the NIC and build the configuration data area. */
	if ((error = efx_nic_probe(enp)) != 0)
		goto fail5;

	/* Initialize the NVRAM. */
	if ((error = efx_nvram_init(enp)) != 0)
		goto fail6;

	/* Initialize the VPD. */
	if ((error = efx_vpd_init(enp)) != 0)
		goto fail7;

	/* Reset the NIC. */
	if ((error = efx_nic_reset(enp)) != 0)
		goto fail8;

	/* Initialize buffer table allocation. */
	sc->buffer_table_next = 0;

	/* Set up interrupts. */
	if ((error = sfxge_intr_init(sc)) != 0)
		goto fail8;

	/* Initialize event processing state. */
	if ((error = sfxge_ev_init(sc)) != 0)
		goto fail11;

	/* Initialize receive state. */
	if ((error = sfxge_rx_init(sc)) != 0)
		goto fail12;

	/* Initialize transmit state. */
	if ((error = sfxge_tx_init(sc)) != 0)
		goto fail13;

	/* Initialize port state. */
	if ((error = sfxge_port_init(sc)) != 0)
		goto fail14;

	sc->init_state = SFXGE_INITIALIZED;

	return (0);

	/* Unwind in reverse order of construction */
fail14:
	sfxge_tx_fini(sc);

fail13:
	sfxge_rx_fini(sc);

fail12:
	sfxge_ev_fini(sc);

fail11:
	sfxge_intr_fini(sc);

fail8:
	efx_vpd_fini(enp);

fail7:
	efx_nvram_fini(enp);

fail6:
	efx_nic_unprobe(enp);

fail5:
	sfxge_mcdi_fini(sc);

fail4:
	sc->enp = NULL;
	efx_nic_destroy(enp);
	mtx_destroy(&sc->enp_lock);

fail3:
	sfxge_bar_fini(sc);
	(void) pci_disable_busmaster(sc->dev);

fail:
	/* NOTE(review): clearing sc->dev here looks suspect — confirm */
	sc->dev = NULL;
	sx_destroy(&sc->softc_lock);
	return (error);
}
/*
 * Tear down all driver and common-code state set up by sfxge_create(),
 * in reverse order of creation.
 */
static void
sfxge_destroy(struct sfxge_softc *sc)
{
        efx_nic_t *enp;
        /* Clean up port state. */
        sfxge_port_fini(sc);
        /* Clean up transmit state. */
        sfxge_tx_fini(sc);
        /* Clean up receive state. */
        sfxge_rx_fini(sc);
        /* Clean up event processing state. */
        sfxge_ev_fini(sc);
        /* Clean up interrupts. */
        sfxge_intr_fini(sc);
        /* Tear down common code subsystems. */
        efx_nic_reset(sc->enp);
        efx_vpd_fini(sc->enp);
        efx_nvram_fini(sc->enp);
        efx_nic_unprobe(sc->enp);
        /* Tear down MCDI. */
        sfxge_mcdi_fini(sc);
        /*
         * Destroy common code context.  Clear sc->enp before destroying the
         * NIC object so no stale pointer to it remains in the softc.
         */
        enp = sc->enp;
        sc->enp = NULL;
        efx_nic_destroy(enp);
        /* Free DMA memory. */
        sfxge_dma_fini(sc);
        /* Free mapped BARs. */
        sfxge_bar_fini(sc);
        (void) pci_disable_busmaster(sc->dev);
        /*
         * Wait for any previously scheduled reset task to finish before the
         * softc it dereferences is torn down.
         */
        taskqueue_drain(taskqueue_thread, &sc->task_reset);
        /* Destroy the softc lock. */
        sx_destroy(&sc->softc_lock);
}
static int
sfxge_vpd_handler(SYSCTL_HANDLER_ARGS)
{
struct sfxge_softc *sc = arg1;
efx_vpd_value_t value;
int rc;
value.evv_tag = arg2 >> 16;
value.evv_keyword = arg2 & 0xffff;
if ((rc = efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value))
!= 0)
return rc;
return SYSCTL_OUT(req, value.evv_value, value.evv_length);
}
/*
 * Add a read-only sysctl for one VPD tag/keyword pair, but only if that
 * pair is actually present in the VPD data read at initialization.
 */
static void
sfxge_vpd_try_add(struct sfxge_softc *sc, struct sysctl_oid_list *list,
        efx_vpd_tag_t tag, const char *keyword)
{
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
        efx_vpd_value_t value;
        /* Check whether VPD tag/keyword is present */
        value.evv_tag = tag;
        value.evv_keyword = EFX_VPD_KEYWORD(keyword[0], keyword[1]);
        if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) != 0)
                return;
        /*
         * Pack tag (high 16 bits) and keyword (low 16 bits) into arg2,
         * unpacked again by sfxge_vpd_handler().
         */
        SYSCTL_ADD_PROC(
                ctx, list, OID_AUTO, keyword, CTLTYPE_STRING|CTLFLAG_RD,
                sc, tag << 16 | EFX_VPD_KEYWORD(keyword[0], keyword[1]),
                sfxge_vpd_handler, "A", "");
}
/*
 * Read the Vital Product Data into a softc-held buffer, use the ID string
 * as the device description, and export the standard ("PN", "EC", "SN")
 * plus vendor-defined ("V0".."V9", "VA".."VZ") read-only keywords via
 * sysctl.  Returns 0 on success or an error code from the common code.
 */
static int
sfxge_vpd_init(struct sfxge_softc *sc)
{
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
        struct sysctl_oid *vpd_node;
        struct sysctl_oid_list *vpd_list;
        char keyword[3];
        efx_vpd_value_t value;
        int rc;
        if ((rc = efx_vpd_size(sc->enp, &sc->vpd_size)) != 0)
                goto fail;
        /* M_WAITOK allocation cannot fail; no NULL check needed. */
        sc->vpd_data = malloc(sc->vpd_size, M_SFXGE, M_WAITOK);
        if ((rc = efx_vpd_read(sc->enp, sc->vpd_data, sc->vpd_size)) != 0)
                goto fail2;
        /* Copy ID (product name) into device description, and log it. */
        value.evv_tag = EFX_VPD_ID;
        if (efx_vpd_get(sc->enp, sc->vpd_data, sc->vpd_size, &value) == 0) {
                /*
                 * NOTE(review): NUL-terminates at evv_value[evv_length];
                 * assumes evv_length < sizeof(evv_value) — confirm against
                 * the efx_vpd_value_t definition.
                 */
                value.evv_value[value.evv_length] = 0;
                device_set_desc_copy(sc->dev, value.evv_value);
                device_printf(sc->dev, "%s\n", value.evv_value);
        }
        vpd_node = SYSCTL_ADD_NODE(
                ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)),
                OID_AUTO, "vpd", CTLFLAG_RD, NULL, "Vital Product Data");
        vpd_list = SYSCTL_CHILDREN(vpd_node);
        /* Add sysctls for all expected and any vendor-defined keywords. */
        sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "PN");
        sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "EC");
        sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, "SN");
        keyword[0] = 'V';
        keyword[2] = 0;
        for (keyword[1] = '0'; keyword[1] <= '9'; keyword[1]++)
                sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
        for (keyword[1] = 'A'; keyword[1] <= 'Z'; keyword[1]++)
                sfxge_vpd_try_add(sc, vpd_list, EFX_VPD_RO, keyword);
        return 0;
fail2:
        free(sc->vpd_data, M_SFXGE);
fail:
        return rc;
}
/* Release the VPD buffer allocated by sfxge_vpd_init(). */
static void
sfxge_vpd_fini(struct sfxge_softc *sc)
{
        free(sc->vpd_data, M_SFXGE);
}
/*
 * Asynchronous reset task, run on taskqueue_thread via
 * sfxge_schedule_reset().  Stops the interface, resets the NIC and
 * restarts the interface; the interface is left stopped if either the
 * NIC reset or the restart fails.
 *
 * Fix: the return value of efx_nic_reset() was previously ignored, so a
 * failed hardware reset was followed by an attempt to restart the NIC.
 */
static void
sfxge_reset(void *arg, int npending)
{
        struct sfxge_softc *sc;
        int rc;

        (void)npending;

        sc = (struct sfxge_softc *)arg;

        sx_xlock(&sc->softc_lock);

        /* Nothing to do unless the interface is actually running. */
        if (sc->init_state != SFXGE_STARTED)
                goto done;

        sfxge_stop(sc);
        if ((rc = efx_nic_reset(sc->enp)) != 0) {
                device_printf(sc->dev,
                    "NIC reset failed (%d); interface is now stopped\n",
                    rc);
                goto done;
        }
        if ((rc = sfxge_start(sc)) != 0)
                device_printf(sc->dev,
                    "reset failed (%d); interface is now stopped\n",
                    rc);

done:
        sx_xunlock(&sc->softc_lock);
}
/*
 * Request an asynchronous device reset.  The actual work is done by
 * sfxge_reset() running on the shared kernel taskqueue, so this is safe
 * to call from event-processing context.
 */
void
sfxge_schedule_reset(struct sfxge_softc *sc)
{
        taskqueue_enqueue(taskqueue_thread, &sc->task_reset);
}
/*
 * Newbus attach entry point: allocate the ifnet, initialize the hardware
 * state via sfxge_create(), register the network interface, then the VPD
 * sysctls.  On failure, each completed step is unwound in reverse order.
 */
static int
sfxge_attach(device_t dev)
{
        struct sfxge_softc *sc;
        struct ifnet *ifp;
        int error;
        sc = device_get_softc(dev);
        sc->dev = dev;
        /* Allocate ifnet. */
        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Couldn't allocate ifnet\n");
                error = ENOMEM;
                goto fail;
        }
        sc->ifnet = ifp;
        /* Initialize hardware. */
        if ((error = sfxge_create(sc)) != 0)
                goto fail2;
        /* Create the ifnet for the port. */
        if ((error = sfxge_ifnet_init(ifp, sc)) != 0)
                goto fail3;
        if ((error = sfxge_vpd_init(sc)) != 0)
                goto fail4;
        sc->init_state = SFXGE_REGISTERED;
        return (0);
fail4:
        /* NOTE(review): assumes sfxge_ifnet_fini() also frees the ifnet,
         * since this path does not fall through to if_free() — confirm. */
        sfxge_ifnet_fini(ifp);
fail3:
        sfxge_destroy(sc);
fail2:
        if_free(sc->ifnet);
fail:
        return (error);
}
/*
 * Newbus detach entry point: unwind everything sfxge_attach() set up,
 * in reverse order.  Always succeeds.
 */
static int
sfxge_detach(device_t dev)
{
        struct sfxge_softc *sc;
        sc = device_get_softc(dev);
        sfxge_vpd_fini(sc);
        /* Destroy the ifnet. */
        sfxge_ifnet_fini(sc->ifnet);
        /* Tear down hardware. */
        sfxge_destroy(sc);
        return (0);
}
/*
 * Newbus probe entry point.  Accept the device iff the common code
 * recognizes the PCI vendor/device pair as a supported controller
 * family (only SFC9000/Siena is expected here).
 */
static int
sfxge_probe(device_t dev)
{
        efx_family_t family;

        if (efx_family(pci_get_vendor(dev), pci_get_device(dev),
            &family) != 0)
                return (ENXIO);

        KASSERT(family == EFX_FAMILY_SIENA, ("impossible controller family"));

        device_set_desc(dev, "Solarflare SFC9000 family");
        return (0);
}
/* Newbus device method table for the sfxge driver. */
static device_method_t sfxge_methods[] = {
        DEVMETHOD(device_probe,         sfxge_probe),
        DEVMETHOD(device_attach,        sfxge_attach),
        DEVMETHOD(device_detach,        sfxge_detach),
        /* Bus interface. */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),
        DEVMETHOD(bus_driver_added,     bus_generic_driver_added),
        { 0, 0 }
};
static devclass_t sfxge_devclass;
/* Driver declaration; one softc per attached device. */
static driver_t sfxge_driver = {
        "sfxge",
        sfxge_methods,
        sizeof(struct sfxge_softc)
};
/* Register the driver on the PCI bus. */
DRIVER_MODULE(sfxge, pci, sfxge_driver, sfxge_devclass, 0, 0);

304
sys/dev/sfxge/sfxge.h Normal file
View File

@ -0,0 +1,304 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SFXGE_H
#define _SFXGE_H
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
#include <vm/uma.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
/*
* Backward-compatibility
*/
#ifndef CACHE_LINE_SIZE
/* This should be right on most machines the driver will be used on, and
* we needn't care too much about wasting a few KB per interface.
*/
#define CACHE_LINE_SIZE 128
#endif
#ifndef IFCAP_LINKSTATE
#define IFCAP_LINKSTATE 0
#endif
#ifndef IFCAP_VLAN_HWTSO
#define IFCAP_VLAN_HWTSO 0
#endif
#ifndef IFM_10G_T
#define IFM_10G_T IFM_UNKNOWN
#endif
#ifndef IFM_10G_KX4
#define IFM_10G_KX4 IFM_10G_CX4
#endif
#if __FreeBSD_version >= 800054
/* Networking core is multiqueue aware. We can manage our own TX
* queues and use m_pkthdr.flowid.
*/
#define SFXGE_HAVE_MQ
#endif
#if (__FreeBSD_version >= 800501 && __FreeBSD_version < 900000) || \
__FreeBSD_version >= 900003
#define SFXGE_HAVE_DESCRIBE_INTR
#endif
#ifdef IFM_ETH_RXPAUSE
#define SFXGE_HAVE_PAUSE_MEDIAOPTS
#endif
#ifndef CTLTYPE_U64
#define CTLTYPE_U64 CTLTYPE_QUAD
#endif
#include "sfxge_rx.h"
#include "sfxge_tx.h"
#define SFXGE_IP_ALIGN 2
#define SFXGE_ETHERTYPE_LOOPBACK 0x9000 /* Xerox loopback */
enum sfxge_evq_state {
SFXGE_EVQ_UNINITIALIZED = 0,
SFXGE_EVQ_INITIALIZED,
SFXGE_EVQ_STARTING,
SFXGE_EVQ_STARTED
};
#define SFXGE_EV_BATCH 16384
/*
 * Per-interrupt event queue state.  The first members are cache-line
 * aligned to keep hot per-queue state from false-sharing across queues.
 */
struct sfxge_evq {
        struct sfxge_softc      *sc __aligned(CACHE_LINE_SIZE);
        struct mtx              lock __aligned(CACHE_LINE_SIZE);
        enum sfxge_evq_state    init_state;     /* lifecycle state */
        unsigned int            index;          /* event queue index */
        efsys_mem_t             mem;            /* DMA memory for the ring */
        unsigned int            buf_base_id;    /* first buffer-table entry */
        boolean_t               exception;      /* latched on HW exception */
        efx_evq_t               *common;        /* common-code evq object */
        unsigned int            read_ptr;       /* next event to read */
        unsigned int            rx_done;        /* RX events this poll */
        unsigned int            tx_done;        /* TX events this poll */
        /* Linked list of TX queues with completions to process */
        struct sfxge_txq        *txq;
        struct sfxge_txq        **txqs;         /* tail pointer of the list */
};
#define SFXGE_NEVS 4096
#define SFXGE_NDESCS 1024
#define SFXGE_MODERATION 30
enum sfxge_intr_state {
SFXGE_INTR_UNINITIALIZED = 0,
SFXGE_INTR_INITIALIZED,
SFXGE_INTR_TESTING,
SFXGE_INTR_STARTED
};
/* Per-vector interrupt resource and established handler. */
struct sfxge_intr_hdl {
        int                     eih_rid;        /* bus resource ID */
        void                    *eih_tag;       /* bus_setup_intr cookie */
        struct resource         *eih_res;       /* IRQ resource */
};
/* Interrupt management state (MSI-X, MSI or legacy line interrupt). */
struct sfxge_intr {
        enum sfxge_intr_state   state;          /* lifecycle state */
        struct resource         *msix_res;      /* MSI-X table resource */
        struct sfxge_intr_hdl   *table;         /* per-vector handles */
        int                     n_alloc;        /* vectors allocated */
        int                     type;           /* interrupt type in use */
        efsys_mem_t             status;         /* DMA interrupt status block */
        uint64_t                mask;
        uint32_t                zero_count;
};
enum sfxge_mcdi_state {
SFXGE_MCDI_UNINITIALIZED = 0,
SFXGE_MCDI_INITIALIZED,
SFXGE_MCDI_BUSY,
SFXGE_MCDI_COMPLETED
};
struct sfxge_mcdi {
struct mtx lock;
struct cv cv;
enum sfxge_mcdi_state state;
efx_mcdi_transport_t transport;
};
struct sfxge_hw_stats {
clock_t update_time;
efsys_mem_t dma_buf;
void *decode_buf;
};
enum sfxge_port_state {
SFXGE_PORT_UNINITIALIZED = 0,
SFXGE_PORT_INITIALIZED,
SFXGE_PORT_STARTED
};
struct sfxge_port {
struct sfxge_softc *sc;
struct mtx lock;
enum sfxge_port_state init_state;
#ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS
unsigned int wanted_fc;
#endif
struct sfxge_hw_stats phy_stats;
struct sfxge_hw_stats mac_stats;
efx_link_mode_t link_mode;
};
enum sfxge_softc_state {
SFXGE_UNINITIALIZED = 0,
SFXGE_INITIALIZED,
SFXGE_REGISTERED,
SFXGE_STARTED
};
/*
 * Per-device software context.  Top-level state transitions (attach,
 * start/stop, reset) are serialized by softc_lock; the common-code NIC
 * object has its own enp_lock, and queues have per-queue locks.
 */
struct sfxge_softc {
        device_t                        dev;
        struct sx                       softc_lock;     /* serializes state changes */
        enum sfxge_softc_state          init_state;
        struct ifnet                    *ifnet;
        unsigned int                    if_flags;
        struct sysctl_oid               *stats_node;    /* parent of stat sysctls */
        struct task                     task_reset;     /* runs sfxge_reset() */
        efx_family_t                    family;         /* controller family */
        caddr_t                         vpd_data;       /* raw VPD image */
        size_t                          vpd_size;
        efx_nic_t                       *enp;           /* common-code NIC object */
        struct mtx                      enp_lock;
        bus_dma_tag_t                   parent_dma_tag;
        efsys_bar_t                     bar;            /* mapped register BAR */
        struct sfxge_intr               intr;
        struct sfxge_mcdi               mcdi;
        struct sfxge_port               port;
        uint32_t                        buffer_table_next; /* next free buf-table entry */
        struct sfxge_evq                *evq[SFXGE_RX_SCALE_MAX];
        unsigned int                    ev_moderation;  /* interrupt moderation (us) */
        clock_t                         ev_stats_update_time;
        uint64_t                        ev_stats[EV_NQSTATS]; /* cumulative evq stats */
        uma_zone_t                      rxq_cache;
        struct sfxge_rxq                *rxq[SFXGE_RX_SCALE_MAX];
        unsigned int                    rx_indir_table[SFXGE_RX_SCALE_MAX]; /* RSS table */
#ifdef SFXGE_HAVE_MQ
        /* One TX queue per type plus one per RSS channel. */
        struct sfxge_txq                *txq[SFXGE_TXQ_NTYPES + SFXGE_RX_SCALE_MAX];
#else
        struct sfxge_txq                *txq[SFXGE_TXQ_NTYPES];
#endif
        struct ifmedia                  media;
        size_t                          rx_prefix_size; /* HW prefix before packet data */
        size_t                          rx_buffer_size;
        uma_zone_t                      rx_buffer_zone;
#ifndef SFXGE_HAVE_MQ
        struct mtx                      tx_lock __aligned(CACHE_LINE_SIZE);
#endif
};
#define SFXGE_LINK_UP(sc) ((sc)->port.link_mode != EFX_LINK_DOWN)
#define SFXGE_RUNNING(sc) ((sc)->ifnet->if_drv_flags & IFF_DRV_RUNNING)
/*
* From sfxge.c.
*/
extern void sfxge_schedule_reset(struct sfxge_softc *sc);
extern void sfxge_sram_buf_tbl_alloc(struct sfxge_softc *sc, size_t n,
uint32_t *idp);
/*
* From sfxge_dma.c.
*/
extern int sfxge_dma_init(struct sfxge_softc *sc);
extern void sfxge_dma_fini(struct sfxge_softc *sc);
extern int sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len,
efsys_mem_t *esmp);
extern void sfxge_dma_free(efsys_mem_t *esmp);
extern int sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs);
/*
* From sfxge_ev.c.
*/
extern int sfxge_ev_init(struct sfxge_softc *sc);
extern void sfxge_ev_fini(struct sfxge_softc *sc);
extern int sfxge_ev_start(struct sfxge_softc *sc);
extern void sfxge_ev_stop(struct sfxge_softc *sc);
extern int sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index);
/*
* From sfxge_intr.c.
*/
extern int sfxge_intr_init(struct sfxge_softc *sc);
extern void sfxge_intr_fini(struct sfxge_softc *sc);
extern int sfxge_intr_start(struct sfxge_softc *sc);
extern void sfxge_intr_stop(struct sfxge_softc *sc);
/*
* From sfxge_mcdi.c.
*/
extern int sfxge_mcdi_init(struct sfxge_softc *sc);
extern void sfxge_mcdi_fini(struct sfxge_softc *sc);
/*
* From sfxge_port.c.
*/
extern int sfxge_port_init(struct sfxge_softc *sc);
extern void sfxge_port_fini(struct sfxge_softc *sc);
extern int sfxge_port_start(struct sfxge_softc *sc);
extern void sfxge_port_stop(struct sfxge_softc *sc);
extern void sfxge_mac_link_update(struct sfxge_softc *sc,
efx_link_mode_t mode);
extern int sfxge_mac_filter_set(struct sfxge_softc *sc);
extern int sfxge_port_ifmedia_init(struct sfxge_softc *sc);
#define SFXGE_MAX_MTU (9 * 1024)
#endif /* _SFXGE_H */

202
sys/dev/sfxge/sfxge_dma.c Normal file
View File

@ -0,0 +1,202 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include "common/efx.h"
#include "sfxge.h"
/*
 * bus_dmamap_load() callback: publish the single mapped segment's bus
 * address through the caller-supplied pointer, or 0 on mapping failure
 * so the caller can detect the error.
 */
static void
sfxge_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *addrp = arg;

        *addrp = (error != 0) ? 0 : segs[0].ds_addr;
}
/*
 * Map an mbuf chain for DMA, producing at most maxsegs segments in segs[]
 * and the segment count in *nsegs.  If the chain needs too many segments
 * it is defragmented once (which may replace *mp) and the mapping retried.
 * Returns 0 on success, EFBIG if the chain still cannot fit, or ENOBUFS
 * if defragmentation fails.
 */
int
sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
        struct mbuf **mp, bus_dma_segment_t *segs, int *nsegs, int maxsegs)
{
        bus_dma_segment_t *psegs;
        struct mbuf *m;
        int seg_count;
        int defragged;
        int err;
        m = *mp;
        defragged = err = seg_count = 0;
        KASSERT(m->m_pkthdr.len, ("packet has zero header length"));
retry:
        psegs = segs;
        seg_count = 0;
        /* Single-mbuf packets take the fast path: exactly one segment. */
        if (m->m_next == NULL) {
                sfxge_map_mbuf_fast(tag, map, m, segs);
                *nsegs = 1;
                return (0);
        }
#if defined(__i386__) || defined(__amd64__)
        /*
         * On i386/amd64 each non-empty mbuf is mapped individually,
         * bypassing bus_dmamap_load_mbuf_sg().  NOTE(review): presumably
         * sfxge_map_mbuf_fast() relies on a direct physical mapping here —
         * confirm against its definition.
         */
        while (m && seg_count < maxsegs) {
                /*
                 * firmware doesn't like empty segments
                 */
                if (m->m_len != 0) {
                        seg_count++;
                        sfxge_map_mbuf_fast(tag, map, m, psegs);
                        psegs++;
                }
                m = m->m_next;
        }
#else
        err = bus_dmamap_load_mbuf_sg(tag, map, *mp, segs, &seg_count, 0);
#endif
        if (seg_count == 0) {
                err = EFBIG;
                goto err_out;
        } else if (err == EFBIG || seg_count >= maxsegs) {
                /* Too many segments: defragment once, then retry. */
                if (!defragged) {
                        m = m_defrag(*mp, M_DONTWAIT);
                        if (m == NULL) {
                                err = ENOBUFS;
                                goto err_out;
                        }
                        *mp = m;
                        defragged = 1;
                        goto retry;
                }
                err = EFBIG;
                goto err_out;
        }
        *nsegs = seg_count;
err_out:
        return (err);
}
/*
 * Unload, free and destroy a DMA region allocated by sfxge_dma_alloc(),
 * clearing the address/base fields in the descriptor.
 */
void
sfxge_dma_free(efsys_mem_t *esmp)
{
        bus_dmamap_unload(esmp->esm_tag, esmp->esm_map);
        bus_dmamem_free(esmp->esm_tag, esmp->esm_base, esmp->esm_map);
        bus_dma_tag_destroy(esmp->esm_tag);
        esmp->esm_addr = 0;
        esmp->esm_base = NULL;
}
/*
 * Allocate a physically contiguous, zeroed, coherent DMA region of len
 * bytes and fill in *esmp (tag, map, kernel va, bus address).
 * Returns 0 on success or ENOMEM, with no partial state left behind.
 *
 * Fixes: (1) the bus_dmamap_load() failure path freed using
 * esmp->esm_base, which is not assigned until the end of this function —
 * use the local vaddr instead; (2) mapping failure reported through
 * sfxge_dma_cb() zeroes esmp->esm_addr, not vaddr, so the old
 * "vaddr == NULL" check could never fire and a failed mapping was
 * silently treated as success (and the intended error path leaked the
 * tag, memory and map).
 */
int
sfxge_dma_alloc(struct sfxge_softc *sc, bus_size_t len, efsys_mem_t *esmp)
{
        void *vaddr;

        /* Create the child DMA tag: one segment, 46-bit addressable. */
        if (bus_dma_tag_create(sc->parent_dma_tag, PAGE_SIZE, 0,
            0x3FFFFFFFFFFFULL, BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0,
            NULL, NULL, &esmp->esm_tag) != 0) {
                device_printf(sc->dev, "Couldn't allocate txq DMA tag\n");
                return (ENOMEM);
        }

        /* Allocate kernel memory. */
        if (bus_dmamem_alloc(esmp->esm_tag, &vaddr,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
            &esmp->esm_map) != 0) {
                device_printf(sc->dev, "Couldn't allocate DMA memory\n");
                bus_dma_tag_destroy(esmp->esm_tag);
                return (ENOMEM);
        }

        /* Load map into device memory. */
        if (bus_dmamap_load(esmp->esm_tag, esmp->esm_map, vaddr, len,
            sfxge_dma_cb, &esmp->esm_addr, 0) != 0) {
                device_printf(sc->dev, "Couldn't load DMA mapping\n");
                bus_dmamem_free(esmp->esm_tag, vaddr, esmp->esm_map);
                bus_dma_tag_destroy(esmp->esm_tag);
                return (ENOMEM);
        }

        /*
         * The callback gets error information about the mapping and will
         * have set esm_addr to 0 if something went wrong.
         */
        if (esmp->esm_addr == 0) {
                device_printf(sc->dev, "DMA mapping failed\n");
                bus_dmamap_unload(esmp->esm_tag, esmp->esm_map);
                bus_dmamem_free(esmp->esm_tag, vaddr, esmp->esm_map);
                bus_dma_tag_destroy(esmp->esm_tag);
                return (ENOMEM);
        }

        esmp->esm_base = vaddr;

        return (0);
}
/* Destroy the parent DMA tag created by sfxge_dma_init(). */
void
sfxge_dma_fini(struct sfxge_softc *sc)
{
        bus_dma_tag_destroy(sc->parent_dma_tag);
}
/*
 * Create the unrestricted parent DMA tag from which all per-queue child
 * tags are derived.  Returns 0 on success or ENOMEM.
 */
int
sfxge_dma_init(struct sfxge_softc *sc)
{
        /* Create the parent dma tag. */
        if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* parent */
                1, 0,                   /* algnmnt, boundary */
                BUS_SPACE_MAXADDR,      /* lowaddr */
                BUS_SPACE_MAXADDR,      /* highaddr */
                NULL, NULL,             /* filter, filterarg */
                BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
                BUS_SPACE_UNRESTRICTED, /* nsegments */
                BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
                0,                      /* flags */
                NULL, NULL,             /* lock, lockarg */
                &sc->parent_dma_tag)) {
                device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
                return (ENOMEM);
        }
        return (0);
}

862
sys/dev/sfxge/sfxge_ev.c Normal file
View File

@ -0,0 +1,862 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include "common/efx.h"
#include "sfxge.h"
/*
 * Run deferred completion processing for this event queue: walk the list
 * of TX queues with pending completions (built up by sfxge_ev_tx()),
 * then complete any pending RX descriptors on the queue's RX ring.
 * eop is passed through to sfxge_rx_qcomplete().
 */
static void
sfxge_ev_qcomplete(struct sfxge_evq *evq, boolean_t eop)
{
        struct sfxge_softc *sc;
        unsigned int index;
        struct sfxge_rxq *rxq;
        struct sfxge_txq *txq;
        sc = evq->sc;
        index = evq->index;
        rxq = sc->rxq[index];
        if ((txq = evq->txq) != NULL) {
                /* Detach the list and reset the tail pointer before walking,
                 * so queues can be re-linked during processing. */
                evq->txq = NULL;
                evq->txqs = &(evq->txq);
                do {
                        struct sfxge_txq *next;
                        next = txq->next;
                        txq->next = NULL;
                        KASSERT(txq->evq_index == index,
                            ("txq->evq_index != index"));
                        if (txq->pending != txq->completed)
                                sfxge_tx_qcomplete(txq);
                        txq = next;
                } while (txq != NULL);
        }
        if (rxq->pending != rxq->completed)
                sfxge_rx_qcomplete(rxq, eop);
}
/*
 * Handle an RX completion event: record flags/size in the software
 * descriptor identified by id, batching actual completion work until
 * SFXGE_RX_BATCH descriptors are pending.  An out-of-order completion
 * indicates lost events, so the queue is poisoned (evq->exception) and
 * a device reset is scheduled.  Returns true once SFXGE_EV_BATCH RX
 * events have been seen in this poll, telling the poller to stop.
 */
static boolean_t
sfxge_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
        uint16_t flags)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        struct sfxge_rxq *rxq;
        unsigned int expected;
        struct sfxge_rx_sw_desc *rx_desc;
        evq = arg;
        sc = evq->sc;
        if (evq->exception)
                goto done;
        rxq = sc->rxq[label];
        KASSERT(rxq != NULL, ("rxq == NULL"));
        KASSERT(evq->index == rxq->index,
            ("evq->index != rxq->index"));
        if (rxq->init_state != SFXGE_RXQ_STARTED)
                goto done;
        /* Completions must arrive in ring order. */
        expected = rxq->pending++ & (SFXGE_NDESCS - 1);
        if (id != expected) {
                evq->exception = B_TRUE;
                device_printf(sc->dev, "RX completion out of order"
                    " (id=%#x expected=%#x flags=%#x); resetting\n",
                    id, expected, flags);
                sfxge_schedule_reset(sc);
                goto done;
        }
        rx_desc = &rxq->queue[id];
        KASSERT(rx_desc->flags == EFX_DISCARD,
            ("rx_desc->flags != EFX_DISCARD"));
        rx_desc->flags = flags;
        KASSERT(size < (1 << 16), ("size > (1 << 16)"));
        rx_desc->size = (uint16_t)size;
        /* Start pulling the mbuf into cache ahead of completion work. */
        prefetch_read_many(rx_desc->mbuf);
        evq->rx_done++;
        if (rxq->pending - rxq->completed >= SFXGE_RX_BATCH)
                sfxge_ev_qcomplete(evq, B_FALSE);
done:
        return (evq->rx_done >= SFXGE_EV_BATCH);
}
/*
 * Handle a hardware exception event.  The queue is poisoned so further
 * RX processing is skipped; any exception other than an unknown sensor
 * event is logged and triggers a device reset.
 */
static boolean_t
sfxge_ev_exception(void *arg, uint32_t code, uint32_t data)
{
        struct sfxge_evq *evq = (struct sfxge_evq *)arg;
        struct sfxge_softc *sc = evq->sc;

        evq->exception = B_TRUE;

        if (code != EFX_EXCEPTION_UNKNOWN_SENSOREVT) {
                device_printf(sc->dev,
                    "hardware exception (code=%u); resetting\n",
                    code);
                sfxge_schedule_reset(sc);
        }

        return (B_FALSE);
}
/*
 * Handle an RX queue flush-done firmware event.  The event may arrive on
 * a different event queue than the one owning the RX queue, so it is
 * re-posted as a software event on the owning queue; sfxge_ev_software()
 * performs the actual flush-done processing there.
 *
 * Fix: the KASSERT panic message read "!= level" where it meant
 * "!= label" (compare the identical assertions in the sibling
 * flush handlers).
 */
static boolean_t
sfxge_ev_rxq_flush_done(void *arg, uint32_t label)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        struct sfxge_rxq *rxq;
        unsigned int index;
        uint16_t magic;

        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        rxq = sc->rxq[label];

        KASSERT(rxq != NULL, ("rxq == NULL"));

        /* Resend a software event on the correct queue */
        index = rxq->index;
        evq = sc->evq[index];

        KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
            ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
        magic = SFXGE_MAGIC_RX_QFLUSH_DONE | label;

        KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
            ("evq not started"));
        efx_ev_qpost(evq->common, magic);

        return (B_FALSE);
}
/*
 * Handle an RX queue flush-failed firmware event.  As with flush-done,
 * the event is re-posted as a software event on the event queue that
 * owns the RX queue, where sfxge_ev_software() handles it.
 */
static boolean_t
sfxge_ev_rxq_flush_failed(void *arg, uint32_t label)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        struct sfxge_rxq *rxq;
        unsigned int index;
        uint16_t magic;
        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        rxq = sc->rxq[label];
        KASSERT(rxq != NULL, ("rxq == NULL"));
        /* Resend a software event on the correct queue */
        index = rxq->index;
        evq = sc->evq[index];
        KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
            ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
        magic = SFXGE_MAGIC_RX_QFLUSH_FAILED | label;
        KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
            ("evq not started"));
        efx_ev_qpost(evq->common, magic);
        return (B_FALSE);
}
/*
 * Handle a TX completion event: advance the queue's pending count by the
 * number of newly completed descriptors (id is the last completed ring
 * index), link the TX queue onto the evq's completion list if it is not
 * already on it, and batch actual completion work until SFXGE_TX_BATCH
 * descriptors are outstanding.  Returns true once SFXGE_EV_BATCH TX
 * events have been seen in this poll.
 */
static boolean_t
sfxge_ev_tx(void *arg, uint32_t label, uint32_t id)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        struct sfxge_txq *txq;
        unsigned int stop;
        unsigned int delta;
        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        txq = sc->txq[label];
        KASSERT(txq != NULL, ("txq == NULL"));
        KASSERT(evq->index == txq->evq_index,
            ("evq->index != txq->evq_index"));
        if (txq->init_state != SFXGE_TXQ_STARTED)
                goto done;
        /* Compute completed-descriptor count, allowing for ring wrap. */
        stop = (id + 1) & (SFXGE_NDESCS - 1);
        id = txq->pending & (SFXGE_NDESCS - 1);
        delta = (stop >= id) ? (stop - id) : (SFXGE_NDESCS - id + stop);
        txq->pending += delta;
        evq->tx_done++;
        /* Append to the evq's completion list unless already linked. */
        if (txq->next == NULL &&
            evq->txqs != &(txq->next)) {
                *(evq->txqs) = txq;
                evq->txqs = &(txq->next);
        }
        if (txq->pending - txq->completed >= SFXGE_TX_BATCH)
                sfxge_tx_qcomplete(txq);
done:
        return (evq->tx_done >= SFXGE_EV_BATCH);
}
/*
 * Handle a TX queue flush-done firmware event by re-posting it as a
 * software event on the event queue that owns the TX queue, where
 * sfxge_ev_software() completes the flush.
 */
static boolean_t
sfxge_ev_txq_flush_done(void *arg, uint32_t label)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        struct sfxge_txq *txq;
        uint16_t magic;
        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        txq = sc->txq[label];
        KASSERT(txq != NULL, ("txq == NULL"));
        KASSERT(txq->init_state == SFXGE_TXQ_INITIALIZED,
            ("txq not initialized"));
        /* Resend a software event on the correct queue */
        evq = sc->evq[txq->evq_index];
        KASSERT((label & SFXGE_MAGIC_DMAQ_LABEL_MASK) == label,
            ("(label & SFXGE_MAGIC_DMAQ_LABEL_MASK) != label"));
        magic = SFXGE_MAGIC_TX_QFLUSH_DONE | label;
        KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
            ("evq not started"));
        efx_ev_qpost(evq->common, magic);
        return (B_FALSE);
}
/*
 * Handle a driver-generated software event.  The queue label is packed
 * into the low bits of the magic number; the remaining bits select the
 * operation (RX flush done/failed, RX refill, TX flush done).  Unknown
 * magic values are ignored.
 */
static boolean_t
sfxge_ev_software(void *arg, uint16_t magic)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        unsigned int label;
        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        label = magic & SFXGE_MAGIC_DMAQ_LABEL_MASK;
        magic &= ~SFXGE_MAGIC_DMAQ_LABEL_MASK;
        switch (magic) {
        case SFXGE_MAGIC_RX_QFLUSH_DONE: {
                struct sfxge_rxq *rxq = sc->rxq[label];
                KASSERT(rxq != NULL, ("rxq == NULL"));
                KASSERT(evq->index == rxq->index,
                    ("evq->index != rxq->index"));
                sfxge_rx_qflush_done(rxq);
                break;
        }
        case SFXGE_MAGIC_RX_QFLUSH_FAILED: {
                struct sfxge_rxq *rxq = sc->rxq[label];
                KASSERT(rxq != NULL, ("rxq == NULL"));
                KASSERT(evq->index == rxq->index,
                    ("evq->index != rxq->index"));
                sfxge_rx_qflush_failed(rxq);
                break;
        }
        case SFXGE_MAGIC_RX_QREFILL: {
                struct sfxge_rxq *rxq = sc->rxq[label];
                KASSERT(rxq != NULL, ("rxq == NULL"));
                KASSERT(evq->index == rxq->index,
                    ("evq->index != rxq->index"));
                sfxge_rx_qrefill(rxq);
                break;
        }
        case SFXGE_MAGIC_TX_QFLUSH_DONE: {
                struct sfxge_txq *txq = sc->txq[label];
                KASSERT(txq != NULL, ("txq == NULL"));
                KASSERT(evq->index == txq->evq_index,
                    ("evq->index != txq->evq_index"));
                sfxge_tx_qflush_done(txq);
                break;
        }
        default:
                break;
        }
        return (B_FALSE);
}
/*
 * Handle an SRAM event.  These only feed tracing probes; an
 * unrecognized code is a driver/firmware mismatch and asserts.
 */
static boolean_t
sfxge_ev_sram(void *arg, uint32_t code)
{
        (void)arg;

        switch (code) {
        case EFX_SRAM_UPDATE:
                EFSYS_PROBE(sram_update);
                break;

        case EFX_SRAM_CLEAR:
                EFSYS_PROBE(sram_clear);
                break;

        case EFX_SRAM_ILLEGAL_CLEAR:
                EFSYS_PROBE(sram_illegal_clear);
                break;

        default:
                KASSERT(B_FALSE, ("Impossible SRAM event"));
                break;
        }

        return (B_FALSE);
}
/* Timer events need no handling; continue polling. */
static boolean_t
sfxge_ev_timer(void *arg, uint32_t index)
{
        (void)arg;
        (void)index;
        return (B_FALSE);
}
/* Wake-up events need no handling; continue polling. */
static boolean_t
sfxge_ev_wake_up(void *arg, uint32_t index)
{
        (void)arg;
        (void)index;
        return (B_FALSE);
}
/*
 * Fold per-queue event statistics from the common code into the softc's
 * cumulative ev_stats[] array.  Rate-limited to once per second, and a
 * no-op unless the event queues are started.
 */
static void
sfxge_ev_stat_update(struct sfxge_softc *sc)
{
        struct sfxge_evq *evq;
        unsigned int index;
        clock_t now;
        sx_xlock(&sc->softc_lock);
        if (sc->evq[0]->init_state != SFXGE_EVQ_STARTED)
                goto out;
        now = ticks;
        if (now - sc->ev_stats_update_time < hz)
                goto out;
        sc->ev_stats_update_time = now;
        /* Add event counts from each event queue in turn */
        for (index = 0; index < sc->intr.n_alloc; index++) {
                evq = sc->evq[index];
                mtx_lock(&evq->lock);
                efx_ev_qstats_update(evq->common, sc->ev_stats);
                mtx_unlock(&evq->lock);
        }
out:
        sx_xunlock(&sc->softc_lock);
}
/*
 * Sysctl handler exporting one cumulative event-queue statistic.
 * arg2 is the statistic index into sc->ev_stats[].
 */
static int
sfxge_ev_stat_handler(SYSCTL_HANDLER_ARGS)
{
        struct sfxge_softc *sc = arg1;
        unsigned int id = arg2;

        /* Refresh the cumulative counters before reading this one. */
        sfxge_ev_stat_update(sc);

        return (SYSCTL_OUT(req, &sc->ev_stats[id], sizeof(sc->ev_stats[id])));
}
/*
 * Register an "ev_<name>" read-only sysctl under the stats node for each
 * event-queue statistic known to the common code.
 */
static void
sfxge_ev_stat_init(struct sfxge_softc *sc)
{
        struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
        struct sysctl_oid_list *stat_list;
        unsigned int id;
        char name[40];
        stat_list = SYSCTL_CHILDREN(sc->stats_node);
        for (id = 0; id < EV_NQSTATS; id++) {
                snprintf(name, sizeof(name), "ev_%s",
                    efx_ev_qstat_name(sc->enp, id));
                SYSCTL_ADD_PROC(
                        ctx, stat_list,
                        OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
                        sc, id, sfxge_ev_stat_handler, "Q",
                        "");
        }
}
/*
 * Apply a new interrupt moderation setting (in microseconds) to one
 * started event queue.
 */
static void
sfxge_ev_qmoderate(struct sfxge_softc *sc, unsigned int idx, unsigned int us)
{
        struct sfxge_evq *evq = sc->evq[idx];

        KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
            ("evq->init_state != SFXGE_EVQ_STARTED"));

        (void)efx_ev_qmoderate(evq->common, us);
}
/*
 * Sysctl handler for the interrupt moderation setting.  Reads return
 * sc->ev_moderation; writes range-check the new value against the
 * common-code maximum and, if interrupts are running, apply it to every
 * event queue immediately.
 */
static int
sfxge_int_mod_handler(SYSCTL_HANDLER_ARGS)
{
        struct sfxge_softc *sc = arg1;
        struct sfxge_intr *intr = &sc->intr;
        unsigned int moderation;
        int error;
        int index;
        sx_xlock(&sc->softc_lock);
        if (req->newptr) {
                if ((error = SYSCTL_IN(req, &moderation, sizeof(moderation)))
                    != 0)
                        goto out;
                /* We may not be calling efx_ev_qmoderate() now,
                 * so we have to range-check the value ourselves.
                 */
                if (moderation >
                    efx_nic_cfg_get(sc->enp)->enc_evq_moderation_max) {
                        error = EINVAL;
                        goto out;
                }
                sc->ev_moderation = moderation;
                if (intr->state == SFXGE_INTR_STARTED) {
                        for (index = 0; index < intr->n_alloc; index++)
                                sfxge_ev_qmoderate(sc, index, moderation);
                }
        } else {
                error = SYSCTL_OUT(req, &sc->ev_moderation,
                    sizeof(sc->ev_moderation));
        }
out:
        sx_xunlock(&sc->softc_lock);
        return error;
}
/*
 * Handle the initialization event posted when an event queue is created;
 * this moves the queue from STARTING to STARTED, which sfxge_ev_qstart()
 * polls for.
 */
static boolean_t
sfxge_ev_initialized(void *arg)
{
        struct sfxge_evq *evq;
        evq = (struct sfxge_evq *)arg;
        KASSERT(evq->init_state == SFXGE_EVQ_STARTING,
            ("evq not starting"));
        evq->init_state = SFXGE_EVQ_STARTED;
        return (0);
}
/* Handle a link-state change event by updating the MAC/ifnet link state. */
static boolean_t
sfxge_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
        struct sfxge_evq *evq;
        struct sfxge_softc *sc;
        evq = (struct sfxge_evq *)arg;
        sc = evq->sc;
        sfxge_mac_link_update(sc, link_mode);
        return (0);
}
/* Dispatch table handed to efx_ev_qpoll() for each event type. */
static const efx_ev_callbacks_t sfxge_ev_callbacks = {
        .eec_initialized        = sfxge_ev_initialized,
        .eec_rx                 = sfxge_ev_rx,
        .eec_tx                 = sfxge_ev_tx,
        .eec_exception          = sfxge_ev_exception,
        .eec_rxq_flush_done     = sfxge_ev_rxq_flush_done,
        .eec_rxq_flush_failed   = sfxge_ev_rxq_flush_failed,
        .eec_txq_flush_done     = sfxge_ev_txq_flush_done,
        .eec_software           = sfxge_ev_software,
        .eec_sram               = sfxge_ev_sram,
        .eec_wake_up            = sfxge_ev_wake_up,
        .eec_timer              = sfxge_ev_timer,
        .eec_link_change        = sfxge_ev_link_change,
};
/*
 * Poll one event queue: process pending hardware events via the callback
 * table, run deferred completion processing, and re-prime the queue for
 * interrupts.  Returns 0 on success, EINVAL if the queue is not in a
 * pollable state, or an error from efx_ev_qprime().
 */
int
sfxge_ev_qpoll(struct sfxge_softc *sc, unsigned int index)
{
        struct sfxge_evq *evq;
        int rc;
        evq = sc->evq[index];
        mtx_lock(&evq->lock);
        if (evq->init_state != SFXGE_EVQ_STARTING &&
            evq->init_state != SFXGE_EVQ_STARTED) {
                rc = EINVAL;
                goto fail;
        }
        /* Synchronize the DMA memory for reading */
        bus_dmamap_sync(evq->mem.esm_tag, evq->mem.esm_map,
            BUS_DMASYNC_POSTREAD);
        KASSERT(evq->rx_done == 0, ("evq->rx_done != 0"));
        KASSERT(evq->tx_done == 0, ("evq->tx_done != 0"));
        KASSERT(evq->txq == NULL, ("evq->txq != NULL"));
        KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));
        /* Poll the queue */
        efx_ev_qpoll(evq->common, &evq->read_ptr, &sfxge_ev_callbacks, evq);
        /* Reset per-poll batch counters for the next poll. */
        evq->rx_done = 0;
        evq->tx_done = 0;
        /* Perform any pending completion processing */
        sfxge_ev_qcomplete(evq, B_TRUE);
        /* Re-prime the event queue for interrupts */
        if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
                goto fail;
        mtx_unlock(&evq->lock);
        return (0);
fail:
        mtx_unlock(&(evq->lock));
        return (rc);
}
/*
 * Stop one event queue: capture its final statistics, destroy the
 * common-code queue object, and release its buffer-table entries.
 * The queue returns to the INITIALIZED state.
 */
static void
sfxge_ev_qstop(struct sfxge_softc *sc, unsigned int index)
{
        struct sfxge_evq *evq;
        evq = sc->evq[index];
        KASSERT(evq->init_state == SFXGE_EVQ_STARTED,
            ("evq->init_state != SFXGE_EVQ_STARTED"));
        mtx_lock(&evq->lock);
        evq->init_state = SFXGE_EVQ_INITIALIZED;
        evq->read_ptr = 0;
        evq->exception = B_FALSE;
        /* Add event counts before discarding the common evq state */
        efx_ev_qstats_update(evq->common, sc->ev_stats);
        efx_ev_qdestroy(evq->common);
        efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
            EFX_EVQ_NBUFS(SFXGE_NEVS));
        mtx_unlock(&evq->lock);
}
/*
 * Start one event queue: clear the ring memory, program the buffer
 * table, create the common-code queue, set moderation, prime it for
 * interrupts, then wait up to ~2s (20 x 100ms) for the firmware's
 * initialization event to move the queue to the STARTED state.
 * Returns 0 on success, ETIMEDOUT if the event never arrives, or an
 * error from the common code; all failures are fully unwound.
 */
static int
sfxge_ev_qstart(struct sfxge_softc *sc, unsigned int index)
{
        struct sfxge_evq *evq;
        efsys_mem_t *esmp;
        int count;
        int rc;
        evq = sc->evq[index];
        esmp = &evq->mem;
        KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
            ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
        /* Clear all events. */
        (void)memset(esmp->esm_base, 0xff, EFX_EVQ_SIZE(SFXGE_NEVS));
        /* Program the buffer table. */
        if ((rc = efx_sram_buf_tbl_set(sc->enp, evq->buf_base_id, esmp,
            EFX_EVQ_NBUFS(SFXGE_NEVS))) != 0)
                return rc;
        /* Create the common code event queue. */
        if ((rc = efx_ev_qcreate(sc->enp, index, esmp, SFXGE_NEVS,
            evq->buf_base_id, &evq->common)) != 0)
                goto fail;
        mtx_lock(&evq->lock);
        /* Set the default moderation */
        (void)efx_ev_qmoderate(evq->common, sc->ev_moderation);
        /* Prime the event queue for interrupts */
        if ((rc = efx_ev_qprime(evq->common, evq->read_ptr)) != 0)
                goto fail2;
        evq->init_state = SFXGE_EVQ_STARTING;
        mtx_unlock(&evq->lock);
        /* Wait for the initialization event */
        count = 0;
        do {
                /* Pause for 100 ms */
                pause("sfxge evq init", hz / 10);
                /* Check to see if the test event has been processed */
                if (evq->init_state == SFXGE_EVQ_STARTED)
                        goto done;
        } while (++count < 20);
        rc = ETIMEDOUT;
        goto fail3;
done:
        return (0);
fail3:
        mtx_lock(&evq->lock);
        evq->init_state = SFXGE_EVQ_INITIALIZED;
fail2:
        mtx_unlock(&evq->lock);
        efx_ev_qdestroy(evq->common);
fail:
        efx_sram_buf_tbl_clear(sc->enp, evq->buf_base_id,
            EFX_EVQ_NBUFS(SFXGE_NEVS));
        return (rc);
}
/*
 * Stop all event queues and tear down the common code event module.
 * Interrupts must still be started when this is called.
 */
void
sfxge_ev_stop(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	efx_nic_t *enp = sc->enp;
	int index;

	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("Interrupts not started"));

	/* Stop the event queue(s), highest index first. */
	for (index = intr->n_alloc - 1; index >= 0; index--)
		sfxge_ev_qstop(sc, index);

	/* Tear down the event module */
	efx_ev_fini(enp);
}
/*
 * Initialize the common code event module and start one event queue per
 * allocated interrupt.  On failure, already-started queues are stopped
 * and the event module is torn down again.
 *
 * Fix: parenthesize the return value (style(9) consistency).
 */
int
sfxge_ev_start(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	int index;
	int rc;

	intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("intr->state != SFXGE_INTR_STARTED"));

	/* Initialize the event module */
	if ((rc = efx_ev_init(sc->enp)) != 0)
		return (rc);

	/* Start the event queues */
	for (index = 0; index < intr->n_alloc; index++) {
		if ((rc = sfxge_ev_qstart(sc, index)) != 0)
			goto fail;
	}

	return (0);

fail:
	/* Stop the event queue(s) */
	while (--index >= 0)
		sfxge_ev_qstop(sc, index);

	/* Tear down the event module */
	efx_ev_fini(sc->enp);

	return (rc);
}
/*
 * Release all resources of event queue 'index': DMA memory, lock and
 * the queue structure itself.  The queue must be stopped.
 */
static void
sfxge_ev_qfini(struct sfxge_softc *sc, unsigned int index)
{
	struct sfxge_evq *evq = sc->evq[index];

	KASSERT(evq->init_state == SFXGE_EVQ_INITIALIZED,
	    ("evq->init_state != SFXGE_EVQ_INITIALIZED"));
	KASSERT(evq->txqs == &evq->txq, ("evq->txqs != &evq->txq"));

	sfxge_dma_free(&evq->mem);

	sc->evq[index] = NULL;

	mtx_destroy(&evq->lock);
	free(evq, M_SFXGE);
}
static int
sfxge_ev_qinit(struct sfxge_softc *sc, unsigned int index)
{
struct sfxge_evq *evq;
efsys_mem_t *esmp;
int rc;
KASSERT(index < SFXGE_RX_SCALE_MAX, ("index >= SFXGE_RX_SCALE_MAX"));
evq = malloc(sizeof(struct sfxge_evq), M_SFXGE, M_ZERO | M_WAITOK);
evq->sc = sc;
evq->index = index;
sc->evq[index] = evq;
esmp = &evq->mem;
/* Initialise TX completion list */
evq->txqs = &evq->txq;
/* Allocate DMA space. */
if ((rc = sfxge_dma_alloc(sc, EFX_EVQ_SIZE(SFXGE_NEVS), esmp)) != 0)
return (rc);
/* Allocate buffer table entries. */
sfxge_sram_buf_tbl_alloc(sc, EFX_EVQ_NBUFS(SFXGE_NEVS),
&evq->buf_base_id);
mtx_init(&evq->lock, "evq", NULL, MTX_DEF);
evq->init_state = SFXGE_EVQ_INITIALIZED;
return (0);
}
/*
 * Tear down all event queues.  Interrupts must be back in the
 * initialized (not started) state.
 */
void
sfxge_ev_fini(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	int index;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	sc->ev_moderation = 0;

	/* Tear down the event queue(s), highest index first. */
	for (index = intr->n_alloc - 1; index >= 0; index--)
		sfxge_ev_qfini(sc, index);
}
/*
 * Create one event queue per allocated interrupt, register the
 * interrupt moderation sysctl and set up event statistics.
 */
int
sfxge_ev_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid *sysctl_tree = device_get_sysctl_tree(sc->dev);
	struct sfxge_intr *intr = &sc->intr;
	int index;
	int rc;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Default interrupt moderation (us); tunable via sysctl. */
	sc->ev_moderation = 30;
	SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "int_mod", CTLTYPE_UINT|CTLFLAG_RW,
	    sc, 0, sfxge_int_mod_handler, "IU",
	    "sfxge interrupt moderation (us)");

	/* One event queue per interrupt. */
	for (index = 0; index < intr->n_alloc; index++) {
		if ((rc = sfxge_ev_qinit(sc, index)) != 0)
			goto fail;
	}

	sfxge_ev_stat_init(sc);

	return (0);

fail:
	/* Unwind the queues created before the failure. */
	while (--index >= 0)
		sfxge_ev_qfini(sc, index);

	return (rc);
}

577
sys/dev/sfxge/sfxge_intr.c Normal file
View File

@ -0,0 +1,577 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/smp.h>
#include <sys/syslog.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "common/efx.h"
#include "sfxge.h"
/*
 * Fast interrupt filter for legacy (INTx) line interrupts.  Runs in
 * primary interrupt context; defers actual event processing to the
 * ithread via FILTER_SCHEDULE_THREAD.
 */
static int
sfxge_intr_line_filter(void *arg)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	efx_nic_t *enp;
	struct sfxge_intr *intr;
	boolean_t fatal;
	uint32_t qmask;

	evq = (struct sfxge_evq *)arg;
	sc = evq->sc;
	enp = sc->enp;
	intr = &sc->intr;

	KASSERT(intr != NULL, ("intr == NULL"));
	KASSERT(intr->type == EFX_INTR_LINE,
	    ("intr->type != EFX_INTR_LINE"));

	/* Not ours unless interrupts are started or under self-test. */
	if (intr->state != SFXGE_INTR_STARTED &&
	    intr->state != SFXGE_INTR_TESTING)
		return FILTER_STRAY;

	/* Self-test: just record that the (single) line fired. */
	if (intr->state == SFXGE_INTR_TESTING) {
		intr->mask |= 1;	/* only one interrupt */
		return FILTER_HANDLED;
	}

	(void)efx_intr_status_line(enp, &fatal, &qmask);

	/* Fatal NIC error: disable interrupts and report it. */
	if (fatal) {
		(void) efx_intr_disable(enp);
		(void) efx_intr_fatal(enp);
		return FILTER_HANDLED;
	}

	/* Events pending: hand off to the ithread (sfxge_intr_line). */
	if (qmask != 0) {
		intr->zero_count = 0;
		return FILTER_SCHEDULE_THREAD;
	}

	/* SF bug 15783: If the function is not asserting its IRQ and
	 * we read the queue mask on the cycle before a flag is added
	 * to the mask, this inhibits the function from asserting the
	 * IRQ even though we don't see the flag set. To work around
	 * this, we must re-prime all event queues and report the IRQ
	 * as handled when we see a mask of zero. To allow for shared
	 * IRQs, we don't repeat this if we see a mask of zero twice
	 * or more in a row.
	 */
	if (intr->zero_count++ == 0) {
		if (evq->init_state == SFXGE_EVQ_STARTED) {
			/* Events may have arrived since the mask read. */
			if (efx_ev_qpending(evq->common, evq->read_ptr))
				return FILTER_SCHEDULE_THREAD;
			efx_ev_qprime(evq->common, evq->read_ptr);
			return FILTER_HANDLED;
		}
	}

	return FILTER_STRAY;
}
/*
 * Ithread handler for legacy line interrupts: poll event queue 0
 * (legacy interrupts only ever use a single queue).
 */
static void
sfxge_intr_line(void *arg)
{
	struct sfxge_evq *evq = arg;

	(void)sfxge_ev_qpoll(evq->sc, 0);
}
/*
 * Handler for MSI/MSI-X message interrupts: one per event queue.
 * Polls the queue matching this message's index.
 */
static void
sfxge_intr_message(void *arg)
{
	struct sfxge_evq *evq;
	struct sfxge_softc *sc;
	efx_nic_t *enp;
	struct sfxge_intr *intr;
	unsigned int index;
	boolean_t fatal;

	evq = (struct sfxge_evq *)arg;
	sc = evq->sc;
	enp = sc->enp;
	intr = &sc->intr;
	index = evq->index;

	KASSERT(intr != NULL, ("intr == NULL"));
	KASSERT(intr->type == EFX_INTR_MESSAGE,
	    ("intr->type != EFX_INTR_MESSAGE"));

	/* Ignore spurious interrupts outside test/started states. */
	if (intr->state != SFXGE_INTR_STARTED &&
	    intr->state != SFXGE_INTR_TESTING)
		return;

	/* Self-test: atomically record that this message fired.  The
	 * CAS loop allows multiple messages to update the shared mask
	 * concurrently without a lock.
	 * NOTE(review): the shifted bit is (1 << index), an int; this
	 * assumes index < 32 even though intr->mask is 64-bit — confirm
	 * against EFX_MAXRSS. */
	if (intr->state == SFXGE_INTR_TESTING) {
		uint64_t mask;

		do {
			mask = intr->mask;
		} while (atomic_cmpset_long(&intr->mask, mask,
		    mask | (1 << index)) == 0);

		return;
	}

	(void)efx_intr_status_message(enp, index, &fatal);

	/* Fatal NIC error: disable interrupts and report it. */
	if (fatal) {
		(void)efx_intr_disable(enp);
		(void)efx_intr_fatal(enp);
		return;
	}

	/* Poll the matching event queue. */
	(void)sfxge_ev_qpoll(sc, index);
}
/*
 * Attach an interrupt filter/handler for each allocated interrupt and
 * bind each one to a CPU.  On failure, handlers set up so far are torn
 * down again.
 *
 * Fix: parenthesize the EINVAL return value (style(9) consistency).
 */
static int
sfxge_intr_bus_enable(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	struct sfxge_intr_hdl *table;
	driver_filter_t *filter;
	driver_intr_t *handler;
	int index;
	int err;

	intr = &sc->intr;
	table = intr->table;

	switch (intr->type) {
	case EFX_INTR_MESSAGE:
		filter = NULL; /* not shared */
		handler = sfxge_intr_message;
		break;

	case EFX_INTR_LINE:
		filter = sfxge_intr_line_filter;
		handler = sfxge_intr_line;
		break;

	default:
		KASSERT(0, ("Invalid interrupt type"));
		return (EINVAL);
	}

	/* Try to add the handlers */
	for (index = 0; index < intr->n_alloc; index++) {
		if ((err = bus_setup_intr(sc->dev, table[index].eih_res,
		    INTR_MPSAFE|INTR_TYPE_NET, filter, handler,
		    sc->evq[index], &table[index].eih_tag)) != 0) {
			goto fail;
		}
#ifdef SFXGE_HAVE_DESCRIBE_INTR
		if (intr->n_alloc > 1)
			bus_describe_intr(sc->dev, table[index].eih_res,
			    table[index].eih_tag, "%d", index);
#endif
		bus_bind_intr(sc->dev, table[index].eih_res, index);
	}

	return (0);

fail:
	/* Remove remaining handlers */
	while (--index >= 0)
		bus_teardown_intr(sc->dev, table[index].eih_res,
		    table[index].eih_tag);

	return (err);
}
/* Detach every interrupt handler set up by sfxge_intr_bus_enable(). */
static void
sfxge_intr_bus_disable(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	struct sfxge_intr_hdl *table = intr->table;
	int i;

	for (i = 0; i < intr->n_alloc; i++)
		bus_teardown_intr(sc->dev, table[i].eih_res,
		    table[i].eih_tag);
}
/*
 * Allocate 'count' IRQ resources (rids 1..count for MSI/MSI-X) and
 * record them in a freshly allocated handle table.
 *
 * Fix: off-by-one in the error cleanup.  The old code set
 * count = i - 1 and then released entries [0, i-2], leaking the
 * resource at index i-1 (entry i itself was never allocated).
 */
static int
sfxge_intr_alloc(struct sfxge_softc *sc, int count)
{
	device_t dev;
	struct sfxge_intr_hdl *table;
	struct sfxge_intr *intr;
	struct resource *res;
	int rid;
	int error;
	int i;

	dev = sc->dev;
	intr = &sc->intr;
	error = 0;

	table = malloc(count * sizeof(struct sfxge_intr_hdl),
	    M_SFXGE, M_WAITOK);
	intr->table = table;

	for (i = 0; i < count; i++) {
		rid = i + 1;
		res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_SHAREABLE | RF_ACTIVE);
		if (res == NULL) {
			device_printf(dev, "Couldn't allocate interrupts for "
			    "message %d\n", rid);
			error = ENOMEM;
			break;
		}
		table[i].eih_rid = rid;
		table[i].eih_res = res;
	}

	if (error) {
		/* Release every resource allocated before the failure. */
		while (--i >= 0)
			bus_release_resource(dev, SYS_RES_IRQ,
			    table[i].eih_rid, table[i].eih_res);
	}

	return (error);
}
/* Release the memory BAR mapped for the MSI-X table. */
static void
sfxge_intr_teardown_msix(struct sfxge_softc *sc)
{
	struct resource *resp = sc->intr.msix_res;

	bus_release_resource(sc->dev, SYS_RES_MEMORY,
	    rman_get_rid(resp), resp);
}
/*
 * Try to set up MSI-X interrupts: one message per CPU, capped at
 * EFX_MAXRSS.  Maps the BAR holding the MSI-X table, allocates the
 * messages and the IRQ resources.  Returns 0 on success, EINVAL if
 * MSI-X is unavailable, ENOMEM on allocation failure.
 */
static int
sfxge_intr_setup_msix(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	struct resource *resp;
	device_t dev = sc->dev;
	int count;
	int rid;

	/* Check if MSI-X is available. */
	count = pci_msix_count(dev);
	if (count == 0)
		return (EINVAL);

	/* Limit the number of interrupts to the number of CPUs. */
	count = (count > mp_ncpus) ? mp_ncpus : count;

	/* Not very likely these days... */
	count = (count > EFX_MAXRSS) ? EFX_MAXRSS : count;

	/* Map the BAR containing the MSI-X table. */
	rid = PCIR_BAR(4);
	resp = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (resp == NULL)
		return (ENOMEM);

	if (pci_alloc_msix(dev, &count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		return (ENOMEM);
	}

	/* Allocate interrupt handlers. */
	if (sfxge_intr_alloc(sc, count) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, rid, resp);
		pci_release_msi(dev);
		return (ENOMEM);
	}

	intr->type = EFX_INTR_MESSAGE;
	intr->n_alloc = count;
	intr->msix_res = resp;

	return (0);
}
/*
 * Try to set up a single MSI interrupt.  Returns 0 on success, EINVAL
 * if MSI is unavailable, ENOMEM on allocation failure.
 *
 * Fix: drop the unused local 'table' — it read intr->table before
 * sfxge_intr_alloc() ever assigns it, and was never referenced.
 */
static int
sfxge_intr_setup_msi(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr;
	device_t dev;
	int count;
	int error;

	dev = sc->dev;
	intr = &sc->intr;

	/*
	 * Check if MSI is available. All messages must be written to
	 * the same address and on x86 this means the IRQs have the
	 * same CPU affinity. So we only ever allocate 1.
	 */
	count = pci_msi_count(dev) ? 1 : 0;
	if (count == 0)
		return (EINVAL);

	if ((error = pci_alloc_msi(dev, &count)) != 0)
		return (ENOMEM);

	/* Allocate interrupt handler. */
	if (sfxge_intr_alloc(sc, count) != 0) {
		pci_release_msi(dev);
		return (ENOMEM);
	}

	intr->type = EFX_INTR_MESSAGE;
	intr->n_alloc = count;

	return (0);
}
/*
 * Fall back to a legacy (fixed/INTx) interrupt: rid 0, shareable.
 * Returns 0 on success or ENOMEM if the IRQ cannot be allocated.
 */
static int
sfxge_intr_setup_fixed(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	struct sfxge_intr_hdl *table;
	struct resource *res;
	device_t dev = sc->dev;
	int rid = 0;

	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (res == NULL)
		return (ENOMEM);

	/* A single-entry handle table for the one legacy IRQ. */
	table = malloc(sizeof(struct sfxge_intr_hdl), M_SFXGE, M_WAITOK);
	table[0].eih_rid = rid;
	table[0].eih_res = res;

	intr->type = EFX_INTR_LINE;
	intr->n_alloc = 1;
	intr->table = table;

	return (0);
}
/*
 * Human-readable names for the fatal error codes passed to sfxge_err()
 * by the common code; indexed by the 'code' argument.
 * NOTE(review): the leading double underscore makes this a reserved
 * identifier in C — consider renaming (requires updating sfxge_err()).
 */
static const char *const __sfxge_err[] = {
	"",
	"SRAM out-of-bounds",
	"Buffer ID out-of-bounds",
	"Internal memory parity",
	"Receive buffer ownership",
	"Transmit buffer ownership",
	"Receive descriptor ownership",
	"Transmit descriptor ownership",
	"Event queue ownership",
	"Event queue FIFO overflow",
	"Illegal address",
	"SRAM parity"
};
/*
 * Common code callback for fatal hardware errors: log the decoded
 * error name and raw dwords.
 *
 * Fix: bounds-check 'code' before indexing __sfxge_err[]; an
 * unrecognized code from the hardware previously caused an
 * out-of-bounds read.
 */
void
sfxge_err(efsys_identifier_t *arg, unsigned int code, uint32_t dword0,
    uint32_t dword1)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	device_t dev = sc->dev;

	log(LOG_WARNING, "[%s%d] FATAL ERROR: %s (0x%08x%08x)",
	    device_get_name(dev), device_get_unit(dev),
	    (code < sizeof(__sfxge_err) / sizeof(__sfxge_err[0])) ?
	    __sfxge_err[code] : "Unknown",
	    dword1, dword0);
}
/*
 * Stop interrupt delivery: quiesce the NIC, detach the bus handlers
 * and tear down the common code interrupt state, in that order.
 */
void
sfxge_intr_stop(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;

	KASSERT(intr->state == SFXGE_INTR_STARTED,
	    ("Interrupts not started"));

	intr->state = SFXGE_INTR_INITIALIZED;

	/* Disable interrupts at the NIC */
	intr->mask = 0;
	efx_intr_disable(sc->enp);

	/* Disable interrupts at the bus */
	sfxge_intr_bus_disable(sc);

	/* Tear down common code interrupt bits. */
	efx_intr_fini(sc->enp);
}
/*
 * Start interrupt delivery: initialize the common code state, attach
 * the bus handlers and enable interrupts at the NIC.  Passes through
 * the TESTING state while the NIC interrupt test runs.
 */
int
sfxge_intr_start(struct sfxge_softc *sc)
{
	struct sfxge_intr *intr = &sc->intr;
	efsys_mem_t *esmp = &intr->status;
	int rc;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("Interrupts not initialized"));

	/* Zero the status memory. */
	(void)memset(esmp->esm_base, 0, EFX_INTR_SIZE);

	/* Initialize common code interrupt bits. */
	(void)efx_intr_init(sc->enp, intr->type, esmp);

	/* Enable interrupts at the bus */
	if ((rc = sfxge_intr_bus_enable(sc)) != 0) {
		/* Tear down common code interrupt bits. */
		efx_intr_fini(sc->enp);
		intr->state = SFXGE_INTR_INITIALIZED;
		return (rc);
	}

	intr->state = SFXGE_INTR_TESTING;

	/* Enable interrupts at the NIC */
	efx_intr_enable(sc->enp);

	intr->state = SFXGE_INTR_STARTED;

	return (0);
}
/*
 * Release every interrupt-related resource: status DMA memory, IRQ
 * resources, MSI/MSI-X allocations and the handle table.
 */
void
sfxge_intr_fini(struct sfxge_softc *sc)
{
	struct sfxge_intr_hdl *table;
	struct sfxge_intr *intr;
	efsys_mem_t *esmp;
	device_t dev;
	int i;

	dev = sc->dev;
	intr = &sc->intr;
	esmp = &intr->status;
	table = intr->table;

	KASSERT(intr->state == SFXGE_INTR_INITIALIZED,
	    ("intr->state != SFXGE_INTR_INITIALIZED"));

	/* Free DMA memory. */
	sfxge_dma_free(esmp);

	/* Free interrupt handles. */
	for (i = 0; i < intr->n_alloc; i++)
		bus_release_resource(dev, SYS_RES_IRQ,
		    table[i].eih_rid, table[i].eih_res);

	/* A non-zero first rid means MSI or MSI-X was allocated
	 * (legacy line interrupts use rid 0). */
	if (table[0].eih_rid != 0)
		pci_release_msi(dev);

	/* msix_res is only set on the MSI-X path. */
	if (intr->msix_res != NULL)
		sfxge_intr_teardown_msix(sc);

	/* Free the handle table */
	free(table, M_SFXGE);

	/* Clear the interrupt state so a re-init starts from scratch. */
	intr->table = NULL;
	intr->n_alloc = 0;

	/* Clear the interrupt type */
	intr->type = EFX_INTR_INVALID;

	intr->state = SFXGE_INTR_UNINITIALIZED;
}
/*
 * Choose and allocate an interrupt scheme (MSI-X, then MSI, then
 * legacy line interrupts) and allocate DMA memory for the interrupt
 * status block.
 *
 * Fix: propagate the real error from sfxge_dma_alloc() instead of
 * overwriting it with ENOMEM.
 * NOTE(review): on DMA allocation failure the interrupt resources
 * allocated above are not released — confirm against detach handling.
 */
int
sfxge_intr_init(struct sfxge_softc *sc)
{
	device_t dev;
	struct sfxge_intr *intr;
	efsys_mem_t *esmp;
	int rc;

	dev = sc->dev;
	intr = &sc->intr;
	esmp = &intr->status;

	KASSERT(intr->state == SFXGE_INTR_UNINITIALIZED,
	    ("Interrupts already initialized"));

	/* Try to setup MSI-X or MSI interrupts if available. */
	if ((rc = sfxge_intr_setup_msix(sc)) == 0)
		device_printf(dev, "Using MSI-X interrupts\n");
	else if ((rc = sfxge_intr_setup_msi(sc)) == 0)
		device_printf(dev, "Using MSI interrupts\n");
	else if ((rc = sfxge_intr_setup_fixed(sc)) == 0) {
		device_printf(dev, "Using fixed interrupts\n");
	} else {
		device_printf(dev, "Couldn't setup interrupts\n");
		return (ENOMEM);
	}

	/* Set up DMA for interrupts. */
	if ((rc = sfxge_dma_alloc(sc, EFX_INTR_SIZE, esmp)) != 0)
		return (rc);

	intr->state = SFXGE_INTR_INITIALIZED;

	return (0);
}

250
sys/dev/sfxge/sfxge_mcdi.c Normal file
View File

@ -0,0 +1,250 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/condvar.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include "common/efx.h"
#include "common/efx_mcdi.h"
#include "common/efx_regs_mcdi.h"
#include "sfxge.h"
/* MCDI completion polling parameters, all in microseconds: the poll
 * interval backs off exponentially between MIN and MAX; the watchdog
 * bounds the total wait before the request is aborted. */
#define SFXGE_MCDI_POLL_INTERVAL_MIN 10 /* 10us in 1us units */
#define SFXGE_MCDI_POLL_INTERVAL_MAX 100000 /* 100ms in 1us units */
#define SFXGE_MCDI_WATCHDOG_INTERVAL 10000000 /* 10s in 1us units */
/* Acquire exclusive access to MCDI for the duration of a request. */
static void
sfxge_mcdi_acquire(struct sfxge_mcdi *mcdi)
{
mtx_lock(&mcdi->lock);
KASSERT(mcdi->state != SFXGE_MCDI_UNINITIALIZED,
("MCDI not initialized"));
while (mcdi->state != SFXGE_MCDI_INITIALIZED)
(void)cv_wait_sig(&mcdi->cv, &mcdi->lock);
mcdi->state = SFXGE_MCDI_BUSY;
mtx_unlock(&mcdi->lock);
}
/* Release ownership of MCDI on request completion. */
static void
sfxge_mcdi_release(struct sfxge_mcdi *mcdi)
{
mtx_lock(&mcdi->lock);
KASSERT((mcdi->state == SFXGE_MCDI_BUSY ||
mcdi->state == SFXGE_MCDI_COMPLETED),
("MCDI not busy or task not completed"));
mcdi->state = SFXGE_MCDI_INITIALIZED;
cv_broadcast(&mcdi->cv);
mtx_unlock(&mcdi->lock);
}
/* Handle an MCDI watchdog expiry: log it and schedule a NIC reset. */
static void
sfxge_mcdi_timeout(struct sfxge_softc *sc)
{
	device_t dev = sc->dev;

	log(LOG_WARNING, "[%s%d] MC_TIMEOUT", device_get_name(dev),
	    device_get_unit(dev));

	EFSYS_PROBE(mcdi_timeout);

	sfxge_schedule_reset(sc);
}
/*
 * Poll for completion of the in-flight MCDI request, backing off
 * exponentially from SFXGE_MCDI_POLL_INTERVAL_MIN to _MAX.  If the
 * request has not completed within SFXGE_MCDI_WATCHDOG_INTERVAL it is
 * aborted and a NIC reset is scheduled.
 */
static void
sfxge_mcdi_poll(struct sfxge_softc *sc)
{
	efx_nic_t *enp;
	clock_t delay_total;
	clock_t delay_us;
	boolean_t aborted;

	delay_total = 0;
	delay_us = SFXGE_MCDI_POLL_INTERVAL_MIN;
	enp = sc->enp;

	do {
		if (efx_mcdi_request_poll(enp)) {
			EFSYS_PROBE1(mcdi_delay, clock_t, delay_total);
			return;
		}

		if (delay_total > SFXGE_MCDI_WATCHDOG_INTERVAL) {
			aborted = efx_mcdi_request_abort(enp);
			KASSERT(aborted, ("abort failed"));
			sfxge_mcdi_timeout(sc);
			return;
		}

		/* Spin or block depending on delay interval: busy-wait
		 * for sub-millisecond delays, sleep for longer ones. */
		if (delay_us < 1000000)
			DELAY(delay_us);
		else
			pause("mcdi wait", delay_us * hz / 1000000);

		delay_total += delay_us;

		/* Exponentially back off the poll frequency. */
		delay_us = delay_us * 2;
		if (delay_us > SFXGE_MCDI_POLL_INTERVAL_MAX)
			delay_us = SFXGE_MCDI_POLL_INTERVAL_MAX;

	} while (1);
}
/* Common code transport hook: run one MCDI request synchronously,
 * holding exclusive access to the channel for its duration. */
static void
sfxge_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	struct sfxge_mcdi *mcdi = &sc->mcdi;

	sfxge_mcdi_acquire(mcdi);

	/* Issue request and poll for completion. */
	efx_mcdi_request_start(sc->enp, emrp, B_FALSE);
	sfxge_mcdi_poll(sc);

	sfxge_mcdi_release(mcdi);
}
/* Common code hook: an MCDI completion event arrived; mark the
 * in-flight request completed and wake any waiters. */
static void
sfxge_mcdi_ev_cpl(void *arg)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	struct sfxge_mcdi *mcdi = &sc->mcdi;

	mtx_lock(&mcdi->lock);
	KASSERT(mcdi->state == SFXGE_MCDI_BUSY, ("MCDI not busy"));
	mcdi->state = SFXGE_MCDI_COMPLETED;
	cv_broadcast(&mcdi->cv);
	mtx_unlock(&mcdi->lock);
}
/* Common code hook: the management controller rebooted or asserted;
 * log the exception and schedule a NIC reset. */
static void
sfxge_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	device_t dev = sc->dev;
	const char *reason;

	if (eme == EFX_MCDI_EXCEPTION_MC_REBOOT)
		reason = "REBOOT";
	else if (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT)
		reason = "BADASSERT";
	else
		reason = "UNKNOWN";

	log(LOG_WARNING, "[%s%d] MC_%s", device_get_name(dev),
	    device_get_unit(dev), reason);

	EFSYS_PROBE(mcdi_exception);

	sfxge_schedule_reset(sc);
}
/*
 * Set up the MCDI channel: lock, condition variable and the transport
 * callbacks handed to the common code.  Returns 0 on success or an
 * error from efx_mcdi_init().
 */
int
sfxge_mcdi_init(struct sfxge_softc *sc)
{
	efx_nic_t *enp = sc->enp;
	struct sfxge_mcdi *mcdi = &sc->mcdi;
	efx_mcdi_transport_t *emtp = &mcdi->transport;
	int rc;

	KASSERT(mcdi->state == SFXGE_MCDI_UNINITIALIZED,
	    ("MCDI already initialized"));

	mtx_init(&mcdi->lock, "sfxge_mcdi", NULL, MTX_DEF);

	mcdi->state = SFXGE_MCDI_INITIALIZED;

	/* Hook up our transport callbacks. */
	emtp->emt_context = sc;
	emtp->emt_execute = sfxge_mcdi_execute;
	emtp->emt_ev_cpl = sfxge_mcdi_ev_cpl;
	emtp->emt_exception = sfxge_mcdi_exception;

	cv_init(&mcdi->cv, "sfxge_mcdi");

	if ((rc = efx_mcdi_init(enp, emtp)) != 0) {
		mtx_destroy(&mcdi->lock);
		mcdi->state = SFXGE_MCDI_UNINITIALIZED;
		return (rc);
	}

	return (0);
}
/*
 * Tear down the MCDI channel.  Must be called with the channel idle
 * (state INITIALIZED, i.e. no request in flight).
 */
void
sfxge_mcdi_fini(struct sfxge_softc *sc)
{
	struct sfxge_mcdi *mcdi;
	efx_nic_t *enp;
	efx_mcdi_transport_t *emtp;

	enp = sc->enp;
	mcdi = &sc->mcdi;
	emtp = &mcdi->transport;

	mtx_lock(&mcdi->lock);
	KASSERT(mcdi->state == SFXGE_MCDI_INITIALIZED,
	    ("MCDI not initialized"));

	efx_mcdi_fini(enp);
	bzero(emtp, sizeof(*emtp));

	/* NOTE(review): cv_destroy() is invoked with the mutex still
	 * held — safe only because no thread can be sleeping on the cv
	 * once the state check above has passed; confirm. */
	cv_destroy(&mcdi->cv);
	mtx_unlock(&mcdi->lock);

	mtx_destroy(&mcdi->lock);
}

788
sys/dev/sfxge/sfxge_port.c Normal file
View File

@ -0,0 +1,788 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include "common/efx.h"
#include "sfxge.h"
/*
 * Refresh the cached MAC statistics from the periodic DMA buffer,
 * rate-limited to once per second.  Retries for up to 10ms if a DMA
 * update is in progress.  Returns 0 on success (or when skipped),
 * ETIMEDOUT if the DMA never settles, or an error from the common code.
 *
 * Fixes: comment typo ("wduring"); parenthesized return (style(9)).
 */
static int
sfxge_mac_stat_update(struct sfxge_softc *sc)
{
	struct sfxge_port *port = &sc->port;
	efsys_mem_t *esmp = &(port->mac_stats.dma_buf);
	clock_t now;
	unsigned int count;
	int rc;

	mtx_lock(&port->lock);

	if (port->init_state != SFXGE_PORT_STARTED) {
		rc = 0;
		goto out;
	}

	/* Rate-limit updates to once per second. */
	now = ticks;
	if (now - port->mac_stats.update_time < hz) {
		rc = 0;
		goto out;
	}

	port->mac_stats.update_time = now;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
	for (count = 0; count < 100; ++count) {
		EFSYS_PROBE1(wait, unsigned int, count);

		/* Synchronize the DMA memory for reading */
		bus_dmamap_sync(esmp->esm_tag, esmp->esm_map,
		    BUS_DMASYNC_POSTREAD);

		/* Try to update the cached counters */
		if ((rc = efx_mac_stats_update(sc->enp, esmp,
		    port->mac_stats.decode_buf, NULL)) != EAGAIN)
			goto out;

		DELAY(100);
	}

	rc = ETIMEDOUT;
out:
	mtx_unlock(&port->lock);
	return (rc);
}
/*
 * Sysctl handler for a single MAC statistic (arg2 is the stat id):
 * refresh the cache and return the 64-bit counter.
 *
 * Fix: parenthesize return values (style(9) consistency).
 */
static int
sfxge_mac_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	int rc;

	if ((rc = sfxge_mac_stat_update(sc)) != 0)
		return (rc);

	return (SYSCTL_OUT(req,
	    (uint64_t *)sc->port.mac_stats.decode_buf + id,
	    sizeof(uint64_t)));
}
/* Register one read-only sysctl per MAC statistic under the device's
 * stats node, named by the common code. */
static void
sfxge_mac_stat_init(struct sfxge_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(sc->stats_node);
	unsigned int id;

	/* Initialise the named stats */
	for (id = 0; id < EFX_MAC_NSTATS; id++) {
		const char *name = efx_mac_stat_name(sc->enp, id);

		SYSCTL_ADD_PROC(
		    ctx, stat_list,
		    OID_AUTO, name, CTLTYPE_U64|CTLFLAG_RD,
		    sc, id, sfxge_mac_stat_handler, "Q",
		    "");
	}
}
#ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS
/* Derive the requested flow-control settings from the current ifmedia
 * word: autoselect means full flow control, otherwise map the
 * RX/TX pause media options to the common code flags. */
static unsigned int
sfxge_port_wanted_fc(struct sfxge_softc *sc)
{
	struct ifmedia_entry *ifm = sc->media.ifm_cur;

	if (ifm->ifm_media == (IFM_ETHER | IFM_AUTO))
		return EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
	return ((ifm->ifm_media & IFM_ETH_RXPAUSE) ? EFX_FCNTL_RESPOND : 0) |
	    ((ifm->ifm_media & IFM_ETH_TXPAUSE) ? EFX_FCNTL_GENERATE : 0);
}

/* Report the negotiated link flow-control state as ifmedia options. */
static unsigned int
sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
{
	unsigned int wanted_fc, link_fc;

	efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc);
	return ((link_fc & EFX_FCNTL_RESPOND) ? IFM_ETH_RXPAUSE : 0) |
	    ((link_fc & EFX_FCNTL_GENERATE) ? IFM_ETH_TXPAUSE : 0);
}
#else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */

/* Without pause media options, flow control is configured directly
 * through the port's wanted_fc field (exposed via sysctl below). */
static unsigned int
sfxge_port_wanted_fc(struct sfxge_softc *sc)
{
	return sc->port.wanted_fc;
}

/* No media-option representation of link flow control is available. */
static unsigned int
sfxge_port_link_fc_ifm(struct sfxge_softc *sc)
{
	return 0;
}

/* Sysctl handler to read or set the requested flow-control mode; a
 * new setting is pushed to the MAC only if the port is started. */
static int
sfxge_port_wanted_fc_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc;
	struct sfxge_port *port;
	unsigned int fcntl;
	int error;

	sc = arg1;
	port = &sc->port;

	mtx_lock(&port->lock);
	if (req->newptr) {
		if ((error = SYSCTL_IN(req, &fcntl, sizeof(fcntl))) != 0)
			goto out;

		if (port->wanted_fc == fcntl)
			goto out;

		port->wanted_fc = fcntl;

		/* Defer the hardware update until the port starts. */
		if (port->init_state != SFXGE_PORT_STARTED)
			goto out;

		error = efx_mac_fcntl_set(sc->enp, port->wanted_fc, B_TRUE);
	} else {
		error = SYSCTL_OUT(req, &port->wanted_fc,
		    sizeof(port->wanted_fc));
	}

out:
	mtx_unlock(&port->lock);
	return (error);
}

/* Sysctl handler reporting the negotiated link flow-control flags
 * (zero when the port is stopped or the link is down). */
static int
sfxge_port_link_fc_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc;
	struct sfxge_port *port;
	unsigned int wanted_fc, link_fc;
	int error;

	sc = arg1;
	port = &sc->port;

	mtx_lock(&port->lock);
	if (port->init_state == SFXGE_PORT_STARTED && SFXGE_LINK_UP(sc))
		efx_mac_fcntl_get(sc->enp, &wanted_fc, &link_fc);
	else
		link_fc = 0;
	error = SYSCTL_OUT(req, &link_fc, sizeof(link_fc));
	mtx_unlock(&port->lock);

	return (error);
}
#endif /* SFXGE_HAVE_PAUSE_MEDIAOPTS */
/* Link speed per common-code link mode.
 * NOTE(review): the name says kbit but the values are fed directly to
 * if_baudrate (see sfxge_mac_link_update), which conventionally takes
 * bits per second — confirm the intended units. */
static const int sfxge_link_speed_kbit[EFX_LINK_NMODES] = {
	[EFX_LINK_10HDX] = 10000,
	[EFX_LINK_10FDX] = 10000,
	[EFX_LINK_100HDX] = 100000,
	[EFX_LINK_100FDX] = 100000,
	[EFX_LINK_1000HDX] = 1000000,
	[EFX_LINK_1000FDX] = 1000000,
	[EFX_LINK_10000FDX] = 10000000,
};
/*
 * Record a new link mode and, if it changed, push the corresponding
 * link state and baudrate to the network stack.
 */
void
sfxge_mac_link_update(struct sfxge_softc *sc, efx_link_mode_t mode)
{
	struct sfxge_port *port;
	int link_state;

	port = &sc->port;

	/* Nothing to do if the mode is unchanged. */
	if (port->link_mode == mode)
		return;

	port->link_mode = mode;

	/* Push link state update to the OS */
	link_state = (port->link_mode != EFX_LINK_DOWN ?
	    LINK_STATE_UP : LINK_STATE_DOWN);
	/* NOTE(review): table values look like kbit/s while if_baudrate
	 * is conventionally bit/s — confirm units. */
	sc->ifnet->if_baudrate = sfxge_link_speed_kbit[port->link_mode];
	if_link_state_change(sc->ifnet, link_state);
}
/* Taskqueue work: poll the port for a link mode change and propagate
 * it.  May sleep waiting for MCDI completion. */
static void
sfxge_mac_poll_work(void *arg, int npending)
{
	struct sfxge_softc *sc = (struct sfxge_softc *)arg;
	efx_nic_t *enp = sc->enp;
	struct sfxge_port *port = &sc->port;
	efx_link_mode_t mode;

	mtx_lock(&port->lock);

	if (port->init_state == SFXGE_PORT_STARTED) {
		/* This may sleep waiting for MCDI completion */
		(void)efx_port_poll(enp, &mode);
		sfxge_mac_link_update(sc, mode);
	}

	mtx_unlock(&port->lock);
}
static int
sfxge_mac_filter_set_locked(struct sfxge_softc *sc)
{
unsigned int bucket[EFX_MAC_HASH_BITS];
struct ifnet *ifp = sc->ifnet;
struct ifmultiaddr *ifma;
struct sockaddr_dl *sa;
efx_nic_t *enp = sc->enp;
unsigned int index;
int rc;
/* Set promisc-unicast and broadcast filter bits */
if ((rc = efx_mac_filter_set(enp, !!(ifp->if_flags & IFF_PROMISC),
B_TRUE)) != 0)
return rc;
/* Set multicast hash filter */
if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) {
for (index = 0; index < EFX_MAC_HASH_BITS; index++)
bucket[index] = 1;
} else {
/* Broadcast frames also go through the multicast
* filter, and the broadcast address hashes to
* 0xff. */
bucket[0xff] = 1;
IF_ADDR_LOCK(ifp);
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
if (ifma->ifma_addr->sa_family == AF_LINK) {
sa = (struct sockaddr_dl *)ifma->ifma_addr;
index = ether_crc32_le(LLADDR(sa), 6) & 0xff;
bucket[index] = 1;
}
}
IF_ADDR_UNLOCK(ifp);
}
return efx_mac_hash_set(enp, bucket);
}
/*
 * Locked wrapper around sfxge_mac_filter_set_locked(); the port must
 * be started.
 *
 * Fix: parenthesize the return value (style(9) consistency).
 */
int
sfxge_mac_filter_set(struct sfxge_softc *sc)
{
	struct sfxge_port *port = &sc->port;
	int rc;

	KASSERT(port->init_state == SFXGE_PORT_STARTED, ("port not started"));

	mtx_lock(&port->lock);
	rc = sfxge_mac_filter_set_locked(sc);
	mtx_unlock(&port->lock);
	return (rc);
}
/*
 * Stop the port: drain the MAC, stop the periodic statistics DMA and
 * destroy the common code port object.
 */
void
sfxge_port_stop(struct sfxge_softc *sc)
{
	struct sfxge_port *port = &sc->port;
	efx_nic_t *enp = sc->enp;

	mtx_lock(&port->lock);

	KASSERT(port->init_state == SFXGE_PORT_STARTED,
	    ("port not started"));

	port->init_state = SFXGE_PORT_INITIALIZED;

	port->mac_stats.update_time = 0;

	/* This may call MCDI */
	(void)efx_mac_drain(enp, B_TRUE);

	(void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf, 0, B_FALSE);

	port->link_mode = EFX_LINK_UNKNOWN;

	/* Destroy the common code port object. */
	efx_port_fini(sc->enp);

	mtx_unlock(&port->lock);
}
/*
 * Start the port: create the common code port object, program MTU,
 * flow control, MAC address and filters, start periodic statistics
 * DMA and un-drain the MAC.
 *
 * Fix: a failure of efx_mac_addr_set() jumped to 'fail', skipping
 * efx_port_fini() and leaking the common code port object; it must
 * unwind through 'fail2' like the other post-init failures.
 */
int
sfxge_port_start(struct sfxge_softc *sc)
{
	uint8_t mac_addr[ETHER_ADDR_LEN];
	struct ifnet *ifp = sc->ifnet;
	struct sfxge_port *port;
	efx_nic_t *enp;
	size_t pdu;
	int rc;

	port = &sc->port;
	enp = sc->enp;

	mtx_lock(&port->lock);

	KASSERT(port->init_state == SFXGE_PORT_INITIALIZED,
	    ("port not initialized"));

	/* Initialize the port object in the common code. */
	if ((rc = efx_port_init(sc->enp)) != 0)
		goto fail;

	/* Set the SDU */
	pdu = EFX_MAC_PDU(ifp->if_mtu);
	if ((rc = efx_mac_pdu_set(enp, pdu)) != 0)
		goto fail2;

	if ((rc = efx_mac_fcntl_set(enp, sfxge_port_wanted_fc(sc), B_TRUE))
	    != 0)
		goto fail2;

	/* Set the unicast address */
	IF_ADDR_LOCK(ifp);
	bcopy(LLADDR((struct sockaddr_dl *)ifp->if_addr->ifa_addr),
	    mac_addr, sizeof(mac_addr));
	IF_ADDR_UNLOCK(ifp);
	if ((rc = efx_mac_addr_set(enp, mac_addr)) != 0)
		goto fail2;

	sfxge_mac_filter_set_locked(sc);

	/* Update MAC stats by DMA every second */
	if ((rc = efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
	    1000, B_FALSE)) != 0)
		goto fail2;

	if ((rc = efx_mac_drain(enp, B_FALSE)) != 0)
		goto fail3;

	if ((rc = efx_phy_adv_cap_set(sc->enp, sc->media.ifm_cur->ifm_data))
	    != 0)
		goto fail4;

	port->init_state = SFXGE_PORT_STARTED;

	/* Single poll in case there were missing initial events */
	mtx_unlock(&port->lock);
	sfxge_mac_poll_work(sc, 0);

	return (0);

fail4:
	(void)efx_mac_drain(enp, B_TRUE);
fail3:
	(void)efx_mac_stats_periodic(enp, &port->mac_stats.dma_buf,
	    0, B_FALSE);
fail2:
	efx_port_fini(sc->enp);
fail:
	mtx_unlock(&port->lock);

	return (rc);
}
/*
 * Refresh the cached PHY statistics from the DMA buffer.
 *
 * Rate-limited to once per second; a no-op (returning 0) when the port
 * is not started or the cache is still fresh.  Returns 0 on success,
 * ETIMEDOUT if the hardware did not finish its statistics DMA within
 * ~10ms, or another errno from the common code.
 */
static int
sfxge_phy_stat_update(struct sfxge_softc *sc)
{
	struct sfxge_port *port = &sc->port;
	efsys_mem_t *esmp = &port->phy_stats.dma_buf;
	clock_t now;
	unsigned int count;
	int rc;

	mtx_lock(&port->lock);

	if (port->init_state != SFXGE_PORT_STARTED) {
		rc = 0;
		goto out;
	}

	now = ticks;
	/* Serve from the cache if it was updated within the last second. */
	if (now - port->phy_stats.update_time < hz) {
		rc = 0;
		goto out;
	}

	port->phy_stats.update_time = now;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
	for (count = 0; count < 100; ++count) {
		EFSYS_PROBE1(wait, unsigned int, count);

		/* Synchronize the DMA memory for reading */
		bus_dmamap_sync(esmp->esm_tag, esmp->esm_map,
		    BUS_DMASYNC_POSTREAD);

		/* Try to update the cached counters */
		if ((rc = efx_phy_stats_update(sc->enp, esmp,
		    port->phy_stats.decode_buf)) != EAGAIN)
			goto out;

		DELAY(100);
	}

	rc = ETIMEDOUT;
out:
	mtx_unlock(&port->lock);
	return (rc);
}
/*
 * Sysctl handler for a single PHY statistic.
 *
 * arg1 is the softc, arg2 the statistic index into the decode buffer.
 * Refreshes the cached statistics (rate-limited) and copies out one
 * 32-bit counter.
 */
static int
sfxge_phy_stat_handler(SYSCTL_HANDLER_ARGS)
{
	struct sfxge_softc *sc = arg1;
	unsigned int id = arg2;
	int rc;

	if ((rc = sfxge_phy_stat_update(sc)) != 0)
		return (rc);

	return (SYSCTL_OUT(req,
	    (uint32_t *)sc->port.phy_stats.decode_buf + id,
	    sizeof(uint32_t)));
}
/*
 * Create one read-only sysctl node under the device's statistics node
 * for each PHY statistic the NIC supports.
 */
static void
sfxge_phy_stat_init(struct sfxge_softc *sc)
{
	uint64_t mask = efx_nic_cfg_get(sc->enp)->enc_phy_stat_mask;
	struct sysctl_ctx_list *sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(sc->stats_node);
	unsigned int stat;

	/* Register only the statistics present in the capability mask. */
	for (stat = 0; stat < EFX_PHY_NSTATS; stat++) {
		if ((mask & ((uint64_t)1 << stat)) == 0)
			continue;
		SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO,
		    efx_phy_stat_name(sc->enp, stat),
		    CTLTYPE_UINT|CTLFLAG_RD,
		    sc, stat, sfxge_phy_stat_handler,
		    stat == EFX_PHY_STAT_OUI ? "IX" : "IU",
		    "");
	}
}
/*
 * Tear down the port: release statistics buffers and the port lock.
 * The port must be in SFXGE_PORT_INITIALIZED state (i.e. stopped).
 */
void
sfxge_port_fini(struct sfxge_softc *sc)
{
	struct sfxge_port *port = &sc->port;

	KASSERT(port->init_state == SFXGE_PORT_INITIALIZED,
	    ("Port not initialized"));

	port->init_state = SFXGE_PORT_UNINITIALIZED;
	port->link_mode = EFX_LINK_UNKNOWN;

	/* Release PHY statistics resources. */
	sfxge_dma_free(&port->phy_stats.dma_buf);
	free(port->phy_stats.decode_buf, M_SFXGE);

	/* Release MAC statistics resources. */
	sfxge_dma_free(&port->mac_stats.dma_buf);
	free(port->mac_stats.decode_buf, M_SFXGE);

	mtx_destroy(&port->lock);
	port->sc = NULL;
}
/*
 * One-time port setup: allocate PHY and MAC statistics buffers,
 * register the statistics and (if needed) flow-control sysctls, and
 * move the port to SFXGE_PORT_INITIALIZED state.
 *
 * Returns 0 or an errno value; on failure all allocations are unwound
 * via the goto-cleanup chain.
 */
int
sfxge_port_init(struct sfxge_softc *sc)
{
	struct sfxge_port *port;
	struct sysctl_ctx_list *sysctl_ctx;
	struct sysctl_oid *sysctl_tree;
	efsys_mem_t *mac_stats_buf, *phy_stats_buf;
	int rc;

	port = &sc->port;
	mac_stats_buf = &port->mac_stats.dma_buf;
	phy_stats_buf = &port->phy_stats.dma_buf;

	KASSERT(port->init_state == SFXGE_PORT_UNINITIALIZED,
	    ("Port already initialized"));

	port->sc = sc;

	mtx_init(&port->lock, "sfxge_port", NULL, MTX_DEF);

	/* Allocate the PHY statistics decode and DMA buffers. */
	port->phy_stats.decode_buf = malloc(EFX_PHY_NSTATS * sizeof(uint32_t),
	    M_SFXGE, M_WAITOK | M_ZERO);
	if ((rc = sfxge_dma_alloc(sc, EFX_PHY_STATS_SIZE, phy_stats_buf)) != 0)
		goto fail;
	bzero(phy_stats_buf->esm_base, phy_stats_buf->esm_size);
	sfxge_phy_stat_init(sc);

	sysctl_ctx = device_get_sysctl_ctx(sc->dev);
	sysctl_tree = device_get_sysctl_tree(sc->dev);

#ifndef SFXGE_HAVE_PAUSE_MEDIAOPTS
	/* If flow control cannot be configured or reported through
	 * ifmedia, provide sysctls for it. */
	port->wanted_fc = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
	SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "wanted_fc", CTLTYPE_UINT|CTLFLAG_RW, sc, 0,
	    sfxge_port_wanted_fc_handler, "IU", "wanted flow control mode");
	SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
	    "link_fc", CTLTYPE_UINT|CTLFLAG_RD, sc, 0,
	    sfxge_port_link_fc_handler, "IU", "link flow control mode");
#endif

	/* Allocate the MAC statistics decode and DMA buffers. */
	port->mac_stats.decode_buf = malloc(EFX_MAC_NSTATS * sizeof(uint64_t),
	    M_SFXGE, M_WAITOK | M_ZERO);
	if ((rc = sfxge_dma_alloc(sc, EFX_MAC_STATS_SIZE, mac_stats_buf)) != 0)
		goto fail2;
	bzero(mac_stats_buf->esm_base, mac_stats_buf->esm_size);
	sfxge_mac_stat_init(sc);

	port->init_state = SFXGE_PORT_INITIALIZED;

	return (0);

fail2:
	free(port->mac_stats.decode_buf, M_SFXGE);
	sfxge_dma_free(phy_stats_buf);
fail:
	free(port->phy_stats.decode_buf, M_SFXGE);
	/* mtx_destroy() returns void; the old "(void)" cast was spurious. */
	mtx_destroy(&port->lock);
	port->sc = NULL;
	return (rc);
}
/*
 * Map (PHY medium type, firmware link mode) to the corresponding
 * ifmedia word.  Entries left zero mean the combination is not
 * supported; sfxge_port_ifmedia_init() skips them when building the
 * media list.
 */
static int sfxge_link_mode[EFX_PHY_MEDIA_NTYPES][EFX_LINK_NMODES] = {
	[EFX_PHY_MEDIA_CX4] = {
		[EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_CX4,
	},
	[EFX_PHY_MEDIA_KX4] = {
		[EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_KX4,
	},
	[EFX_PHY_MEDIA_XFP] = {
		/* Don't know the module type, but assume SR for now. */
		[EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR,
	},
	[EFX_PHY_MEDIA_SFP_PLUS] = {
		/* Don't know the module type, but assume SX/SR for now. */
		[EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_SX,
		[EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_SR,
	},
	[EFX_PHY_MEDIA_BASE_T] = {
		[EFX_LINK_10HDX] = IFM_ETHER | IFM_HDX | IFM_10_T,
		[EFX_LINK_10FDX] = IFM_ETHER | IFM_FDX | IFM_10_T,
		[EFX_LINK_100HDX] = IFM_ETHER | IFM_HDX | IFM_100_TX,
		[EFX_LINK_100FDX] = IFM_ETHER | IFM_FDX | IFM_100_TX,
		[EFX_LINK_1000HDX] = IFM_ETHER | IFM_HDX | IFM_1000_T,
		[EFX_LINK_1000FDX] = IFM_ETHER | IFM_FDX | IFM_1000_T,
		[EFX_LINK_10000FDX] = IFM_ETHER | IFM_FDX | IFM_10G_T,
	},
};
/*
 * ifmedia status callback: report link validity, and when the
 * interface is running with link up, the active media word including
 * flow-control options.
 */
static void
sfxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sfxge_softc *sc = ifp->if_softc;
	efx_phy_media_type_t medium;
	efx_link_mode_t link_mode;

	sx_xlock(&sc->softc_lock);

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (SFXGE_RUNNING(sc) && SFXGE_LINK_UP(sc)) {
		efx_phy_media_type_get(sc->enp, &medium);
		link_mode = sc->port.link_mode;
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= sfxge_link_mode[medium][link_mode];
		ifmr->ifm_active |= sfxge_port_link_fc_ifm(sc);
	}

	sx_xunlock(&sc->softc_lock);
}
/*
 * ifmedia change callback: push the currently selected media entry's
 * flow-control and advertised-capability settings to the hardware.
 * A no-op (returning 0) when the interface is not running.
 */
static int
sfxge_media_change(struct ifnet *ifp)
{
	struct sfxge_softc *sc = ifp->if_softc;
	struct ifmedia_entry *ifm = sc->media.ifm_cur;
	int rc = 0;

	sx_xlock(&sc->softc_lock);

	if (SFXGE_RUNNING(sc)) {
		rc = efx_mac_fcntl_set(sc->enp, sfxge_port_wanted_fc(sc),
		    B_TRUE);
		if (rc == 0)
			rc = efx_phy_adv_cap_set(sc->enp, ifm->ifm_data);
	}

	sx_xunlock(&sc->softc_lock);
	return rc;
}
/*
 * Build the ifmedia list for the interface from the PHY's permanent
 * capability mask.
 *
 * NOTE(review): this transiently initialises the NIC and port objects
 * in the common code purely to query PHY capabilities, then tears both
 * down again before returning.  Returns 0 or an errno value.
 */
int sfxge_port_ifmedia_init(struct sfxge_softc *sc)
{
	efx_phy_media_type_t medium_type;
	uint32_t cap_mask, mode_cap_mask;
	efx_link_mode_t mode;
	int mode_ifm, best_mode_ifm = 0;
	int rc;

	/* We need port state to initialise the ifmedia list. */
	if ((rc = efx_nic_init(sc->enp)) != 0)
		goto out;
	if ((rc = efx_port_init(sc->enp)) != 0)
		goto out2;

	/*
	 * Register ifconfig callbacks for querying and setting the
	 * link mode and link status.
	 */
	ifmedia_init(&sc->media, IFM_IMASK, sfxge_media_change,
	    sfxge_media_status);

	/*
	 * Map firmware medium type and capabilities to ifmedia types.
	 * ifmedia does not distinguish between forcing the link mode
	 * and disabling auto-negotiation.  1000BASE-T and 10GBASE-T
	 * require AN even if only one link mode is enabled, and for
	 * 100BASE-TX it is useful even if the link mode is forced.
	 * Therefore we never disable auto-negotiation.
	 *
	 * Also enable and advertise flow control by default.
	 */

	efx_phy_media_type_get(sc->enp, &medium_type);
	efx_phy_adv_cap_get(sc->enp, EFX_PHY_CAP_PERM, &cap_mask);

	/*
	 * The loop below relies on each link-mode enum value being
	 * exactly one more than the matching PHY capability bit index.
	 */
	EFX_STATIC_ASSERT(EFX_LINK_10HDX == EFX_PHY_CAP_10HDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_10FDX == EFX_PHY_CAP_10FDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_100HDX == EFX_PHY_CAP_100HDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_100FDX == EFX_PHY_CAP_100FDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_1000HDX == EFX_PHY_CAP_1000HDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_1000FDX == EFX_PHY_CAP_1000FDX + 1);
	EFX_STATIC_ASSERT(EFX_LINK_10000FDX == EFX_PHY_CAP_10000FDX + 1);

	for (mode = EFX_LINK_10HDX; mode <= EFX_LINK_10000FDX; mode++) {
		mode_cap_mask = 1 << (mode - 1);
		mode_ifm = sfxge_link_mode[medium_type][mode];

		/* Skip modes the PHY can't do or we have no ifmedia word for. */
		if ((cap_mask & mode_cap_mask) && mode_ifm) {
			mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_AN);

#ifdef SFXGE_HAVE_PAUSE_MEDIAOPTS
			/* No flow-control */
			ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);

			/* Respond-only.  If using AN, we implicitly
			 * offer symmetric as well, but that doesn't
			 * mean we *have* to generate pause frames.
			 */
			mode_cap_mask |= cap_mask & ((1 << EFX_PHY_CAP_PAUSE) |
			    (1 << EFX_PHY_CAP_ASYM));
			mode_ifm |= IFM_ETH_RXPAUSE;
			ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);

			/* Symmetric */
			mode_cap_mask &= ~(1 << EFX_PHY_CAP_ASYM);
			mode_ifm |= IFM_ETH_TXPAUSE;
#else /* !SFXGE_HAVE_PAUSE_MEDIAOPTS */
			mode_cap_mask |= cap_mask & (1 << EFX_PHY_CAP_PAUSE);
#endif
			ifmedia_add(&sc->media, mode_ifm, mode_cap_mask, NULL);

			/* Link modes are numbered in order of speed,
			 * so assume the last one available is the best.
			 */
			best_mode_ifm = mode_ifm;
		}
	}

	if (cap_mask & (1 << EFX_PHY_CAP_AN)) {
		/* Add autoselect mode. */
		mode_ifm = IFM_ETHER | IFM_AUTO;
		ifmedia_add(&sc->media, mode_ifm,
		    cap_mask & ~(1 << EFX_PHY_CAP_ASYM), NULL);
		best_mode_ifm = mode_ifm;
	}

	if (best_mode_ifm)
		ifmedia_set(&sc->media, best_mode_ifm);

	/* Now discard port state until interface is started. */
	efx_port_fini(sc->enp);
out2:
	efx_nic_fini(sc->enp);
out:
	return rc;
}

1233
sys/dev/sfxge/sfxge_rx.c Normal file

File diff suppressed because it is too large Load Diff

189
sys/dev/sfxge/sfxge_rx.h Normal file
View File

@ -0,0 +1,189 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SFXGE_RX_H
#define _SFXGE_RX_H
/*
 * Software ("magic") event codes posted to event queues by the driver
 * itself.  Bit 15 distinguishes them from hardware events; the low
 * SFXGE_MAGIC_DMAQ_LABEL_WIDTH bits carry the DMA queue label and the
 * bits above encode which software event it is.
 */
#define SFXGE_MAGIC_RESERVED 0x8000
#define SFXGE_MAGIC_DMAQ_LABEL_WIDTH 6
#define SFXGE_MAGIC_DMAQ_LABEL_MASK \
	((1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH) - 1)
/* RX queue flush completed. */
#define SFXGE_MAGIC_RX_QFLUSH_DONE \
	(SFXGE_MAGIC_RESERVED | (1 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
/* RX queue flush failed. */
#define SFXGE_MAGIC_RX_QFLUSH_FAILED \
	(SFXGE_MAGIC_RESERVED | (2 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
/* RX queue needs refilling. */
#define SFXGE_MAGIC_RX_QREFILL \
	(SFXGE_MAGIC_RESERVED | (3 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
/* TX queue flush completed. */
#define SFXGE_MAGIC_TX_QFLUSH_DONE \
	(SFXGE_MAGIC_RESERVED | (4 << SFXGE_MAGIC_DMAQ_LABEL_WIDTH))
#define SFXGE_RX_SCALE_MAX EFX_MAXRSS
/*
 * Software descriptor for one RX buffer.
 */
struct sfxge_rx_sw_desc {
	struct mbuf *mbuf;	/* mbuf holding the receive buffer */
	bus_dmamap_t map;	/* DMA map for the mbuf data */
	int flags;
	int size;
};
/**
* struct sfxge_lro_conn - Connection state for software LRO
* @link: Link for hash table and free list.
* @active_link: Link for active_conns list
* @l2_id: Identifying information from layer 2
* @conn_hash: Hash of connection 4-tuple
* @nh: IP (v4 or v6) header of super-packet
* @source: Source TCP port number
* @dest: Destination TCP port number
* @n_in_order_pkts: Number of in-order packets with payload.
* @next_seq: Next in-order sequence number.
* @last_pkt_ticks: Time we last saw a packet on this connection.
* @mbuf: The mbuf we are currently holding.
* If %NULL, then all following fields are undefined.
* @mbuf_tail: The tail of the frag_list of mbufs we're holding.
* Only valid after at least one merge.
* @th_last: The TCP header of the last packet merged.
* @next_buf: The next RX buffer to process.
* @next_eh: Ethernet header of the next buffer.
* @next_nh: IP header of the next buffer.
* @delivered: True if we've delivered a payload packet up this interrupt.
*/
struct sfxge_lro_conn {
TAILQ_ENTRY(sfxge_lro_conn) link;
LIST_ENTRY(sfxge_lro_conn) active_link;
uint16_t l2_id;
uint32_t conn_hash;
void *nh;
uint16_t source, dest;
int n_in_order_pkts;
unsigned next_seq;
unsigned last_pkt_ticks;
struct mbuf *mbuf;
struct mbuf *mbuf_tail;
struct tcphdr *th_last;
struct sfxge_rx_sw_desc next_buf;
void *next_eh;
void *next_nh;
int delivered;
};
/**
* struct sfxge_lro_state - Port state for software LRO
* @sc: The associated NIC.
* @conns_mask: Number of hash buckets - 1.
* @conns: Hash buckets for tracked connections.
* @conns_n: Length of linked list for each hash bucket.
* @active_conns: Connections that are holding a packet.
* Connections are self-linked when not in this list.
* @free_conns: Free sfxge_lro_conn instances.
* @last_purge_ticks: The value of ticks last time we purged idle
* connections.
* @n_merges: Number of packets absorbed by LRO.
* @n_bursts: Number of bursts spotted by LRO.
* @n_slow_start: Number of packets not merged because connection may be in
* slow-start.
* @n_misorder: Number of out-of-order packets seen in tracked streams.
* @n_too_many: Incremented when we're trying to track too many streams.
* @n_new_stream: Number of distinct streams we've tracked.
* @n_drop_idle: Number of streams discarded because they went idle.
* @n_drop_closed: Number of streams that have seen a FIN or RST.
*/
struct sfxge_lro_state {
struct sfxge_softc *sc;
unsigned conns_mask;
TAILQ_HEAD(sfxge_lro_tailq, sfxge_lro_conn) *conns;
unsigned *conns_n;
LIST_HEAD(, sfxge_lro_conn) active_conns;
TAILQ_HEAD(, sfxge_lro_conn) free_conns;
unsigned last_purge_ticks;
unsigned n_merges;
unsigned n_bursts;
unsigned n_slow_start;
unsigned n_misorder;
unsigned n_too_many;
unsigned n_new_stream;
unsigned n_drop_idle;
unsigned n_drop_closed;
};
enum sfxge_flush_state {
SFXGE_FLUSH_DONE = 0,
SFXGE_FLUSH_PENDING,
SFXGE_FLUSH_FAILED
};
enum sfxge_rxq_state {
SFXGE_RXQ_UNINITIALIZED = 0,
SFXGE_RXQ_INITIALIZED,
SFXGE_RXQ_STARTED
};
#define SFXGE_RX_BATCH 128
/*
 * State for one receive queue.  Fields are grouped and cache-line
 * aligned by access pattern (rarely-written, producer-side,
 * completion-side).
 */
struct sfxge_rxq {
	struct sfxge_softc *sc __aligned(CACHE_LINE_SIZE);
	unsigned int index;		/* queue index within the softc */
	efsys_mem_t mem;		/* descriptor ring DMA memory */
	unsigned int buf_base_id;
	enum sfxge_rxq_state init_state;

	struct sfxge_rx_sw_desc *queue __aligned(CACHE_LINE_SIZE);
	/*
	 * NOTE(review): added/pending/completed appear to be free-running
	 * descriptor counters (producer/consumer indices) — confirm
	 * against sfxge_rx.c before relying on exact semantics.
	 */
	unsigned int added;
	unsigned int pending;
	unsigned int completed;
	unsigned int loopback;
	struct sfxge_lro_state lro;	/* software LRO state (see above) */
	struct callout refill_callout;	/* deferred refill timer */
	unsigned int refill_delay;

	efx_rxq_t *common __aligned(CACHE_LINE_SIZE);
	volatile enum sfxge_flush_state flush_state;
};
/*
* From sfxge_rx.c.
*/
extern int sfxge_rx_init(struct sfxge_softc *sc);
extern void sfxge_rx_fini(struct sfxge_softc *sc);
extern int sfxge_rx_start(struct sfxge_softc *sc);
extern void sfxge_rx_stop(struct sfxge_softc *sc);
extern void sfxge_rx_qcomplete(struct sfxge_rxq *rxq, boolean_t eop);
extern void sfxge_rx_qrefill(struct sfxge_rxq *rxq);
extern void sfxge_rx_qflush_done(struct sfxge_rxq *rxq);
extern void sfxge_rx_qflush_failed(struct sfxge_rxq *rxq);
extern void sfxge_rx_scale_update(void *arg, int npending);
#endif

1491
sys/dev/sfxge/sfxge_tx.c Normal file

File diff suppressed because it is too large Load Diff

185
sys/dev/sfxge/sfxge_tx.h Normal file
View File

@ -0,0 +1,185 @@
/*-
* Copyright (c) 2010-2011 Solarflare Communications, Inc.
* All rights reserved.
*
* This software was developed in part by Philip Paeps under contract for
* Solarflare Communications, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SFXGE_TX_H
#define _SFXGE_TX_H
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
/* Maximum number of DMA segments needed to map an mbuf chain. With
* TSO, the mbuf length may be just over 64K, divided into 2K mbuf
* clusters. (The chain could be longer than this initially, but can
* be shortened with m_collapse().)
*/
#define SFXGE_TX_MAPPING_MAX_SEG (64 / 2 + 1)
/* Maximum number of DMA segments needed to map an output packet. It
* could overlap all mbufs in the chain and also require an extra
* segment for a TSO header.
*/
#define SFXGE_TX_PACKET_MAX_SEG (SFXGE_TX_MAPPING_MAX_SEG + 1)
/*
* Buffer mapping flags.
*
* Buffers and DMA mappings must be freed when the last descriptor
* referring to them is completed. Set the TX_BUF_UNMAP and
* TX_BUF_MBUF flags on the last descriptor generated for an mbuf
* chain. Set only the TX_BUF_UNMAP flag on a descriptor referring to
* a heap buffer.
*/
enum sfxge_tx_buf_flags {
TX_BUF_UNMAP = 1,
TX_BUF_MBUF = 2,
};
/*
* Buffer mapping information for descriptors in flight.
*/
struct sfxge_tx_mapping {
union {
struct mbuf *mbuf;
caddr_t heap_buf;
} u;
bus_dmamap_t map;
enum sfxge_tx_buf_flags flags;
};
#define SFXGE_TX_MAX_DEFERRED 64
/*
* Deferred packet list.
*/
struct sfxge_tx_dpl {
uintptr_t std_put; /* Head of put list. */
struct mbuf *std_get; /* Head of get list. */
struct mbuf **std_getp; /* Tail of get list. */
unsigned int std_count; /* Count of packets. */
};
#define SFXGE_TX_BUFFER_SIZE 0x400
#define SFXGE_TX_HEADER_SIZE 0x100
#define SFXGE_TX_COPY_THRESHOLD 0x200
enum sfxge_txq_state {
SFXGE_TXQ_UNINITIALIZED = 0,
SFXGE_TXQ_INITIALIZED,
SFXGE_TXQ_STARTED
};
/*
 * Transmit queue flavours, one per checksum-offload combination.
 */
enum sfxge_txq_type {
	SFXGE_TXQ_NON_CKSUM = 0,	/* no checksum offload */
	SFXGE_TXQ_IP_CKSUM,		/* IP header checksum offload */
	SFXGE_TXQ_IP_TCP_UDP_CKSUM,	/* IP and TCP/UDP checksum offload */
	SFXGE_TXQ_NTYPES
};
#define SFXGE_TXQ_UNBLOCK_LEVEL (EFX_TXQ_LIMIT(SFXGE_NDESCS) / 4)
#define SFXGE_TX_BATCH 64
#ifdef SFXGE_HAVE_MQ
#define SFXGE_TXQ_LOCK(txq) (&(txq)->lock)
#define SFXGE_TX_SCALE(sc) ((sc)->intr.n_alloc)
#else
#define SFXGE_TXQ_LOCK(txq) (&(txq)->sc->tx_lock)
#define SFXGE_TX_SCALE(sc) 1
#endif
struct sfxge_txq {
/* The following fields should be written very rarely */
struct sfxge_softc *sc;
enum sfxge_txq_state init_state;
enum sfxge_flush_state flush_state;
enum sfxge_txq_type type;
unsigned int txq_index;
unsigned int evq_index;
efsys_mem_t mem;
unsigned int buf_base_id;
struct sfxge_tx_mapping *stmp; /* Packets in flight. */
bus_dma_tag_t packet_dma_tag;
efx_buffer_t *pend_desc;
efx_txq_t *common;
struct sfxge_txq *next;
efsys_mem_t *tsoh_buffer;
/* This field changes more often and is read regularly on both
* the initiation and completion paths
*/
int blocked __aligned(CACHE_LINE_SIZE);
/* The following fields change more often, and are used mostly
* on the initiation path
*/
#ifdef SFXGE_HAVE_MQ
struct mtx lock __aligned(CACHE_LINE_SIZE);
struct sfxge_tx_dpl dpl; /* Deferred packet list. */
unsigned int n_pend_desc;
#else
unsigned int n_pend_desc __aligned(CACHE_LINE_SIZE);
#endif
unsigned int added;
unsigned int reaped;
/* Statistics */
unsigned long tso_bursts;
unsigned long tso_packets;
unsigned long tso_long_headers;
unsigned long collapses;
unsigned long drops;
/* The following fields change more often, and are used mostly
* on the completion path
*/
unsigned int pending __aligned(CACHE_LINE_SIZE);
unsigned int completed;
};
extern int sfxge_tx_packet_add(struct sfxge_txq *, struct mbuf *);
extern int sfxge_tx_init(struct sfxge_softc *sc);
extern void sfxge_tx_fini(struct sfxge_softc *sc);
extern int sfxge_tx_start(struct sfxge_softc *sc);
extern void sfxge_tx_stop(struct sfxge_softc *sc);
extern void sfxge_tx_qcomplete(struct sfxge_txq *txq);
extern void sfxge_tx_qflush_done(struct sfxge_txq *txq);
#ifdef SFXGE_HAVE_MQ
extern void sfxge_if_qflush(struct ifnet *ifp);
extern int sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m);
#else
extern void sfxge_if_start(struct ifnet *ifp);
#endif
#endif

View File

@ -275,6 +275,7 @@ SUBDIR= ${_3dfx} \
sem \
send \
sf \
sfxge \
sge \
siba_bwn \
siftr \

View File

@ -0,0 +1,25 @@
# $FreeBSD$
# Kernel module build glue for the sfxge(4) driver.
KMOD=	sfxge

# NOTE(review): SFXGE is not referenced in this fragment — presumably
# used by included infrastructure or vestigial; confirm before removal.
SFXGE=	${.CURDIR}/../../dev/sfxge

# Generated interface and option headers.
SRCS=	device_if.h bus_if.h pci_if.h
SRCS+=	opt_inet.h opt_zero.h opt_sched.h

# OS-interface portion of the driver.
.PATH: ${.CURDIR}/../../dev/sfxge
SRCS+=	sfxge.c sfxge_dma.c sfxge_ev.c
SRCS+=	sfxge_intr.c sfxge_mcdi.c
SRCS+=	sfxge_port.c sfxge_rx.c sfxge_tx.c

# Shared "common code" for SFC9000-family controllers.
.PATH: ${.CURDIR}/../../dev/sfxge/common
SRCS+=	efx_ev.c efx_intr.c efx_mac.c efx_mcdi.c efx_nic.c
SRCS+=	efx_nvram.c efx_phy.c efx_port.c efx_rx.c efx_sram.c efx_tx.c
SRCS+=	efx_vpd.c efx_wol.c

SRCS+=	siena_mac.c siena_nic.c siena_nvram.c siena_phy.c
SRCS+=	siena_sram.c siena_vpd.c

# NOTE(review): debug build flags enabled unconditionally here.
DEBUG_FLAGS=	-g -DDEBUG=1

.include <bsd.kmod.mk>