Neterion Xframe 10GbE Server/Storage adapter driver.

The nxge driver provides support for Neterion Xframe-I and Xframe-II
adapters. The driver supports TCP Segmentation Offload (TSO/LSO),
Jumbo frames (5 buffer mode), Header separation (2 and 3 Receive
buffer modes), VLAN, and Promiscuous mode.

Submitted by:	Neterion
Reviewed by:	rwatson
Approved by:	re (kensmith)
Sam Leffler 2007-06-29 22:47:18 +00:00
parent 3995a80fd6
commit fd3ddbd038
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=171095
54 changed files with 37517 additions and 0 deletions

@@ -246,6 +246,7 @@ MAN= aac.4 \
nsp.4 \
null.4 \
${_nve.4} \
${_nxge.4} \
ohci.4 \
oldcard.4 \
orm.4 \
@@ -480,6 +481,7 @@ MLINKS+=netintro.4 net.4 \
MLINKS+=${_nfe.4} ${_if_nfe.4}
MLINKS+=nge.4 if_nge.4
MLINKS+=${_nve.4} ${_if_nve.4}
MLINKS+=${_nxge.4} ${_if_nxge.4}
MLINKS+=oldcard.4 card.4
MLINKS+=patm.4 if_patm.4
MLINKS+=pccbb.4 cbb.4
@@ -539,10 +541,12 @@ _hptiop.4= hptiop.4
_hptmv.4= hptmv.4
_if_nfe.4= if_nfe.4
_if_nve.4= if_nve.4
_if_nxge.4= if_nxge.4
_ipmi.4= ipmi.4
_nfsmb.4= nfsmb.4
_nfe.4= nfe.4
_nve.4= nve.4
_nxge.4= nxge.4
_rr232x.4= rr232x.4
_spkr.4= spkr.4
_speaker.4= speaker.4

share/man/man4/nxge.4 (new file, 80 lines)

@@ -0,0 +1,80 @@
.\" Copyright (c) 2007, Neterion Inc
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer as
.\" the first lines of this file unmodified.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
.\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
.\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
.\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
.\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
.\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
.\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
.\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd June 28, 2007
.Dt NXGE 4
.Os
.Sh NAME
.Nm nxge
.Nd "Neterion Xframe 10GbE Server/Storage adapter driver"
.Sh SYNOPSIS
To compile this driver into the kernel,
place the following line in your
kernel configuration file:
.Bd -ragged -offset indent
.Cd "device nxge"
.Ed
.Sh DESCRIPTION
The
.Nm
driver provides support for Neterion Xframe-I and Xframe-II adapters.
The driver supports TCP Segmentation Offload (TSO/LSO),
Jumbo frames (5 buffer mode),
Header separation (2 and 3 receive buffer modes),
VLAN, and Promiscuous mode.
.Pp
For general information and support,
please visit the Neterion support page at
.Pa http://www.neterion.com/support/support.html .
.Pp
Jumbo frames are enabled through the interface MTU setting.
Selecting an MTU larger than 1500 bytes with the
.Xr ifconfig 8
utility configures the adapter to transmit and receive Jumbo frames.
Xframe adapters support Jumbo frames up to 9600 bytes.
.Pp
For more information on configuring this device, see
.Xr ifconfig 8 .
.Sh HARDWARE
The
.Nm
driver supports the 10 Gigabit Ethernet adapters listed at
.Pa http://www.neterion.com/how/pricing.html .
.Sh SUPPORT
For troubleshooting tips and FAQs, please visit
.Pa http://trac.neterion.com/cgi-bin/trac.cgi/wiki/TitleIndex?anonymous .
.Pp
For any issues, please send an email to
.Aq support@neterion.com .
.Sh SEE ALSO
.Xr ifconfig 8
.Sh HISTORY
The
.Nm
device driver first appeared in
.Fx 7.0 .
.Sh AUTHORS
The
.Nm
driver was written by
.An Neterion
.Aq support@neterion.com .
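
The man page configures Jumbo frames by raising the interface MTU with
ifconfig(8). For illustration, a minimal user-space sketch of the
equivalent SIOCSIFMTU ioctl; the interface name "nxge0" and the
9000-byte MTU are illustrative assumptions:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

/* Set a jumbo MTU programmatically, equivalent to
 * "ifconfig nxge0 mtu 9000". */
int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "nxge0", sizeof(ifr.ifr_name));
	ifr.ifr_mtu = 9000;	/* Xframe supports Jumbo frames up to 9600 */
	if (ioctl(s, SIOCSIFMTU, &ifr) < 0)
		err(1, "SIOCSIFMTU");
	close(s);
	return (0);
}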

@@ -1905,6 +1905,7 @@ device xl # 3Com 3c90x (``Boomerang'', ``Cyclone'')
# PCI Ethernet NICs.
device de # DEC/Intel DC21x4x (``Tulip'')
device le # AMD Am7900 LANCE and Am79C9xx PCnet
device nxge # Neterion Xframe 10GbE Server/Storage Adapter
device txp # 3Com 3cR990 (``Typhoon'')
device vx # 3Com 3c590, 3c595 (``Vortex'')

@@ -811,6 +811,17 @@ dev/my/if_my.c optional my
dev/ncv/ncr53c500.c optional ncv
dev/ncv/ncr53c500_pccard.c optional ncv pccard
dev/nge/if_nge.c optional nge
dev/nxge/if_nxge.c optional nxge
dev/nxge/xgehal/xgehal-device.c optional nxge
dev/nxge/xgehal/xgehal-mm.c optional nxge
dev/nxge/xgehal/xge-queue.c optional nxge
dev/nxge/xgehal/xgehal-driver.c optional nxge
dev/nxge/xgehal/xgehal-ring.c optional nxge
dev/nxge/xgehal/xgehal-channel.c optional nxge
dev/nxge/xgehal/xgehal-fifo.c optional nxge
dev/nxge/xgehal/xgehal-stats.c optional nxge
dev/nxge/xgehal/xgehal-config.c optional nxge
dev/nxge/xgehal/xgehal-mgmt.c optional nxge
dev/nmdm/nmdm.c optional nmdm
dev/nsp/nsp.c optional nsp
dev/nsp/nsp_pccard.c optional nsp pccard

sys/dev/nxge/if_nxge.c (new file, 3415 lines)

File diff suppressed because it is too large.

sys/dev/nxge/if_nxge.h (new file, 287 lines)

@@ -0,0 +1,287 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* if_xge.h
*/
#ifndef _IF_XGE_H
#define _IF_XGE_H
#include <dev/nxge/include/xgehal.h>
#include <dev/nxge/xge-osdep.h>
#if defined(XGE_FEATURE_TSO) && (__FreeBSD_version < 700026)
#undef XGE_FEATURE_TSO
#endif
#if defined(XGE_FEATURE_LRO)
#if __FreeBSD_version < 700047
#undef XGE_FEATURE_LRO
#undef XGE_HAL_CONFIG_LRO
#else
#define XGE_HAL_CONFIG_LRO
#endif
#endif
#ifdef FUNC_PRINT
#define ENTER_FUNCTION xge_os_printf("Enter\t==>[%s]\n", __FUNCTION__);
#define LEAVE_FUNCTION xge_os_printf("Leave\t<==[%s]\n", __FUNCTION__);
#else
#define ENTER_FUNCTION
#define LEAVE_FUNCTION
#endif
/* Printing description, Copyright */
#define DRIVER_VERSION XGELL_VERSION_MAJOR"." \
XGELL_VERSION_MINOR"." \
XGELL_VERSION_FIX"." \
XGELL_VERSION_BUILD
#define COPYRIGHT_STRING "Copyright(c) 2002-2007 Neterion Inc."
#define PRINT_COPYRIGHT xge_os_printf("%s", COPYRIGHT_STRING)
/* Printing */
#define xge_trace(trace, fmt, args...) xge_debug_ll(trace, fmt, ## args);
#define xge_ctrace(trace, fmt...) xge_debug_ll(trace, fmt);
#define BUFALIGN(buffer_length) \
if((buffer_length % 128) != 0) { \
buffer_length += (128 - (buffer_length % 128)); \
}
static inline void *
xge_malloc(unsigned long size) {
	void *vaddr = malloc(size, M_DEVBUF, M_NOWAIT);
	/* malloc(9) with M_NOWAIT may fail; only zero on success. */
	if (vaddr != NULL)
		bzero(vaddr, size);
	return vaddr;
}
#define SINGLE_ALLOC 0
#define MULTI_ALLOC 1
#define SAVE 0
#define RESTORE 1
#define UP 1
#define DOWN 0
#define XGE_DEFAULT_USER_HARDCODED -1
#define MAX_MBUF_FRAGS 20 /* Maximum number of fragments */
#define MAX_SEGS 100 /* Maximum number of segments */
#define XGELL_TX_LEVEL_LOW 16
#define XGE_RING_COUNT XGE_HAL_MIN_RING_NUM
#define BUFFER_SIZE 20
/* Default values to configuration parameters */
#define XGE_DEFAULT_INITIAL_MTU 1500
#define XGE_DEFAULT_LATENCY_TIMER -1
#define XGE_DEFAULT_MAX_SPLITS_TRANS -1
#define XGE_DEFAULT_MMRB_COUNT -1
#define XGE_DEFAULT_SHARED_SPLITS 0
#define XGE_DEFAULT_ISR_POLLING_CNT 8
#define XGE_DEFAULT_STATS_REFRESH_TIME_SEC 4
#define XGE_DEFAULT_MAC_RMAC_BCAST_EN 1
#define XGE_DEFAULT_MAC_TMAC_UTIL_PERIOD 5
#define XGE_DEFAULT_MAC_RMAC_UTIL_PERIOD 5
#define XGE_DEFAULT_MAC_RMAC_PAUSE_GEN_EN 1
#define XGE_DEFAULT_MAC_RMAC_PAUSE_RCV_EN 1
#define XGE_DEFAULT_MAC_RMAC_PAUSE_TIME 65535
#define XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q0Q3 187
#define XGE_DEFAULT_MAC_MC_PAUSE_THRESHOLD_Q4Q7 187
#define XGE_DEFAULT_FIFO_MEMBLOCK_SIZE PAGE_SIZE
#define XGE_DEFAULT_FIFO_RESERVE_THRESHOLD 0
#define XGE_DEFAULT_FIFO_MAX_FRAGS 64
#define XGE_DEFAULT_FIFO_QUEUE_INTR 0
#define XGE_DEFAULT_FIFO_QUEUE_MAX 2048
#define XGE_DEFAULT_FIFO_QUEUE_INITIAL 2048
#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_A 5
#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_B 10
#define XGE_DEFAULT_FIFO_QUEUE_TTI_URANGE_C 20
#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_A 15
#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_B 30
#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_C 45
#define XGE_DEFAULT_FIFO_QUEUE_TTI_UFC_D 60
#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_CI_EN 1
#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_AC_EN 1
#define XGE_DEFAULT_FIFO_QUEUE_TTI_TIMER_VAL_US 8000
#define XGE_DEFAULT_FIFO_ALIGNMENT_SIZE sizeof(u64)
#define XGE_DEFAULT_RING_MEMBLOCK_SIZE PAGE_SIZE
#define XGE_DEFAULT_RING_STRIP_VLAN_TAG 1
#define XGE_DEFAULT_RING_QUEUE_MAX 16
#define XGE_DEFAULT_RING_QUEUE_INITIAL 16
#define XGE_DEFAULT_RING_QUEUE_DRAM_SIZE_MB 32
#define XGE_DEFAULT_RING_QUEUE_INDICATE_MAX_PKTS 16
#define XGE_DEFAULT_RING_QUEUE_BACKOFF_INTERVAL_US 1000
#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_A 5
#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_B 10
#define XGE_DEFAULT_RING_QUEUE_RTI_URANGE_C 50
#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_A 1
#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_B 8
#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_C 16
#define XGE_DEFAULT_RING_QUEUE_RTI_UFC_D 32
#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN 1
#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US 250
/* Values to identify the requests from getinfo tool in ioctl */
#define XGE_QUERY_STATS 1
#define XGE_QUERY_PCICONF 2
#define XGE_QUERY_INTRSTATS 3
#define XGE_QUERY_DEVCONF 4
#define XGE_READ_VERSION 5
#define XGE_QUERY_TCODE 6
#define XGE_SET_BUFFER_MODE_1 7
#define XGE_SET_BUFFER_MODE_2 8
#define XGE_SET_BUFFER_MODE_3 9
#define XGE_SET_BUFFER_MODE_5 10
#define XGE_QUERY_BUFFER_MODE 11
#define XGE_OFFSET_OF_LAST_REG 0x3180
#define VENDOR_ID_AMD 0x1022
#define DEVICE_ID_8131_PCI_BRIDGE 0x7450
typedef struct mbuf *mbuf_t;
typedef enum xgell_event_e {
XGE_LL_EVENT_TRY_XMIT_AGAIN = XGE_LL_EVENT_BASE + 1,
XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2,
} xgell_event_e;
/* Adapter structure */
typedef struct xgelldev {
device_t device; /* Device */
struct ifnet *ifnetp; /* Interface ifnet structure */
struct resource *irq; /* Resource structure for IRQ */
void *irqhandle; /* IRQ handle */
pci_info_t *pdev;
struct ifmedia xge_media; /* In-kernel representation of a */
/* single supported media type */
xge_hal_device_t *devh; /* HAL: Device Handle */
xge_hal_channel_h ring_channel[XGE_HAL_MAX_FIFO_NUM];
/* Ring channel */
xge_hal_channel_h fifo_channel_0; /* FIFO channel */
struct mtx xge_lock; /* Mutex - Default */
struct callout timer; /* Timer for polling */
struct xge_hal_stats_hw_info_t *hwstats; /* Hardware Statistics */
int saved_regs[16]; /* To save register space */
int xge_mtu; /* MTU */
int initialized; /* Flag: Initialized or not */
bus_dma_tag_t dma_tag_tx; /* Tag for dtr dma mapping (Tx) */
bus_dma_tag_t dma_tag_rx; /* Tag for dtr dma mapping (Rx) */
int all_multicast; /* All multicast flag */
int macaddr_count; /* Multicast address count */
int in_detach; /* To avoid ioctl during detach */
int buffer_mode; /* Buffer Mode */
int rxd_mbuf_cnt; /* Number of buffers used */
int rxd_mbuf_len[5];/* Buffer lengths */
} xgelldev_t;
/* Rx descriptor private structure */
typedef struct {
mbuf_t *bufferArray;
struct xge_dma_mbuf dmainfo[5];
} xgell_rx_priv_t;
/* Tx descriptor private structure */
typedef struct {
mbuf_t buffer;
bus_dmamap_t dma_map;
} xgell_tx_priv_t;
/* BAR0 Register */
typedef struct barregister {
char option[2];
u64 offset;
u64 value;
}bar0reg_t;
void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev);
void xge_init(void *);
void xge_init_locked(void *);
void xge_stop(xgelldev_t *);
void freeResources(device_t, int);
void xgell_callback_link_up(void *);
void xgell_callback_link_down(void *);
void xgell_callback_crit_err(void *, xge_hal_event_e, u64);
void xgell_callback_event(xge_queue_item_t *);
int xge_ifmedia_change(struct ifnet *);
void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
int xge_ioctl(struct ifnet *, unsigned long, caddr_t);
void xge_timer(void *);
int xge_intr_filter(void *);
void xge_intr(void *);
int xgell_rx_open(int, xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_tx_open(xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_channel_close(xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_channel_open(xgelldev_t *, xge_hal_channel_reopen_e);
xge_hal_status_e xgell_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xgell_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xgell_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
xge_hal_status_e xgell_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
void xgell_rx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xgell_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xgell_set_mbuf_cflags(mbuf_t);
void xge_send(struct ifnet *);
void xge_send_locked(struct ifnet *);
int xgell_get_multimode_normalbuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev);
int xgell_get_multimode_jumbobuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev, int lock);
int xgell_get_second_buffer(xgell_rx_priv_t *rxd_priv, xgelldev_t *lldev);
int xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev, int index);
int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv);
int xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev);
void dmamap_cb(void *, bus_dma_segment_t *, int, int);
void xgell_reset(xgelldev_t *);
void xge_setmulti(xgelldev_t *);
void xge_enable_promisc(xgelldev_t *);
void xge_disable_promisc(xgelldev_t *);
int changeMtu(xgelldev_t *, int);
int changeBufmode(xgelldev_t *, int);
void xge_initialize(device_t, xge_hal_channel_reopen_e);
void xge_terminate(device_t, xge_hal_channel_reopen_e);
void if_up_locked(xgelldev_t *);
void if_down_locked(xgelldev_t *);
int xge_probe(device_t);
int xge_driver_initialize(void);
void xge_media_init(device_t);
void xge_pci_space_save(device_t);
void xge_pci_space_restore(device_t);
int xge_attach(device_t);
int xge_interface_setup(device_t);
int xge_detach(device_t);
int xge_shutdown(device_t);
int xge_suspend(device_t);
int xge_resume(device_t);
#endif /* _IF_XGE_H */

@@ -0,0 +1,6 @@
/* $FreeBSD$ */
#ifndef BUILD_VERSION_H
#define BUILD_VERSION_H
/* Do not edit! Automatically generated when released.*/
#define GENERATED_BUILD_VERSION "10294"
#endif /* BUILD_VERSION_H */

@@ -0,0 +1,53 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : version.h
*
* Description: versioning file
*
* Created: 3 September 2004
*/
#ifndef VERSION_H
#define VERSION_H
#include <dev/nxge/include/build-version.h>
#define XGE_HAL_VERSION_MAJOR "2"
#define XGE_HAL_VERSION_MINOR "5"
#define XGE_HAL_VERSION_FIX "0"
#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\
XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD
#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION
/* Link Layer versioning */
#include <dev/nxge/xgell-version.h>
#endif /* VERSION_H */
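
The version macros rely on the C rule that adjacent string literals are
concatenated. A standalone sketch reproducing the expansion, with the
build number taken from build-version.h above:

#include <stdio.h>

#define XGE_HAL_VERSION_MAJOR "2"
#define XGE_HAL_VERSION_MINOR "5"
#define XGE_HAL_VERSION_FIX   "0"
#define XGE_HAL_VERSION_BUILD "10294"	/* GENERATED_BUILD_VERSION */
#define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\
	XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD

int
main(void)
{
	/* The literals paste into one string: "2.5.0.10294" */
	printf("%s\n", XGE_HAL_VERSION);
	return (0);
}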

@@ -0,0 +1,568 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-debug.h
*
* Description: debug facilities
*
* Created: 6 May 2004
*/
#ifndef XGE_DEBUG_H
#define XGE_DEBUG_H
#include <dev/nxge/include/xge-os-pal.h>
__EXTERN_BEGIN_DECLS
/*
* __FUNCTION__ is, together with __PRETTY_FUNCTION__ or something similar,
* a gcc extension. We have to #ifdef around that, and provide some
* meaningful replacement for those, to make some compilers happier.
*/
#ifndef __func__
#ifdef __FUNCTION__
#define __func__ __FUNCTION__
#endif
#endif
#ifdef XGE_DEBUG_FP
#define XGE_DEBUG_FP_DEVICE 0x1
#define XGE_DEBUG_FP_CHANNEL 0x2
#define XGE_DEBUG_FP_FIFO 0x4
#define XGE_DEBUG_FP_RING 0x8
#endif
/**
* enum xge_debug_level_e
* @XGE_NONE: debug disabled
* @XGE_ERR: all errors are logged
* @XGE_TRACE: all errors plus all kinds of verbose tracing printouts
* are logged. Very noisy.
*
* This enumeration is used to switch between debug levels at runtime,
* provided the DEBUG macro was defined at compilation time. If the
* DEBUG macro is not defined, the debug code is compiled out.
*/
typedef enum xge_debug_level_e {
XGE_NONE = 0,
XGE_TRACE = 1,
XGE_ERR = 2,
} xge_debug_level_e;
#define XGE_DEBUG_MODULE_MASK_DEF 0x30000030
#define XGE_DEBUG_LEVEL_DEF XGE_ERR
#if defined(XGE_DEBUG_TRACE_MASK) || defined(XGE_DEBUG_ERR_MASK)
extern unsigned long *g_module_mask;
extern int *g_level;
#ifndef XGE_DEBUG_TRACE_MASK
#define XGE_DEBUG_TRACE_MASK 0
#endif
#ifndef XGE_DEBUG_ERR_MASK
#define XGE_DEBUG_ERR_MASK 0
#endif
/*
* @XGE_COMPONENT_HAL_CONFIG: do debug for xge core config module
* @XGE_COMPONENT_HAL_FIFO: do debug for xge core fifo module
* @XGE_COMPONENT_HAL_RING: do debug for xge core ring module
* @XGE_COMPONENT_HAL_CHANNEL: do debug for xge core channel module
* @XGE_COMPONENT_HAL_DEVICE: do debug for xge core device module
* @XGE_COMPONENT_HAL_DMQ: do debug for xge core DMQ module
* @XGE_COMPONENT_HAL_UMQ: do debug for xge core UMQ module
* @XGE_COMPONENT_HAL_SQ: do debug for xge core SQ module
* @XGE_COMPONENT_HAL_SRQ: do debug for xge core SRQ module
* @XGE_COMPONENT_HAL_CQRQ: do debug for xge core CQRQ module
* @XGE_COMPONENT_HAL_POOL: do debug for xge core memory pool module
* @XGE_COMPONENT_HAL_BITMAP: do debug for xge core BITMAP module
* @XGE_COMPONENT_CORE: do debug for xge KMA core module
* @XGE_COMPONENT_OSDEP: do debug for xge KMA os dependent parts
* @XGE_COMPONENT_LL: do debug for xge link layer module
* @XGE_COMPONENT_ALL: activate debug for all modules with no exceptions
*
* These component masks are used to distinguish modules or libraries
* at compile time and at runtime. The Makefile must declare the
* XGE_DEBUG_MODULE_MASK macro and set it to the proper value.
*/
#define XGE_COMPONENT_HAL_CONFIG 0x00000001
#define XGE_COMPONENT_HAL_FIFO 0x00000002
#define XGE_COMPONENT_HAL_RING 0x00000004
#define XGE_COMPONENT_HAL_CHANNEL 0x00000008
#define XGE_COMPONENT_HAL_DEVICE 0x00000010
#define XGE_COMPONENT_HAL_MM 0x00000020
#define XGE_COMPONENT_HAL_QUEUE 0x00000040
#define XGE_COMPONENT_HAL_INTERRUPT 0x00000080
#define XGE_COMPONENT_HAL_STATS 0x00000100
#ifdef XGEHAL_RNIC
#define XGE_COMPONENT_HAL_DMQ 0x00000200
#define XGE_COMPONENT_HAL_UMQ 0x00000400
#define XGE_COMPONENT_HAL_SQ 0x00000800
#define XGE_COMPONENT_HAL_SRQ 0x00001000
#define XGE_COMPONENT_HAL_CQRQ 0x00002000
#define XGE_COMPONENT_HAL_POOL 0x00004000
#define XGE_COMPONENT_HAL_BITMAP 0x00008000
#endif
/* space for CORE_XXX */
#define XGE_COMPONENT_OSDEP 0x10000000
#define XGE_COMPONENT_LL 0x20000000
#define XGE_COMPONENT_ALL 0xffffffff
#ifndef XGE_DEBUG_MODULE_MASK
#error "XGE_DEBUG_MODULE_MASK macro must be defined for DEBUG mode..."
#endif
#ifndef __GNUC__
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
#define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt)
#else
#define xge_trace_aux(fmt) xge_os_vaprintf(fmt)
#endif
/**
* xge_debug
* @level: level of debug verbosity.
* @fmt: printf like format string
*
* Provides logging facilities. Can be customized on a per-module
* basis and/or with debug levels. Input parameters, except
* module and level, are the same as for POSIX printf. This macro
* may be compiled out if the DEBUG macro was never defined.
* See also: xge_debug_level_e{}.
*/
#define xge_debug(module, level, fmt) { \
if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
} \
}
#else /* __GNUC__ */
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
#define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt)
#else
#define xge_trace_aux(fmt...) xge_os_printf(fmt)
#endif
#define xge_debug(module, level, fmt...) { \
if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
} \
}
#endif /* __GNUC__ */
#if (XGE_COMPONENT_HAL_STATS & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_STATS;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_stats(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_STATS, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_stats(level, fmt...)
#endif /* __GNUC__ */
#endif
/* Interrupt Related */
#if (XGE_COMPONENT_HAL_INTERRUPT & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_INTERRUPT;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_interrupt(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_INTERRUPT, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_interrupt(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_QUEUE & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_queue(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_QUEUE;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_queue(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_QUEUE, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_queue(xge_debug_level_e level, char *fmt,
...) {}
#else /* __GNUC__ */
#define xge_debug_queue(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_MM & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...)
{
u32 module = XGE_COMPONENT_HAL_MM;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_mm(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_MM, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...)
{}
#else /* __GNUC__ */
#define xge_debug_mm(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_CONFIG & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_config(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_CONFIG;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_config(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_CONFIG, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_config(xge_debug_level_e level, char *fmt,
...) {}
#else /* __GNUC__ */
#define xge_debug_config(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_FIFO & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_FIFO;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_fifo(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_FIFO, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_fifo(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_RING & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_RING;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_ring(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_RING, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_ring(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_CHANNEL & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_CHANNEL;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_channel(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_CHANNEL, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_channel(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_DEVICE & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_DEVICE;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_device(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_DEVICE, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_device(level, fmt...)
#endif /* __GNUC__ */
#endif
#ifdef XGEHAL_RNIC
#if (XGE_COMPONENT_HAL_DMQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_DMQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_dmq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_DMQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_dmq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_UMQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_UMQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_umq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_UMQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_umq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_SQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_SQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_sq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_SQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_sq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_SRQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_SRQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_srq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_SRQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_srq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_CQRQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_CQRQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_cqrq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_CQRQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_cqrq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_POOL & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_POOL;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_pool(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_POOL, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_pool(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_BITMAP & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_BITMAP;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_bitmap(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_BITMAP, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_bitmap(level, fmt...)
#endif /* __GNUC__ */
#endif
#endif
#if (XGE_COMPONENT_OSDEP & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_OSDEP;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_osdep(level, fmt...) \
xge_debug(XGE_COMPONENT_OSDEP, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_osdep(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_LL & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...)
{
u32 module = XGE_COMPONENT_LL;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_ll(level, fmt...) \
xge_debug(XGE_COMPONENT_LL, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_ll(level, fmt...)
#endif /* __GNUC__ */
#endif
#else
static inline void xge_debug_interrupt(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_stats(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_queue(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_mm(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_config(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_hal(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
#endif /* end of XGE_DEBUG_*_MASK */
#ifdef XGE_DEBUG_ASSERT
/**
* xge_assert
* @test: C-condition to check
*
* This macro implements a traditional assert. Assertions are
* compiled in only when the XGE_DEBUG_ASSERT macro is defined at
* compilation time.
*/
#define xge_assert(test) { \
if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \
__FILE__, __LINE__); }
#else
#define xge_assert(test)
#endif /* end of XGE_DEBUG_ASSERT */
__EXTERN_END_DECLS
#endif /* XGE_DEBUG_H */
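
The xge_debug() machinery above gates each message twice: at compile time
through XGE_DEBUG_TRACE_MASK/XGE_DEBUG_ERR_MASK, and at runtime through
*g_level and *g_module_mask. A standalone user-space sketch of the same
filtering logic; the mask values mirror the header, while xge_os_printf is
replaced by printf and the runtime globals by plain variables for
illustration:

#include <stdio.h>
#include <stdarg.h>

enum { XGE_NONE = 0, XGE_TRACE = 1, XGE_ERR = 2 };
#define XGE_COMPONENT_HAL_RING	0x00000004
#define XGE_COMPONENT_LL	0x20000000

/* Compile-time masks: trace only the link layer, log errors for both. */
#define XGE_DEBUG_TRACE_MASK	XGE_COMPONENT_LL
#define XGE_DEBUG_ERR_MASK	(XGE_COMPONENT_LL | XGE_COMPONENT_HAL_RING)

/* Runtime knobs, analogous to *g_level and *g_module_mask. */
static int g_level = XGE_TRACE;
static unsigned int g_module_mask = 0xffffffff;

static void
xge_debug(unsigned int module, int level, const char *fmt, ...)
{
	va_list ap;

	/* Same condition as the xge_debug macro above. */
	if (((level >= XGE_TRACE &&
	    (module & XGE_DEBUG_TRACE_MASK) == module) ||
	    (level >= XGE_ERR &&
	    (module & XGE_DEBUG_ERR_MASK) == module)) &&
	    level >= g_level && (module & g_module_mask)) {
		va_start(ap, fmt);
		vprintf(fmt, ap);
		va_end(ap);
	}
}

int
main(void)
{
	xge_debug(XGE_COMPONENT_LL, XGE_TRACE, "LL trace is printed\n");
	xge_debug(XGE_COMPONENT_HAL_RING, XGE_TRACE,
	    "ring trace is filtered out\n");	/* not in the trace mask */
	xge_debug(XGE_COMPONENT_HAL_RING, XGE_ERR, "ring error is printed\n");
	return (0);
}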

@@ -0,0 +1,149 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-defs.h
*
* Description: global definitions
*
* Created: 13 May 2004
*/
#ifndef XGE_DEFS_H
#define XGE_DEFS_H
#define XGE_PCI_VENDOR_ID 0x17D5
#define XGE_PCI_DEVICE_ID_XENA_1 0x5731
#define XGE_PCI_DEVICE_ID_XENA_2 0x5831
#define XGE_PCI_DEVICE_ID_HERC_1 0x5732
#define XGE_PCI_DEVICE_ID_HERC_2 0x5832
#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733
#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833
#define XGE_DRIVER_NAME "Xge driver"
#define XGE_DRIVER_VENDOR "Neterion, Inc"
#define XGE_CHIP_FAMILY "Xframe"
#define XGE_SUPPORTED_MEDIA_0 "Fiber"
#include <dev/nxge/include/version.h>
#if defined(__cplusplus)
#define __EXTERN_BEGIN_DECLS extern "C" {
#define __EXTERN_END_DECLS }
#else
#define __EXTERN_BEGIN_DECLS
#define __EXTERN_END_DECLS
#endif
__EXTERN_BEGIN_DECLS
/*---------------------------- DMA attributes ------------------------------*/
/* Used in xge_os_dma_malloc() and xge_os_dma_map() */
/*---------------------------- DMA attributes ------------------------------*/
/* XGE_OS_DMA_REQUIRES_SYNC - should be defined or
NOT defined in the Makefile */
#define XGE_OS_DMA_CACHELINE_ALIGNED 0x1
/* Either STREAMING or CONSISTENT should be used.
The combination of both or none is invalid */
#define XGE_OS_DMA_STREAMING 0x2
#define XGE_OS_DMA_CONSISTENT 0x4
#define XGE_OS_SPRINTF_STRLEN 64
/*---------------------------- common stuff --------------------------------*/
#define XGE_OS_LLXFMT "%llx"
#define XGE_OS_NEWLINE "\n"
#ifdef XGE_OS_MEMORY_CHECK
typedef struct {
void *ptr;
int size;
char *file;
int line;
} xge_os_malloc_t;
#define XGE_OS_MALLOC_CNT_MAX 64*1024
extern xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX];
extern int g_malloc_cnt;
#define XGE_OS_MEMORY_CHECK_MALLOC(_vaddr, _size, _file, _line) { \
if (_vaddr) { \
int i; \
for (i=0; i<g_malloc_cnt; i++) { \
if (g_malloc_arr[i].ptr == NULL) { \
break; \
} \
} \
if (i == g_malloc_cnt) { \
g_malloc_cnt++; \
if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \
xge_os_bug("g_malloc_cnt exceed %d", \
XGE_OS_MALLOC_CNT_MAX); \
} \
} \
g_malloc_arr[i].ptr = _vaddr; \
g_malloc_arr[i].size = _size; \
g_malloc_arr[i].file = _file; \
g_malloc_arr[i].line = _line; \
for (i=0; i<_size; i++) { \
*((char *)_vaddr+i) = 0x5a; \
} \
} \
}
#define XGE_OS_MEMORY_CHECK_FREE(_vaddr, _check_size) { \
int i; \
for (i=0; i<XGE_OS_MALLOC_CNT_MAX; i++) { \
if (g_malloc_arr[i].ptr == _vaddr) { \
g_malloc_arr[i].ptr = NULL; \
if(_check_size && g_malloc_arr[i].size!=_check_size) { \
xge_os_printf("OSPAL: freeing with wrong " \
"size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \
(int)_check_size, \
g_malloc_arr[i].file, \
g_malloc_arr[i].line, \
(unsigned long long)(ulong_t) \
g_malloc_arr[i].ptr, \
g_malloc_arr[i].size); \
} \
break; \
} \
} \
if (i == XGE_OS_MALLOC_CNT_MAX) { \
xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \
(unsigned long long)(ulong_t)_vaddr); \
} \
}
#else
#define XGE_OS_MEMORY_CHECK_MALLOC(ptr, size, file, line)
#define XGE_OS_MEMORY_CHECK_FREE(vaddr, check_size)
#endif
__EXTERN_END_DECLS
#endif /* XGE_DEFS_H */
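
The XGE_OS_MEMORY_CHECK macros above implement a simple leak and size
checker: each allocation is recorded in g_malloc_arr and poisoned with
0x5a, and each free looks the pointer up and complains about size
mismatches or unknown pointers. A condensed, standalone sketch of that
bookkeeping; the fixed table size and printf reporting are illustrative
simplifications:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CNT_MAX 64

/* One record per live allocation, as in xge_os_malloc_t. */
static struct { void *ptr; int size; } g_malloc_arr[CNT_MAX];
static int g_malloc_cnt;

static void *
check_malloc(int size)
{
	void *vaddr = malloc(size);
	int i;

	if (vaddr == NULL)
		return (NULL);
	/* Reuse a freed slot if one exists, else grow the table. */
	for (i = 0; i < g_malloc_cnt; i++)
		if (g_malloc_arr[i].ptr == NULL)
			break;
	if (i == g_malloc_cnt)
		g_malloc_cnt++;
	g_malloc_arr[i].ptr = vaddr;
	g_malloc_arr[i].size = size;
	memset(vaddr, 0x5a, size);	/* poison: catches use-before-init */
	return (vaddr);
}

static void
check_free(void *vaddr, int check_size)
{
	int i;

	for (i = 0; i < CNT_MAX; i++) {
		if (g_malloc_arr[i].ptr == vaddr) {
			g_malloc_arr[i].ptr = NULL;
			if (check_size && g_malloc_arr[i].size != check_size)
				printf("freeing with wrong size %d (was %d)\n",
				    check_size, g_malloc_arr[i].size);
			free(vaddr);
			return;
		}
	}
	printf("ptr %p not found!\n", vaddr);
}

int
main(void)
{
	char *p = check_malloc(32);
	int dummy;

	check_free(p, 16);	/* reports a size mismatch */
	check_free(&dummy, 0);	/* reports "not found" */
	return (0);
}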

@@ -0,0 +1,203 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-list.h
*
* Description: Generic bi-directional linked list implementation
*
* Created: 14 May 2004
*/
#ifndef XGE_LIST_H
#define XGE_LIST_H
#include <dev/nxge/include/xge-debug.h>
__EXTERN_BEGIN_DECLS
/**
* struct xge_list_t - List item.
* @prev: Previous list item.
* @next: Next list item.
*
* Item of a bi-directional linked list.
*/
typedef struct xge_list_t {
struct xge_list_t* prev;
struct xge_list_t* next;
} xge_list_t;
/**
* xge_list_init - Initialize linked list.
* @header: first element of the list (head)
*
* Initialize linked list.
* See also: xge_list_t{}.
*/
static inline void xge_list_init (xge_list_t *header)
{
header->next = header;
header->prev = header;
}
/**
* xge_list_is_empty - Is the list empty?
* @header: first element of the list (head)
*
* Determine whether the bi-directional list is empty. Return '1' in
* case of 'empty'.
* See also: xge_list_t{}.
*/
static inline int xge_list_is_empty(xge_list_t *header)
{
xge_assert(header != NULL);
return header->next == header;
}
/**
* xge_list_first_get - Return the first item from the linked list.
* @header: first element of the list (head)
*
* Returns the next item from the header.
* Returns NULL if the next item is the header itself.
* See also: xge_list_remove(), xge_list_insert(), xge_list_t{}.
*/
static inline xge_list_t *xge_list_first_get(xge_list_t *header)
{
xge_assert(header != NULL);
xge_assert(header->next != NULL);
xge_assert(header->prev != NULL);
if(header->next == header)
return NULL;
else
return header->next;
}
/**
* xge_list_remove - Remove the specified item from the linked list.
* @item: element of the list
*
* Remove item from a list.
* See also: xge_list_insert(), xge_list_t{}.
*/
static inline void xge_list_remove(xge_list_t *item)
{
xge_assert(item != NULL);
xge_assert(item->next != NULL);
xge_assert(item->prev != NULL);
item->next->prev = item->prev;
item->prev->next = item->next;
#ifdef XGE_DEBUG_ASSERT
item->next = item->prev = NULL;
#endif
}
/**
* xge_list_insert - Insert a new item after the specified item.
* @new_item: new element of the list
* @prev_item: element of the list after which the new element is
* inserted
*
* Insert new item (new_item) after given item (prev_item).
* See also: xge_list_remove(), xge_list_insert_before(), xge_list_t{}.
*/
static inline void xge_list_insert (xge_list_t *new_item,
xge_list_t *prev_item)
{
xge_assert(new_item != NULL);
xge_assert(prev_item != NULL);
xge_assert(prev_item->next != NULL);
new_item->next = prev_item->next;
new_item->prev = prev_item;
prev_item->next->prev = new_item;
prev_item->next = new_item;
}
/**
* xge_list_insert_before - Insert a new item before the specified item.
* @new_item: new element of the list
* @next_item: element of the list before which the new element is inserted
*
* Insert new item (new_item) before given item (next_item).
*/
static inline void xge_list_insert_before (xge_list_t *new_item,
xge_list_t *next_item)
{
xge_assert(new_item != NULL);
xge_assert(next_item != NULL);
xge_assert(next_item->next != NULL);
new_item->next = next_item;
new_item->prev = next_item->prev;
next_item->prev->next = new_item;
next_item->prev = new_item;
}
#define xge_list_for_each(_p, _h) \
for (_p = (_h)->next, xge_os_prefetch(_p->next); _p != (_h); \
_p = _p->next, xge_os_prefetch(_p->next))
#define xge_list_for_each_safe(_p, _n, _h) \
for (_p = (_h)->next, _n = _p->next; _p != (_h); \
_p = _n, _n = _p->next)
#ifdef __GNUC__
/**
* xge_container_of - Given a member, return the containing structure.
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
* Cast a member of a structure out to the containing structure.
*/
#define xge_container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );})
#else
/* type unsafe version */
#define xge_container_of(ptr, type, member) \
((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member)))
#endif
/**
* xge_offsetof - Offset of the member in the containing structure.
* @t: struct name.
* @m: the name of the member within the struct.
*
* Return the offset of the member @m in the structure @t.
*/
#define xge_offsetof(t, m) ((size_t) (&((t *)0)->m))
__EXTERN_END_DECLS
#endif /* XGE_LIST_H */
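
The list above is the usual embedded doubly-linked list: the xge_list_t
node lives inside the payload structure, and xge_container_of() recovers
the payload from a node. A standalone user-space usage sketch; the list
primitives are condensed from the header above (prefetch hints dropped),
and the frame structure is an illustrative assumption:

#include <stdio.h>
#include <stddef.h>

/* Condensed from xge-list.h above. */
typedef struct xge_list_t {
	struct xge_list_t *prev;
	struct xge_list_t *next;
} xge_list_t;

static void xge_list_init(xge_list_t *header)
{
	header->next = header;
	header->prev = header;
}

static void xge_list_insert(xge_list_t *new_item, xge_list_t *prev_item)
{
	new_item->next = prev_item->next;
	new_item->prev = prev_item;
	prev_item->next->prev = new_item;
	prev_item->next = new_item;
}

#define xge_list_for_each(_p, _h) \
	for (_p = (_h)->next; _p != (_h); _p = _p->next)

#define xge_container_of(ptr, type, member) \
	((type *)(void *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative payload: the list node is embedded, not pointed to. */
struct frame {
	int id;
	xge_list_t item;
};

int
main(void)
{
	struct frame f1 = { 1 }, f2 = { 2 };
	xge_list_t head, *p;

	xge_list_init(&head);
	xge_list_insert(&f1.item, &head);	/* head -> f1 */
	xge_list_insert(&f2.item, &f1.item);	/* head -> f1 -> f2 */
	xge_list_for_each(p, &head) {
		struct frame *f = xge_container_of(p, struct frame, item);
		printf("frame %d\n", f->id);	/* prints 1, then 2 */
	}
	return (0);
}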

@@ -0,0 +1,138 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-os-pal.h
*
* Description: top-level header file; switches between the
* OS-dependent parts
*
* Created: 6 May 2004
*/
#ifndef XGE_OS_PAL_H
#define XGE_OS_PAL_H
#include <dev/nxge/include/xge-defs.h>
__EXTERN_BEGIN_DECLS
/*--------------------------- platform switch ------------------------------*/
/* platform specific header */
#include <dev/nxge/xge-osdep.h>
#ifdef XGEHAL_RNIC
#define IN
#define OUT
#endif
#if !defined(XGE_OS_PLATFORM_64BIT) && !defined(XGE_OS_PLATFORM_32BIT)
#error "either 32bit or 64bit switch must be defined!"
#endif
#if !defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_HOST_LITTLE_ENDIAN)
#error "either little endian or big endian switch must be defined!"
#endif
#if defined(XGE_OS_PLATFORM_64BIT)
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a
#else
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a
#endif
#define XGE_OS_TRACE_MSGBUF_MAX 512
typedef struct xge_os_tracebuf_t {
int wrapped_once; /* circular buffer been wrapped */
int timestamp; /* whether timestamps are enabled */
volatile int offset; /* offset within the tracebuf */
int size; /* total size of trace buffer */
char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */
int msgbuf_max; /* actual size of msg buffer */
char *data; /* pointer to data buffer */
} xge_os_tracebuf_t;
extern xge_os_tracebuf_t *g_xge_os_tracebuf;
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
extern char *dmesg_start;
/* Calculate the size of the msg and copy it into the global buffer */
#define __xge_trace(tb) { \
int msgsize = xge_os_strlen(tb->msg) + 2; \
int offset = tb->offset; \
if (msgsize != 2 && msgsize < tb->msgbuf_max) { \
int leftsize = tb->size - offset; \
if ((msgsize + tb->msgbuf_max) > leftsize) { \
xge_os_memzero(tb->data + offset, leftsize); \
offset = 0; \
tb->wrapped_once = 1; \
} \
xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \
*(tb->data + offset + msgsize-1) = '\n'; \
*(tb->data + offset + msgsize) = 0; \
offset += msgsize; \
tb->offset = offset; \
dmesg_start = tb->data + offset; \
*tb->msg = 0; \
} \
}
#define xge_os_vatrace(tb, fmt) { \
if (tb != NULL) { \
char *_p = tb->msg; \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
_p = tb->msg + xge_os_strlen(tb->msg); \
} \
xge_os_vasprintf(_p, fmt); \
__xge_trace(tb); \
} \
}
#ifdef __GNUC__
#define xge_os_trace(tb, fmt...) { \
if (tb != NULL) { \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
} \
xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \
__xge_trace(tb); \
} \
}
#endif /* __GNUC__ */
#else
#define xge_os_vatrace(tb, fmt)
#ifdef __GNUC__
#define xge_os_trace(tb, fmt...)
#endif /* __GNUC__ */
#endif
__EXTERN_END_DECLS
#endif /* XGE_OS_PAL_H */
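
__xge_trace() above appends each formatted message to a flat character
buffer and wraps to the start (zeroing the tail) when the remaining space
could not hold another maximum-size message. A standalone sketch of that
wrap logic; the buffer sizes are illustrative, and the timestamp and
dmesg_start bookkeeping are omitted:

#include <stdio.h>
#include <string.h>

#define TB_SIZE		64	/* total trace buffer, illustrative */
#define TB_MSGBUF_MAX	16	/* max size of one message */

static char tb_data[TB_SIZE];
static int tb_offset;
static int tb_wrapped_once;

static void
tb_trace(const char *msg)
{
	int msgsize = (int)strlen(msg) + 2;	/* '\n' + NUL, as above */
	int leftsize = TB_SIZE - tb_offset;

	if (msgsize == 2 || msgsize >= TB_MSGBUF_MAX)
		return;
	/* Wrap when the tail could not hold another max-size message. */
	if (msgsize + TB_MSGBUF_MAX > leftsize) {
		memset(tb_data + tb_offset, 0, leftsize);
		tb_offset = 0;
		tb_wrapped_once = 1;
	}
	memcpy(tb_data + tb_offset, msg, msgsize - 1);
	tb_data[tb_offset + msgsize - 1] = '\n';
	tb_data[tb_offset + msgsize] = '\0';
	tb_offset += msgsize;
}

int
main(void)
{
	char msg[16];
	int i;

	for (i = 0; i < 10; i++) {
		snprintf(msg, sizeof(msg), "event %d", i);
		tb_trace(msg);
	}
	/* With these sizes the buffer wraps once, after five messages. */
	printf("wrapped=%d offset=%d\n", tb_wrapped_once, tb_offset);
	return (0);
}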

@@ -0,0 +1,614 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-os-template.h
*
* Description: Template for creating platform-dependent "glue" code.
*
* Created: 6 May 2004
*/
#ifndef XGE_OS_TEMPLATE_H
#define XGE_OS_TEMPLATE_H
#ifndef TEMPLATE
# error "should not be compiled for platforms other than TEMPLATE..."
#endif
/* ------------------------- includes and defines ------------------------- */
/*
* Note:
*
* - on some operating systems, like Linux and FreeBSD, there is a macro
* that makes it possible to determine endianness automatically
*/
#define XGE_OS_HOST_BIG_ENDIAN TEMPLATE
#define XGE_OS_HOST_PAGE_SIZE TEMPLATE
/* ---------------------- fixed size primitive types -----------------------*/
/*
* Note:
*
* - u## - means ## bits unsigned int/long
* - all names must be preserved since the HAL uses them.
* - ulong_t is platform specific, i.e. for 64bit - 64bit size, for
* 32bit - 32bit size
*/
#define TEMPLATE u8
#define TEMPLATE u16
#define TEMPLATE u32
#define TEMPLATE u64
#define TEMPLATE ulong_t
#define TEMPLATE ptrdiff_t
#define TEMPLATE dma_addr_t
#define TEMPLATE spinlock_t
typedef TEMPLATE pci_dev_h;
typedef TEMPLATE pci_reg_h;
typedef TEMPLATE pci_dma_h;
typedef TEMPLATE pci_irq_h;
typedef TEMPLATE pci_cfg_h;
typedef TEMPLATE pci_dma_acc_h;
/* -------------------------- "libc" functionality -------------------------*/
/*
* Note:
*
* - "libc" functionality maps one-to-one to POSIX-like functions
*/
/* Note: prototype: xge_os_memzero(void *mem, int size); */
#define xge_os_memzero TEMPLATE
/* Note: the 1st argument MUST be destination, like in:
* void *memcpy(void *dest, const void *src, size_t n);
*/
#define xge_os_memcpy TEMPLATE
/* Note: should accept format (the 1st argument) and a variable
* number of arguments thereafter.. */
#define xge_os_printf(fmt...) TEMPLATE
#define xge_os_vasprintf(buf, fmt...) TEMPLATE
#define xge_os_sprintf(buf, fmt, ...) TEMPLATE
#define xge_os_timestamp(buf) TEMPLATE
#define xge_os_println TEMPLATE
/* -------------------- synchronization primitives -------------------------*/
/*
* Note:
*
* - use spin_lock in interrupts or in threads when there are no races
* with interrupts
* - use spin_lock_irqsave in threads if there is a race with interrupt
* - use spin_lock_irqsave for nested locks
*/
/*
* Initialize the spin lock.
*/
#define xge_os_spin_lock_init(lockp, ctxh) TEMPLATE
/*
* Initialize the spin lock (IRQ version).
*/
#define xge_os_spin_lock_init_irq(lockp, ctxh) TEMPLATE
/*
* Destroy the lock.
*/
#define xge_os_spin_lock_destroy(lockp, ctxh) TEMPLATE
/*
* Destroy the lock (IRQ version).
*/
#define xge_os_spin_lock_destroy_irq(lockp, ctxh) TEMPLATE
/*
* Acquire the lock.
*/
#define xge_os_spin_lock(lockp) TEMPLATE
/*
* Release the lock.
*/
#define xge_os_spin_unlock(lockp) TEMPLATE
/*
* Acquire the lock (IRQ version).
*/
#define xge_os_spin_lock_irq(lockp, flags) TEMPLATE
/*
* Release the lock (IRQ version).
*/
#define xge_os_spin_unlock_irq(lockp, flags) TEMPLATE
/*
* Write memory barrier.
*/
#define xge_os_wmb() TEMPLATE
/*
* Delay (in microseconds).
*/
#define xge_os_udelay(us) TEMPLATE
/*
* Delay (in milliseconds).
*/
#define xge_os_mdelay(ms) TEMPLATE
/*
* Compare and exchange.
*/
#define xge_os_cmpxchg(targetp, cmp, newval) TEMPLATE
/* ------------------------- misc primitives -------------------------------*/
#define xge_os_prefetch TEMPLATE
#define xge_os_prefetchw TEMPLATE
#define xge_os_bug(fmt...) TEMPLATE
/* -------------------------- compiler stuffs ------------------------------*/
#define __xge_os_attr_cacheline_aligned TEMPLATE
/* ---------------------- memory primitives --------------------------------*/
/**
* xge_os_malloc - Allocate non DMA-able memory.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @size: Size to allocate.
*
* Allocate @size bytes of memory. This allocation can sleep, and
* therefore it requires process context. In other words,
* xge_os_malloc() cannot be called from the interrupt context.
* Use xge_os_free() to free the allocated block.
*
* Returns: Pointer to allocated memory, NULL - on failure.
*
* See also: xge_os_free().
*/
static inline void *xge_os_malloc(IN pci_dev_h pdev,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_free - Free non DMA-able memory.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @vaddr: Address of the allocated memory block.
* @size: Some OSs require the size to be provided on free
*
* Free the memory area obtained via xge_os_malloc().
* This call may also sleep, and therefore it cannot be used inside
* interrupt.
*
* See also: xge_os_malloc().
*/
static inline void xge_os_free(IN pci_dev_h pdev,
IN const void *vaddr,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_vaddr - Get Virtual address for the given physical address.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @vaddr: Physical Address of the memory block.
* @size: Some OSs require the size to be provided
*
* Get the virtual address for the given physical address.
* This call may also sleep, and therefore it cannot be used inside
* interrupt.
*
* See also: xge_os_malloc().
*/
static inline void xge_os_vaddr(IN pci_dev_h pdev,
IN const void *vaddr,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_dma_malloc - Allocate DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @size: Size (in bytes) to allocate.
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
* @p_dmah: Handle used to map the memory onto the corresponding device memory
* space. See xge_os_dma_map(). The handle is an out-parameter
* returned by the function.
* @p_dma_acch: One more DMA handle used subsequently to free the
* DMA object (via xge_os_dma_free()).
* Note that this and the previous handle have
* physical meaning for Solaris; on Windows and Linux the
 * corresponding value is simply a pointer to the PCI device.
* The value is returned by this function.
*
* Allocate DMA-able contiguous memory block of the specified @size.
* This memory can be subsequently freed using xge_os_dma_free().
* Note: can be used inside interrupt context.
*
 * Returns: Pointer to allocated memory (DMA-able), NULL on failure.
*
*/
static inline void *xge_os_dma_malloc(IN pci_dev_h pdev,
IN unsigned long size,
IN int dma_flags,
OUT pci_dma_h *p_dmah,
OUT pci_dma_acc_h *p_dma_acch)
{ TEMPLATE; }
/**
* xge_os_dma_free - Free previously allocated DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @vaddr: Virtual address of the DMA-able memory.
* @p_dma_acch: DMA handle used to free the resource.
* @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc().
*
* Free DMA-able memory originally allocated by xge_os_dma_malloc().
* Note: can be used inside interrupt.
* See also: xge_os_dma_malloc().
*/
static inline void xge_os_dma_free (IN pci_dev_h pdev,
IN const void *vaddr,
IN pci_dma_acc_h *p_dma_acch,
IN pci_dma_h *p_dmah)
{ TEMPLATE; }
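/*
 * Allocation/free pairing for DMA-able memory (illustrative sketch
 * using the XGE_OS_DMA_CONSISTENT flag documented above; "pdev" and
 * "size" are assumed caller-owned, error handling elided):
 *
 *	pci_dma_h dmah;
 *	pci_dma_acc_h dma_acch;
 *	void *vaddr;
 *
 *	vaddr = xge_os_dma_malloc(pdev, size, XGE_OS_DMA_CONSISTENT,
 *				  &dmah, &dma_acch);
 *	if (vaddr == NULL)
 *		return;	... allocation failed ...
 *	... use vaddr, mapping it via xge_os_dma_map() as needed ...
 *	xge_os_dma_free(pdev, vaddr, &dma_acch, &dmah);
 */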
/* ----------------------- io/pci/dma primitives ---------------------------*/
#define XGE_OS_DMA_DIR_TODEVICE TEMPLATE
#define XGE_OS_DMA_DIR_FROMDEVICE TEMPLATE
#define XGE_OS_DMA_DIR_BIDIRECTIONAL TEMPLATE
/**
* xge_os_pci_read8 - Read one byte from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the result.
*
 * Read byte value from the specified @cfgh PCI configuration space at the
* specified offset = @where.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read8(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u8 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write8 - Write one byte into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write byte value into the specified PCI configuration space
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write8(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u8 val)
{ TEMPLATE; }
/**
* xge_os_pci_read16 - Read 16bit word from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the 16bit result.
*
* Read 16bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read16(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u16 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write16 - Write 16bit word into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 16bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write16(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u16 val)
{ TEMPLATE; }
/**
* xge_os_pci_read32 - Read 32bit word from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of 32bit result.
*
* Read 32bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read32(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u32 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write32 - Write 32bit word into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 32bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write32(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u32 val)
{ TEMPLATE; }
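/*
 * Example: reading the standard PCI vendor and device IDs (offsets
 * 0x00 and 0x02 of PCI configuration space) through these wrappers
 * (illustrative sketch; "pdev" and "cfgh" are the caller's handles):
 *
 *	u16 vendor_id, device_id;
 *
 *	if (xge_os_pci_read16(pdev, cfgh, 0x00, &vendor_id) != 0 ||
 *	    xge_os_pci_read16(pdev, cfgh, 0x02, &device_id) != 0)
 *		return;	... config space access failed ...
 */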
/**
* xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
 * PIO and/or config space IO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 1 byte value read from the specified (mapped) memory space address.
*/
static inline u8 xge_os_pio_mem_read8(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
 * xge_os_pio_mem_write8 - Write 1 byte into device memory mapped
* space.
* @pdev: Device context. Some OSs require device context to perform
 * PIO and/or config space IO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write byte value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write8(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u8 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read16 - Read 16bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 16bit value read from the specified (mapped) memory space address.
*/
static inline u16 xge_os_pio_mem_read16(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write16 - Write 16bit into device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 16bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write16(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u16 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read32 - Read 32bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 32bit value read from the specified (mapped) memory space address.
*/
static inline u32 xge_os_pio_mem_read32(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write32 - Write 32bit into device memory space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 32bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write32(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u32 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read64 - Read 64bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 64bit value read from the specified (mapped) memory space address.
*/
static inline u64 xge_os_pio_mem_read64(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write64 - Write 64bit into device memory space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 64bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write64(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u64 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_flush_bridge - Flush the bridge.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Flush the bridge.
*/
static inline void xge_os_flush_bridge(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
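/*
 * A posted PIO write can linger in an intermediate bridge; reading
 * back from the same device pushes it out. Typical pattern
 * (illustrative sketch; "bar0" is a hypothetical mapped register
 * address):
 *
 *	xge_os_pio_mem_write64(pdev, regh, val, bar0);
 *	xge_os_flush_bridge(pdev, regh, bar0);
 */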
/**
* xge_os_dma_map - Map DMA-able memory block to, or from, or
* to-and-from device.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @vaddr: Virtual address of the DMA-able memory.
* @size: Size (in bytes) to be mapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
*
* Map a single memory block.
*
* Returns: DMA address of the memory block,
* XGE_OS_INVALID_DMA_ADDR on failure.
*
* See also: xge_os_dma_malloc(), xge_os_dma_unmap(),
* xge_os_dma_sync().
*/
static inline dma_addr_t xge_os_dma_map(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN void *vaddr,
IN size_t size,
IN int dir,
IN int dma_flags)
{ TEMPLATE; }
/**
* xge_os_dma_unmap - Unmap DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
* @size: Size (in bytes) to be unmapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Unmap a single DMA-able memory block that was previously mapped
* using xge_os_dma_map().
* See also: xge_os_dma_malloc(), xge_os_dma_map().
*/
static inline void xge_os_dma_unmap(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN dma_addr_t dma_addr,
IN size_t size,
IN int dir)
{ TEMPLATE; }
/**
* xge_os_dma_sync - Synchronize mapped memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
 * @dma_offset: Offset from start of the block. Used by Solaris only.
* @length: Size of the block.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Make physical and CPU memory consistent for a single
* streaming mode DMA translation.
* This API compiles to NOP on cache-coherent platforms.
* On non cache-coherent platforms, depending on the direction
* of the "sync" operation, this API will effectively
* either invalidate CPU cache (that might contain old data),
* or flush CPU cache to update physical memory.
* See also: xge_os_dma_malloc(), xge_os_dma_map(),
* xge_os_dma_unmap().
*/
static inline void xge_os_dma_sync(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN dma_addr_t dma_addr,
IN u64 dma_offset,
IN size_t length,
IN int dir)
{ TEMPLATE; }
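/*
 * Streaming-mode life cycle (illustrative sketch): map a buffer for
 * transmit, sync it, then unmap once the device is done with it.
 * "pdev", "dmah", "vaddr" and "size" are assumed caller-owned:
 *
 *	dma_addr_t dma_addr;
 *
 *	dma_addr = xge_os_dma_map(pdev, dmah, vaddr, size,
 *				  XGE_OS_DMA_DIR_TODEVICE,
 *				  XGE_OS_DMA_STREAMING);
 *	if (dma_addr == XGE_OS_INVALID_DMA_ADDR)
 *		return;	... mapping failed ...
 *	xge_os_dma_sync(pdev, dmah, dma_addr, 0, size,
 *			XGE_OS_DMA_DIR_TODEVICE);
 *	... device consumes the buffer ...
 *	xge_os_dma_unmap(pdev, dmah, dma_addr, size,
 *			 XGE_OS_DMA_DIR_TODEVICE);
 */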
#endif /* XGE_OS_TEMPLATE_H */

View File

@ -0,0 +1,185 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-queue.h
*
* Description: serialized event queue
*
* Created: 7 June 2004
*/
#ifndef XGE_QUEUE_H
#define XGE_QUEUE_H
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-defs.h>
#include <dev/nxge/include/xge-list.h>
#include <dev/nxge/include/xgehal-event.h>
__EXTERN_BEGIN_DECLS
#define XGE_QUEUE_BUF_SIZE 0x1000
#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16
/**
* enum xge_queue_status_e - Enumerates return codes of the xge_queue
* manipulation APIs.
* @XGE_QUEUE_IS_FULL: Queue is full, need to grow.
* @XGE_QUEUE_IS_EMPTY: Queue is empty.
* @XGE_QUEUE_OUT_OF_MEMORY: Out of memory.
* @XGE_QUEUE_NOT_ENOUGH_SPACE: Exceeded specified event size,
* see xge_queue_consume().
 * @XGE_QUEUE_OK: None of the codes listed above.
*
* Enumerates return codes of xge_queue_consume()
* and xge_queue_produce() APIs.
*/
typedef enum xge_queue_status_e {
XGE_QUEUE_OK = 0,
XGE_QUEUE_IS_FULL = 1,
XGE_QUEUE_IS_EMPTY = 2,
XGE_QUEUE_OUT_OF_MEMORY = 3,
XGE_QUEUE_NOT_ENOUGH_SPACE = 4
} xge_queue_status_e;
typedef void* xge_queue_h;
/**
* struct xge_queue_item_t - Queue item.
* @item: List item. Note that the queue is "built" on top of
* the bi-directional linked list.
* @event_type: Event type. Includes (but is not restricted to)
* one of the xge_hal_event_e{} enumerated types.
* @data_size: Size of the enqueued user data. Note that xge_queue_t
* items are allowed to have variable sizes.
* @is_critical: For critical events, e.g. ECC.
* @context: Opaque (void*) "context", for instance event producer object.
*
* Item of the xge_queue_t{}. The queue is protected
* in terms of multi-threaded concurrent access.
* See also: xge_queue_t{}.
*/
typedef struct xge_queue_item_t {
xge_list_t item;
xge_hal_event_e event_type;
int data_size;
int is_critical;
void *context;
} xge_queue_item_t;
/**
* function xge_queued_f - Item-enqueued callback.
* @data: Per-queue context independent of the event. E.g., device handle.
 * @event_type: HAL- or ULD-defined event type. Note that HAL's own
* events are enumerated by xge_hal_event_e{}.
*
* Per-queue optional callback. If not NULL, called by HAL each
* time an event gets added to the queue.
*/
typedef void (*xge_queued_f) (void *data, int event_type);
/**
* struct xge_queue_t - Protected dynamic queue of variable-size items.
* @start_ptr: Points to the start of the queue.
* @end_ptr: Points to the end of the queue.
* @head_ptr: Points to the head of the queue. It gets changed during queue
* produce/consume operations.
* @tail_ptr: Points to the tail of the queue. It gets changed during queue
* produce/consume operations.
 * @lock: Lock for queue operations (synchronization purposes).
 * @pages_initial: Number of pages to be initially allocated at the time
* of queue creation.
* @pages_max: Max number of pages that can be allocated in the queue.
 * @pages_current: Number of pages currently allocated.
* @list_head: Points to the list of queue elements that are produced, but yet
* to be consumed.
* @signal_callback: (TODO)
* @pdev: PCI device handle
* @irqh: PCI device IRQ handle.
* @queued_func: Optional callback function to be called each time a new
* item is added to the queue.
* @queued_data: Arguments to the callback function.
* @has_critical_event: Non-zero, if the queue contains a critical event,
* see xge_hal_event_e{}.
* Protected dynamically growing queue. The queue is used to support multiple
* producer/consumer type scenarios. The queue is a strict FIFO: first come
* first served.
* Queue users may "produce" (see xge_queue_produce()) and "consume"
 * (see xge_queue_consume()) items (a.k.a. events) of variable sizes.
* See also: xge_queue_item_t{}.
*/
typedef struct xge_queue_t {
void *start_ptr;
void *end_ptr;
void *head_ptr;
void *tail_ptr;
spinlock_t lock;
unsigned int pages_initial;
unsigned int pages_max;
unsigned int pages_current;
xge_list_t list_head;
pci_dev_h pdev;
pci_irq_h irqh;
xge_queued_f queued_func;
void *queued_data;
int has_critical_event;
} xge_queue_t;
/* ========================== PUBLIC API ================================= */
xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
int pages_max, xge_queued_f queued_func, void *queued_data);
void xge_queue_destroy(xge_queue_h queueh);
void* xge_queue_item_data(xge_queue_item_t *item);
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
int is_critical, const int data_size, void *data);
static inline xge_queue_status_e
xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) {
return xge_queue_produce(queueh, event_type, context, 0, 0, 0);
}
xge_queue_status_e xge_queue_consume(xge_queue_h queueh, int data_max_size,
xge_queue_item_t *item);
void xge_queue_flush(xge_queue_h queueh);
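/*
 * Producer/consumer usage sketch (illustrative; the initial/maximum
 * page counts and the on-stack consume buffer are made up for the
 * example, and "pdev", "irqh" and "context" are caller-owned):
 *
 *	xge_queue_h qh;
 *	xge_queue_item_t *item;
 *	char buf[XGE_QUEUE_BUF_SIZE];
 *
 *	qh = xge_queue_create(pdev, irqh, 1, 4, NULL, NULL);
 *	(void) xge_queue_produce_context(qh, XGE_HAL_EVENT_LINK_IS_UP,
 *					 context);
 *	item = (xge_queue_item_t *)(void *)buf;
 *	while (xge_queue_consume(qh, sizeof(buf), item) == XGE_QUEUE_OK)
 *		... dispatch on item->event_type ...
 *	xge_queue_destroy(qh);
 */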
/* ========================== PRIVATE API ================================= */
xge_queue_status_e __io_queue_grow(xge_queue_h qh);
int __queue_get_reset_critical (xge_queue_h qh);
__EXTERN_END_DECLS
#endif /* XGE_QUEUE_H */

View File

@ -0,0 +1,507 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-channel.h
*
* Description: HAL channel object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_CHANNEL_H
#define XGE_HAL_CHANNEL_H
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-list.h>
#include <dev/nxge/include/xgehal-types.h>
#include <dev/nxge/include/xgehal-stats.h>
__EXTERN_BEGIN_DECLS
/**
* enum xge_hal_channel_type_e - Enumerated channel types.
* @XGE_HAL_CHANNEL_TYPE_FIFO: fifo.
* @XGE_HAL_CHANNEL_TYPE_RING: ring.
* @XGE_HAL_CHANNEL_TYPE_SEND_QUEUE: Send Queue
* @XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE: Receive Queue
* @XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE: Receive queue completion queue
* @XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE: Up message queue
* @XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE: Down message queue
* @XGE_HAL_CHANNEL_TYPE_MAX: Maximum number of HAL-supported
* (and recognized) channel types. Currently: two.
*
* Enumerated channel types. Currently there are only two link-layer
* channels - Xframe fifo and Xframe ring. In the future the list will grow.
*/
typedef enum xge_hal_channel_type_e {
XGE_HAL_CHANNEL_TYPE_FIFO,
XGE_HAL_CHANNEL_TYPE_RING,
XGE_HAL_CHANNEL_TYPE_SEND_QUEUE,
XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE,
XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE,
XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE,
XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE,
XGE_HAL_CHANNEL_TYPE_MAX
} xge_hal_channel_type_e;
/**
* enum xge_hal_channel_flag_e - Channel flags.
* @XGE_HAL_CHANNEL_FLAG_NONE: zero (nil) flag.
* @XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK: use lock when posting transmit
* descriptor.
* @XGE_HAL_CHANNEL_FLAG_FREE_RXD: to-be-defined.
*
* Channel opening flags. Reserved for future usage.
*/
typedef enum xge_hal_channel_flag_e {
XGE_HAL_CHANNEL_FLAG_NONE = 0x0,
XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1,
XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2
} xge_hal_channel_flag_e;
/**
* enum xge_hal_dtr_state_e - Descriptor (DTR) state.
* @XGE_HAL_DTR_STATE_NONE: Invalid state.
* @XGE_HAL_DTR_STATE_AVAIL: Descriptor is available for reservation
* (via xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_reserve(), etc.).
* @XGE_HAL_DTR_STATE_POSTED: Descriptor is posted for processing by the
* device.
* @XGE_HAL_DTR_STATE_FREED: Descriptor is free and can be reused for
* filling-in and posting later.
*
* Xframe/HAL descriptor states. For more on descriptor states and transitions
* please refer to ch_intern{}.
*
* See also: xge_hal_channel_dtr_term_f{}.
*/
typedef enum xge_hal_dtr_state_e {
XGE_HAL_DTR_STATE_NONE = 0,
XGE_HAL_DTR_STATE_AVAIL = 1,
XGE_HAL_DTR_STATE_POSTED = 2,
XGE_HAL_DTR_STATE_FREED = 3
} xge_hal_dtr_state_e;
/**
* enum xge_hal_channel_reopen_e - Channel open, close, or reopen option.
* @XGE_HAL_CHANNEL_RESET_ONLY: Do not (de)allocate channel; used with
* xge_hal_channel_open(), xge_hal_channel_close().
* @XGE_HAL_CHANNEL_OC_NORMAL: Do (de)allocate channel; used with
* xge_hal_channel_open(), xge_hal_channel_close().
*
* Enumerates options used with channel open and close operations.
* The @XGE_HAL_CHANNEL_RESET_ONLY can be used when resetting the device;
* in this case there is actually no need to free and then again malloc
* the memory (including DMA-able memory) used for channel operation.
*/
typedef enum xge_hal_channel_reopen_e {
XGE_HAL_CHANNEL_RESET_ONLY = 1,
XGE_HAL_CHANNEL_OC_NORMAL = 2
} xge_hal_channel_reopen_e;
/**
* function xge_hal_channel_callback_f - Channel callback.
* @channelh: Channel "containing" 1 or more completed descriptors.
* @dtrh: First completed descriptor.
* @t_code: Transfer code, as per Xframe User Guide.
* Returned by HAL.
* @host_control: Opaque 64bit data stored by ULD inside the Xframe
* descriptor prior to posting the latter on the channel
* via xge_hal_fifo_dtr_post() or xge_hal_ring_dtr_post().
* The @host_control is returned as is to the ULD with each
* completed descriptor.
* @userdata: Opaque per-channel data specified at channel open
* time, via xge_hal_channel_open().
*
* Channel completion callback (type declaration). A single per-channel
* callback is specified at channel open time, via
* xge_hal_channel_open().
* Typically gets called as part of the processing of the Interrupt
* Service Routine.
*
* Channel callback gets called by HAL if, and only if, there is at least
* one new completion on a given ring or fifo channel. Upon processing the
* first @dtrh ULD is _supposed_ to continue consuming completions
 * using one of the following HAL APIs:
* - xge_hal_fifo_dtr_next_completed()
* or
* - xge_hal_ring_dtr_next_completed().
*
* Note that failure to process new completions in a timely fashion
* leads to XGE_HAL_INF_OUT_OF_DESCRIPTORS condition.
*
* Non-zero @t_code means failure to process (transmit or receive, depending
* on the channel type) the descriptor.
*
* In the "transmit" case the failure could happen, for instance, when the
* link is down, in which case Xframe completes the descriptor because it
* is not able to send the data out.
*
* For details please refer to Xframe User Guide.
*
* See also: xge_hal_fifo_dtr_next_completed(),
* xge_hal_ring_dtr_next_completed(), xge_hal_channel_dtr_term_f{}.
*/
typedef xge_hal_status_e (*xge_hal_channel_callback_f)
(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u8 t_code, void *userdata);
/**
* function xge_hal_channel_dtr_init_f - Initialize descriptor callback.
* @channelh: Channel "containing" the @dtrh descriptor.
* @dtrh: Descriptor.
* @index: Index of the descriptor in the channel's set of descriptors.
* @userdata: Per-channel user data (a.k.a. context) specified at
* channel open time, via xge_hal_channel_open().
* @reopen: See xge_hal_channel_reopen_e{}.
*
* Initialize descriptor callback. Unless NULL is specified in the
 * xge_hal_channel_attr_t{} structure passed to xge_hal_channel_open(),
* HAL invokes the callback as part of the xge_hal_channel_open()
* implementation.
* For the ring type of channel the ULD is expected to fill in this descriptor
* with buffer(s) and control information.
* For the fifo type of channel the ULD could use the callback to
* pre-set DMA mappings and/or alignment buffers.
*
* See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_term_f{}.
*/
typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f)
(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
int index,
void *userdata,
xge_hal_channel_reopen_e reopen);
/**
* function xge_hal_channel_dtr_term_f - Terminate descriptor callback.
* @channelh: Channel "containing" the @dtrh descriptor.
* @dtrh: First completed descriptor.
* @state: One of the xge_hal_dtr_state_e{} enumerated states.
* @userdata: Per-channel user data (a.k.a. context) specified at
* channel open time, via xge_hal_channel_open().
* @reopen: See xge_hal_channel_reopen_e{}.
*
* Terminate descriptor callback. Unless NULL is specified in the
 * xge_hal_channel_attr_t{} structure passed to xge_hal_channel_open(),
* HAL invokes the callback as part of closing the corresponding
* channel, prior to de-allocating the channel and associated data
* structures (including descriptors).
* ULD should utilize the callback to (for instance) unmap
* and free DMA data buffers associated with the posted (state =
* XGE_HAL_DTR_STATE_POSTED) descriptors,
* as well as other relevant cleanup functions.
*
* See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_init_f{}.
*/
typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
xge_hal_dtr_state_e state,
void *userdata,
xge_hal_channel_reopen_e reopen);
/**
* struct xge_hal_channel_attr_t - Channel open "template".
* @type: xge_hal_channel_type_e channel type.
* @vp_id: Virtual path id
* @post_qid: Queue ID to post descriptors. For the link layer this
* number should be in the 0..7 range.
* @compl_qid: Completion queue ID. Must be set to zero for the link layer.
* @callback: Channel completion callback. HAL invokes the callback when there
* are new completions on that channel. In many implementations
* the @callback executes in the hw interrupt context.
* @dtr_init: Channel's descriptor-initialize callback.
* See xge_hal_channel_dtr_init_f{}.
* If not NULL, HAL invokes the callback when opening
* the channel via xge_hal_channel_open().
* @dtr_term: Channel's descriptor-terminate callback. If not NULL,
* HAL invokes the callback when closing the corresponding channel.
* See also xge_hal_channel_dtr_term_f{}.
* @userdata: User-defined "context" of _that_ channel. Passed back to the
* user as one of the @callback, @dtr_init, and @dtr_term arguments.
* @per_dtr_space: If specified (i.e., greater than zero): extra space
* reserved by HAL per each transmit or receive (depending on the
* channel type) descriptor. Can be used to store,
* and retrieve on completion, information specific
* to the upper-layer.
* @flags: xge_hal_channel_flag_e enumerated flags.
*
* Channel open "template". User fills the structure with channel
* attributes and passes it to xge_hal_channel_open().
* Usage: See ex_open{}.
*/
typedef struct xge_hal_channel_attr_t {
xge_hal_channel_type_e type;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
int post_qid;
int compl_qid;
xge_hal_channel_callback_f callback;
xge_hal_channel_dtr_init_f dtr_init;
xge_hal_channel_dtr_term_f dtr_term;
void *userdata;
int per_dtr_space;
xge_hal_channel_flag_e flags;
} xge_hal_channel_attr_t;
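/*
 * Open-time sketch, following the "template" above (illustrative;
 * "my_callback", "my_dtr_init", "my_dtr_term", "my_ctx" and "hldev"
 * are hypothetical ULD entities, and XGE_HAL_OK is assumed to be the
 * success status):
 *
 *	xge_hal_channel_attr_t attr;
 *	xge_hal_channel_h channelh;
 *
 *	attr.type	= XGE_HAL_CHANNEL_TYPE_FIFO;
 *	attr.post_qid	= 0;
 *	attr.compl_qid	= 0;	... zero for the link layer ...
 *	attr.callback	= my_callback;
 *	attr.dtr_init	= my_dtr_init;
 *	attr.dtr_term	= my_dtr_term;
 *	attr.userdata	= my_ctx;
 *	attr.per_dtr_space = 0;
 *	attr.flags	= XGE_HAL_CHANNEL_FLAG_NONE;
 *	if (xge_hal_channel_open(hldev, &attr, &channelh,
 *	    XGE_HAL_CHANNEL_OC_NORMAL) != XGE_HAL_OK)
 *		return;	... open failed ...
 */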
/*
* xge_hal_channel_t
* ---------- complete/free section ---------------
* @item: List item; used to maintain a list of open channels.
* @callback: Channel completion callback. See
* xge_hal_channel_callback_f.
* @compl_index: Completion index. At any point in time points on the
* position in the channel, which will contain next
* to-be-completed descriptor.
* @length: Channel length. Currently allocated number of descriptors.
* The channel length "grows" when more descriptors get allocated.
* See _hal_mempool_grow.
* @free_arr: Free array. Contains completed descriptors that were freed
* (i.e., handed over back to HAL) by ULD.
* See xge_hal_fifo_dtr_free(), xge_hal_ring_dtr_free().
* @free_lock: Lock to protect @free_arr.
* ----------- reserve/post section ---------------
* @post_index: Post index. At any point in time points on the
* position in the channel, which'll contain next to-be-posted
* descriptor.
* @post_lock: Lock to serialize multiple concurrent "posters" of descriptors
* on the given channel.
* @reserve_arr: Reserve array. Contains descriptors that can be reserved
* by ULD for the subsequent send or receive operation.
* See xge_hal_fifo_dtr_reserve(),
* xge_hal_ring_dtr_reserve().
* @reserve_length: Length of the @reserve_arr. The length dynamically
* changes: it decrements each time descriptor is reserved.
* @reserve_lock: Lock to serialize multiple concurrent threads accessing
* @reserve_arr.
* @reserve_threshold: Reserve threshold. Minimal number of free descriptors
* that ought to be preserved in the channel at all times.
* Note that @reserve_threshold >= 0 &&
* @reserve_threshold < @reserve_max.
* ------------ common section --------------------
* @devh: Device handle. HAL device object that contains _this_ channel.
* @dmah: Channel's DMA address. Used to synchronize (to/from device)
* descriptors.
* @regh0: Base address of the device memory space handle. Copied from HAL device
* at channel open time.
* @regh1: Base address of the device memory space handle. Copied from HAL device
* at channel open time.
* @userdata: Per-channel opaque (void*) user-defined context, which may be
* upper-layer driver object, ULP connection, etc.
* Once channel is open, @userdata is passed back to user via
* xge_hal_channel_callback_f.
* @work_arr: Work array. Contains descriptors posted to the channel.
* Note that at any point in time @work_arr contains 3 types of
* descriptors:
* 1) posted but not yet consumed by Xframe device;
* 2) consumed but not yet completed;
* 3) completed but not yet freed
* (via xge_hal_fifo_dtr_free() or xge_hal_ring_dtr_free())
* @saved_arr: Array used internally to optimize channel full-duplex
* operation.
 * @stats: Channel statistics. Includes HAL internal counters, including
* for instance, number of times out-of-descriptors
* (see XGE_HAL_INF_OUT_OF_DESCRIPTORS) condition happened.
* ------------- "slow" section ------------------
* @type: Channel type. See xge_hal_channel_type_e{}.
* @vp_id: Virtual path id
* @post_qid: Identifies Xframe queue used for posting descriptors.
* @compl_qid: Identifies Xframe completion queue.
* @flags: Channel flags. See xge_hal_channel_flag_e{}.
* @reserve_initial: Initial number of descriptors allocated at channel open
* time (see xge_hal_channel_open()). The number of
* channel descriptors can grow at runtime
* up to @reserve_max value.
* @reserve_max: Maximum number of channel descriptors. See @reserve_initial.
* @is_open: True, if channel is open; false - otherwise.
* @per_dtr_space: Per-descriptor space (in bytes) that channel user can utilize
* to store per-operation control information.
* HAL channel object. HAL devices (see xge_hal_device_t{}) contains
* zero or more channels. HAL channel contains zero or more descriptors. The
* latter are used by ULD(s) to manage the device and/or send and receive data
* to remote peer(s) via the channel.
*
* See also: xge_hal_channel_type_e{}, xge_hal_channel_flag_e,
* xge_hal_channel_callback_f{}
*/
typedef struct {
/* complete/free section */
xge_list_t item;
xge_hal_channel_callback_f callback;
void **free_arr;
int length;
int free_length;
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) || \
defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
spinlock_t free_lock;
#endif
int compl_index;
unsigned int usage_cnt;
unsigned int poll_bytes;
int unused0;
/* reserve/post data path section */
#ifdef __XGE_WIN__
int __xge_os_attr_cacheline_aligned
post_index;
#else
int post_index
__xge_os_attr_cacheline_aligned;
#endif
spinlock_t reserve_lock;
spinlock_t post_lock;
void **reserve_arr;
int reserve_length;
int reserve_threshold;
int reserve_top;
int unused1;
/* common section */
xge_hal_device_h devh;
pci_dev_h pdev;
pci_reg_h regh0;
pci_reg_h regh1;
void *userdata;
void **work_arr;
void **saved_arr;
void **orig_arr;
xge_hal_stats_channel_info_t stats;
/* slow section */
xge_hal_channel_type_e type;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
int post_qid;
int compl_qid;
xge_hal_channel_flag_e flags;
int reserve_initial;
int reserve_max;
int is_open;
int per_dtr_space;
xge_hal_channel_dtr_term_f dtr_term;
xge_hal_channel_dtr_init_f dtr_init;
/* MSI stuff */
u32 msi_msg;
u8 rti;
u8 tti;
u16 unused2;
/* MSI-X stuff */
u64 msix_address;
u32 msix_data;
int msix_idx;
volatile int in_interrupt;
unsigned int magic;
#ifdef __XGE_WIN__
} __xge_os_attr_cacheline_aligned xge_hal_channel_t ;
#else
} xge_hal_channel_t __xge_os_attr_cacheline_aligned;
#endif
/* ========================== CHANNEL PRIVATE API ========================= */
xge_hal_status_e
__hal_channel_initialize(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr, void **reserve_arr,
int reserve_initial, int reserve_max, int reserve_threshold);
void __hal_channel_terminate(xge_hal_channel_h channelh);
xge_hal_channel_t*
__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
#ifdef XGEHAL_RNIC
u32 vp_id,
#endif
xge_hal_channel_type_e type);
void __hal_channel_free(xge_hal_channel_t *channel);
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_CHANNEL)
#define __HAL_STATIC_CHANNEL
#define __HAL_INLINE_CHANNEL
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_complete(xge_hal_channel_h channelh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_dealloc(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int offset);
/* ========================== CHANNEL PUBLIC API ========================= */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_dtr_count(xge_hal_channel_h channelh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_id(xge_hal_channel_h channelh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
int copy_size);
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_CHANNEL static
#define __HAL_INLINE_CHANNEL inline
#include <dev/nxge/xgehal/xgehal-channel-fp.c>
#endif /* XGE_FASTPATH_INLINE */
xge_hal_status_e
xge_hal_channel_open(xge_hal_device_h hldev, xge_hal_channel_attr_t *attr,
xge_hal_channel_h *channel,
xge_hal_channel_reopen_e reopen);
void xge_hal_channel_close(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen);
void xge_hal_channel_abort(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen);
__EXTERN_END_DECLS
#endif /* XGE_HAL_CHANNEL_H */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,322 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-driver.h
*
* Description: HAL driver object functionality
*
* Created: 14 May 2004
*/
#ifndef XGE_HAL_DRIVER_H
#define XGE_HAL_DRIVER_H
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-list.h>
#include <dev/nxge/include/xge-queue.h>
#include <dev/nxge/include/xgehal-types.h>
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xgehal-event.h>
__EXTERN_BEGIN_DECLS
/* maximum number of events consumed in a single poll() cycle */
#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5
/**
* function xge_uld_sched_timer_cb_f - Per-device periodic timer
* callback.
* @devh: HAL device handle.
* @userdata: Per-device user data (a.k.a. context) specified via
* xge_hal_device_initialize().
*
* Periodic or one-shot timer callback. If specified (that is, not NULL)
* HAL invokes this callback periodically. The call is performed in the
* interrupt context, or more exactly, in the context of HAL's ISR
* xge_hal_device_continue_irq().
*
* See also: xge_hal_device_initialize{}
*/
typedef void (*xge_uld_sched_timer_cb_f)(xge_hal_device_h devh, void *userdata);
/**
* function xge_uld_link_up_f - Link-Up callback provided by upper-layer
* driver.
* @userdata: Opaque context set by the ULD via
* xge_hal_device_private_set()
 * (typically - at HAL device initialization time).
*
* Link-up notification callback provided by the ULD.
* This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
*
* See also: xge_hal_uld_cbs_t{}, xge_uld_link_down_f{},
* xge_hal_driver_initialize(), xge_hal_device_private_set().
*/
typedef void (*xge_uld_link_up_f) (void *userdata);
/**
* function xge_uld_link_down_f - Link-Down callback provided by
* upper-layer driver.
* @userdata: Opaque context set by the ULD via
* xge_hal_device_private_set()
 * (typically - at HAL device initialization time).
*
* Link-Down notification callback provided by the upper-layer driver.
* This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
*
* See also: xge_hal_uld_cbs_t{}, xge_uld_link_up_f{},
* xge_hal_driver_initialize(), xge_hal_device_private_set().
*/
typedef void (*xge_uld_link_down_f) (void *userdata);
/**
* function xge_uld_crit_err_f - Critical Error notification callback.
* @userdata: Opaque context set by the ULD via
* xge_hal_device_private_set()
 * (typically - at HAL device initialization time).
* @type: Enumerated hw error, e.g.: double ECC.
 * @ext_data: Extended data. The contents depend on the @type.
*
 * Critical-error notification callback provided by the upper-layer driver.
* This is one of the per-driver callbacks, see xge_hal_uld_cbs_t{}.
*
* See also: xge_hal_uld_cbs_t{}, xge_hal_event_e{},
* xge_hal_device_private_set(), xge_hal_driver_initialize().
*/
typedef void (*xge_uld_crit_err_f) (void *userdata, xge_hal_event_e type,
u64 ext_data);
/**
* function xge_uld_event_queued_f - Event-enqueued notification
* callback.
* @devh: HAL device handle.
* @event_type: HAL- or ULD-defined event type. Note that HAL
* events are enumerated by xge_hal_event_e{}.
*
* "Event-was-enqueued" notification callback provided by the upper-layer
* driver. The callback is invoked (if defined, i.e., not NULL in the
* xge_hal_uld_cbs_t{} structure) each time immediately after an event
* is enqueued.
*
* See also: xge_hal_uld_cbs_t{}, xge_hal_device_private_set(),
* xge_hal_driver_initialize().
*/
typedef void (*xge_uld_event_queued_f) (xge_hal_device_h devh, int event_type);
/**
* function xge_uld_event_f - ULD event callback.
* @item: ULD-defined event, item of the xge_queue_t.
*
* ULD event callback.
* Upper-layer driver can use HAL queue to serialize certain slow-path
* events. HAL periodically polls the queue as part of the
* xge_hal_device_poll() processing. When/if HAL discovers in the queue
 * an unknown event type it simply invokes the event callback
* (which must be non-NULL and supplied by the ULD in this case).
*
* See also: xge_hal_uld_cbs_t{}, xge_hal_device_poll(), xge_queue_t{},
* xge_hal_driver_initialize(), xge_queue_item_t{}.
*/
typedef void (*xge_uld_event_f) (xge_queue_item_t *item);
/**
* function xge_uld_before_device_poll_f - ULD "before-poll" callback.
* @devh: HAL device handle.
*
* HAL invokes the callback from inside its xge_hal_device_poll()
* implementation %prior to accessing the @devh device. This allows ULD to
 * perform per-device locking and/or context mapping, if required.
* The interface is currently used by AIX driver only.
* To avoid using/implementing the callback set the corresponding field
* in the xge_hal_uld_cbs_t{} structure to NULL.
*
* Returns: 0 on success, non-zero on failure.
*
* See also: xge_hal_driver_initialize(), xge_hal_uld_cbs_t{},
* xge_hal_device_poll().
*/
typedef int (*xge_uld_before_device_poll_f) (xge_hal_device_h devh);
/**
* function xge_uld_after_device_poll_f - ULD "after-poll" callback.
* @devh: HAL device handle.
*
* Unless NULL is specified,
* HAL invokes the callback from inside its xge_hal_device_poll()
* implementation immediately %after it has completed polling the @devh
 * device. This allows ULD to undo the effects of
* xge_uld_before_device_poll_f{}.
* The interface is currently used by AIX driver only.
*
* See also: xge_hal_driver_initialize(), xge_hal_uld_cbs_t{},
* xge_hal_device_poll().
*/
typedef void (*xge_uld_after_device_poll_f) (xge_hal_device_h devh);
/**
* function xge_uld_xpak_alarm_log_f - ULD "XPAK alarm log" callback.
* @devh: HAL device handle.
* @type: TODO
*
* Unless NULL is specified,
* HAL invokes the callback from inside __hal_chk_xpak_counter()
*/
typedef void (*xge_uld_xpak_alarm_log_f) (xge_hal_device_h devh, xge_hal_xpak_alarm_type_e type);
/**
* struct xge_hal_uld_cbs_t - Upper-layer driver "slow-path" callbacks.
* @link_up: See xge_uld_link_up_f{}.
* @link_down: See xge_uld_link_down_f{}.
* @crit_err: See xge_uld_crit_err_f{}.
* @event: See xge_uld_event_f{}.
* @event_queued: See xge_uld_event_queued_f{}.
* @before_device_poll: See xge_uld_before_device_poll_f{}.
* @after_device_poll: See xge_uld_after_device_poll_f{}.
* @sched_timer: See xge_uld_sched_timer_cb_f{}.
* @xpak_alarm_log: TODO
*
* Upper layer driver slow-path (per-driver) callbacks.
* Implemented by ULD and provided to HAL via
* xge_hal_driver_initialize().
* Note that these callbacks are not mandatory: HAL will not invoke
* a callback if NULL is specified.
*
 * Note that in addition to those, there are currently 2 per-channel callbacks
* (completion and abort) specified at channel open time
* via xge_hal_channel_open().
*
* See also: xge_hal_driver_initialize().
*/
typedef struct xge_hal_uld_cbs_t {
xge_uld_link_up_f link_up;
xge_uld_link_down_f link_down;
xge_uld_crit_err_f crit_err;
xge_uld_event_f event;
xge_uld_event_queued_f event_queued;
xge_uld_before_device_poll_f before_device_poll;
xge_uld_after_device_poll_f after_device_poll;
xge_uld_sched_timer_cb_f sched_timer;
xge_uld_xpak_alarm_log_f xpak_alarm_log;
} xge_hal_uld_cbs_t;
/**
* struct xge_hal_driver_t - Represents HAL object.
* @config: HAL configuration.
* @devices: List of all PCI-enumerated Xframe devices in the system.
* A single xge_hal_driver_t instance contains zero or more
* Xframe devices.
* @devices_lock: Lock to protect %devices when inserting/removing.
* @is_initialized: True if HAL is initialized; false otherwise.
* @uld_callbacks: Upper-layer driver callbacks. See xge_hal_uld_cbs_t{}.
* @debug_module_mask: 32bit mask that defines which components of the
* driver are to be traced. The trace-able components are:
* XGE_COMPONENT_HAL_CONFIG 0x1
* XGE_COMPONENT_HAL_FIFO 0x2
* XGE_COMPONENT_HAL_RING 0x4
* XGE_COMPONENT_HAL_CHANNEL 0x8
* XGE_COMPONENT_HAL_DEVICE 0x10
* XGE_COMPONENT_HAL_MM 0x20
* XGE_COMPONENT_HAL_QUEUE 0x40
* XGE_COMPONENT_HAL_STATS 0x100
* XGE_COMPONENT_OSDEP 0x1000
* XGE_COMPONENT_LL 0x2000
* XGE_COMPONENT_TOE 0x4000
* XGE_COMPONENT_RDMA 0x8000
* XGE_COMPONENT_ALL 0xffffffff
 * The @debug_module_mask allows tracing to be switched on and off at runtime.
* In addition, the traces for the same trace-able components can be
* compiled out, based on the same mask provided via Makefile.
* @debug_level: See xge_debug_level_e{}.
*
* HAL (driver) object. There is a single instance of this structure per HAL.
*/
typedef struct xge_hal_driver_t {
xge_hal_driver_config_t config;
int is_initialized;
xge_hal_uld_cbs_t uld_callbacks;
u32 debug_module_mask;
int debug_level;
} xge_hal_driver_t;
extern xge_hal_driver_t *g_xge_hal_driver;
static inline int
xge_hal_driver_is_initialized(void) {
return g_xge_hal_driver->is_initialized;
}
static inline int
xge_hal_driver_debug_module_mask(void)
{
return g_xge_hal_driver->debug_module_mask;
}
static inline void
xge_hal_driver_debug_module_mask_set(u32 new_mask)
{
#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
g_xge_hal_driver->debug_module_mask = new_mask;
g_module_mask = (unsigned long *)&g_xge_hal_driver->debug_module_mask;
#endif
}
static inline int
xge_hal_driver_debug_level(void) { return g_xge_hal_driver->debug_level; }
static inline void
xge_hal_driver_debug_level_set(int new_level)
{
#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
g_xge_hal_driver->debug_level = new_level;
g_level = &g_xge_hal_driver->debug_level;
#endif
}
xge_hal_status_e xge_hal_driver_initialize(xge_hal_driver_config_t *config,
xge_hal_uld_cbs_t *uld_callbacks);
void xge_hal_driver_terminate(void);
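/*
 * Bring-up sketch (illustrative): the ULD fills the slow-path callback
 * table and initializes the HAL once, before touching any device.
 * "my_link_up"/"my_link_down" are hypothetical ULD handlers and
 * XGE_HAL_OK is assumed to be the success status; callbacks left NULL
 * are simply never invoked:
 *
 *	xge_hal_driver_config_t dconfig;	... populated by the ULD ...
 *	xge_hal_uld_cbs_t uld_cbs = { 0 };
 *
 *	uld_cbs.link_up   = my_link_up;
 *	uld_cbs.link_down = my_link_down;
 *	if (xge_hal_driver_initialize(&dconfig, &uld_cbs) != XGE_HAL_OK)
 *		return;	... HAL failed to initialize ...
 *	...
 *	xge_hal_driver_terminate();
 */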
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
void xge_hal_driver_tracebuf_dump(void);
xge_hal_status_e
xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize);
#else
#define xge_hal_driver_tracebuf_dump()
#define xge_hal_driver_tracebuf_read(a, b, c) (0);
#endif
__EXTERN_END_DECLS
#endif /* XGE_HAL_DRIVER_H */

View File

@ -0,0 +1,85 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-event.h
*
* Description: event types
*
* Created: 7 June 2004
*/
#ifndef XGE_HAL_EVENT_H
#define XGE_HAL_EVENT_H
#include <dev/nxge/include/xge-os-pal.h>
__EXTERN_BEGIN_DECLS
#define XGE_HAL_EVENT_BASE 0
#define XGE_LL_EVENT_BASE 100
/**
* enum xge_hal_event_e - Enumerates slow-path HAL events.
* @XGE_HAL_EVENT_UNKNOWN: Unknown (and invalid) event.
* @XGE_HAL_EVENT_SERR: Serious hardware error event.
* @XGE_HAL_EVENT_LINK_IS_UP: The link state has changed from 'down' to
* 'up'; upper-layer driver (typically, link layer) is
* supposed to wake the queue, etc.
* @XGE_HAL_EVENT_LINK_IS_DOWN: Link-down event.
 * The link state has changed from 'up' to 'down';
* upper-layer driver is supposed to stop traffic, etc.
* @XGE_HAL_EVENT_ECCERR: ECC error event.
* @XGE_HAL_EVENT_PARITYERR: Parity error event.
* @XGE_HAL_EVENT_TARGETABORT: Target abort event. Used when device
* aborts transmit operation with the corresponding transfer code
* (for T_CODE enum see xgehal-fifo.h and xgehal-ring.h)
* @XGE_HAL_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
* slot-freeze from the rest critical events (e.g. ECC) when it is
* impossible to PIO read "through" the bus, i.e. when getting all-foxes.
*
 * xge_hal_event_e enumerates slow-path HAL events.
*
* See also: xge_hal_uld_cbs_t{}, xge_uld_link_up_f{},
* xge_uld_link_down_f{}.
*/
typedef enum xge_hal_event_e {
XGE_HAL_EVENT_UNKNOWN = 0,
/* HAL events */
XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1,
XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2,
XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3,
XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4,
XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5,
XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6,
XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7,
} xge_hal_event_e;
__EXTERN_END_DECLS
#endif /* XGE_HAL_EVENT_H */

View File

@ -0,0 +1,363 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-fifo.h
*
* Description: Tx fifo object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_FIFO_H
#define XGE_HAL_FIFO_H
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xgehal-mm.h>
__EXTERN_BEGIN_DECLS
/* HW fifo configuration */
#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65
#define XGE_HAL_FIFO_MAX_WRR 5
#define XGE_HAL_FIFO_MAX_PARTITION 4
#define XGE_HAL_FIFO_MAX_WRR_STATE 36
#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000
/* HW FIFO Weight Calendar */
#define XGE_HAL_FIFO_WRR_0 0x0706050407030602ULL
#define XGE_HAL_FIFO_WRR_1 0x0507040601070503ULL
#define XGE_HAL_FIFO_WRR_2 0x0604070205060700ULL
#define XGE_HAL_FIFO_WRR_3 0x0403060705010207ULL
#define XGE_HAL_FIFO_WRR_4 0x0604050300000000ULL
/*
* xge_hal_fifo_hw_pair_t
*
* Represent a single fifo in the BAR1 memory space.
*/
typedef struct {
u64 txdl_pointer; /* offset 0x0 */
u64 reserved[2];
u64 list_control; /* offset 0x18 */
#define XGE_HAL_TX_FIFO_LAST_TXD_NUM(val) vBIT(val,0,8)
#define XGE_HAL_TX_FIFO_FIRST_LIST BIT(14)
#define XGE_HAL_TX_FIFO_LAST_LIST BIT(15)
#define XGE_HAL_TX_FIFO_FIRSTNLAST_LIST vBIT(3,14,2)
#define XGE_HAL_TX_FIFO_SPECIAL_FUNC BIT(23)
#define XGE_HAL_TX_FIFO_NO_SNOOP(n) vBIT(n,30,2)
} xge_hal_fifo_hw_pair_t;
/* Bad TxDL transfer codes */
#define XGE_HAL_TXD_T_CODE_OK 0x0
#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1
#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2
#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3
#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5
#define XGE_HAL_TXD_T_CODE_PARITY 0x7
#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA
#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF
/**
* struct xge_hal_fifo_txd_t - TxD.
* @control_1: Control_1.
* @control_2: Control_2.
* @buffer_pointer: Buffer_Address.
 * @host_control: Host_Control. Opaque 64bit data stored by ULD inside the Xframe
* descriptor prior to posting the latter on the channel
* via xge_hal_fifo_dtr_post() or xge_hal_ring_dtr_post().
* The %host_control is returned as is to the ULD with each
* completed descriptor.
*
 * Transmit descriptor (TxD). Fifo descriptor contains a configured number
 * (list) of TxDs. For more details please refer to Xframe User Guide,
* Section 5.4.2 "Transmit Descriptor (TxD) Format".
*/
typedef struct xge_hal_fifo_txd_t {
u64 control_1;
#define XGE_HAL_TXD_LIST_OWN_XENA BIT(7)
#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_GET_TXD_T_CODE(val) ((val & XGE_HAL_TXD_T_CODE)>>48)
#define XGE_HAL_SET_TXD_T_CODE(x, val) (x |= (((u64)val & 0xF) << 48))
#define XGE_HAL_TXD_GATHER_CODE (BIT(22) | BIT(23))
#define XGE_HAL_TXD_GATHER_CODE_FIRST BIT(22)
#define XGE_HAL_TXD_GATHER_CODE_LAST BIT(23)
#define XGE_HAL_TXD_NO_LSO 0
#define XGE_HAL_TXD_UDF_COF 1
#define XGE_HAL_TXD_TCP_LSO 2
#define XGE_HAL_TXD_UDP_LSO 3
#define XGE_HAL_TXD_LSO_COF_CTRL(val) vBIT(val,30,2)
#define XGE_HAL_TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
#define XGE_HAL_TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
#define XGE_HAL_TXD_GET_LSO_BYTES_SENT(val) ((val & vBIT(0xFFFF,16,16))>>32)
u64 control_2;
#define XGE_HAL_TXD_TX_CKO_CONTROL (BIT(5)|BIT(6)|BIT(7))
#define XGE_HAL_TXD_TX_CKO_IPV4_EN BIT(5)
#define XGE_HAL_TXD_TX_CKO_TCP_EN BIT(6)
#define XGE_HAL_TXD_TX_CKO_UDP_EN BIT(7)
#define XGE_HAL_TXD_VLAN_ENABLE BIT(15)
#define XGE_HAL_TXD_VLAN_TAG(val) vBIT(val,16,16)
#define XGE_HAL_TXD_INT_NUMBER(val) vBIT(val,34,6)
#define XGE_HAL_TXD_INT_TYPE_PER_LIST BIT(47)
#define XGE_HAL_TXD_INT_TYPE_UTILZ BIT(46)
#define XGE_HAL_TXD_SET_MARKER vBIT(0x6,0,4)
u64 buffer_pointer;
u64 host_control;
} xge_hal_fifo_txd_t;
typedef xge_hal_fifo_txd_t* xge_hal_fifo_txdl_t;
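/*
 * Filling a single-buffer TxD with the helper macros above
 * (illustrative sketch; the descriptor is assumed to be already
 * reserved, and "dma_addr"/"frame_len" describe a mapped frame):
 *
 *	xge_hal_fifo_txd_t *txdp = ...;	... first TxD of the list ...
 *
 *	txdp->buffer_pointer = dma_addr;
 *	txdp->control_1 = XGE_HAL_TXD_GATHER_CODE_FIRST |
 *			  XGE_HAL_TXD_GATHER_CODE_LAST |
 *			  XGE_HAL_TXD_BUFFER0_SIZE(frame_len);
 *	txdp->control_2 = XGE_HAL_TXD_TX_CKO_IPV4_EN |
 *			  XGE_HAL_TXD_TX_CKO_TCP_EN;
 */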
/**
* struct xge_hal_fifo_t - Fifo channel.
* @channel: Channel "base" of this fifo, the common part of all HAL
* channels.
* @post_lock_ptr: Points to a lock that serializes (pointer, control) PIOs.
* Note that for Xena the serialization is done across all device
* fifos.
* @hw_pair: Per-fifo (Pointer, Control) pair used to send descriptors to the
* Xframe hardware (for details see Xframe user guide).
* @config: Fifo configuration, part of device configuration
* (see xge_hal_device_config_t{}).
* @no_snoop_bits: See xge_hal_fifo_config_t{}.
* @txdl_per_memblock: Number of TxDLs (TxD lists) per memblock.
 * For more details on TxDL please refer to Xframe UG.
* @interrupt_type: FIXME: to-be-defined.
* @txdl_size: Configured TxDL size (i.e., number of TxDs in a list), plus
* per-TxDL HAL private space (xge_hal_fifo_txdl_priv_t).
* @priv_size: Per-Tx descriptor space reserved for upper-layer driver
* usage.
* @mempool: Memory pool, from which descriptors get allocated.
* @align_size: TBD
*
* Fifo channel.
* Note: The structure is cache line aligned.
*/
typedef struct xge_hal_fifo_t {
xge_hal_channel_t channel;
spinlock_t *post_lock_ptr;
xge_hal_fifo_hw_pair_t *hw_pair;
xge_hal_fifo_config_t *config;
int no_snoop_bits;
int txdl_per_memblock;
u64 interrupt_type;
int txdl_size;
int priv_size;
xge_hal_mempool_t *mempool;
int align_size;
} __xge_os_attr_cacheline_aligned xge_hal_fifo_t;
/**
* struct xge_hal_fifo_txdl_priv_t - Transmit descriptor HAL-private
* data.
* @dma_addr: DMA (mapped) address of _this_ descriptor.
* @dma_handle: DMA handle used to map the descriptor onto device.
* @dma_offset: Descriptor's offset in the memory block. HAL allocates
* descriptors in memory blocks (see
* xge_hal_fifo_config_t{})
* Each memblock is a contiguous block of DMA-able memory.
* @frags: Total number of fragments (that is, contiguous data buffers)
* carried by this TxDL.
* @align_vaddr_start: (TODO).
* @align_vaddr: Virtual address of the per-TxDL area in memory used for
* alignment. Used to place one or more mis-aligned fragments
* (the maximum defined by the configuration variable
* @max_aligned_frags).
* @align_dma_addr: DMA address translated from the @align_vaddr.
* @align_dma_handle: DMA handle that corresponds to @align_dma_addr.
* @align_dma_acch: DMA access handle corresponds to @align_dma_addr.
* @align_dma_offset: The current offset into the @align_vaddr area.
* Grows while filling the descriptor, gets reset.
* @align_used_frags: (TODO).
* @alloc_frags: Total number of fragments allocated.
* @dang_frags: Number of fragments kept from release until this TxDL is freed.
* @bytes_sent: TODO
* @unused: TODO
* @dang_txdl: (TODO).
* @next_txdl_priv: (TODO).
* @first_txdp: (TODO).
* @dang_dtrh: Pointer to TxDL (list) kept from release until this TxDL
* is freed.
* @linked_txdl_priv: Pointer to any linked TxDL for creating contiguous
* TxDL list.
* @dtrh: Corresponding dtrh to this TxDL.
* @memblock: Pointer to the TxDL memory block or memory page.
* @dma_object: DMA address and handle of the memory block that contains
* the descriptor. This member is used only in the "checked"
* version of the HAL (to enforce certain assertions);
* otherwise it gets compiled out.
* @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
*
* Per-transmit descriptor HAL-private data. HAL uses the space to keep DMA
* information associated with the descriptor. Note that ULD can ask HAL
* to allocate additional per-descriptor space for its own (ULD-specific)
* purposes.
*
* See also: xge_hal_ring_rxd_priv_t{}.
*/
typedef struct xge_hal_fifo_txdl_priv_t {
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
int frags;
char *align_vaddr_start;
char *align_vaddr;
dma_addr_t align_dma_addr;
pci_dma_h align_dma_handle;
pci_dma_acc_h align_dma_acch;
ptrdiff_t align_dma_offset;
int align_used_frags;
int alloc_frags;
int dang_frags;
unsigned int bytes_sent;
int unused;
xge_hal_fifo_txd_t *dang_txdl;
struct xge_hal_fifo_txdl_priv_t *next_txdl_priv;
xge_hal_fifo_txd_t *first_txdp;
void *memblock;
#ifdef XGE_DEBUG_ASSERT
xge_hal_mempool_dma_t *dma_object;
#endif
#ifdef XGE_OS_MEMORY_CHECK
int allocated;
#endif
} xge_hal_fifo_txdl_priv_t;
/**
* xge_hal_fifo_get_max_frags_cnt - Return the max fragments allocated
* for the fifo.
* @channelh: Channel handle.
*/
static inline int
xge_hal_fifo_get_max_frags_cnt(xge_hal_channel_h channelh)
{
return ((xge_hal_fifo_t *)channelh)->config->max_frags;
}
/* ========================= FIFO PRIVATE API ============================= */
xge_hal_status_e __hal_fifo_open(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr);
void __hal_fifo_close(xge_hal_channel_h channelh);
void __hal_fifo_hw_initialize(xge_hal_device_h hldev);
xge_hal_status_e
__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
void
__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_FIFO)
#define __HAL_STATIC_FIFO
#define __HAL_INLINE_FIFO
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u64 ctrl_1);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
xge_hal_fifo_txd_t *txdp, int txdl_count);
/* ========================= FIFO PUBLIC API ============================== */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channel, int dtr_sp_size,
xge_hal_dtr_h dtr_sp);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
xge_hal_dtr_h dtrs[]);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx, dma_addr_t dma_pointer, int size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
dma_addr_t dma_pointer, int size, int misaligned_size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
void *vaddr, int size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh);
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_FIFO static
#define __HAL_INLINE_FIFO inline
#include <dev/nxge/xgehal/xgehal-fifo-fp.c>
#endif /* XGE_FASTPATH_INLINE */
__EXTERN_END_DECLS
#endif /* XGE_HAL_FIFO_H */
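To tie the public fifo API together, a minimal transmit-path sketch, assuming
an already-opened fifo channel and a pre-mapped DMA buffer (both
hypothetical); replenishment and error handling beyond the reserve step are
elided.
static xge_hal_status_e
example_xmit_one(xge_hal_channel_h channelh, dma_addr_t dma, int len)
{
	xge_hal_dtr_h dtrh;
	xge_hal_status_e status;

	/* grab a free descriptor; may fail with INF_OUT_OF_DESCRIPTORS */
	status = xge_hal_fifo_dtr_reserve(channelh, &dtrh);
	if (status != XGE_HAL_OK)
		return status;
	/* attach the (already DMA-mapped) buffer as fragment 0 */
	xge_hal_fifo_dtr_buffer_set(channelh, dtrh, 0, dma, len);
	/* hand the descriptor to the Xframe hardware */
	xge_hal_fifo_dtr_post(channelh, dtrh);
	return XGE_HAL_OK;
}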


@ -0,0 +1,228 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-mgmt.h
*
* Description: management API
*
* Created: 1 September 2004
*/
#ifndef XGE_HAL_MGMT_H
#define XGE_HAL_MGMT_H
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-debug.h>
#include <dev/nxge/include/xgehal-types.h>
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xgehal-stats.h>
#include <dev/nxge/include/xgehal-regs.h>
#include <dev/nxge/include/xgehal-device.h>
__EXTERN_BEGIN_DECLS
/**
* struct xge_hal_mgmt_about_info_t - About info.
* @vendor: PCI Vendor ID.
* @device: PCI Device ID.
* @subsys_vendor: PCI Subsystem Vendor ID.
* @subsys_device: PCI Subsystem Device ID.
* @board_rev: PCI Board revision, e.g. 3 - for Xena 3.
* @vendor_name: Neterion, Inc.
* @chip_name: Xframe.
* @media: Fiber, copper.
* @hal_major: HAL major version number.
* @hal_minor: HAL minor version number.
* @hal_fix: HAL fix number.
* @hal_build: HAL build number.
* @ll_major: Link-layer ULD major version number.
* @ll_minor: Link-layer ULD minor version number.
* @ll_fix: Link-layer ULD fix version number.
* @ll_build: Link-layer ULD build number.
* @transponder_temperature: TODO
*/
typedef struct xge_hal_mgmt_about_info_t {
u16 vendor;
u16 device;
u16 subsys_vendor;
u16 subsys_device;
u8 board_rev;
char vendor_name[16];
char chip_name[16];
char media[16];
char hal_major[4];
char hal_minor[4];
char hal_fix[4];
char hal_build[16];
char ll_major[4];
char ll_minor[4];
char ll_fix[4];
char ll_build[16];
u32 transponder_temperature;
} xge_hal_mgmt_about_info_t;
typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t;
typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t;
typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t;
typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t;
typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t;
typedef xge_hal_device_config_t xge_hal_mgmt_device_config_t;
typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t;
typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t;
xge_hal_status_e
xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info,
int size);
xge_hal_status_e
xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats,
int size);
xge_hal_status_e
xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out);
xge_hal_status_e
xge_hal_mgmt_pcim_stats(xge_hal_device_h devh,
xge_hal_mgmt_pcim_stats_t *pcim_stats, int size);
xge_hal_status_e
xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size,
char *out);
xge_hal_status_e
xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *hw_stats,
int size);
xge_hal_status_e
xge_hal_mgmt_device_stats(xge_hal_device_h devh,
xge_hal_mgmt_device_stats_t *device_stats, int size);
xge_hal_status_e
xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
xge_hal_mgmt_channel_stats_t *channel_stats, int size);
xge_hal_status_e
xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset,
u64 *value);
xge_hal_status_e
xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset,
u64 value);
xge_hal_status_e
xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset,
int bits, u32 *value);
xge_hal_status_e
xge_hal_mgmt_device_config(xge_hal_device_h devh,
xge_hal_mgmt_device_config_t *dev_config, int size);
xge_hal_status_e
xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config,
int size);
xge_hal_status_e
xge_hal_mgmt_pci_config(xge_hal_device_h devh,
xge_hal_mgmt_pci_config_t *pci_config, int size);
xge_hal_status_e
xge_hal_pma_loopback(xge_hal_device_h devh, int enable);
xge_hal_status_e
xge_hal_rldram_test(xge_hal_device_h devh, u64 *data);
u16
xge_hal_mdio_read(xge_hal_device_h devh, u32 mmd_type, u64 addr);
xge_hal_status_e
xge_hal_mdio_write(xge_hal_device_h devh, u32 mmd_type, u64 addr, u32 value);
u32
xge_hal_read_xfp_current_temp(xge_hal_device_h devh);
xge_hal_status_e
xge_hal_read_eeprom(xge_hal_device_h devh, int off, u32 *data);
xge_hal_status_e
xge_hal_write_eeprom(xge_hal_device_h devh, int off, u32 data, int cnt);
xge_hal_status_e
xge_hal_register_test(xge_hal_device_h devh, u64 *data);
xge_hal_status_e
xge_hal_eeprom_test(xge_hal_device_h devh, u64 *data);
xge_hal_status_e
xge_hal_bist_test(xge_hal_device_h devh, u64 *data);
xge_hal_status_e
xge_hal_link_test(xge_hal_device_h devh, u64 *data);
int
xge_hal_setpause_data(xge_hal_device_h devh, int tx, int rx);
void
xge_hal_getpause_data(xge_hal_device_h devh, int *tx, int *rx);
void
__hal_updt_stats_xpak(xge_hal_device_t *hldev);
void
__hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value);
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
xge_hal_status_e
xge_hal_mgmt_trace_read(char *buffer, unsigned buf_size, unsigned *offset,
unsigned *read_length);
#endif
void
xge_hal_restore_link_led(xge_hal_device_h devh);
void
xge_hal_flick_link_led(xge_hal_device_h devh);
/*
* Some Xena3 cards are known to have link LED problems. This macro
* identifies whether a card is among them, given its subsystem ID.
*/
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
((((subid >= 0x600B) && (subid <= 0x600D)) || \
((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0)
#define CHECKBIT(value, nbit) (value & (1 << nbit))
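A small usage sketch for the macro above, assuming the caller has already
extracted the PCI subsystem ID (the subid parameter is hypothetical); it
simply skips the LED blink on the known-bad boards.
static void
example_flick_led(xge_hal_device_h devh, u16 subid)
{
	/* skip boards whose link LED logic is known to be faulty */
	if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid))
		return;
	xge_hal_flick_link_led(devh);
}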
#ifdef XGE_HAL_USE_MGMT_AUX
#include <dev/nxge/include/xgehal-mgmtaux.h>
#endif
__EXTERN_END_DECLS
#endif /* XGE_HAL_MGMT_H */
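A minimal sketch of the management "get" convention used throughout this
header: the caller passes the size of its structure, and the call can fail
with XGE_HAL_ERR_OUT_OF_SPACE or XGE_HAL_ERR_VERSION_CONFLICT on mismatch.
xge_os_printf is assumed to be the usual OS-pal print helper.
static void
example_print_about(xge_hal_device_h devh)
{
	xge_hal_mgmt_about_info_t about;

	/* the size argument lets HAL detect caller/HAL structure mismatch */
	if (xge_hal_mgmt_about(devh, &about, sizeof(about)) != XGE_HAL_OK)
		return;
	xge_os_printf("%s %s, HAL %s.%s.%s", about.vendor_name,
	    about.chip_name, about.hal_major, about.hal_minor, about.hal_fix);
}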


@ -0,0 +1,95 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-mgmtaux.h
*
* Description: management auxiliary API
*
* Created: 1 September 2004
*/
#ifndef XGE_HAL_MGMTAUX_H
#define XGE_HAL_MGMTAUX_H
#include <dev/nxge/include/xgehal-mgmt.h>
__EXTERN_BEGIN_DECLS
#define XGE_HAL_AUX_SEPA ' '
xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh,
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh,
unsigned int offset, u64 value);
xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh,
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_device_dump(xge_hal_device_h devh);
xge_hal_status_e xge_hal_aux_driver_config_read(int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
__EXTERN_END_DECLS
#endif /* XGE_HAL_MGMTAUX_H */
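All aux "read" routines above share one calling convention: they format text
into a caller-supplied buffer and return the produced length through retsize.
A sketch, with the buffer size chosen arbitrarily and xge_os_printf assumed
from the OS pal:
static void
example_dump_about(xge_hal_device_h devh)
{
	char buf[1024];
	int retsize;

	if (xge_hal_aux_about_read(devh, sizeof(buf), buf,
	    &retsize) == XGE_HAL_OK)
		xge_os_printf("%s", buf);
	/* XGE_HAL_ERR_OUT_OF_SPACE would mean buf was too small */
}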


@ -0,0 +1,174 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-mm.h
*
* Description: memory pool object
*
* Created: 28 May 2004
*/
#ifndef XGE_HAL_MM_H
#define XGE_HAL_MM_H
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-debug.h>
#include <dev/nxge/include/xgehal-types.h>
#include <dev/nxge/include/xgehal-driver.h>
__EXTERN_BEGIN_DECLS
typedef void* xge_hal_mempool_h;
/*
* struct xge_hal_mempool_dma_t - Represents DMA objects passed to the
* caller.
*/
typedef struct xge_hal_mempool_dma_t {
dma_addr_t addr;
pci_dma_h handle;
pci_dma_acc_h acc_handle;
} xge_hal_mempool_dma_t;
/*
* xge_hal_mempool_item_f - Mempool item alloc/free callback
* @mempoolh: Memory pool handle.
* @item: Item that gets allocated or freed.
* @index: Item's index in the memory pool.
* @is_last: True if this item is the last one in the pool; false otherwise.
* @userdata: Per-pool user context.
*
* Memory pool allocation/deallocation callback.
*/
typedef xge_hal_status_e (*xge_hal_mempool_item_f) (xge_hal_mempool_h mempoolh,
void *memblock, int memblock_index,
xge_hal_mempool_dma_t *dma_object, void *item,
int index, int is_last, void *userdata);
/*
* struct xge_hal_mempool_t - Memory pool.
*/
typedef struct xge_hal_mempool_t {
xge_hal_mempool_item_f item_func_alloc;
xge_hal_mempool_item_f item_func_free;
void *userdata;
void **memblocks_arr;
void **memblocks_priv_arr;
xge_hal_mempool_dma_t *memblocks_dma_arr;
pci_dev_h pdev;
int memblock_size;
int memblocks_max;
int memblocks_allocated;
int item_size;
int items_max;
int items_initial;
int items_current;
int items_per_memblock;
void **items_arr;
void **shadow_items_arr;
int items_priv_size;
} xge_hal_mempool_t;
/*
* __hal_mempool_item - Returns pointer to the item in the mempool
* items array.
*/
static inline void*
__hal_mempool_item(xge_hal_mempool_t *mempool, int index)
{
return mempool->items_arr[index];
}
/*
* __hal_mempool_item_priv - Returns a pointer to the per-item private space.
*/
static inline void*
__hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx,
void *item, int *memblock_item_idx)
{
ptrdiff_t offset;
void *memblock = mempool->memblocks_arr[memblock_idx];
xge_assert(memblock);
offset = (int)((char *)item - (char *)memblock);
xge_assert(offset >= 0 && offset < mempool->memblock_size);
(*memblock_item_idx) = (int) offset / mempool->item_size;
xge_assert((*memblock_item_idx) < mempool->items_per_memblock);
return (char*)mempool->memblocks_priv_arr[memblock_idx] +
(*memblock_item_idx) * mempool->items_priv_size;
}
/*
* __hal_mempool_items_arr - Returns a pointer to the items array in the
* mempool.
*/
static inline void*
__hal_mempool_items_arr(xge_hal_mempool_t *mempool)
{
return mempool->items_arr;
}
/*
* __hal_mempool_memblock - Returns a pointer to the memblock in the
* mempool memblocks array.
*/
static inline void*
__hal_mempool_memblock(xge_hal_mempool_t *mempool, int memblock_idx)
{
xge_assert(mempool->memblocks_arr[memblock_idx]);
return mempool->memblocks_arr[memblock_idx];
}
/*
* __hal_mempool_memblock_dma - Returns a pointer to the DMA block that
* corresponds to the memblock (identified by memblock_idx) in the mempool.
*/
static inline xge_hal_mempool_dma_t*
__hal_mempool_memblock_dma(xge_hal_mempool_t *mempool, int memblock_idx)
{
return mempool->memblocks_dma_arr + memblock_idx;
}
xge_hal_status_e __hal_mempool_grow(xge_hal_mempool_t *mempool,
int num_allocate, int *num_allocated);
xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size,
int item_size, int private_size, int items_initial,
int items_max, xge_hal_mempool_item_f item_func_alloc,
xge_hal_mempool_item_f item_func_free, void *userdata);
void __hal_mempool_destroy(xge_hal_mempool_t *mempool);
__EXTERN_END_DECLS
#endif /* XGE_HAL_MM_H */
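A sketch of pool creation, assuming hypothetical sizes and a trivial per-item
init callback; the callback signature matches xge_hal_mempool_item_f above,
and xge_os_memzero is assumed to be the usual OS-pal helper.
static xge_hal_status_e
example_item_init(xge_hal_mempool_h mempoolh, void *memblock,
	int memblock_index, xge_hal_mempool_dma_t *dma_object,
	void *item, int index, int is_last, void *userdata)
{
	xge_os_memzero(item, 128);	/* matches item_size chosen below */
	return XGE_HAL_OK;
}

static xge_hal_mempool_t *
example_pool_create(pci_dev_h pdev)
{
	/* 64KB memblocks, 128-byte items, no per-item private area;
	 * grow from 256 items up to 1024 on demand */
	return __hal_mempool_create(pdev, 65536, 128, 0, 256, 1024,
	    example_item_init, NULL /* no free callback */, NULL);
}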

File diff suppressed because it is too large.


@ -0,0 +1,473 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-ring.h
*
* Description: HAL Rx ring object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_RING_H
#define XGE_HAL_RING_H
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xgehal-mm.h>
__EXTERN_BEGIN_DECLS
/* HW ring configuration */
#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000
#define XGE_HAL_RXD_T_CODE_OK 0x0
#define XGE_HAL_RXD_T_CODE_PARITY 0x1
#define XGE_HAL_RXD_T_CODE_ABORT 0x2
#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3
#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4
#define XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO 0x5
#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6
#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7
#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8
#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC
#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF
#define XGE_HAL_RING_USE_MTU -1
/* control_1 and control_2 formatting - same for all buffer modes */
#define XGE_HAL_RXD_GET_L3_CKSUM(control_1) ((u16)(control_1>>16) & 0xFFFF)
#define XGE_HAL_RXD_GET_L4_CKSUM(control_1) ((u16)(control_1 & 0xFFFF))
#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_SET_VLAN_TAG(control_2, val) (control_2 |= (u16)(val))
#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF))
#define XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */
#define XGE_HAL_RXD_NOT_COMPLETED BIT(0) /* control_2 */
#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_RXD_GET_T_CODE(control_1) \
((control_1 & XGE_HAL_RXD_T_CODE)>>48)
#define XGE_HAL_RXD_SET_T_CODE(control_1, val) \
(control_1 |= (((u64)val & 0xF) << 48))
#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2)
#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8)
#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \
(u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37))
#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \
(u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32)
#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24)
#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27)
#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28)
#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29)
#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30)
#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31)
#define XGE_HAL_RXD_FRAME_TCP_OR_UDP (XGE_HAL_RXD_FRAME_PROTO_TCP | \
XGE_HAL_RXD_FRAME_PROTO_UDP)
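A decode sketch for a completed descriptor's control words, using only the
accessor macros above; what the ULD does with the results is up to it.
static void
example_decode_rxd(u64 control_1, u64 control_2)
{
	u8 t_code = (u8)XGE_HAL_RXD_GET_T_CODE(control_1);
	u16 l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(control_1);
	u16 vlan = XGE_HAL_RXD_GET_VLAN_TAG(control_2);

	if (t_code != XGE_HAL_RXD_T_CODE_OK)
		return;		/* parity, bad FCS, buffer size, ... */
	if (control_1 & XGE_HAL_RXD_FRAME_TCP_OR_UDP) {
		/* l4_cksum holds the hardware TCP/UDP checksum result */
	}
	(void)l4_cksum; (void)vlan;
}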
/**
* enum xge_hal_frame_type_e - Ethernet frame format.
* @XGE_HAL_FRAME_TYPE_DIX: DIX (Ethernet II) format.
* @XGE_HAL_FRAME_TYPE_LLC: LLC format.
* @XGE_HAL_FRAME_TYPE_SNAP: SNAP format.
* @XGE_HAL_FRAME_TYPE_IPX: IPX format.
*
* Ethernet frame format.
*/
typedef enum xge_hal_frame_type_e {
XGE_HAL_FRAME_TYPE_DIX = 0x0,
XGE_HAL_FRAME_TYPE_LLC = 0x1,
XGE_HAL_FRAME_TYPE_SNAP = 0x2,
XGE_HAL_FRAME_TYPE_IPX = 0x3,
} xge_hal_frame_type_e;
/**
* enum xge_hal_frame_proto_e - Higher-layer ethernet protocols.
* @XGE_HAL_FRAME_PROTO_VLAN_TAGGED: VLAN.
* @XGE_HAL_FRAME_PROTO_IPV4: IPv4.
* @XGE_HAL_FRAME_PROTO_IPV6: IPv6.
* @XGE_HAL_FRAME_PROTO_IP_FRAGMENTED: IP fragmented.
* @XGE_HAL_FRAME_PROTO_TCP: TCP.
* @XGE_HAL_FRAME_PROTO_UDP: UDP.
* @XGE_HAL_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
*
* Higher layer ethernet protocols and options.
*/
typedef enum xge_hal_frame_proto_e {
XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80,
XGE_HAL_FRAME_PROTO_IPV4 = 0x10,
XGE_HAL_FRAME_PROTO_IPV6 = 0x08,
XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04,
XGE_HAL_FRAME_PROTO_TCP = 0x02,
XGE_HAL_FRAME_PROTO_UDP = 0x01,
XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \
XGE_HAL_FRAME_PROTO_UDP)
} xge_hal_frame_proto_e;
/*
* xge_hal_ring_rxd_1_t
*/
typedef struct {
u64 host_control;
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_1_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
#define XGE_HAL_RXD_1_GET_RTH_VALUE(Control_2) \
(u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16)
u64 buffer0_ptr;
} xge_hal_ring_rxd_1_t;
/*
* xge_hal_ring_rxd_3_t
*/
typedef struct {
u64 host_control;
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8)
#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8)
#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_3_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFF,8,8))>>48)
#define XGE_HAL_RXD_3_GET_BUFFER1_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
#define XGE_HAL_RXD_3_GET_BUFFER2_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
u64 buffer0_ptr;
u64 buffer1_ptr;
u64 buffer2_ptr;
} xge_hal_ring_rxd_3_t;
/*
* xge_hal_ring_rxd_5_t
*/
typedef struct {
#ifdef XGE_OS_HOST_BIG_ENDIAN
u32 host_control;
u32 control_3;
#else
u32 control_3;
u32 host_control;
#endif
#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16)
#define XGE_HAL_RXD_5_GET_BUFFER3_SIZE(Control_3) \
(int)((Control_3 & vBIT(0xFFFF,32,16))>>16)
#define XGE_HAL_RXD_5_GET_BUFFER4_SIZE(Control_3) \
(int)((Control_3 & vBIT(0xFFFF,48,16)))
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
#define XGE_HAL_RXD_5_GET_BUFFER1_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
#define XGE_HAL_RXD_5_GET_BUFFER2_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
u64 buffer0_ptr;
u64 buffer1_ptr;
u64 buffer2_ptr;
u64 buffer3_ptr;
u64 buffer4_ptr;
} xge_hal_ring_rxd_5_t;
#define XGE_HAL_RXD_GET_RTH_SPDM_HIT(Control_1) \
(u8)((Control_1 & BIT(18))>>45)
#define XGE_HAL_RXD_GET_RTH_IT_HIT(Control_1) \
(u8)((Control_1 & BIT(19))>>44)
#define XGE_HAL_RXD_GET_RTH_HASH_TYPE(Control_1) \
(u8)((Control_1 & vBIT(0xF,20,4))>>40)
#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2
#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5
#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8
#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9
typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE];
#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8
#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0
#define XGE_HAL_RING_RXD_SIZEOF(n) \
(n==1 ? sizeof(xge_hal_ring_rxd_1_t) : \
(n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \
sizeof(xge_hal_ring_rxd_5_t)))
#define XGE_HAL_RING_RXDS_PER_BLOCK(n) \
(n==1 ? 127 : (n==3 ? 85 : 63))
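The per-block counts follow from the 4 KB block size: 4096/32 = 128 one-buffer
RxDs, minus one slot reserved for the block trailer (next-block pointer at
offset 0xFF8, memblock index at 0xFF0), giving 127; likewise 4096/64 = 64
minus one gives 63, while 85 48-byte descriptors (4080 bytes) already leave
the 16-byte trailer free. A sanity-check sketch, assuming xge_assert from
xge-debug.h:
static void
example_rxd_layout_check(void)
{
	/* descriptors must not overlap the 16-byte block trailer */
	xge_assert(XGE_HAL_RING_RXDS_PER_BLOCK(1) *
	    sizeof(xge_hal_ring_rxd_1_t) <= XGE_HAL_RING_MEMBLOCK_IDX_OFFSET);
	xge_assert(XGE_HAL_RING_RXDS_PER_BLOCK(3) *
	    sizeof(xge_hal_ring_rxd_3_t) <= XGE_HAL_RING_MEMBLOCK_IDX_OFFSET);
	xge_assert(XGE_HAL_RING_RXDS_PER_BLOCK(5) *
	    sizeof(xge_hal_ring_rxd_5_t) <= XGE_HAL_RING_MEMBLOCK_IDX_OFFSET);
}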
/**
* struct xge_hal_ring_rxd_priv_t - Receive descriptor HAL-private data.
* @dma_addr: DMA (mapped) address of _this_ descriptor.
* @dma_handle: DMA handle used to map the descriptor onto device.
* @dma_offset: Descriptor's offset in the memory block. HAL allocates
* descriptors in memory blocks of
* %XGE_HAL_RING_RXDBLOCK_SIZE
* bytes. Each memblock is contiguous DMA-able memory. Each
* memblock contains 1 or more 4KB RxD blocks visible to the
* Xframe hardware.
* @dma_object: DMA address and handle of the memory block that contains
* the descriptor. This member is used only in the "checked"
* version of the HAL (to enforce certain assertions);
* otherwise it gets compiled out.
* @allocated: True if the descriptor is reserved, 0 otherwise. Internal usage.
*
* Per-receive descriptor HAL-private data. HAL uses the space to keep DMA
* information associated with the descriptor. Note that ULD can ask HAL
* to allocate additional per-descriptor space for its own (ULD-specific)
* purposes.
*/
typedef struct xge_hal_ring_rxd_priv_t {
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
#ifdef XGE_DEBUG_ASSERT
xge_hal_mempool_dma_t *dma_object;
#endif
#ifdef XGE_OS_MEMORY_CHECK
int allocated;
#endif
} xge_hal_ring_rxd_priv_t;
/**
* struct xge_hal_ring_t - Ring channel.
* @channel: Channel "base" of this ring, the common part of all HAL
* channels.
* @buffer_mode: 1, 3, or 5. The value specifies a receive buffer mode,
* as per Xframe User Guide.
* @indicate_max_pkts: Maximum number of packets processed within a single
* interrupt. Can be used to limit the time spent inside hw
* interrupt.
* @config: Ring configuration, part of device configuration
* (see xge_hal_device_config_t{}).
* @rxd_size: RxD sizes for 1-, 3- or 5- buffer modes. As per Xframe spec,
* 1-buffer mode descriptor is 32 bytes long, etc.
* @rxd_priv_size: Per-RxD size reserved (by HAL) for the ULD to keep
* per-descriptor data (e.g., a DMA handle for Solaris).
* @rxds_per_block: Number of descriptors per hardware-defined RxD
* block. Depends on the (1-,3-,5-) buffer mode.
* @mempool: Memory pool, the pool from which descriptors get allocated.
* (See xge_hal_mm.h).
* @rxdblock_priv_size: Reserved at the end of each RxD block. HAL internal
* usage. Not to be confused with @rxd_priv_size.
* @reserved_rxds_arr: Array of RxD pointers. At any point in time each
* entry in this array is available for allocation
* (via xge_hal_ring_dtr_reserve()) and posting.
* @cmpl_cnt: Completion counter. Reset to zero upon entering the ISR.
* Used in conjunction with @indicate_max_pkts.
*
* Ring channel.
*
* Note: The structure is cache line aligned to better utilize
* CPU cache performance.
*/
typedef struct xge_hal_ring_t {
xge_hal_channel_t channel;
int buffer_mode;
int indicate_max_pkts;
xge_hal_ring_config_t *config;
int rxd_size;
int rxd_priv_size;
int rxds_per_block;
xge_hal_mempool_t *mempool;
int rxdblock_priv_size;
void **reserved_rxds_arr;
int cmpl_cnt;
} __xge_os_attr_cacheline_aligned xge_hal_ring_t;
/**
* struct xge_hal_dtr_info_t - Extended information associated with a
* completed ring descriptor.
* @l3_cksum: Result of IP checksum check (by Xframe hardware).
* A value of XGE_HAL_L3_CKSUM_OK means that the checksum is
* correct; otherwise the datagram is corrupted.
* @l4_cksum: Result of TCP/UDP checksum check (by Xframe hardware).
* A value of XGE_HAL_L4_CKSUM_OK means that the checksum is
* correct; otherwise the packet is corrupted.
* @frame: See xge_hal_frame_type_e{}.
* @proto: Reporting bits for various higher-layer protocols, including (but
* not restricted to) TCP and UDP. See xge_hal_frame_proto_e{}.
* @vlan: VLAN tag extracted from the received frame.
* @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by Xframe II
* hardware if RTH is enabled.
* @rth_it_hit: Set if the RTH hash value calculated by the Xframe II hardware
* has a matching entry in the Indirection table.
* @rth_spdm_hit: Set if the RTH hash value calculated by the Xframe II hardware
* has a matching entry in the Socket Pair Direct Match table.
* @rth_hash_type: RTH hash code of the function used to calculate the hash.
* @reserved_pad: Unused byte.
*/
typedef struct xge_hal_dtr_info_t {
int l3_cksum;
int l4_cksum;
int frame; /* zero or more of xge_hal_frame_type_e flags */
int proto; /* zero or more of xge_hal_frame_proto_e flags */
int vlan;
u32 rth_value;
u8 rth_it_hit;
u8 rth_spdm_hit;
u8 rth_hash_type;
u8 reserved_pad;
} xge_hal_dtr_info_t;
/* ========================== RING PRIVATE API ============================ */
xge_hal_status_e __hal_ring_open(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr);
void __hal_ring_close(xge_hal_channel_h channelh);
void __hal_ring_hw_initialize(xge_hal_device_h devh);
void __hal_ring_mtu_set(xge_hal_device_h devh, int new_mtu);
void __hal_ring_prc_enable(xge_hal_channel_h channelh);
void __hal_ring_prc_disable(xge_hal_channel_h channelh);
xge_hal_status_e __hal_ring_initial_replenish(xge_hal_channel_t *channel,
xge_hal_channel_reopen_e reopen);
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_RING)
#define __HAL_STATIC_RING
#define __HAL_INLINE_RING
__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block);
__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t *block, int memblock_idx);
__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block);
__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
dma_addr_t dma_next);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh);
/* =========================== RING PUBLIC API ============================ */
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t *dma_pointer, int *pkt_length);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointer[], int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh);
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_RING static
#define __HAL_INLINE_RING inline
#include <dev/nxge/xgehal/xgehal-ring-fp.c>
#endif /* XGE_FASTPATH_INLINE */
__EXTERN_END_DECLS
#endif /* XGE_HAL_RING_H */
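Putting the ring public API together, a minimal 1-buffer-mode completion-loop
sketch; re-posting replenished descriptors and the actual hand-off to the
network stack are elided.
static void
example_rx_poll(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;
	u8 t_code;

	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh,
	    &t_code) == XGE_HAL_OK) {
		xge_hal_dtr_info_t info;
		dma_addr_t dma;
		int len;

		xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma, &len);
		xge_hal_ring_dtr_info_get(channelh, dtrh, &info);
		if (t_code == XGE_HAL_RXD_T_CODE_OK &&
		    (info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP)) {
			/* checksum results are in info.l3_cksum and
			 * info.l4_cksum; hand the frame up the stack here */
		}
		xge_hal_ring_dtr_free(channelh, dtrh);
		/* a real ULD would reserve/1b_set/post a fresh RxD here */
	}
}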

File diff suppressed because it is too large.


@ -0,0 +1,626 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-types.h
*
* Description: HAL commonly used types and enumerations
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_TYPES_H
#define XGE_HAL_TYPES_H
#include <dev/nxge/include/xge-os-pal.h>
__EXTERN_BEGIN_DECLS
/*
* BIT(loc) - set bit at offset
*/
#define BIT(loc) (0x8000000000000000ULL >> (loc))
/*
* vBIT(val, loc, sz) - set bits at offset
*/
#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
/*
* bVALx(bits, loc) - Get the value of x bits at location
*/
#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1)
#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3)
#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7)
#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF)
#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F)
#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F)
#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F)
#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF)
#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF)
#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF)
#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF)
#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF)
#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF)
#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF)
#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF)
#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF)
#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF)
#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF)
#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF)
#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF)
#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF)
#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF)
#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF)
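The Xframe register layout uses big-endian, MSB-0 bit numbering: BIT(0) is
the most significant bit of a u64, and vBIT(val,loc,sz) places a sz-bit field
starting loc bits down from the top. A quick sketch (xge_assert assumed from
xge-debug.h):
static void
example_bit_numbering(void)
{
	u64 x = vBIT(0xABCD, 16, 16);	/* 16-bit field at bits 16..31 */

	xge_assert(BIT(0) == 0x8000000000000000ULL);	/* bit 0 is the MSB */
	xge_assert(bVAL16(x, 16) == 0xABCD);		/* read the field back */
}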
#define XGE_HAL_BASE_INF 100
#define XGE_HAL_BASE_ERR 200
#define XGE_HAL_BASE_BADCFG 300
#define XGE_HAL_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
/**
* enum xge_hal_status_e - HAL return codes.
* @XGE_HAL_OK: Success.
* @XGE_HAL_FAIL: Failure.
* @XGE_HAL_COMPLETIONS_REMAIN: There are more completions on a channel.
* (specific to polling mode completion processing).
* @XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS: No more completed
* descriptors. See xge_hal_fifo_dtr_next_completed().
* @XGE_HAL_INF_OUT_OF_DESCRIPTORS: Out of descriptors. Channel
* descriptors are reserved (via xge_hal_fifo_dtr_reserve() or
* xge_hal_ring_dtr_reserve())
* and not yet freed (via xge_hal_fifo_dtr_free(),
* xge_hal_ring_dtr_free()).
* @XGE_HAL_INF_CHANNEL_IS_NOT_READY: Channel is not ready for
* operation.
* @XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING: Indicates that host needs to
* poll until PIO is executed.
* @XGE_HAL_INF_STATS_IS_NOT_READY: Cannot retrieve statistics because
* HAL and/or device is not yet initialized.
* @XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS: No descriptors left to
* reserve. Internal use only.
* @XGE_HAL_INF_IRQ_POLLING_CONTINUE: Returned by the ULD channel
* callback when instructed to exit descriptor processing loop
* prematurely. Typical usage: polling mode of processing completed
* descriptors.
* Upon getting LRO_ISED, the ll driver shall:
* 1) initialise the lro struct with the mbuf if sg_num == 1;
* 2) otherwise, update m_data_ptr_of_mbuf to the tcp pointer and
* append the new mbuf to the tail of the mbuf chain in the lro struct.
*
* @XGE_HAL_INF_LRO_BEGIN: Returned by ULD LRO module, when new LRO is
* being initiated.
* @XGE_HAL_INF_LRO_CONT: Returned by ULD LRO module, when new frame
* is appended at the end of existing LRO.
* @XGE_HAL_INF_LRO_UNCAPABLE: Returned by ULD LRO module, when new
* frame is not LRO capable.
* @XGE_HAL_INF_LRO_END_1: Returned by ULD LRO module, when new frame
* triggers LRO flush.
* @XGE_HAL_INF_LRO_END_2: Returned by ULD LRO module, when new
* frame triggers LRO flush. The LRO frame should be flushed first,
* then the new frame should be flushed next.
* @XGE_HAL_INF_LRO_END_3: Returned by ULD LRO module, when new
* frame triggers close of current LRO session and opening of new LRO session
* with the frame.
* @XGE_HAL_INF_LRO_SESSIONS_XCDED: Returned by ULD LRO module, when no
* more LRO sessions can be added.
* @XGE_HAL_INF_NOT_ENOUGH_HW_CQES: TBD
* @XGE_HAL_ERR_DRIVER_NOT_INITIALIZED: HAL is not initialized.
* @XGE_HAL_ERR_OUT_OF_MEMORY: Out of memory (for example, when
* allocating descriptors).
* @XGE_HAL_ERR_CHANNEL_NOT_FOUND: xge_hal_channel_open will return this
* error if corresponding channel is not configured.
* @XGE_HAL_ERR_WRONG_IRQ: Returned by HAL's ISR when the latter is
* invoked not because of the Xframe-generated interrupt.
* @XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES: Returned when user tries to
* configure more than XGE_HAL_MAX_MAC_ADDRESSES mac addresses.
* @XGE_HAL_ERR_BAD_DEVICE_ID: Unknown device PCI ID.
* @XGE_HAL_ERR_OUT_ALIGNED_FRAGS: Too many unaligned fragments
* in a scatter-gather list.
* @XGE_HAL_ERR_DEVICE_NOT_INITIALIZED: Device is not initialized.
* Typically means wrong sequence of API calls.
* @XGE_HAL_ERR_SWAPPER_CTRL: Error during device initialization: failed
* to set the Xframe byte swapper in accordance with the host
* endianness.
* @XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT: Failed to restore the device to
* a "quiescent" state.
* @XGE_HAL_ERR_INVALID_MTU_SIZE: Returned when MTU size specified by
* caller is not in the (64, 9600) range.
* @XGE_HAL_ERR_OUT_OF_MAPPING: Failed to map DMA-able memory.
* @XGE_HAL_ERR_BAD_SUBSYSTEM_ID: Bad PCI subsystem ID. (Currently we
* check for zero/non-zero only.)
* @XGE_HAL_ERR_INVALID_BAR_ID: Invalid BAR ID. Xframe supports two Base
* Address Register Spaces: BAR0 (id=0) and BAR1 (id=1).
* @XGE_HAL_ERR_INVALID_OFFSET: Invalid offset. Example, attempt to read
* register value (with offset) outside of the BAR0 space.
* @XGE_HAL_ERR_INVALID_DEVICE: Invalid device. The HAL device handle
* (passed by ULD) is invalid.
* @XGE_HAL_ERR_OUT_OF_SPACE: Out-of-provided-buffer-space. Returned by
* management "get" routines when the retrieved information does
* not fit into the provided buffer.
* @XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE: Invalid bit size.
* @XGE_HAL_ERR_VERSION_CONFLICT: Upper-layer driver and HAL (versions)
* are not compatible.
* @XGE_HAL_ERR_INVALID_MAC_ADDRESS: Invalid MAC address.
* @XGE_HAL_ERR_SPDM_NOT_ENABLED: SPDM support is not enabled.
* @XGE_HAL_ERR_SPDM_TABLE_FULL: SPDM table is full.
* @XGE_HAL_ERR_SPDM_INVALID_ENTRY: Invalid SPDM entry.
* @XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND: Unable to locate the entry in the
* SPDM table.
* @XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT: Local SPDM table is not in
* sync with the actual one.
* @XGE_HAL_ERR_INVALID_PCI_INFO: Invalid or unrecognized PCI frequency,
* and or width, and or mode (Xframe-II only, see UG on PCI_INFO register).
* @XGE_HAL_ERR_CRITICAL: Critical error. Returned by HAL APIs
* (including xge_hal_device_handle_tcode()) on: ECC, parity, SERR.
* Also returned when PIO read does not go through ("all-foxes")
* because of "slot-freeze".
* @XGE_HAL_ERR_RESET_FAILED: Failed to soft-reset the device.
* Returned by xge_hal_device_reset(). One circumstance when it could
* happen: slot freeze by the system (see @XGE_HAL_ERR_CRITICAL).
* @XGE_HAL_ERR_TOO_MANY: Returned if the maximum number of sessions
* or queues has already been allocated.
* @XGE_HAL_ERR_PKT_DROP: TBD
* @XGE_HAL_BADCFG_TX_URANGE_A: Invalid Tx link utilization range A. See
* the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_A: Invalid frame count for Tx link utilization
* range A. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_URANGE_B: Invalid Tx link utilization range B. See
* the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_B: Invalid frame count for Tx link utilization
* range B. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_URANGE_C: Invalid Tx link utilization range C. See
* the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_C: Invalid frame count for Tx link utilization
* range C. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_UFC_D: Invalid frame count for Tx link utilization
* range D. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_TIMER_VAL: Invalid Tx timer value. See the
* structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_TX_TIMER_CI_EN: Invalid Tx timer continuous interrupt
* enable. See the structure xge_hal_tti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_URANGE_A: Invalid Rx link utilization range A. See
* the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_UFC_A: Invalid frame count for Rx link utilization
* range A. See the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_URANGE_B: Invalid Rx link utilization range B. See
* the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_UFC_B: Invalid frame count for Rx link utilization
* range B. See the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_URANGE_C: Invalid Rx link utilization range C. See
* the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_UFC_C: Invalid frame count for Rx link utilization
* range C. See the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_UFC_D: Invalid frame count for Rx link utilization
* range D. See the structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_RX_TIMER_VAL: Invalid Rx timer value. See the
* structure xge_hal_rti_config_t{} for valid values.
* @XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH: Invalid initial fifo queue
* length. See the structure xge_hal_fifo_queue_t for valid values.
* @XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH: Invalid fifo queue max length.
* See the structure xge_hal_fifo_queue_t for valid values.
* @XGE_HAL_BADCFG_FIFO_QUEUE_INTR: Invalid fifo queue interrupt mode.
* See the structure xge_hal_fifo_queue_t for valid values.
* @XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS: Invalid Initial number of
* RxD blocks for the ring. See the structure xge_hal_ring_queue_t for
* valid values.
* @XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS: Invalid maximum number of RxD
* blocks for the ring. See the structure xge_hal_ring_queue_t for
* valid values.
* @XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE: Invalid ring buffer mode. See
* the structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_RING_QUEUE_SIZE: Invalid ring queue size. See the
* structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_BACKOFF_INTERVAL_US: Invalid backoff timer interval
* for the ring. See the structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_MAX_FRM_LEN: Invalid ring max frame length. See the
* structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_RING_PRIORITY: Invalid ring priority. See the
* structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_TMAC_UTIL_PERIOD: Invalid tmac util period. See the
* structure xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_RMAC_UTIL_PERIOD: Invalid rmac util period. See the
* structure xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_RMAC_BCAST_EN: Invalid rmac broadcast enable. See the
* structure xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_RMAC_HIGH_PTIME: Invalid rmac pause time. See the
* structure xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3: Invalid threshold for pause
* frame generation for queues 0 through 3. See the structure
* xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7: Invalid threshold for pause
* frame generation for queues 4 through 7. See the structure
* xge_hal_mac_config_t{} for valid values.
* @XGE_HAL_BADCFG_FIFO_FRAGS: Invalid fifo max fragments length. See
* the structure xge_hal_fifo_config_t{} for valid values.
* @XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD: Invalid fifo reserve
* threshold. See the structure xge_hal_fifo_config_t{} for valid values.
* @XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE: Invalid fifo descriptors memblock
* size. See the structure xge_hal_fifo_config_t{} for valid values.
* @XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE: Invalid ring descriptors memblock
* size. See the structure xge_hal_ring_config_t{} for valid values.
* @XGE_HAL_BADCFG_MAX_MTU: Invalid max mtu for the device. See the
* structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_ISR_POLLING_CNT: Invalid isr polling count. See the
* structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_LATENCY_TIMER: Invalid Latency timer. See the
* structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_MAX_SPLITS_TRANS: Invalid maximum number of pci-x
* split transactions. See the structure xge_hal_device_config_t{} for valid
* values.
* @XGE_HAL_BADCFG_MMRB_COUNT: Invalid mmrb count. See the structure
* xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_SHARED_SPLITS: Invalid number of outstanding split
* transactions that is shared by Tx and Rx requests. See the structure
* xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_STATS_REFRESH_TIME: Invalid time interval for
* automatic statistics transfer to the host. See the structure
* xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_PCI_FREQ_MHERZ: Invalid pci clock frequency. See the
* structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_PCI_MODE: Invalid pci mode. See the structure
* xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_INTR_MODE: Invalid interrupt mode. See the structure
* xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_SCHED_TIMER_US: Invalid scheduled timer interval to
* generate interrupt. See the structure xge_hal_device_config_t{}
* for valid values.
* @XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT: Invalid scheduled timer one
* shot. See the structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL: Invalid driver queue initial
* size. See the structure xge_hal_driver_config_t{} for valid values.
* @XGE_HAL_BADCFG_QUEUE_SIZE_MAX: Invalid driver queue max size. See
* the structure xge_hal_driver_config_t{} for valid values.
* @XGE_HAL_BADCFG_RING_RTH_EN: Invalid value of RTH-enable. See
* the structure xge_hal_ring_queue_t for valid values.
* @XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS: Invalid value configured for
* indicate_max_pkts variable.
* @XGE_HAL_BADCFG_TX_TIMER_AC_EN: Invalid value for Tx timer
* auto-cancel. See xge_hal_tti_config_t{}.
* @XGE_HAL_BADCFG_RX_TIMER_AC_EN: Invalid value for Rx timer
* auto-cancel. See xge_hal_rti_config_t{}.
* @XGE_HAL_BADCFG_RXUFCA_INTR_THRES: TODO
* @XGE_HAL_BADCFG_RXUFCA_LO_LIM: TODO
* @XGE_HAL_BADCFG_RXUFCA_HI_LIM: TODO
* @XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD: TODO
* @XGE_HAL_BADCFG_TRACEBUF_SIZE: Bad configuration: the size of the circular
* (in memory) trace buffer is either too large or too small. See
* the corresponding header file or README for the acceptable range.
* @XGE_HAL_BADCFG_LINK_VALID_CNT: Bad configuration: the link-valid
* counter cannot have the specified value. Note that the link-valid
* counting is done only at device-open time, to determine with the
* specified certainty that the link is up. See the corresponding
* header file or README for the acceptable range.
* See also @XGE_HAL_BADCFG_LINK_RETRY_CNT.
* @XGE_HAL_BADCFG_LINK_RETRY_CNT: Bad configuration: the specified
* link-up retry count is out of the valid range. Note that the link-up
* retry counting is done only at device-open time.
* See also xge_hal_device_config_t{}.
* @XGE_HAL_BADCFG_LINK_STABILITY_PERIOD: Invalid link stability period.
* @XGE_HAL_BADCFG_DEVICE_POLL_MILLIS: Invalid device poll interval.
* @XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN: TBD
* @XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN: TBD
* @XGE_HAL_BADCFG_MEDIA: TBD
* @XGE_HAL_BADCFG_NO_ISR_EVENTS: TBD
* See the structure xge_hal_device_config_t{} for valid values.
* @XGE_HAL_EOF_TRACE_BUF: End of the circular (in memory) trace buffer.
* Returned by xge_hal_mgmt_trace_read(), when user tries to read the trace
* past the buffer limits. Used to enable user to load the trace in two
* or more reads.
 * @XGE_HAL_BADCFG_RING_RTS_MAC_EN: Invalid value of RTS_MAC_EN. See
 * the structure xge_hal_ring_queue_t for valid values.
 * @XGE_HAL_BADCFG_LRO_SG_SIZE: Invalid value of LRO scatter-gather size.
 * See the structure xge_hal_device_config_t for valid values.
 * @XGE_HAL_BADCFG_LRO_FRM_LEN: Invalid value of LRO frame length.
 * See the structure xge_hal_device_config_t for valid values.
* @XGE_HAL_BADCFG_WQE_NUM_ODS: TBD
 * @XGE_HAL_BADCFG_BIMODAL_INTR: Invalid value to configure bimodal interrupts.
 * @XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US: TBD
 * @XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US: TBD
 * @XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED: TBD
 * @XGE_HAL_BADCFG_RTS_QOS_EN: TBD
 * @XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR: TBD
 * @XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR: TBD
 * @XGE_HAL_BADCFG_RTS_PORT_EN: TBD
 * @XGE_HAL_BADCFG_RING_RTS_PORT_EN: TBD
 *
 * Enumerates status and error codes returned by HAL public
 * API functions.
 */
typedef enum xge_hal_status_e {
XGE_HAL_OK = 0,
XGE_HAL_FAIL = 1,
XGE_HAL_COMPLETIONS_REMAIN = 2,
XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS = XGE_HAL_BASE_INF + 1,
XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2,
XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3,
XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4,
XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5,
XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6,
XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7,
XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8,
XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9,
XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10,
XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11,
XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12,
XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13,
XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14,
XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15,
XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1,
XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4,
XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5,
XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6,
XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7,
XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8,
XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9,
XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10,
XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11,
XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12,
XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13,
XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14,
XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15,
XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16,
XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17,
XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18,
XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19,
XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20,
XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21,
XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22,
XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23,
XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24,
XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25,
XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26,
XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT= XGE_HAL_BASE_ERR + 27,
XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28,
XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29,
XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30,
XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32,
XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33,
XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1,
XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2,
XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3,
XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4,
XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5,
XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6,
XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8,
XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9,
XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10,
XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11,
XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12,
XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13,
XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14,
XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15,
XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16,
XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17,
XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18,
XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19,
XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH = XGE_HAL_BASE_BADCFG + 20,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21,
XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG + 22,
XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23,
XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24,
XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25,
XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26,
XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27,
XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28,
XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29,
XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30,
XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31,
XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG +33,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34,
XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35,
XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37,
XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38,
XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39,
XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40,
XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41,
XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42,
XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43,
XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44,
XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45,
XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46,
XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47,
XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48,
XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49,
XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50,
XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51,
XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52,
XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53,
XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54,
XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55,
XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56,
XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57,
XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58,
XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59,
XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60,
XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61,
XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62,
XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63,
XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64,
XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65,
XGE_HAL_BADCFG_DEVICE_POLL_MILLIS = XGE_HAL_BASE_BADCFG + 66,
XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67,
XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68,
XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69,
XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70,
XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71,
XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72,
XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73,
XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74,
XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75,
XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76,
XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77,
XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78,
XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80,
XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 81,
XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82,
XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83,
XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84,
XGE_HAL_EOF_TRACE_BUF = -1
} xge_hal_status_e;
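/*
 * Usage sketch (illustrative): status codes partition into informational
 * (XGE_HAL_BASE_INF), error (XGE_HAL_BASE_ERR) and bad-configuration
 * (XGE_HAL_BASE_BADCFG) ranges. A caller typically checks a HAL return
 * value against XGE_HAL_OK and maps anything else to a driver error;
 * "some_hal_call" below is a placeholder for any HAL API function.
 *
 *	xge_hal_status_e status = some_hal_call(...);
 *	if (status != XGE_HAL_OK)
 *		... log the code and fail the operation ...
 */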
#define XGE_HAL_ETH_ALEN 6
typedef u8 macaddr_t[XGE_HAL_ETH_ALEN];
#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100
/* frames sizes */
#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14
#define XGE_HAL_HEADER_802_2_SIZE 3
#define XGE_HAL_HEADER_SNAP_SIZE 5
#define XGE_HAL_HEADER_VLAN_SIZE 4
#define XGE_HAL_MAC_HEADER_MAX_SIZE \
(XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \
XGE_HAL_HEADER_802_2_SIZE + \
XGE_HAL_HEADER_SNAP_SIZE)
#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64)
/* 32bit alignments */
#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2
#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2
#define XGE_HAL_HEADER_802_2_ALIGN 3
#define XGE_HAL_HEADER_SNAP_ALIGN 1
#define XGE_HAL_L3_CKSUM_OK 0xFFFF
#define XGE_HAL_L4_CKSUM_OK 0xFFFF
#define XGE_HAL_MIN_MTU 46
#define XGE_HAL_MAX_MTU 9600
#define XGE_HAL_DEFAULT_MTU 1500
#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920
#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */
#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */
#define XGE_HAL_MAX_MSIX_MESSAGES 64
#define XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR (XGE_HAL_MAX_MSIX_MESSAGES * 2)
/* Highest level interrupt blocks */
#define XGE_HAL_TX_PIC_INTR (0x0001<<0)
#define XGE_HAL_TX_DMA_INTR (0x0001<<1)
#define XGE_HAL_TX_MAC_INTR (0x0001<<2)
#define XGE_HAL_TX_XGXS_INTR (0x0001<<3)
#define XGE_HAL_TX_TRAFFIC_INTR (0x0001<<4)
#define XGE_HAL_RX_PIC_INTR (0x0001<<5)
#define XGE_HAL_RX_DMA_INTR (0x0001<<6)
#define XGE_HAL_RX_MAC_INTR (0x0001<<7)
#define XGE_HAL_RX_XGXS_INTR (0x0001<<8)
#define XGE_HAL_RX_TRAFFIC_INTR (0x0001<<9)
#define XGE_HAL_MC_INTR (0x0001<<10)
#define XGE_HAL_SCHED_INTR (0x0001<<11)
#define XGE_HAL_ALL_INTRS (XGE_HAL_TX_PIC_INTR | \
XGE_HAL_TX_DMA_INTR | \
XGE_HAL_TX_MAC_INTR | \
XGE_HAL_TX_XGXS_INTR | \
XGE_HAL_TX_TRAFFIC_INTR | \
XGE_HAL_RX_PIC_INTR | \
XGE_HAL_RX_DMA_INTR | \
XGE_HAL_RX_MAC_INTR | \
XGE_HAL_RX_XGXS_INTR | \
XGE_HAL_RX_TRAFFIC_INTR | \
XGE_HAL_MC_INTR | \
XGE_HAL_SCHED_INTR)
#define XGE_HAL_GEN_MASK_INTR (0x0001<<12)
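/*
 * Usage sketch (illustrative): the per-block interrupt bits above compose
 * with ordinary bitwise operators, e.g. to request every source except
 * the scheduler interrupt:
 *
 *	int intr_mask = XGE_HAL_ALL_INTRS & ~XGE_HAL_SCHED_INTR;
 */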
/* Interrupt masks for the general interrupt mask register */
#define XGE_HAL_ALL_INTRS_DIS 0xFFFFFFFFFFFFFFFFULL
#define XGE_HAL_TXPIC_INT_M BIT(0)
#define XGE_HAL_TXDMA_INT_M BIT(1)
#define XGE_HAL_TXMAC_INT_M BIT(2)
#define XGE_HAL_TXXGXS_INT_M BIT(3)
#define XGE_HAL_TXTRAFFIC_INT_M BIT(8)
#define XGE_HAL_PIC_RX_INT_M BIT(32)
#define XGE_HAL_RXDMA_INT_M BIT(33)
#define XGE_HAL_RXMAC_INT_M BIT(34)
#define XGE_HAL_MC_INT_M BIT(35)
#define XGE_HAL_RXXGXS_INT_M BIT(36)
#define XGE_HAL_RXTRAFFIC_INT_M BIT(40)
/* MSI level Interrupts */
#define XGE_HAL_MAX_MSIX_VECTORS (16)
typedef struct xge_hal_ipv4 {
u32 addr;
}xge_hal_ipv4;
typedef struct xge_hal_ipv6 {
u64 addr[2];
}xge_hal_ipv6;
typedef union xge_hal_ipaddr_t {
xge_hal_ipv4 ipv4;
xge_hal_ipv6 ipv6;
}xge_hal_ipaddr_t;
/* DMA level Interrupts */
#define XGE_HAL_TXDMA_PFC_INT_M BIT(0)
/* PFC block interrupts */
#define XGE_HAL_PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */
/* basic handles */
typedef void* xge_hal_device_h;
typedef void* xge_hal_dtr_h;
typedef void* xge_hal_channel_h;
#ifdef XGEHAL_RNIC
typedef void* xge_hal_towi_h;
typedef void* xge_hal_hw_wqe_h;
typedef void* xge_hal_hw_cqe_h;
typedef void* xge_hal_lro_wqe_h;
typedef void* xge_hal_lro_cqe_h;
typedef void* xge_hal_up_msg_h;
typedef void* xge_hal_down_msg_h;
typedef void* xge_hal_channel_callback_fh;
typedef void* xge_hal_msg_queueh;
typedef void* xge_hal_pblist_h;
#endif
/*
* I2C device id. Used in I2C control register for accessing EEPROM device
* memory.
*/
#define XGE_DEV_ID 5
typedef enum xge_hal_xpak_alarm_type_e {
XGE_HAL_XPAK_ALARM_EXCESS_TEMP = 1,
XGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT = 2,
XGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT = 3,
} xge_hal_xpak_alarm_type_e;
__EXTERN_END_DECLS
#endif /* XGE_HAL_TYPES_H */


@ -0,0 +1,53 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal.h
*
* Description: Consolidated header. Upper layers should include it to
* avoid include order problems.
*
* Created: 14 May 2004
*/
#ifndef XGE_HAL_H
#define XGE_HAL_H
#include <dev/nxge/include/xge-defs.h>
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xge-debug.h>
#include <dev/nxge/include/xgehal-types.h>
#include <dev/nxge/include/xgehal-driver.h>
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xgehal-device.h>
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-mgmt.h>
#endif /* XGE_HAL_H */

758
sys/dev/nxge/xge-osdep.h Normal file

@ -0,0 +1,758 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* xge-osdep.h
*
* Platform-dependent "glue" code
*/
#ifndef XGE_OSDEP_H
#define XGE_OSDEP_H
/******************************************
* Includes and defines
******************************************/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/stddef.h>
#include <sys/types.h>
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/types.h>
#include <sys/endian.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pci_private.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/if_types.h>
#define XGE_OS_PLATFORM_64BIT
#if BYTE_ORDER == BIG_ENDIAN
#define XGE_OS_HOST_BIG_ENDIAN 1
#elif BYTE_ORDER == LITTLE_ENDIAN
#define XGE_OS_HOST_LITTLE_ENDIAN 1
#endif
#define XGE_HAL_USE_5B_MODE 1
#define XGE_HAL_PROCESS_LINK_INT_IN_ISR 1
#define OS_NETSTACK_BUF struct mbuf *
#define XGE_LL_IP_FAST_CSUM(hdr, len) 0
#define xge_os_ntohs ntohs
#define xge_os_ntohl ntohl
#define xge_os_htons htons
#define xge_os_htonl htonl
#ifndef __DECONST
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
typedef struct busresources {
	bus_space_tag_t bus_tag; /* Bus space tag */
bus_space_handle_t bus_handle; /* Bus handle */
struct resource *bar_start_addr;/* BAR start address */
} busresource_t;
typedef struct xge_dma_alloc {
bus_addr_t dma_phyaddr; /* Physical Address */
caddr_t dma_viraddr; /* Virtual Address */
bus_dma_tag_t dma_tag; /* DMA Tag */
bus_dmamap_t dma_map; /* DMA Map */
bus_dma_segment_t dma_segment; /* DMA Segment */
bus_size_t dma_size; /* Size */
int dma_nseg; /* Maximum scatter-gather segs. */
} xdma;
struct xge_dma_mbuf {
bus_addr_t dma_phyaddr; /* Physical Address */
bus_dmamap_t dma_map; /* DMA Map */
};
typedef struct pci_info {
device_t device; /* Device */
struct resource *regmap0; /* Resource for BAR0 */
struct resource *regmap1; /* Resource for BAR1 */
void *bar0resource; /* BAR0 tag and handle */
void *bar1resource; /* BAR1 tag and handle */
} pci_info_t;
/******************************************
* Fixed size primitive types
******************************************/
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
#define u64 uint64_t
#define ulong_t unsigned long
#define uint unsigned int
#define ptrdiff_t ptrdiff_t
typedef bus_addr_t dma_addr_t;
typedef struct mtx spinlock_t;
typedef pci_info_t *pci_dev_h;
typedef busresource_t *pci_reg_h;
typedef struct xge_dma_alloc pci_dma_h;
typedef struct resource *pci_irq_h;
typedef pci_info_t *pci_cfg_h;
typedef struct xge_dma_alloc pci_dma_acc_h;
/******************************************
* "libc" functionality
******************************************/
#define xge_os_memzero(addr, size) bzero(addr, size)
#define xge_os_memcpy(dst, src, size) bcopy(src, dst, size)
#define xge_os_memcmp memcmp
#define xge_os_strcpy strcpy
#define xge_os_strlen strlen
#define xge_os_snprintf snprintf
#define xge_os_sprintf sprintf
#define xge_os_printf(fmt...) { \
printf(fmt); \
printf("\n"); \
}
#define xge_os_vaprintf(fmt) { \
	va_list va; \
	va_start(va, fmt); \
	vprintf(fmt, va); \
	printf("\n"); \
	va_end(va); \
}
#define xge_os_vasprintf(buf, fmt) { \
	va_list va; \
	va_start(va, fmt); \
	(void) vsprintf(buf, fmt, va); \
	va_end(va); \
}
#define xge_os_timestamp(buf) { \
struct timeval current_time; \
gettimeofday(&current_time, 0); \
sprintf(buf, "%08li.%08li: ", current_time.tv_sec, \
current_time.tv_usec); \
}
#define xge_os_println xge_os_printf
/******************************************
* Synchronization Primitives
******************************************/
/* Initialize the spin lock */
#define xge_os_spin_lock_init(lockp, ctxh) \
if(mtx_initialized(lockp) == 0) { \
mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \
}
/* Initialize the spin lock (IRQ version) */
#define xge_os_spin_lock_init_irq(lockp, ctxh) \
if(mtx_initialized(lockp) == 0) { \
mtx_init((lockp), "xge", MTX_NETWORK_LOCK, MTX_DEF); \
}
/* Destroy the lock */
#define xge_os_spin_lock_destroy(lockp, ctxh) \
if(mtx_initialized(lockp) != 0) { \
mtx_destroy(lockp); \
}
/* Destroy the lock (IRQ version) */
#define xge_os_spin_lock_destroy_irq(lockp, ctxh) \
if(mtx_initialized(lockp) != 0) { \
mtx_destroy(lockp); \
}
/* Acquire the lock */
#define xge_os_spin_lock(lockp) \
if(mtx_owned(lockp) == 0) mtx_lock(lockp)
/* Release the lock */
#define xge_os_spin_unlock(lockp) mtx_unlock(lockp)
/* Acquire the lock (IRQ version) */
#define xge_os_spin_lock_irq(lockp, flags) { \
flags = MTX_QUIET; \
if(mtx_owned(lockp) == 0) mtx_lock_flags(lockp, flags); \
}
/* Release the lock (IRQ version) */
#define xge_os_spin_unlock_irq(lockp, flags) { \
flags = MTX_QUIET; \
mtx_unlock_flags(lockp, flags); \
}
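/*
 * Usage sketch (illustrative, not part of the driver): the IRQ variants
 * take a "flags" lvalue to mirror other OS ports, although this FreeBSD
 * implementation simply stores MTX_QUIET into it.
 *
 *	spinlock_t lock;
 *	unsigned long flags = 0;
 *
 *	xge_os_spin_lock_init(&lock, NULL);
 *	xge_os_spin_lock_irq(&lock, flags);
 *	... critical section ...
 *	xge_os_spin_unlock_irq(&lock, flags);
 *	xge_os_spin_lock_destroy(&lock, NULL);
 */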
/* Write memory barrier */
#define xge_os_wmb()
/* Delay (in micro seconds) */
#define xge_os_udelay(us) DELAY(us)
/* Delay (in milli seconds) */
#define xge_os_mdelay(ms) DELAY((ms) * 1000)
/* Compare and exchange */
//#define xge_os_cmpxchg(targetp, cmd, newval)
/******************************************
* Misc primitives
******************************************/
#define xge_os_unlikely(x) (x)
#define xge_os_prefetch(x) (x=x)
#define xge_os_prefetchw(x) (x=x)
#define xge_os_bug(fmt...) printf(fmt)
#define xge_os_ntohs ntohs
#define xge_os_ntohl ntohl
#define xge_os_htons htons
#define xge_os_htonl htonl
/******************************************
* Compiler Stuffs
******************************************/
#define __xge_os_attr_cacheline_aligned
#define __xge_os_cacheline_size 32
/******************************************
* Memory Primitives
******************************************/
#define XGE_OS_INVALID_DMA_ADDR ((dma_addr_t)0)
/******************************************
* xge_os_malloc - Allocate non DMA-able memory.
* @pdev: Device context.
* @size: Size to allocate.
*
 * Allocate @size bytes of memory. This allocation can sleep, and
 * therefore it requires process context. In other words,
 * xge_os_malloc() cannot be called from the interrupt context.
* Use xge_os_free() to free the allocated block.
*
* Returns: Pointer to allocated memory, NULL - on failure.
*
* See also: xge_os_free().
******************************************/
static inline void *
xge_os_malloc(pci_dev_h pdev, unsigned long size) {
	void *vaddr = malloc((size), M_DEVBUF, M_NOWAIT);
	if (vaddr == NULL)
		return (NULL); /* M_NOWAIT can fail; never bzero a NULL pointer */
	xge_os_memzero(vaddr, size);
	XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, file, line);
	return (vaddr);
}
/******************************************
* xge_os_free - Free non DMA-able memory.
* @pdev: Device context.
* @vaddr: Address of the allocated memory block.
 * @size: Some OSes require the size to be provided on free.
*
* Free the memory area obtained via xge_os_malloc().
 * This call may also sleep, and therefore it cannot be used from
 * the interrupt context.
*
* See also: xge_os_malloc().
******************************************/
static inline void
xge_os_free(pci_dev_h pdev, const void *vaddr, unsigned long size) {
XGE_OS_MEMORY_CHECK_FREE(vaddr, size);
free(__DECONST(void *, vaddr), M_DEVBUF);
}
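/*
 * Usage sketch (illustrative): allocate and release a zeroed,
 * non DMA-able buffer; the size is arbitrary.
 *
 *	void *buf = xge_os_malloc(pdev, 512);
 *	if (buf != NULL) {
 *		... use buf ...
 *		xge_os_free(pdev, buf, 512);
 *	}
 */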
static void
xge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) {
if(error) return;
*(bus_addr_t *) arg = segs->ds_addr;
return;
}
/******************************************
* xge_os_dma_malloc - Allocate DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @size: Size (in bytes) to allocate.
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
* @p_dmah: Handle used to map the memory onto the corresponding device memory
* space. See xge_os_dma_map(). The handle is an out-parameter
* returned by the function.
* @p_dma_acch: One more DMA handle used subsequently to free the
* DMA object (via xge_os_dma_free()).
*
* Allocate DMA-able contiguous memory block of the specified @size.
* This memory can be subsequently freed using xge_os_dma_free().
* Note: can be used inside interrupt context.
*
 * Returns: Pointer to allocated memory (DMA-able), NULL on failure.
*
******************************************/
static inline void *
xge_os_dma_malloc(pci_dev_h pdev, unsigned long size, int dma_flags,
pci_dma_h *p_dmah, pci_dma_acc_h *p_dma_acch) {
int retValue = bus_dma_tag_create(
bus_get_dma_tag(pdev->device), /* Parent */
		PAGE_SIZE, /* Alignment */
0, /* Bounds */
BUS_SPACE_MAXADDR, /* Low Address */
BUS_SPACE_MAXADDR, /* High Address */
NULL, /* Filter */
NULL, /* Filter arg */
size, /* Max Size */
1, /* n segments */
size, /* max segment size */
BUS_DMA_ALLOCNOW, /* Flags */
NULL, /* lockfunction */
NULL, /* lock arg */
&p_dmah->dma_tag); /* DMA tag */
if(retValue != 0) {
xge_os_printf("bus_dma_tag_create failed\n");
goto fail_1;
}
p_dmah->dma_size = size;
retValue = bus_dmamem_alloc(p_dmah->dma_tag,
(void **)&p_dmah->dma_viraddr, BUS_DMA_NOWAIT, &p_dmah->dma_map);
if(retValue != 0) {
xge_os_printf("bus_dmamem_alloc failed\n");
goto fail_2;
}
return(p_dmah->dma_viraddr);
fail_2: bus_dma_tag_destroy(p_dmah->dma_tag);
fail_1: return(NULL);
}
/******************************************
* xge_os_dma_free - Free previously allocated DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @vaddr: Virtual address of the DMA-able memory.
* @p_dma_acch: DMA handle used to free the resource.
* @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc().
*
* Free DMA-able memory originally allocated by xge_os_dma_malloc().
* Note: can be used inside interrupt.
* See also: xge_os_dma_malloc().
******************************************/
static inline void
xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
pci_dma_acc_h *p_dma_acch, pci_dma_h *p_dmah) {
XGE_OS_MEMORY_CHECK_FREE(p_dmah->dma_viraddr, size);
bus_dmamem_free(p_dmah->dma_tag, p_dmah->dma_viraddr, p_dmah->dma_map);
bus_dma_tag_destroy(p_dmah->dma_tag);
p_dmah->dma_map = NULL;
p_dmah->dma_tag = NULL;
p_dmah->dma_viraddr = NULL;
return;
}
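/*
 * Usage sketch (illustrative): allocate a DMA-able block and free it
 * again. Both handles are caller-provided out-parameters; on FreeBSD
 * @p_dma_acch is unused but kept for API symmetry with other ports.
 *
 *	pci_dma_h dmah;
 *	pci_dma_acc_h dma_acch;
 *	void *vaddr = xge_os_dma_malloc(pdev, PAGE_SIZE,
 *	    XGE_OS_DMA_CONSISTENT, &dmah, &dma_acch);
 *	if (vaddr != NULL)
 *		xge_os_dma_free(pdev, vaddr, PAGE_SIZE, &dma_acch, &dmah);
 */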
/******************************************
* IO/PCI/DMA Primitives
******************************************/
#define XGE_OS_DMA_DIR_TODEVICE 0
#define XGE_OS_DMA_DIR_FROMDEVICE 1
#define XGE_OS_DMA_DIR_BIDIRECTIONAL 2
/******************************************
* xge_os_pci_read8 - Read one byte from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the result.
*
* Read byte value from the specified @regh PCI configuration space at the
* specified offset = @where.
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_read8(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 1))
/******************************************
* xge_os_pci_write8 - Write one byte into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write byte value into the specified PCI configuration space
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_write8(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 1)
/******************************************
* xge_os_pci_read16 - Read 16bit word from device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the 16bit result.
*
* Read 16bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_read16(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 2))
/******************************************
* xge_os_pci_write16 - Write 16bit word into device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 16bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_write16(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 2)
/******************************************
* xge_os_pci_read32 - Read 32bit word from device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of 32bit result.
*
* Read 32bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_read32(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 4))
/******************************************
* xge_os_pci_write32 - Write 32bit word into device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 32bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
******************************************/
#define xge_os_pci_write32(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 4)
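/*
 * Usage sketch (illustrative): read the standard 16-bit PCI vendor ID at
 * config offset 0. On FreeBSD the @cfgh handle is unused, since
 * pci_read_config()/pci_write_config() only need the device_t kept in
 * pci_info_t, so NULL may be passed.
 *
 *	u16 vendor;
 *	xge_os_pci_read16(pdev, NULL, 0, &vendor);
 */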
/******************************************
* xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 1 byte value read from the specified (mapped) memory space address.
******************************************/
static inline u8
xge_os_pio_mem_read8(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
return bus_space_read_1(tag, handle, (caddr_t)(addr) - addrss);
}
/******************************************
* xge_os_pio_mem_write8 - Write 1 byte into device memory mapped
* space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write byte value into the specified (mapped) device memory space.
******************************************/
static inline void
xge_os_pio_mem_write8(pci_dev_h pdev, pci_reg_h regh, u8 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
bus_space_write_1(tag, handle, (caddr_t)(addr) - addrss, val);
}
/******************************************
* xge_os_pio_mem_read16 - Read 16bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 16bit value read from the specified (mapped) memory space address.
******************************************/
static inline u16
xge_os_pio_mem_read16(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
return bus_space_read_2(tag, handle, (caddr_t)(addr) - addrss);
}
/******************************************
* xge_os_pio_mem_write16 - Write 16bit into device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 16bit value into the specified (mapped) device memory space.
******************************************/
static inline void
xge_os_pio_mem_write16(pci_dev_h pdev, pci_reg_h regh, u16 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
bus_space_write_2(tag, handle, (caddr_t)(addr) - addrss, val);
}
/******************************************
* xge_os_pio_mem_read32 - Read 32bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 32bit value read from the specified (mapped) memory space address.
******************************************/
static inline u32
xge_os_pio_mem_read32(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
return bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss);
}
/******************************************
* xge_os_pio_mem_write32 - Write 32bit into device memory space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 32bit value into the specified (mapped) device memory space.
******************************************/
static inline void
xge_os_pio_mem_write32(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
bus_space_write_4(tag, handle, (caddr_t)(addr) - addrss, val);
}
/******************************************
* xge_os_pio_mem_read64 - Read 64bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 64bit value read from the specified (mapped) memory space address.
******************************************/
static inline u64
xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
u64 value1, value2;
bus_space_tag_t tag =
(bus_space_tag_t)(((busresource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((busresource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((busresource_t *)(regh))->bar_start_addr);
value1 = bus_space_read_4(tag, handle, (caddr_t)(addr) + 4 - addrss);
value1 <<= 32;
value2 = bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss);
value1 |= value2;
return value1;
}
/******************************************
* xge_os_pio_mem_write64 - Write 32bit into device memory space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 64bit value into the specified (mapped) device memory space.
******************************************/
static inline void
xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr)
{
u32 vall = val & 0xffffffff;
xge_os_pio_mem_write32(pdev, regh, vall, addr);
xge_os_pio_mem_write32(pdev, regh, val >> 32, ((caddr_t)(addr) + 4));
}
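/*
 * Note (illustrative): the 64-bit accessors above are composed of two
 * 32-bit bus_space operations, so a concurrent hardware update can in
 * principle be observed torn; callers should only use them on registers
 * where a split access is acceptable. "reg_addr" below is a hypothetical
 * mapped register address.
 *
 *	u64 val = xge_os_pio_mem_read64(pdev, regh, reg_addr);
 *	xge_os_pio_mem_write64(pdev, regh, val, reg_addr);
 */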
/******************************************
 * xge_os_flush_bridge - Flush posted writes by reading back from the
 * device (mapped) memory space. Aliased to xge_os_pio_mem_read64().
 ******************************************/
#define xge_os_flush_bridge xge_os_pio_mem_read64
/******************************************
* xge_os_dma_map - Map DMA-able memory block to, or from, or
* to-and-from device.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @vaddr: Virtual address of the DMA-able memory.
* @size: Size (in bytes) to be mapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
*
* Map a single memory block.
*
* Returns: DMA address of the memory block,
* XGE_OS_INVALID_DMA_ADDR on failure.
*
* See also: xge_os_dma_malloc(), xge_os_dma_unmap(),
* xge_os_dma_sync().
******************************************/
static inline dma_addr_t
xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size,
int dir, int dma_flags)
{
int retValue =
bus_dmamap_load(dmah.dma_tag, dmah.dma_map, dmah.dma_viraddr,
dmah.dma_size, xge_dmamap_cb, &dmah.dma_phyaddr, BUS_DMA_NOWAIT);
if(retValue != 0) {
xge_os_printf("bus_dmamap_load_ failed\n");
return XGE_OS_INVALID_DMA_ADDR;
}
dmah.dma_size = size;
return dmah.dma_phyaddr;
}
/******************************************
* xge_os_dma_unmap - Unmap DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
* @size: Size (in bytes) to be unmapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Unmap a single DMA-able memory block that was previously mapped
* using xge_os_dma_map().
* See also: xge_os_dma_malloc(), xge_os_dma_map().
******************************************/
static inline void
xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
size_t size, int dir)
{
bus_dmamap_unload(dmah.dma_tag, dmah.dma_map);
return;
}
/******************************************
* xge_os_dma_sync - Synchronize mapped memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
 * @dma_offset: Offset from start of the block. Used by Solaris only.
* @length: Size of the block.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Make physical and CPU memory consistent for a single
* streaming mode DMA translation.
* This API compiles to NOP on cache-coherent platforms.
* On non cache-coherent platforms, depending on the direction
* of the "sync" operation, this API will effectively
* either invalidate CPU cache (that might contain old data),
* or flush CPU cache to update physical memory.
* See also: xge_os_dma_malloc(), xge_os_dma_map(),
* xge_os_dma_unmap().
******************************************/
static inline void
xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
u64 dma_offset, size_t length, int dir)
{
bus_dmasync_op_t syncop;
switch(dir) {
case XGE_OS_DMA_DIR_TODEVICE:
syncop = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTWRITE;
break;
case XGE_OS_DMA_DIR_FROMDEVICE:
syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD;
break;
	case XGE_OS_DMA_DIR_BIDIRECTIONAL:
	default:
		/* sync both directions */
		syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE;
		break;
}
bus_dmamap_sync(dmah.dma_tag, dmah.dma_map, syncop);
return;
}
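/*
 * Usage sketch (illustrative): map a previously allocated DMA block,
 * sync it before the device reads it, and unmap it again.
 *
 *	dma_addr_t busaddr = xge_os_dma_map(pdev, dmah, vaddr, size,
 *	    XGE_OS_DMA_DIR_TODEVICE, 0);
 *	if (busaddr != XGE_OS_INVALID_DMA_ADDR) {
 *		xge_os_dma_sync(pdev, dmah, busaddr, 0, size,
 *		    XGE_OS_DMA_DIR_TODEVICE);
 *		xge_os_dma_unmap(pdev, dmah, busaddr, size,
 *		    XGE_OS_DMA_DIR_TODEVICE);
 *	}
 */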
#endif /* XGE_OSDEP_H */


@ -0,0 +1,460 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-queue.c
*
* Description: serialized event queue
*
* Created: 7 June 2004
*/
#include <dev/nxge/include/xge-queue.h>
/**
* xge_queue_item_data - Get item's data.
* @item: Queue item.
*
 * Returns: item data (variable size). Note that xge_queue_t
 * contains items comprised of a fixed xge_queue_item_t "header"
 * and variable size data. This function returns the variable
 * user-defined portion of the queue item.
*/
void* xge_queue_item_data(xge_queue_item_t *item)
{
return (char *)item + sizeof(xge_queue_item_t);
}
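/*
 * Item layout (sketch): each queue item is a fixed header immediately
 * followed by its variable-size payload.
 *
 *	+-------------------+------------------------------+
 *	| xge_queue_item_t  | user data (item->data_size)  |
 *	+-------------------+------------------------------+
 *	^ item              ^ xge_queue_item_data(item)
 */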
/*
* __queue_consume - (Lockless) dequeue an item from the specified queue.
*
* @queue: Event queue.
* See xge_queue_consume().
*/
static xge_queue_status_e
__queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
{
int real_size;
xge_queue_item_t *elem;
if (xge_list_is_empty(&queue->list_head))
return XGE_QUEUE_IS_EMPTY;
elem = (xge_queue_item_t *)queue->list_head.next;
if (elem->data_size > data_max_size)
return XGE_QUEUE_NOT_ENOUGH_SPACE;
xge_list_remove(&elem->item);
real_size = elem->data_size + sizeof(xge_queue_item_t);
if (queue->head_ptr == elem) {
queue->head_ptr = (char *)queue->head_ptr + real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the head: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
queue->tail_ptr = (char *)queue->tail_ptr - real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the tail: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else {
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the list: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
}
xge_assert(queue->tail_ptr >= queue->head_ptr);
xge_assert(queue->tail_ptr >= queue->start_ptr &&
queue->tail_ptr <= queue->end_ptr);
xge_assert(queue->head_ptr >= queue->start_ptr &&
queue->head_ptr < queue->end_ptr);
xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
elem->data_size);
if (xge_list_is_empty(&queue->list_head)) {
/* reset buffer pointers just to be clean */
queue->head_ptr = queue->tail_ptr = queue->start_ptr;
}
return XGE_QUEUE_OK;
}
/**
* xge_queue_produce - Enqueue an item (see xge_queue_item_t{})
* into the specified queue.
* @queueh: Queue handle.
* @event_type: Event type. One of the enumerated event types
* that both consumer and producer "understand".
* For an example, please refer to xge_hal_event_e.
* @context: Opaque (void*) "context", for instance event producer object.
* @is_critical: For critical event, e.g. ECC.
* @data_size: Size of the @data.
* @data: User data of variable @data_size that is _copied_ into
* the new queue item (see xge_queue_item_t{}). Upon return
* from the call the @data memory can be re-used or released.
*
* Enqueue a new item.
*
* Returns: XGE_QUEUE_OK - success.
* XGE_QUEUE_IS_FULL - Queue is full.
* XGE_QUEUE_OUT_OF_MEMORY - Memory allocation failed.
*
* See also: xge_queue_item_t{}, xge_queue_consume().
*/
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
int is_critical, const int data_size, void *data)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
int real_size = data_size + sizeof(xge_queue_item_t);
xge_queue_item_t *elem;
unsigned long flags = 0;
xge_assert(real_size <= XGE_QUEUE_BUF_SIZE);
xge_os_spin_lock_irq(&queue->lock, flags);
if (is_critical && !queue->has_critical_event) {
unsigned char item_buf[sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
while (__queue_consume(queue,
XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
item) != XGE_QUEUE_IS_EMPTY)
; /* do nothing */
}
try_again:
if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
elem = (xge_queue_item_t *) queue->tail_ptr;
queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the tail: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else if ((char *)queue->head_ptr - real_size >=
(char *)queue->start_ptr) {
elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
queue->head_ptr = elem;
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the head: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
real_size);
} else {
xge_queue_status_e status;
if (queue->pages_current >= queue->pages_max) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return XGE_QUEUE_IS_FULL;
}
if (queue->has_critical_event) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return XGE_QUEUE_IS_FULL;
}
/* grow */
status = __io_queue_grow(queueh);
if (status != XGE_QUEUE_OK) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return status;
}
goto try_again;
}
xge_assert(queue->tail_ptr >= queue->head_ptr);
xge_assert(queue->tail_ptr >= queue->start_ptr &&
queue->tail_ptr <= queue->end_ptr);
xge_assert(queue->head_ptr >= queue->start_ptr &&
queue->head_ptr < queue->end_ptr);
elem->data_size = data_size;
elem->event_type = (xge_hal_event_e) event_type;
elem->is_critical = is_critical;
if (is_critical)
queue->has_critical_event = 1;
elem->context = context;
xge_os_memcpy(xge_queue_item_data(elem), data, data_size);
xge_list_insert_before(&elem->item, &queue->list_head);
xge_os_spin_unlock_irq(&queue->lock, flags);
	/* no lock taken! the callback is optional (see xge_queue_create()) */
	if (queue->queued_func)
		queue->queued_func(queue->queued_data, event_type);
return XGE_QUEUE_OK;
}
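/*
 * Usage sketch (illustrative): enqueue a non-critical event carrying a
 * small payload. MY_EVENT_CODE is a hypothetical value; real producers
 * use an enumeration shared with the consumer, e.g. xge_hal_event_e.
 *
 *	u32 payload = 0;
 *	if (xge_queue_produce(queueh, MY_EVENT_CODE, devh, 0,
 *	    sizeof(payload), &payload) == XGE_QUEUE_IS_FULL)
 *		... queue is at pages_max and cannot grow; drop or retry ...
 */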
/**
* xge_queue_create - Create protected first-in-first-out queue.
* @pdev: PCI device handle.
* @irqh: PCI device IRQ handle.
* @pages_initial: Number of pages to be initially allocated at the
* time of queue creation.
* @pages_max: Max number of pages that can be allocated in the queue.
* @queued: Optional callback function to be called each time a new item is
* added to the queue.
* @queued_data: Argument to the callback function.
*
* Create protected (fifo) queue.
*
* Returns: Pointer to xge_queue_t structure,
* NULL - on failure.
*
* See also: xge_queue_item_t{}, xge_queue_destroy().
*/
xge_queue_h
xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
int pages_max, xge_queued_f queued, void *queued_data)
{
xge_queue_t *queue;
if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
return NULL;
queue->queued_func = queued;
queue->queued_data = queued_data;
queue->pdev = pdev;
queue->irqh = irqh;
queue->pages_current = pages_initial;
queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
XGE_QUEUE_BUF_SIZE);
if (queue->start_ptr == NULL) {
xge_os_free(pdev, queue, sizeof(xge_queue_t));
return NULL;
}
queue->head_ptr = queue->tail_ptr = queue->start_ptr;
queue->end_ptr = (char *)queue->start_ptr +
queue->pages_current * XGE_QUEUE_BUF_SIZE;
xge_os_spin_lock_init_irq(&queue->lock, irqh);
queue->pages_initial = pages_initial;
queue->pages_max = pages_max;
xge_list_init(&queue->list_head);
return queue;
}
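/*
 * Usage sketch (illustrative): create a queue that starts at one page and
 * may grow to four, with a hypothetical "my_queued" notification callback,
 * then destroy it.
 *
 *	xge_queue_h qh = xge_queue_create(pdev, irqh, 1, 4,
 *	    my_queued, my_data);
 *	if (qh != NULL)
 *		xge_queue_destroy(qh);
 */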
/**
* xge_queue_destroy - Destroy xge_queue_t object.
* @queueh: Queue handle.
*
* Destroy the specified xge_queue_t object.
*
* See also: xge_queue_item_t{}, xge_queue_create().
*/
void xge_queue_destroy(xge_queue_h queueh)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
if (!xge_list_is_empty(&queue->list_head)) {
xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
XGE_OS_LLXFMT, (u64)(ulong_t)queue);
}
xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
XGE_QUEUE_BUF_SIZE);
xge_os_free(queue->pdev, queue, sizeof(xge_queue_t));
}
/*
* __io_queue_grow - Dynamically increases the size of the queue.
* @queueh: Queue handle.
*
 * This function is called when there is no slot available in the queue
 * to accommodate the newly received event.
 * Note that the queue cannot grow beyond the max size specified for the
 * queue.
*
 * Returns: XGE_QUEUE_OK - on success.
 * XGE_QUEUE_OUT_OF_MEMORY - no memory is available.
*/
xge_queue_status_e
__io_queue_grow(xge_queue_h queueh)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
void *newbuf, *oldbuf;
xge_list_t *item;
xge_queue_item_t *elem;
xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
(u64)(ulong_t)queue, queue->pages_current);
newbuf = xge_os_malloc(queue->pdev,
(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
if (newbuf == NULL)
return XGE_QUEUE_OUT_OF_MEMORY;
xge_os_memcpy(newbuf, queue->start_ptr,
queue->pages_current * XGE_QUEUE_BUF_SIZE);
oldbuf = queue->start_ptr;
/* adjust queue sizes */
queue->start_ptr = newbuf;
queue->end_ptr = (char *)newbuf +
(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
(char *)oldbuf);
queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
(char *)oldbuf);
xge_assert(!xge_list_is_empty(&queue->list_head));
queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
((char *)queue->list_head.next - (char *)oldbuf));
queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
((char *)queue->list_head.prev - (char *)oldbuf));
/* adjust queue list */
xge_list_for_each(item, &queue->list_head) {
elem = xge_container_of(item, xge_queue_item_t, item);
if (elem->item.next != &queue->list_head) {
elem->item.next =
(xge_list_t*)(void *)((char *)newbuf +
((char *)elem->item.next - (char *)oldbuf));
}
if (elem->item.prev != &queue->list_head) {
elem->item.prev =
(xge_list_t*) (void *)((char *)newbuf +
((char *)elem->item.prev - (char *)oldbuf));
}
}
xge_os_free(queue->pdev, oldbuf,
queue->pages_current * XGE_QUEUE_BUF_SIZE);
queue->pages_current++;
return XGE_QUEUE_OK;
}
/**
* xge_queue_consume - Dequeue an item from the specified queue.
* @queueh: Queue handle.
* @data_max_size: Maximum expected size of the item.
* @item: Memory area into which the item is _copied_ upon return
* from the function.
*
* Dequeue an item from the queue. The caller is required to provide
* enough space for the item.
*
* Returns: XGE_QUEUE_OK - success.
* XGE_QUEUE_IS_EMPTY - Queue is empty.
 * XGE_QUEUE_NOT_ENOUGH_SPACE - Requested item size (@data_max_size)
 * is too small to accommodate an item from the queue.
*
* See also: xge_queue_item_t{}, xge_queue_produce().
*/
xge_queue_status_e
xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
unsigned long flags = 0;
xge_queue_status_e status;
xge_os_spin_lock_irq(&queue->lock, flags);
status = __queue_consume(queue, data_max_size, item);
xge_os_spin_unlock_irq(&queue->lock, flags);
return status;
}
/**
* xge_queue_flush - Flush, or empty, the queue.
* @queueh: Queue handle.
*
* Flush the queue, i.e. make it empty by consuming all events
* without invoking the event processing logic (callbacks, etc.)
*/
void xge_queue_flush(xge_queue_h queueh)
{
unsigned char item_buf[sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
/* flush queue by consuming all enqueued items */
while (xge_queue_consume(queueh,
XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
item) != XGE_QUEUE_IS_EMPTY) {
/* do nothing */
xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
item, item->event_type);
}
(void) __queue_get_reset_critical (queueh);
}
/*
 * __queue_get_reset_critical - Check for critical events in the queue.
* @qh: Queue handle.
*
* Check for critical event(s) in the queue, and reset the
* "has-critical-event" flag upon return.
 * Returns: 1 - if the queue contains at least one critical event.
 * 0 - if there are no critical events in the queue.
*/
int __queue_get_reset_critical (xge_queue_h qh) {
xge_queue_t* queue = (xge_queue_t*)qh;
int c = queue->has_critical_event;
queue->has_critical_event = 0;
return c;
}


@ -0,0 +1,299 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-channel-fp.c
*
* Description: HAL channel object functionality (fast path)
*
* Created: 10 June 2004
*/
#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-channel.h>
#endif
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
void **tmp_arr;
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
unsigned long flags = 0;
#endif
if (channel->reserve_length - channel->reserve_top >
channel->reserve_threshold) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_length];
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, "
"channel %d:%d:%d, reserve_idx %d",
(unsigned long long)(ulong_t)*dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length);
return XGE_HAL_OK;
}
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock(&channel->free_lock);
#endif
/* switch between empty and full arrays */
	/* the idea behind this design is that by keeping the free and
	 * reserve arrays separate we effectively separate the irq and
	 * non-irq parts, i.e. no additional locking is needed when we
	 * free a resource */
if (channel->reserve_initial - channel->free_length >
channel->reserve_threshold) {
tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
channel->reserve_length = channel->reserve_initial;
channel->free_arr = tmp_arr;
channel->reserve_top = channel->free_length;
channel->free_length = channel->reserve_initial;
channel->stats.reserve_free_swaps_cnt++;
xge_debug_channel(XGE_TRACE,
"switch on channel %d:%d:%d, reserve_length %d, "
"free_length %d", channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length,
channel->free_length);
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_unlock(&channel->free_lock);
#endif
goto _alloc_after_swap;
}
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_unlock(&channel->free_lock);
#endif
xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!",
channel->type, channel->post_qid,
channel->compl_qid);
channel->stats.full_cnt++;
*dtrh = NULL;
return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
}
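/*
 * Illustrative walk-through of the swap above (added for clarity, not
 * part of the original sources; the numbers are made up): with
 * reserve_initial = 8 and reserve_threshold = 0, once all 8 descriptors
 * have been returned via __hal_channel_dtr_free(), free_length has
 * dropped from 8 to 0 and the fast-path test at the top fails.  The
 * swap then exchanges reserve_arr and free_arr, sets reserve_length
 * back to 8 (with reserve_top = 0 and free_length = 8), and allocation
 * resumes from the refilled reserve array -- the free_lock is only ever
 * taken on this slow-path branch.
 */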
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int offset)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
/* restore a previously allocated dtrh at the given offset and update
 * the available reserve length accordingly. If dtrh is NULL, only
 * update the reserve length */
if (dtrh) {
channel->reserve_arr[channel->reserve_length + offset] = dtrh;
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for "
"channel %d:%d:%d, offset %d at reserve index %d, ",
(unsigned long long)(ulong_t)dtrh, channel->type,
channel->post_qid, channel->compl_qid, offset,
channel->reserve_length + offset);
}
else {
channel->reserve_length += offset;
xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
"for offset %d, new reserve_length %d, free length %d",
channel->type, channel->post_qid, channel->compl_qid,
offset, channel->reserve_length, channel->free_length);
}
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;
xge_assert(channel->work_arr[channel->post_index] == NULL);
channel->work_arr[channel->post_index++] = dtrh;
/* wrap-around */
if (channel->post_index == channel->length)
channel->post_index = 0;
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_assert(channel->work_arr);
xge_assert(channel->compl_index < channel->length);
*dtrh = channel->work_arr[channel->compl_index];
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_complete(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
channel->work_arr[channel->compl_index] = NULL;
/* wrap-around */
if (++channel->compl_index == channel->length)
channel->compl_index = 0;
channel->stats.total_compl_cnt++;
}
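/*
 * Descriptive note (added for clarity, not part of the original
 * sources): work_arr behaves as a circular queue of in-flight
 * descriptors.  __hal_channel_dtr_post() advances post_index and
 * __hal_channel_dtr_complete() chases it with compl_index; both wrap
 * at channel->length, and a NULL slot at compl_index means there is
 * nothing outstanding to complete.
 */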
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
channel->free_arr[--channel->free_length] = dtrh;
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, "
"channel %d:%d:%d, new free_length %d",
(unsigned long long)(ulong_t)dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->free_length);
}
/**
* xge_hal_channel_dtr_count
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
* Retrieve the number of DTRs available. This function cannot be
* called from the data path.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_dtr_count(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return ((channel->reserve_length - channel->reserve_top) +
(channel->reserve_initial - channel->free_length) -
channel->reserve_threshold);
}
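/*
 * Usage sketch (illustrative only, not part of the original sources;
 * channelh is assumed to come from a prior xge_hal_channel_open()).
 * A ULD could back off when no descriptors are available:
 *
 *	if (xge_hal_channel_dtr_count(channelh) <= 0)
 *		return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
 */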
/**
* xge_hal_channel_userdata - Get user-specified channel context.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
* Returns: per-channel "user data", which can be any ULD-defined context.
* The %userdata is passed into the channel at open time
* (see xge_hal_channel_open()).
*
* See also: xge_hal_channel_open().
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->userdata;
}
/**
* xge_hal_channel_id - Get channel ID.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
* Returns: channel ID. For the link layer the channel ID is a number
* in the range from 0 to 7 that identifies a hardware ring or fifo,
* depending on the channel type.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_id(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->post_qid;
}
/**
* xge_hal_check_alignment - Check buffer alignment and calculate the
* "misaligned" portion.
* @dma_pointer: DMA address of the buffer.
* @size: Buffer size, in bytes.
* @alignment: Alignment "granularity" (see below), in bytes.
* @copy_size: Maximum number of bytes to "extract" from the buffer
* (in order to post it as a separate scatter-gather entry). See below.
*
* Check buffer alignment and calculate the "misaligned" portion, if one
* exists. The buffer is considered aligned if its address is a multiple
* of the specified @alignment; in that case xge_hal_check_alignment()
* returns zero. Otherwise, xge_hal_check_alignment() uses the last
* argument, @copy_size, to calculate the size to "extract" from the
* buffer. The @copy_size may or may not be equal to @alignment. The
* difference between the two arguments is that @alignment is used to
* make the aligned/not-aligned decision, while @copy_size is used to
* calculate the portion of the buffer to "extract", i.e. to post as a
* separate entry in the transmit descriptor. For example, the
* combination @alignment=8 and @copy_size=64 works well on AMD Opteron
* boxes.
*
* Note: @copy_size should be a multiple of @alignment. In many practical
* cases @copy_size and @alignment will probably be equal.
*
* See also: xge_hal_fifo_dtr_buffer_set_aligned().
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
int copy_size)
{
int misaligned_size;
misaligned_size = (int)(dma_pointer & (alignment - 1));
if (!misaligned_size) {
return 0;
}
if (size > copy_size) {
misaligned_size = (int)(dma_pointer & (copy_size - 1));
misaligned_size = copy_size - misaligned_size;
} else {
misaligned_size = size;
}
return misaligned_size;
}
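/*
 * Worked example (illustrative, not part of the original sources):
 * with @dma_pointer = 0x1002, @alignment = 8 and @copy_size = 64, the
 * first test gives 0x1002 & 7 = 2, so the buffer is misaligned.  If
 * @size > 64 the function returns 64 - (0x1002 & 63) = 64 - 2 = 62,
 * i.e. the number of bytes to post separately so that the remainder
 * of the buffer starts on a 64-byte boundary; if @size <= 64 the
 * whole buffer of @size bytes is extracted.
 */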


@ -0,0 +1,759 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-channel.c
*
* Description: chipset channel abstraction
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>
#include <dev/nxge/include/xgehal-regs.h>
#ifdef XGEHAL_RNIC
#include <dev/nxge/include/xgehal-types.h>
#include "xgehal-iov.h"
#endif
/*
* __hal_channel_dtr_next_reservelist
*
* Walk through all of the available DTRs.
*/
static xge_hal_status_e
__hal_channel_dtr_next_reservelist(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (channel->reserve_top >= channel->reserve_length) {
return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
}
*dtrh = channel->reserve_arr[channel->reserve_top++];
return XGE_HAL_OK;
}
/*
* __hal_channel_dtr_next_freelist
*
* Walk through the "freed" DTRs.
*/
static xge_hal_status_e
__hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (channel->reserve_initial == channel->free_length) {
return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
}
*dtrh = channel->free_arr[channel->free_length++];
return XGE_HAL_OK;
}
/*
* __hal_channel_dtr_next_not_completed - Get the _next_ posted but
* not completed descriptor.
*
* Walk through the "not completed" DTRs.
*/
static xge_hal_status_e
__hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh)
{
#ifndef XGEHAL_RNIC
xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
#endif
__hal_channel_dtr_try_complete(channelh, dtrh);
if (*dtrh == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
#ifndef XGEHAL_RNIC
rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
xge_assert(rxdp->host_control!=0);
#endif
__hal_channel_dtr_complete(channelh);
return XGE_HAL_OK;
}
xge_hal_channel_t*
__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
#ifdef XGEHAL_RNIC
u32 vp_id,
#endif
xge_hal_channel_type_e type)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
xge_hal_channel_t *channel;
int size = 0;
switch(type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
size = sizeof(xge_hal_ring_t);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
size = sizeof(__hal_sq_t);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
size = sizeof(__hal_srq_t);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
size = sizeof(__hal_cqrq_t);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
size = sizeof(__hal_umq_t);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
size = sizeof(__hal_dmq_t);
break;
#endif
default:
xge_assert(size);
break;
}
/* allocate FIFO channel */
channel = (xge_hal_channel_t *) xge_os_malloc(hldev->pdev, size);
if (channel == NULL) {
return NULL;
}
xge_os_memzero(channel, size);
channel->pdev = hldev->pdev;
channel->regh0 = hldev->regh0;
channel->regh1 = hldev->regh1;
channel->type = type;
channel->devh = devh;
#ifdef XGEHAL_RNIC
channel->vp_id = vp_id;
#endif
channel->post_qid = post_qid;
channel->compl_qid = 0;
return channel;
}
void __hal_channel_free(xge_hal_channel_t *channel)
{
int size = 0;
xge_assert(channel->pdev);
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
size = sizeof(xge_hal_ring_t);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
size = sizeof(__hal_sq_t);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
size = sizeof(__hal_srq_t);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
size = sizeof(__hal_cqrq_t);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
size = sizeof(__hal_umq_t);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
size = sizeof(__hal_dmq_t);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(size);
break;
#endif
default:
break;
}
xge_os_free(channel->pdev, channel, size);
}
xge_hal_status_e
__hal_channel_initialize (xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr, void **reserve_arr,
int reserve_initial, int reserve_max, int reserve_threshold)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_device_t *hldev;
hldev = (xge_hal_device_t *)channel->devh;
channel->dtr_term = attr->dtr_term;
channel->dtr_init = attr->dtr_init;
channel->callback = attr->callback;
channel->userdata = attr->userdata;
channel->flags = attr->flags;
channel->per_dtr_space = attr->per_dtr_space;
channel->reserve_arr = reserve_arr;
channel->reserve_initial = reserve_initial;
channel->reserve_max = reserve_max;
channel->reserve_length = channel->reserve_initial;
channel->reserve_threshold = reserve_threshold;
channel->reserve_top = 0;
channel->saved_arr = (void **) xge_os_malloc(hldev->pdev,
sizeof(void*)*channel->reserve_max);
if (channel->saved_arr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(channel->saved_arr, sizeof(void*)*channel->reserve_max);
channel->free_arr = channel->saved_arr;
channel->free_length = channel->reserve_initial;
channel->work_arr = (void **) xge_os_malloc(hldev->pdev,
sizeof(void*)*channel->reserve_max);
if (channel->work_arr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(channel->work_arr,
sizeof(void*)*channel->reserve_max);
channel->post_index = 0;
channel->compl_index = 0;
channel->length = channel->reserve_initial;
channel->orig_arr = (void **) xge_os_malloc(hldev->pdev,
sizeof(void*)*channel->reserve_max);
if (channel->orig_arr == NULL)
return XGE_HAL_ERR_OUT_OF_MEMORY;
xge_os_memzero(channel->orig_arr, sizeof(void*)*channel->reserve_max);
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_init_irq(&channel->free_lock, hldev->irqh);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock_init(&channel->free_lock, hldev->pdev);
#endif
return XGE_HAL_OK;
}
void __hal_channel_terminate(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_device_t *hldev;
hldev = (xge_hal_device_t *)channel->devh;
xge_assert(channel->pdev);
/* undo changes made at channel_initialize() */
if (channel->work_arr) {
xge_os_free(channel->pdev, channel->work_arr,
sizeof(void*)*channel->reserve_max);
channel->work_arr = NULL;
}
if (channel->saved_arr) {
xge_os_free(channel->pdev, channel->saved_arr,
sizeof(void*)*channel->reserve_max);
channel->saved_arr = NULL;
}
if (channel->orig_arr) {
xge_os_free(channel->pdev, channel->orig_arr,
sizeof(void*)*channel->reserve_max);
channel->orig_arr = NULL;
}
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_destroy_irq(&channel->free_lock, hldev->irqh);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock_destroy(&channel->free_lock, hldev->pdev);
#endif
}
/**
* xge_hal_channel_open - Open communication channel.
* @devh: HAL device, pointer to xge_hal_device_t structure.
* @attr: Contains attributes required to open
* the channel.
* @channelh: The channel handle. On success (XGE_HAL_OK) HAL fills
* this "out" parameter with a valid channel handle.
* @reopen: See xge_hal_channel_reopen_e{}.
*
* Open communication channel with the device.
*
* HAL uses (persistent) channel configuration to allocate both channel
* and Xframe Tx and Rx descriptors.
* Notes:
* 1) The channel config data is fed into HAL prior to
* xge_hal_channel_open().
*
* 2) The corresponding hardware queues must be already configured and
* enabled.
*
* 3) Either down or up queue may be omitted, in which case the channel
* is treated as _unidirectional_.
*
* 4) Post and completion queue may be the same, in which case the channel
* is said to have "in-band completions".
*
* Note that the free_channels list is not protected, i.e. the caller
* must provide a safe context.
*
* Returns: XGE_HAL_OK - success.
* XGE_HAL_ERR_CHANNEL_NOT_FOUND - Unable to locate the channel.
* XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
*
* See also: xge_hal_channel_attr_t{}.
* Usage: See ex_open{}.
*/
xge_hal_status_e
xge_hal_channel_open(xge_hal_device_h devh,
xge_hal_channel_attr_t *attr,
xge_hal_channel_h *channelh,
xge_hal_channel_reopen_e reopen)
{
xge_list_t *item;
int i;
xge_hal_status_e status = XGE_HAL_OK;
xge_hal_channel_t *channel = NULL;
xge_hal_device_t *device = (xge_hal_device_t *)devh;
xge_assert(device);
xge_assert(attr);
*channelh = NULL;
#ifdef XGEHAL_RNIC
if((attr->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(attr->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
/* find channel */
xge_list_for_each(item, &device->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t, item);
if (tmp->type == attr->type &&
tmp->post_qid == attr->post_qid &&
tmp->compl_qid == attr->compl_qid) {
channel = tmp;
break;
}
}
if (channel == NULL) {
return XGE_HAL_ERR_CHANNEL_NOT_FOUND;
}
#ifdef XGEHAL_RNIC
}
else {
channel = __hal_channel_allocate(devh, attr->post_qid,
#ifdef XGEHAL_RNIC
attr->vp_id,
#endif
attr->type);
if (channel == NULL) {
xge_debug_device(XGE_ERR,
"__hal_channel_allocate failed");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
}
#endif
#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
#endif
#ifdef XGEHAL_RNIC
if((reopen == XGE_HAL_CHANNEL_OC_NORMAL) ||
((channel->type != XGE_HAL_CHANNEL_TYPE_FIFO) &&
(channel->type != XGE_HAL_CHANNEL_TYPE_RING))) {
#else
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
#endif
/* allocate memory, initialize pointers, etc */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
status = __hal_fifo_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
status = __hal_ring_open(channel, attr);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
status = __hal_sq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
status = __hal_srq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
status = __hal_cqrq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
status = __hal_umq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
status = __hal_dmq_open(channel, attr);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
status = XGE_HAL_FAIL;
break;
#endif
default:
break;
}
if (status == XGE_HAL_OK) {
for (i = 0; i < channel->reserve_initial; i++) {
channel->orig_arr[i] =
channel->reserve_arr[i];
}
}
else
return status;
} else {
xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
for (i = 0; i < channel->reserve_initial; i++) {
channel->reserve_arr[i] = channel->orig_arr[i];
channel->free_arr[i] = NULL;
}
channel->free_length = channel->reserve_initial;
channel->reserve_length = channel->reserve_initial;
channel->reserve_top = 0;
channel->post_index = 0;
channel->compl_index = 0;
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
status = __hal_ring_initial_replenish(channel,
reopen);
if (status != XGE_HAL_OK)
return status;
}
}
/* move channel to the open state list */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->fifo_channels);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->ring_channels);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].sq_channels);
device->virtual_paths[attr->vp_id].stats.no_sqs++;
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].srq_channels);
device->virtual_paths[attr->vp_id].stats.no_srqs++;
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].cqrq_channels);
device->virtual_paths[attr->vp_id].stats.no_cqrqs++;
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
xge_list_init(&channel->item);
device->virtual_paths[attr->vp_id].umq_channelh = channel;
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_list_init(&channel->item);
device->virtual_paths[attr->vp_id].dmq_channelh = channel;
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
#endif
default:
break;
}
channel->is_open = 1;
/*
* The magic is used to check argument validity; it was supposed
* to be removed before 03/01/2005.
*/
channel->magic = XGE_HAL_MAGIC;
*channelh = channel;
return XGE_HAL_OK;
}
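/*
 * Usage sketch, in the spirit of ex_open{} (illustrative, not part of
 * the original sources; my_xmit_compl and my_softc are hypothetical
 * ULD names, and only the attribute fields referenced in this file
 * are shown):
 *
 *	xge_hal_channel_attr_t attr;
 *	xge_hal_channel_h channelh;
 *
 *	attr.type      = XGE_HAL_CHANNEL_TYPE_FIFO;
 *	attr.post_qid  = 0;
 *	attr.compl_qid = 0;
 *	attr.callback  = my_xmit_compl;
 *	attr.userdata  = my_softc;
 *	if (xge_hal_channel_open(devh, &attr, &channelh,
 *	    XGE_HAL_CHANNEL_OC_NORMAL) != XGE_HAL_OK)
 *		goto fail;
 */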
/**
* xge_hal_channel_abort - Abort the channel.
* @channelh: Channel handle.
* @reopen: See xge_hal_channel_reopen_e{}.
*
* Terminate (via xge_hal_channel_dtr_term_f{}) all channel descriptors.
* Currently used internally only by HAL, as part of its
* xge_hal_channel_close() and xge_hal_channel_open() in case
* of fatal error.
*
* See also: xge_hal_channel_dtr_term_f{}.
*/
void xge_hal_channel_abort(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_dtr_h dtr;
#ifdef XGE_OS_MEMORY_CHECK
int check_cnt = 0;
#endif
int free_length_sav;
int reserve_top_sav;
if (channel->dtr_term == NULL) {
return;
}
free_length_sav = channel->free_length;
while (__hal_channel_dtr_next_freelist(channelh, &dtr) == XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtr)->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_FREED,
channel->userdata, reopen);
}
channel->free_length = free_length_sav;
while (__hal_channel_dtr_next_not_completed(channelh, &dtr) ==
XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtr)->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED,
channel->userdata, reopen);
}
reserve_top_sav = channel->reserve_top;
while (__hal_channel_dtr_next_reservelist(channelh, &dtr) ==
XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtr)->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_AVAIL,
channel->userdata, reopen);
}
channel->reserve_top = reserve_top_sav;
xge_assert(channel->reserve_length ==
(channel->free_length + channel->reserve_top));
#ifdef XGE_OS_MEMORY_CHECK
xge_assert(check_cnt == channel->reserve_initial);
#endif
}
/**
* xge_hal_channel_close - Close communication channel.
* @channelh: The channel handle.
* @reopen: See xge_hal_channel_reopen_e{}.
*
* Closes a previously opened channel and deallocates the associated
* resources. The channel must be open, otherwise an assert will be
* generated. Note that the free_channels list is not protected, i.e.
* the caller must provide a safe context.
*/
void xge_hal_channel_close(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_device_t *hldev;
xge_list_t *item;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
xge_assert(channel);
xge_assert(channel->type < XGE_HAL_CHANNEL_TYPE_MAX);
hldev = (xge_hal_device_t *)channel->devh;
channel->is_open = 0;
channel->magic = XGE_HAL_DEAD;
#ifdef XGEHAL_RNIC
vp_id = channel->vp_id;
if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
/* sanity check: make sure channel is not in free list */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t, item);
xge_assert(!tmp->is_open);
if (channel == tmp) {
return;
}
}
#ifdef XGEHAL_RNIC
}
#endif
xge_hal_channel_abort(channel, reopen);
#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
#endif
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
/* de-allocate */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
__hal_fifo_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
__hal_ring_close(channelh);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
__hal_sq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_sqs--;
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
__hal_srq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_srqs--;
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
__hal_cqrq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_cqrqs--;
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
__hal_umq_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
__hal_dmq_close(channelh);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
#endif
default:
break;
}
}
else
xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
/* move channel back to free state list */
xge_list_remove(&channel->item);
#ifdef XGEHAL_RNIC
if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
xge_list_insert(&channel->item, &hldev->free_channels);
if (xge_list_is_empty(&hldev->fifo_channels) &&
xge_list_is_empty(&hldev->ring_channels)) {
/* clear msix_idx in case of following HW reset */
hldev->reset_needed_after_close = 1;
}
#ifdef XGEHAL_RNIC
}
else {
__hal_channel_free(channel);
}
#endif
}


@ -0,0 +1,761 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-config.c
*
* Description: configuration functionality
*
* Created: 14 May 2004
*/
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xge-debug.h>
/*
* __hal_tti_config_check - Check tti configuration
* @new_config: tti configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_tti_config_check (xge_hal_tti_config_t *new_config)
{
if ((new_config->urange_a < XGE_HAL_MIN_TX_URANGE_A) ||
(new_config->urange_a > XGE_HAL_MAX_TX_URANGE_A)) {
return XGE_HAL_BADCFG_TX_URANGE_A;
}
if ((new_config->ufc_a < XGE_HAL_MIN_TX_UFC_A) ||
(new_config->ufc_a > XGE_HAL_MAX_TX_UFC_A)) {
return XGE_HAL_BADCFG_TX_UFC_A;
}
if ((new_config->urange_b < XGE_HAL_MIN_TX_URANGE_B) ||
(new_config->urange_b > XGE_HAL_MAX_TX_URANGE_B)) {
return XGE_HAL_BADCFG_TX_URANGE_B;
}
if ((new_config->ufc_b < XGE_HAL_MIN_TX_UFC_B) ||
(new_config->ufc_b > XGE_HAL_MAX_TX_UFC_B)) {
return XGE_HAL_BADCFG_TX_UFC_B;
}
if ((new_config->urange_c < XGE_HAL_MIN_TX_URANGE_C) ||
(new_config->urange_c > XGE_HAL_MAX_TX_URANGE_C)) {
return XGE_HAL_BADCFG_TX_URANGE_C;
}
if ((new_config->ufc_c < XGE_HAL_MIN_TX_UFC_C) ||
(new_config->ufc_c > XGE_HAL_MAX_TX_UFC_C)) {
return XGE_HAL_BADCFG_TX_UFC_C;
}
if ((new_config->ufc_d < XGE_HAL_MIN_TX_UFC_D) ||
(new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) {
return XGE_HAL_BADCFG_TX_UFC_D;
}
if ((new_config->timer_val_us < XGE_HAL_MIN_TX_TIMER_VAL) ||
(new_config->timer_val_us > XGE_HAL_MAX_TX_TIMER_VAL)) {
return XGE_HAL_BADCFG_TX_TIMER_VAL;
}
if ((new_config->timer_ci_en < XGE_HAL_MIN_TX_TIMER_CI_EN) ||
(new_config->timer_ci_en > XGE_HAL_MAX_TX_TIMER_CI_EN)) {
return XGE_HAL_BADCFG_TX_TIMER_CI_EN;
}
if ((new_config->timer_ac_en < XGE_HAL_MIN_TX_TIMER_AC_EN) ||
(new_config->timer_ac_en > XGE_HAL_MAX_TX_TIMER_AC_EN)) {
return XGE_HAL_BADCFG_TX_TIMER_AC_EN;
}
return XGE_HAL_OK;
}
/*
* __hal_rti_config_check - Check rti configuration
* @new_config: rti configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_rti_config_check (xge_hal_rti_config_t *new_config)
{
if ((new_config->urange_a < XGE_HAL_MIN_RX_URANGE_A) ||
(new_config->urange_a > XGE_HAL_MAX_RX_URANGE_A)) {
return XGE_HAL_BADCFG_RX_URANGE_A;
}
if ((new_config->ufc_a < XGE_HAL_MIN_RX_UFC_A) ||
(new_config->ufc_a > XGE_HAL_MAX_RX_UFC_A)) {
return XGE_HAL_BADCFG_RX_UFC_A;
}
if ((new_config->urange_b < XGE_HAL_MIN_RX_URANGE_B) ||
(new_config->urange_b > XGE_HAL_MAX_RX_URANGE_B)) {
return XGE_HAL_BADCFG_RX_URANGE_B;
}
if ((new_config->ufc_b < XGE_HAL_MIN_RX_UFC_B) ||
(new_config->ufc_b > XGE_HAL_MAX_RX_UFC_B)) {
return XGE_HAL_BADCFG_RX_UFC_B;
}
if ((new_config->urange_c < XGE_HAL_MIN_RX_URANGE_C) ||
(new_config->urange_c > XGE_HAL_MAX_RX_URANGE_C)) {
return XGE_HAL_BADCFG_RX_URANGE_C;
}
if ((new_config->ufc_c < XGE_HAL_MIN_RX_UFC_C) ||
(new_config->ufc_c > XGE_HAL_MAX_RX_UFC_C)) {
return XGE_HAL_BADCFG_RX_UFC_C;
}
if ((new_config->ufc_d < XGE_HAL_MIN_RX_UFC_D) ||
(new_config->ufc_d > XGE_HAL_MAX_RX_UFC_D)) {
return XGE_HAL_BADCFG_RX_UFC_D;
}
if ((new_config->timer_val_us < XGE_HAL_MIN_RX_TIMER_VAL) ||
(new_config->timer_val_us > XGE_HAL_MAX_RX_TIMER_VAL)) {
return XGE_HAL_BADCFG_RX_TIMER_VAL;
}
if ((new_config->timer_ac_en < XGE_HAL_MIN_RX_TIMER_AC_EN) ||
(new_config->timer_ac_en > XGE_HAL_MAX_RX_TIMER_AC_EN)) {
return XGE_HAL_BADCFG_RX_TIMER_AC_EN;
}
return XGE_HAL_OK;
}
/*
* __hal_fifo_queue_check - Check fifo queue configuration
* @new_config: fifo queue configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_fifo_queue_check (xge_hal_fifo_config_t *new_config,
xge_hal_fifo_queue_t *new_queue)
{
int i;
if ((new_queue->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
(new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH;
}
/* FIXME: queue "grow" feature is not supported.
* Use "initial" queue size as the "maximum";
* Remove the next line when fixed. */
new_queue->max = new_queue->initial;
if ((new_queue->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
(new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
}
if (new_queue->max < new_config->reserve_threshold) {
return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
}
if ((new_queue->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) ||
(new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INTR;
}
if ((new_queue->intr_vector < XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR) ||
(new_queue->intr_vector > XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR;
}
for(i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++) {
/*
* Validate the tti configuration parameters only if
* the TTI feature is enabled.
*/
if (new_queue->tti[i].enabled) {
xge_hal_status_e status;
if ((status = __hal_tti_config_check(
&new_queue->tti[i])) != XGE_HAL_OK) {
return status;
}
}
}
return XGE_HAL_OK;
}
/*
* __hal_ring_queue_check - Check ring queue configuration
* @new_config: ring queue configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_ring_queue_check (xge_hal_ring_queue_t *new_config)
{
if ((new_config->initial < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
(new_config->initial > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
return XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS;
}
/* FIXME: queue "grow" feature is not supported.
* Use "initial" queue size as the "maximum";
* Remove the next line when fixed. */
new_config->max = new_config->initial;
if ((new_config->max < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
(new_config->max > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
return XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS;
}
if ((new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
(new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_3) &&
(new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)) {
return XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE;
}
/*
* Herc has less DRAM; the check is done later inside
* device_initialize()
*/
if (((new_config->dram_size_mb < XGE_HAL_MIN_RING_QUEUE_SIZE) ||
(new_config->dram_size_mb > XGE_HAL_MAX_RING_QUEUE_SIZE_XENA)) &&
new_config->dram_size_mb != XGE_HAL_DEFAULT_USE_HARDCODE)
return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
if ((new_config->backoff_interval_us <
XGE_HAL_MIN_BACKOFF_INTERVAL_US) ||
(new_config->backoff_interval_us >
XGE_HAL_MAX_BACKOFF_INTERVAL_US)) {
return XGE_HAL_BADCFG_BACKOFF_INTERVAL_US;
}
if ((new_config->max_frm_len < XGE_HAL_MIN_MAX_FRM_LEN) ||
(new_config->max_frm_len > XGE_HAL_MAX_MAX_FRM_LEN)) {
return XGE_HAL_BADCFG_MAX_FRM_LEN;
}
if ((new_config->priority < XGE_HAL_MIN_RING_PRIORITY) ||
(new_config->priority > XGE_HAL_MAX_RING_PRIORITY)) {
return XGE_HAL_BADCFG_RING_PRIORITY;
}
if ((new_config->rth_en < XGE_HAL_MIN_RING_RTH_EN) ||
(new_config->rth_en > XGE_HAL_MAX_RING_RTH_EN)) {
return XGE_HAL_BADCFG_RING_RTH_EN;
}
if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_MAC_EN) ||
(new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_MAC_EN)) {
return XGE_HAL_BADCFG_RING_RTS_MAC_EN;
}
if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
(new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
return XGE_HAL_BADCFG_RING_RTS_PORT_EN;
}
if ((new_config->intr_vector < XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR) ||
(new_config->intr_vector > XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR)) {
return XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR;
}
if (new_config->indicate_max_pkts <
XGE_HAL_MIN_RING_INDICATE_MAX_PKTS ||
new_config->indicate_max_pkts >
XGE_HAL_MAX_RING_INDICATE_MAX_PKTS) {
return XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS;
}
return __hal_rti_config_check(&new_config->rti);
}
/*
* __hal_mac_config_check - Check mac configuration
* @new_config: mac configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_mac_config_check (xge_hal_mac_config_t *new_config)
{
if ((new_config->tmac_util_period < XGE_HAL_MIN_TMAC_UTIL_PERIOD) ||
(new_config->tmac_util_period > XGE_HAL_MAX_TMAC_UTIL_PERIOD)) {
return XGE_HAL_BADCFG_TMAC_UTIL_PERIOD;
}
if ((new_config->rmac_util_period < XGE_HAL_MIN_RMAC_UTIL_PERIOD) ||
(new_config->rmac_util_period > XGE_HAL_MAX_RMAC_UTIL_PERIOD)) {
return XGE_HAL_BADCFG_RMAC_UTIL_PERIOD;
}
if ((new_config->rmac_bcast_en < XGE_HAL_MIN_RMAC_BCAST_EN) ||
(new_config->rmac_bcast_en > XGE_HAL_MAX_RMAC_BCAST_EN)) {
return XGE_HAL_BADCFG_RMAC_BCAST_EN;
}
if ((new_config->rmac_pause_gen_en < XGE_HAL_MIN_RMAC_PAUSE_GEN_EN) ||
(new_config->rmac_pause_gen_en>XGE_HAL_MAX_RMAC_PAUSE_GEN_EN)) {
return XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN;
}
if ((new_config->rmac_pause_rcv_en < XGE_HAL_MIN_RMAC_PAUSE_RCV_EN) ||
(new_config->rmac_pause_rcv_en>XGE_HAL_MAX_RMAC_PAUSE_RCV_EN)) {
return XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN;
}
if ((new_config->rmac_pause_time < XGE_HAL_MIN_RMAC_HIGH_PTIME) ||
(new_config->rmac_pause_time > XGE_HAL_MAX_RMAC_HIGH_PTIME)) {
return XGE_HAL_BADCFG_RMAC_HIGH_PTIME;
}
if ((new_config->media < XGE_HAL_MIN_MEDIA) ||
(new_config->media > XGE_HAL_MAX_MEDIA)) {
return XGE_HAL_BADCFG_MEDIA;
}
if ((new_config->mc_pause_threshold_q0q3 <
XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3) ||
(new_config->mc_pause_threshold_q0q3 >
XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3)) {
return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3;
}
if ((new_config->mc_pause_threshold_q4q7 <
XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7) ||
(new_config->mc_pause_threshold_q4q7 >
XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7)) {
return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7;
}
return XGE_HAL_OK;
}
/*
* __hal_fifo_config_check - Check fifo configuration
* @new_config: fifo configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_fifo_config_check (xge_hal_fifo_config_t *new_config)
{
int i;
int total_fifo_length = 0;
/*
* recompute max_frags to be a multiple of 4,
* which means a multiple of 128 bytes for the TxDL
*/
new_config->max_frags = ((new_config->max_frags + 3) >> 2) << 2;
if ((new_config->max_frags < XGE_HAL_MIN_FIFO_FRAGS) ||
(new_config->max_frags > XGE_HAL_MAX_FIFO_FRAGS)) {
return XGE_HAL_BADCFG_FIFO_FRAGS;
}
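/*
 * Worked example (illustrative, not part of the original sources):
 * the rounding above maps max_frags = 13 to ((13 + 3) >> 2) << 2 = 16,
 * i.e. up to the next multiple of 4 fragments (128 bytes of TxDL).
 */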
if ((new_config->reserve_threshold <
XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD) ||
(new_config->reserve_threshold >
XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD)) {
return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
}
if ((new_config->memblock_size < XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE) ||
(new_config->memblock_size > XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE)) {
return XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE;
}
for(i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
xge_hal_status_e status;
if (!new_config->queue[i].configured)
continue;
if ((status = __hal_fifo_queue_check(new_config,
&new_config->queue[i])) != XGE_HAL_OK) {
return status;
}
total_fifo_length += new_config->queue[i].max;
}
if(total_fifo_length > XGE_HAL_MAX_FIFO_QUEUE_LENGTH){
return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
}
return XGE_HAL_OK;
}
/*
* __hal_ring_config_check - Check ring configuration
* @new_config: Ring configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
static xge_hal_status_e
__hal_ring_config_check (xge_hal_ring_config_t *new_config)
{
int i;
if ((new_config->memblock_size < XGE_HAL_MIN_RING_MEMBLOCK_SIZE) ||
(new_config->memblock_size > XGE_HAL_MAX_RING_MEMBLOCK_SIZE)) {
return XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE;
}
for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
xge_hal_status_e status;
if (!new_config->queue[i].configured)
continue;
if ((status = __hal_ring_queue_check(&new_config->queue[i]))
!= XGE_HAL_OK) {
return status;
}
}
return XGE_HAL_OK;
}
/*
* __hal_device_config_check_common - Check device configuration.
* @new_config: Device configuration information
*
* Check part of configuration that is common to
* Xframe-I and Xframe-II.
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*
* See also: __hal_device_config_check_xena().
*/
xge_hal_status_e
__hal_device_config_check_common (xge_hal_device_config_t *new_config)
{
xge_hal_status_e status;
if ((new_config->mtu < XGE_HAL_MIN_MTU) ||
(new_config->mtu > XGE_HAL_MAX_MTU)) {
return XGE_HAL_BADCFG_MAX_MTU;
}
if ((new_config->bimodal_interrupts < XGE_HAL_BIMODAL_INTR_MIN) ||
(new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) {
return XGE_HAL_BADCFG_BIMODAL_INTR;
}
if (new_config->bimodal_interrupts &&
((new_config->bimodal_timer_lo_us < XGE_HAL_BIMODAL_TIMER_LO_US_MIN) ||
(new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) {
return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US;
}
if (new_config->bimodal_interrupts &&
((new_config->bimodal_timer_hi_us < XGE_HAL_BIMODAL_TIMER_HI_US_MIN) ||
(new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) {
return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US;
}
if ((new_config->no_isr_events < XGE_HAL_NO_ISR_EVENTS_MIN) ||
(new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) {
return XGE_HAL_BADCFG_NO_ISR_EVENTS;
}
if ((new_config->isr_polling_cnt < XGE_HAL_MIN_ISR_POLLING_CNT) ||
(new_config->isr_polling_cnt > XGE_HAL_MAX_ISR_POLLING_CNT)) {
return XGE_HAL_BADCFG_ISR_POLLING_CNT;
}
if (new_config->latency_timer &&
new_config->latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
if ((new_config->latency_timer < XGE_HAL_MIN_LATENCY_TIMER) ||
(new_config->latency_timer > XGE_HAL_MAX_LATENCY_TIMER)) {
return XGE_HAL_BADCFG_LATENCY_TIMER;
}
}
if (new_config->max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
if ((new_config->max_splits_trans <
XGE_HAL_ONE_SPLIT_TRANSACTION) ||
(new_config->max_splits_trans >
XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION))
return XGE_HAL_BADCFG_MAX_SPLITS_TRANS;
}
if (new_config->mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
if ((new_config->mmrb_count < XGE_HAL_MIN_MMRB_COUNT) ||
(new_config->mmrb_count > XGE_HAL_MAX_MMRB_COUNT)) {
return XGE_HAL_BADCFG_MMRB_COUNT;
}
}
if ((new_config->shared_splits < XGE_HAL_MIN_SHARED_SPLITS) ||
(new_config->shared_splits > XGE_HAL_MAX_SHARED_SPLITS)) {
return XGE_HAL_BADCFG_SHARED_SPLITS;
}
if (new_config->stats_refresh_time_sec !=
XGE_HAL_STATS_REFRESH_DISABLE) {
if ((new_config->stats_refresh_time_sec <
XGE_HAL_MIN_STATS_REFRESH_TIME) ||
(new_config->stats_refresh_time_sec >
XGE_HAL_MAX_STATS_REFRESH_TIME)) {
return XGE_HAL_BADCFG_STATS_REFRESH_TIME;
}
}
if ((new_config->intr_mode != XGE_HAL_INTR_MODE_IRQLINE) &&
(new_config->intr_mode != XGE_HAL_INTR_MODE_MSI) &&
(new_config->intr_mode != XGE_HAL_INTR_MODE_MSIX)) {
return XGE_HAL_BADCFG_INTR_MODE;
}
if ((new_config->sched_timer_us < XGE_HAL_SCHED_TIMER_MIN) ||
(new_config->sched_timer_us > XGE_HAL_SCHED_TIMER_MAX)) {
return XGE_HAL_BADCFG_SCHED_TIMER_US;
}
if ((new_config->sched_timer_one_shot !=
XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE) &&
(new_config->sched_timer_one_shot !=
XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE)) {
return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT;
}
/*
* Check the adaptive schema parameters. Note that two configuration
* conditions need to hold in the ULD:
*
* a) sched_timer_us should not be zero;
* b) rxufca_hi_lim should not be equal to rxufca_lo_lim.
*
* The code below checks for those conditions.
*/
if (new_config->sched_timer_us &&
new_config->rxufca_hi_lim != new_config->rxufca_lo_lim) {
if ((new_config->rxufca_intr_thres <
XGE_HAL_RXUFCA_INTR_THRES_MIN) ||
(new_config->rxufca_intr_thres >
XGE_HAL_RXUFCA_INTR_THRES_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_INTR_THRES;
}
if ((new_config->rxufca_hi_lim < XGE_HAL_RXUFCA_HI_LIM_MIN) ||
(new_config->rxufca_hi_lim > XGE_HAL_RXUFCA_HI_LIM_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_HI_LIM;
}
if ((new_config->rxufca_lo_lim < XGE_HAL_RXUFCA_LO_LIM_MIN) ||
(new_config->rxufca_lo_lim > XGE_HAL_RXUFCA_LO_LIM_MAX) ||
(new_config->rxufca_lo_lim > new_config->rxufca_hi_lim)) {
return XGE_HAL_BADCFG_RXUFCA_LO_LIM;
}
if ((new_config->rxufca_lbolt_period <
XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN) ||
(new_config->rxufca_lbolt_period >
XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD;
}
}
if ((new_config->link_valid_cnt < XGE_HAL_LINK_VALID_CNT_MIN) ||
(new_config->link_valid_cnt > XGE_HAL_LINK_VALID_CNT_MAX)) {
return XGE_HAL_BADCFG_LINK_VALID_CNT;
}
if ((new_config->link_retry_cnt < XGE_HAL_LINK_RETRY_CNT_MIN) ||
(new_config->link_retry_cnt > XGE_HAL_LINK_RETRY_CNT_MAX)) {
return XGE_HAL_BADCFG_LINK_RETRY_CNT;
}
if (new_config->link_valid_cnt > new_config->link_retry_cnt)
return XGE_HAL_BADCFG_LINK_VALID_CNT;
if (new_config->link_stability_period != XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->link_stability_period <
XGE_HAL_MIN_LINK_STABILITY_PERIOD) ||
(new_config->link_stability_period >
XGE_HAL_MAX_LINK_STABILITY_PERIOD)) {
return XGE_HAL_BADCFG_LINK_STABILITY_PERIOD;
}
}
if (new_config->device_poll_millis !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->device_poll_millis <
XGE_HAL_MIN_DEVICE_POLL_MILLIS) ||
(new_config->device_poll_millis >
XGE_HAL_MAX_DEVICE_POLL_MILLIS)) {
return XGE_HAL_BADCFG_DEVICE_POLL_MILLIS;
}
}
if ((new_config->rts_port_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
(new_config->rts_port_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
return XGE_HAL_BADCFG_RTS_PORT_EN;
}
if ((new_config->rts_qos_en < XGE_HAL_RTS_QOS_DISABLE) ||
(new_config->rts_qos_en > XGE_HAL_RTS_QOS_ENABLE)) {
return XGE_HAL_BADCFG_RTS_QOS_EN;
}
#if defined(XGE_HAL_CONFIG_LRO)
if (new_config->lro_sg_size !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) ||
(new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) {
return XGE_HAL_BADCFG_LRO_SG_SIZE;
}
}
if (new_config->lro_frm_len !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) ||
(new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) {
return XGE_HAL_BADCFG_LRO_FRM_LEN;
}
}
#endif
if ((status = __hal_ring_config_check(&new_config->ring))
!= XGE_HAL_OK) {
return status;
}
if ((status = __hal_mac_config_check(&new_config->mac)) !=
XGE_HAL_OK) {
return status;
}
if ((status = __hal_fifo_config_check(&new_config->fifo)) !=
XGE_HAL_OK) {
return status;
}
return XGE_HAL_OK;
}
/*
* __hal_device_config_check_xena - Check Xframe-I configuration
* @new_config: Device configuration.
*
* Check part of configuration that is relevant only to Xframe-I.
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*
* See also: __hal_device_config_check_common().
*/
xge_hal_status_e
__hal_device_config_check_xena (xge_hal_device_config_t *new_config)
{
if ((new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_33) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) &&
(new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) {
return XGE_HAL_BADCFG_PCI_FREQ_MHERZ;
}
return XGE_HAL_OK;
}
/*
* __hal_device_config_check_herc - Check device configuration
* @new_config: Device configuration.
*
* Check part of configuration that is relevant only to Xframe-II.
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*
* See also: __hal_device_config_check_common().
*/
xge_hal_status_e
__hal_device_config_check_herc (xge_hal_device_config_t *new_config)
{
return XGE_HAL_OK;
}
/*
* __hal_driver_config_check - Check HAL configuration
* @new_config: Driver configuration information
*
* Returns: XGE_HAL_OK - success,
* otherwise one of the xge_hal_status_e{} enumerated error codes.
*/
xge_hal_status_e
__hal_driver_config_check (xge_hal_driver_config_t *new_config)
{
if ((new_config->queue_size_initial <
XGE_HAL_MIN_QUEUE_SIZE_INITIAL) ||
(new_config->queue_size_initial >
XGE_HAL_MAX_QUEUE_SIZE_INITIAL)) {
return XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL;
}
if ((new_config->queue_size_max < XGE_HAL_MIN_QUEUE_SIZE_MAX) ||
(new_config->queue_size_max > XGE_HAL_MAX_QUEUE_SIZE_MAX)) {
return XGE_HAL_BADCFG_QUEUE_SIZE_MAX;
}
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if ((new_config->tracebuf_size < XGE_HAL_MIN_CIRCULAR_ARR) ||
(new_config->tracebuf_size > XGE_HAL_MAX_CIRCULAR_ARR)) {
return XGE_HAL_BADCFG_TRACEBUF_SIZE;
}
if ((new_config->tracebuf_timestamp_en < XGE_HAL_MIN_TIMESTAMP_EN) ||
(new_config->tracebuf_timestamp_en > XGE_HAL_MAX_TIMESTAMP_EN)) {
return XGE_HAL_BADCFG_TRACEBUF_SIZE;
}
#endif
return XGE_HAL_OK;
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,300 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-driver.c
*
* Description: HAL driver object functionality
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-driver.h>
#include <dev/nxge/include/xgehal-device.h>
static xge_hal_driver_t g_driver;
xge_hal_driver_t *g_xge_hal_driver = NULL;
char *g_xge_hal_log = NULL;
#ifdef XGE_OS_MEMORY_CHECK
xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX];
int g_malloc_cnt = 0;
#endif
/*
* Runtime tracing support
*/
static unsigned long g_module_mask_default = 0;
unsigned long *g_module_mask = &g_module_mask_default;
static int g_level_default = 0;
int *g_level = &g_level_default;
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
static xge_os_tracebuf_t g_tracebuf;
char *dmesg, *dmesg_start;
/**
* xge_hal_driver_tracebuf_dump - Dump the trace buffer.
*
* Dump the trace buffer contents.
*/
void
xge_hal_driver_tracebuf_dump(void)
{
int i;
int off = 0;
if (g_xge_os_tracebuf == NULL) {
return;
}
xge_os_printf("################ Trace dump Begin ###############");
if (g_xge_os_tracebuf->wrapped_once) {
for (i = 0; i < g_xge_os_tracebuf->size -
g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg_start + i))
xge_os_printf(dmesg_start + i);
off = xge_os_strlen(dmesg_start + i) + 1;
}
}
for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg + i))
xge_os_printf(dmesg + i);
off = xge_os_strlen(dmesg + i) + 1;
}
xge_os_printf("################ Trace dump End ###############");
}
xge_hal_status_e
xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize)
{
int i;
int off = 0, retbuf_off = 0;
*retsize = 0;
*retbuf = 0;
if (g_xge_os_tracebuf == NULL) {
return XGE_HAL_FAIL;
}
if (g_xge_os_tracebuf->wrapped_once) {
for (i = 0; i < g_xge_os_tracebuf->size -
g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg_start + i)) {
xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg_start + i);
retbuf_off += xge_os_strlen(dmesg_start + i) + 1;
if (retbuf_off > bufsize)
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
off = xge_os_strlen(dmesg_start + i) + 1;
}
}
for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg + i)) {
xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg + i);
retbuf_off += xge_os_strlen(dmesg + i) + 1;
if (retbuf_off > bufsize)
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
off = xge_os_strlen(dmesg + i) + 1;
}
*retsize = retbuf_off;
*(retbuf + retbuf_off + 1) = 0;
return XGE_HAL_OK;
}
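/*
 * Usage sketch (illustrative, not part of the original sources; the
 * buffer size is an arbitrary choice):
 *
 *	char buf[16384];
 *	int got;
 *
 *	if (xge_hal_driver_tracebuf_read(sizeof(buf), buf, &got) ==
 *	    XGE_HAL_OK)
 *		xge_os_printf("read %d bytes of trace data", got);
 */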
#endif
xge_os_tracebuf_t *g_xge_os_tracebuf = NULL;
#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
void
xge_hal_driver_bar0_offset_check(void)
{
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, adapter_status) ==
0x108);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_traffic_int) ==
0x08E0);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, dtx_control) ==
0x09E8);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_fifo_partition_0) ==
0x1108);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_enable) ==
0x1170);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, prc_rxd0_n[0]) ==
0x1930);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rti_command_mem) ==
0x19B8);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_cfg) ==
0x2100);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rmac_addr_cmd_mem) ==
0x2128);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_link_util) ==
0x2170);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_pause_thresh_q0q3) ==
0x2918);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_err_reg) ==
0x1040);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rxdma_int_status) ==
0x1800);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_tmac_err_reg) ==
0x2010);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_err_reg) ==
0x2810);
xge_assert(xge_offsetof(xge_hal_pci_bar0_t, xgxs_int_status) ==
0x3000);
}
#endif
/**
* xge_hal_driver_initialize - Initialize HAL.
* @config: HAL configuration, see xge_hal_driver_config_t{}.
* @uld_callbacks: Upper-layer driver callbacks, e.g. link-up.
*
* HAL initialization entry point. Not to be confused with device
* initialization (note that HAL "contains" zero or more Xframe devices).
*
* Returns: XGE_HAL_OK - success;
* XGE_HAL_ERR_BAD_DRIVER_CONFIG - Driver configuration params invalid.
*
* See also: xge_hal_device_initialize(), xge_hal_status_e{},
* xge_hal_uld_cbs_t{}.
*/
xge_hal_status_e
xge_hal_driver_initialize(xge_hal_driver_config_t *config,
xge_hal_uld_cbs_t *uld_callbacks)
{
xge_hal_status_e status;
g_xge_hal_driver = &g_driver;
xge_hal_driver_debug_module_mask_set(XGE_DEBUG_MODULE_MASK_DEF);
xge_hal_driver_debug_level_set(XGE_DEBUG_LEVEL_DEF);
#ifdef XGE_HAL_DEBUG_BAR0_OFFSET
xge_hal_driver_bar0_offset_check();
#endif
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if (config->tracebuf_size == 0)
/*
* The trace buffer implementation is not lock protected.
* The only harm to expect is a memcpy() going beyond the
* allowed boundaries. To make it safe (driver-wise),
* we pre-allocate the needed number of extra bytes.
*/
config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
XGE_OS_TRACE_MSGBUF_MAX;
#endif
status = __hal_driver_config_check(config);
if (status != XGE_HAL_OK)
return status;
xge_os_memzero(g_xge_hal_driver, sizeof(xge_hal_driver_t));
/* apply config */
xge_os_memcpy(&g_xge_hal_driver->config, config,
sizeof(xge_hal_driver_config_t));
/* apply ULD callbacks */
xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks,
sizeof(xge_hal_uld_cbs_t));
g_xge_hal_driver->is_initialized = 1;
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
g_tracebuf.size = config->tracebuf_size;
g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size);
if (g_tracebuf.data == NULL) {
xge_os_printf("cannot allocate trace buffer!");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
/* timestamps disabled by default */
g_tracebuf.timestamp = config->tracebuf_timestamp_en;
if (g_tracebuf.timestamp) {
xge_os_timestamp(g_tracebuf.msg);
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
xge_os_strlen(g_tracebuf.msg);
} else
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
g_tracebuf.offset = 0;
*g_tracebuf.msg = 0;
xge_os_memzero(g_tracebuf.data, g_tracebuf.size);
g_xge_os_tracebuf = &g_tracebuf;
dmesg = g_tracebuf.data;
*dmesg = 0;
#endif
return XGE_HAL_OK;
}
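/*
 * Usage sketch (illustrative, not part of the original sources;
 * my_link_up/my_link_down are hypothetical ULD callbacks, the
 * xge_hal_uld_cbs_t field names are assumed, and the config fields
 * must satisfy __hal_driver_config_check()):
 *
 *	xge_hal_driver_config_t drv_config;
 *	xge_hal_uld_cbs_t uld_cbs;
 *
 *	xge_os_memzero(&drv_config, sizeof(drv_config));
 *	xge_os_memzero(&uld_cbs, sizeof(uld_cbs));
 *	uld_cbs.link_up   = my_link_up;
 *	uld_cbs.link_down = my_link_down;
 *	if (xge_hal_driver_initialize(&drv_config, &uld_cbs) != XGE_HAL_OK)
 *		return (ENXIO);
 */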
/**
* xge_hal_driver_terminate - Terminate HAL.
*
* HAL termination entry point.
*
* See also: xge_hal_device_terminate().
*/
void
xge_hal_driver_terminate(void)
{
g_xge_hal_driver->is_initialized = 0;
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if (g_tracebuf.size) {
xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size);
}
#endif
g_xge_hal_driver = NULL;
#ifdef XGE_OS_MEMORY_CHECK
{
int i, leaks=0;
xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt);
for (i=0; i<g_malloc_cnt; i++) {
if (g_malloc_arr[i].ptr != NULL) {
xge_os_printf("OSPAL: memory leak detected at "
"%s:%d:"XGE_OS_LLXFMT":%d",
g_malloc_arr[i].file,
g_malloc_arr[i].line,
(unsigned long long)(ulong_t)
g_malloc_arr[i].ptr,
g_malloc_arr[i].size);
leaks++;
}
}
if (leaks) {
xge_os_printf("OSPAL: %d memory leaks detected", leaks);
} else {
xge_os_printf("OSPAL: no memory leaks detected");
}
}
#endif
}

File diff suppressed because it is too large


@ -0,0 +1,568 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-fifo.c
*
* Description: fifo object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-device.h>
static xge_hal_status_e
__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int memblock_item_idx;
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)item;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
xge_assert(item);
txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
xge_assert(txdl_priv);
/* pre-format the HAL-private area of this TxDL */
txdl_priv->dma_offset = (char*)item - (char*)memblock;
txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
txdl_priv->dma_handle = dma_object->handle;
txdl_priv->memblock = memblock;
txdl_priv->first_txdp = (xge_hal_fifo_txd_t *)item;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->alloc_frags = 0;
#ifdef XGE_DEBUG_ASSERT
txdl_priv->dma_object = dma_object;
#endif
txdp->host_control = (u64)(ulong_t)txdl_priv;
#ifdef XGE_HAL_ALIGN_XMIT
txdl_priv->align_vaddr = NULL;
txdl_priv->align_dma_addr = (dma_addr_t)0;
#ifndef XGE_HAL_ALIGN_XMIT_ALLOC_RT
{
xge_hal_status_e status;
if (fifo->config->alignment_size) {
status = __hal_fifo_dtr_align_alloc_map(fifo, txdp);
if (status != XGE_HAL_OK) {
xge_debug_mm(XGE_ERR,
"align buffer[%d] %d bytes, status %d",
index,
fifo->align_size,
status);
return status;
}
}
}
#endif
#endif
if (fifo->channel.dtr_init) {
fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
}
return XGE_HAL_OK;
}
static xge_hal_status_e
__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int memblock_item_idx;
xge_hal_fifo_txdl_priv_t *txdl_priv;
#ifdef XGE_HAL_ALIGN_XMIT
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)userdata;
#endif
xge_assert(item);
txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
xge_assert(txdl_priv);
#ifdef XGE_HAL_ALIGN_XMIT
if (fifo->config->alignment_size) {
if (txdl_priv->align_dma_addr != 0) {
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
}
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
txdl_priv->align_vaddr = NULL;
}
}
#endif
return XGE_HAL_OK;
}
xge_hal_status_e
__hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
xge_hal_device_t *hldev;
xge_hal_status_e status;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_queue_t *queue;
int i, txdl_size, max_arr_index, mid_point;
xge_hal_dtr_h dtrh;
hldev = (xge_hal_device_t *)fifo->channel.devh;
fifo->config = &hldev->config.fifo;
queue = &fifo->config->queue[attr->post_qid];
#if defined(XGE_HAL_TX_MULTI_RESERVE)
xge_os_spin_lock_init(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_init_irq(&fifo->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
fifo->post_lock_ptr = &hldev->xena_post_lock;
} else {
xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
fifo->post_lock_ptr = &fifo->channel.post_lock;
}
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
fifo->post_lock_ptr = &hldev->xena_post_lock;
} else {
xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
hldev->irqh);
fifo->post_lock_ptr = &fifo->channel.post_lock;
}
#endif
fifo->align_size =
fifo->config->alignment_size * fifo->config->max_aligned_frags;
/* Initialize the BAR1 address of this FIFO's hardware pair,
* i.e. the location of the TxDL pointer and of the FIFO
* control word. */
fifo->hw_pair =
(xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
(attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
if (queue->intr) {
fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
}
fifo->no_snoop_bits =
(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));
/*
* FIFO memory management strategy:
*
* A TxDL is split into three independent parts:
* - set of TxD's
* - TxD HAL private part
* - upper layer private part
*
* Adaptive memory allocation is used, i.e. memory is allocated on
* demand, with a size that fits into one memory block.
* One memory block may contain more than one TxDL. In the simple case
* the memory block size can be equal to the CPU page size. On more
* sophisticated OS's a memory block can be contiguous across
* several pages.
*
* During "reserve" operations more memory can be allocated on demand,
* for example due to a FIFO-full condition.
*
* The pool of memblocks never shrinks, except via the __hal_fifo_close
* routine, which essentially stops the channel and frees the resources.
*/
/* TxDL common private size == TxDL private + ULD private */
fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
attr->per_dtr_space;
fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
__xge_os_cacheline_size) *
__xge_os_cacheline_size;
/* recompute txdl size to be cacheline aligned */
fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
__xge_os_cacheline_size) * __xge_os_cacheline_size;
if (fifo->txdl_size != txdl_size)
xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d",
fifo->config->max_frags, fifo->txdl_size, txdl_size,
__xge_os_cacheline_size);
fifo->txdl_size = txdl_size;
/* the dtr_init() callback will be called from item_alloc();
* likewise, the channel's userdata might be used prior to
* channel_initialize() */
fifo->channel.dtr_init = attr->dtr_init;
fifo->channel.userdata = attr->userdata;
fifo->txdl_per_memblock = fifo->config->memblock_size /
fifo->txdl_size;
fifo->mempool = __hal_mempool_create(hldev->pdev,
fifo->config->memblock_size,
fifo->txdl_size,
fifo->priv_size,
queue->initial,
queue->max,
__hal_fifo_mempool_item_alloc,
__hal_fifo_mempool_item_free,
fifo);
if (fifo->mempool == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
status = __hal_channel_initialize(channelh, attr,
(void **) __hal_mempool_items_arr(fifo->mempool),
queue->initial, queue->max,
fifo->config->reserve_threshold);
if (status != XGE_HAL_OK) {
__hal_fifo_close(channelh);
return status;
}
xge_debug_fifo(XGE_TRACE,
"DTR reserve_length:%d reserve_top:%d\n"
"max_frags:%d reserve_threshold:%d\n"
"memblock_size:%d alignment_size:%d max_aligned_frags:%d",
fifo->channel.reserve_length, fifo->channel.reserve_top,
fifo->config->max_frags, fifo->config->reserve_threshold,
fifo->config->memblock_size, fifo->config->alignment_size,
fifo->config->max_aligned_frags);
#ifdef XGE_DEBUG_ASSERT
for ( i = 0; i < fifo->channel.reserve_length; i++) {
xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
}
#endif
xge_assert(fifo->channel.reserve_length);
/* reverse the FIFO dtr array */
max_arr_index = fifo->channel.reserve_length - 1;
max_arr_index -= fifo->channel.reserve_top;
xge_assert(max_arr_index);
mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
for (i = 0; i < mid_point; i++) {
dtrh = fifo->channel.reserve_arr[i];
fifo->channel.reserve_arr[i] =
fifo->channel.reserve_arr[max_arr_index - i];
fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
}
#ifdef XGE_DEBUG_ASSERT
for ( i = 0; i < fifo->channel.reserve_length; i++) {
xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
}
#endif
return XGE_HAL_OK;
}
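/*
 * A sketch of the round-up-to-cacheline idiom used by __hal_fifo_open()
 * above; the XGE_ROUNDUP macro is hypothetical, not part of the HAL:
 */
#if 0
#define XGE_ROUNDUP(x, align)	((((x) + (align) - 1) / (align)) * (align))
/* e.g. with a 64-byte cacheline a 100-byte priv area grows to 128 bytes:
 * XGE_ROUNDUP(100, 64) == 128, while XGE_ROUNDUP(128, 64) == 128 */
#endif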
void
__hal_fifo_close(xge_hal_channel_h channelh)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;
if (fifo->mempool) {
__hal_mempool_destroy(fifo->mempool);
}
__hal_channel_terminate(channelh);
#if defined(XGE_HAL_TX_MULTI_RESERVE)
xge_os_spin_lock_destroy(&fifo->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_destroy_irq(&fifo->channel.reserve_lock, hldev->pdev);
#endif
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
#if defined(XGE_HAL_TX_MULTI_POST)
xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
hldev->pdev);
#endif
}
}
void
__hal_fifo_hw_initialize(xge_hal_device_h devh)
{
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
u64* tx_fifo_partitions[4];
u64* tx_fifo_wrr[5];
u64 tx_fifo_wrr_value[5];
u64 val64, part0;
int i;
/* Tx DMA Initialization */
tx_fifo_partitions[0] = &bar0->tx_fifo_partition_0;
tx_fifo_partitions[1] = &bar0->tx_fifo_partition_1;
tx_fifo_partitions[2] = &bar0->tx_fifo_partition_2;
tx_fifo_partitions[3] = &bar0->tx_fifo_partition_3;
tx_fifo_wrr[0] = &bar0->tx_w_round_robin_0;
tx_fifo_wrr[1] = &bar0->tx_w_round_robin_1;
tx_fifo_wrr[2] = &bar0->tx_w_round_robin_2;
tx_fifo_wrr[3] = &bar0->tx_w_round_robin_3;
tx_fifo_wrr[4] = &bar0->tx_w_round_robin_4;
tx_fifo_wrr_value[0] = XGE_HAL_FIFO_WRR_0;
tx_fifo_wrr_value[1] = XGE_HAL_FIFO_WRR_1;
tx_fifo_wrr_value[2] = XGE_HAL_FIFO_WRR_2;
tx_fifo_wrr_value[3] = XGE_HAL_FIFO_WRR_3;
tx_fifo_wrr_value[4] = XGE_HAL_FIFO_WRR_4;
/* Note: the WRR calendar must be configured before the transmit
* FIFOs are enabled! See page 6-77 of the user guide. */
if (!hldev->config.rts_qos_en) {
/* all zeroes for Round-Robin */
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
tx_fifo_wrr[i]);
}
/* reset all of them but '0' */
for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
tx_fifo_partitions[i]);
}
} else { /* Change the default settings */
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
}
}
/* configure only configured FIFOs */
val64 = 0; part0 = 0;
for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
int reg_half = i % 2;
int reg_num = i / 2;
if (hldev->config.fifo.queue[i].configured) {
int priority = hldev->config.fifo.queue[i].priority;
val64 |=
vBIT((hldev->config.fifo.queue[i].max-1),
(((reg_half) * 32) + 19),
13) | vBIT(priority, (((reg_half)*32) + 5), 3);
}
/* NOTE: the register is written once both u64 halves have
* been accumulated, i.e. on every second (odd) FIFO index;
* partition '0' is deferred and written last */
if (reg_half) {
if (reg_num == 0) {
/* skip partition '0', must write it once at
* the end */
part0 = val64;
} else {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
val64, tx_fifo_partitions[reg_num]);
xge_debug_fifo(XGE_TRACE,
"fifo partition_%d at: "
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
reg_num, (unsigned long long)(ulong_t)
tx_fifo_partitions[reg_num],
(unsigned long long)val64);
}
val64 = 0;
}
}
part0 |= BIT(0); /* to enable the FIFO partition. */
__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)part0,
tx_fifo_partitions[0]);
xge_os_wmb();
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
tx_fifo_partitions[0]);
xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
(unsigned long long)(ulong_t)
tx_fifo_partitions[0],
(unsigned long long) part0);
/*
* Initialize the Tx_PA_CONFIG register to ignore packet
* integrity errors (frame, SNAP OUI, LLC CTRL, and L2 errors).
*/
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->tx_pa_cfg);
val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->tx_pa_cfg);
/*
* Assign MSI-X vectors
*/
for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
xge_list_t *item;
xge_hal_channel_t *channel = NULL;
if (!hldev->config.fifo.queue[i].configured ||
!hldev->config.fifo.queue[i].intr_vector ||
hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
continue;
/* find channel */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t,
item);
if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
tmp->post_qid == i) {
channel = tmp;
break;
}
}
if (channel) {
xge_hal_channel_msix_set(channel,
hldev->config.fifo.queue[i].intr_vector);
}
}
xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
}
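/*
 * A sketch of the partition-register packing performed above, for the
 * hypothetical case of FIFO 0 configured with max = 1024 TxDLs at
 * priority 0 (reg_half = 0, reg_num = 0). vBIT(val, loc, sz) places
 * val into a field of width sz at bit offset loc, counted from the
 * most significant bit:
 */
#if 0
val64 |= vBIT(1023, 19, 13)	/* length-1 field, bits 19..31 */
    | vBIT(0, 5, 3);		/* priority field, bits 5..7 */
/* FIFO 1 would land in the upper half, at offsets 32+19 and 32+5 */
#endif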
#ifdef XGE_HAL_ALIGN_XMIT
void
__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
txdl_priv = __hal_fifo_txdl_priv(txdp);
if (txdl_priv->align_dma_addr != 0) {
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
}
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
txdl_priv->align_vaddr = NULL;
}
}
xge_hal_status_e
__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_assert(txdp);
txdl_priv = __hal_fifo_txdl_priv(txdp);
/* allocate alignment DMA-buffer */
txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev,
fifo->align_size,
XGE_OS_DMA_CACHELINE_ALIGNED |
XGE_OS_DMA_STREAMING,
&txdl_priv->align_dma_handle,
&txdl_priv->align_dma_acch);
if (txdl_priv->align_vaddr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
/* map it */
txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
__hal_fifo_dtr_align_free_unmap(channelh, dtrh);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
return XGE_HAL_OK;
}
#endif

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -0,0 +1,436 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : hal-mm.c
*
* Description: chipset memory pool object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xgehal-mm.h>
#include <dev/nxge/include/xge-debug.h>
/*
* __hal_mempool_grow
*
* Grow the mempool by up to %num_allocate memblocks; the number actually
* allocated is returned via %num_allocated.
*/
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
int *num_allocated)
{
int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
int n_items = mempool->items_per_memblock;
*num_allocated = 0;
if ((mempool->memblocks_allocated + num_allocate) >
mempool->memblocks_max) {
xge_debug_mm(XGE_ERR, "%s",
"__hal_mempool_grow: can grow anymore");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
for (i = mempool->memblocks_allocated;
i < mempool->memblocks_allocated + num_allocate; i++) {
int j;
int is_last =
((mempool->memblocks_allocated+num_allocate-1) == i);
xge_hal_mempool_dma_t *dma_object =
mempool->memblocks_dma_arr + i;
void *the_memblock;
int dma_flags;
dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
dma_flags |= XGE_OS_DMA_STREAMING;
#endif
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
mempool->memblock_size,
dma_flags,
&dma_object->handle,
&dma_object->acc_handle);
if (mempool->memblocks_arr[i] == NULL) {
xge_debug_mm(XGE_ERR,
"memblock[%d]: out of DMA memory", i);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_arr[i],
mempool->memblock_size);
the_memblock = mempool->memblocks_arr[i];
/* allocate memblock's private part. Each DMA memblock
* has a space allocated for item's private usage upon
* mempool's user request. Each time mempool grows, it will
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
xge_debug_mm(XGE_ERR,
"memblock_priv[%d]: out of virtual memory, "
"requested %d(%d:%d) bytes", i,
mempool->items_priv_size * n_items,
mempool->items_priv_size, n_items);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_priv_arr[i],
mempool->items_priv_size * n_items);
/* map memblock to physical memory */
dma_object->addr = xge_os_dma_map(mempool->pdev,
dma_object->handle,
the_memblock,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
XGE_OS_DMA_CONSISTENT
#else
XGE_OS_DMA_STREAMING
#endif
);
if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
/* fill the items hash array */
for (j=0; j<n_items; j++) {
int index = i*n_items + j;
if (first_time && index >= mempool->items_initial) {
break;
}
mempool->items_arr[index] =
((char *)the_memblock + j*mempool->item_size);
/* let the caller do more work on each item */
if (mempool->item_func_alloc != NULL) {
xge_hal_status_e status;
if ((status = mempool->item_func_alloc(
mempool,
the_memblock,
i,
dma_object,
mempool->items_arr[index],
index,
is_last,
mempool->userdata)) != XGE_HAL_OK) {
if (mempool->item_func_free != NULL) {
int k;
for (k=0; k<j; k++) {
index = i*n_items + k;
(void)mempool->item_func_free(
mempool, the_memblock,
i, dma_object,
mempool->items_arr[index],
index, is_last,
mempool->userdata);
}
}
xge_os_free(mempool->pdev,
mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_unmap(mempool->pdev,
dma_object->handle,
dma_object->addr,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return status;
}
}
mempool->items_current = index + 1;
}
xge_debug_mm(XGE_TRACE,
"memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
"dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
(unsigned long long)(ulong_t)mempool->memblocks_arr[i],
(unsigned long long)dma_object->addr);
(*num_allocated)++;
if (first_time && mempool->items_current ==
mempool->items_initial) {
break;
}
}
/* increment actual number of allocated memblocks */
mempool->memblocks_allocated += *num_allocated;
return XGE_HAL_OK;
}
/*
* __hal_mempool_create
* @memblock_size: Size in bytes of each memory block.
* @items_initial: Number of items to pre-allocate at creation time.
* @items_max: Maximum number of items the pool may ever hold.
* @item_size: Size in bytes of each item.
* @item_func_alloc: Optional per-item callback invoked at allocation time.
*
* This function creates a memory pool object. The pool may grow but will
* never shrink. The pool consists of a number of dynamically allocated
* blocks, together large enough to hold %items_initial number of items.
* Memory is DMA-able, but the client must map/unmap before interoperating
* with the device.
* See also: xge_os_dma_map(), xge_os_dma_unmap(), xge_hal_status_e{}.
*/
xge_hal_mempool_t*
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
int items_priv_size, int items_initial, int items_max,
xge_hal_mempool_item_f item_func_alloc,
xge_hal_mempool_item_f item_func_free, void *userdata)
{
xge_hal_status_e status;
int memblocks_to_allocate;
xge_hal_mempool_t *mempool;
int allocated;
if (memblock_size < item_size) {
xge_debug_mm(XGE_ERR,
"memblock_size %d < item_size %d: misconfiguration",
memblock_size, item_size);
return NULL;
}
mempool = (xge_hal_mempool_t *) \
xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
if (mempool == NULL) {
xge_debug_mm(XGE_ERR, "mempool allocation failure");
return NULL;
}
xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));
mempool->pdev = pdev;
mempool->memblock_size = memblock_size;
mempool->items_max = items_max;
mempool->items_initial = items_initial;
mempool->item_size = item_size;
mempool->items_priv_size = items_priv_size;
mempool->item_func_alloc = item_func_alloc;
mempool->item_func_free = item_func_free;
mempool->userdata = userdata;
mempool->memblocks_allocated = 0;
mempool->items_per_memblock = memblock_size / item_size;
mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
/* allocate array of memblocks */
mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_arr,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of private parts of items per memblocks */
mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_priv_arr,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of memblocks DMA objects */
mempool->memblocks_dma_arr =
(xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
if (mempool->memblocks_dma_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_dma_arr,
sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
/* allocate hash array of items */
mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->items_max);
if (mempool->items_arr == NULL) {
xge_debug_mm(XGE_ERR, "items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);
mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->items_max);
if (mempool->shadow_items_arr == NULL) {
xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->shadow_items_arr,
sizeof(void *) * mempool->items_max);
/* calculate initial number of memblocks */
memblocks_to_allocate = (mempool->items_initial +
mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
"%d items per memblock", memblocks_to_allocate,
mempool->items_per_memblock);
/* pre-allocate the mempool */
status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
sizeof(void*) * mempool->items_max);
if (status != XGE_HAL_OK) {
xge_debug_mm(XGE_ERR, "mempool_grow failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_debug_mm(XGE_TRACE,
"total: allocated %dk of DMA-capable memory",
mempool->memblock_size * allocated / 1024);
return mempool;
}
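/*
 * Illustrative sketch (not part of the HAL): creating and destroying a
 * small pool. The sizes here are hypothetical; real callers, such as
 * __hal_fifo_open(), pass the PCI device handle and per-item alloc/free
 * callbacks.
 */
#if 0
xge_hal_mempool_t *mp;

mp = __hal_mempool_create(pdev,
	8192,		/* memblock_size */
	512,		/* item_size */
	64,		/* items_priv_size */
	16,		/* items_initial */
	64,		/* items_max */
	NULL, NULL,	/* no per-item callbacks */
	NULL);		/* userdata */
if (mp == NULL)
	xge_debug_mm(XGE_ERR, "%s", "example pool creation failed");
/* ... use the pool ... */
__hal_mempool_destroy(mp);
#endif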
/*
* __hal_mempool_destroy
*/
void
__hal_mempool_destroy(xge_hal_mempool_t *mempool)
{
int i, j;
for (i=0; i<mempool->memblocks_allocated; i++) {
xge_hal_mempool_dma_t *dma_object;
xge_assert(mempool->memblocks_arr[i]);
xge_assert(mempool->memblocks_dma_arr + i);
dma_object = mempool->memblocks_dma_arr + i;
for (j=0; j<mempool->items_per_memblock; j++) {
int index = i*mempool->items_per_memblock + j;
/* skip the last, partially filled (if any) memblock */
if (index >= mempool->items_current) {
break;
}
/* let the caller do more work on each item */
if (mempool->item_func_free != NULL) {
mempool->item_func_free(mempool,
mempool->memblocks_arr[i],
i, dma_object,
mempool->shadow_items_arr[index],
index, /* unused */ -1,
mempool->userdata);
}
}
xge_os_dma_unmap(mempool->pdev,
dma_object->handle, dma_object->addr,
mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
mempool->items_priv_size * mempool->items_per_memblock);
xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
mempool->memblock_size, &dma_object->acc_handle,
&dma_object->handle);
}
if (mempool->items_arr) {
xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
mempool->items_max);
}
if (mempool->shadow_items_arr) {
xge_os_free(mempool->pdev, mempool->shadow_items_arr,
sizeof(void*) * mempool->items_max);
}
if (mempool->memblocks_dma_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
sizeof(xge_hal_mempool_dma_t) *
mempool->memblocks_max);
}
if (mempool->memblocks_priv_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
sizeof(void*) * mempool->memblocks_max);
}
if (mempool->memblocks_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_arr,
sizeof(void*) * mempool->memblocks_max);
}
xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));
}


@ -0,0 +1,852 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xgehal-ring-fp.c
*
* Description: HAL Rx ring object functionality (fast path)
*
* Created: 10 June 2004
*/
#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-ring.h>
#endif
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
xge_hal_ring_rxd_priv_t *rxd_priv;
xge_assert(rxdp);
#if defined(XGE_HAL_USE_5B_MODE)
xge_assert(ring);
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)dtrh;
#if defined (XGE_OS_PLATFORM_64BIT)
int memblock_idx = rxdp_5->host_control >> 16;
int i = rxdp_5->host_control & 0xFFFF;
rxd_priv = (xge_hal_ring_rxd_priv_t *)
((char*)ring->mempool->memblocks_priv_arr[memblock_idx] + ring->rxd_priv_size * i);
#else
/* 32-bit case */
rxd_priv = (xge_hal_ring_rxd_priv_t *)rxdp_5->host_control;
#endif
} else
#endif
{
rxd_priv = (xge_hal_ring_rxd_priv_t *)
(ulong_t)rxdp->host_control;
}
xge_assert(rxd_priv);
xge_assert(rxd_priv->dma_object);
xge_assert(rxd_priv->dma_object->handle == rxd_priv->dma_handle);
xge_assert(rxd_priv->dma_object->addr + rxd_priv->dma_offset ==
rxd_priv->dma_addr);
return rxd_priv;
}
__HAL_STATIC_RING __HAL_INLINE_RING int
__hal_ring_block_memblock_idx(xge_hal_ring_block_t *block)
{
return (int)*((u64 *)(void *)((char *)block +
XGE_HAL_RING_MEMBLOCK_IDX_OFFSET));
}
__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_memblock_idx_set(xge_hal_ring_block_t*block, int memblock_idx)
{
*((u64 *)(void *)((char *)block +
XGE_HAL_RING_MEMBLOCK_IDX_OFFSET)) =
memblock_idx;
}
__HAL_STATIC_RING __HAL_INLINE_RING dma_addr_t
__hal_ring_block_next_pointer(xge_hal_ring_block_t *block)
{
return (dma_addr_t)*((u64 *)(void *)((char *)block +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET));
}
__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t *block,
dma_addr_t dma_next)
{
*((u64 *)(void *)((char *)block +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
}
/**
* xge_hal_ring_dtr_private - Get ULD private per-descriptor data.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Returns: private ULD info associated with the descriptor.
* ULD requests per-descriptor space via xge_hal_channel_open().
*
* See also: xge_hal_fifo_dtr_private().
* Usage: See ex_rx_compl{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
return (char *)__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, dtrh) +
sizeof(xge_hal_ring_rxd_priv_t);
}
/**
* xge_hal_ring_dtr_reserve - Reserve ring descriptor.
* @channelh: Channel handle.
* @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
* with a valid handle.
*
* Reserve Rx descriptor for the subsequent filling-in (by upper layer
* driver (ULD)) and posting on the corresponding channel (@channelh)
* via xge_hal_ring_dtr_post().
*
* Returns: XGE_HAL_OK - success.
* XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
*
* See also: xge_hal_fifo_dtr_reserve(), xge_hal_ring_dtr_free(),
* xge_hal_fifo_dtr_reserve_sp(), xge_hal_status_e{}.
* Usage: See ex_post_all_rx{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
xge_hal_status_e status;
#if defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
unsigned long flags;
#endif
#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
flags);
#endif
status = __hal_channel_dtr_alloc(channelh, dtrh);
#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
flags);
#endif
if (status == XGE_HAL_OK) {
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
/* instead of memset: reset this RxD */
rxdp->control_1 = rxdp->control_2 = 0;
#if defined(XGE_OS_MEMORY_CHECK)
__hal_ring_rxd_priv((xge_hal_ring_t *) channelh, rxdp)->allocated = 1;
#endif
}
return status;
}
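/*
 * A sketch of the reserve/set/post cycle that the doc comments refer to
 * as ex_post_all_rx{} (hypothetical ULD code; my_alloc_mapped_rx_buffer()
 * and its device DMA mapping are assumed to exist in the ULD):
 */
#if 0
xge_hal_dtr_h dtr;
dma_addr_t dma;
int size;

while (xge_hal_ring_dtr_reserve(channelh, &dtr) == XGE_HAL_OK) {
	dma = my_alloc_mapped_rx_buffer(&size);
	xge_hal_ring_dtr_1b_set(dtr, dma, size);
	xge_hal_ring_dtr_post(channelh, dtr);
}
#endif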
/**
* xge_hal_ring_dtr_info_get - Get extended information associated with
* a completed receive descriptor for 1b mode.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
*
* Retrieve extended information associated with a completed receive descriptor.
*
* See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
* xge_hal_ring_dtr_5b_get().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info)
{
/* cast to 1-buffer mode RxD: the code below relies on the fact
* that control_1 and control_2 are formatted the same way.. */
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
/* These fields are Herc-only; reading them imposes a few extra
* cycles on Xena and/or when RTH is not enabled.
* Alternatively, one could check
* xge_hal_device_check_id(), hldev->config.rth_en, queue->rth_en */
ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
ext_info->rth_spdm_hit =
XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
ext_info->rth_hash_type =
XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
ext_info->rth_value = XGE_HAL_RXD_1_GET_RTH_VALUE(rxdp->control_2);
}
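/*
 * A sketch of how a ULD typically consumes the extended info above,
 * assuming the XGE_HAL_FRAME_PROTO_TCP_OR_UDP, XGE_HAL_L3_CKSUM_OK and
 * XGE_HAL_L4_CKSUM_OK constants from the ring header (hand-off to the
 * stack elided):
 */
#if 0
xge_hal_dtr_info_t ext_info;

xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
if ((ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
    ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
    ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
	/* the hardware validated both checksums */
}
#endif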
/**
* xge_hal_ring_dtr_info_nb_get - Get extended information associated
* with a completed receive descriptor for 3b or 5b
* modes.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @ext_info: See xge_hal_dtr_info_t{}. Returned by HAL.
*
* Retrieve extended information associated with a completed receive descriptor.
*
* See also: xge_hal_dtr_info_t{}, xge_hal_ring_dtr_1b_get(),
* xge_hal_ring_dtr_5b_get().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_nb_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info)
{
/* cast to 1-buffer mode RxD: the code below relies on the fact
* that control_1 and control_2 are formatted the same way.. */
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
ext_info->l3_cksum = XGE_HAL_RXD_GET_L3_CKSUM(rxdp->control_1);
ext_info->l4_cksum = XGE_HAL_RXD_GET_L4_CKSUM(rxdp->control_1);
ext_info->frame = XGE_HAL_RXD_GET_FRAME_TYPE(rxdp->control_1);
ext_info->proto = XGE_HAL_RXD_GET_FRAME_PROTO(rxdp->control_1);
ext_info->vlan = XGE_HAL_RXD_GET_VLAN_TAG(rxdp->control_2);
/* These fields are Herc-only; a few extra cycles imposed on Xena
* and/or when RTH is not enabled. Same comment as above. */
ext_info->rth_it_hit = XGE_HAL_RXD_GET_RTH_IT_HIT(rxdp->control_1);
ext_info->rth_spdm_hit =
XGE_HAL_RXD_GET_RTH_SPDM_HIT(rxdp->control_1);
ext_info->rth_hash_type =
XGE_HAL_RXD_GET_RTH_HASH_TYPE(rxdp->control_1);
ext_info->rth_value = (u32)rxdp->buffer0_ptr;
}
/**
* xge_hal_ring_dtr_1b_set - Prepare 1-buffer-mode descriptor.
* @dtrh: Descriptor handle.
* @dma_pointer: DMA address of a single receive buffer this descriptor
* should carry. Note that by the time
* xge_hal_ring_dtr_1b_set
* is called, the receive buffer should be already mapped
* to the corresponding Xframe device.
* @size: Size of the receive @dma_pointer buffer.
*
* Prepare 1-buffer-mode Rx descriptor for posting
* (via xge_hal_ring_dtr_post()).
*
* This inline helper-function does not return any parameters and always
* succeeds.
*
* See also: xge_hal_ring_dtr_3b_set(), xge_hal_ring_dtr_5b_set().
* Usage: See ex_post_all_rx{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
rxdp->buffer0_ptr = dma_pointer;
rxdp->control_2 &= (~XGE_HAL_RXD_1_MASK_BUFFER0_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_1_SET_BUFFER0_SIZE(size);
xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_1b_set: rxdp %p control_2 %p buffer0_ptr %p",
(xge_hal_ring_rxd_1_t *)dtrh,
rxdp->control_2,
rxdp->buffer0_ptr);
}
/**
* xge_hal_ring_dtr_1b_get - Get data from the completed 1-buf
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @dma_pointer: DMA address of a single receive buffer _this_ descriptor
* carries. Returned by HAL.
* @pkt_length: Length (in bytes) of the data in the buffer pointed by
* @dma_pointer. Returned by HAL.
*
* Retrieve protocol data from the completed 1-buffer-mode Rx descriptor.
* This inline helper-function uses completed descriptor to populate receive
* buffer pointer and other "out" parameters. The function always succeeds.
*
* See also: xge_hal_ring_dtr_3b_get(), xge_hal_ring_dtr_5b_get().
* Usage: See ex_rx_compl{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t *dma_pointer, int *pkt_length)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
*pkt_length = XGE_HAL_RXD_1_GET_BUFFER0_SIZE(rxdp->control_2);
*dma_pointer = rxdp->buffer0_ptr;
((xge_hal_channel_t *)channelh)->poll_bytes += *pkt_length;
}
/**
* xge_hal_ring_dtr_3b_set - Prepare 3-buffer-mode descriptor.
* @dtrh: Descriptor handle.
* @dma_pointers: Array of DMA addresses. Contains exactly 3 receive buffers
* _this_ descriptor should carry.
* Note that by the time xge_hal_ring_dtr_3b_set
* is called, the receive buffers should be mapped
* to the corresponding Xframe device.
* @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
* buffer from @dma_pointers.
*
* Prepare 3-buffer-mode Rx descriptor for posting (via
* xge_hal_ring_dtr_post()).
* This inline helper-function does not return any parameters and always
* succeeds.
*
* See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_5b_set().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[])
{
xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
rxdp->buffer0_ptr = dma_pointers[0];
rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER0_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER0_SIZE(sizes[0]);
rxdp->buffer1_ptr = dma_pointers[1];
rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER1_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER1_SIZE(sizes[1]);
rxdp->buffer2_ptr = dma_pointers[2];
rxdp->control_2 &= (~XGE_HAL_RXD_3_MASK_BUFFER2_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_3_SET_BUFFER2_SIZE(sizes[2]);
}
/**
* xge_hal_ring_dtr_3b_get - Get data from the completed 3-buf
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @dma_pointers: DMA addresses of the 3 receive buffers _this_ descriptor
* carries. The first two buffers contain ethernet and
* (IP + transport) headers. The 3rd buffer contains packet
* data.
* Returned by HAL.
* @sizes: Array of receive buffer sizes. Contains 3 sizes: one size per
* buffer from @dma_pointers. Returned by HAL.
*
* Retrieve protocol data from the completed 3-buffer-mode Rx descriptor.
* This inline helper-function uses completed descriptor to populate receive
* buffer pointer and other "out" parameters. The function always succeeds.
*
* See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_5b_get().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[])
{
xge_hal_ring_rxd_3_t *rxdp = (xge_hal_ring_rxd_3_t *)dtrh;
dma_pointers[0] = rxdp->buffer0_ptr;
sizes[0] = XGE_HAL_RXD_3_GET_BUFFER0_SIZE(rxdp->control_2);
dma_pointers[1] = rxdp->buffer1_ptr;
sizes[1] = XGE_HAL_RXD_3_GET_BUFFER1_SIZE(rxdp->control_2);
dma_pointers[2] = rxdp->buffer2_ptr;
sizes[2] = XGE_HAL_RXD_3_GET_BUFFER2_SIZE(rxdp->control_2);
((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
sizes[2];
}
/**
* xge_hal_ring_dtr_5b_set - Prepare 5-buffer-mode descriptor.
* @dtrh: Descriptor handle.
* @dma_pointers: Array of DMA addresses. Contains exactly 5 receive buffers
* _this_ descriptor should carry.
* Note that by the time xge_hal_ring_dtr_5b_set
* is called, the receive buffers should be mapped
* to the corresponding Xframe device.
* @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
* buffer from @dma_pointers.
*
* Prepare 5-buffer-mode Rx descriptor for posting (via
* xge_hal_ring_dtr_post()).
* This inline helper-function does not return any parameters and always
* succeeds.
*
* See also: xge_hal_ring_dtr_1b_set(), xge_hal_ring_dtr_3b_set().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[])
{
xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
rxdp->buffer0_ptr = dma_pointers[0];
rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER0_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER0_SIZE(sizes[0]);
rxdp->buffer1_ptr = dma_pointers[1];
rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER1_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER1_SIZE(sizes[1]);
rxdp->buffer2_ptr = dma_pointers[2];
rxdp->control_2 &= (~XGE_HAL_RXD_5_MASK_BUFFER2_SIZE);
rxdp->control_2 |= XGE_HAL_RXD_5_SET_BUFFER2_SIZE(sizes[2]);
rxdp->buffer3_ptr = dma_pointers[3];
rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER3_SIZE);
rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER3_SIZE(sizes[3]);
rxdp->buffer4_ptr = dma_pointers[4];
rxdp->control_3 &= (~XGE_HAL_RXD_5_MASK_BUFFER4_SIZE);
rxdp->control_3 |= XGE_HAL_RXD_5_SET_BUFFER4_SIZE(sizes[4]);
}
/**
* xge_hal_ring_dtr_5b_get - Get data from the completed 5-buf
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
* @dma_pointers: DMA addresses of the 5 receive buffers _this_ descriptor
* carries. The first 4 buffers contain L2 (ethernet) through
* L5 headers. The 5th buffer contains received (application)
* data. Returned by HAL.
* @sizes: Array of receive buffer sizes. Contains 5 sizes: one size per
* buffer from @dma_pointers. Returned by HAL.
*
* Retrieve protocol data from the completed 5-buffer-mode Rx descriptor.
* This inline helper-function uses completed descriptor to populate receive
* buffer pointer and other "out" parameters. The function always succeeds.
*
* See also: xge_hal_ring_dtr_1b_get(), xge_hal_ring_dtr_3b_get().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[])
{
xge_hal_ring_rxd_5_t *rxdp = (xge_hal_ring_rxd_5_t *)dtrh;
dma_pointers[0] = rxdp->buffer0_ptr;
sizes[0] = XGE_HAL_RXD_5_GET_BUFFER0_SIZE(rxdp->control_2);
dma_pointers[1] = rxdp->buffer1_ptr;
sizes[1] = XGE_HAL_RXD_5_GET_BUFFER1_SIZE(rxdp->control_2);
dma_pointers[2] = rxdp->buffer2_ptr;
sizes[2] = XGE_HAL_RXD_5_GET_BUFFER2_SIZE(rxdp->control_2);
dma_pointers[3] = rxdp->buffer3_ptr;
sizes[3] = XGE_HAL_RXD_5_GET_BUFFER3_SIZE(rxdp->control_3);
dma_pointers[4] = rxdp->buffer4_ptr;
sizes[4] = XGE_HAL_RXD_5_GET_BUFFER4_SIZE(rxdp->control_3);
((xge_hal_channel_t *)channelh)->poll_bytes += sizes[0] + sizes[1] +
sizes[2] + sizes[3] + sizes[4];
}
/**
* xge_hal_ring_dtr_pre_post - Prepare descriptor for posting.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Mark the descriptor as not completed and place it on the channel's
* work array; this is the first half of the two-phase post operation.
* See also: xge_hal_ring_dtr_post_post(), xge_hal_ring_dtr_post().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_pre_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_ring_rxd_priv_t *priv;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#endif
#if defined(XGE_HAL_RX_MULTI_POST_IRQ)
unsigned long flags;
#endif
rxdp->control_2 |= XGE_HAL_RXD_NOT_COMPLETED;
#ifdef XGE_DEBUG_ASSERT
/* make sure Xena overwrites the (illegal) t_code on completion */
XGE_HAL_RXD_SET_T_CODE(rxdp->control_1, XGE_HAL_RXD_T_CODE_UNUSED_C);
#endif
xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_pre_post: rxd 0x"XGE_OS_LLXFMT" posted %d post_qid %d",
(unsigned long long)(ulong_t)dtrh,
((xge_hal_ring_t *)channelh)->channel.post_index,
((xge_hal_ring_t *)channelh)->channel.post_qid);
#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
flags);
#endif
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_HAL_RING_ENFORCE_ORDER)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (channel->post_index != 0) {
xge_hal_dtr_h prev_dtrh;
xge_hal_ring_rxd_priv_t *rxdp_priv;
rxdp_priv = __hal_ring_rxd_priv(channelh, rxdp);
prev_dtrh = channel->work_arr[channel->post_index - 1];
if (prev_dtrh != NULL &&
(rxdp_priv->dma_offset & (~0xFFF)) !=
rxdp_priv->dma_offset) {
xge_assert((char *)prev_dtrh +
((xge_hal_ring_t*)channel)->rxd_size == dtrh);
}
}
}
#endif
__hal_channel_dtr_post(channelh, dtrh);
#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
flags);
#endif
}
/**
* xge_hal_ring_dtr_post_post - Complete posting of a descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Transfer ownership of the descriptor to the Xframe device, syncing it
* to the device when streaming DMA is in use; this is the second half of
* the two-phase post operation.
* See also: xge_hal_ring_dtr_pre_post(), xge_hal_ring_dtr_post().
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_ring_rxd_priv_t *priv;
#endif
/* do POST */
rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
priv = __hal_ring_rxd_priv(ring, rxdp);
xge_os_dma_sync(ring->channel.pdev,
priv->dma_handle, priv->dma_addr,
priv->dma_offset, ring->rxd_size,
XGE_OS_DMA_DIR_TODEVICE);
#endif
xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post: rxdp %p control_1 %p",
(xge_hal_ring_rxd_1_t *)dtrh,
rxdp->control_1);
if (ring->channel.usage_cnt > 0)
ring->channel.usage_cnt--;
}
/**
* xge_hal_ring_dtr_post_post_wmb.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Same as xge_hal_ring_dtr_post_post(), but additionally issues a write
* memory barrier before transferring ownership to the device.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_ring_rxd_priv_t *priv;
#endif
/* Do memory barrier before changing the ownership */
xge_os_wmb();
/* do POST */
rxdp->control_1 |= XGE_HAL_RXD_POSTED_4_XFRAME;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
priv = __hal_ring_rxd_priv(ring, rxdp);
xge_os_dma_sync(ring->channel.pdev,
priv->dma_handle, priv->dma_addr,
priv->dma_offset, ring->rxd_size,
XGE_OS_DMA_DIR_TODEVICE);
#endif
if (ring->channel.usage_cnt > 0)
ring->channel.usage_cnt--;
xge_debug_ring(XGE_TRACE, "xge_hal_ring_dtr_post_post_wmb: rxdp %p control_1 %p rxds_with_host %d",
(xge_hal_ring_rxd_1_t *)dtrh,
rxdp->control_1, ring->channel.usage_cnt);
}
/**
* xge_hal_ring_dtr_post - Post descriptor on the ring channel.
* @channelh: Channel handle.
* @dtrh: Descriptor obtained via xge_hal_ring_dtr_reserve().
*
* Post descriptor on the 'ring' type channel.
* Prior to posting the descriptor should be filled in accordance with
* Host/Xframe interface specification for a given service (LL, etc.).
*
* See also: xge_hal_fifo_dtr_post_many(), xge_hal_fifo_dtr_post().
* Usage: See ex_post_all_rx{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_ring_dtr_pre_post(channelh, dtrh);
xge_hal_ring_dtr_post_post(channelh, dtrh);
}
/**
* xge_hal_ring_dtr_next_completed - Get the _next_ completed
* descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle. Returned by HAL.
* @t_code: Transfer code, as per Xframe User Guide,
* Receive Descriptor Format. Returned by HAL.
*
* Retrieve the _next_ completed descriptor.
* HAL uses the channel callback (*xge_hal_channel_callback_f) to notify
* the upper-layer driver (ULD) of new completed descriptors. After that
* the ULD can use xge_hal_ring_dtr_next_completed to retrieve the rest
* of the completions (the very first completion is passed by HAL via
* xge_hal_channel_callback_f).
*
* Implementation-wise, the upper-layer driver is free to call
* xge_hal_ring_dtr_next_completed either immediately from inside the
* channel callback, or in a deferred fashion and separate (from HAL)
* context.
*
* Non-zero @t_code means failure to fill-in receive buffer(s)
* of the descriptor.
* For instance, parity error detected during the data transfer.
* In this case Xframe will complete the descriptor and indicate
* for the host that the received data is not to be used.
* For details please refer to Xframe User Guide.
*
* Returns: XGE_HAL_OK - success.
* XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*
* See also: xge_hal_channel_callback_f{},
* xge_hal_fifo_dtr_next_completed(), xge_hal_status_e{}.
* Usage: See ex_rx_compl{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code)
{
xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
xge_hal_ring_rxd_priv_t *priv;
#endif
__hal_channel_dtr_try_complete(ring, dtrh);
rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
if (rxdp == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
/* Note: 24 bytes at most covers:
* - Control_1 and Control_2
* - Control_3 in case of 5-buffer mode
*
* This is the only length that needs to be invalidated
* for this type of channel. */
priv = __hal_ring_rxd_priv(ring, rxdp);
xge_os_dma_sync(ring->channel.pdev,
priv->dma_handle, priv->dma_addr,
priv->dma_offset, 24,
XGE_OS_DMA_DIR_FROMDEVICE);
#endif
/* check whether it is not the end */
if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
!(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifndef XGE_HAL_IRQ_POLLING
if (++ring->cmpl_cnt > ring->indicate_max_pkts) {
/* reset it, since we don't want to return
* garbage to the ULD */
*dtrh = 0;
return XGE_HAL_COMPLETIONS_REMAIN;
}
#endif
#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_assert(((xge_hal_ring_rxd_5_t *)
rxdp)->host_control!=0);
}
#endif
#else
xge_assert(rxdp->host_control!=0);
#endif
#endif
__hal_channel_dtr_complete(ring);
*t_code = (u8)XGE_HAL_RXD_GET_T_CODE(rxdp->control_1);
/* see XGE_HAL_RXD_SET_T_CODE() above */
xge_assert(*t_code != XGE_HAL_RXD_T_CODE_UNUSED_C);
xge_debug_ring(XGE_TRACE,
"compl_index %d post_qid %d t_code %d rxd 0x"XGE_OS_LLXFMT,
((xge_hal_channel_t*)ring)->compl_index,
((xge_hal_channel_t*)ring)->post_qid, *t_code,
(unsigned long long)(ulong_t)rxdp);
ring->channel.usage_cnt++;
if (ring->channel.stats.usage_max < ring->channel.usage_cnt)
ring->channel.stats.usage_max = ring->channel.usage_cnt;
return XGE_HAL_OK;
}
/* reset it, since we don't want to return
* garbage to the ULD */
*dtrh = 0;
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
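/*
 * A sketch of the completion loop that the doc comments call
 * ex_rx_compl{} (hypothetical ULD code; buffer hand-off and
 * replenishment elided):
 */
#if 0
xge_hal_dtr_h dtr;
dma_addr_t dma;
int pkt_length;
u8 t_code;

while (xge_hal_ring_dtr_next_completed(channelh, &dtr, &t_code) ==
    XGE_HAL_OK) {
	xge_hal_ring_dtr_1b_get(channelh, dtr, &dma, &pkt_length);
	if (t_code == 0) {
		/* hand pkt_length bytes at dma up the stack */
	}
	xge_hal_ring_dtr_free(channelh, dtr);
	/* then reserve/set/post a fresh buffer, as in ex_post_all_rx{} */
}
#endif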
/**
* xge_hal_ring_dtr_free - Free descriptor.
* @channelh: Channel handle.
* @dtrh: Descriptor handle.
*
* Free the reserved descriptor. This operation is "symmetrical" to
* xge_hal_ring_dtr_reserve. The "free-ing" completes the descriptor's
* lifecycle.
*
* After free-ing (see xge_hal_ring_dtr_free()) the descriptor again can
* be:
*
* - reserved (xge_hal_ring_dtr_reserve);
*
* - posted (xge_hal_ring_dtr_post);
*
* - completed (xge_hal_ring_dtr_next_completed);
*
* - and recycled again (xge_hal_ring_dtr_free).
*
* For alternative state transitions and more details please refer to
* the design doc.
*
* See also: xge_hal_ring_dtr_reserve(), xge_hal_fifo_dtr_free().
* Usage: See ex_rx_compl{}.
*/
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ)
unsigned long flags;
#endif
#if defined(XGE_HAL_RX_MULTI_FREE)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
flags);
#endif
__hal_channel_dtr_free(channelh, dtrh);
#if defined(XGE_OS_MEMORY_CHECK)
__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtrh)->allocated = 0;
#endif
#if defined(XGE_HAL_RX_MULTI_FREE)
xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_RX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
flags);
#endif
}
/**
* xge_hal_ring_is_next_dtr_completed - Check if the next dtr is completed
* @channelh: Channel handle.
*
* Checks whether the _next_ completed descriptor is in host memory.
*
* Returns: XGE_HAL_OK - success.
* XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
* are currently available for processing.
*/
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh)
{
xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_dtr_h dtrh;
__hal_channel_dtr_try_complete(ring, &dtrh);
rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
if (rxdp == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
/* check whether it is not the end */
if (!(rxdp->control_2 & XGE_HAL_RXD_NOT_COMPLETED) &&
!(rxdp->control_1 & XGE_HAL_RXD_POSTED_4_XFRAME)) {
#ifdef XGE_DEBUG_ASSERT
#if defined(XGE_HAL_USE_5B_MODE)
#if !defined(XGE_OS_PLATFORM_64BIT)
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_assert(((xge_hal_ring_rxd_5_t *)
rxdp)->host_control!=0);
}
#endif
#else
xge_assert(rxdp->host_control!=0);
#endif
#endif
return XGE_HAL_OK;
}
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
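/*
 * A sketch of a polling-mode check (hypothetical ULD code), e.g. for
 * deciding whether to keep polling or re-arm interrupts:
 */
#if 0
if (xge_hal_ring_is_next_dtr_completed(channelh) == XGE_HAL_OK) {
	/* a completion is already in host memory: keep polling */
}
#endif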


@ -0,0 +1,669 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : hal-ring.c
*
* Description: Rx ring object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
void *item)
{
int memblock_idx;
void *memblock;
/* get owner memblock index */
memblock_idx = __hal_ring_block_memblock_idx(item);
/* get owner memblock by memblock index */
memblock = __hal_mempool_memblock(mempoolh, memblock_idx);
return (char*)item - (char*)memblock;
}
#endif
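/*
 * __hal_ring_item_dma_addr - Resolve a mempool item to its DMA address.
 * Finds the owning memblock and its DMA object, adds the item's byte
 * offset within that memblock, and returns the memblock's DMA handle
 * via @dma_handle.
 */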
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
pci_dma_h *dma_handle)
{
int memblock_idx;
void *memblock;
xge_hal_mempool_dma_t *memblock_dma_object;
ptrdiff_t dma_item_offset;
/* get owner memblock index */
memblock_idx = __hal_ring_block_memblock_idx((xge_hal_ring_block_t *) item);
/* get owner memblock by memblock index */
memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
memblock_idx);
/* get memblock DMA object by memblock index */
memblock_dma_object =
__hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
memblock_idx);
/* calculate offset in the memblock of this item */
dma_item_offset = (char*)item - (char*)memblock;
*dma_handle = memblock_dma_object->handle;
return memblock_dma_object->addr + dma_item_offset;
}
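/*
 * __hal_ring_rxdblock_link - Link RxD block @from to RxD block @to by
 * writing @to's DMA start address into @from's next-block pointer and,
 * on platforms that require it, syncing that pointer to the device.
 */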
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
xge_hal_ring_t *ring, int from, int to)
{
xge_hal_ring_block_t *to_item, *from_item;
dma_addr_t to_dma, from_dma;
pci_dma_h to_dma_handle, from_dma_handle;
/* get "from" RxD block */
from_item = (xge_hal_ring_block_t *)
__hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
xge_assert(from_item);
/* get "to" RxD block */
to_item = (xge_hal_ring_block_t *)
__hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
xge_assert(to_item);
/* get the DMA start address of the "to" RxD block */
to_dma = __hal_ring_item_dma_addr(mempoolh, to_item, &to_dma_handle);
/* set the "from" block's next pointer to the "to" block's
* DMA start address */
__hal_ring_block_next_pointer_set(from_item, to_dma);
/* get the "from" RxD block's DMA start address */
from_dma =
__hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
/* we must sync "from" RxD block, so hardware will see it */
xge_os_dma_sync(ring->channel.pdev,
from_dma_handle,
from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
__hal_ring_item_dma_offset(mempoolh, from_item) +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
sizeof(u64),
XGE_OS_DMA_DIR_TODEVICE);
#endif
xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
from, (unsigned long long)from_dma, to,
(unsigned long long)to_dma);
}
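/*
 * __hal_ring_mempool_item_alloc - Mempool item-initialization callback.
 * Formats one RxD block: records each RxD in reserved_rxds_arr (in
 * reverse order within the block, matching hardware traversal),
 * pre-formats the per-RxD private area and host_control, stores the
 * owning memblock index, and links the block into the ring chain.
 */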
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int i;
xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;
xge_assert(item);
xge_assert(ring);
/* format rxds array */
for (i=ring->rxds_per_block-1; i>=0; i--) {
void *rxdblock_priv;
xge_hal_ring_rxd_priv_t *rxd_priv;
xge_hal_ring_rxd_1_t *rxdp;
int reserve_index = index * ring->rxds_per_block + i;
int memblock_item_idx;
ring->reserved_rxds_arr[reserve_index] = (char *)item +
(ring->rxds_per_block - 1 - i) * ring->rxd_size;
/* Note: memblock_item_idx is index of the item within
* the memblock. For instance, in case of three RxD-blocks
* per memblock this value can be 0,1 or 2. */
rxdblock_priv =
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index, item,
&memblock_item_idx);
rxdp = (xge_hal_ring_rxd_1_t *)
ring->reserved_rxds_arr[reserve_index];
rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
((char*)rxdblock_priv + ring->rxd_priv_size * i);
/* pre-format per-RxD Ring's private */
rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
rxd_priv->dma_object = dma_object;
#endif
/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
xge_assert(memblock_index <= 0xFFFF);
xge_assert(i <= 0xFFFF);
/* store memblock's index */
rxdp_5->host_control = (u32)memblock_index << 16;
/* store index of memblock's private */
rxdp_5->host_control |= (u32)(memblock_item_idx *
ring->rxds_per_block + i);
#else
/* 32-bit case */
rxdp_5->host_control = (u32)rxd_priv;
#endif
} else {
/* 1b and 3b modes */
rxdp->host_control = (u64)(ulong_t)rxd_priv;
}
#else
/* 1b and 3b modes */
rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
}
__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);
if (is_last) {
/* link last one with first one */
__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
}
if (index > 0 ) {
/* link this RxD block with previous one */
__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
}
return XGE_HAL_OK;
}
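/*
 * __hal_ring_initial_replenish - Reserve every available descriptor,
 * let the ULD initialize each one through its dtr_init callback, and
 * post them all, so the ring is full before traffic starts. Aborts the
 * channel on the first initialization failure.
 */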
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
xge_hal_channel_reopen_e reopen)
{
xge_hal_dtr_h dtr;
while (xge_hal_channel_dtr_count(channel) > 0) {
xge_hal_status_e status;
status = xge_hal_ring_dtr_reserve(channel, &dtr);
xge_assert(status == XGE_HAL_OK);
if (channel->dtr_init) {
status = channel->dtr_init(channel,
dtr, channel->reserve_length,
channel->userdata,
reopen);
if (status != XGE_HAL_OK) {
xge_hal_ring_dtr_free(channel, dtr);
xge_hal_channel_abort(channel,
XGE_HAL_CHANNEL_OC_NORMAL);
return status;
}
}
xge_hal_ring_dtr_post(channel, dtr);
}
return XGE_HAL_OK;
}
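/*
 * __hal_ring_open - Open an Rx ring channel: allocate the reserve array
 * and the RxD-block mempool (whose item-alloc callback formats and
 * links the blocks), initialize the channel, and, if the ULD supplied
 * dtr_init, fill the ring via __hal_ring_initial_replenish().
 */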
xge_hal_status_e
__hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
{
xge_hal_status_e status;
xge_hal_device_t *hldev;
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_ring_queue_t *queue;
/* Note: at this point we have channel.devh and channel.pdev
* pre-set only! */
hldev = (xge_hal_device_t *)ring->channel.devh;
ring->config = &hldev->config.ring;
queue = &ring->config->queue[attr->post_qid];
ring->indicate_max_pkts = queue->indicate_max_pkts;
ring->buffer_mode = queue->buffer_mode;
xge_assert(queue->configured);
#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_lock_init(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_init_irq(&ring->channel.reserve_lock, hldev->irqh);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_lock_init(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_lock_init_irq(&ring->channel.post_lock, hldev->irqh);
#endif
ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
ring->rxd_priv_size =
sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;
/* how many RxDs can fit into one block. Depends on configured
* buffer_mode. */
ring->rxds_per_block = XGE_HAL_RING_RXDS_PER_BLOCK(queue->buffer_mode);
/* calculate actual RxD block private size */
ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
sizeof(void*) * queue->max * ring->rxds_per_block);
if (ring->reserved_rxds_arr == NULL) {
__hal_ring_close(channelh);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
ring->mempool = __hal_mempool_create(
hldev->pdev,
ring->config->memblock_size,
XGE_HAL_RING_RXDBLOCK_SIZE,
ring->rxdblock_priv_size,
queue->initial, queue->max,
__hal_ring_mempool_item_alloc,
NULL, /* nothing to free */
ring);
if (ring->mempool == NULL) {
__hal_ring_close(channelh);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
status = __hal_channel_initialize(channelh,
attr,
ring->reserved_rxds_arr,
queue->initial * ring->rxds_per_block,
queue->max * ring->rxds_per_block,
0 /* no threshold for ring! */);
if (status != XGE_HAL_OK) {
__hal_ring_close(channelh);
return status;
}
/* sanity check that everything formatted ok */
xge_assert(ring->reserved_rxds_arr[0] ==
(char *)ring->mempool->items_arr[0] +
(ring->rxds_per_block * ring->rxd_size - ring->rxd_size));
/* Note:
* Specifying dtr_init callback means two things:
* 1) dtrs need to be initialized by ULD at channel-open time;
* 2) dtrs need to be posted at channel-open time
* (that's what the initial_replenish() below does)
* Currently there is no case where 1) is done without 2).
*/
if (ring->channel.dtr_init) {
if ((status = __hal_ring_initial_replenish (
(xge_hal_channel_t *) channelh,
XGE_HAL_CHANNEL_OC_NORMAL) )
!= XGE_HAL_OK) {
__hal_ring_close(channelh);
return status;
}
}
/* initial replenish will increment the counter in its post() routine,
* we have to reset it */
ring->channel.usage_cnt = 0;
return XGE_HAL_OK;
}
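/*
 * __hal_ring_close - Undo __hal_ring_open(): destroy the mempool, free
 * the reserve array, terminate the channel, and destroy its locks.
 */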
void
__hal_ring_close(xge_hal_channel_h channelh)
{
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif
xge_assert(ring->channel.pdev);
queue = &ring->config->queue[ring->channel.post_qid];
if (ring->mempool) {
__hal_mempool_destroy(ring->mempool);
}
if (ring->reserved_rxds_arr) {
xge_os_free(ring->channel.pdev,
ring->reserved_rxds_arr,
sizeof(void*) * queue->max * ring->rxds_per_block);
}
__hal_channel_terminate(channelh);
#if defined(XGE_HAL_RX_MULTI_RESERVE)
xge_os_spin_lock_destroy(&ring->channel.reserve_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)
xge_os_spin_lock_destroy_irq(&ring->channel.reserve_lock, hldev->pdev);
#endif
#if defined(XGE_HAL_RX_MULTI_POST)
xge_os_spin_lock_destroy(&ring->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_os_spin_lock_destroy_irq(&ring->channel.post_lock, hldev->pdev);
#endif
}
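/*
 * __hal_ring_prc_enable - Point the ring's PRC (Packet Receive
 * Controller) at the first RxD block and enable it: program buffer
 * mode, RxD backoff interval, no-snoop and Herc-specific bits, then
 * configure Receive Protocol Assist (scatter mode, VLAN tag stripping).
 */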
void
__hal_ring_prc_enable(xge_hal_channel_h channelh)
{
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
xge_hal_pci_bar0_t *bar0;
u64 val64;
void *first_block;
int block_num;
xge_hal_ring_queue_t *queue;
pci_dma_h dma_handle;
xge_assert(ring);
xge_assert(ring->channel.pdev);
bar0 = (xge_hal_pci_bar0_t *) (void *)
((xge_hal_device_t *)ring->channel.devh)->bar0;
queue = &ring->config->queue[ring->channel.post_qid];
xge_assert(queue->buffer_mode == 1 ||
queue->buffer_mode == 3 ||
queue->buffer_mode == 5);
/* last block in fact becomes first. This is just the way it
* is filled up and linked by item_alloc() */
block_num = queue->initial;
first_block = __hal_mempool_item(ring->mempool, block_num - 1);
val64 = __hal_ring_item_dma_addr(ring->mempool,
first_block, &dma_handle);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);
xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
ring->channel.post_qid, (unsigned long long)val64);
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
!queue->rth_en) {
val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
}
val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;
val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
(hldev->config.pci_freq_mherz * queue->backoff_interval_us));
/* Beware: no snoop by the bridge if (no_snoop_bits) */
val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);
/* Herc: always use group_reads */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;
if (hldev->config.bimodal_interrupts)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
/* Configure Receive Protocol Assist */
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0, &bar0->rx_pa_cfg);
val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
/* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
val64 &= ~XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(1);
val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->rx_pa_cfg);
xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
ring->channel.post_qid, queue->buffer_mode);
}
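/*
 * __hal_ring_prc_disable - Clear RC_ENABLED in this ring's PRC control
 * register, stopping receive DMA for the queue.
 */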
void
__hal_ring_prc_disable(xge_hal_channel_h channelh)
{
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_pci_bar0_t *bar0;
u64 val64;
xge_assert(ring);
xge_assert(ring->channel.pdev);
bar0 = (xge_hal_pci_bar0_t *) (void *)
((xge_hal_device_t *)ring->channel.devh)->bar0;
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0,
&bar0->prc_ctrl_n[ring->channel.post_qid]);
val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
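/*
 * __hal_ring_hw_initialize - One-time Rx hardware setup: per-queue
 * priorities and DRAM sizes, default QoS-based steering (unless RTS
 * steering is configured), zero frame length for unconfigured queues,
 * MC-RLDRAM enable (with Herc fixups), and MSI-X vector assignment for
 * rings that requested one.
 */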
void
__hal_ring_hw_initialize(xge_hal_device_h devh)
{
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
u64 val64;
int i, j;
/* Rx DMA initialization. */
val64 = 0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
val64 |= vBIT(hldev->config.ring.queue[i].priority,
(5 + (i * 8)), 3);
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_priority);
xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
/* Configuring ring queues according to per-ring configuration */
val64 = 0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_cfg);
xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
if (!hldev->config.rts_qos_en &&
!hldev->config.rts_port_en &&
!hldev->config.rts_mac_en) {
/*
* Activate default (QoS-based) Rx steering
*/
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->rts_qos_steering);
for (j = 0; j < 8 /* QoS max */; j++)
{
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
{
if (!hldev->config.ring.queue[i].configured)
continue;
if (!hldev->config.ring.queue[i].rth_en)
val64 |= (BIT(i) >> (j*8));
}
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rts_qos_steering);
xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
}
/* Note: If a queue does not exist, it should be assigned a maximum
* length of zero. Otherwise, packet loss could occur.
* P. 4-4 User guide.
*
* All configured rings will be properly set at device open time
* by utilizing device_mtu_set() API call. */
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (hldev->config.ring.queue[i].configured)
continue;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
&bar0->rts_frm_len_n[i]);
}
#ifdef XGE_HAL_HERC_EMULATION
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
val64 |= 0x0000000000010000;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
((u8 *)bar0 + 0x2e60));
val64 |= 0x003a000000000000;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
xge_os_mdelay(2000);
#endif
/* now enabling MC-RLDRAM after setting MC_QUEUE sizes */
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->mc_rldram_mrs);
val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
XGE_HAL_MC_RLDRAM_MRS_ENABLE;
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
&bar0->mc_rldram_mrs);
xge_os_wmb();
__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
&bar0->mc_rldram_mrs);
/* RLDRAM initialization procedure requires 500us to complete */
xge_os_mdelay(1);
/* Temporary fixes for Herc RLDRAM */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->mc_rldram_ref_per_herc);
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->mc_rldram_mrs_herc);
xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
val64 = 0x0003570003010300ULL;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->mc_rldram_mrs_herc);
xge_os_mdelay(1);
}
/*
* Assign MSI-X vectors
*/
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
xge_list_t *item;
xge_hal_channel_t *channel = NULL;
if (!hldev->config.ring.queue[i].configured ||
!hldev->config.ring.queue[i].intr_vector ||
hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
continue;
/* find channel */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t,
item);
if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
tmp->post_qid == i) {
channel = tmp;
break;
}
}
if (channel) {
xge_hal_channel_msix_set(channel,
hldev->config.ring.queue[i].intr_vector);
}
}
xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
}
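/*
 * __hal_ring_mtu_set - Program each configured queue's maximum frame
 * length: the queue's fixed max_frm_len if set, otherwise @new_frmlen;
 * also update the RMAC maximum payload length.
 */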
void
__hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
{
int i;
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
if (hldev->config.ring.queue[i].max_frm_len !=
XGE_HAL_RING_USE_MTU) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_MAC_RTS_FRM_LEN_SET(
hldev->config.ring.queue[i].max_frm_len),
&bar0->rts_frm_len_n[i]);
} else {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
&bar0->rts_frm_len_n[i]);
}
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
&bar0->rmac_max_pyld_len);
}

File diff suppressed because it is too large.

sys/dev/nxge/include/version.h Normal file
@ -0,0 +1,48 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : version.h
*
* Description: versioning file
*
* Created: 3 September 2004
*/
#ifndef XGELL_VERSION_H
#define XGELL_VERSION_H
#define XGELL_VERSION_MAJOR "2"
#define XGELL_VERSION_MINOR "0"
#define XGELL_VERSION_FIX "7"
#define XGELL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGELL_VERSION XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \
XGELL_VERSION_FIX"."XGELL_VERSION_BUILD
#define XGELL_DESC XGE_DRIVER_NAME" v."XGELL_VERSION
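/* For example (illustrative build number): with GENERATED_BUILD_VERSION
 * defined as "12345", XGELL_VERSION expands to "2.0.7.12345". */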
#endif /* XGELL_VERSION_H */

sys/modules/Makefile
@ -184,6 +184,7 @@ SUBDIR= ${_3dfx} \
${_nsp} \
ntfs \
ntfs_iconv \
${_nxge} \
nullfs \
${_nve} \
${_nwfs} \
@ -447,6 +448,7 @@ _mly= mly
_mxge= mxge
_nfe= nfe
_nve= nve
_nxge= nxge
.if ${MK_CRYPT} != "no" || defined(ALL_MODULES)
.if exists(${.CURDIR}/../crypto/via)
_padlock= padlock
@ -504,6 +506,7 @@ _mxge= mxge
_ndis= ndis
_nfe= nfe
_nve= nve
_nxge= nxge
_pccard= pccard
_rr232x= rr232x
_safe= safe

sys/modules/nxge/Makefile Normal file

@ -0,0 +1,44 @@
# $FreeBSD$
.PATH: ${.CURDIR}/../../dev/nxge
VPATH = ${.CURDIR}/../../dev/nxge/xgehal
CFLAGS_NXGE =
# Debugging/Tracing:
# XGE_COMPONENT_HAL_CONFIG 0x1
# XGE_COMPONENT_HAL_FIFO 0x2
# XGE_COMPONENT_HAL_RING 0x4
# XGE_COMPONENT_HAL_CHANNEL 0x8
# XGE_COMPONENT_HAL_DEVICE 0x10
# XGE_COMPONENT_HAL_MM 0x20
# XGE_COMPONENT_HAL_QUEUE 0x40
# XGE_COMPONENT_HAL_STATS 0x100
# XGE_COMPONENT_OSDEP 0x10000000
# XGE_COMPONENT_LL 0x20000000
# XGE_COMPONENT_ALL 0xffffffff
CFLAGS_NXGE += -DXGE_DEBUG_MODULE_MASK=XGE_COMPONENT_LL
CFLAGS_NXGE += -DXGE_DEBUG_ERR_MASK=XGE_COMPONENT_LL
#CFLAGS_NXGE += -DXGE_DEBUG_TRACE_MASK=XGE_COMPONENT_LL
# 2-Buffer Mode
#CFLAGS_NXGE += -DXGE_BUFFER_MODE_2
# 3-Buffer Mode
#CFLAGS_NXGE += -DXGE_BUFFER_MODE_3
# TSO (TCP Segmentation Offload)
CFLAGS_NXGE += -DXGE_FEATURE_TSO
CFLAGS += $(CFLAGS_NXGE)
KMOD= if_nxge
SRCS= if_nxge.c
SRCS+= xgehal-device.c xgehal-mm.c xge-queue.c
SRCS+= xgehal-driver.c xgehal-ring.c xgehal-channel.c
SRCS+= xgehal-fifo.c xgehal-stats.c xgehal-config.c
SRCS+= xgehal-mgmt.c
SRCS+= device_if.h bus_if.h pci_if.h
.include <bsd.kmod.mk>

tools/tools/README
@ -42,6 +42,7 @@ mfc Merge a directory from HEAD to a branch where it does not
already exist and other MFC related script(s).
mid Create a Message-ID database for mailing lists.
ncpus Count the number of processors
nxge A diagnostic tool for the nxge(4) driver
pciid Generate src/share/misc/pci_vendors.
pciroms A tool for dumping PCI ROM images. WARNING: alpha quality.
pirtool A tool for dumping the $PIR table on i386 machines at runtime.

tools/tools/nxge/Makefile Normal file
@ -0,0 +1,7 @@
# $FreeBSD$
PROG= xgeinfo
SRCS= xge_info.c xge_log.c
NO_MAN=
.include <bsd.prog.mk>

tools/tools/nxge/xge_cmn.h Normal file

@ -0,0 +1,147 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef XGE_CMN_H
#define XGE_CMN_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#if BYTE_ORDER == BIG_ENDIAN
#define XGE_OS_HOST_BIG_ENDIAN 1
#endif
#define u64 unsigned long long
#define u32 unsigned int
#define u16 unsigned short
#define u8 unsigned char
#define XGE_COUNT_REGS 386
#define XGE_COUNT_STATS 160
#define XGE_COUNT_PCICONF 43
#define XGE_COUNT_DEVCONF 1677
#ifdef CONFIG_LRO
#define XGE_COUNT_INTRSTAT 26
#else
#define XGE_COUNT_INTRSTAT 20
#endif
#define XGE_COUNT_TCODESTAT 54
#define DEVICE_ID_XFRAME_II 0x5832
#define XGE_COUNT_EXTENDED_STATS 56
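/* XGE_PRINT writes one formatted line to the given log file and echoes
 * it to stdout. XGE_PRINT_LINE prints the separator string "line",
 * assumed to be defined in xge_log.h. */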
#define XGE_PRINT(fd, fmt...) \
{ \
fprintf( fd, fmt ); \
fprintf( fd, "\n" ); \
printf( fmt ); \
printf( "\n" ); \
}
#define XGE_PRINT_LINE(fd) XGE_PRINT(fd, line);
/* Read & Write Register */
typedef struct barregister
{
char option[2];
u64 offset;
u64 value;
}bar0reg_t;
/* Register Dump */
typedef struct xge_pci_bar0_t
{
u8 name[32]; /* Register name as in user guides */
u64 offset; /* Offset from base address */
u64 value; /* Value */
char type; /* 1: XframeII, 0: Common */
} xge_pci_bar0_t;
/* Hardware Statistics */
typedef struct xge_stats_hw_info_t
{
u8 name[32]; /* Statistics name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u64 value; /* Value */
} xge_stats_hw_info_t;
/* PCI Configuration Space */
typedef struct xge_pci_config_t
{
u8 name[32]; /* Pci conf. name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u64 value; /* Value */
} xge_pci_config_t;
/* Device Configuration */
typedef struct xge_device_config_t
{
u8 name[32]; /* Device conf. name */
u64 value; /* Value */
} xge_device_config_t;
/* Interrupt Statistics */
typedef struct xge_stats_intr_info_t
{
u8 name[32]; /* Interrupt entry name */
u64 value; /* Value (count) */
} xge_stats_intr_info_t;
/* Tcode Statistics */
typedef struct xge_stats_tcode_info_t
{
u8 name[32]; /* Tcode entry name */
u64 value; /* Value (count) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u16 flag; /* Nonzero: entry is skipped when logging */
}xge_stats_tcode_info_t;
#ifdef XGE_OS_HOST_BIG_ENDIAN
#define GET_OFFSET_STATS(index) statsInfo[(index)].be_offset
#define GET_OFFSET_PCICONF(index) pciconfInfo[(index)].be_offset
#else
#define GET_OFFSET_STATS(index) statsInfo[(index)].le_offset
#define GET_OFFSET_PCICONF(index) pciconfInfo[(index)].le_offset
#endif
#endif /* XGE_CMN_H */

tools/tools/nxge/xge_info.c Normal file

@ -0,0 +1,461 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/******************************************
* xge_info.c
*
* To get the Tx, Rx, PCI, Interrupt statistics,
* PCI configuration space and bar0 register
* values
******************************************/
#include "xge_info.h"
int
main( int argc, char *argv[] )
{
if(argc >= 4) {
if(!((strcmp(argv[2], "-r") == 0) ||
(strcmp(argv[2], "-w") == 0) ||
(strcmp(argv[2], "chgbufmode") == 0)))
{ goto use; }
}
else {
if(argc != 3) { goto out; }
else
{
if(!((strcmp(argv[2], "stats") == 0) ||
(strcmp(argv[2], "pciconf") == 0) ||
(strcmp(argv[2], "devconf") == 0) ||
(strcmp(argv[2], "reginfo") == 0) ||
(strcmp(argv[2], "driverversion") == 0) ||
(strcmp(argv[2], "swstats") == 0) ||
(strcmp(argv[2], "getbufmode") == 0) ||
(strcmp(argv[2], "intr") == 0)))
{ goto out; }
}
}
if((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
printf("Creating socket failed\n");
return EXIT_FAILURE;
}
ifreqp.ifr_addr.sa_family = AF_INET;
strcpy(ifreqp.ifr_name, argv[1]);
if (strcmp(argv[2], "pciconf") == 0) return getPciConf();
else if(strcmp(argv[2], "devconf") == 0) return getDevConf();
else if(strcmp(argv[2], "stats") == 0) return getStats();
else if(strcmp(argv[2], "reginfo") == 0) return getRegInfo();
else if(strcmp(argv[2], "intr") == 0) return getIntrStats();
else if(strcmp(argv[2], "swstats") == 0) return getTcodeStats();
else if(strcmp(argv[2], "driverversion") == 0) return getDriverVer();
else if(strcmp(argv[2], "-r") == 0) return getReadReg(argv[2],
argv[3]);
else if(strcmp(argv[2], "-w") == 0) return getWriteReg(argv[2],
argv[3],argv[5]);
else if(strcmp(argv[2], "chgbufmode") == 0) return changeBufMode(argv[3]);
else if(strcmp(argv[2], "getbufmode") == 0) return getBufMode();
else return EXIT_FAILURE;
use:
printf("Usage:");
printf("%s <INTERFACE> [-r] [-w] [chgbufmode]\n", argv[0]);
printf("\t -r <offset> : Read register \n");
printf("\t -w <offset> -v <value> : Write register \n");
printf("\t chgbufmode <Buffer mode> : Changes buffer mode \n");
return EXIT_FAILURE;
out:
printf("Usage:");
printf("%s <INTERFACE> <[stats] [reginfo] [pciconf] [devconf] ", argv[0]);
printf("[intr] [swstats] [driverversion] ");
printf("[getbufmode] [chgbufmode] [-r] [-w] >\n");
printf("\tINTERFACE : Interface (xge0, xge1, xge2, ..)\n");
printf("\tstats : Prints statistics \n");
printf("\treginfo : Prints register values \n");
printf("\tpciconf : Prints PCI configuration space \n");
printf("\tdevconf : Prints device configuration \n");
printf("\tintr : Prints interrupt statistics \n");
printf("\tswstats : Prints sw statistics \n");
printf("\tdriverversion : Prints driver version \n");
printf("\tgetbufmode : Prints Buffer Mode \n");
printf("\tchgbufmode : Changes buffer mode \n");
printf("\t -r <offset> : Read register \n");
printf("\t -w <offset> -v <value> : Write register \n");
return EXIT_FAILURE;
}
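/*
 * Example invocations (illustrative; "xge0" is a placeholder interface
 * name, and the offsets/values are arbitrary):
 *
 *	xgeinfo xge0 stats
 *	xgeinfo xge0 reginfo
 *	xgeinfo xge0 -r 0x0000
 *	xgeinfo xge0 -w 0x0000 -v 0x0
 *	xgeinfo xge0 chgbufmode 2
 */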
int
getStats()
{
void *hw_stats;
void *pci_cfg;
unsigned short device_id;
int index = 0;
bufferSize = GET_OFFSET_STATS(XGE_COUNT_STATS - 1) + 8;
hw_stats = (void *) malloc(bufferSize);
if(!hw_stats)
{
printf("Allocating memory for hw_stats failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)hw_stats;
*pAccess = XGE_QUERY_STATS;
ifreqp.ifr_data = (caddr_t) hw_stats;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting hardware statistics failed\n");
free(hw_stats);
return EXIT_FAILURE;
}
bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8;
pci_cfg = (void *) malloc(bufferSize);
if(!pci_cfg)
{
printf("Allocating memory for pci_cfg failed\n");
free(hw_stats);
return EXIT_FAILURE;
}
pAccess = (char *)pci_cfg;
*pAccess = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting pci configuration space failed\n");
free(pci_cfg);
return EXIT_FAILURE;
}
device_id = *( ( u16 * )( ( unsigned char * )pci_cfg +
GET_OFFSET_PCICONF(index) ) );
logStats( hw_stats,device_id );
free(hw_stats);
free(pci_cfg);
return EXIT_SUCCESS;
}
int
getPciConf()
{
void *pci_cfg;
indexer = 0;
bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8;
pci_cfg = (void *) malloc(bufferSize);
if(!pci_cfg)
{
printf("Allocating memory for pci_cfg failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)pci_cfg;
*pAccess = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting pci configuration space failed\n");
free(pci_cfg);
return EXIT_FAILURE;
}
logPciConf( pci_cfg );
free(pci_cfg);
return EXIT_SUCCESS;
}
int
getDevConf()
{
void *device_cfg;
indexer = 0;
bufferSize = XGE_COUNT_DEVCONF * sizeof(int);
device_cfg = (void *) malloc(bufferSize);
if(!device_cfg)
{
printf("Allocating memory for device_cfg failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)device_cfg;
*pAccess = XGE_QUERY_DEVCONF;
ifreqp.ifr_data = (caddr_t)device_cfg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting Device Configuration failed\n");
free(device_cfg);
return EXIT_FAILURE;
}
logDevConf( device_cfg );
free(device_cfg);
return EXIT_SUCCESS;
}
int
getBufMode()
{
void *buf_mode = 0;
buf_mode = (void *) malloc(sizeof(int));
if(!buf_mode)
{
printf("Allocating memory for Buffer mode parameter failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)buf_mode;
*pAccess = XGE_QUERY_BUFFER_MODE;
ifreqp.ifr_data = (void *)buf_mode;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting Buffer Mode failed\n");
free(buf_mode);
return EXIT_FAILURE;
}
printf("Buffer Mode is %d\n", *ifreqp.ifr_data);
free(buf_mode);
return EXIT_SUCCESS;
}
int
changeBufMode(char *bufmode)
{
char *print_msg;
pAccess = (char *)malloc(sizeof(char));
if(!pAccess)
{
printf("Allocating memory for buffer mode parameter failed\n");
return EXIT_FAILURE;
}
if(*bufmode == '1'){
*pAccess = XGE_SET_BUFFER_MODE_1;
}else if (*bufmode == '2'){
*pAccess = XGE_SET_BUFFER_MODE_2;
}else if (*bufmode == '3'){
*pAccess = XGE_SET_BUFFER_MODE_3;
}else if (*bufmode == '5'){
*pAccess = XGE_SET_BUFFER_MODE_5;
}else{
printf("Invalid Buffer mode\n");
return EXIT_FAILURE;
}
ifreqp.ifr_data = (char *)pAccess;
if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 )
{
printf( "Changing Buffer Mode Failed\n" );
return EXIT_FAILURE;
}
print_msg = (char *)ifreqp.ifr_data;
if(*print_msg == 'Y')
printf("Requested buffer mode was already enabled\n");
else if(*print_msg == 'N')
printf("Requested buffer mode is not implemented OR\nDynamic buffer changing is not supported in this driver\n");
else if(*print_msg == 'C')
printf("Buffer mode changed to %c\n", *bufmode);
return EXIT_SUCCESS;
}
int
getRegInfo()
{
void *regBuffer;
indexer = 0;
bufferSize = regInfo[XGE_COUNT_REGS - 1].offset + 8;
regBuffer = ( void * ) malloc ( bufferSize );
if( !regBuffer )
{
printf( "Allocating memory for register dump failed\n" );
return EXIT_FAILURE;
}
ifreqp.ifr_data = ( caddr_t )regBuffer;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Getting register dump failed\n" );
free( regBuffer );
return EXIT_FAILURE;
}
logRegInfo( regBuffer );
free( regBuffer );
return EXIT_SUCCESS;
}
int
getReadReg(char *opt,char *offst)
{
bar0reg_t *reg;
reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t));
if( !reg )
{
printf( "Allocating memory for reading register failed\n" );
return EXIT_FAILURE;
}
memcpy(reg->option, opt, 2); /* option[] holds "-r"/"-w" with no NUL terminator */
sscanf(offst,"%llx",&reg->offset);
ifreqp.ifr_data = ( caddr_t )reg;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Reading register failed\n" );
free(reg);
return EXIT_FAILURE;
}
logReadReg ( reg->offset,reg->value );
free(reg);
return EXIT_SUCCESS;
}
int
getWriteReg(char *opt,char *offst,char *val)
{
bar0reg_t *reg;
reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t));
if( !reg )
{
printf( "Allocating memory for writing register failed\n" );
return EXIT_FAILURE;
}
memcpy(reg->option, opt, 2); /* option[] holds "-r"/"-w" with no NUL terminator */
sscanf(offst,"%llx",&reg->offset);
sscanf(val,"%llx",&reg->value);
ifreqp.ifr_data = ( caddr_t )reg;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Writing register failed\n" );
free(reg);
return EXIT_FAILURE;
}
free(reg);
return EXIT_SUCCESS;
}
int
getIntrStats()
{
void *intr_stat;
bufferSize = XGE_COUNT_INTRSTAT * sizeof(u32);
intr_stat = (void *) malloc(bufferSize);
if(!intr_stat)
{
printf("Allocating memory for intr_stat failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)intr_stat;
*pAccess = XGE_QUERY_INTRSTATS ;
ifreqp.ifr_data = (caddr_t)intr_stat;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting interrupt statistics failed\n");
free(intr_stat);
return EXIT_FAILURE;
}
intr_stat = (char *)ifreqp.ifr_data;
logIntrStats( intr_stat );
free(intr_stat);
return EXIT_SUCCESS;
}
int
getTcodeStats()
{
void *tcode_stat;
bufferSize = XGE_COUNT_TCODESTAT * sizeof(u32);
tcode_stat = (void *) malloc(bufferSize);
if(!tcode_stat)
{
printf("Allocating memory for tcode_stat failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)tcode_stat;
*pAccess = XGE_QUERY_TCODE ;
ifreqp.ifr_data = (caddr_t)tcode_stat;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting tcode statistics failed\n");
free(tcode_stat);
return EXIT_FAILURE;
}
tcode_stat = (char *)ifreqp.ifr_data;
logTcodeStats( tcode_stat );
free(tcode_stat);
return EXIT_SUCCESS;
}
int
getDriverVer()
{
char *version;
bufferSize = 20;
version = ( char * ) malloc ( bufferSize );
if( !version )
{
printf( "Allocating memory for getting driver version failed\n" );
return EXIT_FAILURE;
}
pAccess = version;
*pAccess = XGE_READ_VERSION;
ifreqp.ifr_data = ( caddr_t )version;
if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 )
{
printf( "Getting driver version failed\n" );
free( version );
return EXIT_FAILURE;
}
logDriverInfo(version);
free( version );
return EXIT_SUCCESS;
}

tools/tools/nxge/xge_info.h Normal file
@ -0,0 +1,83 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/******************************************
* xge_info.h
*
* To get the Tx, Rx, PCI, Interrupt statistics,
* PCI configuration space, device configuration
* and bar0 register values
******************************************/
#ifndef XGE_CMN_H
#include "xge_cmn.h"
#endif
#define XGE_QUERY_STATS 1
#define XGE_QUERY_PCICONF 2
#define XGE_QUERY_INTRSTATS 3
#define XGE_QUERY_DEVCONF 4
#define XGE_READ_VERSION 5
#define XGE_QUERY_TCODE 6
#define XGE_SET_BUFFER_MODE_1 7
#define XGE_SET_BUFFER_MODE_2 8
#define XGE_SET_BUFFER_MODE_3 9
#define XGE_SET_BUFFER_MODE_5 10
#define XGE_QUERY_BUFFER_MODE 11
/* Function declarations */
int getPciConf();
int getDevConf();
int getStats();
int getRegInfo();
int getIntrStats();
int getTcodeStats();
int getReadReg(char *,char *);
int getWriteReg(char *,char *,char *);
int getDriverVer();
int getBufMode();
int changeBufMode(char *);
void logStats(void *,unsigned short);
void logPciConf(void *);
void logDevConf(void *);
void logRegInfo(void *);
void logReadReg(u64,u64);
void logIntrStats(void *);
void logTcodeStats(void *);
void logDriverInfo(char *);
extern xge_pci_bar0_t regInfo[];
extern xge_pci_config_t pciconfInfo[];
extern xge_stats_hw_info_t statsInfo[];
extern xge_device_config_t devconfInfo[];
extern xge_stats_intr_info_t intrInfo[];
extern xge_stats_tcode_info_t tcodeInfo[];
struct ifreq ifreqp;
int sockfd, indexer, bufferSize = 0;
char *pAccess;

tools/tools/nxge/xge_log.c Normal file

@ -0,0 +1,246 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "xge_log.h"
void
logStats( void *hwStats, unsigned short device_id )
{
int index = 0;
int count = 0;
count = XGE_COUNT_STATS - ((device_id == DEVICE_ID_XFRAME_II) ? 0 : XGE_COUNT_EXTENDED_STATS);
fdAll = fopen( "stats.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_STATS(fdAll);
for( index = 0; index < count ; index++ )
{
switch( statsInfo[index].type )
{
case 2:
{
statsInfo[index].value =
*( ( u16 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
case 4:
{
statsInfo[index].value =
*( ( u32 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
case 8:
{
statsInfo[index].value =
*( ( u64 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
}
XGE_PRINT_STATS(fdAll,(const char *) statsInfo[index].name,
statsInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logPciConf( void * pciConf )
{
int index = 0;
fdAll = fopen( "pciconf.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_PCICONF(fdAll);
for( index = 0; index < XGE_COUNT_PCICONF; index++ )
{
pciconfInfo[index].value =
*( ( u16 * )( ( unsigned char * )pciConf +
GET_OFFSET_PCICONF(index) ) );
XGE_PRINT_PCICONF(fdAll,(const char *) pciconfInfo[index].name,
GET_OFFSET_PCICONF(index), pciconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logDevConf( void * devConf )
{
int index = 0;
fdAll = fopen( "devconf.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_DEVCONF(fdAll);
for( index = 0; index < XGE_COUNT_DEVCONF; index++ )
{
devconfInfo[index].value =
*( ( u32 * )( ( unsigned char * )devConf +
( index * ( sizeof( int ) ) ) ) );
XGE_PRINT_DEVCONF(fdAll,(const char *) devconfInfo[index].name,
devconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose( fdAll );
}
}
void
logRegInfo( void * regBuffer )
{
int index = 0;
fdAll = fopen( "reginfo.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_REGS(fdAll);
for( index = 0; index < XGE_COUNT_REGS; index++ )
{
regInfo[index].value =
*( ( u64 * )( ( unsigned char * )regBuffer +
regInfo[index].offset ) );
XGE_PRINT_REGS(fdAll,(const char *) regInfo[index].name,
regInfo[index].offset, regInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logReadReg(u64 offset,u64 temp)
{
int index=0;
fdAll = fopen( "readreg.log", "w+");
if( fdAll )
{
XGE_PRINT_READ_HEADER_REGS(fdAll);
regInfo[index].offset = offset ;
regInfo[index].value = temp ;
printf("0x%.8X\t0x%.16llX\n",regInfo[index].offset, regInfo[index].value);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logIntrStats( void * intrStats )
{
int index = 0;
fdAll = fopen( "intrstats.log", "w+" );
if(fdAll)
{
XGE_PRINT_HEADER_STATS(fdAll);
for( index = 0; index < XGE_COUNT_INTRSTAT; index++ )
{
intrInfo[index].value =
*( ( u32 * )( ( unsigned char * )intrStats +
( index * ( sizeof( u32 ) ) ) ) );
XGE_PRINT_STATS(fdAll,(const char *) intrInfo[index].name,
intrInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logTcodeStats( void * tcodeStats )
{
int index = 0;
fdAll = fopen( "tcodestats.log", "w+" );
if(fdAll)
{
XGE_PRINT_HEADER_STATS(fdAll);
for( index = 0; index < XGE_COUNT_TCODESTAT; index++ )
{
if(!(tcodeInfo[index].flag))
{
switch( tcodeInfo[index].type )
{
case 2:
{
tcodeInfo[index].value =
*( ( u16 * )( ( unsigned char * )tcodeStats +
( index * ( sizeof( u16 ) ) ) ) );
break;
}
case 4:
{
tcodeInfo[index].value =
*( ( u32 * )( ( unsigned char * )tcodeStats +
( index * ( sizeof( u32 ) ) ) ) );
break;
}
}
XGE_PRINT_STATS(fdAll,(const char *) tcodeInfo[index].name,
tcodeInfo[index].value);
}
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logDriverInfo( char *version )
{
fdAll = fopen( "driverinfo.log", "w+");
if (fdAll)
{
XGE_PRINT_LINE(fdAll);
printf("DRIVER VERSION : %s\n",version);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}

tools/tools/nxge/xge_log.h Normal file

File diff suppressed because it is too large.