Merge Neterion if_nxge driver version 2.0.9.11230 with the following
changes:

  01 - Enhanced LRO:
  The LRO feature is extended to support multi-buffer mode. Previously,
  only Ethernet frames received in contiguous buffers were offloaded.
  Now, frames received in multiple non-contiguous buffers can be
  offloaded as well. The driver now supports LRO for jumbo frames.

  02 - Locks Optimization:
  The driver code was re-organized to limit the use of locks.
  Moreover, lock contention was reduced by replacing wait locks with
  try locks (a brief sketch follows this list).

  03 - Code Optimization:
  The driver code was refactored to eliminate some memcpy
  operations. Fast path loops were optimized.

  04 - Tag Creation:
  Physical Buffer Tags are now optimized based on frame size.
  For better performance, Physical Memory Maps are now re-used.

  05 - Configuration:
  Features such as TSO, LRO, and Interrupt Mode can be configured
  either at load or at run time. Rx buffer mode (mode 1 or mode 2)
  can be configured at load time through kenv.

  06 - Driver Statistics:
  Run-time statistics are enhanced to provide better visibility
  into driver performance.

  07 - Bug Fixes:
  The driver contains fixes for the problems discovered and
  reported since the last submission.

  08 - MSI Support:
  Added Message Signaled Interrupts (MSI) support, which currently
  uses a single message.

  09 - Removed Feature:
  The Rx 3-buffer mode has been removed. The driver now supports 1,
  2, and 5 buffer modes, of which the 2 and 5 buffer modes can be
  used for header separation.

  10 - Compiler Warning:
  Fixed a compiler warning when compiling for 32-bit systems.

  11 - Copyright Notice:
  Source files are updated with the proper copyright notice.
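
A brief, hypothetical sketch of the try-lock pattern described in item 02
above (illustrative code, not code taken from the driver): a contended
transmit lock is skipped and counted instead of being waited on. The
function and counter names are made up; the driver itself keeps a
comparable tx_lock_fail statistic.

  #include <sys/param.h>
  #include <sys/lock.h>
  #include <sys/mutex.h>

  /* Hypothetical Tx-path helper illustrating mtx_trylock() vs. mtx_lock(). */
  static void
  xge_try_send(struct mtx *tx_mtx, uint64_t *tx_lock_fail)
  {
          /*
           * Do not sleep on a busy Tx lock; record the miss and let the
           * caller retry later.
           */
          if (mtx_trylock(tx_mtx) == 0) {
                  (*tx_lock_fail)++;
                  return;
          }
          /* ... hand queued frames to the NIC while holding the lock ... */
          mtx_unlock(tx_mtx);
  }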

MFC after:	3 days
Submitted by:	Alicia Pena <Alicia dot Pena at neterion dot com>,
		Muhammad Shafiq <Muhammad dot Shafiq at neterion dot com>
Robert Watson 2007-10-29 14:19:32 +00:00
parent eb320b0ee7
commit 3be4cb0b4a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=173139
48 changed files with 15299 additions and 16135 deletions

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 28, 2007
.Dd October 16, 2007
.Dt NXGE 4
.Os
.Sh NAME
@ -48,9 +48,9 @@ if_nxge_load="YES"
The
.Nm
driver provides support for Neterion Xframe-I and Xframe-II adapters.
The driver supports TCP Segmentation Offload (TSO/LSO), Jumbo Frames
(5 buffer mode), Header Separation (2 and 3 Receive buffer modes),
VLAN, and Promiscuous mode.
The driver supports TCP Segmentation Offload (TSO/LSO),
Large Receive Offload (LRO), Jumbo Frames (5 buffer mode),
Header Separation (Rx 2 buffer mode), VLAN, and Promiscuous mode.
.Pp
For general information and support, please visit the Neterion support page
.Pa http://www.neterion.com/support/support.html .
@ -61,6 +61,11 @@ Selecting an MTU larger than 1500 bytes with the
utility configures the adapter to transmit and receive Jumbo Frames.
Xframe adapters support Jumbo Frames up to 9600 bytes.
.Pp
For Jumbo Frames, the driver will try to allocate physically contiguous buffers.
Failure to do so may degrade performance.
To resolve such problems, please visit
.Pa http://www.neterion.com
where additional information and a kernel patch can be found.
.Pp
For more information on configuring this device, see
.Xr ifconfig 8 .
.Sh HARDWARE

File diff suppressed because it is too large

View File

@ -26,75 +26,52 @@
* $FreeBSD$
*/
/*
* if_xge.h
*/
#ifndef _IF_XGE_H
#define _IF_XGE_H
#include <dev/nxge/include/xgehal.h>
#include <dev/nxge/xge-osdep.h>
#if defined(XGE_FEATURE_TSO) && (__FreeBSD_version < 700026)
#undef XGE_FEATURE_TSO
#endif
#if defined(XGE_FEATURE_LRO)
#if __FreeBSD_version < 700047
#undef XGE_FEATURE_LRO
#undef XGE_HAL_CONFIG_LRO
#else
#define XGE_HAL_CONFIG_LRO
#endif
#endif
#ifdef FUNC_PRINT
#define ENTER_FUNCTION xge_os_printf("Enter\t==>[%s]\n", __FUNCTION__);
#define LEAVE_FUNCTION xge_os_printf("Leave\t<==[%s]\n", __FUNCTION__);
#else
#define ENTER_FUNCTION
#define LEAVE_FUNCTION
#endif
/* Printing description, Copyright */
#define DRIVER_VERSION XGELL_VERSION_MAJOR"." \
XGELL_VERSION_MINOR"." \
XGELL_VERSION_FIX"." \
XGELL_VERSION_BUILD
#define COPYRIGHT_STRING "Copyright(c) 2002-2007 Neterion Inc."
#define PRINT_COPYRIGHT xge_os_printf("%s", COPYRIGHT_STRING)
#define XGE_DRIVER_VERSION \
XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \
XGELL_VERSION_FIX"."XGELL_VERSION_BUILD
#define XGE_COPYRIGHT "Copyright(c) 2002-2007 Neterion Inc."
/* Printing */
#define xge_trace(trace, fmt, args...) xge_debug_ll(trace, fmt, ## args);
#define xge_ctrace(trace, fmt...) xge_debug_ll(trace, fmt);
#define BUFALIGN(buffer_length) \
if((buffer_length % 128) != 0) { \
buffer_length += (128 - (buffer_length % 128)); \
}
static inline void *
xge_malloc(unsigned long size) {
void *vaddr = malloc(size, M_DEVBUF, M_NOWAIT);
bzero(vaddr, size);
return vaddr;
#define XGE_ALIGN_TO(buffer_length, to) { \
if((buffer_length % to) != 0) { \
buffer_length += (to - (buffer_length % to)); \
} \
}
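/*
 * Editorial note (not part of this header): XGE_ALIGN_TO() appears to
 * generalize the BUFALIGN() macro shown above, rounding a length up to the
 * next multiple of an arbitrary alignment.  Illustrative values:
 */
#if 0   /* illustrative only */
static inline int
xge_align_example(void)
{
        int len = 1514;

        XGE_ALIGN_TO(len, 128); /* len becomes 1536 (12 * 128) */
        return len;
}
#endif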
#define XGE_EXIT_ON_ERR(text, label, return_value) { \
xge_trace(XGE_ERR, "%s (Status: %d)", text, return_value); \
status = return_value; \
goto label; \
}
#define XGE_SET_BUFFER_MODE_IN_RINGS(mode) { \
for(index = 0; index < XGE_RING_COUNT; index++) \
ring_config->queue[index].buffer_mode = mode; \
}
#define SINGLE_ALLOC 0
#define MULTI_ALLOC 1
#define SAVE 0
#define RESTORE 1
#define UP 1
#define DOWN 0
#define XGE_DEFAULT_USER_HARDCODED -1
#define MAX_MBUF_FRAGS 20 /* Maximum number of fragments */
#define MAX_SEGS 100 /* Maximum number of segments */
#define XGELL_TX_LEVEL_LOW 16
#define XGE_MAX_SEGS 100 /* Maximum number of segments */
#define XGE_TX_LEVEL_LOW 16
#define XGE_FIFO_COUNT XGE_HAL_MIN_FIFO_NUM
#define XGE_RING_COUNT XGE_HAL_MIN_RING_NUM
#define BUFFER_SIZE 20
#define XGE_BUFFER_SIZE 20
#define XGE_LRO_DEFAULT_ENTRIES 12
#define XGE_BAUDRATE 1000000000
/* Default values to configuration parameters */
#define XGE_DEFAULT_ENABLED_TSO 1
#define XGE_DEFAULT_ENABLED_LRO 1
#define XGE_DEFAULT_ENABLED_MSI 1
#define XGE_DEFAULT_BUFFER_MODE 1
#define XGE_DEFAULT_INITIAL_MTU 1500
#define XGE_DEFAULT_LATENCY_TIMER -1
#define XGE_DEFAULT_MAX_SPLITS_TRANS -1
@ -144,16 +121,56 @@ xge_malloc(unsigned long size) {
#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_AC_EN 1
#define XGE_DEFAULT_RING_QUEUE_RTI_TIMER_VAL_US 250
#define XGE_DRV_STATS(param) (lldev->driver_stats.param++)
#define XGE_SAVE_PARAM(to, what, value) to.what = value;
#define XGE_GET_PARAM(str_kenv, to, param, hardcode) { \
static int param##__LINE__; \
if(testenv(str_kenv) == 1) { \
getenv_int(str_kenv, &param##__LINE__); \
} \
else { \
param##__LINE__ = hardcode; \
} \
XGE_SAVE_PARAM(to, param, param##__LINE__); \
}
#define XGE_GET_PARAM_MAC(str_kenv, param, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).mac), param, hardcode);
#define XGE_GET_PARAM_FIFO(str_kenv, param, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).fifo), param, hardcode);
#define XGE_GET_PARAM_FIFO_QUEUE(str_kenv, param, qindex, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex]), param, \
hardcode);
#define XGE_GET_PARAM_FIFO_QUEUE_TTI(str_kenv, param, qindex, tindex, hardcode)\
XGE_GET_PARAM(str_kenv, ((*dconfig).fifo.queue[qindex].tti[tindex]), \
param, hardcode);
#define XGE_GET_PARAM_RING(str_kenv, param, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).ring), param, hardcode);
#define XGE_GET_PARAM_RING_QUEUE(str_kenv, param, qindex, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex]), param, \
hardcode);
#define XGE_GET_PARAM_RING_QUEUE_RTI(str_kenv, param, qindex, hardcode) \
XGE_GET_PARAM(str_kenv, ((*dconfig).ring.queue[qindex].rti), param, \
hardcode);
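/*
 * Editorial illustration (not part of the driver header): the XGE_GET_PARAM
 * family above resolves each configuration knob from a loader/kenv tunable
 * at attach time and falls back to a hard-coded default.  A stand-alone
 * sketch of that pattern, using the kernel's testenv()/getenv_int()
 * interfaces; the tunable name "hw.nxge.enable_tso" is hypothetical.
 */
#if 0   /* illustrative only */
#include <sys/param.h>
#include <sys/systm.h>

static int
xge_tunable_int(const char *name, int hardcoded)
{
        int value = hardcoded;

        if (testenv(name) == 1)
                getenv_int(name, &value);
        return (value);
}
/* e.g.: lldev->enabled_tso =
 *           xge_tunable_int("hw.nxge.enable_tso", XGE_DEFAULT_ENABLED_TSO); */
#endif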
/* Values to identify the requests from getinfo tool in ioctl */
#define XGE_QUERY_STATS 1
#define XGE_QUERY_PCICONF 2
#define XGE_QUERY_INTRSTATS 3
#define XGE_QUERY_DEVSTATS 3
#define XGE_QUERY_DEVCONF 4
#define XGE_READ_VERSION 5
#define XGE_QUERY_TCODE 6
#define XGE_SET_BUFFER_MODE_1 7
#define XGE_SET_BUFFER_MODE_2 8
#define XGE_SET_BUFFER_MODE_3 9
#define XGE_QUERY_SWSTATS 6
#define XGE_QUERY_DRIVERSTATS 7
#define XGE_SET_BUFFER_MODE_1 8
#define XGE_SET_BUFFER_MODE_2 9
#define XGE_SET_BUFFER_MODE_5 10
#define XGE_QUERY_BUFFER_MODE 11
@ -164,124 +181,233 @@ xge_malloc(unsigned long size) {
typedef struct mbuf *mbuf_t;
typedef enum xgell_event_e {
typedef enum xge_lables {
xge_free_all = 0,
xge_free_mutex = 1,
xge_free_terminate_hal_driver = 2,
xge_free_hal_device = 3,
xge_free_pci_info = 4,
xge_free_bar0 = 5,
xge_free_bar0_resource = 6,
xge_free_bar1 = 7,
xge_free_bar1_resource = 8,
xge_free_irq_resource = 9,
xge_free_terminate_hal_device = 10,
xge_free_media_interface = 11,
} xge_lables_e;
typedef enum xge_option {
XGE_CHANGE_LRO = 0,
XGE_SET_MTU = 1
} xge_option_e;
typedef enum xge_event_e {
XGE_LL_EVENT_TRY_XMIT_AGAIN = XGE_LL_EVENT_BASE + 1,
XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2,
} xgell_event_e;
XGE_LL_EVENT_DEVICE_RESETTING = XGE_LL_EVENT_BASE + 2
} xge_event_e;
typedef struct xge_msi_info {
u16 msi_control; /* MSI control 0x42 */
u32 msi_lower_address; /* MSI lower address 0x44 */
u32 msi_higher_address; /* MSI higher address 0x48 */
u16 msi_data; /* MSI data */
} xge_msi_info_t;
typedef struct xge_driver_stats_t {
/* ISR statistics */
u64 isr_filter;
u64 isr_line;
u64 isr_msi;
/* Tx statistics */
u64 tx_calls;
u64 tx_completions;
u64 tx_desc_compl;
u64 tx_tcode;
u64 tx_defrag;
u64 tx_no_txd;
u64 tx_map_fail;
u64 tx_max_frags;
u64 tx_tso;
u64 tx_posted;
u64 tx_again;
u64 tx_lock_fail;
/* Rx statistics */
u64 rx_completions;
u64 rx_desc_compl;
u64 rx_tcode;
u64 rx_no_buf;
u64 rx_map_fail;
/* LRO statistics */
u64 lro_uncapable;
u64 lro_begin;
u64 lro_end1;
u64 lro_end2;
u64 lro_end3;
u64 lro_append;
u64 lro_session_exceeded;
u64 lro_close;
} xge_driver_stats_t;
typedef struct xge_lro_entry_t {
SLIST_ENTRY(xge_lro_entry_t) next;
struct mbuf *m_head;
struct mbuf *m_tail;
struct ip *lro_header_ip;
int timestamp;
u32 tsval;
u32 tsecr;
u32 source_ip;
u32 dest_ip;
u32 next_seq;
u32 ack_seq;
u32 len;
u32 data_csum;
u16 window;
u16 source_port;
u16 dest_port;
u16 append_cnt;
u16 mss;
} xge_lro_entry_t;
SLIST_HEAD(lro_head, xge_lro_entry_t);
/* Adapter structure */
typedef struct xgelldev {
typedef struct xge_lldev_t {
device_t device; /* Device */
struct ifnet *ifnetp; /* Interface ifnet structure */
struct resource *irq; /* Resource structure for IRQ */
void *irqhandle; /* IRQ handle */
pci_info_t *pdev;
struct ifmedia xge_media; /* In-kernel representation of a */
/* single supported media type */
xge_pci_info_t *pdev; /* PCI info */
xge_hal_device_t *devh; /* HAL: Device Handle */
xge_hal_channel_h ring_channel[XGE_HAL_MAX_FIFO_NUM];
/* Ring channel */
xge_hal_channel_h fifo_channel_0; /* FIFO channel */
struct mtx xge_lock; /* Mutex - Default */
struct mtx mtx_drv; /* Mutex - Driver */
struct mtx mtx_tx[XGE_FIFO_COUNT];
/* Mutex - Tx */
char mtx_name_drv[16];/*Mutex Name - Driver */
char mtx_name_tx[16][XGE_FIFO_COUNT];
/* Mutex Name - Tx */
struct callout timer; /* Timer for polling */
struct xge_hal_stats_hw_info_t *hwstats; /* Hardware Statistics */
int saved_regs[16]; /* To save register space */
int xge_mtu; /* MTU */
int initialized; /* Flag: Initialized or not */
struct ifmedia media; /* In-kernel representation of a */
/* single supported media type */
xge_hal_channel_h fifo_channel[XGE_FIFO_COUNT];
/* FIFO channels */
xge_hal_channel_h ring_channel[XGE_RING_COUNT];
/* Ring channels */
bus_dma_tag_t dma_tag_tx; /* Tag for dtr dma mapping (Tx) */
bus_dma_tag_t dma_tag_rx; /* Tag for dtr dma mapping (Rx) */
bus_dmamap_t extra_dma_map; /* Extra DMA map for Rx */
xge_msi_info_t msi_info; /* MSI info */
xge_driver_stats_t driver_stats; /* Driver statistics */
int initialized; /* Flag: Initialized or not */
int all_multicast; /* All multicast flag */
int macaddr_count; /* Multicast address count */
int in_detach; /* To avoid ioctl during detach */
int buffer_mode; /* Buffer Mode */
int rxd_mbuf_cnt; /* Number of buffers used */
int rxd_mbuf_len[5];/* Buffer lengths */
} xgelldev_t;
int enabled_tso; /* Flag: TSO Enabled */
int enabled_lro; /* Flag: LRO Enabled */
int enabled_msi; /* Flag: MSI Enabled */
int mtu; /* Interface MTU */
int lro_num; /* Number of LRO sessions */
struct lro_head lro_active; /* Active LRO sessions */
struct lro_head lro_free; /* Free LRO sessions */
} xge_lldev_t;
/* Rx descriptor private structure */
typedef struct {
mbuf_t *bufferArray;
struct xge_dma_mbuf dmainfo[5];
} xgell_rx_priv_t;
typedef struct xge_rx_priv_t {
mbuf_t *bufferArray;
xge_dma_mbuf_t dmainfo[5];
} xge_rx_priv_t;
/* Tx descriptor private structure */
typedef struct {
typedef struct xge_tx_priv_t {
mbuf_t buffer;
bus_dmamap_t dma_map;
} xgell_tx_priv_t;
} xge_tx_priv_t;
/* BAR0 Register */
typedef struct barregister {
char option[2];
typedef struct xge_register_t {
char option[2];
u64 offset;
u64 value;
}bar0reg_t;
}xge_register_t;
void xge_init_params(xge_hal_device_config_t *dconfig, device_t dev);
void xge_init_params(xge_hal_device_config_t *, device_t);
void xge_init(void *);
void xge_init_locked(void *);
void xge_stop(xgelldev_t *);
void freeResources(device_t, int);
void xgell_callback_link_up(void *);
void xgell_callback_link_down(void *);
void xgell_callback_crit_err(void *, xge_hal_event_e, u64);
void xgell_callback_event(xge_queue_item_t *);
void xge_device_init(xge_lldev_t *, xge_hal_channel_reopen_e);
void xge_device_stop(xge_lldev_t *, xge_hal_channel_reopen_e);
void xge_stop(xge_lldev_t *);
void xge_resources_free(device_t, xge_lables_e);
void xge_callback_link_up(void *);
void xge_callback_link_down(void *);
void xge_callback_crit_err(void *, xge_hal_event_e, u64);
void xge_callback_event(xge_queue_item_t *);
int xge_ifmedia_change(struct ifnet *);
void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
int xge_ioctl(struct ifnet *, unsigned long, caddr_t);
int xge_ioctl_stats(xge_lldev_t *, struct ifreq *);
int xge_ioctl_registers(xge_lldev_t *, struct ifreq *);
void xge_timer(void *);
int xge_intr_filter(void *);
void xge_intr(void *);
int xgell_rx_open(int, xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_tx_open(xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_channel_close(xgelldev_t *, xge_hal_channel_reopen_e);
int xgell_channel_open(xgelldev_t *, xge_hal_channel_reopen_e);
xge_hal_status_e xgell_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xgell_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xgell_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
xge_hal_status_e xgell_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
void xgell_rx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xgell_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xgell_set_mbuf_cflags(mbuf_t);
int xge_isr_filter(void *);
void xge_isr_line(void *);
void xge_isr_msi(void *);
void xge_enable_msi(xge_lldev_t *);
int xge_rx_open(int, xge_lldev_t *, xge_hal_channel_reopen_e);
int xge_tx_open(xge_lldev_t *, xge_hal_channel_reopen_e);
void xge_channel_close(xge_lldev_t *, xge_hal_channel_reopen_e);
int xge_channel_open(xge_lldev_t *, xge_hal_channel_reopen_e);
xge_hal_status_e xge_rx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xge_tx_compl(xge_hal_channel_h, xge_hal_dtr_h, u8, void *);
xge_hal_status_e xge_tx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
xge_hal_status_e xge_rx_initial_replenish(xge_hal_channel_h, xge_hal_dtr_h,
int, void *, xge_hal_channel_reopen_e);
void xge_rx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xge_tx_term(xge_hal_channel_h, xge_hal_dtr_h, xge_hal_dtr_state_e,
void *, xge_hal_channel_reopen_e);
void xge_set_mbuf_cflags(mbuf_t);
void xge_send(struct ifnet *);
void xge_send_locked(struct ifnet *);
int xgell_get_multimode_normalbuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev);
int xgell_get_multimode_jumbobuf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev, int lock);
int xgell_get_second_buffer(xgell_rx_priv_t *rxd_priv, xgelldev_t *lldev);
int xgell_get_buf(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev, int index);
int xge_ring_dtr_get(mbuf_t mbuf_up, xge_hal_channel_h channelh, xge_hal_dtr_h dtr,
xgelldev_t *lldev, xgell_rx_priv_t *rxd_priv);
int xgell_get_buf_3b_5b(xge_hal_dtr_h dtrh, xgell_rx_priv_t *rxd_priv,
xgelldev_t *lldev);
static void inline xge_send_locked(struct ifnet *, int);
int xge_get_buf(xge_hal_dtr_h, xge_rx_priv_t *, xge_lldev_t *, int);
int xge_ring_dtr_get(mbuf_t, xge_hal_channel_h, xge_hal_dtr_h, xge_lldev_t *,
xge_rx_priv_t *);
int xge_get_buf_3b_5b(xge_hal_dtr_h, xge_rx_priv_t *, xge_lldev_t *);
void dmamap_cb(void *, bus_dma_segment_t *, int, int);
void xgell_reset(xgelldev_t *);
void xge_setmulti(xgelldev_t *);
void xge_enable_promisc(xgelldev_t *);
void xge_disable_promisc(xgelldev_t *);
int changeMtu(xgelldev_t *, int);
int changeBufmode(xgelldev_t *, int);
void xge_reset(xge_lldev_t *);
void xge_setmulti(xge_lldev_t *);
void xge_enable_promisc(xge_lldev_t *);
void xge_disable_promisc(xge_lldev_t *);
int xge_change_mtu(xge_lldev_t *, int);
void xge_buffer_mode_init(xge_lldev_t *, int);
void xge_initialize(device_t, xge_hal_channel_reopen_e);
void xge_terminate(device_t, xge_hal_channel_reopen_e);
void if_up_locked(xgelldev_t *);
void if_down_locked(xgelldev_t *);
int xge_probe(device_t);
int xge_driver_initialize(void);
void xge_media_init(device_t);
void xge_pci_space_save(device_t);
void xge_pci_space_restore(device_t);
void xge_msi_info_save(xge_lldev_t *);
void xge_msi_info_restore(xge_lldev_t *);
int xge_attach(device_t);
int xge_interface_setup(device_t);
int xge_detach(device_t);
int xge_shutdown(device_t);
int xge_suspend(device_t);
int xge_resume(device_t);
void xge_mutex_init(xge_lldev_t *);
void xge_mutex_destroy(xge_lldev_t *);
void xge_print_info(xge_lldev_t *);
void xge_lro_flush_sessions(xge_lldev_t *);
void xge_rx_buffer_sizes_set(xge_lldev_t *, int, int);
void xge_accumulate_large_rx(xge_lldev_t *, struct mbuf *, int,
xge_rx_priv_t *);
xge_hal_status_e xge_create_dma_tags(device_t);
void xge_add_sysctl_handlers(xge_lldev_t *);
void xge_confirm_changes(xge_lldev_t *, xge_option_e);
static int xge_lro_accumulate(xge_lldev_t *, struct mbuf *);
static void xge_lro_flush(xge_lldev_t *, xge_lro_entry_t *);
#endif // _IF_XGE_H

View File

@ -1,6 +1,6 @@
/* $FreeBSD$ */
#ifndef BUILD_VERSION_H
#define BUILD_VERSION_H
/* Do not edit! Automatically generated when released.*/
#define GENERATED_BUILD_VERSION "10294"
/* $FreeBSD$ */
#define GENERATED_BUILD_VERSION "11230"
#endif /* BUILD_VERSION_H */

View File

@ -26,26 +26,18 @@
* $FreeBSD$
*/
/*
* FileName : version.h
*
* Description: versioning file
*
* Created: 3 September 2004
*/
#ifndef VERSION_H
#define VERSION_H
#include <dev/nxge/include/build-version.h>
#define XGE_HAL_VERSION_MAJOR "2"
#define XGE_HAL_VERSION_MINOR "5"
#define XGE_HAL_VERSION_FIX "0"
#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGE_HAL_VERSION_MAJOR "2"
#define XGE_HAL_VERSION_MINOR "0"
#define XGE_HAL_VERSION_FIX "9"
#define XGE_HAL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGE_HAL_VERSION XGE_HAL_VERSION_MAJOR"."XGE_HAL_VERSION_MINOR"."\
XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD
#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION
XGE_HAL_VERSION_FIX"."XGE_HAL_VERSION_BUILD
#define XGE_HAL_DESC XGE_DRIVER_NAME" v."XGE_HAL_VERSION
/* Link Layer versioning */
#include <dev/nxge/xgell-version.h>

View File

@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xge-debug.h
*
* Description: debug facilities
*
* Created: 6 May 2004
*/
#ifndef XGE_DEBUG_H
#define XGE_DEBUG_H
@ -54,10 +46,11 @@ __EXTERN_BEGIN_DECLS
#ifdef XGE_DEBUG_FP
#define XGE_DEBUG_FP_DEVICE 0x1
#define XGE_DEBUG_FP_DEVICE 0x1
#define XGE_DEBUG_FP_CHANNEL 0x2
#define XGE_DEBUG_FP_FIFO 0x4
#define XGE_DEBUG_FP_RING 0x8
#define XGE_DEBUG_FP_FIFO 0x4
#define XGE_DEBUG_FP_RING 0x8
#define XGE_DEBUG_FP_ALL 0xff
#endif
/**
@ -125,15 +118,6 @@ extern int *g_level;
#define XGE_COMPONENT_HAL_QUEUE 0x00000040
#define XGE_COMPONENT_HAL_INTERRUPT 0x00000080
#define XGE_COMPONENT_HAL_STATS 0x00000100
#ifdef XGEHAL_RNIC
#define XGE_COMPONENT_HAL_DMQ 0x00000200
#define XGE_COMPONENT_HAL_UMQ 0x00000400
#define XGE_COMPONENT_HAL_SQ 0x00000800
#define XGE_COMPONENT_HAL_SRQ 0x00001000
#define XGE_COMPONENT_HAL_CQRQ 0x00002000
#define XGE_COMPONENT_HAL_POOL 0x00004000
#define XGE_COMPONENT_HAL_BITMAP 0x00008000
#endif
/* space for CORE_XXX */
#define XGE_COMPONENT_OSDEP 0x10000000
@ -146,9 +130,9 @@ extern int *g_level;
#ifndef __GNUC__
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
#define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt)
#define xge_trace_aux(fmt) xge_os_vatrace(g_xge_os_tracebuf, fmt)
#else
#define xge_trace_aux(fmt) xge_os_vaprintf(fmt)
#define xge_trace_aux(fmt) xge_os_vaprintf(fmt)
#endif
/**
@ -164,24 +148,24 @@ extern int *g_level;
*/
#define xge_debug(module, level, fmt) { \
if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
} \
}
#else /* __GNUC__ */
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
#define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt)
#define xge_trace_aux(fmt...) xge_os_trace(g_xge_os_tracebuf, fmt)
#else
#define xge_trace_aux(fmt...) xge_os_printf(fmt)
#define xge_trace_aux(fmt...) xge_os_printf(fmt)
#endif
#define xge_debug(module, level, fmt...) { \
if (((level >= XGE_TRACE && ((module & XGE_DEBUG_TRACE_MASK) == module)) || \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
(level >= XGE_ERR && ((module & XGE_DEBUG_ERR_MASK) == module))) && \
level >= *g_level && module & *(unsigned int *)g_module_mask) { \
xge_trace_aux(fmt); \
} \
}
#endif /* __GNUC__ */
@ -353,136 +337,6 @@ static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
#endif /* __GNUC__ */
#endif
#ifdef XGEHAL_RNIC
#if (XGE_COMPONENT_HAL_DMQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_DMQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_dmq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_DMQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_dmq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_UMQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_UMQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_umq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_UMQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_umq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_SQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_SQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_sq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_SQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_sq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_SRQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_SRQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_srq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_SRQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_srq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_CQRQ & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_CQRQ;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_cqrq(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_CQRQ, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_cqrq(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_POOL & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_POOL;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_pool(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_POOL, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_pool(level, fmt...)
#endif /* __GNUC__ */
#endif
#if (XGE_COMPONENT_HAL_BITMAP & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {
u32 module = XGE_COMPONENT_HAL_BITMAP;
xge_debug(module, level, fmt);
}
#else /* __GNUC__ */
#define xge_debug_bitmap(level, fmt...) \
xge_debug(XGE_COMPONENT_HAL_BITMAP, level, fmt)
#endif /* __GNUC__ */
#else
#ifndef __GNUC__
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
#else /* __GNUC__ */
#define xge_debug_bitmap(level, fmt...)
#endif /* __GNUC__ */
#endif
#endif
#if (XGE_COMPONENT_OSDEP & XGE_DEBUG_MODULE_MASK)
#ifndef __GNUC__
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {
@ -531,13 +385,6 @@ static inline void xge_debug_fifo(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ring(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_channel(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_device(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_dmq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_umq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_sq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_srq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_cqrq(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_pool(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_bitmap(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_hal(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_osdep(xge_debug_level_e level, char *fmt, ...) {}
static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
@ -557,7 +404,7 @@ static inline void xge_debug_ll(xge_debug_level_e level, char *fmt, ...) {}
* time.
*/
#define xge_assert(test) { \
if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \
if (!(test)) xge_os_bug("bad cond: "#test" at %s:%d\n", \
__FILE__, __LINE__); }
#else
#define xge_assert(test)

View File

@ -26,35 +26,27 @@
* $FreeBSD$
*/
/*
* FileName : xge-defs.h
*
* Description: global definitions
*
* Created: 13 May 2004
*/
#ifndef XGE_DEFS_H
#define XGE_DEFS_H
#define XGE_PCI_VENDOR_ID 0x17D5
#define XGE_PCI_DEVICE_ID_XENA_1 0x5731
#define XGE_PCI_DEVICE_ID_XENA_2 0x5831
#define XGE_PCI_DEVICE_ID_HERC_1 0x5732
#define XGE_PCI_DEVICE_ID_HERC_2 0x5832
#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733
#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833
#define XGE_PCI_VENDOR_ID 0x17D5
#define XGE_PCI_DEVICE_ID_XENA_1 0x5731
#define XGE_PCI_DEVICE_ID_XENA_2 0x5831
#define XGE_PCI_DEVICE_ID_HERC_1 0x5732
#define XGE_PCI_DEVICE_ID_HERC_2 0x5832
#define XGE_PCI_DEVICE_ID_TITAN_1 0x5733
#define XGE_PCI_DEVICE_ID_TITAN_2 0x5833
#define XGE_DRIVER_NAME "Xge driver"
#define XGE_DRIVER_VENDOR "Neterion, Inc"
#define XGE_CHIP_FAMILY "Xframe"
#define XGE_SUPPORTED_MEDIA_0 "Fiber"
#define XGE_DRIVER_NAME "Xge driver"
#define XGE_DRIVER_VENDOR "Neterion, Inc"
#define XGE_CHIP_FAMILY "Xframe"
#define XGE_SUPPORTED_MEDIA_0 "Fiber"
#include <dev/nxge/include/version.h>
#if defined(__cplusplus)
#define __EXTERN_BEGIN_DECLS extern "C" {
#define __EXTERN_END_DECLS }
#define __EXTERN_BEGIN_DECLS extern "C" {
#define __EXTERN_END_DECLS }
#else
#define __EXTERN_BEGIN_DECLS
#define __EXTERN_END_DECLS
@ -67,7 +59,7 @@ __EXTERN_BEGIN_DECLS
/*---------------------------- DMA attributes ------------------------------*/
/* XGE_OS_DMA_REQUIRES_SYNC - should be defined or
NOT defined in the Makefile */
NOT defined in the Makefile */
#define XGE_OS_DMA_CACHELINE_ALIGNED 0x1
/* Either STREAMING or CONSISTENT should be used.
The combination of both or none is invalid */
@ -77,7 +69,7 @@ __EXTERN_BEGIN_DECLS
/*---------------------------- common stuffs -------------------------------*/
#define XGE_OS_LLXFMT "%llx"
#define XGE_OS_LLXFMT "%llx"
#define XGE_OS_NEWLINE "\n"
#ifdef XGE_OS_MEMORY_CHECK
typedef struct {
@ -87,56 +79,56 @@ typedef struct {
int line;
} xge_os_malloc_t;
#define XGE_OS_MALLOC_CNT_MAX 64*1024
#define XGE_OS_MALLOC_CNT_MAX 64*1024
extern xge_os_malloc_t g_malloc_arr[XGE_OS_MALLOC_CNT_MAX];
extern int g_malloc_cnt;
#define XGE_OS_MEMORY_CHECK_MALLOC(_vaddr, _size, _file, _line) { \
if (_vaddr) { \
int i; \
for (i=0; i<g_malloc_cnt; i++) { \
if (g_malloc_arr[i].ptr == NULL) { \
break; \
} \
} \
if (i == g_malloc_cnt) { \
g_malloc_cnt++; \
if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \
xge_os_bug("g_malloc_cnt exceed %d", \
XGE_OS_MALLOC_CNT_MAX); \
} \
} \
g_malloc_arr[i].ptr = _vaddr; \
g_malloc_arr[i].size = _size; \
g_malloc_arr[i].file = _file; \
g_malloc_arr[i].line = _line; \
for (i=0; i<_size; i++) { \
*((char *)_vaddr+i) = 0x5a; \
} \
int index_mem_chk; \
for (index_mem_chk=0; index_mem_chk < g_malloc_cnt; index_mem_chk++) { \
if (g_malloc_arr[index_mem_chk].ptr == NULL) { \
break; \
} \
} \
if (index_mem_chk == g_malloc_cnt) { \
g_malloc_cnt++; \
if (g_malloc_cnt >= XGE_OS_MALLOC_CNT_MAX) { \
xge_os_bug("g_malloc_cnt exceed %d", \
XGE_OS_MALLOC_CNT_MAX); \
} \
} \
g_malloc_arr[index_mem_chk].ptr = _vaddr; \
g_malloc_arr[index_mem_chk].size = _size; \
g_malloc_arr[index_mem_chk].file = _file; \
g_malloc_arr[index_mem_chk].line = _line; \
for (index_mem_chk=0; index_mem_chk<_size; index_mem_chk++) { \
*((char *)_vaddr+index_mem_chk) = 0x5a; \
} \
} \
}
#define XGE_OS_MEMORY_CHECK_FREE(_vaddr, _check_size) { \
int i; \
for (i=0; i<XGE_OS_MALLOC_CNT_MAX; i++) { \
if (g_malloc_arr[i].ptr == _vaddr) { \
g_malloc_arr[i].ptr = NULL; \
if(_check_size && g_malloc_arr[i].size!=_check_size) { \
xge_os_printf("OSPAL: freeing with wrong " \
"size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \
(int)_check_size, \
g_malloc_arr[i].file, \
g_malloc_arr[i].line, \
(unsigned long long)(ulong_t) \
g_malloc_arr[i].ptr, \
g_malloc_arr[i].size); \
} \
break; \
} \
int index_mem_chk; \
for (index_mem_chk=0; index_mem_chk < XGE_OS_MALLOC_CNT_MAX; index_mem_chk++) { \
if (g_malloc_arr[index_mem_chk].ptr == _vaddr) { \
g_malloc_arr[index_mem_chk].ptr = NULL; \
if(_check_size && g_malloc_arr[index_mem_chk].size!=_check_size) { \
xge_os_printf("OSPAL: freeing with wrong " \
"size %d! allocated at %s:%d:"XGE_OS_LLXFMT":%d", \
(int)_check_size, \
g_malloc_arr[index_mem_chk].file, \
g_malloc_arr[index_mem_chk].line, \
(unsigned long long)(ulong_t) \
g_malloc_arr[index_mem_chk].ptr, \
g_malloc_arr[index_mem_chk].size); \
} \
break; \
} \
} \
if (i == XGE_OS_MALLOC_CNT_MAX) { \
xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \
(unsigned long long)(ulong_t)_vaddr); \
if (index_mem_chk == XGE_OS_MALLOC_CNT_MAX) { \
xge_os_printf("OSPAL: ptr "XGE_OS_LLXFMT" not found!", \
(unsigned long long)(ulong_t)_vaddr); \
} \
}
#else

View File

@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xge-list.h
*
* Description: Generic bi-directional linked list implementation
*
* Created: 14 May 2004
*/
#ifndef XGE_LIST_H
#define XGE_LIST_H
@ -76,9 +68,9 @@ static inline void xge_list_init (xge_list_t *header)
*/
static inline int xge_list_is_empty(xge_list_t *header)
{
xge_assert(header != NULL);
xge_assert(header != NULL);
return header->next == header;
return header->next == header;
}
/**
@ -96,9 +88,9 @@ static inline xge_list_t *xge_list_first_get(xge_list_t *header)
xge_assert(header->prev != NULL);
if(header->next == header)
return NULL;
return NULL;
else
return header->next;
return header->next;
}
/**
@ -131,7 +123,7 @@ static inline void xge_list_remove(xge_list_t *item)
* See also: xge_list_remove(), xge_list_insert_before(), xge_list_t{}.
*/
static inline void xge_list_insert (xge_list_t *new_item,
xge_list_t *prev_item)
xge_list_t *prev_item)
{
xge_assert(new_item != NULL);
xge_assert(prev_item != NULL);
@ -151,7 +143,7 @@ static inline void xge_list_insert (xge_list_t *new_item,
* Insert new item (new_item) before given item (next_item).
*/
static inline void xge_list_insert_before (xge_list_t *new_item,
xge_list_t *next_item)
xge_list_t *next_item)
{
xge_assert(new_item != NULL);
xge_assert(next_item != NULL);
@ -165,34 +157,34 @@ static inline void xge_list_insert_before (xge_list_t *new_item,
#define xge_list_for_each(_p, _h) \
for (_p = (_h)->next, xge_os_prefetch(_p->next); _p != (_h); \
_p = _p->next, xge_os_prefetch(_p->next))
_p = _p->next, xge_os_prefetch(_p->next))
#define xge_list_for_each_safe(_p, _n, _h) \
for (_p = (_h)->next, _n = _p->next; _p != (_h); \
_p = _n, _n = _p->next)
for (_p = (_h)->next, _n = _p->next; _p != (_h); \
_p = _n, _n = _p->next)
#ifdef __GNUC__
/**
* xge_container_of - Given a member, return the containing structure.
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
* Cast a member of a structure out to the containing structure.
*/
#define xge_container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );})
#define xge_container_of(ptr, type, member) ({ \
__typeof( ((type *)0)->member ) *__mptr = (ptr); \
(type *)(void *)( (char *)__mptr - ((size_t) &((type *)0)->member) );})
#else
/* type unsafe version */
#define xge_container_of(ptr, type, member) \
((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member)))
((type*)(void*)((char*)(ptr) - ((size_t) &((type *)0)->member)))
#endif
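/*
 * Editorial illustration (not part of this header): given a pointer to an
 * embedded xge_list_t member, xge_container_of() recovers the enclosing
 * structure.  The structure and function names below are hypothetical.
 */
#if 0   /* illustrative only */
struct xge_example {
        int        payload;
        xge_list_t node;        /* linked into some xge_list_t-headed list */
};

static inline struct xge_example *
xge_example_from_node(xge_list_t *item)
{
        /* item points at ->node; step back to the start of the struct. */
        return xge_container_of(item, struct xge_example, node);
}
#endif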
/**
* xge_offsetof - Offset of the member in the containing structure.
* @t: struct name.
* @m: the name of the member within the struct.
* @t: struct name.
* @m: the name of the member within the struct.
*
* Return the offset of the member @m in the structure @t.
*/

View File

@ -26,15 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xge-os-pal.h
*
* Description: top-level header file. works just like switching between
* os-depndent parts
*
* Created: 6st May 2004
*/
#ifndef XGE_OS_PAL_H
#define XGE_OS_PAL_H
@ -46,10 +37,6 @@ __EXTERN_BEGIN_DECLS
/* platform specific header */
#include <dev/nxge/xge-osdep.h>
#ifdef XGEHAL_RNIC
#define IN
#define OUT
#endif
#if !defined(XGE_OS_PLATFORM_64BIT) && !defined(XGE_OS_PLATFORM_32BIT)
#error "either 32bit or 64bit switch must be defined!"
@ -60,20 +47,20 @@ __EXTERN_BEGIN_DECLS
#endif
#if defined(XGE_OS_PLATFORM_64BIT)
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a5a5a5a5a
#else
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a
#define XGE_OS_MEMORY_DEADCODE_PAT 0x5a5a5a5a
#endif
#define XGE_OS_TRACE_MSGBUF_MAX 512
#define XGE_OS_TRACE_MSGBUF_MAX 512
typedef struct xge_os_tracebuf_t {
int wrapped_once; /* circular buffer been wrapped */
int timestamp; /* whether timestamps are enabled */
volatile int offset; /* offset within the tracebuf */
int size; /* total size of trace buffer */
char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */
int msgbuf_max; /* actual size of msg buffer */
char *data; /* pointer to data buffer */
int wrapped_once; /* circular buffer been wrapped */
int timestamp; /* whether timestamps are enabled */
volatile int offset; /* offset within the tracebuf */
int size; /* total size of trace buffer */
char msg[XGE_OS_TRACE_MSGBUF_MAX]; /* each individual buffer */
int msgbuf_max; /* actual size of msg buffer */
char *data; /* pointer to data buffer */
} xge_os_tracebuf_t;
extern xge_os_tracebuf_t *g_xge_os_tracebuf;
@ -86,42 +73,42 @@ extern char *dmesg_start;
int msgsize = xge_os_strlen(tb->msg) + 2; \
int offset = tb->offset; \
if (msgsize != 2 && msgsize < tb->msgbuf_max) { \
int leftsize = tb->size - offset; \
if ((msgsize + tb->msgbuf_max) > leftsize) { \
xge_os_memzero(tb->data + offset, leftsize); \
offset = 0; \
tb->wrapped_once = 1; \
} \
xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \
*(tb->data + offset + msgsize-1) = '\n'; \
*(tb->data + offset + msgsize) = 0; \
offset += msgsize; \
tb->offset = offset; \
dmesg_start = tb->data + offset; \
*tb->msg = 0; \
int leftsize = tb->size - offset; \
if ((msgsize + tb->msgbuf_max) > leftsize) { \
xge_os_memzero(tb->data + offset, leftsize); \
offset = 0; \
tb->wrapped_once = 1; \
} \
xge_os_memcpy(tb->data + offset, tb->msg, msgsize-1); \
*(tb->data + offset + msgsize-1) = '\n'; \
*(tb->data + offset + msgsize) = 0; \
offset += msgsize; \
tb->offset = offset; \
dmesg_start = tb->data + offset; \
*tb->msg = 0; \
} \
}
#define xge_os_vatrace(tb, fmt) { \
if (tb != NULL) { \
char *_p = tb->msg; \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
_p = tb->msg + xge_os_strlen(tb->msg); \
} \
xge_os_vasprintf(_p, fmt); \
__xge_trace(tb); \
char *_p = tb->msg; \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
_p = tb->msg + xge_os_strlen(tb->msg); \
} \
xge_os_vasprintf(_p, fmt); \
__xge_trace(tb); \
} \
}
#ifdef __GNUC__
#define xge_os_trace(tb, fmt...) { \
if (tb != NULL) { \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
} \
xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \
__xge_trace(tb); \
if (tb->timestamp) { \
xge_os_timestamp(tb->msg); \
} \
xge_os_sprintf(tb->msg + xge_os_strlen(tb->msg), fmt); \
__xge_trace(tb); \
} \
}
#endif /* __GNUC__ */

View File

@ -1,614 +0,0 @@
/*-
* Copyright (c) 2002-2007 Neterion, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* FileName : xge-os-template.h
*
* Description: Template for creating platform-dependent "glue" code.
*
* Created: 6 May 2004
*/
#ifndef XGE_OS_TEMPLATE_H
#define XGE_OS_TEMPLATE_H
#ifndef TEMPLATE
# error "should not be compiled for platforms other than TEMPLATE..."
#endif
/* ------------------------- includes and defines ------------------------- */
/*
* Note:
*
* - on some operating systems like Linux & FreeBSD, there is a macro
* by using which it is possible to determine endiennes automatically
*/
#define XGE_OS_HOST_BIG_ENDIAN TEMPLATE
#define XGE_OS_HOST_PAGE_SIZE TEMPLATE
/* ---------------------- fixed size primitive types -----------------------*/
/*
* Note:
*
* - u## - means ## bits unsigned int/long
* - all names must be preserved since HAL using them.
* - ulong_t is platform specific, i.e. for 64bit - 64bit size, for
* 32bit - 32bit size
*/
#define TEMPLATE u8
#define TEMPLATE u16
#define TEMPLATE u32
#define TEMPLATE u64
#define TEMPLATE ulong_t
#define TEMPLATE ptrdiff_t
#define TEMPLATE dma_addr_t
#define TEMPLATE spinlock_t
typedef TEMPLATE pci_dev_h;
typedef TEMPLATE pci_reg_h;
typedef TEMPLATE pci_dma_h;
typedef TEMPLATE pci_irq_h;
typedef TEMPLATE pci_cfg_h;
typedef TEMPLATE pci_dma_acc_h;
/* -------------------------- "libc" functionality -------------------------*/
/*
* Note:
*
* - "libc" functionality maps one-to-one to be posix-like
*/
/* Note: use typedef: xge_os_memzero(void* mem, int size); */
#define xge_os_memzero TEMPLATE
/* Note: the 1st argument MUST be destination, like in:
* void *memcpy(void *dest, const void *src, size_t n);
*/
#define xge_os_memcpy TEMPLATE
/* Note: should accept format (the 1st argument) and a variable
* number of arguments thereafter.. */
#define xge_os_printf(fmt...) TEMPLATE
#define xge_os_vasprintf(buf, fmt...) TEMPLATE
#define xge_os_sprintf(buf, fmt, ...) TEMPLATE
#define xge_os_timestamp(buf) TEMPLATE
#define xge_os_println TEMPLATE
/* -------------------- synchronization primitives -------------------------*/
/*
* Note:
*
* - use spin_lock in interrupts or in threads when there is no races
* with interrupt
* - use spin_lock_irqsave in threads if there is a race with interrupt
* - use spin_lock_irqsave for nested locks
*/
/*
* Initialize the spin lock.
*/
#define xge_os_spin_lock_init(lockp, ctxh) TEMPLATE
/*
* Initialize the spin lock (IRQ version).
*/
#define xge_os_spin_lock_init_irq(lockp, ctxh) TEMPLATE
/*
* Destroy the lock.
*/
#define xge_os_spin_lock_destroy(lockp, ctxh) TEMPLATE
/*
* Destroy the lock (IRQ version).
*/
#define xge_os_spin_lock_destroy_irq(lockp, ctxh) TEMPLATE
/*
* Acquire the lock.
*/
#define xge_os_spin_lock(lockp) TEMPLATE
/*
* Release the lock.
*/
#define xge_os_spin_unlock(lockp) TEMPLATE
/*
* Acquire the lock(IRQ version).
*/
#define xge_os_spin_lock_irq(lockp, flags) TEMPLATE
/*
* Release the lock(IRQ version).
*/
#define xge_os_spin_unlock_irq(lockp, flags) TEMPLATE
/*
* Write memory barrier.
*/
#define xge_os_wmb() TEMPLATE
/*
* Delay (in micro seconds).
*/
#define xge_os_udelay(us) TEMPLATE
/*
* Delay (in milli seconds).
*/
#define xge_os_mdelay(ms) TEMPLATE
/*
* Compare and exchange.
*/
#define xge_os_cmpxchg(targetp, cmp, newval) TEMPLATE
/* ------------------------- misc primitives -------------------------------*/
#define xge_os_prefetch TEMPLATE
#define xge_os_prefetchw TEMPLATE
#define xge_os_bug(fmt...) TEMPLATE
/* -------------------------- compiler stuffs ------------------------------*/
#define __xge_os_attr_cacheline_aligned TEMPLATE
/* ---------------------- memory primitives --------------------------------*/
/**
* xge_os_malloc - Allocate non DMA-able memory.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @size: Size to allocate.
*
* Allocate @size bytes of memory. This allocation can sleep, and
* therefore, and therefore it requires process context. In other words,
* xge_os_malloc() cannot be called from the interrupt context.
* Use xge_os_free() to free the allocated block.
*
* Returns: Pointer to allocated memory, NULL - on failure.
*
* See also: xge_os_free().
*/
static inline void *xge_os_malloc(IN pci_dev_h pdev,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_free - Free non DMA-able memory.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @vaddr: Address of the allocated memory block.
* @size: Some OS's require to provide size on free
*
* Free the memory area obtained via xge_os_malloc().
* This call may also sleep, and therefore it cannot be used inside
* interrupt.
*
* See also: xge_os_malloc().
*/
static inline void xge_os_free(IN pci_dev_h pdev,
IN const void *vaddr,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_vaddr - Get Virtual address for the given physical address.
* @pdev: Device context. Some OSs require device context to perform
* operations on memory.
* @vaddr: Physical Address of the memory block.
* @size: Some OS's require to provide size
*
* Get the virtual address for physical address.
* This call may also sleep, and therefore it cannot be used inside
* interrupt.
*
* See also: xge_os_malloc().
*/
static inline void xge_os_vaddr(IN pci_dev_h pdev,
IN const void *vaddr,
IN unsigned long size)
{ TEMPLATE; }
/**
* xge_os_dma_malloc - Allocate DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @size: Size (in bytes) to allocate.
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
* @p_dmah: Handle used to map the memory onto the corresponding device memory
* space. See xge_os_dma_map(). The handle is an out-parameter
* returned by the function.
* @p_dma_acch: One more DMA handle used subsequently to free the
* DMA object (via xge_os_dma_free()).
* Note that this and the previous handle have
* physical meaning for Solaris; on Windows and Linux the
* corresponding value will be simply a pointer to PCI device.
* The value is returned by this function.
*
* Allocate DMA-able contiguous memory block of the specified @size.
* This memory can be subsequently freed using xge_os_dma_free().
* Note: can be used inside interrupt context.
*
* Returns: Pointer to allocated memory(DMA-able), NULL on failure.
*
*/
static inline void *xge_os_dma_malloc(IN pci_dev_h pdev,
IN unsigned long size,
IN int dma_flags,
OUT pci_dma_h *p_dmah,
OUT pci_dma_acc_h *p_dma_acch)
{ TEMPLATE; }
/**
* xge_os_dma_free - Free previously allocated DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @vaddr: Virtual address of the DMA-able memory.
* @p_dma_acch: DMA handle used to free the resource.
* @p_dmah: DMA handle used for mapping. See xge_os_dma_malloc().
*
* Free DMA-able memory originally allocated by xge_os_dma_malloc().
* Note: can be used inside interrupt.
* See also: xge_os_dma_malloc().
*/
static inline void xge_os_dma_free (IN pci_dev_h pdev,
IN const void *vaddr,
IN pci_dma_acc_h *p_dma_acch,
IN pci_dma_h *p_dmah)
{ TEMPLATE; }
/* ----------------------- io/pci/dma primitives ---------------------------*/
#define XGE_OS_DMA_DIR_TODEVICE TEMPLATE
#define XGE_OS_DMA_DIR_FROMDEVICE TEMPLATE
#define XGE_OS_DMA_DIR_BIDIRECTIONAL TEMPLATE
/**
* xge_os_pci_read8 - Read one byte from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the result.
*
* Read byte value from the specified @regh PCI configuration space at the
* specified offset = @where.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read8(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u8 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write8 - Write one byte into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write byte value into the specified PCI configuration space
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write8(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u8 val)
{ TEMPLATE; }
/**
* xge_os_pci_read16 - Read 16bit word from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the 16bit result.
*
* Read 16bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read16(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u16 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write16 - Write 16bit word into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 16bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write16(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u16 val)
{ TEMPLATE; }
/**
* xge_os_pci_read32 - Read 32bit word from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of 32bit result.
*
* Read 32bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_read32(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u32 *val)
{ TEMPLATE; }
/**
* xge_os_pci_write32 - Write 32bit word into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 32bit value into the specified @offset in PCI
* configuration space.
* Returns: 0 - success, non-zero - failure.
*/
static inline int xge_os_pci_write32(IN pci_dev_h pdev,
IN pci_cfg_h cfgh,
IN int where,
IN u32 val)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read8 - Read 1 byte from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO..
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 1 byte value read from the specified (mapped) memory space address.
*/
static inline u8 xge_os_pio_mem_read8(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write64 - Write 1 byte into device memory mapped
* space.
* @pdev: Device context. Some OSs require device context to perform
* PIO and/or config space IO..
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write byte value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write8(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u8 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read16 - Read 16bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 16bit value read from the specified (mapped) memory space address.
*/
static inline u16 xge_os_pio_mem_read16(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{
TEMPLATE; }
/**
* xge_os_pio_mem_write16 - Write 16bit into device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 16bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write16(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u16 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read32 - Read 32bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 32bit value read from the specified (mapped) memory space address.
*/
static inline u32 xge_os_pio_mem_read32(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write32 - Write 32bit into device memory space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 32bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write32(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u32 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_read64 - Read 64bit from device memory mapped space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 64bit value read from the specified (mapped) memory space address.
*/
static inline u64 xge_os_pio_mem_read64(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_pio_mem_write64 - Write 64bit into device memory space.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 64bit value into the specified (mapped) device memory space.
*/
static inline void xge_os_pio_mem_write64(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN u64 val,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_flush_bridge - Flush the bridge.
* @pdev: Device context. Some OSs require device context to perform
* PIO.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Flush the bridge.
*/
static inline void xge_os_flush_bridge(IN pci_dev_h pdev,
IN pci_reg_h regh,
IN void *addr)
{ TEMPLATE; }
/**
* xge_os_dma_map - Map DMA-able memory block to, or from, or
* to-and-from device.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @vaddr: Virtual address of the DMA-able memory.
* @size: Size (in bytes) to be mapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED,
* XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT
* Note that the last two flags are mutually exclusive.
*
* Map a single memory block.
*
* Returns: DMA address of the memory block,
* XGE_OS_INVALID_DMA_ADDR on failure.
*
* See also: xge_os_dma_malloc(), xge_os_dma_unmap(),
* xge_os_dma_sync().
*/
static inline dma_addr_t xge_os_dma_map(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN void *vaddr,
IN size_t size,
IN int dir,
IN int dma_flags)
{ TEMPLATE; }
/**
* xge_os_dma_unmap - Unmap DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
* @size: Size (in bytes) to be unmapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Unmap a single DMA-able memory block that was previously mapped
* using xge_os_dma_map().
* See also: xge_os_dma_malloc(), xge_os_dma_map().
*/
static inline void xge_os_dma_unmap(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN dma_addr_t dma_addr,
IN size_t size,
IN int dir)
{ TEMPLATE; }
/**
* xge_os_dma_sync - Synchronize mapped memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @dma_addr: DMA address of the block. Obtained via xge_os_dma_map().
* @dma_offset: Offset from start of the blocke. Used by Solaris only.
* @length: Size of the block.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Make physical and CPU memory consistent for a single
* streaming mode DMA translation.
* This API compiles to NOP on cache-coherent platforms.
* On non cache-coherent platforms, depending on the direction
* of the "sync" operation, this API will effectively
* either invalidate CPU cache (that might contain old data),
* or flush CPU cache to update physical memory.
* See also: xge_os_dma_malloc(), xge_os_dma_map(),
* xge_os_dma_unmap().
*/
static inline void xge_os_dma_sync(IN pci_dev_h pdev,
IN pci_dma_h dmah,
IN dma_addr_t dma_addr,
IN u64 dma_offset,
IN size_t length,
IN int dir)
{ TEMPLATE; }
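A hypothetical sketch of the map/sync/unmap sequence documented above (not part of this commit); pdev, dmah, buf and len are placeholders, with dmah assumed to come from xge_os_dma_malloc():

/*
 * Hypothetical sketch only: map a CPU buffer for transmission, make the
 * CPU's writes visible to the device, then unmap after completion.
 */
static int
example_dma_to_device(pci_dev_h pdev, pci_dma_h dmah, void *buf, size_t len)
{
    dma_addr_t dma;

    dma = xge_os_dma_map(pdev, dmah, buf, len,
        XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
    if (dma == XGE_OS_INVALID_DMA_ADDR)
        return (-1);
    /* Flush CPU caches so the device sees the freshly written data. */
    xge_os_dma_sync(pdev, dmah, dma, 0, len, XGE_OS_DMA_DIR_TODEVICE);
    /* ... hand "dma" to the hardware and wait for the completion ... */
    xge_os_dma_unmap(pdev, dmah, dma, len, XGE_OS_DMA_DIR_TODEVICE);
    return (0);
}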
#endif /* XGE_OS_TEMPLATE_H */


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xge-queue.h
*
* Description: serialized event queue
*
* Created: 7 June 2004
*/
#ifndef XGE_QUEUE_H
#define XGE_QUEUE_H
@ -44,8 +36,8 @@
__EXTERN_BEGIN_DECLS
#define XGE_QUEUE_BUF_SIZE 0x1000
#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16
#define XGE_QUEUE_BUF_SIZE 0x1000
#define XGE_DEFAULT_EVENT_MAX_DATA_SIZE 16
/**
* enum xge_queue_status_e - Enumerates return codes of the xge_queue
@ -61,11 +53,11 @@ __EXTERN_BEGIN_DECLS
* and xge_queue_produce() APIs.
*/
typedef enum xge_queue_status_e {
XGE_QUEUE_OK = 0,
XGE_QUEUE_IS_FULL = 1,
XGE_QUEUE_IS_EMPTY = 2,
XGE_QUEUE_OUT_OF_MEMORY = 3,
XGE_QUEUE_NOT_ENOUGH_SPACE = 4
XGE_QUEUE_OK = 0,
XGE_QUEUE_IS_FULL = 1,
XGE_QUEUE_IS_EMPTY = 2,
XGE_QUEUE_OUT_OF_MEMORY = 3,
XGE_QUEUE_NOT_ENOUGH_SPACE = 4
} xge_queue_status_e;
typedef void* xge_queue_h;
@ -86,11 +78,11 @@ typedef void* xge_queue_h;
* See also: xge_queue_t{}.
*/
typedef struct xge_queue_item_t {
xge_list_t item;
xge_hal_event_e event_type;
int data_size;
int is_critical;
void *context;
xge_list_t item;
xge_hal_event_e event_type;
int data_size;
int is_critical;
void *context;
} xge_queue_item_t;
/**
@ -114,7 +106,7 @@ typedef void (*xge_queued_f) (void *data, int event_type);
* produce/consume operations.
* @lock: Lock for queue operations (synchronization purposes).
* @pages_initial:Number of pages to be initially allocated at the time
* of queue creation.
* of queue creation.
* @pages_max: Max number of pages that can be allocated in the queue.
* @pages_current: Number of pages currently allocated
* @list_head: Points to the list of queue elements that are produced, but yet
@ -135,26 +127,26 @@ typedef void (*xge_queued_f) (void *data, int event_type);
* See also: xge_queue_item_t{}.
*/
typedef struct xge_queue_t {
void *start_ptr;
void *end_ptr;
void *head_ptr;
void *tail_ptr;
spinlock_t lock;
unsigned int pages_initial;
unsigned int pages_max;
unsigned int pages_current;
xge_list_t list_head;
pci_dev_h pdev;
pci_irq_h irqh;
xge_queued_f queued_func;
void *queued_data;
int has_critical_event;
void *start_ptr;
void *end_ptr;
void *head_ptr;
void *tail_ptr;
spinlock_t lock;
unsigned int pages_initial;
unsigned int pages_max;
unsigned int pages_current;
xge_list_t list_head;
pci_dev_h pdev;
pci_irq_h irqh;
xge_queued_f queued_func;
void *queued_data;
int has_critical_event;
} xge_queue_t;
/* ========================== PUBLIC API ================================= */
xge_queue_h xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
int pages_max, xge_queued_f queued_func, void *queued_data);
int pages_max, xge_queued_f queued_func, void *queued_data);
void xge_queue_destroy(xge_queue_h queueh);
@ -162,7 +154,7 @@ void* xge_queue_item_data(xge_queue_item_t *item);
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
int is_critical, const int data_size, void *data);
int is_critical, const int data_size, void *data);
static inline xge_queue_status_e
xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) {
@ -170,7 +162,7 @@ xge_queue_produce_context(xge_queue_h queueh, int event_type, void *context) {
}
xge_queue_status_e xge_queue_consume(xge_queue_h queueh, int data_max_size,
xge_queue_item_t *item);
xge_queue_item_t *item);
void xge_queue_flush(xge_queue_h queueh);
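A hypothetical sketch of the producer/consumer API above (not part of this commit); the page counts and the on_queued callback are illustrative, and the consume buffer layout is an assumption:

/*
 * Hypothetical sketch only: create a serialized event queue, produce a
 * link-up event (e.g. from interrupt context) and consume it later from
 * a deferred context.
 */
static void
example_event_queue(pci_dev_h pdev, pci_irq_h irqh, xge_queued_f on_queued)
{
    struct {
        xge_queue_item_t item;
        char data[XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
    } buf;
    xge_queue_h qh;

    qh = xge_queue_create(pdev, irqh, 2 /* initial pages */,
        8 /* max pages */, on_queued, NULL);
    if (qh == NULL)
        return;
    (void) xge_queue_produce_context(qh, XGE_HAL_EVENT_LINK_IS_UP, NULL);
    if (xge_queue_consume(qh, sizeof(buf), &buf.item) == XGE_QUEUE_OK) {
        /* Event payload, if any, follows the item header. */
        void *data = xge_queue_item_data(&buf.item);
        (void) data;
    }
    xge_queue_destroy(qh);
}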


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-channel.h
*
* Description: HAL channel object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_CHANNEL_H
#define XGE_HAL_CHANNEL_H
@ -80,9 +72,9 @@ typedef enum xge_hal_channel_type_e {
* Channel opening flags. Reserved for future usage.
*/
typedef enum xge_hal_channel_flag_e {
XGE_HAL_CHANNEL_FLAG_NONE = 0x0,
XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1,
XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2
XGE_HAL_CHANNEL_FLAG_NONE = 0x0,
XGE_HAL_CHANNEL_FLAG_USE_TX_LOCK = 0x1,
XGE_HAL_CHANNEL_FLAG_FREE_RXD = 0x2
} xge_hal_channel_flag_e;
/**
@ -101,10 +93,10 @@ typedef enum xge_hal_channel_flag_e {
* See also: xge_hal_channel_dtr_term_f{}.
*/
typedef enum xge_hal_dtr_state_e {
XGE_HAL_DTR_STATE_NONE = 0,
XGE_HAL_DTR_STATE_AVAIL = 1,
XGE_HAL_DTR_STATE_POSTED = 2,
XGE_HAL_DTR_STATE_FREED = 3
XGE_HAL_DTR_STATE_NONE = 0,
XGE_HAL_DTR_STATE_AVAIL = 1,
XGE_HAL_DTR_STATE_POSTED = 2,
XGE_HAL_DTR_STATE_FREED = 3
} xge_hal_dtr_state_e;
/**
@ -120,8 +112,8 @@ typedef enum xge_hal_dtr_state_e {
* the memory (including DMA-able memory) used for channel operation.
*/
typedef enum xge_hal_channel_reopen_e {
XGE_HAL_CHANNEL_RESET_ONLY = 1,
XGE_HAL_CHANNEL_OC_NORMAL = 2
XGE_HAL_CHANNEL_RESET_ONLY = 1,
XGE_HAL_CHANNEL_OC_NORMAL = 2
} xge_hal_channel_reopen_e;
/**
@ -168,8 +160,8 @@ typedef enum xge_hal_channel_reopen_e {
* xge_hal_ring_dtr_next_completed(), xge_hal_channel_dtr_term_f{}.
*/
typedef xge_hal_status_e (*xge_hal_channel_callback_f)
(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u8 t_code, void *userdata);
(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u8 t_code, void *userdata);
/**
* function xge_hal_channel_dtr_init_f - Initialize descriptor callback.
@ -192,11 +184,11 @@ typedef xge_hal_status_e (*xge_hal_channel_callback_f)
* See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_term_f{}.
*/
typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f)
(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
int index,
void *userdata,
xge_hal_channel_reopen_e reopen);
(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
int index,
void *userdata,
xge_hal_channel_reopen_e reopen);
/**
* function xge_hal_channel_dtr_term_f - Terminate descriptor callback.
@ -220,10 +212,10 @@ typedef xge_hal_status_e (*xge_hal_channel_dtr_init_f)
* See also: xge_hal_channel_attr_t{}, xge_hal_channel_dtr_init_f{}.
*/
typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
xge_hal_dtr_state_e state,
void *userdata,
xge_hal_channel_reopen_e reopen);
xge_hal_dtr_h dtrh,
xge_hal_dtr_state_e state,
void *userdata,
xge_hal_channel_reopen_e reopen);
/**
@ -257,18 +249,15 @@ typedef void (*xge_hal_channel_dtr_term_f) (xge_hal_channel_h channelh,
* Usage: See ex_open{}.
*/
typedef struct xge_hal_channel_attr_t {
xge_hal_channel_type_e type;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
int post_qid;
int compl_qid;
xge_hal_channel_callback_f callback;
xge_hal_channel_dtr_init_f dtr_init;
xge_hal_channel_dtr_term_f dtr_term;
void *userdata;
int per_dtr_space;
xge_hal_channel_flag_e flags;
xge_hal_channel_type_e type;
int post_qid;
int compl_qid;
xge_hal_channel_callback_f callback;
xge_hal_channel_dtr_init_f dtr_init;
xge_hal_channel_dtr_term_f dtr_term;
void *userdata;
int per_dtr_space;
xge_hal_channel_flag_e flags;
} xge_hal_channel_attr_t;
/*
@ -353,73 +342,70 @@ typedef struct xge_hal_channel_attr_t {
*/
typedef struct {
/* complete/free section */
xge_list_t item;
xge_hal_channel_callback_f callback;
void **free_arr;
int length;
int free_length;
xge_list_t item;
xge_hal_channel_callback_f callback;
void **free_arr;
int length;
int free_length;
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ) || \
defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
spinlock_t free_lock;
defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
spinlock_t free_lock;
#endif
int compl_index;
unsigned int usage_cnt;
unsigned int poll_bytes;
int unused0;
int compl_index;
unsigned int usage_cnt;
unsigned int poll_bytes;
/* reserve/post data path section */
int terminating;
#ifdef __XGE_WIN__
int __xge_os_attr_cacheline_aligned
post_index;
int __xge_os_attr_cacheline_aligned
post_index;
#else
int post_index
__xge_os_attr_cacheline_aligned;
int post_index
__xge_os_attr_cacheline_aligned;
#endif
spinlock_t reserve_lock;
spinlock_t post_lock;
spinlock_t reserve_lock;
spinlock_t post_lock;
void **reserve_arr;
int reserve_length;
int reserve_threshold;
int reserve_top;
void **reserve_arr;
int reserve_length;
int reserve_threshold;
int reserve_top;
int unused1;
/* common section */
xge_hal_device_h devh;
xge_hal_device_h devh;
pci_dev_h pdev;
pci_reg_h regh0;
pci_reg_h regh1;
void *userdata;
void **work_arr;
void **saved_arr;
void **orig_arr;
xge_hal_stats_channel_info_t stats;
pci_reg_h regh0;
pci_reg_h regh1;
void *userdata;
void **work_arr;
void **saved_arr;
void **orig_arr;
xge_hal_stats_channel_info_t stats;
/* slow section */
xge_hal_channel_type_e type;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
int post_qid;
int compl_qid;
xge_hal_channel_flag_e flags;
int reserve_initial;
int reserve_max;
int is_open;
int per_dtr_space;
xge_hal_channel_dtr_term_f dtr_term;
xge_hal_channel_dtr_init_f dtr_init;
xge_hal_channel_type_e type;
int post_qid;
int compl_qid;
xge_hal_channel_flag_e flags;
int reserve_initial;
int reserve_max;
int is_open;
int per_dtr_space;
xge_hal_channel_dtr_term_f dtr_term;
xge_hal_channel_dtr_init_f dtr_init;
/* MSI stuff */
u32 msi_msg;
u8 rti;
u8 tti;
u32 msi_msg;
u8 rti;
u8 tti;
u16 unused2;
/* MSI-X stuff */
u64 msix_address;
u32 msix_data;
int msix_idx;
volatile int in_interrupt;
unsigned int magic;
u64 msix_address;
u32 msix_data;
int msix_idx;
volatile int in_interrupt;
unsigned int magic;
#ifdef __XGE_WIN__
} __xge_os_attr_cacheline_aligned xge_hal_channel_t ;
#else
@ -430,17 +416,14 @@ typedef struct {
xge_hal_status_e
__hal_channel_initialize(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr, void **reserve_arr,
int reserve_initial, int reserve_max, int reserve_threshold);
xge_hal_channel_attr_t *attr, void **reserve_arr,
int reserve_initial, int reserve_max, int reserve_threshold);
void __hal_channel_terminate(xge_hal_channel_h channelh);
xge_hal_channel_t*
__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
#ifdef XGEHAL_RNIC
u32 vp_id,
#endif
xge_hal_channel_type_e type);
xge_hal_channel_type_e type);
void __hal_channel_free(xge_hal_channel_t *channel);
@ -468,7 +451,7 @@ __hal_channel_dtr_dealloc(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int offset);
int offset);
/* ========================== CHANNEL PUBLIC API ========================= */
@ -483,7 +466,7 @@ xge_hal_channel_id(xge_hal_channel_h channelh);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
int copy_size);
int copy_size);
#else /* XGE_FASTPATH_EXTERN */
#define __HAL_STATIC_CHANNEL static
@ -493,14 +476,14 @@ xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
xge_hal_status_e
xge_hal_channel_open(xge_hal_device_h hldev, xge_hal_channel_attr_t *attr,
xge_hal_channel_h *channel,
xge_hal_channel_reopen_e reopen);
xge_hal_channel_h *channel,
xge_hal_channel_reopen_e reopen);
void xge_hal_channel_close(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen);
xge_hal_channel_reopen_e reopen);
void xge_hal_channel_abort(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen);
xge_hal_channel_reopen_e reopen);
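A hypothetical sketch of opening a channel through the attribute block above (not part of this commit); XGE_HAL_CHANNEL_TYPE_FIFO is an assumed xge_hal_channel_type_e value and the callbacks are ULD-supplied placeholders:

/*
 * Hypothetical sketch only: fill xge_hal_channel_attr_t and open a Tx
 * FIFO channel.
 */
static xge_hal_status_e
example_channel_open(xge_hal_device_h hldev, void *lldev,
    xge_hal_channel_callback_f tx_compl_cb,
    xge_hal_channel_dtr_init_f dtr_init_cb,
    xge_hal_channel_dtr_term_f dtr_term_cb,
    xge_hal_channel_h *channelp)
{
    xge_hal_channel_attr_t attr;

    attr.type = XGE_HAL_CHANNEL_TYPE_FIFO;  /* assumed enum value */
    attr.post_qid = 0;
    attr.compl_qid = 0;
    attr.callback = tx_compl_cb;
    attr.dtr_init = dtr_init_cb;
    attr.dtr_term = dtr_term_cb;
    attr.userdata = lldev;
    attr.per_dtr_space = 0;
    attr.flags = XGE_HAL_CHANNEL_FLAG_NONE;

    return xge_hal_channel_open(hldev, &attr, channelp,
        XGE_HAL_CHANNEL_OC_NORMAL);
}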
__EXTERN_END_DECLS

File diff suppressed because it is too large


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-device.h
*
* Description: HAL device object functionality
*
* Created: 14 May 2004
*/
#ifndef XGE_HAL_DEVICE_H
#define XGE_HAL_DEVICE_H
@ -45,18 +37,6 @@
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-stats.h>
#include <dev/nxge/include/xgehal-ring.h>
#ifdef XGEHAL_RNIC
#include "xgehal-common-regs.h"
#include "xgehal-pcicfg-mgmt-regs.h"
#include "xgehal-mrpcim-regs.h"
#include "xgehal-srpcim-regs.h"
#include "xgehal-vpath-regs.h"
#include "xgehal-bitmap.h"
#include "xgehal-virtualpath.h"
#include "xgehal-lbwrapper.h"
#include "xgehal-blockpool.h"
#include "xgehal-regpool.h"
#endif
__EXTERN_BEGIN_DECLS
@ -65,18 +45,18 @@ __EXTERN_BEGIN_DECLS
#define XGE_HAL_CARD_HERC_VPD_ADDR 0x80
#define XGE_HAL_VPD_READ_COMPLETE 0x80
#define XGE_HAL_VPD_BUFFER_SIZE 128
#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS 50
#define XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS 250
#define XGE_HAL_DEVICE_SPDM_READY_WAIT_MAX_MILLIS 250 /* TODO */
#define XGE_HAL_DEVICE_XMSI_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS 500
#define XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS 50
#define XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS 250
#define XGE_HAL_DEVICE_SPDM_READY_WAIT_MAX_MILLIS 250 /* TODO */
#define XGE_HAL_MAGIC 0x12345678
#define XGE_HAL_DEAD 0xDEADDEAD
#define XGE_HAL_MAGIC 0x12345678
#define XGE_HAL_DEAD 0xDEADDEAD
#define XGE_HAL_DUMP_BUF_SIZE 0x4000
#define XGE_HAL_LRO_MAX_BUCKETS 32
#define XGE_HAL_LRO_MAX_BUCKETS 32
/**
* enum xge_hal_card_e - Xframe adapter type.
@ -92,10 +72,10 @@ __EXTERN_BEGIN_DECLS
* See also: xge_hal_device_check_id().
*/
typedef enum xge_hal_card_e {
XGE_HAL_CARD_UNKNOWN = 0,
XGE_HAL_CARD_XENA = 1,
XGE_HAL_CARD_HERC = 2,
XGE_HAL_CARD_TITAN = 3,
XGE_HAL_CARD_UNKNOWN = 0,
XGE_HAL_CARD_XENA = 1,
XGE_HAL_CARD_HERC = 2,
XGE_HAL_CARD_TITAN = 3,
} xge_hal_card_e;
/**
@ -113,15 +93,15 @@ typedef enum xge_hal_card_e {
* mapped memories. Also, includes a pointer to OS-specific PCI device object.
*/
typedef struct xge_hal_device_attr_t {
pci_reg_h regh0;
pci_reg_h regh1;
pci_reg_h regh2;
char *bar0;
char *bar1;
char *bar2;
pci_irq_h irqh;
pci_cfg_h cfgh;
pci_dev_h pdev;
pci_reg_h regh0;
pci_reg_h regh1;
pci_reg_h regh2;
char *bar0;
char *bar1;
char *bar2;
pci_irq_h irqh;
pci_cfg_h cfgh;
pci_dev_h pdev;
} xge_hal_device_attr_t;
/**
@ -140,96 +120,96 @@ typedef enum xge_hal_device_link_state_e {
/**
* enum xge_hal_pci_mode_e - PCI bus speed and mode specific enumeration.
* @XGE_HAL_PCI_33MHZ_MODE: 33 MHZ pci mode.
* @XGE_HAL_PCI_66MHZ_MODE: 66 MHZ pci mode.
* @XGE_HAL_PCIX_M1_66MHZ_MODE: PCIX M1 66MHZ mode.
* @XGE_HAL_PCIX_M1_100MHZ_MODE: PCIX M1 100MHZ mode.
* @XGE_HAL_PCIX_M1_133MHZ_MODE: PCIX M1 133MHZ mode.
* @XGE_HAL_PCIX_M2_66MHZ_MODE: PCIX M2 66MHZ mode.
* @XGE_HAL_PCIX_M2_100MHZ_MODE: PCIX M2 100MHZ mode.
* @XGE_HAL_PCIX_M2_133MHZ_MODE: PCIX M3 133MHZ mode.
* @XGE_HAL_PCIX_M1_RESERVED: PCIX M1 reserved mode.
* @XGE_HAL_PCIX_M1_66MHZ_NS: PCIX M1 66MHZ mode not supported.
* @XGE_HAL_PCIX_M1_100MHZ_NS: PCIX M1 100MHZ mode not supported.
* @XGE_HAL_PCIX_M1_133MHZ_NS: PCIX M1 133MHZ not supported.
* @XGE_HAL_PCIX_M2_RESERVED: PCIX M2 reserved.
* @XGE_HAL_PCIX_533_RESERVED: PCIX 533 reserved.
* @XGE_HAL_PCI_BASIC_MODE: PCI basic mode, XENA specific value.
* @XGE_HAL_PCIX_BASIC_MODE: PCIX basic mode, XENA specific value.
* @XGE_HAL_PCI_INVALID_MODE: Invalid PCI or PCIX mode.
* @XGE_HAL_PCI_33MHZ_MODE: 33 MHZ pci mode.
* @XGE_HAL_PCI_66MHZ_MODE: 66 MHZ pci mode.
* @XGE_HAL_PCIX_M1_66MHZ_MODE: PCIX M1 66MHZ mode.
* @XGE_HAL_PCIX_M1_100MHZ_MODE: PCIX M1 100MHZ mode.
* @XGE_HAL_PCIX_M1_133MHZ_MODE: PCIX M1 133MHZ mode.
* @XGE_HAL_PCIX_M2_66MHZ_MODE: PCIX M2 66MHZ mode.
* @XGE_HAL_PCIX_M2_100MHZ_MODE: PCIX M2 100MHZ mode.
* @XGE_HAL_PCIX_M2_133MHZ_MODE: PCIX M3 133MHZ mode.
* @XGE_HAL_PCIX_M1_RESERVED: PCIX M1 reserved mode.
* @XGE_HAL_PCIX_M1_66MHZ_NS: PCIX M1 66MHZ mode not supported.
* @XGE_HAL_PCIX_M1_100MHZ_NS: PCIX M1 100MHZ mode not supported.
* @XGE_HAL_PCIX_M1_133MHZ_NS: PCIX M1 133MHZ not supported.
* @XGE_HAL_PCIX_M2_RESERVED: PCIX M2 reserved.
* @XGE_HAL_PCIX_533_RESERVED: PCIX 533 reserved.
* @XGE_HAL_PCI_BASIC_MODE: PCI basic mode, XENA specific value.
* @XGE_HAL_PCIX_BASIC_MODE: PCIX basic mode, XENA specific value.
* @XGE_HAL_PCI_INVALID_MODE: Invalid PCI or PCIX mode.
*
*/
typedef enum xge_hal_pci_mode_e {
XGE_HAL_PCI_33MHZ_MODE = 0x0,
XGE_HAL_PCI_66MHZ_MODE = 0x1,
XGE_HAL_PCIX_M1_66MHZ_MODE = 0x2,
XGE_HAL_PCIX_M1_100MHZ_MODE = 0x3,
XGE_HAL_PCIX_M1_133MHZ_MODE = 0x4,
XGE_HAL_PCIX_M2_66MHZ_MODE = 0x5,
XGE_HAL_PCIX_M2_100MHZ_MODE = 0x6,
XGE_HAL_PCIX_M2_133MHZ_MODE = 0x7,
XGE_HAL_PCIX_M1_RESERVED = 0x8,
XGE_HAL_PCIX_M1_66MHZ_NS = 0xA,
XGE_HAL_PCIX_M1_100MHZ_NS = 0xB,
XGE_HAL_PCIX_M1_133MHZ_NS = 0xC,
XGE_HAL_PCIX_M2_RESERVED = 0xD,
XGE_HAL_PCIX_533_RESERVED = 0xE,
XGE_HAL_PCI_BASIC_MODE = 0x10,
XGE_HAL_PCIX_BASIC_MODE = 0x11,
XGE_HAL_PCI_INVALID_MODE = 0x12,
XGE_HAL_PCI_33MHZ_MODE = 0x0,
XGE_HAL_PCI_66MHZ_MODE = 0x1,
XGE_HAL_PCIX_M1_66MHZ_MODE = 0x2,
XGE_HAL_PCIX_M1_100MHZ_MODE = 0x3,
XGE_HAL_PCIX_M1_133MHZ_MODE = 0x4,
XGE_HAL_PCIX_M2_66MHZ_MODE = 0x5,
XGE_HAL_PCIX_M2_100MHZ_MODE = 0x6,
XGE_HAL_PCIX_M2_133MHZ_MODE = 0x7,
XGE_HAL_PCIX_M1_RESERVED = 0x8,
XGE_HAL_PCIX_M1_66MHZ_NS = 0xA,
XGE_HAL_PCIX_M1_100MHZ_NS = 0xB,
XGE_HAL_PCIX_M1_133MHZ_NS = 0xC,
XGE_HAL_PCIX_M2_RESERVED = 0xD,
XGE_HAL_PCIX_533_RESERVED = 0xE,
XGE_HAL_PCI_BASIC_MODE = 0x10,
XGE_HAL_PCIX_BASIC_MODE = 0x11,
XGE_HAL_PCI_INVALID_MODE = 0x12,
} xge_hal_pci_mode_e;
/**
* enum xge_hal_pci_bus_frequency_e - PCI bus frequency enumeration.
* @XGE_HAL_PCI_BUS_FREQUENCY_33MHZ: PCI bus frequency 33MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_250MHZ: PCI bus frequency 250MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value.
* @XGE_HAL_PCI_BUS_FREQUENCY_33MHZ: PCI bus frequency 33MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_66MHZ: PCI bus frequency 66MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_100MHZ: PCI bus frequency 100MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_133MHZ: PCI bus frequency 133MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_200MHZ: PCI bus frequency 200MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_250MHZ: PCI bus frequency 250MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_266MHZ: PCI bus frequency 266MHZ
* @XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN: Unrecognized PCI bus frequency value.
*
*/
typedef enum xge_hal_pci_bus_frequency_e {
XGE_HAL_PCI_BUS_FREQUENCY_33MHZ = 33,
XGE_HAL_PCI_BUS_FREQUENCY_66MHZ = 66,
XGE_HAL_PCI_BUS_FREQUENCY_100MHZ = 100,
XGE_HAL_PCI_BUS_FREQUENCY_133MHZ = 133,
XGE_HAL_PCI_BUS_FREQUENCY_200MHZ = 200,
XGE_HAL_PCI_BUS_FREQUENCY_250MHZ = 250,
XGE_HAL_PCI_BUS_FREQUENCY_266MHZ = 266,
XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN = 0
XGE_HAL_PCI_BUS_FREQUENCY_33MHZ = 33,
XGE_HAL_PCI_BUS_FREQUENCY_66MHZ = 66,
XGE_HAL_PCI_BUS_FREQUENCY_100MHZ = 100,
XGE_HAL_PCI_BUS_FREQUENCY_133MHZ = 133,
XGE_HAL_PCI_BUS_FREQUENCY_200MHZ = 200,
XGE_HAL_PCI_BUS_FREQUENCY_250MHZ = 250,
XGE_HAL_PCI_BUS_FREQUENCY_266MHZ = 266,
XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN = 0
} xge_hal_pci_bus_frequency_e;
/**
* enum xge_hal_pci_bus_width_e - PCI bus width enumeration.
* @XGE_HAL_PCI_BUS_WIDTH_64BIT: 64 bit bus width.
* @XGE_HAL_PCI_BUS_WIDTH_32BIT: 32 bit bus width.
* @XGE_HAL_PCI_BUS_WIDTH_64BIT: 64 bit bus width.
* @XGE_HAL_PCI_BUS_WIDTH_32BIT: 32 bit bus width.
* @XGE_HAL_PCI_BUS_WIDTH_UNKNOWN: unknown bus width.
*
*/
typedef enum xge_hal_pci_bus_width_e {
XGE_HAL_PCI_BUS_WIDTH_64BIT = 0,
XGE_HAL_PCI_BUS_WIDTH_32BIT = 1,
XGE_HAL_PCI_BUS_WIDTH_UNKNOWN = 2,
XGE_HAL_PCI_BUS_WIDTH_64BIT = 0,
XGE_HAL_PCI_BUS_WIDTH_32BIT = 1,
XGE_HAL_PCI_BUS_WIDTH_UNKNOWN = 2,
} xge_hal_pci_bus_width_e;
#if defined (XGE_HAL_CONFIG_LRO)
#define IP_TOTAL_LENGTH_OFFSET 2
#define IP_FAST_PATH_HDR_MASK 0x45
#define TCP_FAST_PATH_HDR_MASK1 0x50
#define TCP_FAST_PATH_HDR_MASK2 0x10
#define TCP_FAST_PATH_HDR_MASK3 0x18
#define IP_SOURCE_ADDRESS_OFFSET 12
#define IP_DESTINATION_ADDRESS_OFFSET 16
#define TCP_DESTINATION_PORT_OFFSET 2
#define TCP_SOURCE_PORT_OFFSET 0
#define TCP_DATA_OFFSET_OFFSET 12
#define TCP_WINDOW_OFFSET 14
#define TCP_SEQUENCE_NUMBER_OFFSET 4
#define TCP_ACKNOWLEDGEMENT_NUMBER_OFFSET 8
#define IP_TOTAL_LENGTH_OFFSET 2
#define IP_FAST_PATH_HDR_MASK 0x45
#define TCP_FAST_PATH_HDR_MASK1 0x50
#define TCP_FAST_PATH_HDR_MASK2 0x10
#define TCP_FAST_PATH_HDR_MASK3 0x18
#define IP_SOURCE_ADDRESS_OFFSET 12
#define IP_DESTINATION_ADDRESS_OFFSET 16
#define TCP_DESTINATION_PORT_OFFSET 2
#define TCP_SOURCE_PORT_OFFSET 0
#define TCP_DATA_OFFSET_OFFSET 12
#define TCP_WINDOW_OFFSET 14
#define TCP_SEQUENCE_NUMBER_OFFSET 4
#define TCP_ACKNOWLEDGEMENT_NUMBER_OFFSET 8
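A hypothetical illustration of how the offset macros above index into raw headers during LRO processing (not part of this commit); the tcph pointer is a placeholder and the value is left in network byte order:

/*
 * Hypothetical sketch only: fetch the TCP sequence number from a raw TCP
 * header using the offsets defined above.  Real code must also respect
 * alignment and byte order.
 */
static u32
example_tcp_seq(u8 *tcph)
{
    return *(u32 *)(void *)(tcph + TCP_SEQUENCE_NUMBER_OFFSET);
}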
typedef struct tcplro {
u16 source;
@ -263,43 +243,43 @@ typedef struct iplro {
typedef struct lro {
/* non-linear: contains scatter-gather list of
xframe-mapped received buffers */
OS_NETSTACK_BUF os_buf;
OS_NETSTACK_BUF os_buf_end;
OS_NETSTACK_BUF os_buf;
OS_NETSTACK_BUF os_buf_end;
/* link layer header of the first frame;
remains intact throughout the processing */
u8 *ll_hdr;
u8 *ll_hdr;
/* IP header - gets _collapsed_ */
iplro_t *ip_hdr;
iplro_t *ip_hdr;
/* transport header - gets _collapsed_ */
tcplro_t *tcp_hdr;
tcplro_t *tcp_hdr;
/* Next tcp sequence number */
u32 tcp_next_seq_num;
u32 tcp_next_seq_num;
/* Current tcp seq & ack */
u32 tcp_seq_num;
u32 tcp_ack_num;
u32 tcp_seq_num;
u32 tcp_ack_num;
/* total number of accumulated (so far) frames */
int sg_num;
int sg_num;
/* total data length */
int total_length;
int total_length;
/* receive side hash value, available from Hercules */
u32 rth_value;
u32 rth_value;
/* In use */
u8 in_use;
u8 in_use;
/* Total length of the fragments clubbed with the initial frame */
u32 frags_len;
u32 frags_len;
/* LRO frame contains time stamp, if (ts_off != -1) */
int ts_off;
int ts_off;
} lro_t;
#endif
@ -323,9 +303,9 @@ typedef struct xge_hal_spdm_entry_t {
#if defined(XGE_HAL_CONFIG_LRO)
typedef struct {
lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS];
int lro_next_idx;
lro_t *lro_recent;
lro_t lro_pool[XGE_HAL_LRO_MAX_BUCKETS];
int lro_next_idx;
lro_t *lro_recent;
} xge_hal_lro_desc_t;
#endif
/*
@ -334,8 +314,8 @@ typedef struct {
* Represents the VPD capability structure
*/
typedef struct xge_hal_vpd_data_t {
u8 product_name[XGE_HAL_VPD_LENGTH];
u8 serial_num[XGE_HAL_VPD_LENGTH];
u8 product_name[XGE_HAL_VPD_LENGTH];
u8 serial_num[XGE_HAL_VPD_LENGTH];
} xge_hal_vpd_data_t;
/*
@ -344,82 +324,75 @@ typedef struct xge_hal_vpd_data_t {
* HAL device object. Represents Xframe.
*/
typedef struct {
unsigned int magic;
pci_reg_h regh0;
pci_reg_h regh1;
pci_reg_h regh2;
char *bar0;
char *isrbar0;
char *bar1;
char *bar2;
pci_irq_h irqh;
pci_cfg_h cfgh;
pci_dev_h pdev;
xge_hal_pci_config_t pci_config_space;
xge_hal_pci_config_t pci_config_space_bios;
xge_hal_device_config_t config;
xge_list_t free_channels;
xge_list_t fifo_channels;
xge_list_t ring_channels;
#ifdef XGEHAL_RNIC
__hal_bitmap_entry_t bitmap_table[XGE_HAL_MAX_BITMAP_BITS];
__hal_virtualpath_t virtual_paths[XGE_HAL_MAX_VIRTUAL_PATHS];
__hal_blockpool_t block_pool;
__hal_regpool_t reg_pool;
#endif
volatile int is_initialized;
volatile int terminating;
xge_hal_stats_t stats;
macaddr_t macaddr[1];
xge_queue_h queueh;
volatile int mcast_refcnt;
int is_promisc;
volatile xge_hal_device_link_state_e link_state;
void *upper_layer_info;
xge_hal_device_attr_t orig_attr;
u16 device_id;
u8 revision;
int msi_enabled;
int hw_is_initialized;
u64 inject_serr;
u64 inject_ecc;
u8 inject_bad_tcode;
int inject_bad_tcode_for_chan_type;
int reset_needed_after_close;
int tti_enabled;
xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM];
int bimodal_timer_val_us;
int bimodal_urange_a_en;
int bimodal_intr_cnt;
char *spdm_mem_base;
u16 spdm_max_entries;
xge_hal_spdm_entry_t **spdm_table;
spinlock_t spdm_lock;
u32 msi_mask;
unsigned int magic;
pci_reg_h regh0;
pci_reg_h regh1;
pci_reg_h regh2;
char *bar0;
char *isrbar0;
char *bar1;
char *bar2;
pci_irq_h irqh;
pci_cfg_h cfgh;
pci_dev_h pdev;
xge_hal_pci_config_t pci_config_space;
xge_hal_pci_config_t pci_config_space_bios;
xge_hal_device_config_t config;
xge_list_t free_channels;
xge_list_t fifo_channels;
xge_list_t ring_channels;
volatile int is_initialized;
volatile int terminating;
xge_hal_stats_t stats;
macaddr_t macaddr[1];
xge_queue_h queueh;
volatile int mcast_refcnt;
int is_promisc;
volatile xge_hal_device_link_state_e link_state;
void *upper_layer_info;
xge_hal_device_attr_t orig_attr;
u16 device_id;
u8 revision;
int msi_enabled;
int hw_is_initialized;
u64 inject_serr;
u64 inject_ecc;
u8 inject_bad_tcode;
int inject_bad_tcode_for_chan_type;
int reset_needed_after_close;
int tti_enabled;
xge_hal_tti_config_t bimodal_tti[XGE_HAL_MAX_RING_NUM];
int bimodal_timer_val_us;
int bimodal_urange_a_en;
int bimodal_intr_cnt;
char *spdm_mem_base;
u16 spdm_max_entries;
xge_hal_spdm_entry_t **spdm_table;
spinlock_t spdm_lock;
#if defined(XGE_HAL_CONFIG_LRO)
xge_hal_lro_desc_t lro_desc[XGE_HAL_MAX_RING_NUM];
xge_hal_lro_desc_t lro_desc[XGE_HAL_MAX_RING_NUM];
#endif
spinlock_t xena_post_lock;
spinlock_t xena_post_lock;
/* bimodal workload stats */
int irq_workload_rxd[XGE_HAL_MAX_RING_NUM];
int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM];
int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM];
int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM];
int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM];
int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM];
int irq_workload_rxd[XGE_HAL_MAX_RING_NUM];
int irq_workload_rxcnt[XGE_HAL_MAX_RING_NUM];
int irq_workload_rxlen[XGE_HAL_MAX_RING_NUM];
int irq_workload_txd[XGE_HAL_MAX_FIFO_NUM];
int irq_workload_txcnt[XGE_HAL_MAX_FIFO_NUM];
int irq_workload_txlen[XGE_HAL_MAX_FIFO_NUM];
int mtu_first_time_set;
u64 rxufca_lbolt;
u64 rxufca_lbolt_time;
u64 rxufca_intr_thres;
int mtu_first_time_set;
u64 rxufca_lbolt;
u64 rxufca_lbolt_time;
u64 rxufca_intr_thres;
char* dump_buf;
xge_hal_pci_mode_e pci_mode;
xge_hal_pci_mode_e pci_mode;
xge_hal_pci_bus_frequency_e bus_frequency;
xge_hal_pci_bus_width_e bus_width;
xge_hal_pci_bus_width_e bus_width;
xge_hal_vpd_data_t vpd_data;
volatile int in_poll;
u64 msix_vector_table[XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR];
volatile int in_poll;
u64 msix_vector_table[XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR];
} xge_hal_device_t;
@ -445,13 +418,13 @@ __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason);
xge_hal_status_e
__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
u16 spdm_entry, u64 *spdm_line_val);
u16 spdm_entry, u64 *spdm_line_val);
void __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val,
void *addr);
void *addr);
void __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
void *addr);
void *addr);
void __hal_device_get_vpd_data(xge_hal_device_t *hldev);
xge_hal_status_e
@ -483,7 +456,7 @@ __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason);
xge_hal_status_e
__hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, int op, u64 mask,
int max_millis);
int max_millis);
xge_hal_status_e
__hal_device_rts_mac_configure(xge_hal_device_t *hldev);
@ -501,20 +474,20 @@ __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag);
void
__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
xge_hal_channel_t *channel, int flag);
xge_hal_channel_t *channel, int flag);
/* =========================== PUBLIC API ================================= */
unsigned int
__hal_fix_time_ival_herc(xge_hal_device_t *hldev,
unsigned int time_ival);
unsigned int time_ival);
xge_hal_status_e
xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable,
u32 itable_size);
u32 itable_size);
void
xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
u16 bucket_size);
u16 bucket_size);
void
xge_hal_rts_rth_init(xge_hal_device_t *hldev);
@ -539,6 +512,7 @@ xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index);
int xge_hal_reinitialize_hw(xge_hal_device_t * hldev);
xge_hal_status_e xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev);
/**
* xge_hal_device_rti_reconfigure
* @hldev: Hal Device
@ -602,7 +576,7 @@ xge_hal_device_in_poll(xge_hal_device_h devh)
static inline void
xge_hal_device_inject_ecc(xge_hal_device_h devh, u64 err_reg)
{
((xge_hal_device_t*)devh)->inject_ecc = err_reg;
((xge_hal_device_t*)devh)->inject_ecc = err_reg;
}
@ -622,7 +596,7 @@ xge_hal_device_inject_ecc(xge_hal_device_h devh, u64 err_reg)
static inline void
xge_hal_device_inject_serr(xge_hal_device_h devh, u64 err_reg)
{
((xge_hal_device_t*)devh)->inject_serr = err_reg;
((xge_hal_device_t*)devh)->inject_serr = err_reg;
}
@ -645,11 +619,11 @@ xge_hal_device_inject_serr(xge_hal_device_h devh, u64 err_reg)
static inline void
xge_hal_device_inject_bad_tcode(xge_hal_device_h devh, int chan_type, u8 t_code)
{
((xge_hal_device_t*)devh)->inject_bad_tcode_for_chan_type = chan_type;
((xge_hal_device_t*)devh)->inject_bad_tcode = t_code;
((xge_hal_device_t*)devh)->inject_bad_tcode_for_chan_type = chan_type;
((xge_hal_device_t*)devh)->inject_bad_tcode = t_code;
}
void xge_hal_device_msi_enable(xge_hal_device_h devh);
void xge_hal_device_msi_enable(xge_hal_device_h devh);
/*
* xge_hal_device_msi_mode - Is MSI enabled?
@ -753,7 +727,7 @@ static inline xge_hal_status_e
xge_hal_device_mtu_check(xge_hal_device_t *hldev, int new_mtu)
{
if ((new_mtu < XGE_HAL_MIN_MTU) || (new_mtu > XGE_HAL_MAX_MTU)) {
return XGE_HAL_ERR_INVALID_MTU_SIZE;
return XGE_HAL_ERR_INVALID_MTU_SIZE;
}
return XGE_HAL_OK;
@ -766,20 +740,20 @@ void xge_hal_device_bcast_disable(xge_hal_device_h devh);
void xge_hal_device_terminating(xge_hal_device_h devh);
xge_hal_status_e xge_hal_device_initialize(xge_hal_device_t *hldev,
xge_hal_device_attr_t *attr, xge_hal_device_config_t *config);
xge_hal_device_attr_t *attr, xge_hal_device_config_t *config);
void xge_hal_device_terminate(xge_hal_device_t *hldev);
xge_hal_status_e xge_hal_device_reset(xge_hal_device_t *hldev);
xge_hal_status_e xge_hal_device_macaddr_get(xge_hal_device_t *hldev,
int index, macaddr_t *macaddr);
int index, macaddr_t *macaddr);
xge_hal_status_e xge_hal_device_macaddr_set(xge_hal_device_t *hldev,
int index, macaddr_t macaddr);
int index, macaddr_t macaddr);
xge_hal_status_e xge_hal_device_macaddr_clear(xge_hal_device_t *hldev,
int index);
int index);
int xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted);
@ -804,14 +778,14 @@ xge_hal_status_e xge_hal_device_disable(xge_hal_device_t *hldev);
xge_hal_status_e xge_hal_device_enable(xge_hal_device_t *hldev);
xge_hal_status_e xge_hal_device_handle_tcode(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh,
u8 t_code);
xge_hal_dtr_h dtrh,
u8 t_code);
xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
xge_hal_device_link_state_e *ls);
xge_hal_device_link_state_e *ls);
void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
int one_shot);
int one_shot);
void xge_hal_device_poll(xge_hal_device_h devh);
@ -821,18 +795,18 @@ int xge_hal_device_is_slot_freeze(xge_hal_device_h devh);
xge_hal_status_e
xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
xge_hal_pci_bus_frequency_e *bus_frequency,
xge_hal_pci_bus_width_e *bus_width);
xge_hal_pci_bus_frequency_e *bus_frequency,
xge_hal_pci_bus_width_e *bus_width);
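A hypothetical sketch of querying the negotiated bus parameters declared above (not part of this commit):

/*
 * Hypothetical sketch only: read back the PCI/PCI-X mode, bus frequency
 * and bus width negotiated by the adapter.
 */
static void
example_pci_info(xge_hal_device_h devh)
{
    xge_hal_pci_mode_e mode;
    xge_hal_pci_bus_frequency_e freq;
    xge_hal_pci_bus_width_e width;

    if (xge_hal_device_pci_info_get(devh, &mode, &freq, &width) !=
        XGE_HAL_OK)
        return;
    if (mode == XGE_HAL_PCI_INVALID_MODE ||
        width == XGE_HAL_PCI_BUS_WIDTH_UNKNOWN) {
        /* e.g. warn about a degraded or unrecognized bus configuration */
    }
}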
xge_hal_status_e
xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
u8 is_tcp, u8 is_ipv4, u8 tgt_queue);
xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
u8 is_tcp, u8 is_ipv4, u8 tgt_queue);
xge_hal_status_e
xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
u8 is_tcp, u8 is_ipv4);
xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
u8 is_tcp, u8 is_ipv4);
xge_hal_status_e
xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index);
@ -850,7 +824,7 @@ u64 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg);
/* Some function protoypes for MSI implementation. */
xge_hal_status_e
xge_hal_channel_msi_set (xge_hal_channel_h channelh, int msi,
u32 msg_val);
u32 msg_val);
void
xge_hal_mask_msi(xge_hal_device_t *hldev);
@ -869,6 +843,9 @@ xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id);
#if defined(XGE_HAL_CONFIG_LRO)
xge_hal_status_e
xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev);
void
xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev);
#endif
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_DEVICE)
@ -910,7 +887,7 @@ xge_hal_device_isrbar0_set(xge_hal_device_t *hldev, char *isrbar0);
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_bar1_set(xge_hal_device_t *hldev, xge_hal_channel_h channelh,
char *bar1);
char *bar1);
__HAL_STATIC_DEVICE __HAL_INLINE_DEVICE void
xge_hal_device_mask_tx(xge_hal_device_t *hldev);
@ -972,44 +949,41 @@ __hal_tcp_lro_capable(iplro_t *ip, tcplro_t *tcp, lro_t *lro, int *ts_off);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_capable(u8 *buffer, iplro_t **ip, tcplro_t **tcp,
xge_hal_dtr_info_t *ext_info);
xge_hal_dtr_info_t *ext_info);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_get_lro_session(u8 *eth_hdr, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
xge_hal_lro_desc_t *ring_lro, lro_t **lro_end3);
xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
xge_hal_lro_desc_t *ring_lro, lro_t **lro_end3);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_lro_under_optimal_thresh(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
xge_hal_device_t *hldev);
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_ip_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
xge_hal_device_t *hldev);
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_collapse_tcp_hdr(iplro_t *ip, tcplro_t *tcp, lro_t *lro,
xge_hal_device_t *hldev);
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_append_lro(iplro_t *ip, tcplro_t **tcp, u32 *seg_len, lro_t *lro,
xge_hal_device_t *hldev);
xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_lro_process_rx(int ring, u8 *eth_hdr, u8 *ip_hdr, tcplro_t **tcp,
u32 *seglen, lro_t **p_lro,
xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
lro_t **lro_end3);
u32 *seglen, lro_t **p_lro,
xge_hal_dtr_info_t *ext_info, xge_hal_device_t *hldev,
lro_t **lro_end3);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
xge_hal_accumulate_large_rx(u8 *buffer, tcplro_t **tcp, u32 *seglen,
lro_t **lro, xge_hal_dtr_info_t *ext_info,
xge_hal_device_t *hldev, lro_t **lro_end3);
lro_t **lro, xge_hal_dtr_info_t *ext_info,
xge_hal_device_t *hldev, lro_t **lro_end3);
void
xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
xge_hal_lro_next_session (xge_hal_device_t *hldev, int ring);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL lro_t *
@ -1017,11 +991,11 @@ xge_hal_lro_get_next_session(xge_hal_device_t *hldev);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_open_lro_session (u8 *buffer, iplro_t *ip, tcplro_t *tcp, lro_t **lro,
xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro,
int slot, u32 tcp_seg_len, int ts_off);
xge_hal_device_t *hldev, xge_hal_lro_desc_t *ring_lro,
int slot, u32 tcp_seg_len, int ts_off);
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro);
__hal_lro_get_free_slot (xge_hal_lro_desc_t *ring_lro);
#endif
#else /* XGE_FASTPATH_EXTERN */


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-driver.h
*
* Description: HAL driver object functionality
*
* Created: 14 May 2004
*/
#ifndef XGE_HAL_DRIVER_H
#define XGE_HAL_DRIVER_H
@ -47,7 +39,7 @@
__EXTERN_BEGIN_DECLS
/* maximum number of events consumed in a single poll() cycle */
#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5
#define XGE_HAL_DRIVER_QUEUE_CONSUME_MAX 5
/**
@ -112,7 +104,7 @@ typedef void (*xge_uld_link_down_f) (void *userdata);
* xge_hal_device_private_set(), xge_hal_driver_initialize().
*/
typedef void (*xge_uld_crit_err_f) (void *userdata, xge_hal_event_e type,
u64 ext_data);
u64 ext_data);
/**
* function xge_uld_event_queued_f - Event-enqueued notification
@ -216,15 +208,15 @@ typedef void (*xge_uld_xpak_alarm_log_f) (xge_hal_device_h devh, xge_hal_xpak_al
* See also: xge_hal_driver_initialize().
*/
typedef struct xge_hal_uld_cbs_t {
xge_uld_link_up_f link_up;
xge_uld_link_down_f link_down;
xge_uld_crit_err_f crit_err;
xge_uld_event_f event;
xge_uld_event_queued_f event_queued;
xge_uld_link_up_f link_up;
xge_uld_link_down_f link_down;
xge_uld_crit_err_f crit_err;
xge_uld_event_f event;
xge_uld_event_queued_f event_queued;
xge_uld_before_device_poll_f before_device_poll;
xge_uld_after_device_poll_f after_device_poll;
xge_uld_sched_timer_cb_f sched_timer;
xge_uld_xpak_alarm_log_f xpak_alarm_log;
xge_uld_after_device_poll_f after_device_poll;
xge_uld_sched_timer_cb_f sched_timer;
xge_uld_xpak_alarm_log_f xpak_alarm_log;
} xge_hal_uld_cbs_t;
/**
@ -238,19 +230,19 @@ typedef struct xge_hal_uld_cbs_t {
* @uld_callbacks: Upper-layer driver callbacks. See xge_hal_uld_cbs_t{}.
* @debug_module_mask: 32bit mask that defines which components of the
* driver are to be traced. The trace-able components are:
* XGE_COMPONENT_HAL_CONFIG 0x1
* XGE_COMPONENT_HAL_FIFO 0x2
* XGE_COMPONENT_HAL_RING 0x4
* XGE_COMPONENT_HAL_CHANNEL 0x8
* XGE_COMPONENT_HAL_DEVICE 0x10
* XGE_COMPONENT_HAL_MM 0x20
* XGE_COMPONENT_HAL_QUEUE 0x40
* XGE_COMPONENT_HAL_STATS 0x100
* XGE_COMPONENT_OSDEP 0x1000
* XGE_COMPONENT_LL 0x2000
* XGE_COMPONENT_TOE 0x4000
* XGE_COMPONENT_RDMA 0x8000
* XGE_COMPONENT_ALL 0xffffffff
* XGE_COMPONENT_HAL_CONFIG 0x1
* XGE_COMPONENT_HAL_FIFO 0x2
* XGE_COMPONENT_HAL_RING 0x4
* XGE_COMPONENT_HAL_CHANNEL 0x8
* XGE_COMPONENT_HAL_DEVICE 0x10
* XGE_COMPONENT_HAL_MM 0x20
* XGE_COMPONENT_HAL_QUEUE 0x40
* XGE_COMPONENT_HAL_STATS 0x100
* XGE_COMPONENT_OSDEP 0x1000
* XGE_COMPONENT_LL 0x2000
* XGE_COMPONENT_TOE 0x4000
* XGE_COMPONENT_RDMA 0x8000
* XGE_COMPONENT_ALL 0xffffffff
* The @debug_module_mask allows tracing to be switched on and off at runtime.
* In addition, the traces for the same trace-able components can be
* compiled out, based on the same mask provided via Makefile.
@ -259,18 +251,18 @@ typedef struct xge_hal_uld_cbs_t {
* HAL (driver) object. There is a single instance of this structure per HAL.
*/
typedef struct xge_hal_driver_t {
xge_hal_driver_config_t config;
xge_hal_driver_config_t config;
int is_initialized;
xge_hal_uld_cbs_t uld_callbacks;
u32 debug_module_mask;
int debug_level;
u32 debug_module_mask;
int debug_level;
} xge_hal_driver_t;
extern xge_hal_driver_t *g_xge_hal_driver;
static inline int
xge_hal_driver_is_initialized(void) {
return g_xge_hal_driver->is_initialized;
return g_xge_hal_driver->is_initialized;
}
static inline int
@ -283,7 +275,7 @@ static inline void
xge_hal_driver_debug_module_mask_set(u32 new_mask)
{
#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
g_xge_hal_driver->debug_module_mask = new_mask;
g_module_mask = (unsigned long *)&g_xge_hal_driver->debug_module_mask;
#endif
@ -296,14 +288,14 @@ static inline void
xge_hal_driver_debug_level_set(int new_level)
{
#if (defined(XGE_DEBUG_TRACE_MASK) && XGE_DEBUG_TRACE_MASK > 0) || \
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
(defined(XGE_DEBUG_ERR_MASK) && XGE_DEBUG_ERR_MASK > 0)
g_xge_hal_driver->debug_level = new_level;
g_level = &g_xge_hal_driver->debug_level;
#endif
}
xge_hal_status_e xge_hal_driver_initialize(xge_hal_driver_config_t *config,
xge_hal_uld_cbs_t *uld_callbacks);
xge_hal_uld_cbs_t *uld_callbacks);
void xge_hal_driver_terminate(void);
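A hypothetical sketch of registering ULD callbacks and initializing the HAL (not part of this commit); xge_os_memzero is assumed to be the OS-pal zeroing helper, and the my_* callbacks and drv_config are placeholders supplied by the ULD:

/*
 * Hypothetical sketch only: fill the callback table and bring up the HAL
 * once per module load.  Only the callbacks of interest are set; the rest
 * remain NULL.
 */
static int
example_driver_init(xge_hal_driver_config_t *drv_config,
    xge_uld_link_up_f my_link_up, xge_uld_link_down_f my_link_down,
    xge_uld_crit_err_f my_crit_err)
{
    xge_hal_uld_cbs_t cbs;

    xge_os_memzero(&cbs, sizeof(cbs));  /* assumed OS-pal helper */
    cbs.link_up = my_link_up;
    cbs.link_down = my_link_down;
    cbs.crit_err = my_crit_err;

    if (xge_hal_driver_initialize(drv_config, &cbs) != XGE_HAL_OK)
        return (-1);
    return (0);
}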


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-event.h
*
* Description: event types
*
* Created: 7 June 2004
*/
#ifndef XGE_HAL_EVENT_H
#define XGE_HAL_EVENT_H
@ -41,8 +33,8 @@
__EXTERN_BEGIN_DECLS
#define XGE_HAL_EVENT_BASE 0
#define XGE_LL_EVENT_BASE 100
#define XGE_HAL_EVENT_BASE 0
#define XGE_LL_EVENT_BASE 100
/**
* enum xge_hal_event_e - Enumerates slow-path HAL events.
@ -69,15 +61,15 @@ __EXTERN_BEGIN_DECLS
* xge_uld_link_down_f{}.
*/
typedef enum xge_hal_event_e {
XGE_HAL_EVENT_UNKNOWN = 0,
XGE_HAL_EVENT_UNKNOWN = 0,
/* HAL events */
XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1,
XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2,
XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3,
XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4,
XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5,
XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6,
XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7,
XGE_HAL_EVENT_SERR = XGE_HAL_EVENT_BASE + 1,
XGE_HAL_EVENT_LINK_IS_UP = XGE_HAL_EVENT_BASE + 2,
XGE_HAL_EVENT_LINK_IS_DOWN = XGE_HAL_EVENT_BASE + 3,
XGE_HAL_EVENT_ECCERR = XGE_HAL_EVENT_BASE + 4,
XGE_HAL_EVENT_PARITYERR = XGE_HAL_EVENT_BASE + 5,
XGE_HAL_EVENT_TARGETABORT = XGE_HAL_EVENT_BASE + 6,
XGE_HAL_EVENT_SLOT_FREEZE = XGE_HAL_EVENT_BASE + 7,
} xge_hal_event_e;
__EXTERN_END_DECLS


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-fifo.h
*
* Description: Tx fifo object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_FIFO_H
#define XGE_HAL_FIFO_H
@ -44,11 +36,11 @@
__EXTERN_BEGIN_DECLS
/* HW fifo configuration */
#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65
#define XGE_HAL_FIFO_MAX_WRR 5
#define XGE_HAL_FIFO_MAX_PARTITION 4
#define XGE_HAL_FIFO_MAX_WRR_STATE 36
#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000
#define XGE_HAL_FIFO_INT_PER_LIST_THRESHOLD 65
#define XGE_HAL_FIFO_MAX_WRR 5
#define XGE_HAL_FIFO_MAX_PARTITION 4
#define XGE_HAL_FIFO_MAX_WRR_STATE 36
#define XGE_HAL_FIFO_HW_PAIR_OFFSET 0x20000
/* HW FIFO Weight Calender */
#define XGE_HAL_FIFO_WRR_0 0x0706050407030602ULL
@ -77,14 +69,14 @@ typedef struct {
/* Bad TxDL transfer codes */
#define XGE_HAL_TXD_T_CODE_OK 0x0
#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1
#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2
#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3
#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5
#define XGE_HAL_TXD_T_CODE_PARITY 0x7
#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA
#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF
#define XGE_HAL_TXD_T_CODE_OK 0x0
#define XGE_HAL_TXD_T_CODE_UNUSED_1 0x1
#define XGE_HAL_TXD_T_CODE_ABORT_BUFFER 0x2
#define XGE_HAL_TXD_T_CODE_ABORT_DTOR 0x3
#define XGE_HAL_TXD_T_CODE_UNUSED_5 0x5
#define XGE_HAL_TXD_T_CODE_PARITY 0x7
#define XGE_HAL_TXD_T_CODE_LOSS_OF_LINK 0xA
#define XGE_HAL_TXD_T_CODE_GENERAL_ERR 0xF
/**
@ -105,16 +97,16 @@ typedef struct {
typedef struct xge_hal_fifo_txd_t {
u64 control_1;
#define XGE_HAL_TXD_LIST_OWN_XENA BIT(7)
#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_TXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_GET_TXD_T_CODE(val) ((val & XGE_HAL_TXD_T_CODE)>>48)
#define XGE_HAL_SET_TXD_T_CODE(x, val) (x |= (((u64)val & 0xF) << 48))
#define XGE_HAL_TXD_GATHER_CODE (BIT(22) | BIT(23))
#define XGE_HAL_TXD_GATHER_CODE_FIRST BIT(22)
#define XGE_HAL_TXD_GATHER_CODE_LAST BIT(23)
#define XGE_HAL_TXD_NO_LSO 0
#define XGE_HAL_TXD_UDF_COF 1
#define XGE_HAL_TXD_TCP_LSO 2
#define XGE_HAL_TXD_UDP_LSO 3
#define XGE_HAL_TXD_NO_LSO 0
#define XGE_HAL_TXD_UDF_COF 1
#define XGE_HAL_TXD_TCP_LSO 2
#define XGE_HAL_TXD_UDP_LSO 3
#define XGE_HAL_TXD_LSO_COF_CTRL(val) vBIT(val,30,2)
#define XGE_HAL_TXD_TCP_LSO_MSS(val) vBIT(val,34,14)
#define XGE_HAL_TXD_BUFFER0_SIZE(val) vBIT(val,48,16)
@ -165,17 +157,17 @@ typedef xge_hal_fifo_txd_t* xge_hal_fifo_txdl_t;
* Note: The structure is cache line aligned.
*/
typedef struct xge_hal_fifo_t {
xge_hal_channel_t channel;
spinlock_t *post_lock_ptr;
xge_hal_fifo_hw_pair_t *hw_pair;
xge_hal_fifo_config_t *config;
int no_snoop_bits;
int txdl_per_memblock;
u64 interrupt_type;
int txdl_size;
int priv_size;
xge_hal_mempool_t *mempool;
int align_size;
xge_hal_channel_t channel;
spinlock_t *post_lock_ptr;
xge_hal_fifo_hw_pair_t *hw_pair;
xge_hal_fifo_config_t *config;
int no_snoop_bits;
int txdl_per_memblock;
u64 interrupt_type;
int txdl_size;
int priv_size;
xge_hal_mempool_t *mempool;
int align_size;
} __xge_os_attr_cacheline_aligned xge_hal_fifo_t;
/**
@ -228,30 +220,30 @@ typedef struct xge_hal_fifo_t {
* See also: xge_hal_ring_rxd_priv_t{}.
*/
typedef struct xge_hal_fifo_txdl_priv_t {
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
int frags;
char *align_vaddr_start;
char *align_vaddr;
dma_addr_t align_dma_addr;
pci_dma_h align_dma_handle;
pci_dma_acc_h align_dma_acch;
ptrdiff_t align_dma_offset;
int align_used_frags;
int alloc_frags;
int dang_frags;
unsigned int bytes_sent;
int unused;
xge_hal_fifo_txd_t *dang_txdl;
struct xge_hal_fifo_txdl_priv_t *next_txdl_priv;
xge_hal_fifo_txd_t *first_txdp;
void *memblock;
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
int frags;
char *align_vaddr_start;
char *align_vaddr;
dma_addr_t align_dma_addr;
pci_dma_h align_dma_handle;
pci_dma_acc_h align_dma_acch;
ptrdiff_t align_dma_offset;
int align_used_frags;
int alloc_frags;
int dang_frags;
unsigned int bytes_sent;
int unused;
xge_hal_fifo_txd_t *dang_txdl;
struct xge_hal_fifo_txdl_priv_t *next_txdl_priv;
xge_hal_fifo_txd_t *first_txdp;
void *memblock;
#ifdef XGE_DEBUG_ASSERT
xge_hal_mempool_dma_t *dma_object;
xge_hal_mempool_dma_t *dma_object;
#endif
#ifdef XGE_OS_MEMORY_CHECK
int allocated;
int allocated;
#endif
} xge_hal_fifo_txdl_priv_t;
@ -268,7 +260,7 @@ xge_hal_fifo_get_max_frags_cnt(xge_hal_channel_h channelh)
/* ========================= FIFO PRIVATE API ============================= */
xge_hal_status_e __hal_fifo_open(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr);
xge_hal_channel_attr_t *attr);
void __hal_fifo_close(xge_hal_channel_h channelh);
@ -289,16 +281,20 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u64 ctrl_1);
u64 ctrl_1);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
xge_hal_fifo_txd_t *txdp, int txdl_count);
xge_hal_fifo_txd_t *txdp, int txdl_count);
/* ========================= FIFO PUBLIC API ============================== */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
const int frags);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh);
@ -307,38 +303,38 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channel, int dtr_sp_size,
xge_hal_dtr_h dtr_sp);
xge_hal_dtr_h dtr_sp);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
xge_hal_dtr_h dtrs[]);
xge_hal_dtr_h dtrs[]);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code);
u8 *t_code);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr);
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx, dma_addr_t dma_pointer, int size);
int frag_idx, dma_addr_t dma_pointer, int size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
dma_addr_t dma_pointer, int size, int misaligned_size);
xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
dma_addr_t dma_pointer, int size, int misaligned_size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
void *vaddr, int size);
void *vaddr, int size);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx);
int frag_idx);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss);
@ -347,7 +343,7 @@ __HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag);
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag);
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh);
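A hypothetical transmit/complete sketch built on the FIFO API above (not part of this commit); dma_addr and len are placeholders produced by an earlier DMA mapping, and the one-fragment usage pattern is an assumption:

/*
 * Hypothetical sketch only: reserve a descriptor, attach one pre-mapped
 * fragment and post it to the adapter.
 */
static void
example_fifo_xmit(xge_hal_channel_h channelh, dma_addr_t dma_addr, int len)
{
    xge_hal_dtr_h dtr;

    if (xge_hal_fifo_dtr_reserve(channelh, &dtr) != XGE_HAL_OK)
        return;     /* out of descriptors; retry later */
    xge_hal_fifo_dtr_buffer_set(channelh, dtr, 0, dma_addr, len);
    xge_hal_fifo_dtr_post(channelh, dtr);
}

/*
 * Hypothetical sketch only: reap completed descriptors, letting the HAL
 * classify any bad transfer codes.
 */
static void
example_fifo_reap(xge_hal_channel_h channelh)
{
    xge_hal_dtr_h dtr;
    u8 t_code;

    while (xge_hal_fifo_dtr_next_completed(channelh, &dtr, &t_code) ==
        XGE_HAL_OK) {
        if (t_code != XGE_HAL_TXD_T_CODE_OK)
            (void) xge_hal_device_handle_tcode(channelh, dtr, t_code);
        xge_hal_fifo_dtr_free(channelh, dtr);
    }
}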


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-mgmt.h
*
* Description: management API
*
* Created: 1 September 2004
*/
#ifndef XGE_HAL_MGMT_H
#define XGE_HAL_MGMT_H
@ -68,88 +60,88 @@ __EXTERN_BEGIN_DECLS
* @transponder_temperature: TODO
*/
typedef struct xge_hal_mgmt_about_info_t {
u16 vendor;
u16 device;
u16 subsys_vendor;
u16 subsys_device;
u8 board_rev;
char vendor_name[16];
char chip_name[16];
char media[16];
char hal_major[4];
char hal_minor[4];
char hal_fix[4];
char hal_build[16];
char ll_major[4];
char ll_minor[4];
char ll_fix[4];
char ll_build[16];
u32 transponder_temperature;
u16 vendor;
u16 device;
u16 subsys_vendor;
u16 subsys_device;
u8 board_rev;
char vendor_name[16];
char chip_name[16];
char media[16];
char hal_major[4];
char hal_minor[4];
char hal_fix[4];
char hal_build[16];
char ll_major[4];
char ll_minor[4];
char ll_fix[4];
char ll_build[16];
u32 transponder_temperature;
} xge_hal_mgmt_about_info_t;
typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t;
typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t;
typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t;
typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t;
typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t;
typedef xge_hal_device_config_t xge_hal_mgmt_device_config_t;
typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t;
typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t;
typedef xge_hal_stats_hw_info_t xge_hal_mgmt_hw_stats_t;
typedef xge_hal_stats_pcim_info_t xge_hal_mgmt_pcim_stats_t;
typedef xge_hal_stats_sw_err_t xge_hal_mgmt_sw_stats_t;
typedef xge_hal_stats_device_info_t xge_hal_mgmt_device_stats_t;
typedef xge_hal_stats_channel_info_t xge_hal_mgmt_channel_stats_t;
typedef xge_hal_device_config_t xge_hal_mgmt_device_config_t;
typedef xge_hal_driver_config_t xge_hal_mgmt_driver_config_t;
typedef xge_hal_pci_config_t xge_hal_mgmt_pci_config_t;
xge_hal_status_e
xge_hal_mgmt_about(xge_hal_device_h devh, xge_hal_mgmt_about_info_t *about_info,
int size);
int size);
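A hypothetical sketch of the management query above (not part of this commit):

/*
 * Hypothetical sketch only: fetch adapter identification and HAL/LL
 * version strings.
 */
static void
example_mgmt_about(xge_hal_device_h devh)
{
    xge_hal_mgmt_about_info_t info;

    if (xge_hal_mgmt_about(devh, &info, sizeof(info)) == XGE_HAL_OK) {
        /* info.chip_name, info.hal_major, info.ll_major, ... are valid */
    }
}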
xge_hal_status_e
xge_hal_mgmt_hw_stats(xge_hal_device_h devh, xge_hal_mgmt_hw_stats_t *hw_stats,
int size);
int size);
xge_hal_status_e
xge_hal_mgmt_hw_stats_off(xge_hal_device_h devh, int off, int size, char *out);
xge_hal_status_e
xge_hal_mgmt_pcim_stats(xge_hal_device_h devh,
xge_hal_mgmt_pcim_stats_t *pcim_stats, int size);
xge_hal_mgmt_pcim_stats_t *pcim_stats, int size);
xge_hal_status_e
xge_hal_mgmt_pcim_stats_off(xge_hal_device_h devh, int off, int size,
char *out);
char *out);
xge_hal_status_e
xge_hal_mgmt_sw_stats(xge_hal_device_h devh, xge_hal_mgmt_sw_stats_t *hw_stats,
int size);
int size);
xge_hal_status_e
xge_hal_mgmt_device_stats(xge_hal_device_h devh,
xge_hal_mgmt_device_stats_t *device_stats, int size);
xge_hal_mgmt_device_stats_t *device_stats, int size);
xge_hal_status_e
xge_hal_mgmt_channel_stats(xge_hal_channel_h channelh,
xge_hal_mgmt_channel_stats_t *channel_stats, int size);
xge_hal_mgmt_channel_stats_t *channel_stats, int size);
xge_hal_status_e
xge_hal_mgmt_reg_read(xge_hal_device_h devh, int bar_id, unsigned int offset,
u64 *value);
u64 *value);
xge_hal_status_e
xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset,
u64 value);
xge_hal_mgmt_reg_write(xge_hal_device_h devh, int bar_id, unsigned int offset,
u64 value);
xge_hal_status_e
xge_hal_mgmt_pcireg_read(xge_hal_device_h devh, unsigned int offset,
int bits, u32 *value);
int bits, u32 *value);
xge_hal_status_e
xge_hal_mgmt_device_config(xge_hal_device_h devh,
xge_hal_mgmt_device_config_t *dev_config, int size);
xge_hal_mgmt_device_config_t *dev_config, int size);
xge_hal_status_e
xge_hal_mgmt_driver_config(xge_hal_mgmt_driver_config_t *drv_config,
int size);
int size);
xge_hal_status_e
xge_hal_mgmt_pci_config(xge_hal_device_h devh,
xge_hal_mgmt_pci_config_t *pci_config, int size);
xge_hal_mgmt_pci_config_t *pci_config, int size);
xge_hal_status_e
xge_hal_pma_loopback( xge_hal_device_h devh, int enable );
@ -199,7 +191,7 @@ __hal_chk_xpak_counter(xge_hal_device_t *hldev, int type, u32 value);
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
xge_hal_status_e
xge_hal_mgmt_trace_read(char *buffer, unsigned buf_size, unsigned *offset,
unsigned *read_length);
unsigned *read_length);
#endif
void
@ -215,8 +207,8 @@ xge_hal_flick_link_led(xge_hal_device_h devh);
* given its Sub system ID.
*/
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
((((subid >= 0x600B) && (subid <= 0x600D)) || \
((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0)
((((subid >= 0x600B) && (subid <= 0x600D)) || \
((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0)
#define CHECKBIT(value, nbit) (value & (1 << nbit))
#ifdef XGE_HAL_USE_MGMT_AUX


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-mgmtaux.h
*
* Description: management auxiliary API
*
* Created: 1 September 2004
*/
#ifndef XGE_HAL_MGMTAUX_H
#define XGE_HAL_MGMTAUX_H
@ -41,54 +33,54 @@
__EXTERN_BEGIN_DECLS
#define XGE_HAL_AUX_SEPA ' '
#define XGE_HAL_AUX_SEPA ' '
xge_hal_status_e xge_hal_aux_about_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_tmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_rmac_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_sw_dev_read(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
int bufsize, char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_pci_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_hal_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh,
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_bar0_read(xge_hal_device_h devh,
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_bar0_write(xge_hal_device_h devh,
unsigned int offset, u64 value);
unsigned int offset, u64 value);
xge_hal_status_e xge_hal_aux_bar1_read(xge_hal_device_h devh,
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
unsigned int offset, int bufsize, char *retbuf,
int *retsize);
xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_pci_config_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_stats_herc_enchanced(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
int bufsize, char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_channel_read(xge_hal_device_h devh, int bufsize,
char *retbuf, int *retsize);
char *retbuf, int *retsize);
xge_hal_status_e xge_hal_aux_device_dump(xge_hal_device_h devh);
xge_hal_status_e xge_hal_aux_driver_config_read(int bufsize, char *retbuf,
int *retsize);
int *retsize);
xge_hal_status_e xge_hal_aux_device_config_read(xge_hal_device_h devh,
int bufsize, char *retbuf, int *retsize);
int bufsize, char *retbuf, int *retsize);
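A hypothetical sketch of the bufsize/retbuf/retsize convention shared by all the aux readers above (not part of this commit); the caller-supplied buffer is a placeholder:

/*
 * Hypothetical sketch only: dump the "about" block into a caller-supplied
 * text buffer and return the number of bytes written.
 */
static int
example_aux_about(xge_hal_device_h devh, char *buf, int bufsize)
{
    int retsize = 0;

    if (xge_hal_aux_about_read(devh, bufsize, buf, &retsize) != XGE_HAL_OK)
        return (-1);
    /* buf[0 .. retsize-1] now holds printable text */
    return (retsize);
}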
__EXTERN_END_DECLS


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-mm.h
*
* Description: memory pool object
*
* Created: 28 May 2004
*/
#ifndef XGE_HAL_MM_H
#define XGE_HAL_MM_H
@ -51,9 +43,9 @@ typedef void* xge_hal_mempool_h;
caller.
*/
typedef struct xge_hal_mempool_dma_t {
dma_addr_t addr;
pci_dma_h handle;
pci_dma_acc_h acc_handle;
dma_addr_t addr;
pci_dma_h handle;
pci_dma_acc_h acc_handle;
} xge_hal_mempool_dma_t;
/*
@ -67,32 +59,32 @@ typedef struct xge_hal_mempool_dma_t {
* Memory pool allocation/deallocation callback.
*/
typedef xge_hal_status_e (*xge_hal_mempool_item_f) (xge_hal_mempool_h mempoolh,
void *memblock, int memblock_index,
xge_hal_mempool_dma_t *dma_object, void *item,
int index, int is_last, void *userdata);
void *memblock, int memblock_index,
xge_hal_mempool_dma_t *dma_object, void *item,
int index, int is_last, void *userdata);
/*
* struct xge_hal_mempool_t - Memory pool.
*/
typedef struct xge_hal_mempool_t {
xge_hal_mempool_item_f item_func_alloc;
xge_hal_mempool_item_f item_func_free;
void *userdata;
void **memblocks_arr;
void **memblocks_priv_arr;
xge_hal_mempool_dma_t *memblocks_dma_arr;
pci_dev_h pdev;
int memblock_size;
int memblocks_max;
int memblocks_allocated;
int item_size;
int items_max;
int items_initial;
int items_current;
int items_per_memblock;
void **items_arr;
void **shadow_items_arr;
int items_priv_size;
xge_hal_mempool_item_f item_func_alloc;
xge_hal_mempool_item_f item_func_free;
void *userdata;
void **memblocks_arr;
void **memblocks_priv_arr;
xge_hal_mempool_dma_t *memblocks_dma_arr;
pci_dev_h pdev;
int memblock_size;
int memblocks_max;
int memblocks_allocated;
int item_size;
int items_max;
int items_initial;
int items_current;
int items_per_memblock;
void **items_arr;
void **shadow_items_arr;
int items_priv_size;
} xge_hal_mempool_t;
/*
@ -110,7 +102,7 @@ __hal_mempool_item(xge_hal_mempool_t *mempool, int index)
*/
static inline void*
__hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx,
void *item, int *memblock_item_idx)
void *item, int *memblock_item_idx)
{
ptrdiff_t offset;
void *memblock = mempool->memblocks_arr[memblock_idx];
@ -124,7 +116,7 @@ __hal_mempool_item_priv(xge_hal_mempool_t *mempool, int memblock_idx,
xge_assert((*memblock_item_idx) < mempool->items_per_memblock);
return (char*)mempool->memblocks_priv_arr[memblock_idx] +
(*memblock_item_idx) * mempool->items_priv_size;
(*memblock_item_idx) * mempool->items_priv_size;
}
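For clarity, the lookup above is pure pointer arithmetic; a sketch of the relation it implements, assuming items are laid out back to back at item_size intervals within each memory block (which is how the pool allocates them):
	/* index of the item inside its memory block */
	memblock_item_idx = ((char *)item - (char *)memblock) / mempool->item_size;
	/* its private area lives in the parallel per-block private array */
	priv = (char *)mempool->memblocks_priv_arr[memblock_idx] +
	    memblock_item_idx * mempool->items_priv_size;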
/*
@ -159,12 +151,12 @@ __hal_mempool_memblock_dma(xge_hal_mempool_t *mempool, int memblock_idx)
}
xge_hal_status_e __hal_mempool_grow(xge_hal_mempool_t *mempool,
int num_allocate, int *num_allocated);
int num_allocate, int *num_allocated);
xge_hal_mempool_t* __hal_mempool_create(pci_dev_h pdev, int memblock_size,
int item_size, int private_size, int items_initial,
int items_max, xge_hal_mempool_item_f item_func_alloc,
xge_hal_mempool_item_f item_func_free, void *userdata);
int item_size, int private_size, int items_initial,
int items_max, xge_hal_mempool_item_f item_func_alloc,
xge_hal_mempool_item_f item_func_free, void *userdata);
void __hal_mempool_destroy(xge_hal_mempool_t *mempool);

File diff suppressed because it is too large


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-ring.h
*
* Description: HAL Rx ring object functionality
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_RING_H
#define XGE_HAL_RING_H
@ -44,52 +36,52 @@
__EXTERN_BEGIN_DECLS
/* HW ring configuration */
#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000
#define XGE_HAL_RING_RXDBLOCK_SIZE 0x1000
#define XGE_HAL_RXD_T_CODE_OK 0x0
#define XGE_HAL_RXD_T_CODE_PARITY 0x1
#define XGE_HAL_RXD_T_CODE_ABORT 0x2
#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3
#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4
#define XGE_HAL_RXD_T_CODE_OK 0x0
#define XGE_HAL_RXD_T_CODE_PARITY 0x1
#define XGE_HAL_RXD_T_CODE_ABORT 0x2
#define XGE_HAL_RXD_T_CODE_PARITY_ABORT 0x3
#define XGE_HAL_RXD_T_CODE_RDA_FAILURE 0x4
#define XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO 0x5
#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6
#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7
#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8
#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC
#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF
#define XGE_HAL_RXD_T_CODE_BAD_FCS 0x6
#define XGE_HAL_RXD_T_CODE_BUFF_SIZE 0x7
#define XGE_HAL_RXD_T_CODE_BAD_ECC 0x8
#define XGE_HAL_RXD_T_CODE_UNUSED_C 0xC
#define XGE_HAL_RXD_T_CODE_UNKNOWN 0xF
#define XGE_HAL_RING_USE_MTU -1
#define XGE_HAL_RING_USE_MTU -1
/* control_1 and control_2 formatting - same for all buffer modes */
#define XGE_HAL_RXD_GET_L3_CKSUM(control_1) ((u16)(control_1>>16) & 0xFFFF)
#define XGE_HAL_RXD_GET_L4_CKSUM(control_1) ((u16)(control_1 & 0xFFFF))
#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_MASK_VLAN_TAG vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_SET_VLAN_TAG(control_2, val) control_2 |= (u16)val
#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF))
#define XGE_HAL_RXD_GET_VLAN_TAG(control_2) ((u16)(control_2 & 0xFFFF))
#define XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */
#define XGE_HAL_RXD_POSTED_4_XFRAME BIT(7) /* control_1 */
#define XGE_HAL_RXD_NOT_COMPLETED BIT(0) /* control_2 */
#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_RXD_GET_T_CODE(control_1) \
((control_1 & XGE_HAL_RXD_T_CODE)>>48)
#define XGE_HAL_RXD_T_CODE (BIT(12)|BIT(13)|BIT(14)|BIT(15))
#define XGE_HAL_RXD_GET_T_CODE(control_1) \
((control_1 & XGE_HAL_RXD_T_CODE)>>48)
#define XGE_HAL_RXD_SET_T_CODE(control_1, val) \
(control_1 |= (((u64)val & 0xF) << 48))
(control_1 |= (((u64)val & 0xF) << 48))
#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2)
#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8)
#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \
(u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37))
#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \
(u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32)
#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24)
#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27)
#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28)
#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29)
#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30)
#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31)
#define XGE_HAL_RXD_MASK_FRAME_TYPE vBIT(0x3,25,2)
#define XGE_HAL_RXD_MASK_FRAME_PROTO vBIT(0xFFFF,24,8)
#define XGE_HAL_RXD_GET_FRAME_TYPE(control_1) \
(u8)(0x3 & ((control_1 & XGE_HAL_RXD_MASK_FRAME_TYPE) >> 37))
#define XGE_HAL_RXD_GET_FRAME_PROTO(control_1) \
(u8)((control_1 & XGE_HAL_RXD_MASK_FRAME_PROTO) >> 32)
#define XGE_HAL_RXD_FRAME_PROTO_VLAN_TAGGED BIT(24)
#define XGE_HAL_RXD_FRAME_PROTO_IPV4 BIT(27)
#define XGE_HAL_RXD_FRAME_PROTO_IPV6 BIT(28)
#define XGE_HAL_RXD_FRAME_PROTO_IP_FRAGMENTED BIT(29)
#define XGE_HAL_RXD_FRAME_PROTO_TCP BIT(30)
#define XGE_HAL_RXD_FRAME_PROTO_UDP BIT(31)
#define XGE_HAL_RXD_FRAME_TCP_OR_UDP (XGE_HAL_RXD_FRAME_PROTO_TCP | \
XGE_HAL_RXD_FRAME_PROTO_UDP)
XGE_HAL_RXD_FRAME_PROTO_UDP)
/**
* enum xge_hal_frame_type_e - Ethernet frame format.
@ -101,10 +93,10 @@ __EXTERN_BEGIN_DECLS
* Ethernet frame format.
*/
typedef enum xge_hal_frame_type_e {
XGE_HAL_FRAME_TYPE_DIX = 0x0,
XGE_HAL_FRAME_TYPE_LLC = 0x1,
XGE_HAL_FRAME_TYPE_SNAP = 0x2,
XGE_HAL_FRAME_TYPE_IPX = 0x3,
XGE_HAL_FRAME_TYPE_DIX = 0x0,
XGE_HAL_FRAME_TYPE_LLC = 0x1,
XGE_HAL_FRAME_TYPE_SNAP = 0x2,
XGE_HAL_FRAME_TYPE_IPX = 0x3,
} xge_hal_frame_type_e;
/**
@ -120,14 +112,14 @@ typedef enum xge_hal_frame_type_e {
* Higher layer ethernet protocols and options.
*/
typedef enum xge_hal_frame_proto_e {
XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80,
XGE_HAL_FRAME_PROTO_IPV4 = 0x10,
XGE_HAL_FRAME_PROTO_IPV6 = 0x08,
XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04,
XGE_HAL_FRAME_PROTO_TCP = 0x02,
XGE_HAL_FRAME_PROTO_UDP = 0x01,
XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \
XGE_HAL_FRAME_PROTO_UDP)
XGE_HAL_FRAME_PROTO_VLAN_TAGGED = 0x80,
XGE_HAL_FRAME_PROTO_IPV4 = 0x10,
XGE_HAL_FRAME_PROTO_IPV6 = 0x08,
XGE_HAL_FRAME_PROTO_IP_FRAGMENTED = 0x04,
XGE_HAL_FRAME_PROTO_TCP = 0x02,
XGE_HAL_FRAME_PROTO_UDP = 0x01,
XGE_HAL_FRAME_PROTO_TCP_OR_UDP = (XGE_HAL_FRAME_PROTO_TCP | \
XGE_HAL_FRAME_PROTO_UDP)
} xge_hal_frame_proto_e;
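The per-frame @proto word reported by the HAL (see the xge_hal_dtr_info_t description further below) is a bitwise OR of these flags, so consumers test individual bits rather than comparing for equality. A minimal illustrative check; ext_info is assumed to be an xge_hal_dtr_info_t already filled in by xge_hal_ring_dtr_info_get():
	/* Sketch: is this an unfragmented IPv4 TCP/UDP frame (an LRO candidate)? */
	if ((ext_info.proto & XGE_HAL_FRAME_PROTO_IPV4) &&
	    (ext_info.proto & XGE_HAL_FRAME_PROTO_TCP_OR_UDP) &&
	    !(ext_info.proto & XGE_HAL_FRAME_PROTO_IP_FRAGMENTED)) {
	        /* candidate for checksum offload / LRO handling */
	}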
/*
@ -137,12 +129,12 @@ typedef struct {
u64 host_control;
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_1_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_1_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_1_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
#define XGE_HAL_RXD_1_GET_RTH_VALUE(Control_2) \
(u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16)
(u32)((Control_2 & vBIT(0xFFFFFFFF,16,32))>>16)
u64 buffer0_ptr;
} xge_hal_ring_rxd_1_t;
@ -154,20 +146,20 @@ typedef struct {
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8)
#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8)
#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_3_MASK_BUFFER0_SIZE vBIT(0xFF,8,8)
#define XGE_HAL_RXD_3_SET_BUFFER0_SIZE(val) vBIT(val,8,8)
#define XGE_HAL_RXD_3_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_3_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_3_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_3_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_3_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFF,8,8))>>48)
(int)((Control_2 & vBIT(0xFF,8,8))>>48)
#define XGE_HAL_RXD_3_GET_BUFFER1_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
#define XGE_HAL_RXD_3_GET_BUFFER2_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
u64 buffer0_ptr;
u64 buffer1_ptr;
@ -187,33 +179,33 @@ typedef struct {
#endif
#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16)
#define XGE_HAL_RXD_5_MASK_BUFFER3_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER3_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_MASK_BUFFER4_SIZE vBIT(0xFFFF,48,16)
#define XGE_HAL_RXD_5_SET_BUFFER4_SIZE(val) vBIT(val,48,16)
#define XGE_HAL_RXD_5_GET_BUFFER3_SIZE(Control_3) \
(int)((Control_3 & vBIT(0xFFFF,32,16))>>16)
(int)((Control_3 & vBIT(0xFFFF,32,16))>>16)
#define XGE_HAL_RXD_5_GET_BUFFER4_SIZE(Control_3) \
(int)((Control_3 & vBIT(0xFFFF,48,16)))
(int)((Control_3 & vBIT(0xFFFF,48,16)))
u64 control_1;
u64 control_2;
#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_MASK_BUFFER0_SIZE vBIT(0xFFFF,0,16)
#define XGE_HAL_RXD_5_SET_BUFFER0_SIZE(val) vBIT(val,0,16)
#define XGE_HAL_RXD_5_MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16)
#define XGE_HAL_RXD_5_SET_BUFFER1_SIZE(val) vBIT(val,16,16)
#define XGE_HAL_RXD_5_MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16)
#define XGE_HAL_RXD_5_SET_BUFFER2_SIZE(val) vBIT(val,32,16)
#define XGE_HAL_RXD_5_GET_BUFFER0_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
(int)((Control_2 & vBIT(0xFFFF,0,16))>>48)
#define XGE_HAL_RXD_5_GET_BUFFER1_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
(int)((Control_2 & vBIT(0xFFFF,16,16))>>32)
#define XGE_HAL_RXD_5_GET_BUFFER2_SIZE(Control_2) \
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
(int)((Control_2 & vBIT(0xFFFF,32,16))>>16)
u64 buffer0_ptr;
u64 buffer1_ptr;
u64 buffer2_ptr;
@ -222,32 +214,32 @@ typedef struct {
} xge_hal_ring_rxd_5_t;
#define XGE_HAL_RXD_GET_RTH_SPDM_HIT(Control_1) \
(u8)((Control_1 & BIT(18))>>45)
(u8)((Control_1 & BIT(18))>>45)
#define XGE_HAL_RXD_GET_RTH_IT_HIT(Control_1) \
(u8)((Control_1 & BIT(19))>>44)
(u8)((Control_1 & BIT(19))>>44)
#define XGE_HAL_RXD_GET_RTH_HASH_TYPE(Control_1) \
(u8)((Control_1 & vBIT(0xF,20,4))>>40)
(u8)((Control_1 & vBIT(0xF,20,4))>>40)
#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2
#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5
#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8
#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9
#define XGE_HAL_RXD_HASH_TYPE_NONE 0x0
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV4 0x1
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV4 0x2
#define XGE_HAL_RXD_HASH_TYPE_IPV4 0x3
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6 0x4
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6 0x5
#define XGE_HAL_RXD_HASH_TYPE_IPV6 0x6
#define XGE_HAL_RXD_HASH_TYPE_TCP_IPV6_EX 0x7
#define XGE_HAL_RXD_HASH_TYPE_UDP_IPV6_EX 0x8
#define XGE_HAL_RXD_HASH_TYPE_IPV6_EX 0x9
typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE];
#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8
#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0
#define XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET 0xFF8
#define XGE_HAL_RING_MEMBLOCK_IDX_OFFSET 0xFF0
#define XGE_HAL_RING_RXD_SIZEOF(n) \
(n==1 ? sizeof(xge_hal_ring_rxd_1_t) : \
(n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \
sizeof(xge_hal_ring_rxd_5_t)))
(n==3 ? sizeof(xge_hal_ring_rxd_3_t) : \
sizeof(xge_hal_ring_rxd_5_t)))
#define XGE_HAL_RING_RXDS_PER_BLOCK(n) \
(n==1 ? 127 : (n==3 ? 85 : 63))
@ -274,14 +266,14 @@ typedef u8 xge_hal_ring_block_t[XGE_HAL_RING_RXDBLOCK_SIZE];
* purposes.
*/
typedef struct xge_hal_ring_rxd_priv_t {
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
dma_addr_t dma_addr;
pci_dma_h dma_handle;
ptrdiff_t dma_offset;
#ifdef XGE_DEBUG_ASSERT
xge_hal_mempool_dma_t *dma_object;
xge_hal_mempool_dma_t *dma_object;
#endif
#ifdef XGE_OS_MEMORY_CHECK
int allocated;
int allocated;
#endif
} xge_hal_ring_rxd_priv_t;
@ -317,17 +309,17 @@ typedef struct xge_hal_ring_rxd_priv_t {
* CPU cache performance.
*/
typedef struct xge_hal_ring_t {
xge_hal_channel_t channel;
int buffer_mode;
int indicate_max_pkts;
xge_hal_ring_config_t *config;
int rxd_size;
int rxd_priv_size;
int rxds_per_block;
xge_hal_mempool_t *mempool;
int rxdblock_priv_size;
void **reserved_rxds_arr;
int cmpl_cnt;
xge_hal_channel_t channel;
int buffer_mode;
int indicate_max_pkts;
xge_hal_ring_config_t *config;
int rxd_size;
int rxd_priv_size;
int rxds_per_block;
xge_hal_mempool_t *mempool;
int rxdblock_priv_size;
void **reserved_rxds_arr;
int cmpl_cnt;
} __xge_os_attr_cacheline_aligned xge_hal_ring_t;
/**
@ -343,7 +335,7 @@ typedef struct xge_hal_ring_t {
* corrupted.
* @frame: See xge_hal_frame_type_e{}.
* @proto: Reporting bits for various higher-layer protocols, including (but
* not restricted to) TCP and UDP. See xge_hal_frame_proto_e{}.
* not restricted to) TCP and UDP. See xge_hal_frame_proto_e{}.
* @vlan: VLAN tag extracted from the received frame.
* @rth_value: Receive Traffic Hashing(RTH) hash value. Produced by Xframe II
* hardware if RTH is enabled.
@ -355,22 +347,22 @@ typedef struct xge_hal_ring_t {
* @reserved_pad: Unused byte.
*/
typedef struct xge_hal_dtr_info_t {
int l3_cksum;
int l4_cksum;
int frame; /* zero or more of xge_hal_frame_type_e flags */
int proto; /* zero or more of xge_hal_frame_proto_e flags */
int vlan;
u32 rth_value;
u8 rth_it_hit;
u8 rth_spdm_hit;
u8 rth_hash_type;
u8 reserved_pad;
int l3_cksum;
int l4_cksum;
int frame; /* zero or more of xge_hal_frame_type_e flags */
int proto; /* zero or more of xge_hal_frame_proto_e flags */
int vlan;
u32 rth_value;
u8 rth_it_hit;
u8 rth_spdm_hit;
u8 rth_hash_type;
u8 reserved_pad;
} xge_hal_dtr_info_t;
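Taken together with the ring API declared below, the structure above is what a completion handler consumes. A minimal sketch of a 1-buffer-mode receive poll loop, under the assumption that channelh is an open ring channel handle and that buffer re-posting (not shown) happens elsewhere; this illustrates the call sequence only, not the driver's actual fast path:
	xge_hal_dtr_h dtrh;
	xge_hal_dtr_info_t ext_info;
	dma_addr_t dma;
	int pkt_len;
	u8 t_code;

	while (xge_hal_ring_dtr_next_completed(channelh, &dtrh, &t_code) ==
	    XGE_HAL_OK) {
	        if (t_code != XGE_HAL_RXD_T_CODE_OK) {
	                /* bad completion; drop and recycle the descriptor */
	                xge_hal_ring_dtr_free(channelh, dtrh);
	                continue;
	        }
	        xge_hal_ring_dtr_info_get(channelh, dtrh, &ext_info);
	        xge_hal_ring_dtr_1b_get(channelh, dtrh, &dma, &pkt_len);
	        if (ext_info.l3_cksum == XGE_HAL_L3_CKSUM_OK &&
	            ext_info.l4_cksum == XGE_HAL_L4_CKSUM_OK) {
	                /* hardware validated the IP and TCP/UDP checksums */
	        }
	        /* hand the pkt_len bytes in this buffer to the stack, then ... */
	        xge_hal_ring_dtr_free(channelh, dtrh);
	}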
/* ========================== RING PRIVATE API ============================ */
xge_hal_status_e __hal_ring_open(xge_hal_channel_h channelh,
xge_hal_channel_attr_t *attr);
xge_hal_channel_attr_t *attr);
void __hal_ring_close(xge_hal_channel_h channelh);
@ -383,7 +375,7 @@ void __hal_ring_prc_enable(xge_hal_channel_h channelh);
void __hal_ring_prc_disable(xge_hal_channel_h channelh);
xge_hal_status_e __hal_ring_initial_replenish(xge_hal_channel_t *channel,
xge_hal_channel_reopen_e reopen);
xge_hal_channel_reopen_e reopen);
#if defined(XGE_DEBUG_FP) && (XGE_DEBUG_FP & XGE_DEBUG_FP_RING)
#define __HAL_STATIC_RING
@ -400,7 +392,7 @@ __hal_ring_block_next_pointer(xge_hal_ring_block_t *block);
__HAL_STATIC_RING __HAL_INLINE_RING void
__hal_ring_block_next_pointer_set(xge_hal_ring_block_t*block,
dma_addr_t dma_next);
dma_addr_t dma_next);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_ring_rxd_priv_t*
__hal_ring_rxd_priv(xge_hal_ring_t *ring, xge_hal_dtr_h dtrh);
@ -414,31 +406,31 @@ __HAL_STATIC_RING __HAL_INLINE_RING void*
xge_hal_ring_dtr_private(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size);
xge_hal_ring_dtr_1b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointer, int size);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_info_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
xge_hal_dtr_info_t *ext_info);
xge_hal_dtr_info_t *ext_info);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_1b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t *dma_pointer, int *pkt_length);
dma_addr_t *dma_pointer, int *pkt_length);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[]);
int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_3b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointers[], int sizes[]);
dma_addr_t dma_pointers[], int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_set(xge_hal_dtr_h dtrh, dma_addr_t dma_pointers[],
int sizes[]);
int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_5b_get(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
dma_addr_t dma_pointer[], int sizes[]);
dma_addr_t dma_pointer[], int sizes[]);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
@ -454,12 +446,12 @@ xge_hal_ring_dtr_post_post_wmb(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_dtr_next_completed(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh,
u8 *t_code);
u8 *t_code);
__HAL_STATIC_RING __HAL_INLINE_RING void
xge_hal_ring_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh);
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
__HAL_STATIC_RING __HAL_INLINE_RING xge_hal_status_e
xge_hal_ring_is_next_dtr_completed(xge_hal_channel_h channelh);
#else /* XGE_FASTPATH_EXTERN */


@ -26,15 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-stats.h
*
* Description: HW statistics object
*
* Created: 2 June 2004
*/
#ifndef XGE_HAL_STATS_H
#define XGE_HAL_STATS_H
@ -524,7 +515,7 @@ typedef struct xge_hal_stats_hw_info_t {
u32 wr_disc_cnt;
u32 rd_rtry_wr_ack_cnt;
/* DMA Transaction statistics. */
/* DMA Transaction statistics. */
u32 txp_wr_cnt;
u32 txd_rd_cnt;
u32 txd_wr_cnt;
@ -696,7 +687,7 @@ typedef struct xge_hal_stats_hw_info_t {
u32 wr_disc_cnt;
u32 wr_rtry_cnt;
/* PCI/PCI-X Write / DMA Transaction statistics. */
/* PCI/PCI-X Write / DMA Transaction statistics. */
u32 txp_wr_cnt;
u32 rd_rtry_wr_ack_cnt;
u32 txd_wr_cnt;
@ -805,34 +796,34 @@ typedef struct xge_hal_stats_hw_info_t {
* @total_posts_dtrs_many: Total number of posts on the channel that involving
* more than one descriptor.
* @total_posts_frags_many: Total number of fragments posted on the channel
* during post requests of multiple descriptors.
* during post requests of multiple descriptors.
* @total_posts_dang_dtrs: Total number of posts on the channel involving
* dangling descriptors.
* @total_posts_dang_frags: Total number of dangling fragments posted on the channel
* during post request containing multiple descriptors.
* during post request containing multiple descriptors.
*
* HAL channel counters.
* See also: xge_hal_stats_device_info_t{}.
*/
typedef struct xge_hal_stats_channel_info_t {
u32 full_cnt;
u32 usage_max;
u32 reserve_free_swaps_cnt;
u32 avg_compl_per_intr_cnt;
u32 total_compl_cnt;
u32 total_posts;
u32 total_posts_many;
u32 total_buffers;
u32 copied_frags;
u32 copied_buffers;
u32 avg_buffers_per_post;
u32 avg_buffer_size;
u32 avg_post_size;
u32 ring_bump_cnt;
u32 total_posts_dtrs_many;
u32 total_posts_frags_many;
u32 total_posts_dang_dtrs;
u32 total_posts_dang_frags;
u32 full_cnt;
u32 usage_max;
u32 reserve_free_swaps_cnt;
u32 avg_compl_per_intr_cnt;
u32 total_compl_cnt;
u32 total_posts;
u32 total_posts_many;
u32 total_buffers;
u32 copied_frags;
u32 copied_buffers;
u32 avg_buffers_per_post;
u32 avg_buffer_size;
u32 avg_post_size;
u32 ring_bump_cnt;
u32 total_posts_dtrs_many;
u32 total_posts_frags_many;
u32 total_posts_dang_dtrs;
u32 total_posts_dang_frags;
} xge_hal_stats_channel_info_t;
/**
@ -843,10 +834,10 @@ typedef struct xge_hal_stats_channel_info_t {
* @tick_period: tick count for each cycle
*/
typedef struct xge_hal_xpak_counter_t {
u32 excess_temp;
u32 excess_bias_current;
u32 excess_laser_output;
u32 tick_period;
u32 excess_temp;
u32 excess_bias_current;
u32 excess_laser_output;
u32 tick_period;
} xge_hal_xpak_counter_t;
/**
@ -865,18 +856,18 @@ typedef struct xge_hal_xpak_counter_t {
* @warn_laser_output_power_low: warn_laser_output_power_low count value
*/
typedef struct xge_hal_stats_xpak_t {
u16 alarm_transceiver_temp_high;
u16 alarm_transceiver_temp_low;
u16 alarm_laser_bias_current_high;
u16 alarm_laser_bias_current_low;
u16 alarm_laser_output_power_high;
u16 alarm_laser_output_power_low;
u16 warn_transceiver_temp_high;
u16 warn_transceiver_temp_low;
u16 warn_laser_bias_current_high;
u16 warn_laser_bias_current_low;
u16 warn_laser_output_power_high;
u16 warn_laser_output_power_low;
u16 alarm_transceiver_temp_high;
u16 alarm_transceiver_temp_low;
u16 alarm_laser_bias_current_high;
u16 alarm_laser_bias_current_low;
u16 alarm_laser_output_power_high;
u16 alarm_laser_output_power_low;
u16 warn_transceiver_temp_high;
u16 warn_transceiver_temp_low;
u16 warn_laser_bias_current_high;
u16 warn_laser_bias_current_low;
u16 warn_laser_output_power_high;
u16 warn_laser_output_power_low;
} xge_hal_stats_xpak_t;
@ -955,83 +946,55 @@ typedef struct xge_hal_stats_sw_err_t {
* See also: xge_hal_stats_channel_info_t{}.
*/
typedef struct xge_hal_stats_device_info_t {
u32 rx_traffic_intr_cnt;
u32 tx_traffic_intr_cnt;
u32 txpic_intr_cnt;
u32 txdma_intr_cnt;
u32 pfc_err_cnt;
u32 tda_err_cnt;
u32 pcc_err_cnt;
u32 tti_err_cnt;
u32 lso_err_cnt;
u32 tpa_err_cnt;
u32 sm_err_cnt;
u32 txmac_intr_cnt;
u32 mac_tmac_err_cnt;
u32 txxgxs_intr_cnt;
u32 xgxs_txgxs_err_cnt;
u32 rxpic_intr_cnt;
u32 rxdma_intr_cnt;
u32 rc_err_cnt;
u32 rpa_err_cnt;
u32 rda_err_cnt;
u32 rti_err_cnt;
u32 rxmac_intr_cnt;
u32 mac_rmac_err_cnt;
u32 rxxgxs_intr_cnt;
u32 xgxs_rxgxs_err_cnt;
u32 mc_intr_cnt;
u32 not_traffic_intr_cnt;
u32 not_xge_intr_cnt;
u32 traffic_intr_cnt;
u32 total_intr_cnt;
u32 soft_reset_cnt;
u32 rxufca_hi_adjust_cnt;
u32 rxufca_lo_adjust_cnt;
u32 bimodal_hi_adjust_cnt;
u32 bimodal_lo_adjust_cnt;
u32 rx_traffic_intr_cnt;
u32 tx_traffic_intr_cnt;
u32 txpic_intr_cnt;
u32 txdma_intr_cnt;
u32 pfc_err_cnt;
u32 tda_err_cnt;
u32 pcc_err_cnt;
u32 tti_err_cnt;
u32 lso_err_cnt;
u32 tpa_err_cnt;
u32 sm_err_cnt;
u32 txmac_intr_cnt;
u32 mac_tmac_err_cnt;
u32 txxgxs_intr_cnt;
u32 xgxs_txgxs_err_cnt;
u32 rxpic_intr_cnt;
u32 rxdma_intr_cnt;
u32 rc_err_cnt;
u32 rpa_err_cnt;
u32 rda_err_cnt;
u32 rti_err_cnt;
u32 rxmac_intr_cnt;
u32 mac_rmac_err_cnt;
u32 rxxgxs_intr_cnt;
u32 xgxs_rxgxs_err_cnt;
u32 mc_intr_cnt;
u32 not_traffic_intr_cnt;
u32 not_xge_intr_cnt;
u32 traffic_intr_cnt;
u32 total_intr_cnt;
u32 soft_reset_cnt;
u32 rxufca_hi_adjust_cnt;
u32 rxufca_lo_adjust_cnt;
u32 bimodal_hi_adjust_cnt;
u32 bimodal_lo_adjust_cnt;
#ifdef XGE_HAL_CONFIG_LRO
u32 tot_frms_lroised;
u32 tot_lro_sessions;
u32 lro_frm_len_exceed_cnt;
u32 lro_sg_exceed_cnt;
u32 lro_out_of_seq_pkt_cnt;
u32 lro_dup_pkt_cnt;
u32 tot_frms_lroised;
u32 tot_lro_sessions;
u32 lro_frm_len_exceed_cnt;
u32 lro_sg_exceed_cnt;
u32 lro_out_of_seq_pkt_cnt;
u32 lro_dup_pkt_cnt;
#endif
} xge_hal_stats_device_info_t;
#ifdef XGEHAL_RNIC
/**
* struct xge_hal_vp_statistics_t - Virtual Path Statistics
*
* @no_nces: Number of NCEs on Adapter in this VP
* @no_sqs: Number of SQs on Adapter in this VP
* @no_srqs: Number of SRQs on Adapter in this VP
* @no_cqrqs: Number of CQRQs on Adapter in this VP
* @no_tcp_sessions: Number of TCP sessions on Adapter in this VP
* @no_lro_sessions: Number of LRO sessions on Adapter in this VP
* @no_spdm_sessions: Number of SPDM sessions on Adapter in this VP
*
* This structure contains fields to keep statistics of virtual path
*/
typedef struct xge_hal_vp_statistics_t {
u32 no_nces;
u32 no_sqs;
u32 no_srqs;
u32 no_cqrqs;
u32 no_tcp_sessions;
u32 no_lro_sessions;
u32 no_spdm_sessions;
}xge_hal_vp_statistics_t;
#endif
/* ========================== XFRAME ER STATISTICS ======================== */
#define XGE_HAL_MAC_LINKS 3
#define XGE_HAL_MAC_AGGREGATORS 2
#define XGE_HAL_VPATHS 17
#define XGE_HAL_MAC_LINKS 3
#define XGE_HAL_MAC_AGGREGATORS 2
#define XGE_HAL_VPATHS 17
/**
* struct xge_hal_stats_link_info_t - XGMAC statistics for a link
*
@ -1207,97 +1170,97 @@ typedef struct xge_hal_vp_statistics_t {
* queue for mac the link.
*/
typedef struct xge_hal_stats_link_info_t {
u64 tx_frms;
u64 tx_ttl_eth_octets;
u64 tx_data_octets;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_ucst_frms;
u64 tx_tagged_frms;
u64 tx_vld_ip;
u64 tx_vld_ip_octets;
u64 tx_icmp;
u64 tx_tcp;
u64 tx_rst_tcp;
u64 tx_udp;
u64 tx_unknown_protocol;
u64 tx_parse_error;
u64 tx_pause_ctrl_frms;
u64 tx_lacpdu_frms;
u64 tx_marker_pdu_frms;
u64 tx_marker_resp_pdu_frms;
u64 tx_drop_ip;
u64 tx_xgmii_char1_match;
u64 tx_xgmii_char2_match;
u64 tx_xgmii_column1_match;
u64 tx_xgmii_column2_match;
u64 tx_drop_frms;
u64 tx_any_err_frms;
u64 rx_ttl_frms;
u64 rx_vld_frms;
u64 rx_offld_frms;
u64 rx_ttl_eth_octets;
u64 rx_data_octets;
u64 rx_offld_octets;
u64 rx_vld_mcst_frms;
u64 rx_vld_bcst_frms;
u64 rx_accepted_ucst_frms;
u64 rx_accepted_nucst_frms;
u64 rx_tagged_frms;
u64 rx_long_frms;
u64 rx_usized_frms;
u64 rx_osized_frms;
u64 rx_frag_frms;
u64 rx_jabber_frms;
u64 rx_ttl_64_frms;
u64 rx_ttl_65_127_frms;
u64 rx_ttl_128_255_frms;
u64 rx_ttl_256_511_frms;
u64 rx_ttl_512_1023_frms;
u64 rx_ttl_1024_1518_frms;
u64 rx_ttl_1519_4095_frms;
u64 rx_ttl_40956_8191_frms;
u64 rx_ttl_8192_max_frms;
u64 rx_ttl_gt_max_frms;
u64 rx_ip;
u64 rx_ip_octets;
u64 rx_hdr_err_ip;
u64 rx_icmp;
u64 rx_tcp;
u64 rx_udp;
u64 rx_err_tcp;
u64 rx_pause_cnt;
u64 rx_pause_ctrl_frms;
u64 rx_unsup_ctrl_frms;
u64 rx_in_rng_len_err_frms;
u64 rx_out_rng_len_err_frms;
u64 rx_drop_frms;
u64 rx_discarded_frms;
u64 rx_drop_ip;
u64 rx_err_drp_udp;
u64 rx_lacpdu_frms;
u64 rx_marker_pdu_frms;
u64 rx_marker_resp_pdu_frms;
u64 rx_unknown_pdu_frms;
u64 rx_illegal_pdu_frms;
u64 rx_fcs_discard;
u64 rx_len_discard;
u64 rx_pf_discard;
u64 rx_trash_discard;
u64 rx_rts_discard;
u64 rx_wol_discard;
u64 rx_red_discard;
u64 rx_ingm_full_discard;
u64 rx_xgmii_data_err_cnt;
u64 rx_xgmii_ctrl_err_cnt;
u64 rx_xgmii_err_sym;
u64 rx_xgmii_char1_match;
u64 rx_xgmii_char2_match;
u64 rx_xgmii_column1_match;
u64 rx_xgmii_column2_match;
u64 rx_local_fault;
u64 rx_remote_fault;
u64 rx_queue_full;
u64 tx_frms;
u64 tx_ttl_eth_octets;
u64 tx_data_octets;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_ucst_frms;
u64 tx_tagged_frms;
u64 tx_vld_ip;
u64 tx_vld_ip_octets;
u64 tx_icmp;
u64 tx_tcp;
u64 tx_rst_tcp;
u64 tx_udp;
u64 tx_unknown_protocol;
u64 tx_parse_error;
u64 tx_pause_ctrl_frms;
u64 tx_lacpdu_frms;
u64 tx_marker_pdu_frms;
u64 tx_marker_resp_pdu_frms;
u64 tx_drop_ip;
u64 tx_xgmii_char1_match;
u64 tx_xgmii_char2_match;
u64 tx_xgmii_column1_match;
u64 tx_xgmii_column2_match;
u64 tx_drop_frms;
u64 tx_any_err_frms;
u64 rx_ttl_frms;
u64 rx_vld_frms;
u64 rx_offld_frms;
u64 rx_ttl_eth_octets;
u64 rx_data_octets;
u64 rx_offld_octets;
u64 rx_vld_mcst_frms;
u64 rx_vld_bcst_frms;
u64 rx_accepted_ucst_frms;
u64 rx_accepted_nucst_frms;
u64 rx_tagged_frms;
u64 rx_long_frms;
u64 rx_usized_frms;
u64 rx_osized_frms;
u64 rx_frag_frms;
u64 rx_jabber_frms;
u64 rx_ttl_64_frms;
u64 rx_ttl_65_127_frms;
u64 rx_ttl_128_255_frms;
u64 rx_ttl_256_511_frms;
u64 rx_ttl_512_1023_frms;
u64 rx_ttl_1024_1518_frms;
u64 rx_ttl_1519_4095_frms;
u64 rx_ttl_40956_8191_frms;
u64 rx_ttl_8192_max_frms;
u64 rx_ttl_gt_max_frms;
u64 rx_ip;
u64 rx_ip_octets;
u64 rx_hdr_err_ip;
u64 rx_icmp;
u64 rx_tcp;
u64 rx_udp;
u64 rx_err_tcp;
u64 rx_pause_cnt;
u64 rx_pause_ctrl_frms;
u64 rx_unsup_ctrl_frms;
u64 rx_in_rng_len_err_frms;
u64 rx_out_rng_len_err_frms;
u64 rx_drop_frms;
u64 rx_discarded_frms;
u64 rx_drop_ip;
u64 rx_err_drp_udp;
u64 rx_lacpdu_frms;
u64 rx_marker_pdu_frms;
u64 rx_marker_resp_pdu_frms;
u64 rx_unknown_pdu_frms;
u64 rx_illegal_pdu_frms;
u64 rx_fcs_discard;
u64 rx_len_discard;
u64 rx_pf_discard;
u64 rx_trash_discard;
u64 rx_rts_discard;
u64 rx_wol_discard;
u64 rx_red_discard;
u64 rx_ingm_full_discard;
u64 rx_xgmii_data_err_cnt;
u64 rx_xgmii_ctrl_err_cnt;
u64 rx_xgmii_err_sym;
u64 rx_xgmii_char1_match;
u64 rx_xgmii_char2_match;
u64 rx_xgmii_column1_match;
u64 rx_xgmii_column2_match;
u64 rx_local_fault;
u64 rx_remote_fault;
u64 rx_queue_full;
}xge_hal_stats_link_info_t;
/**
@ -1323,18 +1286,18 @@ typedef struct xge_hal_stats_link_info_t {
* the aggregator.
*/
typedef struct xge_hal_stats_aggr_info_t {
u64 tx_frms;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_discarded_frms;
u64 tx_errored_frms;
u64 rx_frms;
u64 rx_data_octets;
u64 rx_mcst_frms;
u64 rx_bcst_frms;
u64 rx_discarded_frms;
u64 rx_errored_frms;
u64 rx_unknown_protocol_frms;
u64 tx_frms;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_discarded_frms;
u64 tx_errored_frms;
u64 rx_frms;
u64 rx_data_octets;
u64 rx_mcst_frms;
u64 rx_bcst_frms;
u64 rx_discarded_frms;
u64 rx_errored_frms;
u64 rx_unknown_protocol_frms;
}xge_hal_stats_aggr_info_t;
/**
@ -1439,60 +1402,60 @@ typedef struct xge_hal_stats_aggr_info_t {
* the vpath.
*/
typedef struct xge_hal_stats_vpath_info_t {
u64 tx_frms;
u64 tx_ttl_eth_octets;
u64 tx_data_octets;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_ucst_frms;
u64 tx_tagged_frms;
u64 tx_vld_ip;
u64 tx_vld_ip_octets;
u64 tx_icmp;
u64 tx_tcp;
u64 tx_rst_tcp;
u64 tx_udp;
u64 tx_unknown_protocol;
u64 tx_parse_error;
u64 rx_ttl_frms;
u64 rx_vld_frms;
u64 rx_offld_frms;
u64 rx_ttl_eth_octets;
u64 rx_data_octets;
u64 rx_offld_octets;
u64 rx_vld_mcst_frms;
u64 rx_vld_bcst_frms;
u64 rx_accepted_ucst_frms;
u64 rx_accepted_nucst_frms;
u64 rx_tagged_frms;
u64 rx_long_frms;
u64 rx_usized_frms;
u64 rx_osized_frms;
u64 rx_frag_frms;
u64 rx_jabber_frms;
u64 rx_ttl_64_frms;
u64 rx_ttl_65_127_frms;
u64 rx_ttl_128_255_frms;
u64 rx_ttl_256_511_frms;
u64 rx_ttl_512_1023_frms;
u64 rx_ttl_1024_1518_frms;
u64 rx_ttl_1519_4095_frms;
u64 rx_ttl_40956_8191_frms;
u64 rx_ttl_8192_max_frms;
u64 rx_ttl_gt_max_frms;
u64 rx_ip;
u64 rx_accepted_ip;
u64 rx_ip_octets;
u64 rx_hdr_err_ip;
u64 rx_icmp;
u64 rx_tcp;
u64 rx_udp;
u64 rx_err_tcp;
u64 rx_mpa_ok_frms;
u64 rx_mpa_crc_fail_frms;
u64 rx_mpa_mrk_fail_frms;
u64 rx_mpa_len_fail_frms;
u64 rx_wol_frms;
u64 tx_frms;
u64 tx_ttl_eth_octets;
u64 tx_data_octets;
u64 tx_mcst_frms;
u64 tx_bcst_frms;
u64 tx_ucst_frms;
u64 tx_tagged_frms;
u64 tx_vld_ip;
u64 tx_vld_ip_octets;
u64 tx_icmp;
u64 tx_tcp;
u64 tx_rst_tcp;
u64 tx_udp;
u64 tx_unknown_protocol;
u64 tx_parse_error;
u64 rx_ttl_frms;
u64 rx_vld_frms;
u64 rx_offld_frms;
u64 rx_ttl_eth_octets;
u64 rx_data_octets;
u64 rx_offld_octets;
u64 rx_vld_mcst_frms;
u64 rx_vld_bcst_frms;
u64 rx_accepted_ucst_frms;
u64 rx_accepted_nucst_frms;
u64 rx_tagged_frms;
u64 rx_long_frms;
u64 rx_usized_frms;
u64 rx_osized_frms;
u64 rx_frag_frms;
u64 rx_jabber_frms;
u64 rx_ttl_64_frms;
u64 rx_ttl_65_127_frms;
u64 rx_ttl_128_255_frms;
u64 rx_ttl_256_511_frms;
u64 rx_ttl_512_1023_frms;
u64 rx_ttl_1024_1518_frms;
u64 rx_ttl_1519_4095_frms;
u64 rx_ttl_40956_8191_frms;
u64 rx_ttl_8192_max_frms;
u64 rx_ttl_gt_max_frms;
u64 rx_ip;
u64 rx_accepted_ip;
u64 rx_ip_octets;
u64 rx_hdr_err_ip;
u64 rx_icmp;
u64 rx_tcp;
u64 rx_udp;
u64 rx_err_tcp;
u64 rx_mpa_ok_frms;
u64 rx_mpa_crc_fail_frms;
u64 rx_mpa_mrk_fail_frms;
u64 rx_mpa_len_fail_frms;
u64 rx_wol_frms;
}xge_hal_stats_vpath_info_t;
/**
@ -1503,8 +1466,8 @@ typedef struct xge_hal_stats_vpath_info_t {
* See also: xge_hal_stats_link_info_t{}, xge_hal_stats_aggr_info_t{}.
*/
typedef struct xge_hal_stats_pcim_info_t {
xge_hal_stats_link_info_t link_info[XGE_HAL_MAC_LINKS];
xge_hal_stats_aggr_info_t aggr_info[XGE_HAL_MAC_AGGREGATORS];
xge_hal_stats_link_info_t link_info[XGE_HAL_MAC_LINKS];
xge_hal_stats_aggr_info_t aggr_info[XGE_HAL_MAC_AGGREGATORS];
}xge_hal_stats_pcim_info_t;
/**
@ -1541,35 +1504,35 @@ typedef struct xge_hal_stats_pcim_info_t {
* See also: xge_hal_stats_channel_info_t{}.
*/
typedef struct xge_hal_stats_t {
/* handles */
xge_hal_device_h devh;
dma_addr_t dma_addr;
pci_dma_h hw_info_dmah;
pci_dma_acc_h hw_info_dma_acch;
/* handles */
xge_hal_device_h devh;
dma_addr_t dma_addr;
pci_dma_h hw_info_dmah;
pci_dma_acc_h hw_info_dma_acch;
/* HAL device hardware statistics */
xge_hal_stats_hw_info_t *hw_info;
xge_hal_stats_hw_info_t hw_info_saved;
xge_hal_stats_hw_info_t hw_info_latest;
/* HAL device hardware statistics */
xge_hal_stats_hw_info_t *hw_info;
xge_hal_stats_hw_info_t hw_info_saved;
xge_hal_stats_hw_info_t hw_info_latest;
/* HAL device hardware statistics for XFRAME ER */
xge_hal_stats_pcim_info_t *pcim_info;
xge_hal_stats_pcim_info_t *pcim_info_saved;
xge_hal_stats_pcim_info_t *pcim_info_latest;
xge_hal_stats_pcim_info_t *pcim_info;
xge_hal_stats_pcim_info_t *pcim_info_saved;
xge_hal_stats_pcim_info_t *pcim_info_latest;
/* HAL device "soft" stats */
/* HAL device "soft" stats */
xge_hal_stats_sw_err_t sw_dev_err_stats;
xge_hal_stats_device_info_t sw_dev_info_stats;
/* flags */
int is_initialized;
int is_enabled;
/* flags */
int is_initialized;
int is_enabled;
} xge_hal_stats_t;
/* ========================== STATS PRIVATE API ========================= */
xge_hal_status_e __hal_stats_initialize(xge_hal_stats_t *stats,
xge_hal_device_h devh);
xge_hal_device_h devh);
void __hal_stats_terminate(xge_hal_stats_t *stats);
@ -1582,16 +1545,16 @@ void __hal_stats_soft_reset(xge_hal_device_h devh, int reset_all);
/* ========================== STATS PUBLIC API ========================= */
xge_hal_status_e xge_hal_stats_hw(xge_hal_device_h devh,
xge_hal_stats_hw_info_t **hw_info);
xge_hal_stats_hw_info_t **hw_info);
xge_hal_status_e xge_hal_stats_pcim(xge_hal_device_h devh,
xge_hal_stats_pcim_info_t **pcim_info);
xge_hal_stats_pcim_info_t **pcim_info);
xge_hal_status_e xge_hal_stats_device(xge_hal_device_h devh,
xge_hal_stats_device_info_t **device_info);
xge_hal_stats_device_info_t **device_info);
xge_hal_status_e xge_hal_stats_channel(xge_hal_channel_h channelh,
xge_hal_stats_channel_info_t **channel_info);
xge_hal_stats_channel_info_t **channel_info);
xge_hal_status_e xge_hal_stats_reset(xge_hal_device_h devh);
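For reference, these calls hand back pointers to HAL-maintained structures rather than copies (note the double-pointer arguments). A minimal usage sketch, assuming devh is an initialized device handle and channelh an open channel; what is done with the counters (sysctl export, logging) is left out:
	xge_hal_stats_device_info_t *dev_stats;
	xge_hal_stats_channel_info_t *chan_stats;

	if (xge_hal_stats_device(devh, &dev_stats) == XGE_HAL_OK) {
	        /* e.g. inspect dev_stats->total_intr_cnt,
	         * dev_stats->traffic_intr_cnt, dev_stats->soft_reset_cnt */
	}
	if (xge_hal_stats_channel(channelh, &chan_stats) == XGE_HAL_OK) {
	        /* e.g. inspect chan_stats->total_compl_cnt,
	         * chan_stats->avg_compl_per_intr_cnt */
	}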


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-types.h
*
* Description: HAL commonly used types and enumerations
*
* Created: 19 May 2004
*/
#ifndef XGE_HAL_TYPES_H
#define XGE_HAL_TYPES_H
@ -44,44 +36,44 @@ __EXTERN_BEGIN_DECLS
/*
* BIT(loc) - set bit at offset
*/
#define BIT(loc) (0x8000000000000000ULL >> (loc))
#define BIT(loc) (0x8000000000000000ULL >> (loc))
/*
* vBIT(val, loc, sz) - set bits at offset
*/
#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
#define vBIT(val, loc, sz) (((u64)(val)) << (64-(loc)-(sz)))
#define vBIT32(val, loc, sz) (((u32)(val)) << (32-(loc)-(sz)))
/*
* bVALx(bits, loc) - Get the value of x bits at location
*/
#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1)
#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3)
#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7)
#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF)
#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F)
#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F)
#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F)
#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF)
#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF)
#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF)
#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF)
#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF)
#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF)
#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF)
#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF)
#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF)
#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF)
#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF)
#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF)
#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF)
#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF)
#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF)
#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF)
#define bVAL1(bits, loc) ((((u64)bits) >> (64-(loc+1))) & 0x1)
#define bVAL2(bits, loc) ((((u64)bits) >> (64-(loc+2))) & 0x3)
#define bVAL3(bits, loc) ((((u64)bits) >> (64-(loc+3))) & 0x7)
#define bVAL4(bits, loc) ((((u64)bits) >> (64-(loc+4))) & 0xF)
#define bVAL5(bits, loc) ((((u64)bits) >> (64-(loc+5))) & 0x1F)
#define bVAL6(bits, loc) ((((u64)bits) >> (64-(loc+6))) & 0x3F)
#define bVAL7(bits, loc) ((((u64)bits) >> (64-(loc+7))) & 0x7F)
#define bVAL8(bits, loc) ((((u64)bits) >> (64-(loc+8))) & 0xFF)
#define bVAL12(bits, loc) ((((u64)bits) >> (64-(loc+12))) & 0xFFF)
#define bVAL14(bits, loc) ((((u64)bits) >> (64-(loc+14))) & 0x3FFF)
#define bVAL16(bits, loc) ((((u64)bits) >> (64-(loc+16))) & 0xFFFF)
#define bVAL20(bits, loc) ((((u64)bits) >> (64-(loc+20))) & 0xFFFFF)
#define bVAL22(bits, loc) ((((u64)bits) >> (64-(loc+22))) & 0x3FFFFF)
#define bVAL24(bits, loc) ((((u64)bits) >> (64-(loc+24))) & 0xFFFFFF)
#define bVAL28(bits, loc) ((((u64)bits) >> (64-(loc+28))) & 0xFFFFFFF)
#define bVAL32(bits, loc) ((((u64)bits) >> (64-(loc+32))) & 0xFFFFFFFF)
#define bVAL36(bits, loc) ((((u64)bits) >> (64-(loc+36))) & 0xFFFFFFFFF)
#define bVAL40(bits, loc) ((((u64)bits) >> (64-(loc+40))) & 0xFFFFFFFFFF)
#define bVAL44(bits, loc) ((((u64)bits) >> (64-(loc+44))) & 0xFFFFFFFFFFF)
#define bVAL48(bits, loc) ((((u64)bits) >> (64-(loc+48))) & 0xFFFFFFFFFFFF)
#define bVAL52(bits, loc) ((((u64)bits) >> (64-(loc+52))) & 0xFFFFFFFFFFFFF)
#define bVAL56(bits, loc) ((((u64)bits) >> (64-(loc+56))) & 0xFFFFFFFFFFFFFF)
#define bVAL60(bits, loc) ((((u64)bits) >> (64-(loc+60))) & 0xFFFFFFFFFFFFFFF)
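These helpers number bits from the most-significant end of the 64-bit word (bit 0 is the MSB), the convention used by the descriptor and register macros earlier in this diff (e.g. XGE_HAL_RXD_MASK_VLAN_TAG). A few cases worked out by hand to make the convention concrete:
	/* Worked examples of the MSB-first bit numbering: */
	u64 a = BIT(7);               /* 0x8000000000000000ULL >> 7  == 0x0100000000000000ULL */
	u64 b = vBIT(0xFFFF, 48, 16); /* 0xFFFFULL << (64-48-16)     == 0x000000000000FFFFULL */
	u64 c = bVAL16(b, 48);        /* (b >> (64-(48+16))) & 0xFFFF == 0xFFFF */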
#define XGE_HAL_BASE_INF 100
#define XGE_HAL_BASE_ERR 200
#define XGE_HAL_BASE_BADCFG 300
#define XGE_HAL_BASE_INF 100
#define XGE_HAL_BASE_ERR 200
#define XGE_HAL_BASE_BADCFG 300
#define XGE_HAL_ALL_FOXES 0xFFFFFFFFFFFFFFFFULL
@ -356,176 +348,176 @@ __EXTERN_BEGIN_DECLS
*
*/
typedef enum xge_hal_status_e {
XGE_HAL_OK = 0,
XGE_HAL_FAIL = 1,
XGE_HAL_COMPLETIONS_REMAIN = 2,
XGE_HAL_OK = 0,
XGE_HAL_FAIL = 1,
XGE_HAL_COMPLETIONS_REMAIN = 2,
XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS = XGE_HAL_BASE_INF + 1,
XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2,
XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3,
XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4,
XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5,
XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6,
XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7,
XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8,
XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9,
XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10,
XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11,
XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12,
XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13,
XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14,
XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15,
XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1,
XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4,
XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5,
XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6,
XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7,
XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8,
XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9,
XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10,
XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11,
XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12,
XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13,
XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14,
XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15,
XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16,
XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17,
XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18,
XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19,
XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20,
XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21,
XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22,
XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23,
XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24,
XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25,
XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26,
XGE_HAL_INF_OUT_OF_DESCRIPTORS = XGE_HAL_BASE_INF + 2,
XGE_HAL_INF_CHANNEL_IS_NOT_READY = XGE_HAL_BASE_INF + 3,
XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING = XGE_HAL_BASE_INF + 4,
XGE_HAL_INF_STATS_IS_NOT_READY = XGE_HAL_BASE_INF + 5,
XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS = XGE_HAL_BASE_INF + 6,
XGE_HAL_INF_IRQ_POLLING_CONTINUE = XGE_HAL_BASE_INF + 7,
XGE_HAL_INF_LRO_BEGIN = XGE_HAL_BASE_INF + 8,
XGE_HAL_INF_LRO_CONT = XGE_HAL_BASE_INF + 9,
XGE_HAL_INF_LRO_UNCAPABLE = XGE_HAL_BASE_INF + 10,
XGE_HAL_INF_LRO_END_1 = XGE_HAL_BASE_INF + 11,
XGE_HAL_INF_LRO_END_2 = XGE_HAL_BASE_INF + 12,
XGE_HAL_INF_LRO_END_3 = XGE_HAL_BASE_INF + 13,
XGE_HAL_INF_LRO_SESSIONS_XCDED = XGE_HAL_BASE_INF + 14,
XGE_HAL_INF_NOT_ENOUGH_HW_CQES = XGE_HAL_BASE_INF + 15,
XGE_HAL_ERR_DRIVER_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 1,
XGE_HAL_ERR_OUT_OF_MEMORY = XGE_HAL_BASE_ERR + 4,
XGE_HAL_ERR_CHANNEL_NOT_FOUND = XGE_HAL_BASE_ERR + 5,
XGE_HAL_ERR_WRONG_IRQ = XGE_HAL_BASE_ERR + 6,
XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES = XGE_HAL_BASE_ERR + 7,
XGE_HAL_ERR_SWAPPER_CTRL = XGE_HAL_BASE_ERR + 8,
XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT = XGE_HAL_BASE_ERR + 9,
XGE_HAL_ERR_INVALID_MTU_SIZE = XGE_HAL_BASE_ERR + 10,
XGE_HAL_ERR_OUT_OF_MAPPING = XGE_HAL_BASE_ERR + 11,
XGE_HAL_ERR_BAD_SUBSYSTEM_ID = XGE_HAL_BASE_ERR + 12,
XGE_HAL_ERR_INVALID_BAR_ID = XGE_HAL_BASE_ERR + 13,
XGE_HAL_ERR_INVALID_OFFSET = XGE_HAL_BASE_ERR + 14,
XGE_HAL_ERR_INVALID_DEVICE = XGE_HAL_BASE_ERR + 15,
XGE_HAL_ERR_OUT_OF_SPACE = XGE_HAL_BASE_ERR + 16,
XGE_HAL_ERR_INVALID_VALUE_BIT_SIZE = XGE_HAL_BASE_ERR + 17,
XGE_HAL_ERR_VERSION_CONFLICT = XGE_HAL_BASE_ERR + 18,
XGE_HAL_ERR_INVALID_MAC_ADDRESS = XGE_HAL_BASE_ERR + 19,
XGE_HAL_ERR_BAD_DEVICE_ID = XGE_HAL_BASE_ERR + 20,
XGE_HAL_ERR_OUT_ALIGNED_FRAGS = XGE_HAL_BASE_ERR + 21,
XGE_HAL_ERR_DEVICE_NOT_INITIALIZED = XGE_HAL_BASE_ERR + 22,
XGE_HAL_ERR_SPDM_NOT_ENABLED = XGE_HAL_BASE_ERR + 23,
XGE_HAL_ERR_SPDM_TABLE_FULL = XGE_HAL_BASE_ERR + 24,
XGE_HAL_ERR_SPDM_INVALID_ENTRY = XGE_HAL_BASE_ERR + 25,
XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND = XGE_HAL_BASE_ERR + 26,
XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT= XGE_HAL_BASE_ERR + 27,
XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28,
XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29,
XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30,
XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32,
XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33,
XGE_HAL_ERR_INVALID_PCI_INFO = XGE_HAL_BASE_ERR + 28,
XGE_HAL_ERR_CRITICAL = XGE_HAL_BASE_ERR + 29,
XGE_HAL_ERR_RESET_FAILED = XGE_HAL_BASE_ERR + 30,
XGE_HAL_ERR_TOO_MANY = XGE_HAL_BASE_ERR + 32,
XGE_HAL_ERR_PKT_DROP = XGE_HAL_BASE_ERR + 33,
XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1,
XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2,
XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3,
XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4,
XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5,
XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6,
XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8,
XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9,
XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10,
XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11,
XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12,
XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13,
XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14,
XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15,
XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16,
XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17,
XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18,
XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19,
XGE_HAL_BADCFG_TX_URANGE_A = XGE_HAL_BASE_BADCFG + 1,
XGE_HAL_BADCFG_TX_UFC_A = XGE_HAL_BASE_BADCFG + 2,
XGE_HAL_BADCFG_TX_URANGE_B = XGE_HAL_BASE_BADCFG + 3,
XGE_HAL_BADCFG_TX_UFC_B = XGE_HAL_BASE_BADCFG + 4,
XGE_HAL_BADCFG_TX_URANGE_C = XGE_HAL_BASE_BADCFG + 5,
XGE_HAL_BADCFG_TX_UFC_C = XGE_HAL_BASE_BADCFG + 6,
XGE_HAL_BADCFG_TX_UFC_D = XGE_HAL_BASE_BADCFG + 8,
XGE_HAL_BADCFG_TX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 9,
XGE_HAL_BADCFG_TX_TIMER_CI_EN = XGE_HAL_BASE_BADCFG + 10,
XGE_HAL_BADCFG_RX_URANGE_A = XGE_HAL_BASE_BADCFG + 11,
XGE_HAL_BADCFG_RX_UFC_A = XGE_HAL_BASE_BADCFG + 12,
XGE_HAL_BADCFG_RX_URANGE_B = XGE_HAL_BASE_BADCFG + 13,
XGE_HAL_BADCFG_RX_UFC_B = XGE_HAL_BASE_BADCFG + 14,
XGE_HAL_BADCFG_RX_URANGE_C = XGE_HAL_BASE_BADCFG + 15,
XGE_HAL_BADCFG_RX_UFC_C = XGE_HAL_BASE_BADCFG + 16,
XGE_HAL_BADCFG_RX_UFC_D = XGE_HAL_BASE_BADCFG + 17,
XGE_HAL_BADCFG_RX_TIMER_VAL = XGE_HAL_BASE_BADCFG + 18,
XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH= XGE_HAL_BASE_BADCFG + 19,
XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH = XGE_HAL_BASE_BADCFG + 20,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21,
XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG + 22,
XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23,
XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24,
XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25,
XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26,
XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27,
XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28,
XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29,
XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30,
XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31,
XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG +33,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34,
XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35,
XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37,
XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38,
XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39,
XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40,
XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41,
XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42,
XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43,
XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44,
XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45,
XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46,
XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47,
XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48,
XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49,
XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50,
XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51,
XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52,
XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53,
XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54,
XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55,
XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56,
XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57,
XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58,
XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59,
XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60,
XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61,
XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62,
XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63,
XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64,
XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR = XGE_HAL_BASE_BADCFG + 21,
XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS=XGE_HAL_BASE_BADCFG + 22,
XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS = XGE_HAL_BASE_BADCFG + 23,
XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE = XGE_HAL_BASE_BADCFG + 24,
XGE_HAL_BADCFG_RING_QUEUE_SIZE = XGE_HAL_BASE_BADCFG + 25,
XGE_HAL_BADCFG_BACKOFF_INTERVAL_US = XGE_HAL_BASE_BADCFG + 26,
XGE_HAL_BADCFG_MAX_FRM_LEN = XGE_HAL_BASE_BADCFG + 27,
XGE_HAL_BADCFG_RING_PRIORITY = XGE_HAL_BASE_BADCFG + 28,
XGE_HAL_BADCFG_TMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 29,
XGE_HAL_BADCFG_RMAC_UTIL_PERIOD = XGE_HAL_BASE_BADCFG + 30,
XGE_HAL_BADCFG_RMAC_BCAST_EN = XGE_HAL_BASE_BADCFG + 31,
XGE_HAL_BADCFG_RMAC_HIGH_PTIME = XGE_HAL_BASE_BADCFG + 32,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3 = XGE_HAL_BASE_BADCFG +33,
XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7 = XGE_HAL_BASE_BADCFG + 34,
XGE_HAL_BADCFG_FIFO_FRAGS = XGE_HAL_BASE_BADCFG + 35,
XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD = XGE_HAL_BASE_BADCFG + 37,
XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 38,
XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE = XGE_HAL_BASE_BADCFG + 39,
XGE_HAL_BADCFG_MAX_MTU = XGE_HAL_BASE_BADCFG + 40,
XGE_HAL_BADCFG_ISR_POLLING_CNT = XGE_HAL_BASE_BADCFG + 41,
XGE_HAL_BADCFG_LATENCY_TIMER = XGE_HAL_BASE_BADCFG + 42,
XGE_HAL_BADCFG_MAX_SPLITS_TRANS = XGE_HAL_BASE_BADCFG + 43,
XGE_HAL_BADCFG_MMRB_COUNT = XGE_HAL_BASE_BADCFG + 44,
XGE_HAL_BADCFG_SHARED_SPLITS = XGE_HAL_BASE_BADCFG + 45,
XGE_HAL_BADCFG_STATS_REFRESH_TIME = XGE_HAL_BASE_BADCFG + 46,
XGE_HAL_BADCFG_PCI_FREQ_MHERZ = XGE_HAL_BASE_BADCFG + 47,
XGE_HAL_BADCFG_PCI_MODE = XGE_HAL_BASE_BADCFG + 48,
XGE_HAL_BADCFG_INTR_MODE = XGE_HAL_BASE_BADCFG + 49,
XGE_HAL_BADCFG_SCHED_TIMER_US = XGE_HAL_BASE_BADCFG + 50,
XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT = XGE_HAL_BASE_BADCFG + 51,
XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL = XGE_HAL_BASE_BADCFG + 52,
XGE_HAL_BADCFG_QUEUE_SIZE_MAX = XGE_HAL_BASE_BADCFG + 53,
XGE_HAL_BADCFG_RING_RTH_EN = XGE_HAL_BASE_BADCFG + 54,
XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS = XGE_HAL_BASE_BADCFG + 55,
XGE_HAL_BADCFG_TX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 56,
XGE_HAL_BADCFG_RX_TIMER_AC_EN = XGE_HAL_BASE_BADCFG + 57,
XGE_HAL_BADCFG_RXUFCA_INTR_THRES = XGE_HAL_BASE_BADCFG + 58,
XGE_HAL_BADCFG_RXUFCA_LO_LIM = XGE_HAL_BASE_BADCFG + 59,
XGE_HAL_BADCFG_RXUFCA_HI_LIM = XGE_HAL_BASE_BADCFG + 60,
XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD = XGE_HAL_BASE_BADCFG + 61,
XGE_HAL_BADCFG_TRACEBUF_SIZE = XGE_HAL_BASE_BADCFG + 62,
XGE_HAL_BADCFG_LINK_VALID_CNT = XGE_HAL_BASE_BADCFG + 63,
XGE_HAL_BADCFG_LINK_RETRY_CNT = XGE_HAL_BASE_BADCFG + 64,
XGE_HAL_BADCFG_LINK_STABILITY_PERIOD = XGE_HAL_BASE_BADCFG + 65,
XGE_HAL_BADCFG_DEVICE_POLL_MILLIS = XGE_HAL_BASE_BADCFG + 66,
XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67,
XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68,
XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69,
XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70,
XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71,
XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72,
XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73,
XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74,
XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75,
XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76,
XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77,
XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78,
XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80,
XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 81,
XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82,
XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83,
XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84,
XGE_HAL_EOF_TRACE_BUF = -1
XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN = XGE_HAL_BASE_BADCFG + 67,
XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN = XGE_HAL_BASE_BADCFG + 68,
XGE_HAL_BADCFG_MEDIA = XGE_HAL_BASE_BADCFG + 69,
XGE_HAL_BADCFG_NO_ISR_EVENTS = XGE_HAL_BASE_BADCFG + 70,
XGE_HAL_BADCFG_RING_RTS_MAC_EN = XGE_HAL_BASE_BADCFG + 71,
XGE_HAL_BADCFG_LRO_SG_SIZE = XGE_HAL_BASE_BADCFG + 72,
XGE_HAL_BADCFG_LRO_FRM_LEN = XGE_HAL_BASE_BADCFG + 73,
XGE_HAL_BADCFG_WQE_NUM_ODS = XGE_HAL_BASE_BADCFG + 74,
XGE_HAL_BADCFG_BIMODAL_INTR = XGE_HAL_BASE_BADCFG + 75,
XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US = XGE_HAL_BASE_BADCFG + 76,
XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US = XGE_HAL_BASE_BADCFG + 77,
XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED = XGE_HAL_BASE_BADCFG + 78,
XGE_HAL_BADCFG_RTS_QOS_EN = XGE_HAL_BASE_BADCFG + 79,
XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 80,
XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR = XGE_HAL_BASE_BADCFG + 81,
XGE_HAL_BADCFG_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 82,
XGE_HAL_BADCFG_RING_RTS_PORT_EN = XGE_HAL_BASE_BADCFG + 83,
XGE_HAL_BADCFG_TRACEBUF_TIMESTAMP = XGE_HAL_BASE_BADCFG + 84,
XGE_HAL_EOF_TRACE_BUF = -1
} xge_hal_status_e;
#define XGE_HAL_ETH_ALEN 6
#define XGE_HAL_ETH_ALEN 6
typedef u8 macaddr_t[XGE_HAL_ETH_ALEN];
#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100
#define XGE_HAL_PCI_XFRAME_CONFIG_SPACE_SIZE 0x100
/* frames sizes */
#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14
#define XGE_HAL_HEADER_802_2_SIZE 3
#define XGE_HAL_HEADER_SNAP_SIZE 5
#define XGE_HAL_HEADER_VLAN_SIZE 4
#define XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE 14
#define XGE_HAL_HEADER_802_2_SIZE 3
#define XGE_HAL_HEADER_SNAP_SIZE 5
#define XGE_HAL_HEADER_VLAN_SIZE 4
#define XGE_HAL_MAC_HEADER_MAX_SIZE \
(XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \
XGE_HAL_HEADER_802_2_SIZE + \
XGE_HAL_HEADER_SNAP_SIZE)
(XGE_HAL_HEADER_ETHERNET_II_802_3_SIZE + \
XGE_HAL_HEADER_802_2_SIZE + \
XGE_HAL_HEADER_SNAP_SIZE)
#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64)
#define XGE_HAL_TCPIP_HEADER_MAX_SIZE (64 + 64)
/* 32bit alignments */
#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2
#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2
#define XGE_HAL_HEADER_802_2_ALIGN 3
#define XGE_HAL_HEADER_SNAP_ALIGN 1
#define XGE_HAL_HEADER_ETHERNET_II_802_3_ALIGN 2
#define XGE_HAL_HEADER_802_2_SNAP_ALIGN 2
#define XGE_HAL_HEADER_802_2_ALIGN 3
#define XGE_HAL_HEADER_SNAP_ALIGN 1
#define XGE_HAL_L3_CKSUM_OK 0xFFFF
#define XGE_HAL_L4_CKSUM_OK 0xFFFF
#define XGE_HAL_MIN_MTU 46
#define XGE_HAL_MAX_MTU 9600
#define XGE_HAL_DEFAULT_MTU 1500
#define XGE_HAL_L3_CKSUM_OK 0xFFFF
#define XGE_HAL_L4_CKSUM_OK 0xFFFF
#define XGE_HAL_MIN_MTU 46
#define XGE_HAL_MAX_MTU 9600
#define XGE_HAL_DEFAULT_MTU 1500
#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920
#define XGE_HAL_SEGEMENT_OFFLOAD_MAX_SIZE 81920
#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */
#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */
#define XGE_HAL_PCISIZE_XENA 26 /* multiples of dword */
#define XGE_HAL_PCISIZE_HERC 64 /* multiples of dword */
#define XGE_HAL_MAX_MSIX_MESSAGES 64
#define XGE_HAL_MAX_MSIX_MESSAGES 64
#define XGE_HAL_MAX_MSIX_MESSAGES_WITH_ADDR XGE_HAL_MAX_MSIX_MESSAGES * 2
/* Highest level interrupt blocks */
#define XGE_HAL_TX_PIC_INTR (0x0001<<0)
@ -541,17 +533,17 @@ typedef u8 macaddr_t[XGE_HAL_ETH_ALEN];
#define XGE_HAL_MC_INTR (0x0001<<10)
#define XGE_HAL_SCHED_INTR (0x0001<<11)
#define XGE_HAL_ALL_INTRS (XGE_HAL_TX_PIC_INTR | \
XGE_HAL_TX_DMA_INTR | \
XGE_HAL_TX_MAC_INTR | \
XGE_HAL_TX_XGXS_INTR | \
XGE_HAL_TX_TRAFFIC_INTR | \
XGE_HAL_RX_PIC_INTR | \
XGE_HAL_RX_DMA_INTR | \
XGE_HAL_RX_MAC_INTR | \
XGE_HAL_RX_XGXS_INTR | \
XGE_HAL_RX_TRAFFIC_INTR | \
XGE_HAL_MC_INTR | \
XGE_HAL_SCHED_INTR)
XGE_HAL_TX_DMA_INTR | \
XGE_HAL_TX_MAC_INTR | \
XGE_HAL_TX_XGXS_INTR | \
XGE_HAL_TX_TRAFFIC_INTR | \
XGE_HAL_RX_PIC_INTR | \
XGE_HAL_RX_DMA_INTR | \
XGE_HAL_RX_MAC_INTR | \
XGE_HAL_RX_XGXS_INTR | \
XGE_HAL_RX_TRAFFIC_INTR | \
XGE_HAL_MC_INTR | \
XGE_HAL_SCHED_INTR)
#define XGE_HAL_GEN_MASK_INTR (0x0001<<12)
/* Interrupt masks for the general interrupt mask register */
@ -570,7 +562,7 @@ typedef u8 macaddr_t[XGE_HAL_ETH_ALEN];
#define XGE_HAL_RXTRAFFIC_INT_M BIT(40)
/* MSI level Interrupts */
#define XGE_HAL_MAX_MSIX_VECTORS (16)
#define XGE_HAL_MAX_MSIX_VECTORS (16)
typedef struct xge_hal_ipv4 {
u32 addr;
@ -586,33 +578,22 @@ typedef union xge_hal_ipaddr_t {
}xge_hal_ipaddr_t;
/* DMA level Interrupts */
#define XGE_HAL_TXDMA_PFC_INT_M BIT(0)
/* PFC block interrupts */
#define XGE_HAL_PFC_MISC_ERR_1 BIT(0) /* Interrupt to indicate FIFO full */
/* basic handles */
typedef void* xge_hal_device_h;
typedef void* xge_hal_dtr_h;
typedef void* xge_hal_channel_h;
#ifdef XGEHAL_RNIC
typedef void* xge_hal_towi_h;
typedef void* xge_hal_hw_wqe_h;
typedef void* xge_hal_hw_cqe_h;
typedef void* xge_hal_lro_wqe_h;
typedef void* xge_hal_lro_cqe_h;
typedef void* xge_hal_up_msg_h;
typedef void* xge_hal_down_msg_h;
typedef void* xge_hal_channel_callback_fh;
typedef void* xge_hal_msg_queueh;
typedef void* xge_hal_pblist_h;
#endif
/*
* I2C device id. Used in I2C control register for accessing EEPROM device
* memory.
*/
#define XGE_DEV_ID 5
typedef enum xge_hal_xpak_alarm_type_e {
XGE_HAL_XPAK_ALARM_EXCESS_TEMP = 1,

View File

@ -26,15 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal.h
*
* Description: Consolidated header. Upper layers should include it to
* avoid include order problems.
*
* Created: 14 May 2004
*/
#ifndef XGE_HAL_H
#define XGE_HAL_H

View File

@ -26,18 +26,12 @@
* $FreeBSD$
*/
/*
* xge-osdep.h
*
* Platform-dependent "glue" code
*/
#ifndef XGE_OSDEP_H
#define XGE_OSDEP_H
/******************************************
/**
* Includes and defines
******************************************/
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
@ -57,6 +51,9 @@
#include <sys/mutex.h>
#include <sys/types.h>
#include <sys/endian.h>
#include <sys/sysctl.h>
#include <sys/socket.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/clock.h>
@ -73,63 +70,70 @@
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/if_types.h>
#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#define XGE_OS_PLATFORM_64BIT
#if BYTE_ORDER == BIG_ENDIAN
#define XGE_OS_HOST_BIG_ENDIAN 1
#elif BYTE_ORDER == LITTLE_ENDIAN
#define XGE_OS_HOST_LITTLE_ENDIAN 1
#endif
#ifdef XGE_TRACE_ASSERT
#undef XGE_TRACE_ASSERT
#endif
#define XGE_HAL_USE_5B_MODE 1
#define XGE_HAL_PROCESS_LINK_INT_IN_ISR 1
#define OS_NETSTACK_BUF struct mbuf *
#define XGE_LL_IP_FAST_CSUM(hdr, len) 0
#define xge_os_ntohs ntohs
#define xge_os_ntohl ntohl
#define xge_os_htons htons
#define xge_os_htonl htonl
#ifndef __DECONST
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
typedef struct xge_bus_resource_t {
bus_space_tag_t bus_tag; /* DMA Tag */
bus_space_handle_t bus_handle; /* Bus handle */
struct resource *bar_start_addr;/* BAR start address */
} xge_bus_resource_t;
typedef struct xge_dma_alloc_t {
bus_addr_t dma_phyaddr; /* Physical Address */
caddr_t dma_viraddr; /* Virtual Address */
bus_dma_tag_t dma_tag; /* DMA Tag */
bus_dmamap_t dma_map; /* DMA Map */
bus_dma_segment_t dma_segment; /* DMA Segment */
bus_size_t dma_size; /* Size */
int dma_nseg; /* Maximum scatter-gather segs. */
} xge_dma_alloc_t;
typedef struct xge_dma_mbuf_t {
bus_addr_t dma_phyaddr; /* Physical Address */
bus_dmamap_t dma_map; /* DMA Map */
}xge_dma_mbuf_t;
typedef struct xge_pci_info {
device_t device; /* Device */
struct resource *regmap0; /* Resource for BAR0 */
struct resource *regmap1; /* Resource for BAR1 */
void *bar0resource; /* BAR0 tag and handle */
void *bar1resource; /* BAR1 tag and handle */
} xge_pci_info_t;
/**
* Fixed size primitive types
*/
#define u8 uint8_t
#define u16 uint16_t
#define u32 uint32_t
@ -139,16 +143,16 @@ typedef struct pci_info {
#define ptrdiff_t ptrdiff_t
typedef bus_addr_t dma_addr_t;
typedef struct mtx spinlock_t;
typedef xge_pci_info_t *pci_dev_h;
typedef xge_bus_resource_t *pci_reg_h;
typedef xge_dma_alloc_t pci_dma_h;
typedef xge_dma_alloc_t pci_dma_acc_h;
typedef struct resource *pci_irq_h;
typedef xge_pci_info_t *pci_cfg_h;
/**
* "libc" functionality
*/
#define xge_os_memzero(addr, size) bzero(addr, size)
#define xge_os_memcpy(dst, src, size) bcopy(src, dst, size)
#define xge_os_memcmp memcmp
@ -156,79 +160,86 @@ typedef struct xge_dma_alloc pci_dma_acc_h;
#define xge_os_strlen strlen
#define xge_os_snprintf snprintf
#define xge_os_sprintf sprintf
#define xge_os_printf(fmt...) { \
printf(fmt); \
printf("\n"); \
}
#define xge_os_vaprintf(fmt) { \
va_list va; \
va_start(va, fmt); \
vprintf(fmt, va); \
printf("\n"); \
va_end(va); \
}
#define xge_os_vasprintf(buf, fmt) { \
va_list va; \
va_start(va, fmt); \
(void) vsprintf(buf, fmt, va); \
va_end(va); \
}
#define xge_os_timestamp(buf) { \
struct timeval current_time; \
gettimeofday(&current_time, 0); \
sprintf(buf, "%08li.%08li: ", current_time.tv_sec, \
current_time.tv_usec); \
}
#define xge_os_println xge_os_printf
/**
* Synchronization Primitives
*/
/* Initialize the spin lock */
#define xge_os_spin_lock_init(lockp, ctxh) { \
if(mtx_initialized(lockp) == 0) { \
mtx_init((lockp), "xge", NULL, MTX_DEF); \
} \
}
/* Initialize the spin lock (IRQ version) */
#define xge_os_spin_lock_init_irq(lockp, ctxh) { \
if(mtx_initialized(lockp) == 0) { \
mtx_init((lockp), "xge", NULL, MTX_DEF); \
} \
}
/* Destroy the lock */
#define xge_os_spin_lock_destroy(lockp, ctxh) { \
if(mtx_initialized(lockp) != 0) { \
mtx_destroy(lockp); \
} \
}
/* Destroy the lock (IRQ version) */
#define xge_os_spin_lock_destroy_irq(lockp, ctxh) { \
if(mtx_initialized(lockp) != 0) { \
mtx_destroy(lockp); \
} \
}
/* Acquire the lock */
#define xge_os_spin_lock(lockp) { \
if(mtx_owned(lockp) == 0) mtx_lock(lockp); \
}
/* Release the lock */
#define xge_os_spin_unlock(lockp) { \
mtx_unlock(lockp); \
}
/* Acquire the lock (IRQ version) */
#define xge_os_spin_lock_irq(lockp, flags) { \
flags = MTX_QUIET; \
if(mtx_owned(lockp) == 0) mtx_lock_flags(lockp, flags); \
}
/* Release the lock (IRQ version) */
#define xge_os_spin_unlock_irq(lockp, flags) { \
flags = MTX_QUIET; \
mtx_unlock_flags(lockp, flags); \
}
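A minimal usage sketch of the lock wrappers above (hypothetical helper, not part of the driver), assuming a spinlock_t embedded in a zeroed softc:

/*
 * Hypothetical sketch: typical lifetime of a lock managed through the
 * wrappers above. The helper name and the NULL context are illustrative.
 */
static void
example_lock_usage(spinlock_t *lock)
{
	unsigned long flags;

	xge_os_spin_lock_init(lock, NULL);	/* no-op if already initialized */

	xge_os_spin_lock(lock);			/* plain acquire/release */
	/* ... touch shared state ... */
	xge_os_spin_unlock(lock);

	xge_os_spin_lock_irq(lock, flags);	/* "IRQ" flavor used by the HAL */
	/* ... touch state shared with the interrupt path ... */
	xge_os_spin_unlock_irq(lock, flags);

	xge_os_spin_lock_destroy(lock, NULL);
}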
/* Write memory barrier */
@ -241,65 +252,68 @@ typedef struct xge_dma_alloc pci_dma_acc_h;
#define xge_os_mdelay(ms) DELAY(ms * 1000)
/* Compare and exchange */
//#define xge_os_cmpxchg(targetp, cmd, newval)
/**
* Misc primitives
*/
#define xge_os_unlikely(x) (x)
#define xge_os_prefetch(x) (x=x)
#define xge_os_prefetchw(x) (x=x)
#define xge_os_bug(fmt...) printf(fmt)
/**
* Compiler Stuffs
*/
#define __xge_os_attr_cacheline_aligned
#define __xge_os_cacheline_size 32
/**
* Memory Primitives
*/
#define XGE_OS_INVALID_DMA_ADDR ((dma_addr_t)0)
/**
* xge_os_malloc
* Allocate non DMA-able memory.
* @pdev: Device context.
* @size: Size to allocate.
*
* Allocate @size bytes of memory. This allocation can sleep and therefore
* requires process context; in other words, xge_os_malloc() cannot be called
* from the interrupt context. Use xge_os_free() to free the allocated block.
*
* Returns: Pointer to allocated memory, NULL - on failure.
*
* See also: xge_os_free().
*/
static inline void *
xge_os_malloc(pci_dev_h pdev, unsigned long size) {
void *vaddr = malloc((size), M_DEVBUF, M_NOWAIT | M_ZERO);
if(vaddr != NULL) {
XGE_OS_MEMORY_CHECK_MALLOC(vaddr, size, __FILE__, __LINE__);
xge_os_memzero(vaddr, size);
}
return (vaddr);
}
/**
* xge_os_free
* Free non DMA-able memory.
* @pdev: Device context.
* @vaddr: Address of the allocated memory block.
* @size: Some OS's require to provide size on free
*
* Free the memory area obtained via xge_os_malloc(). This call may also sleep,
* and therefore it cannot be used inside interrupt.
*
* See also: xge_os_malloc().
*/
static inline void
xge_os_free(pci_dev_h pdev, const void *vaddr, unsigned long size) {
XGE_OS_MEMORY_CHECK_FREE(vaddr, size);
@ -313,64 +327,65 @@ xge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) {
return;
}
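A short sketch of the non-DMA allocation pair above (hypothetical helper; the 1024-byte size is illustrative):

/*
 * Hypothetical sketch: allocate a zeroed scratch buffer and release it.
 * The size passed to xge_os_free() must match the allocation.
 */
static void
example_heap_usage(pci_dev_h pdev)
{
	void *buf;

	buf = xge_os_malloc(pdev, 1024);	/* zeroed, may return NULL */
	if (buf == NULL)
		return;
	/* ... use buf ... */
	xge_os_free(pdev, buf, 1024);
}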
/**
* xge_os_dma_malloc
* Allocate DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @size: Size (in bytes) to allocate.
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT (Note that the last two flags are mutually exclusive.)
* @p_dmah: Handle used to map the memory onto the corresponding device memory
* space. See xge_os_dma_map(). The handle is an out-parameter returned by the
* function.
* @p_dma_acch: One more DMA handle used subsequently to free the DMA object
* (via xge_os_dma_free()).
*
* Allocate DMA-able contiguous memory block of the specified @size. This memory
* can be subsequently freed using xge_os_dma_free().
* Note: can be used inside interrupt context.
*
* Returns: Pointer to allocated memory(DMA-able), NULL on failure.
*/
static inline void *
xge_os_dma_malloc(pci_dev_h pdev, unsigned long size, int dma_flags,
pci_dma_h *p_dmah, pci_dma_acc_h *p_dma_acch) {
int retValue = bus_dma_tag_create(
bus_get_dma_tag(pdev->device), /* Parent */
PAGE_SIZE, /* Alignment no specific alignment */
0, /* Bounds */
BUS_SPACE_MAXADDR, /* Low Address */
BUS_SPACE_MAXADDR, /* High Address */
NULL, /* Filter */
NULL, /* Filter arg */
size, /* Max Size */
1, /* n segments */
size, /* max segment size */
BUS_DMA_ALLOCNOW, /* Flags */
NULL, /* lockfunction */
NULL, /* lock arg */
&p_dmah->dma_tag); /* DMA tag */
if(retValue != 0) {
xge_os_printf("bus_dma_tag_create failed\n");
xge_os_printf("bus_dma_tag_create failed\n")
goto fail_1;
}
p_dmah->dma_size = size;
retValue = bus_dmamem_alloc(p_dmah->dma_tag,
(void **)&p_dmah->dma_viraddr, BUS_DMA_NOWAIT, &p_dmah->dma_map);
if(retValue != 0) {
xge_os_printf("bus_dmamem_alloc failed\n");
xge_os_printf("bus_dmamem_alloc failed\n")
goto fail_2;
}
XGE_OS_MEMORY_CHECK_MALLOC(p_dmah->dma_viraddr, p_dmah->dma_size,
__FILE__, __LINE__);
return(p_dmah->dma_viraddr);
fail_2: bus_dma_tag_destroy(p_dmah->dma_tag);
fail_1: return(NULL);
}
/**
* xge_os_dma_free
* Free previously allocated DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @vaddr: Virtual address of the DMA-able memory.
* @p_dma_acch: DMA handle used to free the resource.
@ -379,10 +394,11 @@ fail_1: return(NULL);
* Free DMA-able memory originally allocated by xge_os_dma_malloc().
* Note: can be used inside interrupt.
* See also: xge_os_dma_malloc().
*/
static inline void
xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
pci_dma_acc_h *p_dma_acch, pci_dma_h *p_dmah)
{
XGE_OS_MEMORY_CHECK_FREE(p_dmah->dma_viraddr, size);
bus_dmamem_free(p_dmah->dma_tag, p_dmah->dma_viraddr, p_dmah->dma_map);
bus_dma_tag_destroy(p_dmah->dma_tag);
@ -392,17 +408,18 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
return;
}
/**
* IO/PCI/DMA Primitives
*/
#define XGE_OS_DMA_DIR_TODEVICE 0
#define XGE_OS_DMA_DIR_FROMDEVICE 1
#define XGE_OS_DMA_DIR_BIDIRECTIONAL 2
/**
* xge_os_pci_read8
* Read one byte from device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform PIO and/or
* config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Address of the result.
@ -410,26 +427,28 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
* Read byte value from the specified @regh PCI configuration space at the
* specified offset = @where.
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_read8(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 1))
/**
* xge_os_pci_write8
* Write one byte into device PCI configuration.
* @pdev: Device context. Some OSs require device context to perform PIO and/or
* config space IO.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write byte value into the specified PCI configuration space
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_write8(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 1)
/**
* xge_os_pci_read16
* Read 16bit word from device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
@ -438,26 +457,27 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
* Read 16bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_read16(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 2))
/**
* xge_os_pci_write16
* Write 16bit word into device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 16bit value into the specified @offset in PCI configuration space.
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_write16(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 2)
/**
* xge_os_pci_read32
* Read 32bit word from device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
@ -466,165 +486,176 @@ xge_os_dma_free(pci_dev_h pdev, const void *vaddr, int size,
* Read 32bit value from the specified PCI configuration space at the
* specified offset.
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_read32(pdev, cfgh, where, val) \
(*(val) = pci_read_config(pdev->device, where, 4))
/**
* xge_os_pci_write32
* Write 32bit word into device PCI configuration.
* @pdev: Device context.
* @cfgh: PCI configuration space handle.
* @where: Offset in the PCI configuration space.
* @val: Value to write.
*
* Write 32bit value into the specified @offset in PCI configuration space.
* Returns: 0 - success, non-zero - failure.
*/
#define xge_os_pci_write32(pdev, cfgh, where, val) \
pci_write_config(pdev->device, where, val, 4)
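A hedged sketch of the config-space accessors above (hypothetical helper; PCIR_VENDOR, PCIR_DEVICE and PCIR_BAR come from <dev/pci/pcireg.h> and may require that header to be included separately):

/*
 * Hypothetical sketch: read the adapter's vendor/device IDs and BAR0
 * through the wrappers above. Values are only printed for illustration.
 */
static inline void
example_pci_cfg_usage(pci_dev_h pdev, pci_cfg_h cfgh)
{
	u16 vendor, device;
	u32 bar0;

	xge_os_pci_read16(pdev, cfgh, PCIR_VENDOR, &vendor);
	xge_os_pci_read16(pdev, cfgh, PCIR_DEVICE, &device);
	xge_os_pci_read32(pdev, cfgh, PCIR_BAR(0), &bar0);

	xge_os_printf("vendor 0x%04x device 0x%04x bar0 0x%08x",
	    vendor, device, bar0);
}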
/**
* xge_os_pio_mem_read8
* Read 1 byte from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 1 byte value read from the specified (mapped) memory space address.
*/
static inline u8
xge_os_pio_mem_read8(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)
(((xge_bus_resource_t *)(regh))->bar_start_addr);
return bus_space_read_1(tag, handle, (caddr_t)(addr) - addrss);
}
/**
* xge_os_pio_mem_write8
* Write 1 byte into device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write byte value into the specified (mapped) device memory space.
*/
static inline void
xge_os_pio_mem_write8(pci_dev_h pdev, pci_reg_h regh, u8 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)
(((xge_bus_resource_t *)(regh))->bar_start_addr);
bus_space_write_1(tag, handle, (caddr_t)(addr) - addrss, val);
}
/**
* xge_os_pio_mem_read16
* Read 16bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 16bit value read from the specified (mapped) memory space address.
*/
static inline u16
xge_os_pio_mem_read16(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)
(((xge_bus_resource_t *)(regh))->bar_start_addr);
return bus_space_read_2(tag, handle, (caddr_t)(addr) - addrss);
}
/**
* xge_os_pio_mem_write16
* Write 16bit into device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 16bit value into the specified (mapped) device memory space.
*/
static inline void
xge_os_pio_mem_write16(pci_dev_h pdev, pci_reg_h regh, u16 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((xge_bus_resource_t *)(regh))->bar_start_addr);
bus_space_write_2(tag, handle, (caddr_t)(addr) - addrss, val);
}
/**
* xge_os_pio_mem_read32
* Read 32bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 32bit value read from the specified (mapped) memory space address.
*/
static inline u32
xge_os_pio_mem_read32(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)
(((xge_bus_resource_t *)(regh))->bar_start_addr);
return bus_space_read_4(tag, handle, (caddr_t)(addr) - addrss);
}
/**
* xge_os_pio_mem_write32
* Write 32bit into device memory space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 32bit value into the specified (mapped) device memory space.
*/
static inline void
xge_os_pio_mem_write32(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
{
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)(((xge_bus_resource_t *)(regh))->bar_start_addr);
bus_space_write_4(tag, handle, (caddr_t)(addr) - addrss, val);
}
/**
* xge_os_pio_mem_read64
* Read 64bit from device memory mapped space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @addr: Address in device memory space.
*
* Returns: 64bit value read from the specified (mapped) memory space address.
*/
static inline u64
xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr)
{
u64 value1, value2;
bus_space_tag_t tag =
(bus_space_tag_t)(((xge_bus_resource_t *)regh)->bus_tag);
bus_space_handle_t handle =
(bus_space_handle_t)(((xge_bus_resource_t *)regh)->bus_handle);
caddr_t addrss = (caddr_t)
(((xge_bus_resource_t *)(regh))->bar_start_addr);
value1 = bus_space_read_4(tag, handle, (caddr_t)(addr) + 4 - addrss);
value1 <<= 32;
@ -633,15 +664,16 @@ xge_os_pio_mem_read64(pci_dev_h pdev, pci_reg_h regh, void *addr)
return value1;
}
/**
* xge_os_pio_mem_write64
* Write 64bit into device memory space.
* @pdev: Device context.
* @regh: PCI configuration space handle.
* @val: Value to write.
* @addr: Address in device memory space.
*
* Write 64bit value into the specified (mapped) device memory space.
*/
static inline void
xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr)
{
@ -650,33 +682,29 @@ xge_os_pio_mem_write64(pci_dev_h pdev, pci_reg_h regh, u64 val, void *addr)
xge_os_pio_mem_write32(pdev, regh, val >> 32, ((caddr_t)(addr) + 4));
}
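A brief sketch of the 64-bit PIO helpers above (hypothetical helper; the register pointer and test pattern are illustrative only):

/*
 * Hypothetical sketch: write a 64-bit adapter register and read it back.
 * "reg" stands for some register address within the mapped BAR.
 */
static inline u64
example_pio_usage(pci_dev_h pdev, pci_reg_h regh, void *reg)
{
	xge_os_pio_mem_write64(pdev, regh, 0xA5A5A5A5A5A5A5A5ULL, reg);
	return xge_os_pio_mem_read64(pdev, regh, reg);
}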
/**
* FIXME: document
*/
#define xge_os_flush_bridge xge_os_pio_mem_read64
/**
* xge_os_dma_map
* Map DMA-able memory block to, or from, or to-and-from device.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
* xge_os_dma_malloc().
* @vaddr: Virtual address of the DMA-able memory.
* @size: Size (in bytes) to be mapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
* @dma_flags: XGE_OS_DMA_CACHELINE_ALIGNED, XGE_OS_DMA_STREAMING,
* XGE_OS_DMA_CONSISTENT (Note that the last two flags are mutually exclusive).
*
* Map a single memory block.
*
* Returns: DMA address of the memory block, XGE_OS_INVALID_DMA_ADDR on failure.
*
* See also: xge_os_dma_malloc(), xge_os_dma_unmap(), xge_os_dma_sync().
*/
static inline dma_addr_t
xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size,
int dir, int dma_flags)
@ -685,14 +713,14 @@ xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size,
bus_dmamap_load(dmah.dma_tag, dmah.dma_map, dmah.dma_viraddr,
dmah.dma_size, xge_dmamap_cb, &dmah.dma_phyaddr, BUS_DMA_NOWAIT);
if(retValue != 0) {
xge_os_printf("bus_dmamap_load_ failed\n");
xge_os_printf("bus_dmamap_load_ failed\n")
return XGE_OS_INVALID_DMA_ADDR;
}
dmah.dma_size = size;
return dmah.dma_phyaddr;
}
/**
* xge_os_dma_unmap - Unmap DMA-able memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
@ -701,10 +729,10 @@ xge_os_dma_map(pci_dev_h pdev, pci_dma_h dmah, void *vaddr, size_t size,
* @size: Size (in bytes) to be unmapped.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Unmap a single DMA-able memory block that was previously mapped using
* xge_os_dma_map().
* See also: xge_os_dma_malloc(), xge_os_dma_map().
*/
static inline void
xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
size_t size, int dir)
@ -713,7 +741,7 @@ xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
return;
}
/**
* xge_os_dma_sync - Synchronize mapped memory.
* @pdev: Device context. Used to allocate/pin/map/unmap DMA-able memory.
* @dmah: DMA handle used to map the memory block. Obtained via
@ -723,16 +751,14 @@ xge_os_dma_unmap(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
* @length: Size of the block.
* @dir: Direction of this operation (XGE_OS_DMA_DIR_TODEVICE, etc.)
*
* Make physical and CPU memory consistent for a single streaming mode DMA
* translation. This API compiles to NOP on cache-coherent platforms. On
* non cache-coherent platforms, depending on the direction of the "sync"
* operation, this API will effectively either invalidate CPU cache (that might
* contain old data), or flush CPU cache to update physical memory.
* See also: xge_os_dma_malloc(), xge_os_dma_map(),
* xge_os_dma_unmap().
*/
static inline void
xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
u64 dma_offset, size_t length, int dir)
@ -747,7 +773,7 @@ xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
syncop = BUS_DMASYNC_PREREAD | BUS_DMASYNC_POSTREAD;
break;
case XGE_OS_DMA_DIR_BIDIRECTIONAL:
default:
syncop = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTWRITE;
break;
}
@ -756,3 +782,4 @@ xge_os_dma_sync(pci_dev_h pdev, pci_dma_h dmah, dma_addr_t dma_addr,
}
#endif /* XGE_OSDEP_H */
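A sketch of the full DMA life cycle built from the helpers in this header (hypothetical helper; XGE_EXAMPLE_BLOCK_SIZE and the flag values are illustrative and error handling is abbreviated):

/*
 * Hypothetical sketch: allocate a DMA-able block, map it, sync it before
 * handing it to the device, then unmap and free it again.
 */
#define XGE_EXAMPLE_BLOCK_SIZE	4096

static inline void
example_dma_lifecycle(pci_dev_h pdev)
{
	pci_dma_h dmah;
	pci_dma_acc_h dma_acch;
	dma_addr_t busaddr;
	void *block;

	block = xge_os_dma_malloc(pdev, XGE_EXAMPLE_BLOCK_SIZE, 0,
	    &dmah, &dma_acch);
	if (block == NULL)
		return;

	busaddr = xge_os_dma_map(pdev, dmah, block, XGE_EXAMPLE_BLOCK_SIZE,
	    XGE_OS_DMA_DIR_TODEVICE, 0);
	if (busaddr != XGE_OS_INVALID_DMA_ADDR) {
		/* make CPU stores visible to the device before kicking it */
		xge_os_dma_sync(pdev, dmah, busaddr, 0,
		    XGE_EXAMPLE_BLOCK_SIZE, XGE_OS_DMA_DIR_TODEVICE);
		/* ... program the adapter with busaddr ... */
		xge_os_dma_unmap(pdev, dmah, busaddr,
		    XGE_EXAMPLE_BLOCK_SIZE, XGE_OS_DMA_DIR_TODEVICE);
	}
	xge_os_dma_free(pdev, block, XGE_EXAMPLE_BLOCK_SIZE,
	    &dma_acch, &dmah);
}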

View File

@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xge-queue.c
*
* Description: serialized event queue
*
* Created: 7 June 2004
*/
#include <dev/nxge/include/xge-queue.h>
/**
@ -63,65 +55,65 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
xge_queue_item_t *elem;
if (xge_list_is_empty(&queue->list_head))
return XGE_QUEUE_IS_EMPTY;
elem = (xge_queue_item_t *)queue->list_head.next;
if (elem->data_size > data_max_size)
return XGE_QUEUE_NOT_ENOUGH_SPACE;
xge_list_remove(&elem->item);
real_size = elem->data_size + sizeof(xge_queue_item_t);
if (queue->head_ptr == elem) {
queue->head_ptr = (char *)queue->head_ptr + real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the head: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else if ((char *)queue->tail_ptr - real_size == (char*)elem) {
queue->tail_ptr = (char *)queue->tail_ptr - real_size;
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the tail: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else {
xge_debug_queue(XGE_TRACE,
"event_type: %d removing from the list: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
elem->event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
}
xge_assert(queue->tail_ptr >= queue->head_ptr);
xge_assert(queue->tail_ptr >= queue->start_ptr &&
queue->tail_ptr <= queue->end_ptr);
xge_assert(queue->head_ptr >= queue->start_ptr &&
queue->head_ptr < queue->end_ptr);
xge_os_memcpy(item, elem, sizeof(xge_queue_item_t));
xge_os_memcpy(xge_queue_item_data(item), xge_queue_item_data(elem),
elem->data_size);
if (xge_list_is_empty(&queue->list_head)) {
/* reset buffer pointers just to be clean */
queue->head_ptr = queue->tail_ptr = queue->start_ptr;
}
return XGE_QUEUE_OK;
}
@ -150,7 +142,7 @@ __queue_consume(xge_queue_t *queue, int data_max_size, xge_queue_item_t *item)
*/
xge_queue_status_e
xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
int is_critical, const int data_size, void *data)
{
xge_queue_t *queue = (xge_queue_t *)queueh;
int real_size = data_size + sizeof(xge_queue_item_t);
@ -162,76 +154,76 @@ xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
xge_os_spin_lock_irq(&queue->lock, flags);
if (is_critical && !queue->has_critical_event) {
unsigned char item_buf[sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
while (__queue_consume(queue,
XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
item) != XGE_QUEUE_IS_EMPTY)
; /* do nothing */
}
try_again:
if ((char *)queue->tail_ptr + real_size <= (char *)queue->end_ptr) {
elem = (xge_queue_item_t *) queue->tail_ptr;
queue->tail_ptr = (void *)((char *)queue->tail_ptr + real_size);
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the tail: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" elem 0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
(u64)(ulong_t)elem,
real_size);
} else if ((char *)queue->head_ptr - real_size >=
(char *)queue->start_ptr) {
elem = (xge_queue_item_t *) ((char *)queue->head_ptr - real_size);
queue->head_ptr = elem;
xge_debug_queue(XGE_TRACE,
"event_type: %d adding to the head: "
"0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT":0x"XGE_OS_LLXFMT
":0x"XGE_OS_LLXFMT" length %d",
event_type,
(u64)(ulong_t)queue->start_ptr,
(u64)(ulong_t)queue->head_ptr,
(u64)(ulong_t)queue->tail_ptr,
(u64)(ulong_t)queue->end_ptr,
real_size);
} else {
xge_queue_status_e status;
if (queue->pages_current >= queue->pages_max) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return XGE_QUEUE_IS_FULL;
}
if (queue->has_critical_event) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return XGE_QUEUE_IS_FULL;
}
/* grow */
status = __io_queue_grow(queueh);
if (status != XGE_QUEUE_OK) {
xge_os_spin_unlock_irq(&queue->lock, flags);
return status;
}
goto try_again;
}
xge_assert(queue->tail_ptr >= queue->head_ptr);
xge_assert(queue->tail_ptr >= queue->start_ptr &&
queue->tail_ptr <= queue->end_ptr);
xge_assert(queue->head_ptr >= queue->start_ptr &&
queue->head_ptr < queue->end_ptr);
elem->data_size = data_size;
elem->event_type = (xge_hal_event_e) event_type;
elem->is_critical = is_critical;
if (is_critical)
queue->has_critical_event = 1;
@ -267,12 +259,12 @@ xge_queue_produce(xge_queue_h queueh, int event_type, void *context,
*/
xge_queue_h
xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
int pages_max, xge_queued_f queued, void *queued_data)
{
xge_queue_t *queue;
if ((queue = (xge_queue_t *) xge_os_malloc(pdev, sizeof(xge_queue_t))) == NULL)
return NULL;
queue->queued_func = queued;
queue->queued_data = queued_data;
@ -282,12 +274,12 @@ xge_queue_create(pci_dev_h pdev, pci_irq_h irqh, int pages_initial,
queue->start_ptr = xge_os_malloc(pdev, queue->pages_current *
XGE_QUEUE_BUF_SIZE);
if (queue->start_ptr == NULL) {
xge_os_free(pdev, queue, sizeof(xge_queue_t));
return NULL;
}
queue->head_ptr = queue->tail_ptr = queue->start_ptr;
queue->end_ptr = (char *)queue->start_ptr +
queue->pages_current * XGE_QUEUE_BUF_SIZE;
xge_os_spin_lock_init_irq(&queue->lock, irqh);
queue->pages_initial = pages_initial;
queue->pages_max = pages_max;
@ -309,8 +301,8 @@ void xge_queue_destroy(xge_queue_h queueh)
xge_queue_t *queue = (xge_queue_t *)queueh;
xge_os_spin_lock_destroy_irq(&queue->lock, queue->irqh);
if (!xge_list_is_empty(&queue->list_head)) {
xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
XGE_OS_LLXFMT, (u64)(ulong_t)queue);
xge_debug_queue(XGE_ERR, "destroying non-empty queue 0x"
XGE_OS_LLXFMT, (u64)(ulong_t)queue);
}
xge_os_free(queue->pdev, queue->start_ptr, queue->pages_current *
XGE_QUEUE_BUF_SIZE);
@ -339,12 +331,12 @@ __io_queue_grow(xge_queue_h queueh)
xge_queue_item_t *elem;
xge_debug_queue(XGE_TRACE, "queue 0x"XGE_OS_LLXFMT":%d is growing",
(u64)(ulong_t)queue, queue->pages_current);
newbuf = xge_os_malloc(queue->pdev,
(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE);
if (newbuf == NULL)
return XGE_QUEUE_OUT_OF_MEMORY;
xge_os_memcpy(newbuf, queue->start_ptr,
queue->pages_current * XGE_QUEUE_BUF_SIZE);
@ -353,32 +345,32 @@ __io_queue_grow(xge_queue_h queueh)
/* adjust queue sizes */
queue->start_ptr = newbuf;
queue->end_ptr = (char *)newbuf +
(queue->pages_current + 1) * XGE_QUEUE_BUF_SIZE;
queue->tail_ptr = (char *)newbuf + ((char *)queue->tail_ptr -
(char *)oldbuf);
queue->head_ptr = (char *)newbuf + ((char *)queue->head_ptr -
(char *)oldbuf);
xge_assert(!xge_list_is_empty(&queue->list_head));
queue->list_head.next = (xge_list_t *) (void *)((char *)newbuf +
((char *)queue->list_head.next - (char *)oldbuf));
queue->list_head.prev = (xge_list_t *) (void *)((char *)newbuf +
((char *)queue->list_head.prev - (char *)oldbuf));
/* adjust queue list */
xge_list_for_each(item, &queue->list_head) {
elem = xge_container_of(item, xge_queue_item_t, item);
if (elem->item.next != &queue->list_head) {
elem->item.next =
(xge_list_t*)(void *)((char *)newbuf +
((char *)elem->item.next - (char *)oldbuf));
}
if (elem->item.prev != &queue->list_head) {
elem->item.prev =
(xge_list_t*) (void *)((char *)newbuf +
((char *)elem->item.prev - (char *)oldbuf));
}
}
xge_os_free(queue->pdev, oldbuf,
queue->pages_current * XGE_QUEUE_BUF_SIZE);
queue->pages_current++;
return XGE_QUEUE_OK;
@ -426,18 +418,18 @@ xge_queue_consume(xge_queue_h queueh, int data_max_size, xge_queue_item_t *item)
void xge_queue_flush(xge_queue_h queueh)
{
unsigned char item_buf[sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
/* flush queue by consuming all enqueued items */
while (xge_queue_consume(queueh,
XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
item) != XGE_QUEUE_IS_EMPTY) {
/* do nothing */
xge_debug_queue(XGE_TRACE, "item "XGE_OS_LLXFMT"(%d) flushed",
item, item->event_type);
}
(void) __queue_get_reset_critical (queueh);
}
@ -456,5 +448,5 @@ int __queue_get_reset_critical (xge_queue_h qh) {
int c = queue->has_critical_event;
queue->has_critical_event = 0;
return c;
}
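A sketch of how this serialized event queue is meant to be driven (hypothetical helper; the event code, page counts and no-op callback are illustrative, and the callback signature is assumed to match xge_queued_f in xge-queue.h):

#define EXAMPLE_EVENT_TYPE	1	/* illustrative event code */

/* no-op notification callback (signature assumed from xge-queue.h) */
static void
example_queued(void *data, int event_type)
{
}

/*
 * Hypothetical sketch: create a queue, produce one event, drain the queue
 * and destroy it. Error handling is abbreviated.
 */
static void
example_queue_usage(pci_dev_h pdev, pci_irq_h irqh)
{
	xge_queue_h qh;
	unsigned char item_buf[sizeof(xge_queue_item_t) +
	    XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	int payload = 0;

	qh = xge_queue_create(pdev, irqh, 1, 4, example_queued, NULL);
	if (qh == NULL)
		return;

	(void) xge_queue_produce(qh, EXAMPLE_EVENT_TYPE, NULL, 0,
	    sizeof(payload), &payload);

	/* drain everything that was queued */
	while (xge_queue_consume(qh, XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
	    item) != XGE_QUEUE_IS_EMPTY) {
		/* dispatch on item->event_type here */
	}
	xge_queue_destroy(qh);
}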

View File

@ -26,159 +26,154 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-channel-fp.c
*
* Description: HAL channel object functionality (fast path)
*
* Created: 10 June 2004
*/
#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-channel.h>
#endif
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL xge_hal_status_e
__hal_channel_dtr_alloc(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
void **tmp_arr;
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
unsigned long flags = 0;
#endif
if (channel->terminating) {
return XGE_HAL_FAIL;
}
if (channel->reserve_length - channel->reserve_top >
channel->reserve_threshold) {
_alloc_after_swap:
*dtrh = channel->reserve_arr[--channel->reserve_length];
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" allocated, "
"channel %d:%d:%d, reserve_idx %d",
(unsigned long long)(ulong_t)*dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length);
return XGE_HAL_OK;
}
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_lock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock(&channel->free_lock);
#endif
/* switch between empty and full arrays */
/* the idea behind such a design is that by having free and reserved
* arrays separated we basically separated irq and non-irq parts.
* i.e. no additional lock need to be done when we free a resource */
if (channel->reserve_initial - channel->free_length >
channel->reserve_threshold) {
tmp_arr = channel->reserve_arr;
channel->reserve_arr = channel->free_arr;
channel->reserve_length = channel->reserve_initial;
channel->free_arr = tmp_arr;
channel->reserve_top = channel->free_length;
channel->free_length = channel->reserve_initial;
channel->stats.reserve_free_swaps_cnt++;
xge_debug_channel(XGE_TRACE,
"switch on channel %d:%d:%d, reserve_length %d, "
"free_length %d", channel->type, channel->post_qid,
channel->compl_qid, channel->reserve_length,
channel->free_length);
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_unlock(&channel->free_lock);
#endif
goto _alloc_after_swap;
}
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
xge_os_spin_unlock_irq(&channel->free_lock, flags);
#elif defined(XGE_HAL_RX_MULTI_FREE) || defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_unlock(&channel->free_lock);
#endif
xge_debug_channel(XGE_TRACE, "channel %d:%d:%d is empty!",
channel->type, channel->post_qid,
channel->compl_qid);
channel->stats.full_cnt++;
*dtrh = NULL;
return XGE_HAL_INF_OUT_OF_DESCRIPTORS;
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_restore(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int offset)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
/* restore a previously allocated dtrh at current offset and update
* the available reserve length accordingly. If dtrh is null just
* update the reserve length, only */
if (dtrh) {
channel->reserve_arr[channel->reserve_length + offset] = dtrh;
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" restored for "
"channel %d:%d:%d, offset %d at reserve index %d, ",
(unsigned long long)(ulong_t)dtrh, channel->type,
channel->post_qid, channel->compl_qid, offset,
channel->reserve_length + offset);
}
else {
channel->reserve_length += offset;
xge_debug_channel(XGE_TRACE, "channel %d:%d:%d, restored "
"for offset %d, new reserve_length %d, free length %d",
channel->type, channel->post_qid, channel->compl_qid,
offset, channel->reserve_length, channel->free_length);
}
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t*)channelh;
xge_assert(channel->work_arr[channel->post_index] == NULL);
channel->work_arr[channel->post_index++] = dtrh;
/* wrap-around */
if (channel->post_index == channel->length)
channel->post_index = 0;
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_try_complete(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_assert(channel->work_arr);
xge_assert(channel->compl_index < channel->length);
*dtrh = channel->work_arr[channel->compl_index];
}
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_complete(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
channel->work_arr[channel->compl_index] = NULL;
/* wrap-around */
if (++channel->compl_index == channel->length)
channel->compl_index = 0;
channel->stats.total_compl_cnt++;
}
@ -186,15 +181,15 @@ __hal_channel_dtr_complete(xge_hal_channel_h channelh)
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void
__hal_channel_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
channel->free_arr[--channel->free_length] = dtrh;
xge_debug_channel(XGE_TRACE, "dtrh 0x"XGE_OS_LLXFMT" freed, "
"channel %d:%d:%d, new free_length %d",
(unsigned long long)(ulong_t)dtrh,
channel->type, channel->post_qid,
channel->compl_qid, channel->free_length);
}
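A sketch of the descriptor fast path that the helpers above implement (hypothetical; in the driver the fifo/ring layers are the real callers, and error handling is abbreviated):

/*
 * Hypothetical sketch: reserve a descriptor, post it to the hardware,
 * later pick it up on completion and return it to the free array.
 */
static void
example_dtr_cycle(xge_hal_channel_h channelh)
{
	xge_hal_dtr_h dtrh;

	if (__hal_channel_dtr_alloc(channelh, &dtrh) != XGE_HAL_OK)
		return;			/* channel exhausted or terminating */

	/* ... fill the descriptor and hand it to the hardware ... */
	__hal_channel_dtr_post(channelh, dtrh);

	/* completion side, typically driven from the ISR */
	__hal_channel_dtr_try_complete(channelh, &dtrh);
	if (dtrh != NULL) {
		__hal_channel_dtr_complete(channelh);
		__hal_channel_dtr_free(channelh, dtrh);
	}
}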
/**
@ -210,88 +205,88 @@ xge_hal_channel_dtr_count(xge_hal_channel_h channelh)
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return ((channel->reserve_length - channel->reserve_top) +
(channel->reserve_initial - channel->free_length) -
channel->reserve_threshold);
}
/**
* xge_hal_channel_userdata - Get user-specified channel context.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
* Returns: per-channel "user data", which can be any ULD-defined context.
* The %userdata "gets" into the channel at open time
* (see xge_hal_channel_open()).
*
* See also: xge_hal_channel_open().
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL void*
xge_hal_channel_userdata(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->userdata;
}
/**
* xge_hal_channel_id - Get channel ID.
* @channelh: Channel handle. Obtained via xge_hal_channel_open().
*
* Returns: channel ID. For link layer channel id is the number
* in the range from 0 to 7 that identifies hardware ring or fifo,
* depending on the channel type.
*/
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_channel_id(xge_hal_channel_h channelh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
return channel->post_qid;
}
/**
 * xge_hal_check_alignment - Check buffer alignment and calculate the
 * "misaligned" portion.
 * @dma_pointer: DMA address of the buffer.
 * @size: Buffer size, in bytes.
 * @alignment: Alignment "granularity" (see below), in bytes.
 * @copy_size: Maximum number of bytes to "extract" from the buffer
 * (in order to post it as a separate scatter-gather entry). See below.
 *
 * Check buffer alignment and calculate the "misaligned" portion, if it exists.
 * The buffer is considered aligned if its address is a multiple of
 * the specified @alignment. If this is the case,
 * xge_hal_check_alignment() returns zero.
 * Otherwise, xge_hal_check_alignment() uses the last argument,
 * @copy_size,
 * to calculate the size to "extract" from the buffer. The @copy_size
 * may or may not be equal to @alignment. The difference between these two
 * arguments is that the @alignment is used to make the decision: aligned
 * or not aligned, while the @copy_size is used to calculate the portion
 * of the buffer to "extract", i.e. to post as a separate entry in the
 * transmit descriptor. For example, the combination
 * @alignment=8 and @copy_size=64 will work okay on AMD Opteron boxes.
 *
 * Note: @copy_size should be a multiple of @alignment. In many practical
 * cases @copy_size and @alignment will probably be equal.
 *
 * See also: xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_CHANNEL __HAL_INLINE_CHANNEL int
xge_hal_check_alignment(dma_addr_t dma_pointer, int size, int alignment,
    int copy_size)
{
    int misaligned_size;

    misaligned_size = (int)(dma_pointer & (alignment - 1));
    if (!misaligned_size) {
        return 0;
    }

    if (size > copy_size) {
        misaligned_size = (int)(dma_pointer & (copy_size - 1));
        misaligned_size = copy_size - misaligned_size;
    } else {
        misaligned_size = size;
    }

    return misaligned_size;
}
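As an aside, the stand-alone sketch below is editorial (it is not part of the driver sources in this diff): it mirrors the arithmetic above so the @alignment=8, @copy_size=64 example from the comment can be traced by hand, with uint64_t standing in for dma_addr_t.

/*
 * Editorial sketch: stand-alone copy of the alignment math, with uint64_t
 * standing in for dma_addr_t. Not part of the HAL sources.
 */
#include <stdint.h>
#include <stdio.h>

static int
check_alignment_sketch(uint64_t dma_pointer, int size, int alignment,
    int copy_size)
{
    int misaligned_size = (int)(dma_pointer & (alignment - 1));

    if (!misaligned_size)
        return 0;                       /* already aligned */
    if (size > copy_size) {
        /* bytes to "extract" so the rest starts on a copy_size boundary */
        misaligned_size = (int)(dma_pointer & (copy_size - 1));
        return copy_size - misaligned_size;
    }
    return size;                        /* small buffer: extract it whole */
}

int
main(void)
{
    /* 0x1004 is not 8-byte aligned: 60 bytes are extracted to reach 0x1040 */
    printf("%d\n", check_alignment_sketch(0x1004, 2048, 8, 64)); /* 60 */
    /* 0x1000 is 8-byte aligned: nothing to extract */
    printf("%d\n", check_alignment_sketch(0x1000, 2048, 8, 64)); /* 0 */
    return 0;
}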


@ -26,23 +26,11 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-channel.c
*
* Description: chipset channel abstraction
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-channel.h>
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>
#include <dev/nxge/include/xgehal-regs.h>
#ifdef XGEHAL_RNIC
#include <dev/nxge/include/xgehal-types.h>
#include "xgehal-iov.h"
#endif
/*
* __hal_channel_dtr_next_reservelist
@ -51,12 +39,12 @@
*/
static xge_hal_status_e
__hal_channel_dtr_next_reservelist(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (channel->reserve_top >= channel->reserve_length) {
return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
}
*dtrh = channel->reserve_arr[channel->reserve_top++];
@ -75,7 +63,7 @@ __hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
if (channel->reserve_initial == channel->free_length) {
return XGE_HAL_INF_NO_MORE_FREED_DESCRIPTORS;
}
*dtrh = channel->free_arr[channel->free_length++];
@ -91,21 +79,16 @@ __hal_channel_dtr_next_freelist(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
*/
static xge_hal_status_e
__hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh)
{
#ifndef XGEHAL_RNIC
xge_hal_ring_rxd_1_t *rxdp; /* doesn't matter 1, 3 or 5... */
#endif
__hal_channel_dtr_try_complete(channelh, dtrh);
if (*dtrh == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
#ifndef XGEHAL_RNIC
rxdp = (xge_hal_ring_rxd_1_t *)*dtrh;
xge_assert(rxdp->host_control!=0);
#endif
__hal_channel_dtr_complete(channelh);
@ -114,46 +97,26 @@ __hal_channel_dtr_next_not_completed(xge_hal_channel_h channelh,
xge_hal_channel_t*
__hal_channel_allocate(xge_hal_device_h devh, int post_qid,
#ifdef XGEHAL_RNIC
u32 vp_id,
#endif
xge_hal_channel_type_e type)
{
xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
xge_hal_channel_t *channel;
int size = 0;
switch(type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
size = sizeof(xge_hal_ring_t);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
size = sizeof(__hal_sq_t);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
size = sizeof(__hal_srq_t);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
size = sizeof(__hal_cqrq_t);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
size = sizeof(__hal_umq_t);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
size = sizeof(__hal_dmq_t);
break;
#endif
default :
xge_assert(size);
break;
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_FIFO_NUM &&
post_qid + 1 <= XGE_HAL_MAX_FIFO_NUM);
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_assert(post_qid + 1 >= XGE_HAL_MIN_RING_NUM &&
post_qid + 1 <= XGE_HAL_MAX_RING_NUM);
size = sizeof(xge_hal_ring_t);
break;
default :
xge_assert(size);
break;
}
@ -161,20 +124,17 @@ __hal_channel_allocate(xge_hal_device_h devh, int post_qid,
/* allocate FIFO channel */
channel = (xge_hal_channel_t *) xge_os_malloc(hldev->pdev, size);
if (channel == NULL) {
return NULL;
}
xge_os_memzero(channel, size);
channel->pdev = hldev->pdev;
channel->regh0 = hldev->regh0;
channel->regh1 = hldev->regh1;
channel->type = type;
channel->devh = devh;
#ifdef XGEHAL_RNIC
channel->vp_id = vp_id;
#endif
channel->post_qid = post_qid;
channel->compl_qid = 0;
channel->pdev = hldev->pdev;
channel->regh0 = hldev->regh0;
channel->regh1 = hldev->regh1;
channel->type = type;
channel->devh = devh;
channel->post_qid = post_qid;
channel->compl_qid = 0;
return channel;
}
@ -186,39 +146,21 @@ void __hal_channel_free(xge_hal_channel_t *channel)
xge_assert(channel->pdev);
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
size = sizeof(xge_hal_ring_t);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
size = sizeof(__hal_sq_t);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
size = sizeof(__hal_srq_t);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
size = sizeof(__hal_cqrq_t);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
size = sizeof(__hal_umq_t);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
size = sizeof(__hal_dmq_t);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(size);
break;
#endif
default:
break;
case XGE_HAL_CHANNEL_TYPE_FIFO:
size = sizeof(xge_hal_fifo_t);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
size = sizeof(xge_hal_ring_t);
break;
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(size);
break;
default:
break;
}
xge_os_free(channel->pdev, channel, size);
@ -226,8 +168,8 @@ void __hal_channel_free(xge_hal_channel_t *channel)
xge_hal_status_e
__hal_channel_initialize (xge_hal_channel_h channelh,
    xge_hal_channel_attr_t *attr, void **reserve_arr,
    int reserve_initial, int reserve_max, int reserve_threshold)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_device_t *hldev;
@ -248,28 +190,28 @@ __hal_channel_initialize (xge_hal_channel_h channelh,
    channel->reserve_threshold = reserve_threshold;
    channel->reserve_top = 0;
    channel->saved_arr = (void **) xge_os_malloc(hldev->pdev,
        sizeof(void*)*channel->reserve_max);
    if (channel->saved_arr == NULL) {
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }
    xge_os_memzero(channel->saved_arr, sizeof(void*)*channel->reserve_max);
    channel->free_arr = channel->saved_arr;
    channel->free_length = channel->reserve_initial;
    channel->work_arr = (void **) xge_os_malloc(hldev->pdev,
        sizeof(void*)*channel->reserve_max);
    if (channel->work_arr == NULL) {
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    }
    xge_os_memzero(channel->work_arr,
        sizeof(void*)*channel->reserve_max);
    channel->post_index = 0;
    channel->compl_index = 0;
    channel->length = channel->reserve_initial;
    channel->orig_arr = (void **) xge_os_malloc(hldev->pdev,
        sizeof(void*)*channel->reserve_max);
    if (channel->orig_arr == NULL)
        return XGE_HAL_ERR_OUT_OF_MEMORY;
    xge_os_memzero(channel->orig_arr, sizeof(void*)*channel->reserve_max);
@ -292,21 +234,21 @@ void __hal_channel_terminate(xge_hal_channel_h channelh)
    xge_assert(channel->pdev);

    /* undo changes made at channel_initialize() */
    if (channel->work_arr) {
        xge_os_free(channel->pdev, channel->work_arr,
            sizeof(void*)*channel->reserve_max);
        channel->work_arr = NULL;
    }
    if (channel->saved_arr) {
        xge_os_free(channel->pdev, channel->saved_arr,
            sizeof(void*)*channel->reserve_max);
        channel->saved_arr = NULL;
    }
    if (channel->orig_arr) {
        xge_os_free(channel->pdev, channel->orig_arr,
            sizeof(void*)*channel->reserve_max);
        channel->orig_arr = NULL;
    }
#if defined(XGE_HAL_RX_MULTI_FREE_IRQ) || defined(XGE_HAL_TX_MULTI_FREE_IRQ)
@ -354,9 +296,9 @@ void __hal_channel_terminate(xge_hal_channel_h channelh)
*/
xge_hal_status_e
xge_hal_channel_open(xge_hal_device_h devh,
    xge_hal_channel_attr_t *attr,
    xge_hal_channel_h *channelh,
    xge_hal_channel_reopen_e reopen)
{
xge_list_t *item;
int i;
@ -369,169 +311,98 @@ xge_hal_channel_open(xge_hal_device_h devh,
*channelh = NULL;
#ifdef XGEHAL_RNIC
if((attr->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(attr->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
/* find channel */
xge_list_for_each(item, &device->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t, item);
if (tmp->type == attr->type &&
tmp->post_qid == attr->post_qid &&
tmp->compl_qid == attr->compl_qid) {
channel = tmp;
break;
}
}
if (channel == NULL) {
return XGE_HAL_ERR_CHANNEL_NOT_FOUND;
}
#ifdef XGEHAL_RNIC
}
else {
channel = __hal_channel_allocate(devh, attr->post_qid,
#ifdef XGEHAL_RNIC
attr->vp_id,
#endif
attr->type);
if (channel == NULL) {
xge_debug_device(XGE_ERR,
"__hal_channel_allocate failed");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
}
#endif
#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
#endif
#ifdef XGEHAL_RNIC
if((reopen == XGE_HAL_CHANNEL_OC_NORMAL) ||
((channel->type != XGE_HAL_CHANNEL_TYPE_FIFO) &&
(channel->type != XGE_HAL_CHANNEL_TYPE_RING))) {
#else
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
#endif
/* allocate memory, initialize pointers, etc */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
status = __hal_fifo_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
status = __hal_ring_open(channel, attr);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
status = __hal_sq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
status = __hal_srq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
status = __hal_cqrq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
status = __hal_umq_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
status = __hal_dmq_open(channel, attr);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
status = XGE_HAL_FAIL;
break;
#endif
default:
break;
}
/* allocate memory, initialize pointers, etc */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
status = __hal_fifo_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
status = __hal_ring_open(channel, attr);
break;
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
status = XGE_HAL_FAIL;
break;
default:
break;
}
if (status == XGE_HAL_OK) {
for (i = 0; i < channel->reserve_initial; i++) {
channel->orig_arr[i] =
channel->reserve_arr[i];
}
}
else
return status;
} else {
xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
for (i = 0; i < channel->reserve_initial; i++) {
channel->reserve_arr[i] = channel->orig_arr[i];
channel->free_arr[i] = NULL;
}
channel->free_length = channel->reserve_initial;
channel->reserve_length = channel->reserve_initial;
channel->reserve_top = 0;
channel->post_index = 0;
channel->compl_index = 0;
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
status = __hal_ring_initial_replenish(channel,
reopen);
if (status != XGE_HAL_OK)
return status;
}
}
/* move channel to the open state list */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->fifo_channels);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->ring_channels);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].sq_channels);
device->virtual_paths[attr->vp_id].stats.no_sqs++;
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].srq_channels);
device->virtual_paths[attr->vp_id].stats.no_srqs++;
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
xge_list_insert(&channel->item,
&device->virtual_paths[attr->vp_id].cqrq_channels);
device->virtual_paths[attr->vp_id].stats.no_cqrqs++;
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
xge_list_init(&channel->item);
device->virtual_paths[attr->vp_id].umq_channelh = channel;
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_list_init(&channel->item);
device->virtual_paths[attr->vp_id].dmq_channelh = channel;
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
#endif
default:
break;
case XGE_HAL_CHANNEL_TYPE_FIFO:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->fifo_channels);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
xge_list_remove(&channel->item);
xge_list_insert(&channel->item, &device->ring_channels);
break;
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
default:
break;
}
channel->is_open = 1;
channel->terminating = 0;
/*
* The magic check the argument validity, has to be
* removed before 03/01/2005.
@ -556,7 +427,7 @@ xge_hal_channel_open(xge_hal_device_h devh,
* See also: xge_hal_channel_dtr_term_f{}.
*/
void xge_hal_channel_abort(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_dtr_h dtr;
@ -567,25 +438,25 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
int reserve_top_sav;
if (channel->dtr_term == NULL) {
return;
}
free_length_sav = channel->free_length;
while (__hal_channel_dtr_next_freelist(channelh, &dtr) == XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_FREED,
channel->userdata, reopen);
}
channel->free_length = free_length_sav;
@ -593,44 +464,44 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)
->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_POSTED,
channel->userdata, reopen);
}
reserve_top_sav = channel->reserve_top;
while (__hal_channel_dtr_next_reservelist(channelh, &dtr) ==
XGE_HAL_OK) {
#ifdef XGE_OS_MEMORY_CHECK
#ifdef XGE_DEBUG_ASSERT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
xge_assert(!__hal_fifo_txdl_priv(dtr)->allocated);
} else {
if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
xge_assert(!__hal_ring_rxd_priv((xge_hal_ring_t * ) channelh, dtr)->allocated);
}
}
#endif
check_cnt++;
#endif
channel->dtr_term(channel, dtr, XGE_HAL_DTR_STATE_AVAIL,
channel->userdata, reopen);
}
channel->reserve_top = reserve_top_sav;
xge_assert(channel->reserve_length ==
(channel->free_length + channel->reserve_top));
#ifdef XGE_OS_MEMORY_CHECK
xge_assert(check_cnt == channel->reserve_initial);
@ -649,14 +520,11 @@ void xge_hal_channel_abort(xge_hal_channel_h channelh,
* safe context.
*/
void xge_hal_channel_close(xge_hal_channel_h channelh,
xge_hal_channel_reopen_e reopen)
{
xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
xge_hal_device_t *hldev;
xge_list_t *item;
#ifdef XGEHAL_RNIC
u32 vp_id;
#endif
xge_assert(channel);
xge_assert(channel->type < XGE_HAL_CHANNEL_TYPE_MAX);
@ -664,96 +532,53 @@ void xge_hal_channel_close(xge_hal_channel_h channelh,
channel->is_open = 0;
channel->magic = XGE_HAL_DEAD;
#ifdef XGEHAL_RNIC
vp_id = channel->vp_id;
/* sanity check: make sure channel is not in free list */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
tmp = xge_container_of(item, xge_hal_channel_t, item);
xge_assert(!tmp->is_open);
if (channel == tmp) {
return;
}
}
#ifdef XGEHAL_RNIC
}
#endif
xge_hal_channel_abort(channel, reopen);
#ifndef XGEHAL_RNIC
xge_assert((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING));
#endif
if (reopen == XGE_HAL_CHANNEL_OC_NORMAL) {
/* de-allocate */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
__hal_fifo_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
__hal_ring_close(channelh);
break;
#ifdef XGEHAL_RNIC
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
__hal_sq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_sqs--;
break;
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
__hal_srq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_srqs--;
break;
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
__hal_cqrq_close(channelh);
hldev->virtual_paths[vp_id].stats.no_cqrqs--;
break;
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
__hal_umq_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
__hal_dmq_close(channelh);
break;
#else
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
#endif
default:
break;
}
/* de-allocate */
switch(channel->type) {
case XGE_HAL_CHANNEL_TYPE_FIFO:
__hal_fifo_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_RING:
__hal_ring_close(channelh);
break;
case XGE_HAL_CHANNEL_TYPE_SEND_QUEUE:
case XGE_HAL_CHANNEL_TYPE_RECEIVE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_COMPLETION_QUEUE:
case XGE_HAL_CHANNEL_TYPE_UP_MESSAGE_QUEUE:
case XGE_HAL_CHANNEL_TYPE_DOWN_MESSAGE_QUEUE:
xge_assert(channel->type == XGE_HAL_CHANNEL_TYPE_FIFO ||
channel->type == XGE_HAL_CHANNEL_TYPE_RING);
break;
default:
break;
}
}
else
xge_assert(reopen == XGE_HAL_CHANNEL_RESET_ONLY);
/* move channel back to free state list */
xge_list_remove(&channel->item);
#ifdef XGEHAL_RNIC
if((channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) ||
(channel->type == XGE_HAL_CHANNEL_TYPE_RING)) {
#endif
xge_list_insert(&channel->item, &hldev->free_channels);
if (xge_list_is_empty(&hldev->fifo_channels) &&
xge_list_is_empty(&hldev->ring_channels)) {
/* clear msix_idx in case of following HW reset */
hldev->reset_needed_after_close = 1;
}
#ifdef XGEHAL_RNIC
}
else {
__hal_channel_free(channel);
}
#endif
}


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-config.c
*
* Description: configuration functionality
*
* Created: 14 May 2004
*/
#include <dev/nxge/include/xgehal-config.h>
#include <dev/nxge/include/xge-debug.h>
@ -48,53 +40,53 @@ static xge_hal_status_e
__hal_tti_config_check (xge_hal_tti_config_t *new_config)
{
if ((new_config->urange_a < XGE_HAL_MIN_TX_URANGE_A) ||
(new_config->urange_a > XGE_HAL_MAX_TX_URANGE_A)) {
return XGE_HAL_BADCFG_TX_URANGE_A;
}
if ((new_config->ufc_a < XGE_HAL_MIN_TX_UFC_A) ||
(new_config->ufc_a > XGE_HAL_MAX_TX_UFC_A)) {
return XGE_HAL_BADCFG_TX_UFC_A;
}
if ((new_config->urange_b < XGE_HAL_MIN_TX_URANGE_B) ||
(new_config->urange_b > XGE_HAL_MAX_TX_URANGE_B)) {
return XGE_HAL_BADCFG_TX_URANGE_B;
}
if ((new_config->ufc_b < XGE_HAL_MIN_TX_UFC_B) ||
(new_config->ufc_b > XGE_HAL_MAX_TX_UFC_B)) {
return XGE_HAL_BADCFG_TX_UFC_B;
}
if ((new_config->urange_c < XGE_HAL_MIN_TX_URANGE_C) ||
(new_config->urange_c > XGE_HAL_MAX_TX_URANGE_C)) {
return XGE_HAL_BADCFG_TX_URANGE_C;
}
if ((new_config->ufc_c < XGE_HAL_MIN_TX_UFC_C) ||
(new_config->ufc_c > XGE_HAL_MAX_TX_UFC_C)) {
return XGE_HAL_BADCFG_TX_UFC_C;
}
if ((new_config->ufc_d < XGE_HAL_MIN_TX_UFC_D) ||
(new_config->ufc_d > XGE_HAL_MAX_TX_UFC_D)) {
return XGE_HAL_BADCFG_TX_UFC_D;
}
if ((new_config->timer_val_us < XGE_HAL_MIN_TX_TIMER_VAL) ||
(new_config->timer_val_us > XGE_HAL_MAX_TX_TIMER_VAL)) {
return XGE_HAL_BADCFG_TX_TIMER_VAL;
}
if ((new_config->timer_ci_en < XGE_HAL_MIN_TX_TIMER_CI_EN) ||
(new_config->timer_ci_en > XGE_HAL_MAX_TX_TIMER_CI_EN)) {
return XGE_HAL_BADCFG_TX_TIMER_CI_EN;
}
if ((new_config->timer_ac_en < XGE_HAL_MIN_TX_TIMER_AC_EN) ||
(new_config->timer_ac_en > XGE_HAL_MAX_TX_TIMER_AC_EN)) {
return XGE_HAL_BADCFG_TX_TIMER_AC_EN;
}
return XGE_HAL_OK;
@ -111,48 +103,48 @@ static xge_hal_status_e
__hal_rti_config_check (xge_hal_rti_config_t *new_config)
{
if ((new_config->urange_a < XGE_HAL_MIN_RX_URANGE_A) ||
(new_config->urange_a > XGE_HAL_MAX_RX_URANGE_A)) {
return XGE_HAL_BADCFG_RX_URANGE_A;
}
if ((new_config->ufc_a < XGE_HAL_MIN_RX_UFC_A) ||
(new_config->ufc_a > XGE_HAL_MAX_RX_UFC_A)) {
return XGE_HAL_BADCFG_RX_UFC_A;
}
if ((new_config->urange_b < XGE_HAL_MIN_RX_URANGE_B) ||
(new_config->urange_b > XGE_HAL_MAX_RX_URANGE_B)) {
return XGE_HAL_BADCFG_RX_URANGE_B;
}
if ((new_config->ufc_b < XGE_HAL_MIN_RX_UFC_B) ||
(new_config->ufc_b > XGE_HAL_MAX_RX_UFC_B)) {
return XGE_HAL_BADCFG_RX_UFC_B;
}
if ((new_config->urange_c < XGE_HAL_MIN_RX_URANGE_C) ||
(new_config->urange_c > XGE_HAL_MAX_RX_URANGE_C)) {
return XGE_HAL_BADCFG_RX_URANGE_C;
}
if ((new_config->ufc_c < XGE_HAL_MIN_RX_UFC_C) ||
(new_config->ufc_c > XGE_HAL_MAX_RX_UFC_C)) {
return XGE_HAL_BADCFG_RX_UFC_C;
}
if ((new_config->ufc_d < XGE_HAL_MIN_RX_UFC_D) ||
(new_config->ufc_d > XGE_HAL_MAX_RX_UFC_D)) {
return XGE_HAL_BADCFG_RX_UFC_D;
}
if ((new_config->timer_val_us < XGE_HAL_MIN_RX_TIMER_VAL) ||
(new_config->timer_val_us > XGE_HAL_MAX_RX_TIMER_VAL)) {
return XGE_HAL_BADCFG_RX_TIMER_VAL;
}
if ((new_config->timer_ac_en < XGE_HAL_MIN_RX_TIMER_AC_EN) ||
(new_config->timer_ac_en > XGE_HAL_MAX_RX_TIMER_AC_EN)) {
return XGE_HAL_BADCFG_RX_TIMER_AC_EN;
}
return XGE_HAL_OK;
@ -168,13 +160,13 @@ __hal_rti_config_check (xge_hal_rti_config_t *new_config)
*/
static xge_hal_status_e
__hal_fifo_queue_check (xge_hal_fifo_config_t *new_config,
xge_hal_fifo_queue_t *new_queue)
{
int i;
if ((new_queue->initial < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
(new_queue->initial > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INITIAL_LENGTH;
}
/* FIXME: queue "grow" feature is not supported.
@ -183,37 +175,37 @@ __hal_fifo_queue_check (xge_hal_fifo_config_t *new_config,
new_queue->max = new_queue->initial;
if ((new_queue->max < XGE_HAL_MIN_FIFO_QUEUE_LENGTH) ||
(new_queue->max > XGE_HAL_MAX_FIFO_QUEUE_LENGTH)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
}
if (new_queue->max < new_config->reserve_threshold) {
return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
}
if ((new_queue->intr < XGE_HAL_MIN_FIFO_QUEUE_INTR) ||
(new_queue->intr > XGE_HAL_MAX_FIFO_QUEUE_INTR)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INTR;
}
if ((new_queue->intr_vector < XGE_HAL_MIN_FIFO_QUEUE_INTR_VECTOR) ||
(new_queue->intr_vector > XGE_HAL_MAX_FIFO_QUEUE_INTR_VECTOR)) {
return XGE_HAL_BADCFG_FIFO_QUEUE_INTR_VECTOR;
}
for(i = 0; i < XGE_HAL_MAX_FIFO_TTI_NUM; i++) {
/*
* Validate the tti configuration parameters only if
* the TTI feature is enabled.
*/
if (new_queue->tti[i].enabled) {
xge_hal_status_e status;
if ((status = __hal_tti_config_check(
&new_queue->tti[i])) != XGE_HAL_OK) {
return status;
}
}
}
return XGE_HAL_OK;
@ -231,8 +223,8 @@ __hal_ring_queue_check (xge_hal_ring_queue_t *new_config)
{
if ((new_config->initial < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
(new_config->initial > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
return XGE_HAL_BADCFG_RING_QUEUE_INITIAL_BLOCKS;
}
/* FIXME: queue "grow" feature is not supported.
@ -241,67 +233,67 @@ __hal_ring_queue_check (xge_hal_ring_queue_t *new_config)
new_config->max = new_config->initial;
if ((new_config->max < XGE_HAL_MIN_RING_QUEUE_BLOCKS) ||
(new_config->max > XGE_HAL_MAX_RING_QUEUE_BLOCKS)) {
return XGE_HAL_BADCFG_RING_QUEUE_MAX_BLOCKS;
}
if ((new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_1) &&
(new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_3) &&
(new_config->buffer_mode != XGE_HAL_RING_QUEUE_BUFFER_MODE_5)) {
return XGE_HAL_BADCFG_RING_QUEUE_BUFFER_MODE;
}
/*
* Herc has less DRAM; the check is done later inside
* device_initialize()
*/
if (((new_config->dram_size_mb < XGE_HAL_MIN_RING_QUEUE_SIZE) ||
(new_config->dram_size_mb > XGE_HAL_MAX_RING_QUEUE_SIZE_XENA)) &&
new_config->dram_size_mb != XGE_HAL_DEFAULT_USE_HARDCODE)
return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
if ((new_config->backoff_interval_us <
XGE_HAL_MIN_BACKOFF_INTERVAL_US) ||
(new_config->backoff_interval_us >
XGE_HAL_MAX_BACKOFF_INTERVAL_US)) {
return XGE_HAL_BADCFG_BACKOFF_INTERVAL_US;
}
if ((new_config->max_frm_len < XGE_HAL_MIN_MAX_FRM_LEN) ||
(new_config->max_frm_len > XGE_HAL_MAX_MAX_FRM_LEN)) {
return XGE_HAL_BADCFG_MAX_FRM_LEN;
}
if ((new_config->priority < XGE_HAL_MIN_RING_PRIORITY) ||
(new_config->priority > XGE_HAL_MAX_RING_PRIORITY)) {
return XGE_HAL_BADCFG_RING_PRIORITY;
}
if ((new_config->rth_en < XGE_HAL_MIN_RING_RTH_EN) ||
(new_config->rth_en > XGE_HAL_MAX_RING_RTH_EN)) {
return XGE_HAL_BADCFG_RING_RTH_EN;
}
if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_MAC_EN) ||
(new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_MAC_EN)) {
return XGE_HAL_BADCFG_RING_RTS_MAC_EN;
}
if ((new_config->rts_mac_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
(new_config->rts_mac_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
return XGE_HAL_BADCFG_RING_RTS_PORT_EN;
}
if ((new_config->intr_vector < XGE_HAL_MIN_RING_QUEUE_INTR_VECTOR) ||
(new_config->intr_vector > XGE_HAL_MAX_RING_QUEUE_INTR_VECTOR)) {
return XGE_HAL_BADCFG_RING_QUEUE_INTR_VECTOR;
}
if (new_config->indicate_max_pkts <
XGE_HAL_MIN_RING_INDICATE_MAX_PKTS ||
new_config->indicate_max_pkts >
XGE_HAL_MAX_RING_INDICATE_MAX_PKTS) {
return XGE_HAL_BADCFG_RING_INDICATE_MAX_PKTS;
}
return __hal_rti_config_check(&new_config->rti);
@ -318,52 +310,52 @@ static xge_hal_status_e
__hal_mac_config_check (xge_hal_mac_config_t *new_config)
{
if ((new_config->tmac_util_period < XGE_HAL_MIN_TMAC_UTIL_PERIOD) ||
(new_config->tmac_util_period > XGE_HAL_MAX_TMAC_UTIL_PERIOD)) {
return XGE_HAL_BADCFG_TMAC_UTIL_PERIOD;
}
if ((new_config->rmac_util_period < XGE_HAL_MIN_RMAC_UTIL_PERIOD) ||
(new_config->rmac_util_period > XGE_HAL_MAX_RMAC_UTIL_PERIOD)) {
return XGE_HAL_BADCFG_RMAC_UTIL_PERIOD;
}
if ((new_config->rmac_bcast_en < XGE_HAL_MIN_RMAC_BCAST_EN) ||
(new_config->rmac_bcast_en > XGE_HAL_MAX_RMAC_BCAST_EN)) {
return XGE_HAL_BADCFG_RMAC_BCAST_EN;
}
if ((new_config->rmac_pause_gen_en < XGE_HAL_MIN_RMAC_PAUSE_GEN_EN) ||
(new_config->rmac_pause_gen_en>XGE_HAL_MAX_RMAC_PAUSE_GEN_EN)) {
return XGE_HAL_BADCFG_RMAC_PAUSE_GEN_EN;
}
if ((new_config->rmac_pause_rcv_en < XGE_HAL_MIN_RMAC_PAUSE_RCV_EN) ||
(new_config->rmac_pause_rcv_en>XGE_HAL_MAX_RMAC_PAUSE_RCV_EN)) {
return XGE_HAL_BADCFG_RMAC_PAUSE_RCV_EN;
}
if ((new_config->rmac_pause_time < XGE_HAL_MIN_RMAC_HIGH_PTIME) ||
(new_config->rmac_pause_time > XGE_HAL_MAX_RMAC_HIGH_PTIME)) {
return XGE_HAL_BADCFG_RMAC_HIGH_PTIME;
}
if ((new_config->media < XGE_HAL_MIN_MEDIA) ||
(new_config->media > XGE_HAL_MAX_MEDIA)) {
return XGE_HAL_BADCFG_MEDIA;
}
if ((new_config->mc_pause_threshold_q0q3 <
XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q0Q3) ||
(new_config->mc_pause_threshold_q0q3 >
XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q0Q3)) {
return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q0Q3;
}
if ((new_config->mc_pause_threshold_q4q7 <
XGE_HAL_MIN_MC_PAUSE_THRESHOLD_Q4Q7) ||
(new_config->mc_pause_threshold_q4q7 >
XGE_HAL_MAX_MC_PAUSE_THRESHOLD_Q4Q7)) {
return XGE_HAL_BADCFG_MC_PAUSE_THRESHOLD_Q4Q7;
}
return XGE_HAL_OK;
@ -389,38 +381,38 @@ __hal_fifo_config_check (xge_hal_fifo_config_t *new_config)
new_config->max_frags = ((new_config->max_frags + 3) >> 2) << 2;
if ((new_config->max_frags < XGE_HAL_MIN_FIFO_FRAGS) ||
(new_config->max_frags > XGE_HAL_MAX_FIFO_FRAGS)) {
return XGE_HAL_BADCFG_FIFO_FRAGS;
}
if ((new_config->reserve_threshold <
XGE_HAL_MIN_FIFO_RESERVE_THRESHOLD) ||
(new_config->reserve_threshold >
XGE_HAL_MAX_FIFO_RESERVE_THRESHOLD)) {
return XGE_HAL_BADCFG_FIFO_RESERVE_THRESHOLD;
}
if ((new_config->memblock_size < XGE_HAL_MIN_FIFO_MEMBLOCK_SIZE) ||
(new_config->memblock_size > XGE_HAL_MAX_FIFO_MEMBLOCK_SIZE)) {
return XGE_HAL_BADCFG_FIFO_MEMBLOCK_SIZE;
}
for(i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
xge_hal_status_e status;
if (!new_config->queue[i].configured)
continue;
if ((status = __hal_fifo_queue_check(new_config,
&new_config->queue[i])) != XGE_HAL_OK) {
return status;
}
total_fifo_length += new_config->queue[i].max;
}
if(total_fifo_length > XGE_HAL_MAX_FIFO_QUEUE_LENGTH){
return XGE_HAL_BADCFG_FIFO_QUEUE_MAX_LENGTH;
}
return XGE_HAL_OK;
@ -439,20 +431,20 @@ __hal_ring_config_check (xge_hal_ring_config_t *new_config)
int i;
if ((new_config->memblock_size < XGE_HAL_MIN_RING_MEMBLOCK_SIZE) ||
(new_config->memblock_size > XGE_HAL_MAX_RING_MEMBLOCK_SIZE)) {
return XGE_HAL_BADCFG_RING_MEMBLOCK_SIZE;
}
for(i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
xge_hal_status_e status;
if (!new_config->queue[i].configured)
continue;
if ((status = __hal_ring_queue_check(&new_config->queue[i]))
!= XGE_HAL_OK) {
return status;
}
}
return XGE_HAL_OK;
@ -477,92 +469,92 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
xge_hal_status_e status;
if ((new_config->mtu < XGE_HAL_MIN_MTU) ||
(new_config->mtu > XGE_HAL_MAX_MTU)) {
return XGE_HAL_BADCFG_MAX_MTU;
}
if ((new_config->bimodal_interrupts < XGE_HAL_BIMODAL_INTR_MIN) ||
(new_config->bimodal_interrupts > XGE_HAL_BIMODAL_INTR_MAX)) {
return XGE_HAL_BADCFG_BIMODAL_INTR;
}
if (new_config->bimodal_interrupts &&
((new_config->bimodal_timer_lo_us < XGE_HAL_BIMODAL_TIMER_LO_US_MIN) ||
(new_config->bimodal_timer_lo_us > XGE_HAL_BIMODAL_TIMER_LO_US_MAX))) {
return XGE_HAL_BADCFG_BIMODAL_TIMER_LO_US;
}
if (new_config->bimodal_interrupts &&
((new_config->bimodal_timer_hi_us < XGE_HAL_BIMODAL_TIMER_HI_US_MIN) ||
(new_config->bimodal_timer_hi_us > XGE_HAL_BIMODAL_TIMER_HI_US_MAX))) {
return XGE_HAL_BADCFG_BIMODAL_TIMER_HI_US;
}
if ((new_config->no_isr_events < XGE_HAL_NO_ISR_EVENTS_MIN) ||
(new_config->no_isr_events > XGE_HAL_NO_ISR_EVENTS_MAX)) {
return XGE_HAL_BADCFG_NO_ISR_EVENTS;
}
if ((new_config->isr_polling_cnt < XGE_HAL_MIN_ISR_POLLING_CNT) ||
(new_config->isr_polling_cnt > XGE_HAL_MAX_ISR_POLLING_CNT)) {
return XGE_HAL_BADCFG_ISR_POLLING_CNT;
}
if (new_config->latency_timer &&
new_config->latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
if ((new_config->latency_timer < XGE_HAL_MIN_LATENCY_TIMER) ||
(new_config->latency_timer > XGE_HAL_MAX_LATENCY_TIMER)) {
return XGE_HAL_BADCFG_LATENCY_TIMER;
}
}
if (new_config->max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
if ((new_config->max_splits_trans <
XGE_HAL_ONE_SPLIT_TRANSACTION) ||
(new_config->max_splits_trans >
XGE_HAL_THIRTYTWO_SPLIT_TRANSACTION))
return XGE_HAL_BADCFG_MAX_SPLITS_TRANS;
}
if (new_config->mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT)
{
if ((new_config->mmrb_count < XGE_HAL_MIN_MMRB_COUNT) ||
(new_config->mmrb_count > XGE_HAL_MAX_MMRB_COUNT)) {
return XGE_HAL_BADCFG_MMRB_COUNT;
}
}
if ((new_config->shared_splits < XGE_HAL_MIN_SHARED_SPLITS) ||
(new_config->shared_splits > XGE_HAL_MAX_SHARED_SPLITS)) {
return XGE_HAL_BADCFG_SHARED_SPLITS;
}
if (new_config->stats_refresh_time_sec !=
XGE_HAL_STATS_REFRESH_DISABLE) {
if ((new_config->stats_refresh_time_sec <
XGE_HAL_MIN_STATS_REFRESH_TIME) ||
(new_config->stats_refresh_time_sec >
XGE_HAL_MAX_STATS_REFRESH_TIME)) {
return XGE_HAL_BADCFG_STATS_REFRESH_TIME;
}
}
if ((new_config->intr_mode != XGE_HAL_INTR_MODE_IRQLINE) &&
(new_config->intr_mode != XGE_HAL_INTR_MODE_MSI) &&
(new_config->intr_mode != XGE_HAL_INTR_MODE_MSIX)) {
return XGE_HAL_BADCFG_INTR_MODE;
}
if ((new_config->sched_timer_us < XGE_HAL_SCHED_TIMER_MIN) ||
(new_config->sched_timer_us > XGE_HAL_SCHED_TIMER_MAX)) {
return XGE_HAL_BADCFG_SCHED_TIMER_US;
}
if ((new_config->sched_timer_one_shot !=
XGE_HAL_SCHED_TIMER_ON_SHOT_DISABLE) &&
(new_config->sched_timer_one_shot !=
XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE)) {
return XGE_HAL_BADCFG_SCHED_TIMER_ON_SHOT;
}
/*
@ -576,105 +568,105 @@ __hal_device_config_check_common (xge_hal_device_config_t *new_config)
*/
if (new_config->sched_timer_us &&
new_config->rxufca_hi_lim != new_config->rxufca_lo_lim) {
if ((new_config->rxufca_intr_thres <
XGE_HAL_RXUFCA_INTR_THRES_MIN) ||
(new_config->rxufca_intr_thres >
XGE_HAL_RXUFCA_INTR_THRES_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_INTR_THRES;
}
if ((new_config->rxufca_hi_lim < XGE_HAL_RXUFCA_HI_LIM_MIN) ||
(new_config->rxufca_hi_lim > XGE_HAL_RXUFCA_HI_LIM_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_HI_LIM;
}
if ((new_config->rxufca_lo_lim < XGE_HAL_RXUFCA_LO_LIM_MIN) ||
(new_config->rxufca_lo_lim > XGE_HAL_RXUFCA_LO_LIM_MAX) ||
(new_config->rxufca_lo_lim > new_config->rxufca_hi_lim)) {
return XGE_HAL_BADCFG_RXUFCA_LO_LIM;
}
if ((new_config->rxufca_lbolt_period <
XGE_HAL_RXUFCA_LBOLT_PERIOD_MIN) ||
(new_config->rxufca_lbolt_period >
XGE_HAL_RXUFCA_LBOLT_PERIOD_MAX)) {
return XGE_HAL_BADCFG_RXUFCA_LBOLT_PERIOD;
}
}
if ((new_config->link_valid_cnt < XGE_HAL_LINK_VALID_CNT_MIN) ||
(new_config->link_valid_cnt > XGE_HAL_LINK_VALID_CNT_MAX)) {
return XGE_HAL_BADCFG_LINK_VALID_CNT;
}
if ((new_config->link_retry_cnt < XGE_HAL_LINK_RETRY_CNT_MIN) ||
(new_config->link_retry_cnt > XGE_HAL_LINK_RETRY_CNT_MAX)) {
return XGE_HAL_BADCFG_LINK_RETRY_CNT;
}
if (new_config->link_valid_cnt > new_config->link_retry_cnt)
return XGE_HAL_BADCFG_LINK_VALID_CNT;
if (new_config->link_stability_period != XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->link_stability_period <
XGE_HAL_MIN_LINK_STABILITY_PERIOD) ||
(new_config->link_stability_period >
XGE_HAL_MAX_LINK_STABILITY_PERIOD)) {
return XGE_HAL_BADCFG_LINK_STABILITY_PERIOD;
}
}
if (new_config->device_poll_millis !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->device_poll_millis <
XGE_HAL_MIN_DEVICE_POLL_MILLIS) ||
(new_config->device_poll_millis >
XGE_HAL_MAX_DEVICE_POLL_MILLIS)) {
return XGE_HAL_BADCFG_DEVICE_POLL_MILLIS;
}
}
}
if ((new_config->rts_port_en < XGE_HAL_MIN_RING_RTS_PORT_EN) ||
(new_config->rts_port_en > XGE_HAL_MAX_RING_RTS_PORT_EN)) {
return XGE_HAL_BADCFG_RTS_PORT_EN;
}
if ((new_config->rts_qos_en < XGE_HAL_RTS_QOS_DISABLE) ||
(new_config->rts_qos_en > XGE_HAL_RTS_QOS_ENABLE)) {
return XGE_HAL_BADCFG_RTS_QOS_EN;
}
#if defined(XGE_HAL_CONFIG_LRO)
if (new_config->lro_sg_size !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->lro_sg_size < XGE_HAL_LRO_MIN_SG_SIZE) ||
(new_config->lro_sg_size > XGE_HAL_LRO_MAX_SG_SIZE)) {
return XGE_HAL_BADCFG_LRO_SG_SIZE;
}
}
if (new_config->lro_frm_len !=
XGE_HAL_DEFAULT_USE_HARDCODE) {
if ((new_config->lro_frm_len < XGE_HAL_LRO_MIN_FRM_LEN) ||
(new_config->lro_frm_len > XGE_HAL_LRO_MAX_FRM_LEN)) {
return XGE_HAL_BADCFG_LRO_FRM_LEN;
}
}
#endif
if ((status = __hal_ring_config_check(&new_config->ring))
!= XGE_HAL_OK) {
return status;
}
if ((status = __hal_mac_config_check(&new_config->mac)) !=
XGE_HAL_OK) {
return status;
}
if ((status = __hal_fifo_config_check(&new_config->fifo)) !=
XGE_HAL_OK) {
return status;
}
return XGE_HAL_OK;
@ -695,12 +687,12 @@ xge_hal_status_e
__hal_device_config_check_xena (xge_hal_device_config_t *new_config)
{
if ((new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_33) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_66) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_100) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_133) &&
(new_config->pci_freq_mherz != XGE_HAL_PCI_FREQ_MHERZ_266) &&
(new_config->pci_freq_mherz != XGE_HAL_DEFAULT_USE_HARDCODE)) {
return XGE_HAL_BADCFG_PCI_FREQ_MHERZ;
}
return XGE_HAL_OK;
@ -735,25 +727,25 @@ xge_hal_status_e
__hal_driver_config_check (xge_hal_driver_config_t *new_config)
{
if ((new_config->queue_size_initial <
XGE_HAL_MIN_QUEUE_SIZE_INITIAL) ||
(new_config->queue_size_initial >
XGE_HAL_MAX_QUEUE_SIZE_INITIAL)) {
return XGE_HAL_BADCFG_QUEUE_SIZE_INITIAL;
}
if ((new_config->queue_size_max < XGE_HAL_MIN_QUEUE_SIZE_MAX) ||
(new_config->queue_size_max > XGE_HAL_MAX_QUEUE_SIZE_MAX)) {
return XGE_HAL_BADCFG_QUEUE_SIZE_MAX;
}
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if ((new_config->tracebuf_size < XGE_HAL_MIN_CIRCULAR_ARR) ||
(new_config->tracebuf_size > XGE_HAL_MAX_CIRCULAR_ARR)) {
return XGE_HAL_BADCFG_TRACEBUF_SIZE;
}
if ((new_config->tracebuf_timestamp_en < XGE_HAL_MIN_TIMESTAMP_EN) ||
(new_config->tracebuf_timestamp_en > XGE_HAL_MAX_TIMESTAMP_EN)) {
return XGE_HAL_BADCFG_TRACEBUF_SIZE;
}
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-driver.c
*
* Description: HAL driver object functionality
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-driver.h>
#include <dev/nxge/include/xgehal-device.h>
@ -70,22 +62,22 @@ xge_hal_driver_tracebuf_dump(void)
int off = 0;
if (g_xge_os_tracebuf == NULL) {
return;
}
xge_os_printf("################ Trace dump Begin ###############");
if (g_xge_os_tracebuf->wrapped_once) {
for (i = 0; i < g_xge_os_tracebuf->size -
g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg_start + i))
xge_os_printf(dmesg_start + i);
off = xge_os_strlen(dmesg_start + i) + 1;
}
}
for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg + i))
xge_os_printf(dmesg + i);
off = xge_os_strlen(dmesg + i) + 1;
}
xge_os_printf("################ Trace dump End ###############");
}
@ -100,29 +92,29 @@ xge_hal_driver_tracebuf_read(int bufsize, char *retbuf, int *retsize)
*retbuf = 0;
if (g_xge_os_tracebuf == NULL) {
return XGE_HAL_FAIL;
}
if (g_xge_os_tracebuf->wrapped_once) {
for (i = 0; i < g_xge_os_tracebuf->size -
g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg_start + i)) {
xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg_start + i);
retbuf_off += xge_os_strlen(dmesg_start + i) + 1;
if (retbuf_off > bufsize)
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
off = xge_os_strlen(dmesg_start + i) + 1;
}
}
for (i = 0; i < g_xge_os_tracebuf->offset; i += off) {
if (*(dmesg + i)) {
xge_os_sprintf(retbuf + retbuf_off, "%s\n", dmesg + i);
retbuf_off += xge_os_strlen(dmesg + i) + 1;
if (retbuf_off > bufsize)
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
off = xge_os_strlen(dmesg + i) + 1;
}
*retsize = retbuf_off;
@ -138,37 +130,37 @@ void
xge_hal_driver_bar0_offset_check(void)
{
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, adapter_status) ==
        0x108);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_traffic_int) ==
        0x08E0);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, dtx_control) ==
        0x09E8);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, tx_fifo_partition_0) ==
        0x1108);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_enable) ==
        0x1170);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, prc_rxd0_n[0]) ==
        0x1930);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rti_command_mem) ==
        0x19B8);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_cfg) ==
        0x2100);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rmac_addr_cmd_mem) ==
        0x2128);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_link_util) ==
        0x2170);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_pause_thresh_q0q3) ==
        0x2918);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, pcc_err_reg) ==
        0x1040);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, rxdma_int_status) ==
        0x1800);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mac_tmac_err_reg) ==
        0x2010);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, mc_err_reg) ==
        0x2810);
    xge_assert(xge_offsetof(xge_hal_pci_bar0_t, xgxs_int_status) ==
        0x3000);
}
#endif
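
The assertions above pin the C layout of the BAR0 register structure to the offsets documented for the hardware, so a drifted field is caught at initialization instead of surfacing as a silently mis-programmed register. A minimal sketch of the same idea using standard offsetof(); the two register names and the 0x108/0x08E0 values come from the checks above, but the padding-based layout is illustrative:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative register map: explicit padding forces each field onto the
 * byte offset the hardware documents. */
struct bar0_map {
    uint8_t  pad0[0x108];
    uint64_t adapter_status;            /* must land at 0x108  */
    uint8_t  pad1[0x8E0 - 0x108 - 8];
    uint64_t tx_traffic_int;            /* must land at 0x08E0 */
};

int main(void)
{
    /* Same kind of check the HAL performs: fail fast if the struct no
     * longer mirrors the documented layout. */
    assert(offsetof(struct bar0_map, adapter_status) == 0x108);
    assert(offsetof(struct bar0_map, tx_traffic_int) == 0x8E0);
    return 0;
}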
@ -188,7 +180,7 @@ xge_hal_driver_bar0_offset_check(void)
*/
xge_hal_status_e
xge_hal_driver_initialize(xge_hal_driver_config_t *config,
xge_hal_uld_cbs_t *uld_callbacks)
xge_hal_uld_cbs_t *uld_callbacks)
{
xge_hal_status_e status;
@ -203,29 +195,29 @@ xge_hal_driver_initialize(xge_hal_driver_config_t *config,
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if (config->tracebuf_size == 0)
/*
* Trace buffer implementation is not lock protected.
* The only harm to expect is memcpy() to go beyond of
* allowed boundaries. To make it safe (driver-wise),
* we pre-allocate needed number of extra bytes.
*/
config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
XGE_OS_TRACE_MSGBUF_MAX;
/*
* Trace buffer implementation is not lock protected.
* The only harm to expect is memcpy() to go beyond of
* allowed boundaries. To make it safe (driver-wise),
* we pre-allocate needed number of extra bytes.
*/
config->tracebuf_size = XGE_HAL_DEF_CIRCULAR_ARR +
XGE_OS_TRACE_MSGBUF_MAX;
#endif
status = __hal_driver_config_check(config);
if (status != XGE_HAL_OK)
return status;
return status;
xge_os_memzero(g_xge_hal_driver, sizeof(xge_hal_driver_t));
/* apply config */
xge_os_memcpy(&g_xge_hal_driver->config, config,
sizeof(xge_hal_driver_config_t));
sizeof(xge_hal_driver_config_t));
/* apply ULD callbacks */
xge_os_memcpy(&g_xge_hal_driver->uld_callbacks, uld_callbacks,
sizeof(xge_hal_uld_cbs_t));
sizeof(xge_hal_uld_cbs_t));
g_xge_hal_driver->is_initialized = 1;
@ -233,17 +225,17 @@ xge_hal_driver_initialize(xge_hal_driver_config_t *config,
g_tracebuf.size = config->tracebuf_size;
g_tracebuf.data = (char *)xge_os_malloc(NULL, g_tracebuf.size);
if (g_tracebuf.data == NULL) {
xge_os_printf("cannot allocate trace buffer!");
return XGE_HAL_ERR_OUT_OF_MEMORY;
xge_os_printf("cannot allocate trace buffer!");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
/* timestamps disabled by default */
g_tracebuf.timestamp = config->tracebuf_timestamp_en;
if (g_tracebuf.timestamp) {
xge_os_timestamp(g_tracebuf.msg);
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
xge_os_strlen(g_tracebuf.msg);
xge_os_timestamp(g_tracebuf.msg);
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX -
xge_os_strlen(g_tracebuf.msg);
} else
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
g_tracebuf.msgbuf_max = XGE_OS_TRACE_MSGBUF_MAX;
g_tracebuf.offset = 0;
*g_tracebuf.msg = 0;
xge_os_memzero(g_tracebuf.data, g_tracebuf.size);
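
The sizing above follows from the comment earlier in this function: the circular trace buffer is written without a lock, so a message copy may start just below the nominal end and spill past it, and the allocation therefore carries one maximum-length message of slack; when timestamping is enabled, the timestamp prefix is subtracted from the per-message budget. A sketch of that arithmetic (the constant values and timestamp format below are made up for illustration):

#include <stdio.h>

#define DEF_CIRCULAR_ARR  16384     /* nominal ring size, illustrative       */
#define TRACE_MSGBUF_MAX  512       /* longest single message, illustrative  */

int main(void)
{
    /* Unlocked writers may start a copy anywhere below the nominal size,
     * so allocate one extra max-sized message as overflow slack. */
    int alloc_size = DEF_CIRCULAR_ARR + TRACE_MSGBUF_MAX;

    /* With timestamps on, the prefix eats into the per-message budget. */
    int timestamp_len = (int)sizeof "2007-10-29 14:19:32 " - 1;
    int msgbuf_max = TRACE_MSGBUF_MAX - timestamp_len;

    printf("allocate %d bytes, %d usable bytes per message\n",
        alloc_size, msgbuf_max);
    return 0;
}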
@ -268,7 +260,7 @@ xge_hal_driver_terminate(void)
#ifdef XGE_TRACE_INTO_CIRCULAR_ARR
if (g_tracebuf.size) {
xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size);
xge_os_free(NULL, g_tracebuf.data, g_tracebuf.size);
}
#endif
@ -276,25 +268,25 @@ xge_hal_driver_terminate(void)
#ifdef XGE_OS_MEMORY_CHECK
{
int i, leaks=0;
xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt);
for (i=0; i<g_malloc_cnt; i++) {
if (g_malloc_arr[i].ptr != NULL) {
xge_os_printf("OSPAL: memory leak detected at "
"%s:%d:"XGE_OS_LLXFMT":%d",
g_malloc_arr[i].file,
g_malloc_arr[i].line,
(unsigned long long)(ulong_t)
g_malloc_arr[i].ptr,
g_malloc_arr[i].size);
leaks++;
}
}
if (leaks) {
xge_os_printf("OSPAL: %d memory leaks detected", leaks);
} else {
xge_os_printf("OSPAL: no memory leaks detected");
}
int i, leaks=0;
xge_os_printf("OSPAL: max g_malloc_cnt %d", g_malloc_cnt);
for (i=0; i<g_malloc_cnt; i++) {
if (g_malloc_arr[i].ptr != NULL) {
xge_os_printf("OSPAL: memory leak detected at "
"%s:%d:"XGE_OS_LLXFMT":%d",
g_malloc_arr[i].file,
g_malloc_arr[i].line,
(unsigned long long)(ulong_t)
g_malloc_arr[i].ptr,
g_malloc_arr[i].size);
leaks++;
}
}
if (leaks) {
xge_os_printf("OSPAL: %d memory leaks detected", leaks);
} else {
xge_os_printf("OSPAL: no memory leaks detected");
}
}
#endif
}
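
The XGE_OS_MEMORY_CHECK block above walks g_malloc_arr at teardown and reports every slot whose pointer is still live, together with the file, line, and size recorded when the allocation was made. A standalone sketch of that kind of allocation ledger (the wrapper names and table size are illustrative, not the OSPAL's):

#include <stdio.h>
#include <stdlib.h>

struct alloc_rec { void *ptr; const char *file; int line; size_t size; };

static struct alloc_rec tracked[256];   /* illustrative fixed-size ledger */
static int tracked_cnt;

static void *trk_malloc(size_t size, const char *file, int line)
{
    void *p = malloc(size);
    if (p != NULL && tracked_cnt < 256)
        tracked[tracked_cnt++] = (struct alloc_rec){ p, file, line, size };
    return p;
}

static void trk_free(void *p)
{
    for (int i = 0; i < tracked_cnt; i++)
        if (tracked[i].ptr == p) { tracked[i].ptr = NULL; break; }
    free(p);
}

static void trk_report(void)
{
    int leaks = 0;
    for (int i = 0; i < tracked_cnt; i++)
        if (tracked[i].ptr != NULL) {
            printf("memory leak detected at %s:%d, %zu bytes\n",
                tracked[i].file, tracked[i].line, tracked[i].size);
            leaks++;
        }
    printf("%d memory leaks detected\n", leaks);
}

int main(void)
{
    void *a = trk_malloc(64, __FILE__, __LINE__);
    void *b = trk_malloc(128, __FILE__, __LINE__);   /* deliberately leaked */
    trk_free(a);
    (void)b;
    trk_report();
    return 0;
}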


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-fifo-fp.c
*
* Description: Tx fifo object functionality (fast path)
*
* Created: 10 June 2004
*/
#ifdef XGE_DEBUG_FP
#include <dev/nxge/include/xgehal-fifo.h>
#endif
@ -46,7 +38,7 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
xge_assert(txdp);
txdl_priv = (xge_hal_fifo_txdl_priv_t *)
(ulong_t)txdp->host_control;
(ulong_t)txdp->host_control;
xge_assert(txdl_priv);
xge_assert(txdl_priv->dma_object);
@ -59,19 +51,19 @@ __hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
u64 ctrl_1)
u64 ctrl_1)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_hw_pair_t *hw_pair = fifo->hw_pair;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
u64 ctrl;
u64 ctrl;
txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;
#ifdef XGE_DEBUG_ASSERT
/* make sure Xena overwrites the (illegal) t_code value on completion */
XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
/* make sure Xena overwrites the (illegal) t_code value on completion */
XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
#endif
txdl_priv = __hal_fifo_txdl_priv(dtrh);
@ -80,14 +72,14 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
/* sync the TxDL to device */
xge_os_dma_sync(fifo->channel.pdev,
txdl_priv->dma_handle,
txdl_priv->dma_addr,
txdl_priv->dma_offset,
txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->dma_addr,
txdl_priv->dma_offset,
txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
XGE_OS_DMA_DIR_TODEVICE);
#endif
/* write the pointer first */
xge_os_pio_mem_write64(fifo->channel.pdev,
fifo->channel.regh1,
fifo->channel.regh1,
txdl_priv->dma_addr,
&hw_pair->txdl_pointer);
@ -97,7 +89,7 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
ctrl |= fifo->no_snoop_bits;
if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
}
/*
@ -118,89 +110,89 @@ __hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
__hal_channel_dtr_post(channelh, dtrh);
xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
ctrl, &hw_pair->list_control);
ctrl, &hw_pair->list_control);
xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
"into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
(unsigned long long)ctrl,
(unsigned long long)(ulong_t)&hw_pair->txdl_pointer);
"into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
(unsigned long long)ctrl,
(unsigned long long)(ulong_t)&hw_pair->txdl_pointer);
#ifdef XGE_HAL_FIFO_DUMP_TXD
xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
txdp->control_1, txdp->control_2, txdp->buffer_pointer,
txdp->host_control, txdl_priv->dma_addr);
XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
txdp->control_1, txdp->control_2, txdp->buffer_pointer,
txdp->host_control, txdl_priv->dma_addr);
#endif
fifo->channel.stats.total_posts++;
fifo->channel.usage_cnt++;
if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
}
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
xge_hal_fifo_txd_t *txdp, int list_size, int frags)
xge_hal_fifo_txd_t *txdp, int list_size, int frags)
{
xge_hal_fifo_txdl_priv_t *current_txdl_priv;
xge_hal_fifo_txdl_priv_t *next_txdl_priv;
int invalid_frags = frags % list_size;
if (invalid_frags){
xge_debug_fifo(XGE_ERR,
"freeing corrupt dtrh %p, fragments %d list size %d",
txdp, frags, list_size);
xge_assert(invalid_frags == 0);
xge_debug_fifo(XGE_ERR,
"freeing corrupt dtrh %p, fragments %d list size %d",
txdp, frags, list_size);
xge_assert(invalid_frags == 0);
}
while(txdp){
xge_debug_fifo(XGE_TRACE,
"freeing linked dtrh %p, fragments %d list size %d",
txdp, frags, list_size);
current_txdl_priv = __hal_fifo_txdl_priv(txdp);
xge_debug_fifo(XGE_TRACE,
"freeing linked dtrh %p, fragments %d list size %d",
txdp, frags, list_size);
current_txdl_priv = __hal_fifo_txdl_priv(txdp);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
current_txdl_priv->allocated = 0;
current_txdl_priv->allocated = 0;
#endif
__hal_channel_dtr_free(channelh, txdp);
next_txdl_priv = current_txdl_priv->next_txdl_priv;
xge_assert(frags);
frags -= list_size;
if (next_txdl_priv) {
current_txdl_priv->next_txdl_priv = NULL;
txdp = next_txdl_priv->first_txdp;
}
else {
xge_debug_fifo(XGE_TRACE,
"freed linked dtrh fragments %d list size %d",
frags, list_size);
break;
}
__hal_channel_dtr_free(channelh, txdp);
next_txdl_priv = current_txdl_priv->next_txdl_priv;
xge_assert(frags);
frags -= list_size;
if (next_txdl_priv) {
current_txdl_priv->next_txdl_priv = NULL;
txdp = next_txdl_priv->first_txdp;
}
else {
xge_debug_fifo(XGE_TRACE,
"freed linked dtrh fragments %d list size %d",
frags, list_size);
break;
}
}
xge_assert(frags == 0)
}
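
__hal_fifo_txdl_free_many() above releases a chain of linked TxDLs: the fragment count must be a whole number of list_size-descriptor blocks, and each iteration frees one block, subtracts list_size, and follows the next link until the budget is exactly exhausted. A simplified sketch of the same chunked walk, with an illustrative block type and the heap standing in for the descriptor pool:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct txdl_block {
    struct txdl_block *next;    /* next linked descriptor block, if any */
};

static void free_many(struct txdl_block *blk, int list_size, int frags)
{
    assert(frags % list_size == 0);     /* otherwise the chain is corrupt */
    while (blk != NULL) {
        struct txdl_block *next = blk->next;
        free(blk);                      /* return one block to the pool */
        frags -= list_size;
        blk = next;
    }
    assert(frags == 0);                 /* every fragment accounted for */
}

int main(void)
{
    /* Build a three-block chain, eight descriptors per block. */
    struct txdl_block *head = NULL;
    for (int i = 0; i < 3; i++) {
        struct txdl_block *b = malloc(sizeof *b);
        if (b == NULL)
            return 1;
        b->next = head;
        head = b;
    }
    free_many(head, 8, 24);
    puts("chain freed");
    return 0;
}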
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
xge_hal_fifo_txd_t *txdp, int txdl_count)
xge_hal_fifo_txd_t *txdp, int txdl_count)
{
xge_hal_fifo_txdl_priv_t *current_txdl_priv;
xge_hal_fifo_txdl_priv_t *next_txdl_priv;
int i = txdl_count;
xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);
txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);
current_txdl_priv = __hal_fifo_txdl_priv(txdp);
do{
xge_assert(i);
xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
current_txdl_priv->allocated = 0;
current_txdl_priv->allocated = 0;
#endif
next_txdl_priv = current_txdl_priv->next_txdl_priv;
txdp = current_txdl_priv->first_txdp;
current_txdl_priv->next_txdl_priv = NULL;
__hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i);
xge_debug_fifo(XGE_TRACE,
"dtrh %p restored at offset %d", txdp, i);
current_txdl_priv = next_txdl_priv;
next_txdl_priv = current_txdl_priv->next_txdl_priv;
txdp = current_txdl_priv->first_txdp;
current_txdl_priv->next_txdl_priv = NULL;
__hal_channel_dtr_restore(channelh, (xge_hal_dtr_h )txdp, --i);
xge_debug_fifo(XGE_TRACE,
"dtrh %p restored at offset %d", txdp, i);
current_txdl_priv = next_txdl_priv;
} while(current_txdl_priv);
__hal_channel_dtr_restore(channelh, NULL, txdl_count);
}
@ -222,7 +214,7 @@ xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh)
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
return ((char *)(ulong_t)txdp->host_control) +
sizeof(xge_hal_fifo_txdl_priv_t);
sizeof(xge_hal_fifo_txdl_priv_t);
}
/**
@ -247,7 +239,7 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
}
/**
* xge_hal_fifo_dtr_reserve_many- Reserve fifo descriptors which span more
* than single txdl.
* than single txdl.
* @channelh: Channel handle.
* @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
* with a valid handle.
@ -267,7 +259,7 @@ xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh, const int frags)
xge_hal_dtr_h *dtrh, const int frags)
{
xge_hal_status_e status = XGE_HAL_OK;
int alloc_frags = 0, dang_frags = 0;
@ -281,7 +273,7 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
unsigned long flags=0;
#endif
xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
frags);
frags);
xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
xge_os_spin_lock(&fifo->channel.reserve_lock);
@ -289,68 +281,68 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
while(alloc_frags < frags) {
status = __hal_channel_dtr_alloc(channelh,
(xge_hal_dtr_h *)(void*)&next_txdp);
if (status != XGE_HAL_OK){
xge_debug_fifo(XGE_ERR,
"failed to allocate linked fragments rc %d",
status);
xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
if (*dtrh) {
xge_assert(alloc_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
(xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
}
if (dang_dtrh) {
xge_assert(dang_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
(xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
}
break;
}
xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
" for frags %d", next_txdp, frags);
next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
xge_assert(next_txdl_priv);
xge_assert(next_txdl_priv->first_txdp == next_txdp);
next_txdl_priv->dang_txdl = NULL;
next_txdl_priv->dang_frags = 0;
next_txdl_priv->next_txdl_priv = NULL;
status = __hal_channel_dtr_alloc(channelh,
(xge_hal_dtr_h *)(void*)&next_txdp);
if (status != XGE_HAL_OK){
xge_debug_fifo(XGE_ERR,
"failed to allocate linked fragments rc %d",
status);
xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
if (*dtrh) {
xge_assert(alloc_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
(xge_hal_fifo_txd_t *) *dtrh, alloc_frags/max_frags);
}
if (dang_dtrh) {
xge_assert(dang_frags/max_frags);
__hal_fifo_txdl_restore_many(channelh,
(xge_hal_fifo_txd_t *) dang_dtrh, dang_frags/max_frags);
}
break;
}
xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
" for frags %d", next_txdp, frags);
next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
xge_assert(next_txdl_priv);
xge_assert(next_txdl_priv->first_txdp == next_txdp);
next_txdl_priv->dang_txdl = NULL;
next_txdl_priv->dang_frags = 0;
next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
next_txdl_priv->allocated = 1;
next_txdl_priv->allocated = 1;
#endif
if (!curr_txdp || !curr_txdl_priv) {
curr_txdp = next_txdp;
curr_txdl_priv = next_txdl_priv;
*dtrh = (xge_hal_dtr_h)next_txdp;
alloc_frags = max_frags;
continue;
}
if (curr_txdl_priv->memblock ==
next_txdl_priv->memblock) {
xge_debug_fifo(XGE_TRACE,
"linking dtrh %p, with %p",
*dtrh, next_txdp);
xge_assert (next_txdp ==
curr_txdp + max_frags);
alloc_frags += max_frags;
curr_txdl_priv->next_txdl_priv = next_txdl_priv;
}
else {
xge_assert(*dtrh);
xge_assert(dang_dtrh == NULL);
dang_dtrh = *dtrh;
dang_frags = alloc_frags;
xge_debug_fifo(XGE_TRACE,
"dangling dtrh %p, linked with dtrh %p",
*dtrh, next_txdp);
next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
next_txdl_priv->dang_frags = alloc_frags;
alloc_frags = max_frags;
*dtrh = next_txdp;
}
curr_txdp = next_txdp;
curr_txdl_priv = next_txdl_priv;
if (!curr_txdp || !curr_txdl_priv) {
curr_txdp = next_txdp;
curr_txdl_priv = next_txdl_priv;
*dtrh = (xge_hal_dtr_h)next_txdp;
alloc_frags = max_frags;
continue;
}
if (curr_txdl_priv->memblock ==
next_txdl_priv->memblock) {
xge_debug_fifo(XGE_TRACE,
"linking dtrh %p, with %p",
*dtrh, next_txdp);
xge_assert (next_txdp ==
curr_txdp + max_frags);
alloc_frags += max_frags;
curr_txdl_priv->next_txdl_priv = next_txdl_priv;
}
else {
xge_assert(*dtrh);
xge_assert(dang_dtrh == NULL);
dang_dtrh = *dtrh;
dang_frags = alloc_frags;
xge_debug_fifo(XGE_TRACE,
"dangling dtrh %p, linked with dtrh %p",
*dtrh, next_txdp);
next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *) *dtrh;
next_txdl_priv->dang_frags = alloc_frags;
alloc_frags = max_frags;
*dtrh = next_txdp;
}
curr_txdp = next_txdp;
curr_txdl_priv = next_txdl_priv;
}
#if defined(XGE_HAL_TX_MULTI_RESERVE)
@ -360,30 +352,30 @@ xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
#endif
if (status == XGE_HAL_OK) {
xge_hal_fifo_txdl_priv_t * txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
txdl_priv = __hal_fifo_txdl_priv(txdp);
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
txdl_priv->align_used_frags = 0;
txdl_priv->frags = 0;
txdl_priv->bytes_sent = 0;
txdl_priv->alloc_frags = alloc_frags;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
xge_hal_fifo_txdl_priv_t * txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
txdl_priv = __hal_fifo_txdl_priv(txdp);
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
txdl_priv->align_used_frags = 0;
txdl_priv->frags = 0;
txdl_priv->bytes_sent = 0;
txdl_priv->alloc_frags = alloc_frags;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
#if defined(XGE_OS_MEMORY_CHECK)
txdl_priv->allocated = 1;
txdl_priv->allocated = 1;
#endif
/* update statistics */
statsp->total_posts_dtrs_many++;
statsp->total_posts_frags_many += txdl_priv->alloc_frags;
if (txdl_priv->dang_frags){
statsp->total_posts_dang_dtrs++;
statsp->total_posts_dang_frags += txdl_priv->dang_frags;
}
/* update statistics */
statsp->total_posts_dtrs_many++;
statsp->total_posts_frags_many += txdl_priv->alloc_frags;
if (txdl_priv->dang_frags){
statsp->total_posts_dang_dtrs++;
statsp->total_posts_dang_frags += txdl_priv->dang_frags;
}
}
return status;
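
The allocation loop above makes one decision per newly reserved TxDL: if it comes from the same memblock as the current one (and is therefore contiguous, as the assert on next_txdp == curr_txdp + max_frags enforces) it extends the chain; otherwise the partial chain is parked as a "dangling" list hanging off the new TxDL, which becomes the head of a fresh chain, and the dangling list is released later in dtr_free. A reduced sketch of that decision with illustrative types:

#include <stdio.h>

struct txdl {
    int memblock_id;        /* which pool block this TxDL lives in      */
    struct txdl *next;      /* chain of contiguous TxDLs                */
    struct txdl *dang;      /* earlier, non-contiguous partial chain    */
};

struct chain { struct txdl *head, *tail; };

static void link_or_dangle(struct chain *ch, struct txdl *next_txdl)
{
    if (ch->head == NULL) {
        ch->head = ch->tail = next_txdl;            /* first TxDL         */
    } else if (ch->tail->memblock_id == next_txdl->memblock_id) {
        ch->tail->next = next_txdl;                 /* contiguous: extend */
        ch->tail = next_txdl;
    } else {
        next_txdl->dang = ch->head;                 /* park partial chain */
        ch->head = ch->tail = next_txdl;            /* start a new one    */
    }
}

int main(void)
{
    struct txdl a = { 0 }, b = { 0 }, c = { 1 };
    struct chain ch = { NULL, NULL };

    link_or_dangle(&ch, &a);
    link_or_dangle(&ch, &b);    /* same memblock: chained after a        */
    link_or_dangle(&ch, &c);    /* new memblock: a-b parked as dangling  */

    printf("chain head in block %d, dangling chain %s\n",
        ch.head->memblock_id, ch.head->dang ? "present" : "absent");
    return 0;
}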
@ -436,28 +428,28 @@ xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
#endif
if (status == XGE_HAL_OK) {
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
txdl_priv = __hal_fifo_txdl_priv(txdp);
txdl_priv = __hal_fifo_txdl_priv(txdp);
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
txdl_priv->align_used_frags = 0;
txdl_priv->frags = 0;
txdl_priv->alloc_frags =
((xge_hal_fifo_t *)channelh)->config->max_frags;
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->bytes_sent = 0;
/* reset the TxDL's private */
txdl_priv->align_dma_offset = 0;
txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
txdl_priv->align_used_frags = 0;
txdl_priv->frags = 0;
txdl_priv->alloc_frags =
((xge_hal_fifo_t *)channelh)->config->max_frags;
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->next_txdl_priv = NULL;
txdl_priv->bytes_sent = 0;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
/* reset TxD0 */
txdp->control_1 = txdp->control_2 = 0;
#if defined(XGE_OS_MEMORY_CHECK)
txdl_priv->allocated = 1;
txdl_priv->allocated = 1;
#endif
}
@ -488,7 +480,7 @@ xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size,
xge_hal_dtr_h dtr_sp)
xge_hal_dtr_h dtr_sp)
{
/* FIXME: implement */
return XGE_HAL_OK;
@ -536,7 +528,7 @@ xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
#endif
__hal_fifo_dtr_post_single(channelh, dtrh,
(u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));
(u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));
#if defined(XGE_HAL_TX_MULTI_POST)
xge_os_spin_unlock(fifo->post_lock_ptr);
@ -563,7 +555,7 @@ xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
xge_hal_dtr_h dtrs[])
xge_hal_dtr_h dtrs[])
{
int i;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
@ -582,7 +574,7 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
(txdl_priv_last->frags - 1);
(txdl_priv_last->frags - 1);
txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;
#if defined(XGE_HAL_TX_MULTI_POST)
@ -593,22 +585,22 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
#endif
for (i=0; i<num; i++) {
xge_hal_fifo_txdl_priv_t *txdl_priv;
u64 val64;
xge_hal_dtr_h dtrh = dtrs[i];
xge_hal_fifo_txdl_priv_t *txdl_priv;
u64 val64;
xge_hal_dtr_h dtrh = dtrs[i];
txdl_priv = __hal_fifo_txdl_priv(dtrh);
txdl_priv = txdl_priv; /* Cheat lint */
txdl_priv = __hal_fifo_txdl_priv(dtrh);
txdl_priv = txdl_priv; /* Cheat lint */
val64 = 0;
if (i == 0) {
val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
} else if (i == num -1) {
val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
}
val64 = 0;
if (i == 0) {
val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
} else if (i == num -1) {
val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
}
val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
__hal_fifo_dtr_post_single(channelh, dtrh, val64);
val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
__hal_fifo_dtr_post_single(channelh, dtrh, val64);
}
#if defined(XGE_HAL_TX_MULTI_POST)
@ -658,7 +650,7 @@ xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
xge_hal_dtr_h *dtrh, u8 *t_code)
xge_hal_dtr_h *dtrh, u8 *t_code)
{
xge_hal_fifo_txd_t *txdp;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
@ -669,7 +661,7 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
__hal_channel_dtr_try_complete(channelh, dtrh);
txdp = (xge_hal_fifo_txd_t *)*dtrh;
if (txdp == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
@ -680,28 +672,28 @@ xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
* Note: 16bytes means Control_1 & Control_2 */
xge_os_dma_sync(fifo->channel.pdev,
txdl_priv->dma_handle,
txdl_priv->dma_addr,
txdl_priv->dma_offset,
16,
XGE_OS_DMA_DIR_FROMDEVICE);
txdl_priv->dma_addr,
txdl_priv->dma_offset,
16,
XGE_OS_DMA_DIR_FROMDEVICE);
#endif
/* check whether host owns it */
if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
xge_assert(txdp->host_control!=0);
xge_assert(txdp->host_control!=0);
__hal_channel_dtr_complete(channelh);
__hal_channel_dtr_complete(channelh);
*t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);
*t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);
/* see XGE_HAL_SET_TXD_T_CODE() above.. */
xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);
/* see XGE_HAL_SET_TXD_T_CODE() above.. */
xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);
if (fifo->channel.usage_cnt > 0)
fifo->channel.usage_cnt--;
if (fifo->channel.usage_cnt > 0)
fifo->channel.usage_cnt--;
return XGE_HAL_OK;
return XGE_HAL_OK;
}
/* no more completions */
@ -742,7 +734,7 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
unsigned long flags = 0;
#endif
xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
(xge_hal_fifo_txd_t *)dtr);
(xge_hal_fifo_txd_t *)dtr);
int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
#if defined(XGE_HAL_TX_MULTI_FREE)
xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
@ -752,35 +744,35 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
#endif
if (txdl_priv->alloc_frags > max_frags) {
xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
txdl_priv->dang_txdl;
int dang_frags = txdl_priv->dang_frags;
int alloc_frags = txdl_priv->alloc_frags;
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->alloc_frags = 0;
/* dtrh must have a linked list of dtrh */
xge_assert(txdl_priv->next_txdl_priv);
xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
txdl_priv->dang_txdl;
int dang_frags = txdl_priv->dang_frags;
int alloc_frags = txdl_priv->alloc_frags;
txdl_priv->dang_txdl = NULL;
txdl_priv->dang_frags = 0;
txdl_priv->alloc_frags = 0;
/* dtrh must have a linked list of dtrh */
xge_assert(txdl_priv->next_txdl_priv);
/* free any dangling dtrh first */
if (dang_txdp) {
xge_debug_fifo(XGE_TRACE,
"freeing dangled dtrh %p for %d fragments",
dang_txdp, dang_frags);
__hal_fifo_txdl_free_many(channelh, dang_txdp,
max_frags, dang_frags);
}
/* free any dangling dtrh first */
if (dang_txdp) {
xge_debug_fifo(XGE_TRACE,
"freeing dangled dtrh %p for %d fragments",
dang_txdp, dang_frags);
__hal_fifo_txdl_free_many(channelh, dang_txdp,
max_frags, dang_frags);
}
/* now free the reserved dtrh list */
xge_debug_fifo(XGE_TRACE,
"freeing dtrh %p list of %d fragments", dtr,
alloc_frags);
__hal_fifo_txdl_free_many(channelh,
(xge_hal_fifo_txd_t *)dtr, max_frags,
alloc_frags);
/* now free the reserved dtrh list */
xge_debug_fifo(XGE_TRACE,
"freeing dtrh %p list of %d fragments", dtr,
alloc_frags);
__hal_fifo_txdl_free_many(channelh,
(xge_hal_fifo_txd_t *)dtr, max_frags,
alloc_frags);
}
else
__hal_channel_dtr_free(channelh, dtr);
__hal_channel_dtr_free(channelh, dtr);
((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;
@ -844,8 +836,8 @@ xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
dma_addr_t dma_pointer, int size, int misaligned_size)
xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
dma_addr_t dma_pointer, int size, int misaligned_size)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -857,7 +849,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
if (frag_idx != 0) {
txdp->control_1 = txdp->control_2 = 0;
txdp->control_1 = txdp->control_2 = 0;
}
/* On some systems buffer size could be zero.
@ -866,17 +858,17 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
xge_assert(size > 0);
xge_assert(frag_idx < txdl_priv->alloc_frags);
xge_assert(misaligned_size != 0 &&
misaligned_size <= fifo->config->alignment_size);
misaligned_size <= fifo->config->alignment_size);
remaining_size = size - misaligned_size;
xge_assert(remaining_size >= 0);
xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
vaddr, misaligned_size);
vaddr, misaligned_size);
if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
}
}
/* setup new buffer */
prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
@ -887,29 +879,29 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
txdl_priv->frags++;
txdl_priv->align_used_frags++;
txdl_priv->align_vaddr_start += fifo->config->alignment_size;
txdl_priv->align_dma_offset = 0;
txdl_priv->align_dma_offset = 0;
#if defined(XGE_OS_DMA_REQUIRES_SYNC)
/* sync new buffer */
xge_os_dma_sync(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdp->buffer_pointer,
0,
misaligned_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_handle,
txdp->buffer_pointer,
0,
misaligned_size,
XGE_OS_DMA_DIR_TODEVICE);
#endif
if (remaining_size) {
xge_assert(frag_idx < txdl_priv->alloc_frags);
txdp++;
txdp->buffer_pointer = (u64)dma_pointer +
misaligned_size;
txdp->control_1 =
XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
txdl_priv->bytes_sent += remaining_size;
txdp->control_2 = 0;
fifo->channel.stats.total_buffers++;
txdl_priv->frags++;
xge_assert(frag_idx < txdl_priv->alloc_frags);
txdp++;
txdp->buffer_pointer = (u64)dma_pointer +
misaligned_size;
txdp->control_1 =
XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
txdl_priv->bytes_sent += remaining_size;
txdp->control_2 = 0;
fifo->channel.stats.total_buffers++;
txdl_priv->frags++;
}
return XGE_HAL_OK;
@ -936,7 +928,7 @@ xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
void *vaddr, int size)
void *vaddr, int size)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -952,7 +944,7 @@ xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
txdl_priv->align_dma_offset, vaddr, size);
txdl_priv->align_dma_offset, vaddr, size);
fifo->channel.stats.copied_frags++;
@ -977,7 +969,7 @@ xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx)
int frag_idx)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -990,13 +982,13 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
if (frag_idx != 0) {
txdp->control_1 = txdp->control_2 = 0;
txdp->control_1 = txdp->control_2 = 0;
}
prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
txdp->control_1 |=
XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
fifo->channel.stats.total_buffers++;
fifo->channel.stats.copied_buffers++;
@ -1006,16 +998,16 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
#if defined(XGE_OS_DMA_REQUIRES_SYNC)
/* sync pre-mapped buffer */
xge_os_dma_sync(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdp->buffer_pointer,
0,
txdl_priv->align_dma_offset,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_handle,
txdp->buffer_pointer,
0,
txdl_priv->align_dma_offset,
XGE_OS_DMA_DIR_TODEVICE);
#endif
/* increment vaddr_start for the next buffer_append() iteration */
txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
txdl_priv->align_dma_offset = 0;
txdl_priv->align_dma_offset = 0;
}
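
The aligned-buffer routines above implement the copy path for fragments that cannot be mapped directly: buffer_append() memcpy()s data into a pre-mapped, per-TxDL staging area at the current align_dma_offset, and buffer_finalize() publishes whatever has accumulated as one TxD buffer, then advances align_vaddr_start and resets the offset for the next round. A stripped-down sketch of that append/finalize pattern, with a plain byte array standing in for the pre-mapped DMA buffer and an (offset, length) pair standing in for the TxD:

#include <stdio.h>
#include <string.h>

struct staging {
    char   buf[256];    /* stands in for the pre-mapped, aligned DMA buffer */
    size_t start;       /* beginning of the region being accumulated        */
    size_t used;        /* bytes appended since the last finalize           */
};

static int stg_append(struct staging *s, const void *data, size_t len)
{
    if (s->start + s->used + len > sizeof s->buf)
        return -1;                              /* out of staging space */
    memcpy(s->buf + s->start + s->used, data, len);
    s->used += len;
    return 0;
}

static void stg_finalize(struct staging *s, size_t *desc_off, size_t *desc_len)
{
    *desc_off = s->start;               /* the "TxD" points at this region */
    *desc_len = s->used;
    s->start += s->used;                /* next accumulation starts here   */
    s->used = 0;
}

int main(void)
{
    struct staging s = { .start = 0, .used = 0 };
    size_t off, len;

    stg_append(&s, "hdr", 3);           /* two small copied fragments ...  */
    stg_append(&s, "payload", 7);
    stg_finalize(&s, &off, &len);       /* ... published as one buffer     */
    printf("descriptor covers offset %zu, %zu bytes\n", off, len);
    return 0;
}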
/**
@ -1048,7 +1040,7 @@ xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
*/
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
int frag_idx, dma_addr_t dma_pointer, int size)
int frag_idx, dma_addr_t dma_pointer, int size)
{
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -1058,7 +1050,7 @@ xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;
if (frag_idx != 0) {
txdp->control_1 = txdp->control_2 = 0;
txdp->control_1 = txdp->control_2 = 0;
}
/* Note:
@ -1161,13 +1153,13 @@ xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
__hal_channel_dtr_try_complete(channelh, &dtrh);
txdp = (xge_hal_fifo_txd_t *)dtrh;
if (txdp == NULL) {
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
/* check whether host owns it */
if ( !(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA) ) {
xge_assert(txdp->host_control!=0);
return XGE_HAL_OK;
xge_assert(txdp->host_control!=0);
return XGE_HAL_OK;
}
/* no more completions */


@ -26,26 +26,18 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-fifo.c
*
* Description: fifo object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-fifo.h>
#include <dev/nxge/include/xgehal-device.h>
static xge_hal_status_e
__hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int memblock_item_idx;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -54,10 +46,10 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
xge_assert(item);
txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
xge_assert(txdl_priv);
/* pre-format HAL's TxDL's private */
@ -85,22 +77,22 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
xge_hal_status_e status;
if (fifo->config->alignment_size) {
status =__hal_fifo_dtr_align_alloc_map(fifo, txdp);
if (status != XGE_HAL_OK) {
xge_debug_mm(XGE_ERR,
"align buffer[%d] %d bytes, status %d",
index,
fifo->align_size,
status);
return status;
}
if (status != XGE_HAL_OK) {
xge_debug_mm(XGE_ERR,
"align buffer[%d] %d bytes, status %d",
index,
fifo->align_size,
status);
return status;
}
}
}
#endif
#endif
if (fifo->channel.dtr_init) {
fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
fifo->channel.dtr_init(fifo, (xge_hal_dtr_h)txdp, index,
fifo->channel.userdata, XGE_HAL_CHANNEL_OC_NORMAL);
}
return XGE_HAL_OK;
@ -109,13 +101,13 @@ __hal_fifo_mempool_item_alloc(xge_hal_mempool_h mempoolh,
static xge_hal_status_e
__hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int memblock_item_idx;
xge_hal_fifo_txdl_priv_t *txdl_priv;
@ -126,33 +118,33 @@ __hal_fifo_mempool_item_free(xge_hal_mempool_h mempoolh,
xge_assert(item);
txdl_priv = (xge_hal_fifo_txdl_priv_t *) \
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index,
item,
&memblock_item_idx);
xge_assert(txdl_priv);
#ifdef XGE_HAL_ALIGN_XMIT
if (fifo->config->alignment_size) {
if (txdl_priv->align_dma_addr != 0) {
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
if (txdl_priv->align_dma_addr != 0) {
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
}
txdl_priv->align_dma_addr = 0;
}
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
fifo->align_size,
&txdl_priv->align_dma_acch,
&txdl_priv->align_dma_handle);
txdl_priv->align_vaddr = NULL;
}
txdl_priv->align_vaddr = NULL;
}
}
#endif
@ -180,46 +172,46 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
#endif
#if defined(XGE_HAL_TX_MULTI_POST)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
fifo->post_lock_ptr = &hldev->xena_post_lock;
fifo->post_lock_ptr = &hldev->xena_post_lock;
} else {
xge_os_spin_lock_init(&fifo->channel.post_lock, hldev->pdev);
fifo->post_lock_ptr = &fifo->channel.post_lock;
fifo->post_lock_ptr = &fifo->channel.post_lock;
}
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
fifo->post_lock_ptr = &hldev->xena_post_lock;
fifo->post_lock_ptr = &hldev->xena_post_lock;
} else {
xge_os_spin_lock_init_irq(&fifo->channel.post_lock,
hldev->irqh);
fifo->post_lock_ptr = &fifo->channel.post_lock;
hldev->irqh);
fifo->post_lock_ptr = &fifo->channel.post_lock;
}
#endif
fifo->align_size =
fifo->config->alignment_size * fifo->config->max_aligned_frags;
fifo->config->alignment_size * fifo->config->max_aligned_frags;
/* Initializing the BAR1 address as the start of
* the FIFO queue pointer and as a location of FIFO control
* word. */
fifo->hw_pair =
(xge_hal_fifo_hw_pair_t *) (void *)(hldev->bar1 +
(attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
(attr->post_qid * XGE_HAL_FIFO_HW_PAIR_OFFSET));
/* apply "interrupts per txdl" attribute */
fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_UTILZ;
if (queue->intr) {
fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
fifo->interrupt_type = XGE_HAL_TXD_INT_TYPE_PER_LIST;
}
fifo->no_snoop_bits =
(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));
(int)(XGE_HAL_TX_FIFO_NO_SNOOP(queue->no_snoop_bits));
/*
* FIFO memory management strategy:
*
* TxDL splitted into three independent parts:
* - set of TxD's
* - TxD HAL private part
* - upper layer private part
* - set of TxD's
* - TxD HAL private part
* - upper layer private part
*
* Adaptative memory allocation used. i.e. Memory allocated on
* demand with the size which will fit into one memory block.
@ -239,18 +231,18 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
fifo->priv_size = sizeof(xge_hal_fifo_txdl_priv_t) +
attr->per_dtr_space;
fifo->priv_size = ((fifo->priv_size + __xge_os_cacheline_size -1) /
__xge_os_cacheline_size) *
__xge_os_cacheline_size;
__xge_os_cacheline_size) *
__xge_os_cacheline_size;
/* recompute txdl size to be cacheline aligned */
fifo->txdl_size = fifo->config->max_frags * sizeof(xge_hal_fifo_txd_t);
txdl_size = ((fifo->txdl_size + __xge_os_cacheline_size - 1) /
__xge_os_cacheline_size) * __xge_os_cacheline_size;
__xge_os_cacheline_size) * __xge_os_cacheline_size;
if (fifo->txdl_size != txdl_size)
xge_debug_fifo(XGE_ERR, "cacheline > 128 ( ?? ): %d, %d, %d, %d",
fifo->config->max_frags, fifo->txdl_size, txdl_size,
__xge_os_cacheline_size);
fifo->config->max_frags, fifo->txdl_size, txdl_size,
__xge_os_cacheline_size);
fifo->txdl_size = txdl_size;
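
Both priv_size and txdl_size above are rounded up to a whole number of cachelines with the usual ((x + cl - 1) / cl) * cl arithmetic, so each TxDL and its private area start on a cacheline boundary. A tiny sketch of that rounding; the 32-byte TxD size matches the frags << 5 sync in the fast path, while the 128-byte cacheline and fragment count are just example values:

#include <stdio.h>

/* Round x up to the next multiple of the cacheline size. */
static int roundup_cacheline(int x, int cacheline)
{
    return ((x + cacheline - 1) / cacheline) * cacheline;
}

int main(void)
{
    int cacheline = 128;
    int txd_size  = 32;
    int max_frags = 35;

    /* 35 * 32 = 1120 bytes of TxDs round up to 1152 (9 cachelines). */
    printf("txdl_size %d -> %d\n", max_frags * txd_size,
        roundup_cacheline(max_frags * txd_size, cacheline));
    return 0;
}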
@ -260,62 +252,62 @@ __hal_fifo_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
fifo->channel.dtr_init = attr->dtr_init;
fifo->channel.userdata = attr->userdata;
fifo->txdl_per_memblock = fifo->config->memblock_size /
fifo->txdl_size;
fifo->txdl_size;
fifo->mempool = __hal_mempool_create(hldev->pdev,
fifo->config->memblock_size,
fifo->txdl_size,
fifo->priv_size,
queue->initial,
queue->max,
__hal_fifo_mempool_item_alloc,
__hal_fifo_mempool_item_free,
fifo);
fifo->config->memblock_size,
fifo->txdl_size,
fifo->priv_size,
queue->initial,
queue->max,
__hal_fifo_mempool_item_alloc,
__hal_fifo_mempool_item_free,
fifo);
if (fifo->mempool == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
status = __hal_channel_initialize(channelh, attr,
(void **) __hal_mempool_items_arr(fifo->mempool),
queue->initial, queue->max,
fifo->config->reserve_threshold);
(void **) __hal_mempool_items_arr(fifo->mempool),
queue->initial, queue->max,
fifo->config->reserve_threshold);
if (status != XGE_HAL_OK) {
__hal_fifo_close(channelh);
return status;
__hal_fifo_close(channelh);
return status;
}
xge_debug_fifo(XGE_TRACE,
"DTR reserve_length:%d reserve_top:%d\n"
"max_frags:%d reserve_threshold:%d\n"
"memblock_size:%d alignment_size:%d max_aligned_frags:%d",
fifo->channel.reserve_length, fifo->channel.reserve_top,
fifo->config->max_frags, fifo->config->reserve_threshold,
fifo->config->memblock_size, fifo->config->alignment_size,
fifo->config->max_aligned_frags);
"DTR reserve_length:%d reserve_top:%d\n"
"max_frags:%d reserve_threshold:%d\n"
"memblock_size:%d alignment_size:%d max_aligned_frags:%d",
fifo->channel.reserve_length, fifo->channel.reserve_top,
fifo->config->max_frags, fifo->config->reserve_threshold,
fifo->config->memblock_size, fifo->config->alignment_size,
fifo->config->max_aligned_frags);
#ifdef XGE_DEBUG_ASSERT
for ( i = 0; i < fifo->channel.reserve_length; i++) {
xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
xge_debug_fifo(XGE_TRACE, "DTR before reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
}
#endif
xge_assert(fifo->channel.reserve_length);
/* reverse the FIFO dtr array */
max_arr_index = fifo->channel.reserve_length - 1;
max_arr_index -=fifo->channel.reserve_top;
max_arr_index = fifo->channel.reserve_length - 1;
max_arr_index -=fifo->channel.reserve_top;
xge_assert(max_arr_index);
mid_point = (fifo->channel.reserve_length - fifo->channel.reserve_top)/2;
for (i = 0; i < mid_point; i++) {
dtrh = fifo->channel.reserve_arr[i];
fifo->channel.reserve_arr[i] =
fifo->channel.reserve_arr[max_arr_index - i];
fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
dtrh = fifo->channel.reserve_arr[i];
fifo->channel.reserve_arr[i] =
fifo->channel.reserve_arr[max_arr_index - i];
fifo->channel.reserve_arr[max_arr_index - i] = dtrh;
}
#ifdef XGE_DEBUG_ASSERT
for ( i = 0; i < fifo->channel.reserve_length; i++) {
xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
xge_debug_fifo(XGE_TRACE, "DTR after reversing index:%d"
" handle:%p", i, fifo->channel.reserve_arr[i]);
}
#endif
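
The block above reverses the reserve array in place while leaving the top reserve_top entries where they are: element i is swapped with element (reserve_length - 1 - reserve_top - i) for every i below the midpoint. A small sketch of that index arithmetic on a plain pointer array:

#include <stdio.h>

/* Reverse the first (length - reserve_top) slots of arr in place. */
static void reverse_head(void *arr[], int length, int reserve_top)
{
    int max_idx = length - 1 - reserve_top;
    int mid = (length - reserve_top) / 2;

    for (int i = 0; i < mid; i++) {
        void *tmp = arr[i];
        arr[i] = arr[max_idx - i];
        arr[max_idx - i] = tmp;
    }
}

int main(void)
{
    int v[6] = { 0, 1, 2, 3, 4, 5 };
    void *arr[6];

    for (int i = 0; i < 6; i++)
        arr[i] = &v[i];
    reverse_head(arr, 6, 2);            /* touch only the first 4 slots */
    for (int i = 0; i < 6; i++)
        printf("%d ", *(int *)arr[i]);  /* prints: 3 2 1 0 4 5 */
    printf("\n");
    return 0;
}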
@ -329,7 +321,7 @@ __hal_fifo_close(xge_hal_channel_h channelh)
xge_hal_device_t *hldev = (xge_hal_device_t *)fifo->channel.devh;
if (fifo->mempool) {
__hal_mempool_destroy(fifo->mempool);
__hal_mempool_destroy(fifo->mempool);
}
__hal_channel_terminate(channelh);
@ -341,10 +333,10 @@ __hal_fifo_close(xge_hal_channel_h channelh)
#endif
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
#if defined(XGE_HAL_TX_MULTI_POST)
xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
xge_os_spin_lock_destroy(&fifo->channel.post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
hldev->pdev);
xge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
hldev->pdev);
#endif
}
}
@ -383,59 +375,59 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
* FIFOs are enabled! page 6-77 user guide */
if (!hldev->config.rts_qos_en) {
/* all zeroes for Round-Robin */
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
tx_fifo_wrr[i]);
}
/* all zeroes for Round-Robin */
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
tx_fifo_wrr[i]);
}
/* reset all of them but '0' */
for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
tx_fifo_partitions[i]);
}
/* reset all of them but '0' */
for (i=1; i < XGE_HAL_FIFO_MAX_PARTITION; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
tx_fifo_partitions[i]);
}
} else { /* Change the default settings */
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
}
for (i = 0; i < XGE_HAL_FIFO_MAX_WRR; i++) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
tx_fifo_wrr_value[i], tx_fifo_wrr[i]);
}
}
/* configure only configured FIFOs */
val64 = 0; part0 = 0;
for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
int reg_half = i % 2;
int reg_num = i / 2;
int reg_half = i % 2;
int reg_num = i / 2;
if (hldev->config.fifo.queue[i].configured) {
int priority = hldev->config.fifo.queue[i].priority;
val64 |=
vBIT((hldev->config.fifo.queue[i].max-1),
(((reg_half) * 32) + 19),
13) | vBIT(priority, (((reg_half)*32) + 5), 3);
}
if (hldev->config.fifo.queue[i].configured) {
int priority = hldev->config.fifo.queue[i].priority;
val64 |=
vBIT((hldev->config.fifo.queue[i].max-1),
(((reg_half) * 32) + 19),
13) | vBIT(priority, (((reg_half)*32) + 5), 3);
}
/* NOTE: do write operation for each second u64 half
* or force for first one if configured number
* is even */
if (reg_half) {
if (reg_num == 0) {
/* skip partition '0', must write it once at
* the end */
part0 = val64;
} else {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
val64, tx_fifo_partitions[reg_num]);
xge_debug_fifo(XGE_TRACE,
"fifo partition_%d at: "
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
reg_num, (unsigned long long)(ulong_t)
tx_fifo_partitions[reg_num],
(unsigned long long)val64);
}
val64 = 0;
}
/* NOTE: do write operation for each second u64 half
* or force for first one if configured number
* is even */
if (reg_half) {
if (reg_num == 0) {
/* skip partition '0', must write it once at
* the end */
part0 = val64;
} else {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
val64, tx_fifo_partitions[reg_num]);
xge_debug_fifo(XGE_TRACE,
"fifo partition_%d at: "
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
reg_num, (unsigned long long)(ulong_t)
tx_fifo_partitions[reg_num],
(unsigned long long)val64);
}
val64 = 0;
}
}
part0 |= BIT(0); /* to enable the FIFO partition. */
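
The loop above packs two queues into each 64-bit TX_FIFO_PARTITION register: reg_half = i % 2 selects the 32-bit half, reg_num = i / 2 selects the register, and the accumulated value is flushed once the second half is filled, with partition 0 held back for the final enable write. A sketch of that pairing; the plain shift below is only a stand-in for the driver's vBIT() length/priority encoding:

#include <stdio.h>

int main(void)
{
    int num_queues = 5;                 /* illustrative queue count     */
    unsigned long long val64 = 0;

    for (int i = 0; i < num_queues; i++) {
        int reg_half = i % 2;           /* which 32-bit half            */
        int reg_num  = i / 2;           /* which partition register     */

        /* stand-in for the vBIT() fields built from queue max/priority */
        val64 |= (unsigned long long)(i + 1) << (reg_half ? 0 : 32);

        if (reg_half) {                 /* second half done: flush      */
            printf("partition_%d <- 0x%016llx\n", reg_num, val64);
            val64 = 0;
        }
    }
    /* An odd count leaves the last register half-filled; the driver covers
     * that case (and the deferred partition 0) with a final write. */
    if (val64)
        printf("partition_%d <- 0x%016llx\n", num_queues / 2, val64);
    return 0;
}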
@ -445,10 +437,10 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(part0>>32),
tx_fifo_partitions[0]);
xge_debug_fifo(XGE_TRACE, "fifo partition_0 at: "
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
(unsigned long long)(ulong_t)
tx_fifo_partitions[0],
(unsigned long long) part0);
"0x"XGE_OS_LLXFMT" is: 0x"XGE_OS_LLXFMT,
(unsigned long long)(ulong_t)
tx_fifo_partitions[0],
(unsigned long long) part0);
/*
* Initialization of Tx_PA_CONFIG register to ignore packet
@ -457,9 +449,9 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->tx_pa_cfg);
val64 |= XGE_HAL_TX_PA_CFG_IGNORE_FRM_ERR |
XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
XGE_HAL_TX_PA_CFG_IGNORE_SNAP_OUI |
XGE_HAL_TX_PA_CFG_IGNORE_LLC_CTRL |
XGE_HAL_TX_PA_CFG_IGNORE_L2_ERR;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->tx_pa_cfg);
@ -467,30 +459,30 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
* Assign MSI-X vectors
*/
for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
xge_list_t *item;
xge_hal_channel_t *channel = NULL;
xge_list_t *item;
xge_hal_channel_t *channel = NULL;
if (!hldev->config.fifo.queue[i].configured ||
!hldev->config.fifo.queue[i].intr_vector ||
!hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
continue;
if (!hldev->config.fifo.queue[i].configured ||
!hldev->config.fifo.queue[i].intr_vector ||
!hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
continue;
/* find channel */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t,
item);
if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
tmp->post_qid == i) {
channel = tmp;
break;
}
}
/* find channel */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t,
item);
if (tmp->type == XGE_HAL_CHANNEL_TYPE_FIFO &&
tmp->post_qid == i) {
channel = tmp;
break;
}
}
if (channel) {
xge_hal_channel_msix_set(channel,
hldev->config.fifo.queue[i].intr_vector);
}
if (channel) {
xge_hal_channel_msix_set(channel,
hldev->config.fifo.queue[i].intr_vector);
}
}
xge_debug_fifo(XGE_TRACE, "%s", "fifo channels initialized");
@ -500,23 +492,23 @@ __hal_fifo_hw_initialize(xge_hal_device_h devh)
void
__hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
txdl_priv = __hal_fifo_txdl_priv(txdp);
if (txdl_priv->align_dma_addr != 0) {
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
xge_os_dma_unmap(fifo->channel.pdev,
txdl_priv->align_dma_handle,
txdl_priv->align_dma_addr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE);
txdl_priv->align_dma_addr = 0;
txdl_priv->align_dma_addr = 0;
}
if (txdl_priv->align_vaddr != NULL) {
if (txdl_priv->align_vaddr != NULL) {
xge_os_dma_free(fifo->channel.pdev,
txdl_priv->align_vaddr,
fifo->align_size,
@ -525,13 +517,13 @@ __hal_fifo_dtr_align_free_unmap(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
txdl_priv->align_vaddr = NULL;
}
}
}
xge_hal_status_e
__hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txdl_priv_t *txdl_priv;
xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
@ -540,25 +532,25 @@ __hal_fifo_dtr_align_alloc_map(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
txdl_priv = __hal_fifo_txdl_priv(txdp);
/* allocate alignment DMA-buffer */
txdl_priv->align_vaddr = xge_os_dma_malloc(fifo->channel.pdev,
fifo->align_size,
XGE_OS_DMA_CACHELINE_ALIGNED |
XGE_OS_DMA_STREAMING,
&txdl_priv->align_dma_handle,
&txdl_priv->align_dma_acch);
txdl_priv->align_vaddr = (char *)xge_os_dma_malloc(fifo->channel.pdev,
fifo->align_size,
XGE_OS_DMA_CACHELINE_ALIGNED |
XGE_OS_DMA_STREAMING,
&txdl_priv->align_dma_handle,
&txdl_priv->align_dma_acch);
if (txdl_priv->align_vaddr == NULL) {
return XGE_HAL_ERR_OUT_OF_MEMORY;
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
/* map it */
txdl_priv->align_dma_addr = xge_os_dma_map(fifo->channel.pdev,
txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
txdl_priv->align_dma_handle, txdl_priv->align_vaddr,
fifo->align_size,
XGE_OS_DMA_DIR_TODEVICE, XGE_OS_DMA_STREAMING);
if (txdl_priv->align_dma_addr == XGE_OS_INVALID_DMA_ADDR) {
__hal_fifo_dtr_align_free_unmap(channelh, dtrh);
return XGE_HAL_ERR_OUT_OF_MAPPING;
__hal_fifo_dtr_align_free_unmap(channelh, dtrh);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
return XGE_HAL_OK;

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : hal-mm.c
*
* Description: chipset memory pool object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xge-os-pal.h>
#include <dev/nxge/include/xgehal-mm.h>
#include <dev/nxge/include/xge-debug.h>
@ -45,7 +37,7 @@
*/
xge_hal_status_e
__hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
int *num_allocated)
int *num_allocated)
{
int i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
int n_items = mempool->items_per_memblock;
@ -53,165 +45,165 @@ __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
*num_allocated = 0;
if ((mempool->memblocks_allocated + num_allocate) >
mempool->memblocks_max) {
xge_debug_mm(XGE_ERR, "%s",
"__hal_mempool_grow: can grow anymore");
return XGE_HAL_ERR_OUT_OF_MEMORY;
mempool->memblocks_max) {
xge_debug_mm(XGE_ERR, "%s",
"__hal_mempool_grow: can grow anymore");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
for (i = mempool->memblocks_allocated;
i < mempool->memblocks_allocated + num_allocate; i++) {
int j;
int is_last =
((mempool->memblocks_allocated+num_allocate-1) == i);
xge_hal_mempool_dma_t *dma_object =
mempool->memblocks_dma_arr + i;
void *the_memblock;
int dma_flags;
int j;
int is_last =
((mempool->memblocks_allocated+num_allocate-1) == i);
xge_hal_mempool_dma_t *dma_object =
mempool->memblocks_dma_arr + i;
void *the_memblock;
int dma_flags;
dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
dma_flags = XGE_OS_DMA_CACHELINE_ALIGNED;
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
dma_flags |= XGE_OS_DMA_CONSISTENT;
dma_flags |= XGE_OS_DMA_CONSISTENT;
#else
dma_flags |= XGE_OS_DMA_STREAMING;
dma_flags |= XGE_OS_DMA_STREAMING;
#endif
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
mempool->memblock_size,
dma_flags,
&dma_object->handle,
&dma_object->acc_handle);
if (mempool->memblocks_arr[i] == NULL) {
xge_debug_mm(XGE_ERR,
"memblock[%d]: out of DMA memory", i);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_arr[i],
mempool->memblock_size);
the_memblock = mempool->memblocks_arr[i];
/* allocate DMA-capable memblock */
mempool->memblocks_arr[i] = xge_os_dma_malloc(mempool->pdev,
mempool->memblock_size,
dma_flags,
&dma_object->handle,
&dma_object->acc_handle);
if (mempool->memblocks_arr[i] == NULL) {
xge_debug_mm(XGE_ERR,
"memblock[%d]: out of DMA memory", i);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_arr[i],
mempool->memblock_size);
the_memblock = mempool->memblocks_arr[i];
/* allocate memblock's private part. Each DMA memblock
* has a space allocated for item's private usage upon
* mempool's user request. Each time mempool grows, it will
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
xge_debug_mm(XGE_ERR,
"memblock_priv[%d]: out of virtual memory, "
"requested %d(%d:%d) bytes", i,
mempool->items_priv_size * n_items,
mempool->items_priv_size, n_items);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_priv_arr[i],
mempool->items_priv_size * n_items);
/* allocate memblock's private part. Each DMA memblock
* has a space allocated for item's private usage upon
* mempool's user request. Each time mempool grows, it will
* allocate new memblock and its private part at once.
* This helps to minimize memory usage a lot. */
mempool->memblocks_priv_arr[i] = xge_os_malloc(mempool->pdev,
mempool->items_priv_size * n_items);
if (mempool->memblocks_priv_arr[i] == NULL) {
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
xge_debug_mm(XGE_ERR,
"memblock_priv[%d]: out of virtual memory, "
"requested %d(%d:%d) bytes", i,
mempool->items_priv_size * n_items,
mempool->items_priv_size, n_items);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(mempool->memblocks_priv_arr[i],
mempool->items_priv_size * n_items);
/* map memblock to physical memory */
dma_object->addr = xge_os_dma_map(mempool->pdev,
dma_object->handle,
the_memblock,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL,
/* map memblock to physical memory */
dma_object->addr = xge_os_dma_map(mempool->pdev,
dma_object->handle,
the_memblock,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL,
#ifdef XGE_HAL_DMA_DTR_CONSISTENT
XGE_OS_DMA_CONSISTENT
XGE_OS_DMA_CONSISTENT
#else
XGE_OS_DMA_STREAMING
XGE_OS_DMA_STREAMING
#endif
);
if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
);
if (dma_object->addr == XGE_OS_INVALID_DMA_ADDR) {
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
/* fill the items hash array */
for (j=0; j<n_items; j++) {
int index = i*n_items + j;
/* fill the items hash array */
for (j=0; j<n_items; j++) {
int index = i*n_items + j;
if (first_time && index >= mempool->items_initial) {
break;
}
if (first_time && index >= mempool->items_initial) {
break;
}
mempool->items_arr[index] =
((char *)the_memblock + j*mempool->item_size);
mempool->items_arr[index] =
((char *)the_memblock + j*mempool->item_size);
/* let caller to do more job on each item */
if (mempool->item_func_alloc != NULL) {
xge_hal_status_e status;
/* let caller to do more job on each item */
if (mempool->item_func_alloc != NULL) {
xge_hal_status_e status;
if ((status = mempool->item_func_alloc(
mempool,
the_memblock,
i,
dma_object,
mempool->items_arr[index],
index,
is_last,
mempool->userdata)) != XGE_HAL_OK) {
if ((status = mempool->item_func_alloc(
mempool,
the_memblock,
i,
dma_object,
mempool->items_arr[index],
index,
is_last,
mempool->userdata)) != XGE_HAL_OK) {
if (mempool->item_func_free != NULL) {
int k;
if (mempool->item_func_free != NULL) {
int k;
for (k=0; k<j; k++) {
for (k=0; k<j; k++) {
index =i*n_items + k;
index =i*n_items + k;
(void)mempool->item_func_free(
mempool, the_memblock,
i, dma_object,
mempool->items_arr[index],
index, is_last,
mempool->userdata);
}
}
(void)mempool->item_func_free(
mempool, the_memblock,
i, dma_object,
mempool->items_arr[index],
index, is_last,
mempool->userdata);
}
}
xge_os_free(mempool->pdev,
mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_unmap(mempool->pdev,
dma_object->handle,
dma_object->addr,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return status;
}
}
xge_os_free(mempool->pdev,
mempool->memblocks_priv_arr[i],
mempool->items_priv_size *
n_items);
xge_os_dma_unmap(mempool->pdev,
dma_object->handle,
dma_object->addr,
mempool->memblock_size,
XGE_OS_DMA_DIR_BIDIRECTIONAL);
xge_os_dma_free(mempool->pdev,
the_memblock,
mempool->memblock_size,
&dma_object->acc_handle,
&dma_object->handle);
return status;
}
}
mempool->items_current = index + 1;
}
mempool->items_current = index + 1;
}
xge_debug_mm(XGE_TRACE,
"memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
"dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
(unsigned long long)(ulong_t)mempool->memblocks_arr[i],
(unsigned long long)dma_object->addr);
xge_debug_mm(XGE_TRACE,
"memblock%d: allocated %dk, vaddr 0x"XGE_OS_LLXFMT", "
"dma_addr 0x"XGE_OS_LLXFMT, i, mempool->memblock_size / 1024,
(unsigned long long)(ulong_t)mempool->memblocks_arr[i],
(unsigned long long)dma_object->addr);
(*num_allocated)++;
if (first_time && mempool->items_current ==
mempool->items_initial) {
break;
}
}
/* increment actual number of allocated memblocks */
@ -236,9 +228,9 @@ __hal_mempool_grow(xge_hal_mempool_t *mempool, int num_allocate,
*/
xge_hal_mempool_t*
__hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
int items_priv_size, int items_initial, int items_max,
xge_hal_mempool_item_f item_func_alloc,
xge_hal_mempool_item_f item_func_free, void *userdata)
{
xge_hal_status_e status;
int memblocks_to_allocate;
@ -246,114 +238,114 @@ __hal_mempool_create(pci_dev_h pdev, int memblock_size, int item_size,
int allocated;
if (memblock_size < item_size) {
xge_debug_mm(XGE_ERR,
"memblock_size %d < item_size %d: misconfiguration",
memblock_size, item_size);
return NULL;
}
mempool = (xge_hal_mempool_t *) \
xge_os_malloc(pdev, sizeof(xge_hal_mempool_t));
if (mempool == NULL) {
xge_debug_mm(XGE_ERR, "mempool allocation failure");
return NULL;
xge_debug_mm(XGE_ERR, "mempool allocation failure");
return NULL;
}
xge_os_memzero(mempool, sizeof(xge_hal_mempool_t));
mempool->pdev = pdev;
mempool->memblock_size = memblock_size;
mempool->items_max = items_max;
mempool->items_initial = items_initial;
mempool->item_size = item_size;
mempool->items_priv_size = items_priv_size;
mempool->item_func_alloc = item_func_alloc;
mempool->item_func_free = item_func_free;
mempool->userdata = userdata;
mempool->memblocks_allocated = 0;
mempool->items_per_memblock = memblock_size / item_size;
mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
/* allocate array of memblocks */
mempool->memblocks_arr = (void ** ) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "memblocks_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_arr,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of private parts of items per memblocks */
mempool->memblocks_priv_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->memblocks_max);
if (mempool->memblocks_priv_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "memblocks_priv_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_priv_arr,
sizeof(void*) * mempool->memblocks_max);
/* allocate array of memblocks DMA objects */
mempool->memblocks_dma_arr =
(xge_hal_mempool_dma_t *) xge_os_malloc(mempool->pdev,
sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
if (mempool->memblocks_dma_arr == NULL) {
xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "memblocks_dma_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->memblocks_dma_arr,
sizeof(xge_hal_mempool_dma_t) * mempool->memblocks_max);
/* allocate hash array of items */
mempool->items_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->items_max);
if (mempool->items_arr == NULL) {
xge_debug_mm(XGE_ERR, "items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->items_arr, sizeof(void *) * mempool->items_max);
mempool->shadow_items_arr = (void **) xge_os_malloc(mempool->pdev,
sizeof(void*) * mempool->items_max);
if (mempool->shadow_items_arr == NULL) {
xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "shadow_items_arr allocation failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_os_memzero(mempool->shadow_items_arr,
sizeof(void *) * mempool->items_max);
/* calculate initial number of memblocks */
memblocks_to_allocate = (mempool->items_initial +
mempool->items_per_memblock - 1) /
mempool->items_per_memblock;
xge_debug_mm(XGE_TRACE, "allocating %d memblocks, "
"%d items per memblock", memblocks_to_allocate,
mempool->items_per_memblock);
"%d items per memblock", memblocks_to_allocate,
mempool->items_per_memblock);
/* pre-allocate the mempool */
status = __hal_mempool_grow(mempool, memblocks_to_allocate, &allocated);
xge_os_memcpy(mempool->shadow_items_arr, mempool->items_arr,
sizeof(void*) * mempool->items_max);
if (status != XGE_HAL_OK) {
xge_debug_mm(XGE_ERR, "mempool_grow failure");
__hal_mempool_destroy(mempool);
return NULL;
xge_debug_mm(XGE_ERR, "mempool_grow failure");
__hal_mempool_destroy(mempool);
return NULL;
}
xge_debug_mm(XGE_TRACE,
"total: allocated %dk of DMA-capable memory",
mempool->memblock_size * allocated / 1024);
"total: allocated %dk of DMA-capable memory",
mempool->memblock_size * allocated / 1024);
return mempool;
}
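/*
 * Illustrative sketch only (not part of the submitted driver): one way a
 * HAL client could use __hal_mempool_create()/__hal_mempool_destroy() with
 * the item-callback signature shown above.  The block/item sizes and the
 * example_* names below are hypothetical placeholders.
 */
static xge_hal_status_e
example_item_init(xge_hal_mempool_h mempoolh, void *memblock,
	int memblock_index, xge_hal_mempool_dma_t *dma_object,
	void *item, int index, int is_last, void *userdata)
{
	/* a real callback would pre-format the item here (e.g. descriptors) */
	return XGE_HAL_OK;
}

static xge_hal_mempool_t *
example_pool_create(pci_dev_h pdev)
{
	/* 64KB DMA memblocks carved into 4KB items; 64 up front, 256 max */
	return __hal_mempool_create(pdev, 65536, 4096,
		0 /* no per-item private area */, 64, 256,
		example_item_init, NULL /* nothing to free */, NULL);
}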
@ -367,69 +359,69 @@ __hal_mempool_destroy(xge_hal_mempool_t *mempool)
int i, j;
for (i=0; i<mempool->memblocks_allocated; i++) {
xge_hal_mempool_dma_t *dma_object;
xge_assert(mempool->memblocks_arr[i]);
xge_assert(mempool->memblocks_dma_arr + i);
dma_object = mempool->memblocks_dma_arr + i;
for (j=0; j<mempool->items_per_memblock; j++) {
int index = i*mempool->items_per_memblock + j;
/* to skip last partially filled(if any) memblock */
if (index >= mempool->items_current) {
break;
}
/* let caller to do more job on each item */
if (mempool->item_func_free != NULL) {
mempool->item_func_free(mempool,
mempool->memblocks_arr[i],
i, dma_object,
mempool->shadow_items_arr[index],
index, /* unused */ -1,
mempool->userdata);
}
}
xge_os_dma_unmap(mempool->pdev,
dma_object->handle, dma_object->addr,
mempool->memblock_size, XGE_OS_DMA_DIR_BIDIRECTIONAL);
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr[i],
mempool->items_priv_size * mempool->items_per_memblock);
xge_os_dma_free(mempool->pdev, mempool->memblocks_arr[i],
mempool->memblock_size, &dma_object->acc_handle,
&dma_object->handle);
}
if (mempool->items_arr) {
xge_os_free(mempool->pdev, mempool->items_arr, sizeof(void*) *
mempool->items_max);
}
if (mempool->shadow_items_arr) {
xge_os_free(mempool->pdev, mempool->shadow_items_arr,
sizeof(void*) * mempool->items_max);
}
if (mempool->memblocks_dma_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_dma_arr,
sizeof(xge_hal_mempool_dma_t) *
mempool->memblocks_max);
}
if (mempool->memblocks_priv_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_priv_arr,
sizeof(void*) * mempool->memblocks_max);
}
if (mempool->memblocks_arr) {
xge_os_free(mempool->pdev, mempool->memblocks_arr,
sizeof(void*) * mempool->memblocks_max);
}
xge_os_free(mempool->pdev, mempool, sizeof(xge_hal_mempool_t));

File diff suppressed because it is too large

View File

@ -26,21 +26,13 @@
* $FreeBSD$
*/
/*
* FileName : hal-ring.c
*
* Description: Rx ring object implementation
*
* Created: 10 May 2004
*/
#include <dev/nxge/include/xgehal-ring.h>
#include <dev/nxge/include/xgehal-device.h>
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
static ptrdiff_t
__hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
void *item)
{
int memblock_idx;
void *memblock;
@ -57,7 +49,7 @@ __hal_ring_item_dma_offset(xge_hal_mempool_h mempoolh,
static dma_addr_t
__hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
pci_dma_h *dma_handle)
{
int memblock_idx;
void *memblock;
@ -69,12 +61,12 @@ __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
/* get owner memblock by memblock index */
memblock = __hal_mempool_memblock((xge_hal_mempool_t *) mempoolh,
memblock_idx);
/* get memblock DMA object by memblock index */
memblock_dma_object =
__hal_mempool_memblock_dma((xge_hal_mempool_t *) mempoolh,
memblock_idx);
/* calculate offset in the memblock of this item */
dma_item_offset = (char*)item - (char*)memblock;
@ -86,7 +78,7 @@ __hal_ring_item_dma_addr(xge_hal_mempool_h mempoolh, void *item,
static void
__hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
xge_hal_ring_t *ring, int from, int to)
{
xge_hal_ring_block_t *to_item, *from_item;
dma_addr_t to_dma, from_dma;
@ -94,12 +86,12 @@ __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
/* get "from" RxD block */
from_item = (xge_hal_ring_block_t *)
__hal_mempool_item((xge_hal_mempool_t *) mempoolh, from);
xge_assert(from_item);
/* get "to" RxD block */
to_item = (xge_hal_ring_block_t *)
__hal_mempool_item((xge_hal_mempool_t *) mempoolh, to);
xge_assert(to_item);
/* return address of the beginning of previous RxD block */
@ -111,33 +103,33 @@ __hal_ring_rxdblock_link(xge_hal_mempool_h mempoolh,
/* return "from" RxD block's DMA start address */
from_dma =
__hal_ring_item_dma_addr(mempoolh, from_item, &from_dma_handle);
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
/* we must sync "from" RxD block, so hardware will see it */
xge_os_dma_sync(ring->channel.pdev,
from_dma_handle,
from_dma + XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
__hal_ring_item_dma_offset(mempoolh, from_item) +
XGE_HAL_RING_NEXT_BLOCK_POINTER_OFFSET,
sizeof(u64),
XGE_OS_DMA_DIR_TODEVICE);
#endif
xge_debug_ring(XGE_TRACE, "block%d:0x"XGE_OS_LLXFMT" => block%d:0x"XGE_OS_LLXFMT,
from, (unsigned long long)from_dma, to,
(unsigned long long)to_dma);
}
static xge_hal_status_e
__hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
void *memblock,
int memblock_index,
xge_hal_mempool_dma_t *dma_object,
void *item,
int index,
int is_last,
void *userdata)
{
int i;
xge_hal_ring_t *ring = (xge_hal_ring_t *)userdata;
@ -148,71 +140,71 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
/* format rxds array */
for (i=ring->rxds_per_block-1; i>=0; i--) {
void *rxdblock_priv;
xge_hal_ring_rxd_priv_t *rxd_priv;
xge_hal_ring_rxd_1_t *rxdp;
int reserve_index = index * ring->rxds_per_block + i;
int memblock_item_idx;
ring->reserved_rxds_arr[reserve_index] = (char *)item +
(ring->rxds_per_block - 1 - i) * ring->rxd_size;
/* Note: memblock_item_idx is index of the item within
* the memblock. For instance, in case of three RxD-blocks
* per memblock this value can be 0,1 or 2. */
rxdblock_priv =
__hal_mempool_item_priv((xge_hal_mempool_t *) mempoolh,
memblock_index, item,
&memblock_item_idx);
rxdp = (xge_hal_ring_rxd_1_t *)
ring->reserved_rxds_arr[reserve_index];
rxd_priv = (xge_hal_ring_rxd_priv_t *) (void *)
((char*)rxdblock_priv + ring->rxd_priv_size * i);
/* pre-format per-RxD Ring's private */
rxd_priv->dma_offset = (char*)rxdp - (char*)memblock;
rxd_priv->dma_addr = dma_object->addr + rxd_priv->dma_offset;
rxd_priv->dma_handle = dma_object->handle;
#ifdef XGE_DEBUG_ASSERT
rxd_priv->dma_object = dma_object;
#endif
/* pre-format Host_Control */
#if defined(XGE_HAL_USE_5B_MODE)
if (ring->buffer_mode == XGE_HAL_RING_QUEUE_BUFFER_MODE_5) {
xge_hal_ring_rxd_5_t *rxdp_5 = (xge_hal_ring_rxd_5_t *)rxdp;
#if defined(XGE_OS_PLATFORM_64BIT)
xge_assert(memblock_index <= 0xFFFF);
xge_assert(i <= 0xFFFF);
/* store memblock's index */
rxdp_5->host_control = (u32)memblock_index << 16;
/* store index of memblock's private */
rxdp_5->host_control |= (u32)(memblock_item_idx *
ring->rxds_per_block + i);
#else
/* 32-bit case */
rxdp_5->host_control = (u32)rxd_priv;
#endif
} else {
/* 1b and 3b modes */
rxdp->host_control = (u64)(ulong_t)rxd_priv;
}
#else
/* 1b and 3b modes */
rxdp->host_control = (u64)(ulong_t)rxd_priv;
#endif
}
__hal_ring_block_memblock_idx_set((xge_hal_ring_block_t *) item, memblock_index);
if (is_last) {
/* link last one with first one */
__hal_ring_rxdblock_link(mempoolh, ring, 0, index);
}
if (index > 0 ) {
/* link this RxD block with previous one */
__hal_ring_rxdblock_link(mempoolh, ring, index, index-1);
}
return XGE_HAL_OK;
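/*
 * Sketch (not driver code): how the 5-buffer-mode host_control packing
 * above -- memblock index in the upper 16 bits, the RxD's index within the
 * memblock's private area in the lower 16 bits -- can be unpacked again on
 * the completion path.  The function and variable names are illustrative.
 */
static void
example_decode_host_control(u32 host_control,
	int *memblock_index, int *priv_index)
{
	*memblock_index = (int)(host_control >> 16);    /* upper 16 bits */
	*priv_index     = (int)(host_control & 0xFFFF); /* lower 16 bits */
}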
@ -220,30 +212,30 @@ __hal_ring_mempool_item_alloc(xge_hal_mempool_h mempoolh,
xge_hal_status_e
__hal_ring_initial_replenish(xge_hal_channel_t *channel,
xge_hal_channel_reopen_e reopen)
{
xge_hal_dtr_h dtr;
xge_hal_dtr_h dtr = NULL;
while (xge_hal_channel_dtr_count(channel) > 0) {
xge_hal_status_e status;
status = xge_hal_ring_dtr_reserve(channel, &dtr);
xge_assert(status == XGE_HAL_OK);
if (channel->dtr_init) {
status = channel->dtr_init(channel,
dtr, channel->reserve_length,
channel->userdata,
reopen);
if (status != XGE_HAL_OK) {
xge_hal_ring_dtr_free(channel, dtr);
xge_hal_channel_abort(channel,
XGE_HAL_CHANNEL_OC_NORMAL);
return status;
}
}
xge_hal_ring_dtr_post(channel, dtr);
}
return XGE_HAL_OK;
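/*
 * Sketch only: the shape of a ULD dtr_init callback as it is invoked from
 * the replenish loop above (channel handle, descriptor, an integer cookie
 * -- reserve_length in the call above -- the ULD's userdata and the reopen
 * flag).  The function name and no-op body are assumptions for illustration.
 */
static xge_hal_status_e
example_dtr_init(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
	int index, void *userdata, xge_hal_channel_reopen_e reopen)
{
	/* a real ULD would attach a receive buffer to the descriptor here */
	return XGE_HAL_OK;
}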
@ -282,7 +274,7 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
ring->rxd_size = XGE_HAL_RING_RXD_SIZEOF(queue->buffer_mode);
ring->rxd_priv_size =
sizeof(xge_hal_ring_rxd_priv_t) + attr->per_dtr_space;
/* how many RxDs can fit into one block. Depends on configured
* buffer_mode. */
@ -292,44 +284,44 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
ring->reserved_rxds_arr = (void **) xge_os_malloc(ring->channel.pdev,
sizeof(void*) * queue->max * ring->rxds_per_block);
if (ring->reserved_rxds_arr == NULL) {
__hal_ring_close(channelh);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
ring->mempool = __hal_mempool_create(
hldev->pdev,
ring->config->memblock_size,
XGE_HAL_RING_RXDBLOCK_SIZE,
ring->rxdblock_priv_size,
queue->initial, queue->max,
__hal_ring_mempool_item_alloc,
NULL, /* nothing to free */
ring);
if (ring->mempool == NULL) {
__hal_ring_close(channelh);
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
status = __hal_channel_initialize(channelh,
attr,
ring->reserved_rxds_arr,
queue->initial * ring->rxds_per_block,
queue->max * ring->rxds_per_block,
0 /* no threshold for ring! */);
if (status != XGE_HAL_OK) {
__hal_ring_close(channelh);
return status;
}
/* sanity check that everything formatted ok */
xge_assert(ring->reserved_rxds_arr[0] ==
(char *)ring->mempool->items_arr[0] +
(ring->rxds_per_block * ring->rxd_size - ring->rxd_size));
/* Note:
* Specifying dtr_init callback means two things:
* 1) dtrs need to be initialized by ULD at channel-open time;
* 2) dtrs need to be posted at channel-open time
@ -337,13 +329,13 @@ __hal_ring_open(xge_hal_channel_h channelh, xge_hal_channel_attr_t *attr)
* Currently we don't have a case when the 1) is done without the 2).
*/
if (ring->channel.dtr_init) {
if ((status = __hal_ring_initial_replenish (
(xge_hal_channel_t *) channelh,
XGE_HAL_CHANNEL_OC_NORMAL) )
!= XGE_HAL_OK) {
__hal_ring_close(channelh);
return status;
}
}
/* initial replenish will increment the counter in its post() routine,
@ -359,7 +351,7 @@ __hal_ring_close(xge_hal_channel_h channelh)
xge_hal_ring_t *ring = (xge_hal_ring_t *)channelh;
xge_hal_ring_queue_t *queue;
#if defined(XGE_HAL_RX_MULTI_RESERVE)||defined(XGE_HAL_RX_MULTI_RESERVE_IRQ)||\
defined(XGE_HAL_RX_MULTI_POST) || defined(XGE_HAL_RX_MULTI_POST_IRQ)
xge_hal_device_t *hldev = (xge_hal_device_t *)ring->channel.devh;
#endif
@ -368,13 +360,13 @@ __hal_ring_close(xge_hal_channel_h channelh)
queue = &ring->config->queue[ring->channel.post_qid];
if (ring->mempool) {
__hal_mempool_destroy(ring->mempool);
}
if (ring->reserved_rxds_arr) {
xge_os_free(ring->channel.pdev,
ring->reserved_rxds_arr,
sizeof(void*) * queue->max * ring->rxds_per_block);
}
__hal_channel_terminate(channelh);
@ -406,12 +398,12 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh)
xge_assert(ring);
xge_assert(ring->channel.pdev);
bar0 = (xge_hal_pci_bar0_t *) (void *)
((xge_hal_device_t *)ring->channel.devh)->bar0;
queue = &ring->config->queue[ring->channel.post_qid];
xge_assert(queue->buffer_mode == 1 ||
queue->buffer_mode == 3 ||
queue->buffer_mode == 5);
/* last block in fact becomes first. This is just the way it
* is filled up and linked by item_alloc() */
@ -419,43 +411,43 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh)
block_num = queue->initial;
first_block = __hal_mempool_item(ring->mempool, block_num - 1);
val64 = __hal_ring_item_dma_addr(ring->mempool,
first_block, &dma_handle);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_rxd0_n[ring->channel.post_qid]);
xge_debug_ring(XGE_TRACE, "ring%d PRC DMA addr 0x"XGE_OS_LLXFMT" initialized",
ring->channel.post_qid, (unsigned long long)val64);
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0, &bar0->prc_ctrl_n[ring->channel.post_qid]);
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
!queue->rth_en) {
val64 |= XGE_HAL_PRC_CTRL_RTH_DISABLE;
}
val64 |= XGE_HAL_PRC_CTRL_RC_ENABLED;
val64 |= vBIT((queue->buffer_mode >> 1),14,2);/* 1,3 or 5 => 0,1 or 2 */
val64 &= ~XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
val64 |= XGE_HAL_PRC_CTRL_RXD_BACKOFF_INTERVAL(
(hldev->config.pci_freq_mherz * queue->backoff_interval_us));
/* Beware: no snoop by the bridge if (no_snoop_bits) */
val64 |= XGE_HAL_PRC_CTRL_NO_SNOOP(queue->no_snoop_bits);
/* Herc: always use group_reads */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
val64 |= XGE_HAL_PRC_CTRL_GROUP_READS;
if (hldev->config.bimodal_interrupts)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
val64 |= XGE_HAL_PRC_CTRL_BIMODAL_INTERRUPT;
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
/* Configure Receive Protocol Assist */
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0, &bar0->rx_pa_cfg);
val64 |= XGE_HAL_RX_PA_CFG_SCATTER_MODE(ring->config->scatter_mode);
val64 |= (XGE_HAL_RX_PA_CFG_IGNORE_SNAP_OUI | XGE_HAL_RX_PA_CFG_IGNORE_LLC_CTRL);
/* Clean STRIP_VLAN_TAG bit and set as config from upper layer */
@ -463,10 +455,10 @@ __hal_ring_prc_enable(xge_hal_channel_h channelh)
val64 |= XGE_HAL_RX_PA_CFG_STRIP_VLAN_TAG_MODE(ring->config->strip_vlan_tag);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->rx_pa_cfg);
xge_debug_ring(XGE_TRACE, "ring%d enabled in buffer_mode %d",
ring->channel.post_qid, queue->buffer_mode);
}
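/*
 * Worked example (not driver code) of the buffer-mode field written above:
 * vBIT((buffer_mode >> 1), 14, 2) maps the configured 1/3/5 buffer modes to
 * the 2-bit values 0/1/2, as the source comment says.  The helper below
 * simply restates that arithmetic; its name is illustrative.
 */
static u64
example_prc_buffer_mode_bits(int buffer_mode)
{
	/* 1 >> 1 == 0, 3 >> 1 == 1, 5 >> 1 == 2 */
	return vBIT((buffer_mode >> 1), 14, 2);
}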
void
@ -479,14 +471,14 @@ __hal_ring_prc_disable(xge_hal_channel_h channelh)
xge_assert(ring);
xge_assert(ring->channel.pdev);
bar0 = (xge_hal_pci_bar0_t *) (void *)
((xge_hal_device_t *)ring->channel.devh)->bar0;
val64 = xge_os_pio_mem_read64(ring->channel.pdev,
ring->channel.regh0,
&bar0->prc_ctrl_n[ring->channel.post_qid]);
val64 &= ~((u64) XGE_HAL_PRC_CTRL_RC_ENABLED);
xge_os_pio_mem_write64(ring->channel.pdev, ring->channel.regh0,
val64, &bar0->prc_ctrl_n[ring->channel.post_qid]);
}
void
@ -501,78 +493,78 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
val64 = 0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
val64 |= vBIT(hldev->config.ring.queue[i].priority,
(5 + (i * 8)), 3);
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_priority);
xge_debug_ring(XGE_TRACE, "Rings priority configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
/* Configuring ring queues according to per-ring configuration */
val64 = 0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
val64 |= vBIT(hldev->config.ring.queue[i].dram_size_mb,(i*8),8);
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rx_queue_cfg);
xge_debug_ring(XGE_TRACE, "DRAM configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
if (!hldev->config.rts_qos_en &&
!hldev->config.rts_port_en &&
!hldev->config.rts_mac_en) {
/*
* Activate default (QoS-based) Rx steering
*/
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->rts_qos_steering);
for (j = 0; j < 8 /* QoS max */; j++)
{
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++)
{
if (!hldev->config.ring.queue[i].configured)
continue;
if (!hldev->config.ring.queue[i].rth_en)
val64 |= (BIT(i) >> (j*8));
}
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->rts_qos_steering);
xge_debug_ring(XGE_TRACE, "QoS steering configured to 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
}
/* Note: If a queue does not exist, it should be assigned a maximum
* length of zero. Otherwise, packet loss could occur.
* P. 4-4 User guide.
*
* All configured rings will be properly set at device open time
* by utilizing device_mtu_set() API call. */
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (hldev->config.ring.queue[i].configured)
continue;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
&bar0->rts_frm_len_n[i]);
}
#ifdef XGE_HAL_HERC_EMULATION
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
((u8 *)bar0 + 0x2e60)); /* mc_rldram_mrs_herc */
val64 |= 0x0000000000010000;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
((u8 *)bar0 + 0x2e60));
val64 |= 0x003a000000000000;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
((u8 *)bar0 + 0x2e40)); /* mc_rldram_ref_herc */
xge_os_mdelay(2000);
#endif
@ -580,7 +572,7 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->mc_rldram_mrs);
val64 |= XGE_HAL_MC_RLDRAM_QUEUE_SIZE_ENABLE |
XGE_HAL_MC_RLDRAM_MRS_ENABLE;
__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
&bar0->mc_rldram_mrs);
xge_os_wmb();
@ -592,50 +584,50 @@ __hal_ring_hw_initialize(xge_hal_device_h devh)
/* Temporary fixes for Herc RLDRAM */
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
val64 = XGE_HAL_MC_RLDRAM_SET_REF_PERIOD(0x0279);
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->mc_rldram_ref_per_herc);
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->mc_rldram_mrs_herc);
xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
xge_debug_ring(XGE_TRACE, "default mc_rldram_mrs_herc 0x"XGE_OS_LLXFMT,
(unsigned long long)val64);
val64 = 0x0003570003010300ULL;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->mc_rldram_mrs_herc);
xge_os_mdelay(1);
}
/*
* Assign MSI-X vectors
*/
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
xge_list_t *item;
xge_hal_channel_t *channel = NULL;
if (!hldev->config.ring.queue[i].configured ||
!hldev->config.ring.queue[i].intr_vector ||
!hldev->config.intr_mode != XGE_HAL_INTR_MODE_MSIX)
continue;
/* find channel */
xge_list_for_each(item, &hldev->free_channels) {
xge_hal_channel_t *tmp;
tmp = xge_container_of(item, xge_hal_channel_t,
item);
if (tmp->type == XGE_HAL_CHANNEL_TYPE_RING &&
tmp->post_qid == i) {
channel = tmp;
break;
}
}
if (channel) {
xge_hal_channel_msix_set(channel,
hldev->config.ring.queue[i].intr_vector);
}
}
xge_debug_ring(XGE_TRACE, "%s", "ring channels initialized");
@ -649,21 +641,21 @@ __hal_ring_mtu_set(xge_hal_device_h devh, int new_frmlen)
xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
if (!hldev->config.ring.queue[i].configured)
continue;
if (hldev->config.ring.queue[i].max_frm_len !=
XGE_HAL_RING_USE_MTU) {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_MAC_RTS_FRM_LEN_SET(
hldev->config.ring.queue[i].max_frm_len),
&bar0->rts_frm_len_n[i]);
} else {
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_MAC_RTS_FRM_LEN_SET(new_frmlen),
&bar0->rts_frm_len_n[i]);
}
}
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
XGE_HAL_RMAC_MAX_PYLD_LEN(new_frmlen),
&bar0->rmac_max_pyld_len);
}

View File

@ -26,14 +26,6 @@
* $FreeBSD$
*/
/*
* FileName : xgehal-stats.c
*
* Description: statistics object implementation
*
* Created: 2 June 2004
*/
#include <dev/nxge/include/xgehal-stats.h>
#include <dev/nxge/include/xgehal-device.h>
@ -61,132 +53,132 @@ __hal_stats_initialize (xge_hal_stats_t *stats, xge_hal_device_h devh)
dma_flags |= XGE_OS_DMA_STREAMING;
#endif
if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
stats->hw_info =
(xge_hal_stats_hw_info_t *) xge_os_dma_malloc(
hldev->pdev,
sizeof(xge_hal_stats_hw_info_t),
dma_flags,
&stats->hw_info_dmah,
&stats->hw_info_dma_acch);
if (stats->hw_info == NULL) {
xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(stats->hw_info,
sizeof(xge_hal_stats_hw_info_t));
xge_os_memzero(&stats->hw_info_saved,
sizeof(xge_hal_stats_hw_info_t));
xge_os_memzero(&stats->hw_info_latest,
sizeof(xge_hal_stats_hw_info_t));
stats->dma_addr = xge_os_dma_map(hldev->pdev,
stats->hw_info_dmah,
stats->hw_info,
sizeof(xge_hal_stats_hw_info_t),
XGE_OS_DMA_DIR_FROMDEVICE,
XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
XGE_OS_DMA_CONSISTENT
#else
XGE_OS_DMA_STREAMING
#endif
);
if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
xge_debug_stats(XGE_ERR,
"can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
(unsigned long long)(ulong_t)stats->hw_info);
xge_os_dma_free(hldev->pdev,
stats->hw_info,
sizeof(xge_hal_stats_hw_info_t),
&stats->hw_info_dma_acch,
&stats->hw_info_dmah);
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
}
else {
stats->pcim_info_saved =
(xge_hal_stats_pcim_info_t *)xge_os_malloc(
hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
if (stats->pcim_info_saved == NULL) {
xge_debug_stats(XGE_ERR, "%s", "can not alloc");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
stats->pcim_info_latest =
(xge_hal_stats_pcim_info_t *)xge_os_malloc(
hldev->pdev, sizeof(xge_hal_stats_pcim_info_t));
if (stats->pcim_info_latest == NULL) {
xge_os_free(hldev->pdev, stats->pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_debug_stats(XGE_ERR, "%s", "can not alloc");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
stats->pcim_info =
(xge_hal_stats_pcim_info_t *) xge_os_dma_malloc(
hldev->pdev,
sizeof(xge_hal_stats_pcim_info_t),
dma_flags,
&stats->hw_info_dmah,
&stats->hw_info_dma_acch);
if (stats->pcim_info == NULL) {
xge_os_free(hldev->pdev, stats->pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_free(hldev->pdev, stats->pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
xge_debug_stats(XGE_ERR, "%s", "can not DMA alloc");
return XGE_HAL_ERR_OUT_OF_MEMORY;
}
xge_os_memzero(stats->pcim_info,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_memzero(stats->pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_memzero(stats->pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
stats->dma_addr = xge_os_dma_map(hldev->pdev,
stats->hw_info_dmah,
stats->pcim_info,
sizeof(xge_hal_stats_pcim_info_t),
XGE_OS_DMA_DIR_FROMDEVICE,
XGE_OS_DMA_CACHELINE_ALIGNED |
#ifdef XGE_HAL_DMA_STATS_CONSISTENT
XGE_OS_DMA_CONSISTENT
#else
XGE_OS_DMA_STREAMING
#endif
);
if (stats->dma_addr == XGE_OS_INVALID_DMA_ADDR) {
xge_debug_stats(XGE_ERR,
"can not map vaddr 0x"XGE_OS_LLXFMT" to DMA",
(unsigned long long)(ulong_t)stats->hw_info);
xge_os_dma_free(hldev->pdev,
stats->pcim_info,
sizeof(xge_hal_stats_pcim_info_t),
&stats->hw_info_dma_acch,
&stats->hw_info_dmah);
xge_os_free(hldev->pdev, stats->pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_free(hldev->pdev, stats->pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
return XGE_HAL_ERR_OUT_OF_MAPPING;
}
}
stats->devh = devh;
xge_os_memzero(&stats->sw_dev_info_stats,
sizeof(xge_hal_stats_device_info_t));
stats->is_initialized = 1;
@ -199,19 +191,19 @@ __hal_stats_save (xge_hal_stats_t *stats)
xge_hal_device_t *hldev = (xge_hal_device_t*)stats->devh;
if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
xge_hal_stats_hw_info_t *latest;
(void) xge_hal_stats_hw(stats->devh, &latest);
xge_os_memcpy(&stats->hw_info_saved, stats->hw_info,
sizeof(xge_hal_stats_hw_info_t));
} else {
xge_hal_stats_pcim_info_t *latest;
(void) xge_hal_stats_pcim(stats->devh, &latest);
xge_os_memcpy(stats->pcim_info_saved, stats->pcim_info,
sizeof(xge_hal_stats_pcim_info_t));
}
}
@ -237,16 +229,16 @@ __hal_stats_disable (xge_hal_stats_t *stats)
bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->stat_cfg);
val64 &= ~XGE_HAL_STAT_CFG_STAT_EN;
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
&bar0->stat_cfg);
/* flush the write */
(void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
&bar0->stat_cfg);
xge_debug_stats(XGE_TRACE, "stats disabled at 0x"XGE_OS_LLXFMT,
(unsigned long long)stats->dma_addr);
stats->is_enabled = 0;
}
@ -268,35 +260,35 @@ __hal_stats_terminate (xge_hal_stats_t *stats)
xge_assert(hldev);
xge_assert(stats->is_initialized);
if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
xge_os_dma_unmap(hldev->pdev,
stats->hw_info_dmah,
stats->dma_addr,
sizeof(xge_hal_stats_hw_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
xge_os_dma_free(hldev->pdev,
stats->hw_info,
sizeof(xge_hal_stats_hw_info_t),
&stats->hw_info_dma_acch,
&stats->hw_info_dmah);
} else {
xge_os_dma_unmap(hldev->pdev,
stats->hw_info_dmah,
stats->dma_addr,
sizeof(xge_hal_stats_pcim_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
xge_os_dma_free(hldev->pdev,
stats->pcim_info,
sizeof(xge_hal_stats_pcim_info_t),
&stats->hw_info_dma_acch,
&stats->hw_info_dmah);
xge_os_free(hldev->pdev, stats->pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_free(hldev->pdev, stats->pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
}
@ -333,13 +325,13 @@ __hal_stats_enable (xge_hal_stats_t *stats)
* For Titan stat_addr offset == 0x09d8, and stat_cfg offset == 0x09d0
*/
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
stats->dma_addr, &bar0->stat_addr);
refresh_time_pci_clocks = XGE_HAL_XENA_PER_SEC *
hldev->config.stats_refresh_time_sec;
refresh_time_pci_clocks =
__hal_fix_time_ival_herc(hldev,
refresh_time_pci_clocks);
#ifdef XGE_HAL_HERC_EMULATION
/*
@ -351,18 +343,18 @@ __hal_stats_enable (xge_hal_stats_t *stats)
*/
val64 = (0x20C | XGE_HAL_STAT_CFG_STAT_RO |
XGE_HAL_STAT_CFG_STAT_EN);
#else
val64 = XGE_HAL_SET_UPDT_PERIOD(refresh_time_pci_clocks) |
XGE_HAL_STAT_CFG_STAT_RO |
XGE_HAL_STAT_CFG_STAT_EN;
#endif
xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
val64, &bar0->stat_cfg);
xge_debug_stats(XGE_TRACE, "stats enabled at 0x"XGE_OS_LLXFMT,
(unsigned long long)stats->dma_addr);
stats->is_enabled = 1;
}
@ -377,133 +369,133 @@ __hal_stats_pcim_update_latest(xge_hal_device_h devh)
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
int i;
#define set_latest_stat_link_cnt(_link, _p) \
hldev->stats.pcim_info_latest->link_info[_link]._p = \
((hldev->stats.pcim_info->link_info[_link]._p >= \
hldev->stats.pcim_info_saved->link_info[_link]._p) ? \
hldev->stats.pcim_info->link_info[_link]._p - \
hldev->stats.pcim_info_saved->link_info[_link]._p : \
((-1) - hldev->stats.pcim_info_saved->link_info[_link]._p) + \
hldev->stats.pcim_info->link_info[_link]._p)
#define set_latest_stat_aggr_cnt(_aggr, _p) \
hldev->stats.pcim_info_latest->aggr_info[_aggr]._p = \
((hldev->stats.pcim_info->aggr_info[_aggr]._p >= \
hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) ? \
hldev->stats.pcim_info->aggr_info[_aggr]._p - \
hldev->stats.pcim_info_saved->aggr_info[_aggr]._p : \
((-1) - hldev->stats.pcim_info_saved->aggr_info[_aggr]._p) + \
hldev->stats.pcim_info->aggr_info[_aggr]._p)
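/*
 * Sketch restating the macros above as a plain function: the "latest"
 * value is the counter delta since the last save, with a branch for the
 * case where the running hardware counter has wrapped around.  The
 * function name is illustrative only and mirrors the macro's arithmetic.
 */
static u64
example_counter_delta(u64 current, u64 saved)
{
	if (current >= saved)
		return current - saved;
	/* counter wrapped since the last save */
	return ((u64)(-1) - saved) + current;
}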
for (i = 0; i < XGE_HAL_MAC_LINKS; i++) {
set_latest_stat_link_cnt(i, tx_frms);
set_latest_stat_link_cnt(i, tx_ttl_eth_octets);
set_latest_stat_link_cnt(i, tx_data_octets);
set_latest_stat_link_cnt(i, tx_mcst_frms);
set_latest_stat_link_cnt(i, tx_bcst_frms);
set_latest_stat_link_cnt(i, tx_ucst_frms);
set_latest_stat_link_cnt(i, tx_tagged_frms);
set_latest_stat_link_cnt(i, tx_vld_ip);
set_latest_stat_link_cnt(i, tx_vld_ip_octets);
set_latest_stat_link_cnt(i, tx_icmp);
set_latest_stat_link_cnt(i, tx_tcp);
set_latest_stat_link_cnt(i, tx_rst_tcp);
set_latest_stat_link_cnt(i, tx_udp);
set_latest_stat_link_cnt(i, tx_unknown_protocol);
set_latest_stat_link_cnt(i, tx_parse_error);
set_latest_stat_link_cnt(i, tx_pause_ctrl_frms);
set_latest_stat_link_cnt(i, tx_lacpdu_frms);
set_latest_stat_link_cnt(i, tx_marker_pdu_frms);
set_latest_stat_link_cnt(i, tx_marker_resp_pdu_frms);
set_latest_stat_link_cnt(i, tx_drop_ip);
set_latest_stat_link_cnt(i, tx_xgmii_char1_match);
set_latest_stat_link_cnt(i, tx_xgmii_char2_match);
set_latest_stat_link_cnt(i, tx_xgmii_column1_match);
set_latest_stat_link_cnt(i, tx_xgmii_column2_match);
set_latest_stat_link_cnt(i, tx_drop_frms);
set_latest_stat_link_cnt(i, tx_any_err_frms);
set_latest_stat_link_cnt(i, rx_ttl_frms);
set_latest_stat_link_cnt(i, rx_vld_frms);
set_latest_stat_link_cnt(i, rx_offld_frms);
set_latest_stat_link_cnt(i, rx_ttl_eth_octets);
set_latest_stat_link_cnt(i, rx_data_octets);
set_latest_stat_link_cnt(i, rx_offld_octets);
set_latest_stat_link_cnt(i, rx_vld_mcst_frms);
set_latest_stat_link_cnt(i, rx_vld_bcst_frms);
set_latest_stat_link_cnt(i, rx_accepted_ucst_frms);
set_latest_stat_link_cnt(i, rx_accepted_nucst_frms);
set_latest_stat_link_cnt(i, rx_tagged_frms);
set_latest_stat_link_cnt(i, rx_long_frms);
set_latest_stat_link_cnt(i, rx_usized_frms);
set_latest_stat_link_cnt(i, rx_osized_frms);
set_latest_stat_link_cnt(i, rx_frag_frms);
set_latest_stat_link_cnt(i, rx_jabber_frms);
set_latest_stat_link_cnt(i, rx_ttl_64_frms);
set_latest_stat_link_cnt(i, rx_ttl_65_127_frms);
set_latest_stat_link_cnt(i, rx_ttl_128_255_frms);
set_latest_stat_link_cnt(i, rx_ttl_256_511_frms);
set_latest_stat_link_cnt(i, rx_ttl_512_1023_frms);
set_latest_stat_link_cnt(i, rx_ttl_1024_1518_frms);
set_latest_stat_link_cnt(i, rx_ttl_1519_4095_frms);
set_latest_stat_link_cnt(i, rx_ttl_40956_8191_frms);
set_latest_stat_link_cnt(i, rx_ttl_8192_max_frms);
set_latest_stat_link_cnt(i, rx_ttl_gt_max_frms);
set_latest_stat_link_cnt(i, rx_ip);
set_latest_stat_link_cnt(i, rx_ip_octets);
set_latest_stat_link_cnt(i, rx_hdr_err_ip);
set_latest_stat_link_cnt(i, rx_icmp);
set_latest_stat_link_cnt(i, rx_tcp);
set_latest_stat_link_cnt(i, rx_udp);
set_latest_stat_link_cnt(i, rx_err_tcp);
set_latest_stat_link_cnt(i, rx_pause_cnt);
set_latest_stat_link_cnt(i, rx_pause_ctrl_frms);
set_latest_stat_link_cnt(i, rx_unsup_ctrl_frms);
set_latest_stat_link_cnt(i, rx_in_rng_len_err_frms);
set_latest_stat_link_cnt(i, rx_out_rng_len_err_frms);
set_latest_stat_link_cnt(i, rx_drop_frms);
set_latest_stat_link_cnt(i, rx_discarded_frms);
set_latest_stat_link_cnt(i, rx_drop_ip);
set_latest_stat_link_cnt(i, rx_err_drp_udp);
set_latest_stat_link_cnt(i, rx_lacpdu_frms);
set_latest_stat_link_cnt(i, rx_marker_pdu_frms);
set_latest_stat_link_cnt(i, rx_marker_resp_pdu_frms);
set_latest_stat_link_cnt(i, rx_unknown_pdu_frms);
set_latest_stat_link_cnt(i, rx_illegal_pdu_frms);
set_latest_stat_link_cnt(i, rx_fcs_discard);
set_latest_stat_link_cnt(i, rx_len_discard);
set_latest_stat_link_cnt(i, rx_pf_discard);
set_latest_stat_link_cnt(i, rx_trash_discard);
set_latest_stat_link_cnt(i, rx_rts_discard);
set_latest_stat_link_cnt(i, rx_wol_discard);
set_latest_stat_link_cnt(i, rx_red_discard);
set_latest_stat_link_cnt(i, rx_ingm_full_discard);
set_latest_stat_link_cnt(i, rx_xgmii_data_err_cnt);
set_latest_stat_link_cnt(i, rx_xgmii_ctrl_err_cnt);
set_latest_stat_link_cnt(i, rx_xgmii_err_sym);
set_latest_stat_link_cnt(i, rx_xgmii_char1_match);
set_latest_stat_link_cnt(i, rx_xgmii_char2_match);
set_latest_stat_link_cnt(i, rx_xgmii_column1_match);
set_latest_stat_link_cnt(i, rx_xgmii_column2_match);
set_latest_stat_link_cnt(i, rx_local_fault);
set_latest_stat_link_cnt(i, rx_remote_fault);
set_latest_stat_link_cnt(i, rx_queue_full);
}
for (i = 0; i < XGE_HAL_MAC_AGGREGATORS; i++) {
set_latest_stat_aggr_cnt(i, tx_frms);
set_latest_stat_aggr_cnt(i, tx_mcst_frms);
set_latest_stat_aggr_cnt(i, tx_bcst_frms);
set_latest_stat_aggr_cnt(i, tx_discarded_frms);
set_latest_stat_aggr_cnt(i, tx_errored_frms);
set_latest_stat_aggr_cnt(i, rx_frms);
set_latest_stat_aggr_cnt(i, rx_data_octets);
set_latest_stat_aggr_cnt(i, rx_mcst_frms);
set_latest_stat_aggr_cnt(i, rx_bcst_frms);
set_latest_stat_aggr_cnt(i, rx_discarded_frms);
set_latest_stat_aggr_cnt(i, rx_errored_frms);
set_latest_stat_aggr_cnt(i, rx_unknown_protocol_frms);
}
return;
}
@ -518,14 +510,14 @@ __hal_stats_update_latest(xge_hal_device_h devh)
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
#define set_latest_stat_cnt(_dev, _p) \
hldev->stats.hw_info_latest._p = \
((hldev->stats.hw_info->_p >= hldev->stats.hw_info_saved._p) ? \
hldev->stats.hw_info->_p - hldev->stats.hw_info_saved._p : \
((-1) - hldev->stats.hw_info_saved._p) + hldev->stats.hw_info->_p)
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_TITAN) {
__hal_stats_pcim_update_latest(devh);
return;
}
/* Tx MAC statistics counters. */
@ -721,20 +713,20 @@ xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info)
if (!hldev->stats.is_initialized ||
!hldev->stats.is_enabled) {
*hw_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING)
xge_os_dma_sync(hldev->pdev,
hldev->stats.hw_info_dmah,
hldev->stats.dma_addr,
0,
sizeof(xge_hal_stats_hw_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
hldev->stats.dma_addr,
0,
sizeof(xge_hal_stats_hw_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
#endif
/*
/*
* update hw counters, taking into account
* the "reset" or "saved"
* values
@ -746,19 +738,19 @@ xge_hal_stats_hw(xge_hal_device_h devh, xge_hal_stats_hw_info_t **hw_info)
*/
if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA ||
xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
u64 mcst, bcst;
xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest;
u64 mcst, bcst;
xge_hal_stats_hw_info_t *hwsta = &hldev->stats.hw_info_latest;
mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) |
hwsta->rmac_vld_mcst_frms;
mcst = ((u64)hwsta->rmac_vld_mcst_frms_oflow << 32) |
hwsta->rmac_vld_mcst_frms;
bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) |
hwsta->rmac_vld_bcst_frms;
bcst = ((u64)hwsta->rmac_vld_bcst_frms_oflow << 32) |
hwsta->rmac_vld_bcst_frms;
mcst -= bcst;
mcst -= bcst;
hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32);
hwsta->rmac_vld_mcst_frms = (u32)mcst;
hwsta->rmac_vld_mcst_frms_oflow = (u32)(mcst >> 32);
hwsta->rmac_vld_mcst_frms = (u32)mcst;
}
*hw_info = &hldev->stats.hw_info_latest;
@ -786,20 +778,20 @@ xge_hal_stats_pcim(xge_hal_device_h devh, xge_hal_stats_pcim_info_t **hw_info)
if (!hldev->stats.is_initialized ||
!hldev->stats.is_enabled) {
*hw_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
*hw_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_STATS_STREAMING)
xge_os_dma_sync(hldev->pdev,
hldev->stats.hw_info_dmah,
hldev->stats.dma_addr,
0,
sizeof(xge_hal_stats_pcim_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
hldev->stats.dma_addr,
0,
sizeof(xge_hal_stats_pcim_info_t),
XGE_OS_DMA_DIR_FROMDEVICE);
#endif
/*
/*
* update hw counters, taking into account
* the "reset" or "saved"
* values
@ -830,19 +822,19 @@ xge_hal_stats_pcim(xge_hal_device_h devh, xge_hal_stats_pcim_info_t **hw_info)
*/
xge_hal_status_e
xge_hal_stats_device(xge_hal_device_h devh,
xge_hal_stats_device_info_t **device_info)
xge_hal_stats_device_info_t **device_info)
{
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
if (!hldev->stats.is_initialized ||
!hldev->stats.is_enabled) {
*device_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
*device_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
hldev->stats.sw_dev_info_stats.traffic_intr_cnt =
hldev->stats.sw_dev_info_stats.total_intr_cnt -
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
hldev->stats.sw_dev_info_stats.total_intr_cnt -
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
*device_info = &hldev->stats.sw_dev_info_stats;
@ -866,64 +858,64 @@ xge_hal_stats_device(xge_hal_device_h devh,
*/
xge_hal_status_e
xge_hal_stats_channel(xge_hal_channel_h channelh,
xge_hal_stats_channel_info_t **channel_info)
xge_hal_stats_channel_info_t **channel_info)
{
xge_hal_stats_hw_info_t *latest;
xge_hal_stats_hw_info_t *latest;
xge_hal_channel_t *channel;
xge_hal_device_t *hldev;
channel = (xge_hal_channel_t *)channelh;
if ((channel == NULL) || (channel->magic != XGE_HAL_MAGIC)) {
return XGE_HAL_ERR_INVALID_DEVICE;
}
hldev = (xge_hal_device_t *)channel->devh;
if ((hldev == NULL) || (hldev->magic != XGE_HAL_MAGIC)) {
return XGE_HAL_ERR_INVALID_DEVICE;
}
if ((channel == NULL) || (channel->magic != XGE_HAL_MAGIC)) {
return XGE_HAL_ERR_INVALID_DEVICE;
return XGE_HAL_ERR_INVALID_DEVICE;
}
if (!hldev->stats.is_initialized ||
!hldev->stats.is_enabled ||
!channel->is_open) {
*channel_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
*channel_info = NULL;
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
hldev->stats.sw_dev_info_stats.traffic_intr_cnt =
hldev->stats.sw_dev_info_stats.total_intr_cnt -
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
hldev->stats.sw_dev_info_stats.total_intr_cnt -
hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt;
if (hldev->stats.sw_dev_info_stats.traffic_intr_cnt) {
int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
if (!txcnt)
txcnt = 1;
channel->stats.avg_compl_per_intr_cnt =
channel->stats.total_compl_cnt / txcnt;
} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING &&
!hldev->config.bimodal_interrupts) {
if (!rxcnt)
rxcnt = 1;
channel->stats.avg_compl_per_intr_cnt =
channel->stats.total_compl_cnt / rxcnt;
}
if (channel->stats.avg_compl_per_intr_cnt == 0) {
/* to not confuse user */
channel->stats.avg_compl_per_intr_cnt = 1;
}
int rxcnt = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
int txcnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
if (!txcnt)
txcnt = 1;
channel->stats.avg_compl_per_intr_cnt =
channel->stats.total_compl_cnt / txcnt;
} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING &&
!hldev->config.bimodal_interrupts) {
if (!rxcnt)
rxcnt = 1;
channel->stats.avg_compl_per_intr_cnt =
channel->stats.total_compl_cnt / rxcnt;
}
if (channel->stats.avg_compl_per_intr_cnt == 0) {
/* to not confuse user */
channel->stats.avg_compl_per_intr_cnt = 1;
}
}
(void) xge_hal_stats_hw(hldev, &latest);
if (channel->stats.total_posts) {
channel->stats.avg_buffers_per_post =
channel->stats.total_buffers /
channel->stats.total_posts;
channel->stats.avg_buffers_per_post =
channel->stats.total_buffers /
channel->stats.total_posts;
#ifdef XGE_OS_PLATFORM_64BIT
if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
channel->stats.avg_post_size =
(u32)(latest->tmac_ttl_less_fb_octets /
channel->stats.total_posts);
channel->stats.avg_post_size =
(u32)(latest->tmac_ttl_less_fb_octets /
channel->stats.total_posts);
}
#endif
}
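As a worked example of the averaging above (numbers purely illustrative): a FIFO channel that has seen 1000 completions (total_compl_cnt) across 50 Tx traffic interrupts reports avg_compl_per_intr_cnt = 1000 / 50 = 20; if no Tx interrupts have been counted yet, txcnt is forced to 1 and the average degenerates to total_compl_cnt. The avg_buffers_per_post figure is total_buffers divided by total_posts in the same way.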
@ -931,9 +923,9 @@ xge_hal_stats_channel(xge_hal_channel_h channelh,
#ifdef XGE_OS_PLATFORM_64BIT
if (channel->stats.total_buffers &&
channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
channel->stats.avg_buffer_size =
(u32)(latest->tmac_ttl_less_fb_octets /
channel->stats.total_buffers);
channel->stats.avg_buffer_size =
(u32)(latest->tmac_ttl_less_fb_octets /
channel->stats.total_buffers);
}
#endif
@ -960,14 +952,14 @@ xge_hal_stats_reset(xge_hal_device_h devh)
if (!hldev->stats.is_initialized ||
!hldev->stats.is_enabled) {
return XGE_HAL_INF_STATS_IS_NOT_READY;
return XGE_HAL_INF_STATS_IS_NOT_READY;
}
/* save hw stats to calculate the after-reset values */
__hal_stats_save(&hldev->stats);
/* zero-out driver-maintained stats, don't reset the saved */
__hal_stats_soft_reset(hldev, 0);
__hal_stats_soft_reset(hldev, 0);
return XGE_HAL_OK;
}
@ -982,19 +974,19 @@ __hal_stats_soft_reset (xge_hal_device_h devh, int reset_all)
xge_hal_channel_t *channel;
xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
if (reset_all) {
if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
xge_os_memzero(&hldev->stats.hw_info_saved,
sizeof(xge_hal_stats_hw_info_t));
xge_os_memzero(&hldev->stats.hw_info_latest,
sizeof(xge_hal_stats_hw_info_t));
} else {
xge_os_memzero(&hldev->stats.pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_memzero(&hldev->stats.pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
}
}
if (reset_all) {
if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_TITAN) {
xge_os_memzero(&hldev->stats.hw_info_saved,
sizeof(xge_hal_stats_hw_info_t));
xge_os_memzero(&hldev->stats.hw_info_latest,
sizeof(xge_hal_stats_hw_info_t));
} else {
xge_os_memzero(&hldev->stats.pcim_info_saved,
sizeof(xge_hal_stats_pcim_info_t));
xge_os_memzero(&hldev->stats.pcim_info_latest,
sizeof(xge_hal_stats_pcim_info_t));
}
}
/* Reset the "soft" error and informational statistics */
xge_os_memzero(&hldev->stats.sw_dev_err_stats,
@ -1004,16 +996,16 @@ __hal_stats_soft_reset (xge_hal_device_h devh, int reset_all)
/* for each Rx channel */
xge_list_for_each(item, &hldev->ring_channels) {
channel = xge_container_of(item, xge_hal_channel_t, item);
xge_os_memzero(&channel->stats,
sizeof(xge_hal_stats_channel_info_t));
channel = xge_container_of(item, xge_hal_channel_t, item);
xge_os_memzero(&channel->stats,
sizeof(xge_hal_stats_channel_info_t));
}
/* for each Tx channel */
xge_list_for_each(item, &hldev->fifo_channels) {
channel = xge_container_of(item, xge_hal_channel_t, item);
xge_os_memzero(&channel->stats,
sizeof(xge_hal_stats_channel_info_t));
channel = xge_container_of(item, xge_hal_channel_t, item);
xge_os_memzero(&channel->stats,
sizeof(xge_hal_stats_channel_info_t));
}
}

View File

@ -26,20 +26,12 @@
* $FreeBSD$
*/
/*
* FileName : version.h
*
* Description: versioning file
*
* Created: 3 September 2004
*/
#ifndef XGELL_VERSION_H
#define XGELL_VERSION_H
#define XGELL_VERSION_MAJOR "2"
#define XGELL_VERSION_MINOR "0"
#define XGELL_VERSION_FIX "7"
#define XGELL_VERSION_FIX "9"
#define XGELL_VERSION_BUILD GENERATED_BUILD_VERSION
#define XGELL_VERSION XGELL_VERSION_MAJOR"."XGELL_VERSION_MINOR"." \
GENERATED_BUILD_VERSION

View File

@ -22,14 +22,11 @@ CFLAGS_NXGE += -DXGE_DEBUG_MODULE_MASK=XGE_COMPONENT_LL
CFLAGS_NXGE += -DXGE_DEBUG_ERR_MASK=XGE_COMPONENT_LL
#CFLAGS_NXGE += -DXGE_DEBUG_TRACE_MASK=XGE_COMPONENT_LL
# Check Memory
#CFLAGS_NXGE += -DXGE_OS_MEMORY_CHECK
# 2-Buffer Mode
#CFLAGS_NXGE += -DXGE_BUFFER_MODE_2
# 3-Buffer Mode
#CFLAGS_NXGE += -DXGE_BUFFER_MODE_3
# TSO (TCP Segmentation Offload)
CFLAGS_NXGE += -DXGE_FEATURE_TSO
#CFLAGS_NXGE += -DXGE_FEATURE_BUFFER_MODE_2
CFLAGS += $(CFLAGS_NXGE)

View File

@ -25,6 +25,7 @@
*
* $FreeBSD$
*/
#ifndef XGE_CMN_H
#define XGE_CMN_H
@ -45,97 +46,99 @@
#define XGE_OS_HOST_BIG_ENDIAN 1
#endif
#define u64 unsigned long long
#define u32 unsigned int
#define u16 unsigned short
#define u8 unsigned char
#define u64 unsigned long long
#define u32 unsigned int
#define u16 unsigned short
#define u8 unsigned char
#define XGE_COUNT_REGS 386
#define XGE_COUNT_STATS 160
#define XGE_COUNT_PCICONF 43
#define XGE_COUNT_DEVCONF 1677
#define XGE_COUNT_DEVCONF 1677
#ifdef CONFIG_LRO
#define XGE_COUNT_INTRSTAT 26
#define XGE_COUNT_INTRSTAT 26
#else
#define XGE_COUNT_INTRSTAT 20
#define XGE_COUNT_INTRSTAT 20
#endif
#define XGE_COUNT_TCODESTAT 54
#define XGE_COUNT_SWSTAT 54
#define XGE_COUNT_DRIVERSTATS 27
#define DEVICE_ID_XFRAME_II 0x5832
#define XGE_COUNT_EXTENDED_STATS 56
#define XGE_PRINT(fd, fmt...) \
{ \
fprintf( fd, fmt ); \
fprintf( fd, "\n" ); \
printf( fmt ); \
printf( "\n" ); \
#define XGE_PRINT(fd, fmt...) { \
fprintf(fd, fmt); \
fprintf(fd, "\n"); \
printf(fmt); \
printf("\n"); \
}
#define XGE_PRINT_LINE(fd) XGE_PRINT(fd, line);
#define XGE_PRINT_LINE(fd) XGE_PRINT(fd, line);
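For reference, a hedged usage sketch of the reworked XGE_PRINT macro (file handle and message are illustrative); each invocation writes the formatted line to the given stream and echoes it to stdout:

    FILE *fd = fopen("stats.log", "w+");
    if (fd != NULL) {
        XGE_PRINT(fd, "Driver Version: %s", "2.0.9");  /* written to stats.log and echoed to stdout */
        fclose(fd);
    }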
/* Read & Write Register */
typedef struct barregister
{
char option[2];
u64 offset;
u64 value;
}bar0reg_t;
char option[2];
u64 offset;
u64 value;
}xge_register_info_t;
/* Register Dump */
typedef struct xge_pci_bar0_t
{
u8 name[32]; /* Register name as in user guides */
u64 offset; /* Offset from base address */
u64 value; /* Value */
char type; /* 1: XframeII, 0: Common */
u8 name[32]; /* Register name as in user guides */
u64 offset; /* Offset from base address */
u64 value; /* Value */
char type; /* 1: XframeII, 0: Common */
} xge_pci_bar0_t;
/* Hardware Statistics */
typedef struct xge_stats_hw_info_t
{
u8 name[32]; /* Statistics name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u64 value; /* Value */
u8 name[32]; /* Statistics name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u64 value; /* Value */
} xge_stats_hw_info_t;
/* PCI Configuration Space */
typedef struct xge_pci_config_t
{
u8 name[32]; /* Pci conf. name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u64 value; /* Value */
u8 name[32]; /* Pci conf. name */
u64 be_offset; /* Offset from base address (BE) */
u64 le_offset; /* Offset from base address (LE) */
u64 value; /* Value */
} xge_pci_config_t;
/* Device Configuration */
typedef struct xge_device_config_t
{
u8 name[32]; /* Device conf. name */
u64 value; /* Value */
u8 name[32]; /* Device conf. name */
u64 value; /* Value */
} xge_device_config_t;
/* Interrupt Statistics */
typedef struct xge_stats_intr_info_t
{
u8 name[32]; /* Interrupt entry name */
u64 value; /* Value (count) */
u8 name[32]; /* Interrupt entry name */
u64 value; /* Value (count) */
} xge_stats_intr_info_t;
/* Tcode Statistics */
typedef struct xge_stats_tcode_info_t
{
u8 name[32]; /* Tcode entry name */
u64 value; /* Value (count) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u16 flag;
u8 name[32]; /* Tcode entry name */
u64 value; /* Value (count) */
u8 type; /* Type: 1, 2, 3 or 4 bytes */
u16 flag;
}xge_stats_tcode_info_t;
typedef struct xge_stats_driver_info_t
{
u8 name[32]; /* Driver statistics name */
u64 value; /* Value */
} xge_stats_driver_info_t;
#ifdef XGE_OS_HOST_BIG_ENDIAN
#define GET_OFFSET_STATS(index) statsInfo[(index)].be_offset
#define GET_OFFSET_PCICONF(index) pciconfInfo[(index)].be_offset

View File

@ -25,437 +25,582 @@
*
* $FreeBSD$
*/
/******************************************
* xge_info.c
*
* To get the Tx, Rx, PCI, Interrupt statistics,
* PCI configuration space and bar0 register
* values
******************************************/
#include "xge_info.h"
int
main( int argc, char *argv[] )
{
if(argc >= 4) {
if(!((strcmp(argv[2], "-r") == 0) ||
(strcmp(argv[2], "-w") == 0) ||
(strcmp(argv[2], "chgbufmode") == 0)))
{ goto use; }
}
else {
if(argc != 3) { goto out; }
else
{
if(!((strcmp(argv[2], "stats") == 0) ||
(strcmp(argv[2], "pciconf") == 0) ||
(strcmp(argv[2], "devconf") == 0) ||
(strcmp(argv[2], "reginfo") == 0) ||
(strcmp(argv[2], "driverversion") == 0) ||
(strcmp(argv[2], "swstats") == 0) ||
(strcmp(argv[2], "getbufmode") == 0) ||
(strcmp(argv[2], "intr") == 0)))
{ goto out; }
}
}
int status = EXIT_FAILURE;
if((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
{
printf("Creating socket failed\n");
return EXIT_FAILURE;
}
if(argc >= 4) {
if(!((strcmp(argv[2], "getregister") == 0) ||
(strcmp(argv[2], "setregister") == 0) ||
(strcmp(argv[2], "setbufmode") == 0))) {
goto out;
}
}
else {
if(argc != 3) {
goto out;
}
else {
if(!((strcmp(argv[2], "hwstats") == 0) ||
(strcmp(argv[2], "pciconf") == 0) ||
(strcmp(argv[2], "devconf") == 0) ||
(strcmp(argv[2], "registers") == 0) ||
(strcmp(argv[2], "version") == 0) ||
(strcmp(argv[2], "swstats") == 0) ||
(strcmp(argv[2], "drvstats") == 0) ||
(strcmp(argv[2], "getbufmode") == 0) ||
(strcmp(argv[2], "devstats") == 0))) {
goto out;
}
}
}
ifreqp.ifr_addr.sa_family = AF_INET;
strcpy(ifreqp.ifr_name, argv[1]);
if((sockfd = socket(AF_INET, SOCK_DGRAM, 0)) < 0) {
printf("Creating socket failed\n");
goto _exit;
}
if (strcmp(argv[2], "pciconf") == 0) return getPciConf();
else if(strcmp(argv[2], "devconf") == 0) return getDevConf();
else if(strcmp(argv[2], "stats") == 0) return getStats();
else if(strcmp(argv[2], "reginfo") == 0) return getRegInfo();
else if(strcmp(argv[2], "intr") == 0) return getIntrStats();
else if(strcmp(argv[2], "swstats") == 0) return getTcodeStats();
else if(strcmp(argv[2], "driverversion") == 0) return getDriverVer();
else if(strcmp(argv[2], "-r") == 0) return getReadReg(argv[2],
argv[3]);
else if(strcmp(argv[2], "-w") == 0) return getWriteReg(argv[2],
argv[3],argv[5]);
else if(strcmp(argv[2], "chgbufmode") == 0) return changeBufMode(argv[3]);
else if(strcmp(argv[2], "getbufmode") == 0) return getBufMode();
else return EXIT_FAILURE;
ifreqp.ifr_addr.sa_family = AF_INET;
strcpy(ifreqp.ifr_name, argv[1]);
use:
printf("Usage:");
printf("%s <INTERFACE> [-r] [-w] [chgbufmode]\n", argv[0]);
printf("\t -r <offset> : Read register \n");
printf("\t -w <offset> -v <value> : Write register \n");
printf("\t chgbufmode <Buffer mode> : Changes buffer mode \n");
return EXIT_FAILURE;
if (strcmp(argv[2], "pciconf") == 0)
status = xge_get_pciconf();
else if(strcmp(argv[2], "devconf") == 0)
status = xge_get_devconf();
else if(strcmp(argv[2], "hwstats") == 0)
status = xge_get_hwstats();
else if(strcmp(argv[2], "registers") == 0)
status = xge_get_registers();
else if(strcmp(argv[2], "devstats") == 0)
status = xge_get_devstats();
else if(strcmp(argv[2], "swstats") == 0)
status = xge_get_swstats();
else if(strcmp(argv[2], "drvstats") == 0)
status = xge_get_drvstats();
else if(strcmp(argv[2], "version") == 0)
status = xge_get_drv_version();
else if(strcmp(argv[2], "getbufmode") == 0)
status = xge_get_buffer_mode();
else if(strcmp(argv[2], "getregister") == 0)
status = xge_get_register(argv[3]);
else if(strcmp(argv[2], "setregister") == 0)
status = xge_set_register(argv[3], argv[4]);
else if(strcmp(argv[2], "setbufmode") == 0)
status = xge_change_buffer_mode(argv[3]);
goto _exit;
out:
printf("Usage:");
printf("%s <INTERFACE> <[stats] [reginfo] [pciconf] [devconf] ", argv[0]);
printf("[intr] [swstats] [driverversion] ");
printf("[getbufmode] [chgbufmode] [-r] [-w] >\n");
printf("\tINTERFACE : Interface (xge0, xge1, xge2, ..)\n");
printf("\tstats : Prints statistics \n");
printf("\treginfo : Prints register values \n");
printf("\tpciconf : Prints PCI configuration space \n");
printf("\tdevconf : Prints device configuration \n");
printf("\tintr : Prints interrupt statistics \n");
printf("\tswstats : Prints sw statistics \n");
printf("\tdriverversion : Prints driver version \n");
printf("\tgetbufmode : Prints Buffer Mode \n");
printf("\tchgbufmode : Changes buffer mode \n");
printf("\t -r <offset> : Read register \n");
printf("\t -w <offset> -v <value> : Write register \n");
return EXIT_FAILURE;
printf("Usage: ");
printf("getinfo <INTERFACE> [hwstats] [swstats] [devstats] ");
printf("[drvstats] [version] [registers] [getregister offset] ");
printf("[setregister offset value] [pciconf] [devconf] [getbufmode] ");
printf("[setbufmode]\n");
printf("\tINTERFACE : Interface (nxge0, nxge1, nxge2, ..) \n");
printf("\thwstats : Prints hardware statistics \n");
printf("\tswstats : Prints software statistics \n");
printf("\tdevstats : Prints device statistics \n");
printf("\tdrvstats : Prints driver statistics \n");
printf("\tversion : Prints driver version \n");
printf("\tregisters : Prints register values \n");
printf("\tgetregister : Read a register \n");
printf("\tsetregister : Write to a register \n");
printf("\tpciconf : Prints PCI configuration space \n");
printf("\tdevconf : Prints device configuration \n");
printf("\tgetbufmode : Prints Buffer Mode \n");
printf("\tsetbufmode : Changes buffer mode \n");
_exit:
return status;
}
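With the renamed sub-commands, the tool is invoked along these lines (interface name and register offset/value are purely illustrative):

    getinfo nxge0 hwstats
    getinfo nxge0 getregister 0x0100
    getinfo nxge0 setregister 0x0100 0x0000000000000001
    getinfo nxge0 setbufmode 2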
/**
* xge_get_hwstats
* Gets hardware statistics
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getStats()
xge_get_hwstats(void)
{
void *hw_stats;
void *pci_cfg;
unsigned short device_id;
int index = 0;
bufferSize = GET_OFFSET_STATS(XGE_COUNT_STATS - 1) + 8;
char *hw_stats = NULL, *pci_cfg = NULL;
unsigned short device_id;
int index = 0;
int status = EXIT_FAILURE;
hw_stats = (void *) malloc(bufferSize);
if(!hw_stats)
{
printf("Allocating memory for hw_stats failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)hw_stats;
*pAccess = XGE_QUERY_STATS;
ifreqp.ifr_data = (caddr_t) hw_stats;
buffer_size = GET_OFFSET_STATS(XGE_COUNT_STATS - 1) + 8;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting hardware statistics failed\n");
free(hw_stats);
return EXIT_FAILURE;
}
bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8;
hw_stats = (char *)malloc(buffer_size);
if(!hw_stats) {
printf("Allocating memory for hardware statistics failed\n");
goto _exit;
}
*hw_stats = XGE_QUERY_STATS;
ifreqp.ifr_data = (caddr_t) hw_stats;
pci_cfg = (void *) malloc(bufferSize);
if(!pci_cfg)
{
printf("Allocating memory for pci_cfg failed\n");
return EXIT_FAILURE;
}
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting hardware statistics failed\n");
goto _exit1;
}
pAccess = (char *)pci_cfg;
*pAccess = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
buffer_size = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF - 1) + 8;
pci_cfg = (void *)malloc(buffer_size);
if(!pci_cfg) {
printf("Allocating memory for PCI configuration failed\n");
goto _exit1;
}
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting pci configuration space failed\n");
free(pci_cfg);
return EXIT_FAILURE;
}
device_id = *( ( u16 * )( ( unsigned char * )pci_cfg +
GET_OFFSET_PCICONF(index) ) );
logStats( hw_stats,device_id );
free(hw_stats);
free(pci_cfg);
return EXIT_SUCCESS;
*pci_cfg = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting pci configuration space failed\n");
goto _exit2;
}
device_id = *((u16 *)((unsigned char *)pci_cfg +
GET_OFFSET_PCICONF(index)));
xge_print_hwstats(hw_stats,device_id);
status = EXIT_SUCCESS;
_exit2:
free(pci_cfg);
_exit1:
free(hw_stats);
_exit:
return status;
}
/**
* xge_get_pciconf
* Gets PCI configuration space
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getPciConf()
xge_get_pciconf(void)
{
void *pci_cfg;
char *pci_cfg = NULL;
int status = EXIT_FAILURE;
indexer = 0;
bufferSize = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF -1) + 8;
buffer_size = GET_OFFSET_PCICONF(XGE_COUNT_PCICONF - 1) + 8;
pci_cfg = (void *) malloc(bufferSize);
if(!pci_cfg)
{
printf("Allocating memory for pci_cfg failed\n");
return EXIT_FAILURE;
}
pci_cfg = (char *)malloc(buffer_size);
if(!pci_cfg) {
printf("Allocating memory for PCI configuration failed\n");
goto _exit;
}
pAccess = (char *)pci_cfg;
*pAccess = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
*pci_cfg = XGE_QUERY_PCICONF;
ifreqp.ifr_data = (caddr_t)pci_cfg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting pci configuration space failed\n");
free(pci_cfg);
return EXIT_FAILURE;
}
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting PCI configuration space failed\n");
goto _exit1;
}
logPciConf( pci_cfg );
free(pci_cfg);
return EXIT_SUCCESS;
xge_print_pciconf( pci_cfg );
status = EXIT_SUCCESS;
_exit1:
free(pci_cfg);
_exit:
return status;
}
/**
* xge_get_devconf
* Gets device configuration
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getDevConf()
xge_get_devconf(void)
{
void *device_cfg;
char *device_cfg = NULL;
int status = EXIT_FAILURE;
indexer = 0;
bufferSize = XGE_COUNT_DEVCONF * sizeof(int);
buffer_size = XGE_COUNT_DEVCONF * sizeof(int);
device_cfg = (void *) malloc(bufferSize);
if(!device_cfg)
{
printf("Allocating memory for device_cfg failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)device_cfg;
*pAccess = XGE_QUERY_DEVCONF;
ifreqp.ifr_data = (caddr_t)device_cfg;
device_cfg = (char *)malloc(buffer_size);
if(!device_cfg) {
printf("Allocating memory for device configuration failed\n");
goto _exit;
}
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting Device Configuration failed\n");
free(device_cfg);
return EXIT_FAILURE;
}
*device_cfg = XGE_QUERY_DEVCONF;
ifreqp.ifr_data = (caddr_t)device_cfg;
logDevConf( device_cfg );
free(device_cfg);
return EXIT_SUCCESS;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting Device Configuration failed\n");
goto _exit1;
}
xge_print_devconf( device_cfg );
status = EXIT_SUCCESS;
_exit1:
free(device_cfg);
_exit:
return status;
}
/**
* xge_get_buffer_mode
* Get current Rx buffer mode
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getBufMode()
xge_get_buffer_mode(void)
{
void *buf_mode = 0;
char *buf_mode = NULL;
int status = EXIT_FAILURE;
buf_mode = (void *) malloc(sizeof(int));
if(!buf_mode)
{
printf("Allocating memory for Buffer mode parameter failed\n");
return EXIT_FAILURE;
}
buf_mode = (char *)malloc(sizeof(int));
if(!buf_mode) {
printf("Allocating memory for buffer mode failed\n");
goto _exit;
}
pAccess = (char *)buf_mode;
*pAccess = XGE_QUERY_BUFFER_MODE;
ifreqp.ifr_data = (void *)buf_mode;
*buf_mode = XGE_QUERY_BUFFER_MODE;
ifreqp.ifr_data = (void *)buf_mode;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting Buffer Mode failed\n");
free(buf_mode);
return EXIT_FAILURE;
}
printf("Buffer Mode is %d\n", *ifreqp.ifr_data);
free(buf_mode);
return EXIT_SUCCESS;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting Buffer Mode failed\n");
goto _exit1;
}
printf("Rx Buffer Mode: %d\n", *ifreqp.ifr_data);
status = EXIT_SUCCESS;
_exit1:
free(buf_mode);
_exit:
return status;
}
/**
* xge_change_buffer_mode
* Changes the Rx buffer mode
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
changeBufMode(char *bufmode)
xge_change_buffer_mode(char *bufmode)
{
char *print_msg = NULL;
int status = EXIT_FAILURE;
char *print_msg;
pAccess = (char *)malloc(sizeof(char));
print_msg = (char *)malloc(sizeof(char));
if(print_msg == NULL) {
printf("Allocation of memory for message failed\n");
goto _exit;
}
if(*bufmode == '1'){
*pAccess = XGE_SET_BUFFER_MODE_1;
}else if (*bufmode == '2'){
*pAccess = XGE_SET_BUFFER_MODE_2;
}else if (*bufmode == '3'){
*pAccess = XGE_SET_BUFFER_MODE_3;
}else if (*bufmode == '5'){
*pAccess = XGE_SET_BUFFER_MODE_5;
}else{
printf("Invalid Buffer mode\n");
return EXIT_FAILURE;
}
if (*bufmode == '1') *print_msg = XGE_SET_BUFFER_MODE_1;
else if(*bufmode == '2') *print_msg = XGE_SET_BUFFER_MODE_2;
else if(*bufmode == '5') *print_msg = XGE_SET_BUFFER_MODE_5;
else {
printf("Invalid Buffer mode\n");
goto _exit1;
}
ifreqp.ifr_data = (char *)pAccess;
if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 )
{
printf( "Changing Buffer Mode Failed\n" );
return EXIT_FAILURE;
}
print_msg = (char *)ifreqp.ifr_data;
if(*print_msg == 'Y')
printf("Requested buffer mode was already enabled\n");
else if(*print_msg == 'N')
printf("Requested buffer mode is not implemented OR\nDynamic buffer changing is not supported in this driver\n");
else if(*print_msg == 'C')
printf("Buffer mode changed to %c\n", *bufmode);
ifreqp.ifr_data = (char *)print_msg;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Changing buffer mode failed\n");
goto _exit1;
}
return EXIT_SUCCESS;
if(*print_msg == 'Y') {
printf("Requested buffer mode was already enabled\n");
}
else if(*print_msg == 'N') {
printf("Requested buffer mode is not implemented OR\n");
printf("Dynamic buffer changing is not supported in this driver\n");
}
else if(*print_msg == 'C') {
printf("Buffer mode changed to %c\n", *bufmode);
}
status = EXIT_SUCCESS;
_exit1:
free(print_msg);
_exit:
return status;
}
/**
* xge_get_registers
* Gets register values
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getRegInfo()
xge_get_registers(void)
{
void *regBuffer;
void *registers = NULL;
int status = EXIT_FAILURE;
indexer = 0;
bufferSize = regInfo[XGE_COUNT_REGS - 1].offset + 8;
buffer_size = regInfo[XGE_COUNT_REGS - 1].offset + 8;
regBuffer = ( void * ) malloc ( bufferSize );
if( !regBuffer )
{
printf( "Allocating memory for register dump failed\n" );
return EXIT_FAILURE;
}
ifreqp.ifr_data = ( caddr_t )regBuffer;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Getting register dump failed\n" );
free( regBuffer );
return EXIT_FAILURE;
}
registers = (void *)malloc(buffer_size);
if(!registers) {
printf("Allocating memory for register dump failed\n");
goto _exit;
}
logRegInfo( regBuffer );
free( regBuffer );
return EXIT_SUCCESS;
ifreqp.ifr_data = (caddr_t)registers;
if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) {
printf("Getting register values failed\n");
goto _exit1;
}
xge_print_registers(registers);
status = EXIT_SUCCESS;
_exit1:
free(registers);
_exit:
return status;
}
int
getReadReg(char *opt,char *offst)
{
bar0reg_t *reg;
reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t));
if( !reg )
{
printf( "Allocating memory for reading register failed\n" );
return EXIT_FAILURE;
}
strcpy(reg->option, opt);
sscanf(offst,"%x",&reg->offset);
ifreqp.ifr_data = ( caddr_t )reg;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Reading register failed\n" );
free(reg);
return EXIT_FAILURE;
}
logReadReg ( reg->offset,reg->value );
free(reg);
return EXIT_SUCCESS;
}
/**
* xge_get_register
* Reads the register at the specified offset
*
* @offset Offset of register from base address
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getWriteReg(char *opt,char *offst,char *val)
xge_get_register(char *offset)
{
bar0reg_t *reg;
reg = ( bar0reg_t * ) malloc (sizeof(bar0reg_t));
if( !reg )
{
printf( "Allocating memory for writing register failed\n" );
return EXIT_FAILURE;
}
strcpy(reg->option, opt);
sscanf(offst,"%x",&reg->offset);
sscanf(val,"%llx",&reg->value);
ifreqp.ifr_data = ( caddr_t )reg;
if( ioctl( sockfd, SIOCGPRIVATE_1, &ifreqp ) < 0 )
{
printf( "Writing register failed\n" );
free(reg);
return EXIT_FAILURE;
}
free(reg);
return EXIT_SUCCESS;
xge_register_info_t *register_info = NULL;
int status = EXIT_FAILURE;
register_info =
(xge_register_info_t *)malloc(sizeof(xge_register_info_t));
if(!register_info) {
printf("Allocating memory for register info failed\n");
goto _exit;
}
strcpy(register_info->option, "-r");
sscanf(offset, "%x", &register_info->offset);
ifreqp.ifr_data = (caddr_t)register_info;
if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) {
printf("Reading register failed\n");
goto _exit1;
}
xge_print_register(register_info->offset, register_info->value);
status = EXIT_SUCCESS;
_exit1:
free(register_info);
_exit:
return status;
}
/**
* xge_set_register
* Writes to the register at the specified offset
*
* @offset Offset of register from base address
* @value Value to write to the register
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getIntrStats()
xge_set_register(char *offset, char *value)
{
void *intr_stat;
xge_register_info_t *register_info = NULL;
int status = EXIT_FAILURE;
bufferSize = XGE_COUNT_INTRSTAT * sizeof(u32);
register_info =
(xge_register_info_t *)malloc(sizeof(xge_register_info_t));
if(!register_info) {
printf("Allocating memory for register info failed\n");
goto _exit;
}
intr_stat = (void *) malloc(bufferSize);
if(!intr_stat)
{
printf("Allocating memory for intr_stat failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)intr_stat;
*pAccess = XGE_QUERY_INTRSTATS ;
ifreqp.ifr_data = (caddr_t)intr_stat;
strcpy(register_info->option, "-w");
sscanf(offset, "%x", &register_info->offset);
sscanf(value, "%llx", &register_info->value);
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting interrupt statistics failed\n");
free(intr_stat);
return EXIT_FAILURE;
}
intr_stat = (char *)ifreqp.ifr_data;
ifreqp.ifr_data = (caddr_t)register_info;
if(ioctl(sockfd, SIOCGPRIVATE_1, &ifreqp) < 0) {
printf("Writing register failed\n");
goto _exit1;
}
status = EXIT_SUCCESS;
logIntrStats( intr_stat );
free(intr_stat);
return EXIT_SUCCESS;
_exit1:
free(register_info);
_exit:
return status;
}
/**
* xge_get_devstats
* Gets device statistics
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getTcodeStats()
xge_get_devstats(void)
{
void *tcode_stat;
char *dev_stats = NULL;
int status = EXIT_FAILURE;
bufferSize = XGE_COUNT_TCODESTAT * sizeof(u32);
buffer_size = XGE_COUNT_INTRSTAT * sizeof(u32);
tcode_stat = (void *) malloc(bufferSize);
if(!tcode_stat)
{
printf("Allocating memory for tcode_stat failed\n");
return EXIT_FAILURE;
}
pAccess = (char *)tcode_stat;
*pAccess = XGE_QUERY_TCODE ;
ifreqp.ifr_data = (caddr_t)tcode_stat;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0)
{
printf("Getting tcode statistics failed\n");
free(tcode_stat);
return EXIT_FAILURE;
}
tcode_stat = (char *)ifreqp.ifr_data;
dev_stats = (char *)malloc(buffer_size);
if(!dev_stats) {
printf("Allocating memory for device statistics failed\n");
goto _exit;
}
logTcodeStats( tcode_stat );
free(tcode_stat);
return EXIT_SUCCESS;
*dev_stats = XGE_QUERY_DEVSTATS;
ifreqp.ifr_data = (caddr_t)dev_stats;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting device statistics failed\n");
goto _exit1;
}
xge_print_devstats(dev_stats);
status = EXIT_SUCCESS;
_exit1:
free(dev_stats);
_exit:
return status;
}
/**
* xge_get_swstats
* Gets software statistics
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
getDriverVer()
xge_get_swstats(void)
{
char *version;
bufferSize = 20;
version = ( char * ) malloc ( bufferSize );
if( !version )
{
printf( "Allocating memory for getting driver version failed\n" );
return EXIT_FAILURE;
}
pAccess = version;
*pAccess = XGE_READ_VERSION;
char *sw_stats = NULL;
int status = EXIT_FAILURE;
ifreqp.ifr_data = ( caddr_t )version;
if( ioctl( sockfd, SIOCGPRIVATE_0, &ifreqp ) < 0 )
{
printf( "Getting driver version failed\n" );
free( version );
return EXIT_FAILURE;
}
logDriverInfo(version);
free( version );
return EXIT_SUCCESS;
buffer_size = XGE_COUNT_SWSTAT * sizeof(u32);
sw_stats = (char *) malloc(buffer_size);
if(!sw_stats) {
printf("Allocating memory for software statistics failed\n");
goto _exit;
}
*sw_stats = XGE_QUERY_SWSTATS;
ifreqp.ifr_data = (caddr_t)sw_stats;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting software statistics failed\n");
goto _exit1;
}
xge_print_swstats(sw_stats);
status = EXIT_SUCCESS;
_exit1:
free(sw_stats);
_exit:
return status;
}
/**
* xge_get_drv_version
* Gets driver version
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
xge_get_drv_version(void)
{
char *version = NULL;
int status = EXIT_FAILURE;
buffer_size = 20;
version = (char *)malloc(buffer_size);
if(!version) {
printf("Allocating memory for driver version failed\n");
goto _exit;
}
*version = XGE_READ_VERSION;
ifreqp.ifr_data = ( caddr_t )version;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting driver version failed\n");
goto _exit1;
}
xge_print_drv_version(version);
status = EXIT_SUCCESS;
_exit1:
free(version);
_exit:
return status;
}
/**
* xge_get_drvstats
* Gets driver statistics
*
* Returns EXIT_SUCCESS or EXIT_FAILURE
*/
int
xge_get_drvstats(void)
{
char *driver_stats = NULL;
int status = EXIT_FAILURE;
buffer_size = XGE_COUNT_DRIVERSTATS * sizeof(u64);
driver_stats = (char *)malloc(buffer_size);
if(!driver_stats) {
printf("Allocating memory for driver statistics failed\n");
goto _exit;
}
*driver_stats = XGE_QUERY_DRIVERSTATS;
ifreqp.ifr_data = (caddr_t)driver_stats;
if(ioctl(sockfd, SIOCGPRIVATE_0, &ifreqp) < 0) {
printf("Getting Driver Statistics failed\n");
goto _exit1;
}
xge_print_drvstats(driver_stats);
status = EXIT_SUCCESS;
_exit1:
free(driver_stats);
_exit:
return status;
}
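All of the query helpers above follow one pattern: allocate a buffer, store the XGE_QUERY_* command code in its first byte, point ifr_data at it, and issue the driver's private ioctl. A minimal self-contained sketch of that pattern, assuming the headers introduced in this commit (the function name is hypothetical, error reporting trimmed, software statistics chosen as the example query):

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <sys/sockio.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include "xge_info.h"         /* XGE_QUERY_SWSTATS, XGE_COUNT_SWSTAT, u32 */

    static int
    query_swstats(const char *ifname)
    {
        struct ifreq ifr;
        char *buf;
        int fd, status = EXIT_FAILURE;

        if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
            return (status);

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_addr.sa_family = AF_INET;
        strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);

        if ((buf = malloc(XGE_COUNT_SWSTAT * sizeof(u32))) != NULL) {
            *buf = XGE_QUERY_SWSTATS;          /* first byte selects the query */
            ifr.ifr_data = (caddr_t)buf;
            if (ioctl(fd, SIOCGPRIVATE_0, &ifr) == 0)
                status = EXIT_SUCCESS;         /* buf now holds the raw statistics block */
            free(buf);
        }
        close(fd);
        return (status);
    }

The same skeleton serves every query; only the command code, the buffer size, and the routine that formats the returned block differ.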

View File

@ -25,51 +25,46 @@
*
* $FreeBSD$
*/
/******************************************
* getinfo.h
*
* To get the Tx, Rx, PCI, Interrupt statistics,
* PCI configuration space,device configuration
* and bar0 register values
******************************************/
#ifndef XGE_CMN_H
#include "xge_cmn.h"
#endif
#define XGE_QUERY_STATS 1
#define XGE_QUERY_PCICONF 2
#define XGE_QUERY_INTRSTATS 3
#define XGE_QUERY_DEVSTATS 3
#define XGE_QUERY_DEVCONF 4
#define XGE_READ_VERSION 5
#define XGE_QUERY_TCODE 6
#define XGE_SET_BUFFER_MODE_1 7
#define XGE_SET_BUFFER_MODE_2 8
#define XGE_SET_BUFFER_MODE_3 9
#define XGE_QUERY_SWSTATS 6
#define XGE_QUERY_DRIVERSTATS 7
#define XGE_SET_BUFFER_MODE_1 8
#define XGE_SET_BUFFER_MODE_2 9
#define XGE_SET_BUFFER_MODE_5 10
#define XGE_QUERY_BUFFER_MODE 11
/* Function declarations */
int getPciConf();
int getDevConf();
int getStats();
int getRegInfo();
int getIntrStats();
int getTcodeStats();
int getReadReg(char *,char *);
int getWriteReg(char *,char *,char *);
int getDriverVersion();
int getBufMode();
int changeBufMode(char *);
void logStats(void *,unsigned short);
void logPciConf(void *);
void logDevConf(void *);
void logRegInfo(void *);
void logReadReg(u64,u64);
void logIntrStats(void *);
void logTcodeStats(void *);
void logDriverInfo(char *);
int xge_get_pciconf(void);
int xge_get_devconf(void);
int xge_get_hwstats(void);
int xge_get_registers(void);
int xge_get_devstats(void);
int xge_get_swstats(void);
int xge_get_drvstats(void);
int xge_get_register(char *);
int xge_set_register(char *,char *);
int xge_get_drv_version(void);
int xge_get_buffer_mode(void);
int xge_change_buffer_mode(char *);
void xge_print_hwstats(void *,unsigned short);
void xge_print_pciconf(void *);
void xge_print_devconf(void *);
void xge_print_registers(void *);
void xge_print_register(u64,u64);
void xge_print_devstats(void *);
void xge_print_swstats(void *);
void xge_print_drvstats(void *);
void xge_print_drv_version(char *);
extern xge_pci_bar0_t regInfo[];
extern xge_pci_config_t pciconfInfo[];
@ -77,7 +72,8 @@ extern xge_stats_hw_info_t statsInfo[];
extern xge_device_config_t devconfInfo[];
extern xge_stats_intr_info_t intrInfo[];
extern xge_stats_tcode_info_t tcodeInfo[];
struct ifreq ifreqp;
int sockfd, indexer, bufferSize = 0;
char *pAccess;
extern xge_stats_driver_info_t driverInfo[];
struct ifreq ifreqp;
int sockfd, indexer, buffer_size = 0;

View File

@ -25,222 +25,297 @@
*
* $FreeBSD$
*/
#include "xge_log.h"
/**
* xge_print_hwstats
* Prints/logs hardware statistics
*
* @hw_stats Hardware statistics
* @device_id Device ID
*/
void
logStats( void *hwStats, unsigned short device_id )
xge_print_hwstats(void *hw_stats, unsigned short device_id)
{
int index = 0;
int count = 0;
count = XGE_COUNT_STATS - ((device_id == DEVICE_ID_XFRAME_II) ? 0 : XGE_COUNT_EXTENDED_STATS);
fdAll = fopen( "stats.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_STATS(fdAll);
int index = 0, count = 0;
for( index = 0; index < count ; index++ )
{
switch( statsInfo[index].type )
{
case 2:
{
statsInfo[index].value =
*( ( u16 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
case 4:
{
statsInfo[index].value =
*( ( u32 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
case 8:
{
statsInfo[index].value =
*( ( u64 * )( ( unsigned char * ) hwStats +
GET_OFFSET_STATS( index ) ) );
break;
}
}
count = XGE_COUNT_STATS -
((device_id == DEVICE_ID_XFRAME_II) ? 0 : XGE_COUNT_EXTENDED_STATS);
XGE_PRINT_STATS(fdAll,(const char *) statsInfo[index].name,
statsInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
fdAll = fopen("stats.log", "w+");
if(!fdAll)
goto _exit;
void
logPciConf( void * pciConf )
{
int index = 0;
XGE_PRINT_HEADER_STATS(fdAll);
for(index = 0; index < count ; index++) {
switch(statsInfo[index].type) {
case 2:
statsInfo[index].value =
*((u16 *)((unsigned char *)hw_stats +
GET_OFFSET_STATS(index)));
break;
case 4:
statsInfo[index].value =
*((u32 *)((unsigned char *) hw_stats +
GET_OFFSET_STATS(index)));
break;
case 8:
statsInfo[index].value =
*((u64 *)((unsigned char *)hw_stats +
GET_OFFSET_STATS(index)));
break;
}
fdAll = fopen( "pciconf.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_PCICONF(fdAll);
for( index = 0; index < XGE_COUNT_PCICONF; index++ )
{
pciconfInfo[index].value =
*( ( u16 * )( ( unsigned char * )pciConf +
GET_OFFSET_PCICONF(index) ) );
XGE_PRINT_PCICONF(fdAll,(const char *) pciconfInfo[index].name,
GET_OFFSET_PCICONF(index), pciconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logDevConf( void * devConf )
{
int index = 0;
fdAll = fopen( "devconf.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_DEVCONF(fdAll);
for( index = 0; index < XGE_COUNT_DEVCONF; index++ )
{
devconfInfo[index].value =
*( ( u32 * )( ( unsigned char * )devConf +
( index * ( sizeof( int ) ) ) ) );
XGE_PRINT_DEVCONF(fdAll,(const char *) devconfInfo[index].name,
devconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose( fdAll );
}
}
void
logRegInfo( void * regBuffer )
{
int index = 0;
fdAll = fopen( "reginfo.log", "w+" );
if( fdAll )
{
XGE_PRINT_HEADER_REGS(fdAll);
for( index = 0; index < XGE_COUNT_REGS; index++ )
{
regInfo[index].value =
*( ( u64 * )( ( unsigned char * )regBuffer +
regInfo[index].offset ) );
XGE_PRINT_REGS(fdAll,(const char *) regInfo[index].name,
regInfo[index].offset, regInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
}
void
logReadReg(u64 offset,u64 temp)
{
int index=0;
fdAll = fopen( "readreg.log", "w+");
if( fdAll )
{
XGE_PRINT_READ_HEADER_REGS(fdAll);
regInfo[index].offset = offset ;
regInfo[index].value = temp ;
printf("0x%.8X\t0x%.16llX\n",regInfo[index].offset, regInfo[index].value);
XGE_PRINT_STATS(fdAll,(const char *) statsInfo[index].name,
statsInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_pciconf
* Prints/logs PCI configuration space
*
* @pci_conf PCI Configuration
*/
void
logIntrStats( void * intrStats )
xge_print_pciconf(void * pci_conf)
{
int index = 0;
int index = 0;
fdAll = fopen( "intrstats.log", "w+" );
if(fdAll)
{
XGE_PRINT_HEADER_STATS(fdAll);
fdAll = fopen("pciconf.log", "w+");
if(!fdAll)
goto _exit;
for( index = 0; index < XGE_COUNT_INTRSTAT; index++ )
{
intrInfo[index].value =
*( ( u32 * )( ( unsigned char * )intrStats +
( index * ( sizeof( u32 ) ) ) ) );
XGE_PRINT_STATS(fdAll,(const char *) intrInfo[index].name,
intrInfo[index].value);
}
XGE_PRINT_HEADER_PCICONF(fdAll);
for(index = 0; index < XGE_COUNT_PCICONF; index++) {
pciconfInfo[index].value = *((u16 *)((unsigned char *)pci_conf +
GET_OFFSET_PCICONF(index)));
XGE_PRINT_PCICONF(fdAll,(const char *) pciconfInfo[index].name,
GET_OFFSET_PCICONF(index), pciconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_devconf
* Prints/logs Device Configuration
*
* @dev_conf Device Configuration
*/
void
logTcodeStats( void * tcodeStats )
xge_print_devconf(void * dev_conf)
{
int index = 0;
int index = 0;
fdAll = fopen( "tcodestats.log", "w+" );
if(fdAll)
{
XGE_PRINT_HEADER_STATS(fdAll);
fdAll = fopen("devconf.log", "w+");
if(!fdAll)
goto _exit;
for( index = 0; index < XGE_COUNT_TCODESTAT; index++ )
{
if(!(tcodeInfo[index].flag))
{
switch( tcodeInfo[index].type )
{
case 2:
{
tcodeInfo[index].value =
*( ( u16 * )( ( unsigned char * )tcodeStats +
( index * ( sizeof( u16 ) ) ) ) );
break;
}
case 4:
{
tcodeInfo[index].value =
*( ( u32 * )( ( unsigned char * )tcodeStats +
( index * ( sizeof( u32 ) ) ) ) );
break;
}
}
XGE_PRINT_STATS(fdAll,(const char *) tcodeInfo[index].name,
tcodeInfo[index].value);
}
}
XGE_PRINT_HEADER_DEVCONF(fdAll);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
for(index = 0; index < XGE_COUNT_DEVCONF; index++) {
devconfInfo[index].value = *((u32 *)((unsigned char *)dev_conf +
(index * (sizeof(int)))));
XGE_PRINT_DEVCONF(fdAll,(const char *) devconfInfo[index].name,
devconfInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose( fdAll );
_exit:
return;
}
/**
* xge_print_registers
* Prints/logs Register values
*
* @registers Register values
*/
void
logDriverInfo( char *version )
xge_print_registers(void * registers)
{
fdAll = fopen( "driverinfo.log", "w+");
if (fdAll)
{
XGE_PRINT_LINE(fdAll);
printf("DRIVER VERSION : %s\n",version);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
}
int index = 0;
fdAll = fopen("reginfo.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_HEADER_REGS(fdAll);
for(index = 0; index < XGE_COUNT_REGS; index++) {
regInfo[index].value = *((u64 *)((unsigned char *)registers +
regInfo[index].offset));
XGE_PRINT_REGS(fdAll,(const char *) regInfo[index].name,
regInfo[index].offset, regInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_register
* Prints/logs a register value
*
* @offset Offset of the register
* @value Value read from the register
*/
void
xge_print_register(u64 offset, u64 value)
{
int index = 0;
fdAll = fopen("readreg.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_READ_HEADER_REGS(fdAll);
regInfo[index].offset = offset;
regInfo[index].value = value;
printf("0x%.8X\t0x%.16llX\n", regInfo[index].offset,
regInfo[index].value);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_devstats
* Prints Device Statistics
*
* @dev_stats Device Statistics
*/
void
xge_print_devstats(void *dev_stats)
{
int index = 0;
fdAll = fopen("intrstats.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_HEADER_STATS(fdAll);
for(index = 0; index < XGE_COUNT_INTRSTAT; index++) {
intrInfo[index].value = *((u32 *)((unsigned char *)dev_stats +
(index * (sizeof(u32)))));
XGE_PRINT_STATS(fdAll,(const char *) intrInfo[index].name,
intrInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_swstats
* Prints/logs Software Statistics
*
* @sw_stats Software statistics
*/
void
xge_print_swstats(void * sw_stats)
{
int index = 0;
fdAll = fopen("tcodestats.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_HEADER_STATS(fdAll);
for(index = 0; index < XGE_COUNT_SWSTAT; index++) {
if(!(tcodeInfo[index].flag)) {
switch(tcodeInfo[index].type) {
case 2:
tcodeInfo[index].value =
*((u16 *)((unsigned char *)sw_stats +
(index * (sizeof(u16)))));
break;
case 4:
tcodeInfo[index].value =
*((u32 *)((unsigned char *)sw_stats +
(index * (sizeof(u32)))));
break;
}
XGE_PRINT_STATS(fdAll,(const char *) tcodeInfo[index].name,
tcodeInfo[index].value);
}
}
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_drv_version
* Prints/logs driver version
*
* @version Driver version
*/
void
xge_print_drv_version(char *version)
{
fdAll = fopen("driverinfo.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_LINE(fdAll);
printf("Driver Version: %s\n", version);
XGE_PRINT_LINE(fdAll);
fclose(fdAll);
_exit:
return;
}
/**
* xge_print_drvstats
* Prints/logs Driver Statistics
*
* @driver_stats Driver Statistics
*/
void
xge_print_drvstats(void * driver_stats)
{
int index = 0;
fdAll = fopen("driver_stats.log", "w+");
if(!fdAll)
goto _exit;
XGE_PRINT_HEADER_STATS(fdAll);
for(index = 0; index < XGE_COUNT_DRIVERSTATS; index++) {
driverInfo[index].value = *((u64 *)((unsigned char *)driver_stats +
(index * (sizeof(u64)))));
XGE_PRINT_STATS(fdAll,(const char *) driverInfo[index].name,
driverInfo[index].value);
}
XGE_PRINT_LINE(fdAll);
fclose( fdAll );
_exit:
return;
}

File diff suppressed because it is too large Load Diff