Remove the Octeon SDK

Octeon is a MIPS-only target.

Sponsored by:		Netflix
This commit is contained in:
Warner Losh 2021-12-31 00:28:12 -07:00
parent cb264bc716
commit 373d5df3e3
247 changed files with 0 additions and 272217 deletions

View File

@ -1,209 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Cavium Inc. Internet Protocol (IP)
*
* Definitions for the Internet Protocol (IP) support.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMIP_H__
#define __CVMIP_H__
/*
* IP protocol values (1 byte)
*
*/
#define CVMIP_PROTO_ICMP 1 /* Internet Control Message Protocol */
#define CVMIP_PROTO_TCP 6 /* Transmission Control Protocol */
#define CVMIP_PROTO_UDP 17 /* User Datagram Protocol */
#define CVMIP_PROTO_ESP 50 /* Encapsulated Security Payload */
#define CVMIP_PROTO_AH 51 /* Authentication Header */
/**
* network packet header definitions
* (originally from octane_hw.h)
*
*/
/**
* UDP Packet header
*
* The source/destination port pair can be read either as one 32-bit
* word (prts.u32 / prts.s32) or as the two 16-bit fields in prts.s.
*/
typedef struct {
union {
int32_t s32 ;
uint32_t u32 ;
struct {
uint16_t src_prt ;
uint16_t dst_prt ;
} s;
} prts;
uint16_t len ; /* length of UDP header plus payload, in bytes */
uint16_t chksum ; /* UDP checksum */
} cvmip_udp_hdr_t;
/**
* TCP Packet header
*
* Field order places the 4-bit header length before the flag bits,
* i.e. most-significant-bit-first allocation.
* NOTE(review): bitfield allocation order is compiler/endianness
* dependent; this layout assumes a big-endian target (Octeon) — confirm.
*/
typedef struct {
uint16_t src_prt ;
uint16_t dst_prt ;
uint32_t seq ;
uint32_t ack_seq ;
uint32_t hlen :4; /* header length in 32-bit words */
uint32_t rsvd :6; /* reserved bits */
uint32_t urg :1;
uint32_t ack :1;
uint32_t psh :1;
uint32_t rst :1;
uint32_t syn :1;
uint32_t fin :1;
uint16_t win_sz ; /* receive window size */
uint16_t chksum ;
uint16_t urg_ptr ;
uint32_t junk ; /* NOTE(review): trailing 32-bit pad; purpose not evident from this header — verify against users */
} cvmip_tcp_hdr_t;
/**
* L4 Packet header
*
* Overlays the UDP and TCP header views over the same storage; each
* anonymous-struct view carries a trailing "dat" pad so both cover the
* same total L4 region.
*/
typedef union {
cvmip_udp_hdr_t udphdr;
cvmip_tcp_hdr_t tcphdr;
struct {
union {
int32_t s32 ;
uint32_t u32 ;
struct {
uint16_t src_prt;
uint16_t dst_prt;
} s;
} prts;
uint16_t len ;
uint16_t chksum ;
char dat[48] ; // 48 for IPv6 with no extension hdrs, 64 for IPv4 without options
} udp;
struct {
uint16_t src_prt ;
uint16_t dst_prt ;
uint32_t seq ;
uint32_t ack_seq ;
uint32_t hlen :4;
uint32_t rsvd :6;
uint32_t urg :1;
uint32_t ack :1;
uint32_t psh :1;
uint32_t rst :1;
uint32_t syn :1;
uint32_t fin :1;
uint16_t win_sz ;
uint16_t chksum ;
uint16_t urg_ptr ;
char dat[36] ; // 36 for IPv6 with no extension hdrs, 52 for IPv4 without options
} tcp;
} cvmip_l4_info_t;
/**
* Special struct to add a pad to IPv4 header
*
* NOTE(review): the leading 32-bit pad presumably aligns src_dst so the
* address pair can be read as one 64-bit word — confirm against users.
*/
typedef struct {
uint32_t pad; /* padding word, not part of the on-wire header */
uint32_t version : 4; /* IP version (4) */
uint32_t hl : 4; /* header length in 32-bit words */
uint8_t tos ; /* type of service */
uint16_t len ; /* total length: header plus payload */
uint16_t id ; /* identification (fragment reassembly) */
uint32_t mbz : 1; /* must be zero */
uint32_t df : 1; /* don't-fragment flag */
uint32_t mf : 1; /* more-fragments flag */
uint32_t off :13; /* fragment offset */
uint8_t ttl ; /* time to live */
uint8_t protocol; /* payload protocol, e.g. CVMIP_PROTO_TCP */
uint16_t chksum ; /* header checksum */
union {
uint64_t u64;
struct {
uint32_t src;
uint32_t dst;
} s;
} src_dst; /* source/destination, also addressable as one 64-bit word */
} cvmip_ipv4_hdr_t;
/**
* IPv6 Packet header
*/
typedef struct {
uint32_t version : 4; /* IP version (6) */
uint32_t v6class : 8; /* traffic class */
uint32_t flow :20; /* flow label */
uint16_t len ; // includes extension headers plus payload (add 40 to be equiv to v4 len field)
uint8_t next_hdr; // equivalent to the v4 protocol field
uint8_t hop_lim ; // equivalent to the v4 TTL field
union {
uint64_t u64[4];
struct {
uint64_t src[2];
uint64_t dst[2];
} s;
} src_dst; /* 128-bit source and destination addresses */
} cvmip_ipv6_hdr_t;
#endif /* __CVMIP_H__ */

View File

@ -1,112 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* This file defines macros for use in determining the current calling ABI.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_ABI_H__
#define __CVMX_ABI_H__
#if defined(__FreeBSD__) && defined(_KERNEL)
#include <machine/endian.h>
#else
#ifndef __U_BOOT__
#include <endian.h>
#endif
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Check for N32 ABI, defined for 32-bit Simple Exec applications
and Linux N32 ABI.*/
#if (defined _ABIN32 && _MIPS_SIM == _ABIN32)
#define CVMX_ABI_N32
/* Check for N64 ABI, defined for 64-bit Linux toolchain. */
#elif (defined _ABI64 && _MIPS_SIM == _ABI64)
#define CVMX_ABI_N64
/* Check for O32 ABI, defined for Linux O32 ABI, not supported yet. */
#elif (defined _ABIO32 && _MIPS_SIM == _ABIO32)
#define CVMX_ABI_O32
/* Check for EABI ABI, defined for 64-bit Simple Exec applications. */
#else
#define CVMX_ABI_EABI
#endif
/* Derive __BYTE_ORDER when the platform's endian.h did not provide it.
Octeon is big-endian, so the "nothing defined" case defaults to big. */
#ifndef __BYTE_ORDER
#if defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
#define __BYTE_ORDER __BIG_ENDIAN
#elif !defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
#define __BYTE_ORDER __LITTLE_ENDIAN
#define __BIG_ENDIAN 4321
#elif !defined(__BIG_ENDIAN) && !defined(__LITTLE_ENDIAN)
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __BIG_ENDIAN
#else
#error Unable to determine Endian mode
#endif
#endif
/* For compatibility with Linux definitions... */
#if __BYTE_ORDER == __BIG_ENDIAN
# ifndef __BIG_ENDIAN_BITFIELD
# define __BIG_ENDIAN_BITFIELD
# endif
#else
# ifndef __LITTLE_ENDIAN_BITFIELD
# define __LITTLE_ENDIAN_BITFIELD
# endif
#endif
#if defined(__BIG_ENDIAN_BITFIELD) && defined(__LITTLE_ENDIAN_BITFIELD)
# error Cannot define both __BIG_ENDIAN_BITFIELD and __LITTLE_ENDIAN_BITFIELD
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_ABI_H__ */

View File

@ -1,725 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Functions for accessing memory and CSRs on Octeon when we are compiling
* natively.
*
* <hr>$Revision: 38306 $<hr>
*/
#ifndef __CVMX_ACCESS_NATIVE_H__
#define __CVMX_ACCESS_NATIVE_H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* Returns the Octeon processor ID.
*
* @return Octeon processor ID from COP0 register 15 select 0 (PRId),
* or the value cached at application init for Linux userspace
* builds (userspace cannot execute mfc0).
*/
static inline uint32_t cvmx_get_proc_id(void)
{
#ifdef CVMX_BUILD_FOR_LINUX_USER
extern uint32_t cvmx_app_init_processor_id;
return cvmx_app_init_processor_id;
#else
uint32_t id;
asm ("mfc0 %0, $15,0" : "=r" (id));
return id;
#endif
}
/**
* Convert a memory pointer (void*) into a hardware compatible
* memory address (uint64_t). Octeon hardware widgets don't
* understand logical addresses.
*
* The mapping is environment specific: each #ifdef branch below handles
* one runtime (u-boot, Linux kernel/user, VxWorks, FreeBSD kernel,
* Simple Executive standalone).
*
* @param ptr C style memory pointer
* @return Hardware physical address
*/
static inline uint64_t cvmx_ptr_to_phys(void *ptr)
{
if (CVMX_ENABLE_PARAMETER_CHECKING)
cvmx_warn_if(ptr==NULL, "cvmx_ptr_to_phys() passed a NULL pointer\n");
#ifdef CVMX_BUILD_FOR_UBOOT
uint64_t uboot_tlb_ptr_to_phys(void *ptr);
if (((uint32_t)ptr) < 0x80000000)
{
/* Handle useg (unmapped due to ERL) here*/
return(CAST64(ptr) & 0x7FFFFFFF);
}
else if (((uint32_t)ptr) < 0xC0000000)
{
/* Here we handle KSEG0/KSEG1 _pointers_. We know we are dealing
** with 32 bit only values, so we treat them that way. Note that
** a cvmx_phys_to_ptr(cvmx_ptr_to_phys(X)) will not return X in this case,
** but the physical address of the KSEG0/KSEG1 address. */
return(CAST64(ptr) & 0x1FFFFFFF);
}
else
return(uboot_tlb_ptr_to_phys(ptr)); /* Should not get here in !TLB case */
#endif
#ifdef __linux__
if (sizeof(void*) == 8)
{
/* We're running in 64 bit mode. Normally this means that we can use
40 bits of address space (the hardware limit). Unfortunately there
is one case where we need to limit this to 30 bits, sign extended
32 bit. Although these are 64 bits wide, only 30 bits can be used */
if ((CAST64(ptr) >> 62) == 3)
return CAST64(ptr) & cvmx_build_mask(30);
else
return CAST64(ptr) & cvmx_build_mask(40);
}
else
{
#ifdef __KERNEL__
return (long)(ptr) & 0x1fffffff;
#else
/* 32-bit userspace: the app's mapping is offset from physical by a
fixed amount recorded at init time. */
extern uint64_t linux_mem32_offset;
if (cvmx_likely(ptr))
return CAST64(ptr) - linux_mem32_offset;
else
return 0;
#endif
}
#elif defined(_WRS_KERNEL)
return (long)(ptr) & 0x7fffffff;
#elif defined(VXWORKS_USER_MAPPINGS)
/* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
uint64_t address = (long)ptr;
if (address & 0x80000000)
return address & 0x1fffffff; /* KSEG pointers directly map the lower 256MB and bootbus */
else if ((address >= 0x10000000) && (address < 0x20000000))
return address + 0x400000000ull; /* 256MB-512MB is a virtual mapping for the 2nd 256MB */
else
return address; /* Looks to be a 1:1 mapped userspace pointer */
#elif defined(__FreeBSD__) && defined(_KERNEL)
return (pmap_kextract((vm_offset_t)ptr));
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
/* We are assuming we're running the Simple Executive standalone. In this
mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
addresses are never used. Since we know all this, save the masking
cycles and do nothing */
return CAST64(ptr);
#else
if (sizeof(void*) == 8)
{
/* We're running in 64 bit mode. Normally this means that we can use
40 bits of address space (the hardware limit). Unfortunately there
is one case where we need to limit this to 30 bits, sign extended
32 bit. Although these are 64 bits wide, only 30 bits can be used */
if ((CAST64(ptr) >> 62) == 3)
return CAST64(ptr) & cvmx_build_mask(30);
else
return CAST64(ptr) & cvmx_build_mask(40);
}
else
return (long)(ptr) & 0x7fffffff;
#endif
#endif
}
/**
* Convert a hardware physical address (uint64_t) into a
* memory pointer (void *).
*
* Inverse of cvmx_ptr_to_phys(); each #ifdef branch handles one runtime
* environment (u-boot, Linux kernel/user, VxWorks, FreeBSD kernel,
* Simple Executive standalone).
*
* @param physical_address
* Hardware physical address to memory
* @return Pointer to memory
*/
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
if (CVMX_ENABLE_PARAMETER_CHECKING)
cvmx_warn_if(physical_address==0, "cvmx_phys_to_ptr() passed a zero address\n");
#ifdef CVMX_BUILD_FOR_UBOOT
/* U-boot is a special case, as it is running in 32 bit mode, using the TLB to map code/data
** which can have a physical address above the 32 bit address space. 1-1 mappings are used
** to allow the low 2 GBytes to be accessed as in error level.
**
** NOTE: This conversion can cause problems in u-boot, as users may want to enter addresses
** like 0xBFC00000 (kseg1 boot bus address), which is a valid 64 bit physical address,
** but is likely intended to be a boot bus address. */
if (physical_address < 0x80000000)
{
/* Handle useg here. ERL is set, so useg is unmapped. This is the only physical
** address range that is directly addressable by u-boot. */
return CASTPTR(void, physical_address);
}
else
{
DECLARE_GLOBAL_DATA_PTR;
extern char uboot_start;
/* Above 0x80000000 we can only support one case - a physical address
** that is mapped for u-boot code/data. We check against the u-boot mem range,
** and return NULL if it is out of this range.
*/
if (physical_address >= gd->bd->bi_uboot_ram_addr
&& physical_address < gd->bd->bi_uboot_ram_addr + gd->bd->bi_uboot_ram_used_size)
{
return ((char *)&uboot_start + (physical_address - gd->bd->bi_uboot_ram_addr));
}
else
return(NULL);
}
/* NOTE(review): unreachable — both branches of the if/else above return.
The dangling "else" below then attaches to whatever follows the #endif. */
if (physical_address >= 0x80000000)
return NULL;
else
#endif
#ifdef __linux__
if (sizeof(void*) == 8)
{
/* Just set the top bit, avoiding any TLB ugliness */
return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
}
else
{
#ifdef __KERNEL__
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#else
/* 32-bit userspace: undo the fixed offset applied by cvmx_ptr_to_phys. */
extern uint64_t linux_mem32_offset;
if (cvmx_likely(physical_address))
return CASTPTR(void, physical_address + linux_mem32_offset);
else
return NULL;
#endif
}
#elif defined(_WRS_KERNEL)
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#elif defined(VXWORKS_USER_MAPPINGS)
/* This mapping mode is used in vxWorks 5.5 to support 2GB of ram. The
2nd 256MB is mapped at 0x10000000 and the rest of memory is 1:1 */
if ((physical_address >= 0x10000000) && (physical_address < 0x20000000))
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
else if ((OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
&& (physical_address >= 0x410000000ull)
&& (physical_address < 0x420000000ull))
return CASTPTR(void, physical_address - 0x400000000ull);
else
return CASTPTR(void, physical_address);
#elif defined(__FreeBSD__) && defined(_KERNEL)
#if defined(__mips_n64)
return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
#else
if (physical_address < 0x20000000)
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
else
panic("%s: mapping high address (%#jx) not yet supported.\n", __func__, (uintmax_t)physical_address);
#endif
#else
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
/* We are assuming we're running the Simple Executive standalone. In this
mode the TLB is setup to perform 1:1 mapping and 32 bit sign extended
addresses are never used. Since we know all this, save bit insert
cycles and do nothing */
return CASTPTR(void, physical_address);
#else
/* Set the XKPHYS/KSEG0 bit as appropriate based on ABI */
if (sizeof(void*) == 8)
return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address));
else
return CASTPTR(void, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, physical_address));
#endif
#endif
}
/* The following #if controls the definition of the macro
CVMX_BUILD_WRITE64. This macro is used to build a store operation to
a full 64bit address. With a 64bit ABI, this can be done with a simple
pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
#define CVMX_BUILD_WRITE64(TYPE, ST) \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
{ \
*CASTPTR(volatile TYPE##_t, addr) = val; \
}
#elif defined(CVMX_ABI_N32)
/* The N32 ABI passes all 64bit quantities in a single register, so it is
possible to use the arguments directly. We have to use inline assembly
for the actual store since a pointer would truncate the address */
#define CVMX_BUILD_WRITE64(TYPE, ST) \
static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
{ \
asm volatile (ST " %[v], 0(%[c])" ::[v] "r" (val), [c] "r" (addr)); \
}
#elif defined(CVMX_ABI_O32)
#ifdef __KERNEL__
/* NOTE(review): the parameter is named LT here but ST in every other
variant; it is unused in this expansion, so behavior is unaffected. */
#define CVMX_BUILD_WRITE64(TYPE, LT) extern void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val);
#else
/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
separate registers. Assembly must be used to put them back together
before they're used. What should be a simple store becomes a
convoluted mess of shifts and ors */
#define CVMX_BUILD_WRITE64(TYPE, ST) \
static inline void cvmx_write64_##TYPE(uint64_t csr_addr, TYPE##_t val) \
{ \
if (sizeof(TYPE##_t) == 8) \
{ \
uint32_t csr_addrh = csr_addr>>32; \
uint32_t csr_addrl = csr_addr; \
uint32_t valh = (uint64_t)val>>32; \
uint32_t vall = val; \
uint32_t tmp1; \
uint32_t tmp2; \
uint32_t tmp3; \
\
asm volatile ( \
".set push\n" \
".set mips64\n" \
"dsll %[tmp1], %[valh], 32\n" \
"dsll %[tmp2], %[csrh], 32\n" \
"dsll %[tmp3], %[vall], 32\n" \
"dsrl %[tmp3], %[tmp3], 32\n" \
"or %[tmp1], %[tmp1], %[tmp3]\n" \
"dsll %[tmp3], %[csrl], 32\n" \
"dsrl %[tmp3], %[tmp3], 32\n" \
"or %[tmp2], %[tmp2], %[tmp3]\n" \
ST " %[tmp1], 0(%[tmp2])\n" \
".set pop\n" \
: [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2), [tmp3] "=&r" (tmp3)\
: [valh] "r" (valh), [vall] "r" (vall), \
[csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
); \
} \
else \
{ \
uint32_t csr_addrh = csr_addr>>32; \
uint32_t csr_addrl = csr_addr; \
uint32_t tmp1; \
uint32_t tmp2; \
\
asm volatile ( \
".set push\n" \
".set mips64\n" \
"dsll %[tmp1], %[csrh], 32\n" \
"dsll %[tmp2], %[csrl], 32\n" \
"dsrl %[tmp2], %[tmp2], 32\n" \
"or %[tmp1], %[tmp1], %[tmp2]\n" \
ST " %[val], 0(%[tmp1])\n" \
".set pop\n" \
: [tmp1] "=&r" (tmp1), [tmp2] "=&r" (tmp2) \
: [val] "r" (val), [csrh] "r" (csr_addrh), \
[csrl] "r" (csr_addrl) \
); \
} \
}
#endif
#else
/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI
#endif
/* The following #if controls the definition of the macro
CVMX_BUILD_READ64. This macro is used to build a load operation from
a full 64bit address. With a 64bit ABI, this can be done with a simple
pointer access. 32bit ABIs require more complicated assembly */
#if defined(CVMX_ABI_N64) || defined(CVMX_ABI_EABI)
/* We have a full 64bit ABI. Reading from a 64bit address can be done with
a simple volatile pointer */
#define CVMX_BUILD_READ64(TYPE, LT) \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
{ \
return *CASTPTR(volatile TYPE##_t, addr); \
}
#elif defined(CVMX_ABI_N32)
/* The N32 ABI passes all 64bit quantities in a single register, so it is
possible to use the arguments directly. We have to use inline assembly
for the actual load since a pointer would truncate the address */
#define CVMX_BUILD_READ64(TYPE, LT) \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
{ \
TYPE##_t val; \
asm volatile (LT " %[v], 0(%[c])": [v] "=r" (val) : [c] "r" (addr));\
return val; \
}
#elif defined(CVMX_ABI_O32)
#ifdef __KERNEL__
#define CVMX_BUILD_READ64(TYPE, LT) extern TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr);
#else
/* Ok, now the ugly stuff starts. O32 splits 64bit quantities into two
separate registers. Assembly must be used to put them back together
before they're used. What should be a simple load becomes a
convoluted mess of shifts and ors */
#define CVMX_BUILD_READ64(TYPE, LT) \
static inline TYPE##_t cvmx_read64_##TYPE(uint64_t csr_addr) \
{ \
if (sizeof(TYPE##_t) == 8) \
{ \
uint32_t csr_addrh = csr_addr>>32; \
uint32_t csr_addrl = csr_addr; \
uint32_t valh; \
uint32_t vall; \
\
asm volatile ( \
".set push\n" \
".set mips64\n" \
"dsll %[valh], %[csrh], 32\n" \
"dsll %[vall], %[csrl], 32\n" \
"dsrl %[vall], %[vall], 32\n" \
"or %[valh], %[valh], %[vall]\n" \
LT " %[vall], 0(%[valh])\n" \
"dsrl %[valh], %[vall], 32\n" \
"sll %[vall], 0\n" \
"sll %[valh], 0\n" \
".set pop\n" \
: [valh] "=&r" (valh), [vall] "=&r" (vall) \
: [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
); \
return ((uint64_t)valh<<32) | vall; \
} \
else \
{ \
uint32_t csr_addrh = csr_addr>>32; \
uint32_t csr_addrl = csr_addr; \
TYPE##_t val; \
uint32_t tmp; \
\
asm volatile ( \
".set push\n" \
".set mips64\n" \
"dsll %[val], %[csrh], 32\n" \
"dsll %[tmp], %[csrl], 32\n" \
"dsrl %[tmp], %[tmp], 32\n" \
"or %[val], %[val], %[tmp]\n" \
LT " %[val], 0(%[val])\n" \
".set pop\n" \
: [val] "=&r" (val), [tmp] "=&r" (tmp) \
: [csrh] "r" (csr_addrh), [csrl] "r" (csr_addrl) \
); \
return val; \
} \
}
#endif /* __KERNEL__ */
#else
/* cvmx-abi.h didn't recognize the ABI. Force the compile to fail. */
#error: Unsupported ABI
#endif
/* The following defines 8 functions for writing to a 64bit address. Each
takes two arguments, the address and the value to write.
cvmx_write64_int64 cvmx_write64_uint64
cvmx_write64_int32 cvmx_write64_uint32
cvmx_write64_int16 cvmx_write64_uint16
cvmx_write64_int8 cvmx_write64_uint8
(On O32 kernel builds these expand to extern prototypes instead of
inline definitions — see the __KERNEL__ case of the macros above.) */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
CVMX_BUILD_WRITE64(int8, "sb");
CVMX_BUILD_WRITE64(uint64, "sd");
CVMX_BUILD_WRITE64(uint32, "sw");
CVMX_BUILD_WRITE64(uint16, "sh");
CVMX_BUILD_WRITE64(uint8, "sb");
/* The following defines 8 functions for reading from a 64bit address. Each
takes the address as the only argument
cvmx_read64_int64 cvmx_read64_uint64
cvmx_read64_int32 cvmx_read64_uint32
cvmx_read64_int16 cvmx_read64_uint16
cvmx_read64_int8 cvmx_read64_uint8
(Note the unsigned narrow loads use the zero-extending lhu/lbu ops.) */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
CVMX_BUILD_READ64(int8, "lb");
CVMX_BUILD_READ64(uint64, "ld");
CVMX_BUILD_READ64(uint32, "lw");
CVMX_BUILD_READ64(uint16, "lhu");
CVMX_BUILD_READ64(uint8, "lbu");
/**
* Write a 64-bit value to a CSR given its full physical address.
*
* @param csr_addr full 64-bit CSR address
* @param val value to write
*/
static inline void cvmx_write_csr(uint64_t csr_addr, uint64_t val)
{
cvmx_write64_uint64(csr_addr, val);
/* Perform an immediate read after every write to an RSL register to force
the write to complete. It doesn't matter what RSL read we do, so we
choose CVMX_MIO_BOOT_BIST_STAT because it is fast and harmless */
if (((csr_addr >> 40) & 0x7ffff) == (0x118))
cvmx_read64_uint64(CVMX_MIO_BOOT_BIST_STAT);
}
/**
* Write a 64-bit value to an I/O address. Unlike cvmx_write_csr(), no
* read-back is performed to flush the write.
*
* @param io_addr full 64-bit I/O address
* @param val value to write
*/
static inline void cvmx_write_io(uint64_t io_addr, uint64_t val)
{
cvmx_write64_uint64(io_addr, val);
}
/**
* Read a 64-bit CSR given its full physical address.
*
* @param csr_addr full 64-bit CSR address
* @return the register value
*/
static inline uint64_t cvmx_read_csr(uint64_t csr_addr)
{
return cvmx_read64_uint64(csr_addr);
}
/**
* Issue an IOBDMA "send single" command word by storing it to the
* dedicated IOBDMA address.
*
* @param data the command word to submit
*/
static inline void cvmx_send_single(uint64_t data)
{
const uint64_t CVMX_IOBDMA_SENDSINGLE = 0xffffffffffffa200ull;
cvmx_write64_uint64(CVMX_IOBDMA_SENDSINGLE, data);
}
/**
* Start an asynchronous CSR read via IOBDMA.
*
* The command word packs the scratch word index, a transfer length of
* one, and the CSR address, then is submitted with cvmx_send_single().
*
* @param scraddr scratchpad byte offset for the result (8-byte units;
* divided by 8 to form the word index)
* @param csr_addr full 64-bit CSR address to read
*/
static inline void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr)
{
union
{
uint64_t u64;
struct {
uint64_t scraddr : 8;
uint64_t len : 8;
uint64_t addr :48;
} s;
} command;
/* Seed the word with the CSR address, then patch in the length and
scratch index fields. */
command.u64 = csr_addr;
command.s.len = 1;
command.s.scraddr = scraddr >> 3;
cvmx_send_single(command.u64);
}
/**
* Number of the Core on which the program is currently running.
*
* @return The current core's number (read via hardware register 0
* using RDHWR) — note: NOT the number of cores.
*/
static inline unsigned int cvmx_get_core_num(void)
{
unsigned int core_num;
CVMX_RDHWRNV(core_num, 0);
return core_num;
}
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
*
* @param val 32 bit value to count set bits in
*
* @return Number of bits set
*/
static inline uint32_t cvmx_pop(uint32_t val)
{
uint32_t pop;
CVMX_POP(pop, val);
return pop;
}
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for DPOP instruction (64-bit population count).
*
* @param val 64 bit value to count set bits in
*
* @return Number of bits set
*/
static inline int cvmx_dpop(uint64_t val)
{
int pop;
CVMX_DPOP(pop, val);
return pop;
}
/**
* @deprecated
* Provide current cycle counter as a return value. Deprecated, use
* cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
*
* @return current core-clock cycle counter
*/
static inline uint64_t cvmx_get_cycle(void)
{
return cvmx_clock_get_count(CVMX_CLOCK_CORE);
}
/**
* @deprecated
* Reads a chip global cycle counter. This counts SCLK cycles since
* chip reset. The counter is 64 bit. This function is deprecated as the rate
* of the global cycle counter is different between Octeon+ and Octeon2, use
* cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
* of SCLK may be different than the core clock.
*
* @return Global chip cycle count since chip reset.
*/
static inline uint64_t cvmx_get_cycle_global(void)
{
return cvmx_clock_get_count(CVMX_CLOCK_IPD);
}
/**
* Busy-wait for the specified number of core clock cycles.
*
* @param cycles core-clock cycles to spin for
*/
static inline void cvmx_wait(uint64_t cycles)
{
const uint64_t deadline = cvmx_get_cycle() + cycles;
for (;;)
{
if (cvmx_get_cycle() >= deadline)
break;
/* spin */
}
}
/**
* Busy-wait for approximately the given number of microseconds.
*
* @param usec microseconds to wait (converted to core-clock cycles)
*/
static inline void cvmx_wait_usec(uint64_t usec)
{
const uint64_t ticks = usec * cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
const uint64_t deadline = cvmx_get_cycle() + ticks;
while (cvmx_get_cycle() < deadline)
{
/* spin */
}
}
/**
* Busy-wait for the specified number of io (SCLK) clock cycles.
*
* @param cycles io-clock cycles to spin for
*/
static inline void cvmx_wait_io(uint64_t cycles)
{
const uint64_t deadline = cvmx_clock_get_count(CVMX_CLOCK_SCLK) + cycles;
do
{
/* spin; body intentionally empty */
} while (cvmx_clock_get_count(CVMX_CLOCK_SCLK) < deadline);
}
/**
* Perform a soft reset of Octeon by setting the soft_rst bit in the
* CIU soft-reset register. Does not return useful control flow on
* real hardware — the chip resets.
*
* @return none
*/
static inline void cvmx_reset_octeon(void)
{
cvmx_ciu_soft_rst_t ciu_soft_rst;
ciu_soft_rst.u64 = 0;
ciu_soft_rst.s.soft_rst = 1;
cvmx_write_csr(CVMX_CIU_SOFT_RST, ciu_soft_rst.u64);
}
/**
* Read a byte of fuse data
* @param byte_addr address to read
*
* @return the fuse data byte
*/
static inline uint8_t cvmx_fuse_read_byte(int byte_addr)
{
cvmx_mio_fus_rcmd_t read_cmd;
read_cmd.u64 = 0;
read_cmd.s.addr = byte_addr;
read_cmd.s.pend = 1;
cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
/* Busy-wait until the hardware clears the pend bit, re-reading the
command register each iteration. */
while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD)) && read_cmd.s.pend)
;
return(read_cmd.s.dat);
}
/**
* Read a single fuse bit
*
* @param fuse Fuse number (0-1024)
*
* @return fuse value: 0 or 1
*/
static inline int cvmx_fuse_read(int fuse)
{
/* Fetch the byte containing the fuse, then isolate the requested bit. */
int fuse_byte = cvmx_fuse_read_byte(fuse >> 3);
int bit_in_byte = fuse & 0x7;
return (fuse_byte >> bit_in_byte) & 1;
}
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_ACCESS_NATIVE_H__ */

View File

@ -1,242 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Function prototypes for accessing memory and CSRs on Octeon.
*
* <hr>$Revision: 38306 $<hr>
*/
#ifndef __CVMX_ACCESS_H__
#define __CVMX_ACCESS_H__
#ifdef __cplusplus
extern "C" {
#endif
/* We're going to assume that if we are compiling for Mips then we must be
running natively on Octeon. It is possible that this code could be
compiled on a non Octeon Mips that is acting as a PCI/PCIe host. In this
case this assumption will be wrong and cause issues. We can't key off of
__octeon__ since some people use stock gcc toolchains */
#if defined(__mips__) && !defined(CVMX_BUILD_FOR_LINUX_HOST)
#define CVMX_FUNCTION static inline
#else
/* Off-target (host) builds get extern declarations instead of inline
definitions. */
#define CVMX_FUNCTION extern
#endif
/**
* simprintf uses simulator tricks to speed up printouts. The format
* and args are passed to the simulator and processed natively on the host.
* Simprintf is limited to 7 arguments, and they all must use %ll (long long)
* format specifiers to be displayed correctly.
*
* @param format
*
* @return
*/
EXTERN_ASM void simprintf(const char *format, ...);
/**
* This function performs some default initialization of the Octeon executive.
* It initializes the cvmx_bootmem memory allocator with the list of physical
* memory provided by the bootloader, and creates 1-1 TLB mappings for this
* memory. This function should be called on all cores that will use either the
* bootmem allocator or the 1-1 TLB mappings. Applications which require a
* different configuration can replace this function with a suitable application
* specific one.
*
* @return 0 on success
* -1 on failure
*/
extern int cvmx_user_app_init(void);
/**
* Returns the Octeon processor ID.
*
* @return Octeon processor ID from COP0
*/
CVMX_FUNCTION uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
/**
 * Convert a memory pointer (void*) into a hardware compatible
* memory address (uint64_t). Octeon hardware widgets don't
* understand logical addresses.
*
* @param ptr C style memory pointer
* @return Hardware physical address
*/
CVMX_FUNCTION uint64_t cvmx_ptr_to_phys(void *ptr);
/**
* Convert a hardware physical address (uint64_t) into a
* memory pointer (void *).
*
* @param physical_address
* Hardware physical address to memory
* @return Pointer to memory
*/
CVMX_FUNCTION void *cvmx_phys_to_ptr(uint64_t physical_address);
CVMX_FUNCTION void cvmx_write64_int64(uint64_t address, int64_t value);
CVMX_FUNCTION void cvmx_write64_uint64(uint64_t address, uint64_t value);
CVMX_FUNCTION void cvmx_write64_int32(uint64_t address, int32_t value);
CVMX_FUNCTION void cvmx_write64_uint32(uint64_t address, uint32_t value);
CVMX_FUNCTION void cvmx_write64_int16(uint64_t address, int16_t value);
CVMX_FUNCTION void cvmx_write64_uint16(uint64_t address, uint16_t value);
CVMX_FUNCTION void cvmx_write64_int8(uint64_t address, int8_t value);
CVMX_FUNCTION void cvmx_write64_uint8(uint64_t address, uint8_t value);
CVMX_FUNCTION void cvmx_write_csr(uint64_t csr_addr, uint64_t val);
CVMX_FUNCTION void cvmx_write_io(uint64_t io_addr, uint64_t val);
CVMX_FUNCTION int64_t cvmx_read64_int64(uint64_t address);
CVMX_FUNCTION uint64_t cvmx_read64_uint64(uint64_t address);
CVMX_FUNCTION int32_t cvmx_read64_int32(uint64_t address);
CVMX_FUNCTION uint32_t cvmx_read64_uint32(uint64_t address);
CVMX_FUNCTION int16_t cvmx_read64_int16(uint64_t address);
CVMX_FUNCTION uint16_t cvmx_read64_uint16(uint64_t address);
CVMX_FUNCTION int8_t cvmx_read64_int8(uint64_t address);
CVMX_FUNCTION uint8_t cvmx_read64_uint8(uint64_t address);
CVMX_FUNCTION uint64_t cvmx_read_csr(uint64_t csr_addr);
CVMX_FUNCTION void cvmx_send_single(uint64_t data);
CVMX_FUNCTION void cvmx_read_csr_async(uint64_t scraddr, uint64_t csr_addr);
/**
 * Returns the number of the core on which the program is currently running.
 *
 * @return Core number (not the total number of cores)
 */
CVMX_FUNCTION unsigned int cvmx_get_core_num(void);
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
*
* @param val 32 bit value to count set bits in
*
* @return Number of bits set
*/
CVMX_FUNCTION uint32_t cvmx_pop(uint32_t val);
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for DPOP instruction.
*
* @param val 64 bit value to count set bits in
*
* @return Number of bits set
*/
CVMX_FUNCTION int cvmx_dpop(uint64_t val);
/**
* @deprecated
* Provide current cycle counter as a return value. Deprecated, use
* cvmx_clock_get_count(CVMX_CLOCK_CORE) to get cycle counter.
*
* @return current cycle counter
*/
CVMX_FUNCTION uint64_t cvmx_get_cycle(void);
/**
* @deprecated
* Reads a chip global cycle counter. This counts SCLK cycles since
* chip reset. The counter is 64 bit. This function is deprecated as the rate
* of the global cycle counter is different between Octeon+ and Octeon2, use
* cvmx_clock_get_count(CVMX_CLOCK_SCLK) instead. For Octeon2, the clock rate
 * of SCLK may be different than the core clock.
*
* @return Global chip cycle count since chip reset.
*/
CVMX_FUNCTION uint64_t cvmx_get_cycle_global(void) __attribute__((deprecated));
/**
* Wait for the specified number of core clock cycles
*
* @param cycles
*/
CVMX_FUNCTION void cvmx_wait(uint64_t cycles);
/**
* Wait for the specified number of micro seconds
*
* @param usec micro seconds to wait
*/
CVMX_FUNCTION void cvmx_wait_usec(uint64_t usec);
/**
* Wait for the specified number of io clock cycles
*
* @param cycles
*/
CVMX_FUNCTION void cvmx_wait_io(uint64_t cycles);
/**
* Perform a soft reset of Octeon
*
* @return
*/
CVMX_FUNCTION void cvmx_reset_octeon(void);
/**
* Read a byte of fuse data
* @param byte_addr address to read
*
* @return fuse value: 0 or 1
*/
CVMX_FUNCTION uint8_t cvmx_fuse_read_byte(int byte_addr);
/**
* Read a single fuse bit
*
* @param fuse Fuse number (0-1024)
*
* @return fuse value: 0 or 1
*/
CVMX_FUNCTION int cvmx_fuse_read(int fuse);
#undef CVMX_FUNCTION
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_ACCESS_H__ */

View File

@ -1,266 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Typedefs and defines for working with Octeon physical addresses.
*
* <hr>$Revision: 38306 $<hr>
*/
#ifndef __CVMX_ADDRESS_H__
#define __CVMX_ADDRESS_H__
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
#include "cvmx-abi.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
CVMX_MIPS_SPACE_XKSEG = 3LL,
CVMX_MIPS_SPACE_XKPHYS = 2LL,
CVMX_MIPS_SPACE_XSSEG = 1LL,
CVMX_MIPS_SPACE_XUSEG = 0LL
} cvmx_mips_space_t;
typedef enum {
CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
} cvmx_mips_xkseg_space_t;
/* decodes <14:13> of a kseg3 window address */
typedef enum {
CVMX_ADD_WIN_SCR = 0L,
CVMX_ADD_WIN_DMA = 1L, /* see cvmx_add_win_dma_dec_t for further decode */
CVMX_ADD_WIN_UNUSED = 2L,
CVMX_ADD_WIN_UNUSED2 = 3L
} cvmx_add_win_dec_t;
/* decode within DMA space */
typedef enum {
CVMX_ADD_WIN_DMA_ADD = 0L, /* add store data to the write buffer entry, allocating it if necessary */
CVMX_ADD_WIN_DMA_SENDMEM = 1L, /* send out the write buffer entry to DRAM */
/* store data must be normal DRAM memory space address in this case */
CVMX_ADD_WIN_DMA_SENDDMA = 2L, /* send out the write buffer entry as an IOBDMA command */
/* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
CVMX_ADD_WIN_DMA_SENDIO = 3L, /* send out the write buffer entry as an IO write */
/* store data must be normal IO space address in this case */
CVMX_ADD_WIN_DMA_SENDSINGLE = 4L, /* send out a single-tick command on the NCB bus */
/* no write buffer data needed/used */
} cvmx_add_win_dma_dec_t;
/**
* Physical Address Decode
*
* Octeon-I HW never interprets this X (<39:36> reserved
* for future expansion), software should set to 0.
*
* - 0x0 XXX0 0000 0000 to DRAM Cached
* - 0x0 XXX0 0FFF FFFF
*
* - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
* - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
*
* - 0x0 XXX0 2000 0000 to DRAM Cached
* - 0x0 XXXF FFFF FFFF
*
* - 0x1 00X0 0000 0000 to Boot Bus Uncached
* - 0x1 00XF FFFF FFFF
*
* - 0x1 01X0 0000 0000 to Other NCB Uncached
* - 0x1 FFXF FFFF FFFF devices
*
* Decode of all Octeon addresses
*/
typedef union {
    uint64_t u64;
    /* Generic split: top two bits select the MIPS address space. */
    struct {
        cvmx_mips_space_t R : 2;
        uint64_t offset :62;
    } sva; /* mapped or unmapped virtual address */
    struct {
        uint64_t zeroes :33;
        uint64_t offset :31;
    } suseg; /* mapped USEG virtual addresses (typically) */
    struct {
        uint64_t ones :33;
        cvmx_mips_xkseg_space_t sp : 2;
        uint64_t offset :29;
    } sxkseg; /* mapped or unmapped virtual address */
    struct {
        cvmx_mips_space_t R : 2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
        uint64_t cca : 3; /* ignored by octeon */
        uint64_t mbz :10; /* must be zero */
        uint64_t pa :49; /* physical address */
    } sxkphys; /* physical address accessed through xkphys unmapped virtual address */
    struct {
        uint64_t mbz :15;
        uint64_t is_io : 1; /* if set, the address is uncached and resides on the NCB bus (original comment said "MCB" -- likely a typo, verify) */
        uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
        uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
        uint64_t offset :36;
    } sphys; /* physical address */
    struct {
        uint64_t zeroes :24; /* technically, <47:40> are dont-cares */
        uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
        uint64_t offset :36;
    } smem; /* physical mem address */
    struct {
        uint64_t mem_region :2;
        uint64_t mbz :13;
        uint64_t is_io : 1; /* 1 in this case */
        uint64_t did : 8; /* the hardware ignores this field when is_io==0, else device ID */
        uint64_t unaddr: 4; /* the hardware ignores <39:36> in Octeon I */
        uint64_t offset :36;
    } sio; /* physical IO address */
    struct {
        uint64_t ones : 49;
        cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_SCR (0) in this case */
        uint64_t addr : 13;
    } sscr; /* scratchpad virtual address - accessed through a window at the end of kseg3 */
    /* there should only be stores to IOBDMA space, no loads */
    struct {
        uint64_t ones : 49;
        cvmx_add_win_dec_t csrdec : 2; /* CVMX_ADD_WIN_DMA (1) in this case */
        uint64_t unused2: 3;
        cvmx_add_win_dma_dec_t type : 3;
        uint64_t addr : 7;
    } sdma; /* IOBDMA virtual address - accessed through a window at the end of kseg3 */
    struct {
        uint64_t didspace : 24;
        uint64_t unused : 40;
    } sfilldidspace;
} cvmx_addr_t;
/* These macros for used by 32 bit applications */
#define CVMX_MIPS32_SPACE_KSEG0 1l
#define CVMX_ADD_SEG32(segment, add) (((int32_t)segment << 31) | (int32_t)(add))
/* Currently all IOs are performed using XKPHYS addressing. Linux uses the
CvmMemCtl register to enable XKPHYS addressing to IO space from user mode.
Future OSes may need to change the upper bits of IO addresses. The
following define controls the upper two bits for all IO addresses generated
by the simple executive library */
#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
/* These macros simplify the process of creating common IO addresses */
#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
#ifndef CVMX_ADD_IO_SEG
#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
#endif
#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
#define CVMX_FULL_DID(did,subdid) (((did) << 3) | (subdid))
/* from include/ncb_rsl_id.v */
#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
#define CVMX_OCT_DID_GMX0 1ULL
#define CVMX_OCT_DID_GMX1 2ULL
#define CVMX_OCT_DID_PCI 3ULL
#define CVMX_OCT_DID_KEY 4ULL
#define CVMX_OCT_DID_FPA 5ULL
#define CVMX_OCT_DID_DFA 6ULL
#define CVMX_OCT_DID_ZIP 7ULL
#define CVMX_OCT_DID_RNG 8ULL
#define CVMX_OCT_DID_IPD 9ULL
#define CVMX_OCT_DID_PKT 10ULL
#define CVMX_OCT_DID_TIM 11ULL
#define CVMX_OCT_DID_TAG 12ULL
/* the rest are not on the IO bus */
#define CVMX_OCT_DID_L2C 16ULL
#define CVMX_OCT_DID_LMC 17ULL
#define CVMX_OCT_DID_SPX0 18ULL
#define CVMX_OCT_DID_SPX1 19ULL
#define CVMX_OCT_DID_PIP 20ULL
#define CVMX_OCT_DID_ASX0 22ULL
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT,2ULL)
#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG,0ULL)
#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG,1ULL)
#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG,2ULL)
#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG,3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG,4ULL)
#define CVMX_OCT_DID_TAG_TAG5 CVMX_FULL_DID(CVMX_OCT_DID_TAG,5ULL)
#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG,7ULL)
#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB,0ULL)
#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM,0ULL)
#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY,0ULL)
#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI,6ULL)
#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS,0ULL)
#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI,0ULL)
#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD,7ULL)
#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA,7ULL)
#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS,7ULL)
#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP,0ULL)
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
#ifdef CVMX_ABI_N32
#define UNMAPPED_PTR(x) ( (1U << 31) | x )
#else
#define UNMAPPED_PTR(x) ( (1ULL << 63) | x )
#endif
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_ADDRESS_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,885 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Provides APIs for applications to register for hotplug. It also provides
* APIs for requesting shutdown of a running target application.
*
* <hr>$Revision: $<hr>
*/
#include "cvmx-app-hotplug.h"
#include "cvmx-spinlock.h"
#include "cvmx-debug.h"
//#define DEBUG 1
static cvmx_app_hotplug_global_t *hotplug_global_ptr = 0;
#ifndef CVMX_BUILD_FOR_LINUX_USER
static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_sync_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
static CVMX_SHARED cvmx_spinlock_t cvmx_app_hotplug_lock = { CVMX_SPINLOCK_UNLOCKED_VAL };
static CVMX_SHARED cvmx_app_hotplug_info_t *cvmx_app_hotplug_info_ptr = NULL;
static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32], void *user_arg);
static void __cvmx_app_hotplug_sync(void);
static void __cvmx_app_hotplug_reset(void);
/* Declaring this array here is a compile time check to ensure that the
size of cvmx_app_hotplug_info_t is 1024. If the size is not 1024
the size of the array will be -1 and this results in a compilation
error */
char __hotplug_info_check[(sizeof(cvmx_app_hotplug_info_t) == 1024) ? 1 : -1];
/**
 * This routine registers an application for hotplug. It installs a handler for
 * any incoming shutdown request. It also registers a callback routine from the
 * application. This callback is invoked when the application receives a
 * shutdown notification.
 *
 * This routine only needs to be called once per application.
 *
 * @param fn Callback routine from the application.
 * @param arg Argument to the application callback routine.
 * @return Return 0 on success, -1 on failure
 *
 */
int cvmx_app_hotplug_register(void(*fn)(void*), void* arg)
{
    /* Find the list of applications launched by bootoct utility. */
    if (!(cvmx_app_hotplug_info_ptr = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask)))
    {
        /* Application not launched by bootoct? */
        /* Fixed "cmvx" typo in the error message. */
        printf("ERROR: cvmx_app_hotplug_register() failed\n");
        return -1;
    }
    /* Register the callback */
    cvmx_app_hotplug_info_ptr->data = CAST64(arg);
    cvmx_app_hotplug_info_ptr->shutdown_callback = CAST64(fn);
#ifdef DEBUG
    printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
           cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid);
#endif
    /* Shutdown requests are delivered through the MBOX0 interrupt. */
    cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
    return 0;
}
/**
 * This routine deprecates the cvmx_app_hotplug_register method. This
 * registers application for hotplug and the application will have CPU
 * hotplug callbacks. Various callbacks are specified in cb.
 * cvmx_app_hotplug_callbacks_t documents the callbacks
 *
 * This routine only needs to be called once per application.
 *
 * @param cb Callback routine from the application.
 * @param arg Argument to the application callback routines
 * @param app_shutdown When set to 1 the application will invoke core_shutdown
                       on each core. When set to 0 core shutdown will be
                       called invoked automatically after invoking the
                       application callback.
 * @return Return index of app on success, -1 on failure
 *
 */
int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *cb, void* arg,
                                 int app_shutdown)
{
    cvmx_app_hotplug_info_t *app_info;

    /* Find the list of applications launched by bootoct utility. */
    app_info = cvmx_app_hotplug_get_info(cvmx_sysinfo_get()->core_mask);
    cvmx_app_hotplug_info_ptr = app_info;
    if (!app_info)
    {
        /* Application not launched by bootoct? */
        /* Fixed: message previously named the wrong function ("cmvx_app_hotplug_register"). */
        printf("ERROR: cvmx_app_hotplug_register_cb() failed\n");
        return -1;
    }
    /* Register the full callback set from the application. */
    app_info->data = CAST64(arg);
    app_info->shutdown_callback = CAST64(cb->shutdown_callback);
    app_info->cores_added_callback = CAST64(cb->cores_added_callback);
    app_info->cores_removed_callback = CAST64(cb->cores_removed_callback);
    app_info->unplug_callback = CAST64(cb->unplug_core_callback);
    app_info->hotplug_start = CAST64(cb->hotplug_start);
    app_info->app_shutdown = app_shutdown;
#ifdef DEBUG
    printf("cvmx_app_hotplug_register(): coremask 0x%x valid %d\n",
           app_info->coremask, app_info->valid);
#endif
    /* Shutdown requests are delivered through the MBOX0 interrupt. */
    cvmx_interrupt_register(CVMX_IRQ_MBOX0, __cvmx_app_hotplug_shutdown, NULL);
    return 0;
}
/* Drop the calling core's bit from both the application core mask and the
   hotplug-activated core mask, under the hotplug lock. */
void cvmx_app_hotplug_remove_self_from_core_mask(void)
{
    uint32_t self_bit = 1ull << cvmx_get_core_num();

    cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
    cvmx_app_hotplug_info_ptr->coremask &= ~self_bit;
    cvmx_app_hotplug_info_ptr->hotplug_activated_coremask &= ~self_bit;
    cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
}
/**
 * Returns 1 if the running core is being unplugged, else it returns 0.
 */
int is_core_being_unplugged(void)
{
    uint64_t self_bit = 1ull << cvmx_get_core_num();

    return (cvmx_app_hotplug_info_ptr->unplug_cores & self_bit) ? 1 : 0;
}
/**
 * Activate the current application core for receiving hotplug shutdown requests.
 *
 * This routine makes sure that each core belonging to the application is enabled
 * to receive the shutdown notification and also provides a barrier sync to make
 * sure that all cores are ready.
 *
 * @return 0 on success, -1 if the application was never registered for hotplug.
 */
int cvmx_app_hotplug_activate(void)
{
    uint64_t cnt = 0;
    uint64_t cnt_interval = 10000000;

    /* Busy-wait until registration has published the info pointer.
       NOTE(review): the pointer is not volatile; assumes the compiler
       re-reads the global each iteration -- confirm. */
    while (!cvmx_app_hotplug_info_ptr)
    {
        cnt++;
        if ((cnt % cnt_interval) == 0)
            printf("waiting for cnt=%lld\n", (unsigned long long)cnt);
    }
    if (cvmx_app_hotplug_info_ptr->hplugged_cores & (1ull << cvmx_get_core_num()))
    {
#ifdef DEBUG
        printf("core=%d : is being hotplugged \n", cvmx_get_core_num());
#endif
        /* A hotplugged core adds itself to the application's core mask. */
        cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
        sys_info_ptr->core_mask |= 1ull << cvmx_get_core_num();
    }
    else
    {
        /* Original application cores rendezvous before activation. */
        __cvmx_app_hotplug_sync();
    }
    cvmx_spinlock_lock(&cvmx_app_hotplug_lock);
    if (!cvmx_app_hotplug_info_ptr)
    {
        cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
        printf("ERROR: This application is not registered for hotplug\n");
        return -1;
    }
    /* Enable the interrupt before we mark the core as activated */
    cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
    cvmx_app_hotplug_info_ptr->hotplug_activated_coremask |= (1ull << cvmx_get_core_num());
#ifdef DEBUG
    /* Fixed format mismatch: sizeof yields size_t, which must not be printed
       with %d (undefined behavior on LP64); cast to unsigned int for %u. */
    printf("cvmx_app_hotplug_activate(): coremask 0x%x valid %d sizeof %u\n",
           cvmx_app_hotplug_info_ptr->coremask, cvmx_app_hotplug_info_ptr->valid,
           (unsigned int)sizeof(*cvmx_app_hotplug_info_ptr));
#endif
    cvmx_spinlock_unlock(&cvmx_app_hotplug_lock);
    return 0;
}
/**
 * This routine is only required if cvmx_app_hotplug_shutdown_request() was called
 * with wait=0. This routine waits for the application shutdown to complete.
 *
 * @param coremask Coremask the application is running on.
 * @return 0 on success, -1 on error
 *
 */
int cvmx_app_hotplug_shutdown_complete(uint32_t coremask)
{
    cvmx_app_hotplug_info_t *info = cvmx_app_hotplug_get_info(coremask);

    if (info == NULL)
    {
        printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
        return -1;
    }
    /* Spin until the target application flags completion. */
    while (!info->shutdown_done)
        ;
    /* Clean up the hotplug info region for this app */
    bzero(info, sizeof(*info));
    return 0;
}
/**
 * Disable recognition of any incoming shutdown request.
 */
void cvmx_app_hotplug_shutdown_disable(void)
{
    /* Shutdown requests arrive via MBOX0; masking it blocks them. */
    cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
}
/**
 * Re-enable recognition of incoming shutdown requests.
 */
void cvmx_app_hotplug_shutdown_enable(void)
{
    /* Unmask MBOX0 so shutdown requests are delivered again. */
    cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
}
/**
 * Request shutdown of the currently running core. Should be
 * called by the application when it has been registered with
 * app_shutdown option set to 1.
 *
 * Never returns: both paths end in __cvmx_app_hotplug_reset(), which
 * parks the core.
 */
void cvmx_app_hotplug_core_shutdown(void)
{
    uint32_t flags;
    if (cvmx_app_hotplug_info_ptr->shutdown_cores)
    {
        /* Whole-application shutdown: all cores rendezvous; the first core
           of the mask clears the shared info block and flags completion. */
        cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
        __cvmx_app_hotplug_sync();
        if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
        {
            bzero(cvmx_app_hotplug_info_ptr,
                  sizeof(*cvmx_app_hotplug_info_ptr));
#ifdef DEBUG
            printf("__cvmx_app_hotplug_shutdown(): setting shutdown done! \n");
#endif
            /* shutdown_done is set after the bzero so the waiter in
               cvmx_app_hotplug_shutdown_complete() sees a cleared block. */
            cvmx_app_hotplug_info_ptr->shutdown_done = 1;
        }
        /* Tell the debugger that this application is finishing. */
        cvmx_debug_finish ();
        /* Interrupts stay disabled from here on: the core is reset below and
           never returns, so 'flags' is intentionally never restored. */
        flags = cvmx_interrupt_disable_save();
        __cvmx_app_hotplug_sync();
        /* Reset the core */
        __cvmx_app_hotplug_reset();
    }
    else
    {
        /* Single-core unplug: drop this core from both core masks, then
           disable interrupts and park the core. */
        cvmx_sysinfo_remove_self_from_core_mask();
        cvmx_app_hotplug_remove_self_from_core_mask();
        flags = cvmx_interrupt_disable_save();
        __cvmx_app_hotplug_reset();
    }
}
/*
 * ISR for the incoming shutdown request interrupt (MBOX0).
 *
 * Mailbox bit meanings, as dispatched below:
 *   bit 0 - full application shutdown
 *   bit 1 - unplug event (cores removed; this core may be one of them)
 *   bit 2 - cores added event
 */
static void __cvmx_app_hotplug_shutdown(int irq_number, uint64_t registers[32],
                                        void *user_arg)
{
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    uint64_t mbox;
    cvmx_app_hotplug_info_t *ai = cvmx_app_hotplug_info_ptr;
    int dbg = 0;
#ifdef DEBUG
    dbg = 1;
#endif
    /* Mask further requests while this one is handled; only the
       "not for this core" paths below unmask again. */
    cvmx_interrupt_mask_irq(CVMX_IRQ_MBOX0);
    mbox = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
    /* Clear the interrupt */
    cvmx_write_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()), mbox);
    /* Make sure the write above completes */
    cvmx_read_csr(CVMX_CIU_MBOX_CLRX(cvmx_get_core_num()));
    if (!cvmx_app_hotplug_info_ptr)
    {
        printf("ERROR: Application is not registered for hotplug!\n");
        return;
    }
    /* Refuse to act unless every application core activated hotplug. */
    if (ai->hotplug_activated_coremask != sys_info_ptr->core_mask)
    {
        printf("ERROR: Shutdown requested when not all app cores have "
               "activated hotplug\n" "Application coremask: 0x%x Hotplug "
               "coremask: 0x%x\n", (unsigned int)sys_info_ptr->core_mask,
               (unsigned int)ai->hotplug_activated_coremask);
        return;
    }
    if (mbox & 1ull)
    {
        /* Full application shutdown. */
        int core = cvmx_get_core_num();
        if (dbg)
            printf("Shutting down application .\n");
        /* Call the application's own callback function */
        if (ai->shutdown_callback)
        {
            ((void(*)(void*))(long)ai->shutdown_callback)(CASTPTR(void *, ai->data));
        }
        else
        {
            printf("ERROR : Shutdown callback has not been registered\n");
        }
        /* If the app did not opt into driving its own shutdown, do it here;
           cvmx_app_hotplug_core_shutdown() does not return. */
        if (!ai->app_shutdown)
        {
            if (dbg)
                printf("%s : core = %d Invoking app shutdown\n", __FUNCTION__, core);
            cvmx_app_hotplug_core_shutdown();
        }
    }
    else if (mbox & 2ull)
    {
        /* Unplug event: behavior depends on whether THIS core is listed
           in ai->unplug_cores. */
        int core = cvmx_get_core_num();
        int unplug = is_core_being_unplugged();
        if (dbg) printf("%s : core=%d Unplug event \n", __FUNCTION__, core);
        if (unplug)
        {
            /* Call the application's own callback function */
            if (ai->unplug_callback)
            {
                if (dbg) printf("%s : core=%d Calling unplug callback\n",
                                __FUNCTION__, core);
                ((void(*)(void*))(long)ai->unplug_callback)(CASTPTR(void *,
                                                                    ai->data));
            }
            if (!ai->app_shutdown)
            {
                if (dbg) printf("%s : core = %d Invoking app shutdown\n",
                                __FUNCTION__, core);
                cvmx_app_hotplug_core_shutdown();
            }
        }
        else
        {
            /* This core survives: notify the app and re-arm the interrupt. */
            if (ai->cores_removed_callback)
            {
                if (dbg) printf("%s : core=%d Calling cores removed callback\n",
                                __FUNCTION__, core);
                ((void(*)(uint32_t, void*))(long)ai->cores_removed_callback)
                    (ai->unplug_cores, CASTPTR(void *, ai->data));
            }
            cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
        }
    }
    else if (mbox & 4ull)
    {
        /* Cores-added event: notify the app and re-arm the interrupt. */
        int core = cvmx_get_core_num();
        if (dbg) printf("%s : core=%d Add cores event \n", __FUNCTION__, core);
        if (ai->cores_added_callback)
        {
            if (dbg) printf("%s : core=%d Calling cores added callback\n",
                            __FUNCTION__, core);
            ((void(*)(uint32_t, void*))(long)ai->cores_added_callback)
                (ai->hplugged_cores, CASTPTR(void *, ai->data));
        }
        cvmx_interrupt_unmask_irq(CVMX_IRQ_MBOX0);
    }
    else
    {
        /* NOTE(review): MBOX0 stays masked on this path -- confirm intended. */
        printf("ERROR: unexpected mbox=%llx\n", (unsigned long long)mbox);
    }
}
/*
 * Park the current core: jump to the idle loop that the loader placed in
 * the "idle-core-loop" named bootmem block. Never returns. If the named
 * block is missing, spin in a low-power wait loop instead.
 */
void __cvmx_app_hotplug_reset(void)
{
#define IDLE_CORE_BLOCK_NAME "idle-core-loop"
#define HPLUG_MAKE_XKPHYS(x) ((1ULL << 63) | (x))
    uint64_t reset_addr;
    const cvmx_bootmem_named_block_desc_t *block_desc;
    block_desc = cvmx_bootmem_find_named_block(IDLE_CORE_BLOCK_NAME);
    if (!block_desc) {
        cvmx_dprintf("Named block(%s) is not created\n", IDLE_CORE_BLOCK_NAME);
        /* loop here, should not happen */
        __asm__ volatile (
            ".set noreorder \n"
            "\tsync \n"
            "\tnop \n"
            "1:\twait \n"
            "\tb 1b \n"
            "\tnop \n"
            ".set reorder \n"
            ::
        );
    }
    /* Convert the block's physical base to an XKPHYS address and jump to it. */
    reset_addr = HPLUG_MAKE_XKPHYS(block_desc->base_addr);
    asm volatile (" .set push \n"
                  " .set mips64 \n"
                  " .set noreorder \n"
                  " move $2, %[addr] \n"
                  " jr $2 \n"
                  " nop \n"
                  " .set pop "
                  :: [addr] "r"(reset_addr)
                  : "$2");
    /*Should never reach here*/
    while (1) ;
}
/*
 * We need a separate sync operation from cvmx_coremask_barrier_sync() to
 * avoid a deadlock on state.lock, since the application itself may be doing a
 * cvmx_coremask_barrier_sync().
 */
static void __cvmx_app_hotplug_sync(void)
{
    /* Shared rendezvous mask; volatile because every core spins on it. */
    static CVMX_SHARED volatile uint32_t sync_coremask = 0;
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    /* Publish this core's arrival under the lock. */
    cvmx_spinlock_lock(&cvmx_app_hotplug_sync_lock);
    sync_coremask |= cvmx_coremask_core(cvmx_get_core_num());
    cvmx_spinlock_unlock(&cvmx_app_hotplug_sync_lock);
    /* Spin until every core of the application has arrived. */
    while (sync_coremask != sys_info_ptr->core_mask);
    /* Reset for the next barrier use. NOTE(review): every core performs
       this clear; relies on the store being idempotent -- confirm. */
    cvmx_spinlock_lock(&cvmx_app_hotplug_sync_lock);
    sync_coremask = 0;
    cvmx_spinlock_unlock(&cvmx_app_hotplug_sync_lock);
}
#endif /* CVMX_BUILD_FOR_LINUX_USER */
/**
 * Returns 1 if the running core is being hotplugged, else it returns 0.
 */
int is_core_being_hot_plugged(void)
{
#ifndef CVMX_BUILD_FOR_LINUX_USER
    if (!cvmx_app_hotplug_info_ptr)
        return 0;
    return (cvmx_app_hotplug_info_ptr->hplugged_cores &
            (1ull << cvmx_get_core_num())) ? 1 : 0;
#else
    /* Not applicable when running as a Linux user-space process. */
    return 0;
#endif
}
/*
 * Locate (and cache) the global hotplug region. On bare metal this is a
 * direct XKPHYS mapping of the named bootmem block; on Linux user space it
 * is mmap()ed from /dev/mem.
 *
 * @return Pointer to the global hotplug region, or NULL on failure.
 */
static cvmx_app_hotplug_global_t *cvmx_app_get_hotplug_global_ptr(void)
{
    const struct cvmx_bootmem_named_block_desc *block_desc;
    cvmx_app_hotplug_global_t *hgp;

    /* Return the cached pointer after the first successful lookup. */
    if (hotplug_global_ptr != 0) return hotplug_global_ptr;

    block_desc = cvmx_bootmem_find_named_block(CVMX_APP_HOTPLUG_INFO_REGION_NAME);
    if (!block_desc)
    {
        printf("ERROR: Hotplug info region is not setup\n");
        return NULL;
    }
    else
#ifdef CVMX_BUILD_FOR_LINUX_USER
    {
        size_t pg_sz = sysconf(_SC_PAGESIZE), size;
        off_t offset;
        char *vaddr;
        int fd;
        if ((fd = open("/dev/mem", O_RDWR)) == -1) {
            perror("open");
            return NULL;
        }
        /*
         * We need to mmap() this memory, since this was allocated from the
         * kernel bootup code and does not reside in the RESERVE32 region.
         */
        size = CVMX_APP_HOTPLUG_INFO_REGION_SIZE + pg_sz-1;
        offset = block_desc->base_addr & ~(pg_sz-1);
        vaddr = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, offset);
        /* The mapping remains valid after the descriptor is closed; the fd
           previously leaked on every path (including mmap failure). */
        close(fd);
        if (vaddr == MAP_FAILED)
        {
            perror("mmap");
            return NULL;
        }
        hgp = (cvmx_app_hotplug_global_t *)(vaddr + ( block_desc->base_addr & (pg_sz-1)));
    }
#else
    hgp = CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, block_desc->base_addr));
#endif
    hotplug_global_ptr = hgp;
    return hgp;
}
/**
 * Return the hotplug info structure (cvmx_app_hotplug_info_t) pointer for the
 * application running on the given coremask.
 *
 * @param coremask Coremask of application.
 * @return Returns hotplug info struct on success, NULL on failure
 *
 */
cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t coremask)
{
    cvmx_app_hotplug_global_t *global;
    cvmx_app_hotplug_info_t *entry;
    int idx;
    int dbg = 0;
#ifdef DEBUG
    dbg = 1;
#endif
    global = cvmx_app_get_hotplug_global_ptr();
    if (global == NULL)
        return NULL;
    entry = global->hotplug_info_array;
    /* Scan the application table for the entry owning this exact coremask. */
    for (idx = 0; idx < CVMX_APP_HOTPLUG_MAX_APPS; idx++)
    {
        if (entry[idx].coremask != coremask)
            continue;
        if (dbg)
            printf("cvmx_app_hotplug_get_info(): coremask match %d -- coremask 0x%x, valid %d\n", idx, (unsigned int)entry[idx].coremask, (unsigned int)entry[idx].valid);
        return &entry[idx];
    }
    return NULL;
}
/**
 * Return the hotplug application index structure for the application running on the
 * given coremask.
 *
 * @param coremask Coremask of application.
 * @return Returns hotplug application index on success. -1 on failure
 *
 */
int cvmx_app_hotplug_get_index(uint32_t coremask)
{
    cvmx_app_hotplug_info_t *hip;
    cvmx_app_hotplug_global_t *hgp;
    int i;
    int dbg = 0;
#ifdef DEBUG
    dbg = 1;
#endif
    hgp = cvmx_app_get_hotplug_global_ptr();
    if (!hgp) return -1;
    hip = hgp->hotplug_info_array;
    /* Look for the current app's info */
    for (i=0; i<CVMX_APP_HOTPLUG_MAX_APPS; i++)
    {
        if (hip[i].coremask == coremask)
        {
            if (dbg)
                /* Fixed: the debug message previously named the wrong function
                   (cvmx_app_hotplug_get_info). */
                printf("cvmx_app_hotplug_get_index(): coremask match %d -- coremask 0x%x valid %d\n", i, (unsigned int)hip[i].coremask, (unsigned int)hip[i].valid);
            return i;
        }
    }
    return -1;
}
/* Dump one hotplug info entry (app name, coremasks, valid flag) to stdout. */
void print_hot_plug_info(cvmx_app_hotplug_info_t* hpinfo)
{
    unsigned int mask = (unsigned int)hpinfo->coremask;
    unsigned int hp_mask = (unsigned int)hpinfo->hotplug_activated_coremask;
    unsigned int valid = (unsigned int)hpinfo->valid;

    printf("name=%s coremask=%08x hotplugged coremask=%08x valid=%d\n",
           hpinfo->app_name, mask, hp_mask, valid);
}
/**
 * Return the hotplug info structure (cvmx_app_hotplug_info_t) pointer for the
 * application with the specified index.
 *
 * @param index index of application.
 * @return Returns hotplug info struct on success, NULL on failure (index out
 *         of range, entry not valid, or global region unavailable).
 */
cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info_at_index(int index)
{
    cvmx_app_hotplug_info_t *hip;
    cvmx_app_hotplug_global_t *hgp;
    hgp = cvmx_app_get_hotplug_global_ptr();
    if (!hgp)
        return NULL;
    hip = hgp->hotplug_info_array;
#ifdef DEBUG
    /* Fixed: this trace previously dereferenced 'block_desc', which is not
       declared in this function and broke the build under -DDEBUG */
    printf("cvmx_app_hotplug_get_info_at_index(): hotplug global ptr %p\n", hgp);
#endif
    /* Reject negative indices as well as too-large ones; the original only
       checked the upper bound and would read out of bounds for index < 0 */
    if (index >= 0 && index < CVMX_APP_HOTPLUG_MAX_APPS)
    {
        if (hip[index].valid)
            return &hip[index];
    }
    return NULL;
}
/**
 * Determines if the SE application at the index specified is hotpluggable.
 *
 * @param index index of application.
 * @return Returns -1 on error.
 *         0 -> The application is not hotpluggable
 *         1 -> The application is hotpluggable
 */
int is_app_hotpluggable(int index)
{
    cvmx_app_hotplug_info_t *info = cvmx_app_hotplug_get_info_at_index(index);

    if (info == NULL)
    {
        printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
        return -1;
    }
    /* A non-zero activated coremask means the app registered for hotplug */
    return info->hotplug_activated_coremask ? 1 : 0;
}
/**
 * This routine sends a shutdown request to a running target application.
 *
 * The target must have registered a shutdown callback and all of its cores
 * must have activated hotplug, otherwise the request is rejected.
 *
 * @param coremask Coremask the application is running on.
 * @param wait 1 - Wait (busy-spin) for shutdown completion
 *             0 - Do not wait
 * @return 0 on success, -1 on error
 *
 */
int cvmx_app_hotplug_shutdown_request(uint32_t coremask, int wait)
{
    int i;
    cvmx_app_hotplug_info_t *hotplug_info_ptr;
    if (!(hotplug_info_ptr = cvmx_app_hotplug_get_info(coremask)))
    {
        printf("\nERROR: Failed to get hotplug info for coremask: 0x%x\n", (unsigned int)coremask);
        return -1;
    }
    /* Publish which cores are being asked to shut down before raising IPIs */
    hotplug_info_ptr->shutdown_cores = coremask;
    if (!hotplug_info_ptr->shutdown_callback)
    {
        printf("\nERROR: Target application has not registered for hotplug!\n");
        return -1;
    }
    if (hotplug_info_ptr->hotplug_activated_coremask != coremask)
    {
        printf("\nERROR: Not all application cores have activated hotplug\n");
        return -1;
    }
    /* Send IPIs to all application cores to request shutdown
       (mailbox bit 1 is the shutdown request) */
    for (i=0; i<CVMX_MAX_CORES; i++) {
        if (coremask & (1ull<<i))
            cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 1);
    }
    if (wait)
    {
        /* Busy-wait until the target sets shutdown_done (volatile flag) */
        while (!hotplug_info_ptr->shutdown_done);
        /* Clean up the hotplug info region for this application */
        bzero(hotplug_info_ptr, sizeof(*hotplug_info_ptr));
    }
    return 0;
}
/**
 * Invoke the cores_added callbacks on the application at the given index by
 * sending an IPI (mailbox value 4, the add-cores request) to every core
 * belonging to the app.
 *
 * @param index index of the target application.
 * @return 0 on success, -1 when the app's hotplug info cannot be found.
 */
int cvmx_app_hotplug_call_add_cores_callback(int index)
{
    int core;
    cvmx_app_hotplug_info_t *info = cvmx_app_hotplug_get_info_at_index(index);

    if (info == NULL)
    {
        printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
        return -1;
    }
    /* Ring the mailbox of every core that is part of the application */
    for (core = 0; core < CVMX_MAX_CORES; core++)
    {
        if (info->coremask & (1ull << core))
            cvmx_write_csr(CVMX_CIU_MBOX_SETX(core), 4);
    }
    return 0;
}
/**
 * This routine sends a request to a running target application
 * to unplug a specified set of cores.
 *
 * @param index is the index of the target application
 * @param coremask Coremask of the cores to be unplugged from the app.
 * @param wait 1 - Wait for shutdown completion
 *             0 - Do not wait
 *             NOTE(review): 'wait' is currently unused -- the waiting code
 *             below is compiled out with #if 0.
 * @return 0 on success, -1 on error
 *
 */
int cvmx_app_hotplug_unplug_cores(int index, uint32_t coremask, int wait)
{
    cvmx_app_hotplug_info_t *ai;
    int i;
    if (!(ai = cvmx_app_hotplug_get_info_at_index(index)))
    {
        printf("\nERROR: Failed to get hotplug info for app at index=%d\n", index);
        return -1;
    }
    /* Publish the set of cores being unplugged before raising the IPIs */
    ai->unplug_cores = coremask;
#if 0
    if (!ai->shutdown_callback)
    {
        printf("\nERROR: Target application has not registered for hotplug!\n");
        return -1;
    }
#endif
    /* The requested cores must all be a subset of the app's coremask */
    if ( (ai->coremask | coremask ) != ai->coremask)
    {
        printf("\nERROR: Not all cores requested are a part of the app "
               "r=%08x:%08x\n", (unsigned int)coremask, (unsigned int)ai->coremask);
        return -1;
    }
    /* Removing every core is a full shutdown, which this API refuses --
       use cvmx_app_hotplug_shutdown_request() for that */
    if (ai->coremask == coremask)
    {
        printf("\nERROR: Trying to remove all cores in app. "
               "r=%08x:%08x\n", (unsigned int)coremask, (unsigned int)ai->coremask);
        return -1;
    }
    /* Send IPIs to all application cores to request unplug/remove_cores
       callback (mailbox bit 2 is the unplug request) */
    for (i=0; i<CVMX_MAX_CORES; i++) {
        if (ai->coremask & (1ull<<i))
            cvmx_write_csr(CVMX_CIU_MBOX_SETX(i), 2);
    }
#if 0
    if (wait)
    {
        while (!ai->shutdown_done);
        /* Clean up the hotplug info region for this application */
        bzero(ai, sizeof(*ai));
    }
#endif
    return 0;
}
/**
 * Returns 1 if any app is currently being booted, hotplugged or shut down.
 * Only one app can be under a boot, hotplug or shutdown condition at a time.
 * Before booting an app, this method should be used to check whether boot or
 * shutdown activity is in progress, and the boot or shutdown should proceed
 * only when there is no other activity.
 *
 * NOTE(review): the global pointer is not NULL-checked here (matching the
 * original behavior) -- confirm the hotplug region always exists by the time
 * this is called.
 */
int is_app_under_boot_or_shutdown(void)
{
    cvmx_app_hotplug_global_t *global = cvmx_app_get_hotplug_global_ptr();
    int busy;

    cvmx_spinlock_lock(&global->hotplug_global_lock);
    busy = (global->app_under_boot || global->app_under_shutdown) ? 1 : 0;
    cvmx_spinlock_unlock(&global->hotplug_global_lock);
    return busy;
}
/**
 * Sets or clears the app_under_boot value. This, when set, signifies that an
 * app is being currently booted or hotplugged with a new core.
 *
 * NOTE(review): the function name contains a typo ("unber" for "under") but
 * it is the public API -- the header declares it with the same spelling, so
 * it cannot be renamed here without breaking callers.
 *
 * @param val sets the app_under_boot to the specified value. This should be
 *            set to 1 while any app is being booted and cleared after the
 *            application has booted up.
 *
 */
void set_app_unber_boot(int val)
{
    cvmx_app_hotplug_global_t *hgp;
    hgp = cvmx_app_get_hotplug_global_ptr();
    /* The flag is shared across processes; guard it with the global lock */
    cvmx_spinlock_lock(&hgp->hotplug_global_lock);
    hgp->app_under_boot = val;
    cvmx_spinlock_unlock(&hgp->hotplug_global_lock);
}
/**
 * Sets or clears the app_under_shutdown value. This, when set, signifies
 * that an app is being currently shut down or that some cores of an app are
 * being shut down.
 *
 * @param val sets the app_under_shutdown to the specified value. This
 *            should be set to 1 while any app is being shut down and cleared
 *            after the shutdown of the app is complete.
 *
 */
void set_app_under_shutdown(int val)
{
    cvmx_app_hotplug_global_t *global = cvmx_app_get_hotplug_global_ptr();

    /* The flag is shared across processes; guard it with the global lock */
    cvmx_spinlock_lock(&global->hotplug_global_lock);
    global->app_under_shutdown = val;
    cvmx_spinlock_unlock(&global->hotplug_global_lock);
}

View File

@ -1,155 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Header file for the hotplug APIs
*
* <hr>$Revision: $<hr>
*/
#ifndef __CVMX_APP_HOTPLUG_H__
#define __CVMX_APP_HOTPLUG_H__
#ifdef __cplusplus
extern "C" {
#endif
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-spinlock.h>
#else
#include "cvmx.h"
#include "cvmx-coremask.h"
#include "cvmx-interrupt.h"
#include "cvmx-bootmem.h"
#include "cvmx-spinlock.h"
#endif
#define CVMX_APP_HOTPLUG_MAX_APPS 32
#define CVMX_APP_HOTPLUG_MAX_APPNAME_LEN 256
/**
 * hotplug_start is the entry point for hot plugged cores.
 * cores_added_callback is the callback which is invoked when new cores are
 *                      added to the application. It is invoked on all the
 *                      old cores that existed before the current set of
 *                      cores was added.
 * cores_removed_callback is the callback which is invoked when cores are
 *                      removed from an application. It is invoked on all the
 *                      cores that remain after the requested set of cores is
 *                      removed.
 * shutdown_done_callback is invoked, before the application is shut down, on
 *                      all the cores that are part of the app.
 * unplug_callback is invoked, before the cores are unplugged, only on the
 *                      cores that are being unplugged.
 */
/* Set of callbacks an application registers to participate in hotplug. */
typedef struct cvmx_app_hotplug_callbacks
{
    /* Entry point executed by cores that are hot plugged into the app */
    void (*hotplug_start)(void *ptr);
    /* Invoked on pre-existing cores when new cores are added */
    void (*cores_added_callback) (uint32_t ,void *ptr);
    /* Invoked on remaining cores when cores are removed */
    void (*cores_removed_callback) (uint32_t,void *ptr);
    /* Invoked on all app cores before the application shuts down */
    void (*shutdown_callback) (void *ptr);
    /* Invoked only on the cores that are being unplugged */
    void (*unplug_core_callback) (void *ptr);
} cvmx_app_hotplug_callbacks_t;
/* The size of this struct should be a fixed size of 1024 bytes.
   Additional members should be added towards the end of the
   structure by adjusting the size of padding */
typedef struct cvmx_app_hotplug_info
{
    char app_name[CVMX_APP_HOTPLUG_MAX_APPNAME_LEN];  /* Application name */
    uint32_t coremask;                       /* Cores the app runs on */
    /* Cores that have activated hotplug; volatile -- written by app cores */
    uint32_t volatile hotplug_activated_coremask;
    int32_t valid;                           /* Non-zero when entry is in use */
    /* Set by the target app when shutdown completes; polled by the requester */
    int32_t volatile shutdown_done;
    /* The following are function addresses stored as 64-bit integers so the
       layout is the same for 32- and 64-bit clients of this shared region */
    uint64_t shutdown_callback;
    uint64_t unplug_callback;
    uint64_t cores_added_callback;
    uint64_t cores_removed_callback;
    uint64_t hotplug_start;
    uint64_t data;                           /* Opaque user data for callbacks */
    uint32_t volatile hplugged_cores;        /* Cores hotplugged so far */
    uint32_t shutdown_cores;                 /* Cores requested to shut down */
    uint32_t app_shutdown;
    uint32_t unplug_cores;                   /* Cores requested to unplug */
    /* Pad to the fixed 1024-byte record size (see note above) */
    uint32_t padding[172];
} cvmx_app_hotplug_info_t;
/* Global hotplug state shared by all applications (lives in a named bootmem
   region -- see CVMX_APP_HOTPLUG_INFO_REGION_NAME below). */
struct cvmx_app_hotplug_global
{
    /* presumably the coremask available for hotplug -- verify against users */
    uint32_t avail_coremask;
    /* One fixed-size record per application */
    cvmx_app_hotplug_info_t hotplug_info_array[CVMX_APP_HOTPLUG_MAX_APPS];
    uint32_t version;
    /* Protects app_under_boot / app_under_shutdown */
    cvmx_spinlock_t hotplug_global_lock;
    int app_under_boot;       /* Set while an app is being booted/hotplugged */
    int app_under_shutdown;   /* Set while an app is being shut down */
};
typedef struct cvmx_app_hotplug_global cvmx_app_hotplug_global_t;
int is_core_being_hot_plugged(void);
int is_app_being_booted_or_shutdown(void);
void set_app_unber_boot(int val);
void set_app_under_shutdown(int val);
int cvmx_app_hotplug_shutdown_request(uint32_t, int);
int cvmx_app_hotplug_unplug_cores(int index, uint32_t coremask, int wait);
cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info(uint32_t);
int cvmx_app_hotplug_get_index(uint32_t coremask);
cvmx_app_hotplug_info_t* cvmx_app_hotplug_get_info_at_index(int index);
int is_app_hotpluggable(int index);
int cvmx_app_hotplug_call_add_cores_callback(int index);
#ifndef CVMX_BUILD_FOR_LINUX_USER
int cvmx_app_hotplug_register(void(*)(void*), void*);
int cvmx_app_hotplug_register_cb(cvmx_app_hotplug_callbacks_t *, void*, int);
int cvmx_app_hotplug_activate(void);
void cvmx_app_hotplug_core_shutdown(void);
void cvmx_app_hotplug_shutdown_disable(void);
void cvmx_app_hotplug_shutdown_enable(void);
#endif
#define CVMX_APP_HOTPLUG_INFO_REGION_SIZE sizeof(cvmx_app_hotplug_global_t)
#define CVMX_APP_HOTPLUG_INFO_REGION_NAME "cvmx-app-hotplug-block"
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_APP_HOTPLUG_H__ */

View File

@ -1,446 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Simple executive application initialization for Linux user space. This
* file should be used instead of cvmx-app-init.c for running simple executive
* applications under Linux in userspace. The following are some of the key
* points to remember when writing applications to run both under the
* standalone simple executive and userspace under Linux.
*
 * -# Application main must be called "appmain" under Linux. Use an ifdef
 * based on __linux__ to determine the proper name.
* -# Be careful to use cvmx_ptr_to_phys() and cvmx_phys_to_ptr. The simple
* executive 1-1 TLB mappings allow you to be sloppy and interchange
* hardware addresses with virtual address. This isn't true under Linux.
* -# If you're talking directly to hardware, be careful. The normal Linux
* protections are circumvented. If you do something bad, Linux won't
* save you.
* -# Most hardware can only be initialized once. Unless you're very careful,
* this also means you Linux application can only run once.
*
* <hr>$Revision: 70129 $<hr>
*
*/
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <signal.h>
#include <sys/statfs.h>
#include <sys/wait.h>
#include <sys/sysmips.h>
#include <sched.h>
#include <octeon-app-init.h>
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-atomic.h"
#include "cvmx-sysinfo.h"
#include "cvmx-coremask.h"
#include "cvmx-spinlock.h"
#include "cvmx-bootmem.h"
#include "cvmx-helper-cfg.h"
int octeon_model_version_check(uint32_t chip_id);
#define OCTEON_ECLOCK_MULT_INPUT_X16 ((int)(33.4*16))
/* Applications using the simple executive libraries under Linux userspace must
rename their "main" function to match the prototype below. This allows the
simple executive to perform needed memory initialization and process
creation before the application runs. */
extern int appmain(int argc, const char *argv[]);
/* These two external addresses provide the beginning and end markers for the
CVMX_SHARED section. These are defined by the cvmx-shared.ld linker script.
If they aren't defined, you probably forgot to link using this script. */
extern void __cvmx_shared_start;
extern void __cvmx_shared_end;
extern uint64_t linux_mem32_min;
extern uint64_t linux_mem32_max;
extern uint64_t linux_mem32_wired;
extern uint64_t linux_mem32_offset;
/**
 * This function performs some default initialization of the Octeon executive. It initializes
 * the cvmx_bootmem memory allocator with the list of physical memory shared by the bootloader.
 * This function should be called on all cores that will use the bootmem allocator.
 * Applications which require a different configuration can replace this function with a suitable application
 * specific one.
 *
 * NOTE: under Linux userspace this is a no-op stub -- the bootmem allocator
 * is initialized in main() (via cvmx_bootmem_init) before appmain runs.
 *
 * @return 0 on success
 * -1 on failure
 */
int cvmx_user_app_init(void)
{
    return 0;
}
/**
* Simulator magic is not supported in user mode under Linux.
* This version of simprintf simply calls the underlying C
* library printf for output. It also makes sure that two
* calls to simprintf provide atomic output.
*
* @param format Format string in the same format as printf.
*/
void simprintf(const char *format, ...)
{
CVMX_SHARED static cvmx_spinlock_t simprintf_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
va_list ap;
cvmx_spinlock_lock(&simprintf_lock);
printf("SIMPRINTF(%d): ", (int)cvmx_get_core_num());
va_start(ap, format);
vprintf(format, ap);
va_end(ap);
cvmx_spinlock_unlock(&simprintf_lock);
}
/**
 * Setup the CVMX_SHARED data section to be shared across
 * all processors running this application. A memory mapped
 * region is allocated using shm_open and mmap. The current
 * contents of the CVMX_SHARED section are copied into the
 * region. Then the new region is remapped to replace the
 * existing CVMX_SHARED data.
 *
 * This function will display a message and abort the
 * application under any error conditions. The Linux tmpfs
 * filesystem must be mounted under /dev/shm.
 */
static void setup_cvmx_shared(void)
{
    const char *SHM_NAME = "cvmx_shared";
    unsigned long shared_size = &__cvmx_shared_end - &__cvmx_shared_start;
    int fd;
    /* If there isn't any shared data we can skip all this */
    if (shared_size)
    {
        char shm_name[30];
        printf("CVMX_SHARED: %p-%p\n", &__cvmx_shared_start, &__cvmx_shared_end);
#ifdef __UCLIBC__
        const char *defaultdir = "/dev/shm/";
        struct statfs f;
        int pid;
        /* The canonical place is /dev/shm. */
        if (statfs (defaultdir, &f) == 0)
        {
            pid = getpid();
            /* Fixed: bounded snprintf -- the old sprintf could overflow the
               30-byte buffer for large pids */
            snprintf (shm_name, sizeof(shm_name), "%s%s-%d", defaultdir, SHM_NAME, pid);
        }
        else
        {
            perror("/dev/shm is not mounted");
            exit(-1);
        }
        /* shm_open(), shm_unlink() are not implemented in uClibc. Do the
           same thing using open() and close() system calls. */
        fd = open (shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
        if (fd < 0)
        {
            perror("Failed to open CVMX_SHARED(shm_name)");
            exit(errno);
        }
        unlink (shm_name);
#else
        snprintf(shm_name, sizeof(shm_name), "%s-%d", SHM_NAME, getpid());
        /* Open a new shared memory region for use as CVMX_SHARED */
        fd = shm_open(shm_name, O_RDWR | O_CREAT | O_TRUNC, 0);
        if (fd < 0)
        {
            perror("Failed to setup CVMX_SHARED(shm_open)");
            exit(errno);
        }
        /* We don't want the file on the filesystem. Immediately unlink it so
           another application can create its own shared region */
        shm_unlink(shm_name);
#endif
        /* Resize the region to match the size of CVMX_SHARED.
           Fixed: the return value was previously ignored */
        if (ftruncate(fd, shared_size))
        {
            perror("Failed to setup CVMX_SHARED(ftruncate)");
            exit(errno);
        }
        /* Map the region into some random location temporarily so we can
           copy the shared data to it */
        void *ptr = mmap(NULL, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* Fixed: mmap reports failure with MAP_FAILED, not NULL */
        if (ptr == MAP_FAILED)
        {
            perror("Failed to setup CVMX_SHARED(mmap copy)");
            exit(errno);
        }
        /* Copy CVMX_SHARED to the new shared region so we don't lose
           initializers */
        memcpy(ptr, &__cvmx_shared_start, shared_size);
        munmap(ptr, shared_size);
        /* Remap the shared region to replace the old CVMX_SHARED region */
        ptr = mmap(&__cvmx_shared_start, shared_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
        /* Fixed: same MAP_FAILED check as above */
        if (ptr == MAP_FAILED)
        {
            perror("Failed to setup CVMX_SHARED(mmap final)");
            exit(errno);
        }
        /* Once mappings are setup, the file handle isn't needed anymore */
        close(fd);
    }
}
/**
 * Shutdown and free the shared CVMX_SHARED region setup by
 * setup_cvmx_shared.
 */
static void shutdown_cvmx_shared(void)
{
    unsigned long region_bytes = &__cvmx_shared_end - &__cvmx_shared_start;

    if (region_bytes == 0)
        return;
    munmap(&__cvmx_shared_start, region_bytes);
}
/**
 * Setup access to the CONFIG_CAVIUM_RESERVE32 memory section
 * created by the kernel. This memory is used for shared
 * hardware buffers with 32 bit userspace applications.
 *
 * Maps the [linux_mem32_min, linux_mem32_max] physical range from /dev/mem
 * into this process and records the virtual-to-physical offset in
 * linux_mem32_offset. Exits the process on any failure.
 */
static void setup_reserve32(void)
{
    if (linux_mem32_min && linux_mem32_max)
    {
        /* NOTE(review): region_size is an int -- this overflows for regions
           of 2GB or more; confirm the reserved region is always smaller */
        int region_size = linux_mem32_max - linux_mem32_min + 1;
        int mmap_flags = MAP_SHARED;
        void *linux_mem32_base_ptr = NULL;
        /* Although not strictly necessary, we are going to mmap() the wired
           TLB region so it is in the process page tables. These pages will
           never fault in, but they will allow GDB to access the wired
           region. We need the mappings to exactly match the wired TLB
           entry. */
        if (linux_mem32_wired)
        {
            mmap_flags |= MAP_FIXED;
            /* Place the region so it ends exactly at the 2GB boundary,
               matching the wired TLB entry */
            linux_mem32_base_ptr = CASTPTR(void, (1ull<<31) - region_size);
        }
        int fd = open("/dev/mem", O_RDWR);
        if (fd < 0)
        {
            perror("ERROR opening /dev/mem");
            exit(-1);
        }
        /* mmap64 so the physical offset is not truncated on 32-bit builds */
        linux_mem32_base_ptr = mmap64(linux_mem32_base_ptr,
                                      region_size,
                                      PROT_READ | PROT_WRITE,
                                      mmap_flags,
                                      fd,
                                      linux_mem32_min);
        close(fd);
        if (MAP_FAILED == linux_mem32_base_ptr)
        {
            perror("Error mapping reserve32");
            exit(-1);
        }
        /* Record the virtual-to-physical delta for address translation */
        linux_mem32_offset = CAST64(linux_mem32_base_ptr) - linux_mem32_min;
    }
}
/**
 * Main entrypoint of the application. Here we setup shared
 * memory and fork processes for each cpu. This simulates the
 * normal simple executive environment of one process per
 * cpu core.
 *
 * @param argc Number of command line arguments
 * @param argv The command line arguments
 * @return Return value for the process
 */
int main(int argc, const char *argv[])
{
    /* Both statics live in CVMX_SHARED so all forked processes see them */
    CVMX_SHARED static cvmx_spinlock_t mask_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
    CVMX_SHARED static int32_t pending_fork;
    unsigned long cpumask;
    unsigned long cpu;
    int firstcpu = 0;
    int firstcore = 0;
    cvmx_linux_enable_xkphys_access(0);
    cvmx_sysinfo_linux_userspace_initialize();
    /* 32-bit builds need the kernel-reserved 32-bit region for hardware
       buffers; abort if the kernel was not configured to provide it */
    if (sizeof(void*) == 4)
    {
        if (linux_mem32_min)
            setup_reserve32();
        else
        {
            printf("\nFailed to access 32bit shared memory region. Most likely the Kernel\n"
                   "has not been configured for 32bit shared memory access. Check the\n"
                   "kernel configuration.\n"
                   "Aborting...\n\n");
            exit(-1);
        }
    }
    setup_cvmx_shared();
    cvmx_bootmem_init(cvmx_sysinfo_get()->phy_mem_desc_addr);
    /* Check to make sure the Chip version matches the configured version */
    octeon_model_version_check(cvmx_get_proc_id());
    /* Initialize configuration to set bpid, pkind, pko_port for all the
       available ports connected. */
    __cvmx_helper_cfg_init();
    /* Get the list of logical cpus we should run on */
    if (sched_getaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
    {
        perror("sched_getaffinity failed");
        exit(errno);
    }
    cvmx_sysinfo_t *system_info = cvmx_sysinfo_get();
    /* Count starts at 1 for the parent process itself */
    cvmx_atomic_set32(&pending_fork, 1);
    /* Get the lowest logical cpu */
    firstcore = ffsl(cpumask) - 1;
    cpumask ^= (1ull<<(firstcore));
    /* Fork one child per remaining affinity bit; the parent takes the first
       core once every other core has been claimed */
    while (1)
    {
        if (cpumask == 0)
        {
            cpu = firstcore;
            firstcpu = 1;
            break;
        }
        cpu = ffsl(cpumask) - 1;
        /* Turn off the bit for this CPU number. We've counted him */
        cpumask ^= (1ull<<cpu);
        /* Increment the number of CPUs running this app */
        cvmx_atomic_add32(&pending_fork, 1);
        /* Flush all IO streams before the fork. Otherwise any buffered
           data in the C library will be duplicated. This results in
           duplicate output from a single print */
        fflush(NULL);
        /* Fork a process for the new CPU */
        int pid = fork();
        if (pid == 0)
        {
            break;
        }
        else if (pid == -1)
        {
            perror("Fork failed");
            exit(errno);
        }
    }
    /* Set affinity to lock me to the correct CPU */
    cpumask = (1<<cpu);
    if (sched_setaffinity(0, sizeof(cpumask), (cpu_set_t*)&cpumask))
    {
        perror("sched_setaffinity failed");
        exit(errno);
    }
    /* Publish this core in the shared coremask under the shared lock */
    cvmx_spinlock_lock(&mask_lock);
    system_info->core_mask |= 1<<cvmx_get_core_num();
    cvmx_atomic_add32(&pending_fork, -1);
    if (cvmx_atomic_get32(&pending_fork) == 0)
    {
        cvmx_dprintf("Active coremask = 0x%x\n", system_info->core_mask);
    }
    if (firstcpu)
        system_info->init_core = cvmx_get_core_num();
    cvmx_spinlock_unlock(&mask_lock);
    /* Spinning waiting for forks to complete */
    while (cvmx_atomic_get32(&pending_fork)) {}
    cvmx_coremask_barrier_sync(system_info->core_mask);
    cvmx_linux_enable_xkphys_access(1);
    /* Run the application's renamed main() on every process */
    int result = appmain(argc, argv);
    /* Wait for all forks to complete. This needs to be the core that started
       all of the forks. It may not be the lowest numbered core! */
    if (cvmx_get_core_num() == system_info->init_core)
    {
        int num_waits;
        /* CVMX_POP counts the bits set in core_mask (population count) */
        CVMX_POP(num_waits, system_info->core_mask);
        num_waits--;
        while (num_waits--)
        {
            if (wait(NULL) == -1)
                perror("CVMX: Wait for forked child failed\n");
        }
    }
    shutdown_cvmx_shared();
    return result;
}

View File

@ -1,589 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-spinlock.h"
#include <octeon-app-init.h>
#include "cvmx-sysinfo.h"
#include "cvmx-bootmem.h"
#include "cvmx-uart.h"
#include "cvmx-coremask.h"
#include "cvmx-core.h"
#include "cvmx-interrupt.h"
#include "cvmx-ebt3000.h"
#include "cvmx-sim-magic.h"
#include "cvmx-debug.h"
#include "cvmx-qlm.h"
#include "cvmx-scratch.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-helper-jtag.h"
#include <octeon_mem_map.h>
#include "libfdt.h"
int cvmx_debug_uart = -1;
/**
* @file
*
* Main entry point for all simple executive based programs.
*/
extern void cvmx_interrupt_initialize(void);
/**
* Main entry point for all simple executive based programs.
* This is the first C function called. It completes
* initialization, calls main, and performs C level cleanup.
*
 * @param app_desc_addr
 * Address of the application description structure passed
 * from the boot loader.
*/
EXTERN_ASM void __cvmx_app_init(uint64_t app_desc_addr);
/**
 * Set up sysinfo structure from boot descriptor versions 6 and higher.
 * In these versions, the interesting data is not in the boot info structure
 * defined by the toolchain, but is in the cvmx_bootinfo structure defined in
 * the simple exec.
 *
 * @param app_desc_ptr
 * pointer to boot descriptor block
 *
 * @param sys_info_ptr
 * pointer to sysinfo structure to fill in
 */
static void process_boot_desc_ver_6(octeon_boot_descriptor_t *app_desc_ptr, cvmx_sysinfo_t *sys_info_ptr)
{
    cvmx_bootinfo_t *cvmx_bootinfo_ptr = CASTPTR(cvmx_bootinfo_t, app_desc_ptr->cvmx_desc_vaddr);
    /* copy application information for simple exec use */
    /* Populate the sys_info structure from the boot descriptor block created by the bootloader.
    ** The boot descriptor block is put in the top of the heap, so it will be overwritten when the
    ** heap is fully used. Information that is to be used must be copied before that.
    ** Applications should only use the sys_info structure, not the boot descriptor
    */
    if (cvmx_bootinfo_ptr->major_version == 1)
    {
        sys_info_ptr->core_mask = cvmx_bootinfo_ptr->core_mask;
        sys_info_ptr->heap_base = cvmx_bootinfo_ptr->heap_base;
        sys_info_ptr->heap_size = cvmx_bootinfo_ptr->heap_end - cvmx_bootinfo_ptr->heap_base;
        sys_info_ptr->stack_top = cvmx_bootinfo_ptr->stack_top;
        sys_info_ptr->stack_size = cvmx_bootinfo_ptr->stack_size;
        sys_info_ptr->init_core = cvmx_get_core_num();
        sys_info_ptr->phy_mem_desc_addr = cvmx_bootinfo_ptr->phy_mem_desc_addr;
        sys_info_ptr->exception_base_addr = cvmx_bootinfo_ptr->exception_base_addr;
        sys_info_ptr->cpu_clock_hz = cvmx_bootinfo_ptr->eclock_hz;
        /* DDR transfers twice per clock, hence the factor of 2 */
        sys_info_ptr->dram_data_rate_hz = cvmx_bootinfo_ptr->dclock_hz * 2;
        sys_info_ptr->board_type = cvmx_bootinfo_ptr->board_type;
        sys_info_ptr->board_rev_major = cvmx_bootinfo_ptr->board_rev_major;
        sys_info_ptr->board_rev_minor = cvmx_bootinfo_ptr->board_rev_minor;
        memcpy(sys_info_ptr->mac_addr_base, cvmx_bootinfo_ptr->mac_addr_base, 6);
        sys_info_ptr->mac_addr_count = cvmx_bootinfo_ptr->mac_addr_count;
        memcpy(sys_info_ptr->board_serial_number, cvmx_bootinfo_ptr->board_serial_number, CVMX_BOOTINFO_OCTEON_SERIAL_LEN);
        sys_info_ptr->console_uart_num = 0;
        if (cvmx_bootinfo_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
            sys_info_ptr->console_uart_num = 1;
        /* Heuristic: values above 32MB can only have been reported in bytes */
        if (cvmx_bootinfo_ptr->dram_size > 32*1024*1024)
            sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size; /* older bootloaders incorrectly gave this in bytes, so don't convert */
        else
            sys_info_ptr->system_dram_size = (uint64_t)cvmx_bootinfo_ptr->dram_size * 1024 * 1024; /* convert from Megabytes to bytes */
        if (cvmx_bootinfo_ptr->minor_version >= 1)
        {
            sys_info_ptr->compact_flash_common_base_addr = cvmx_bootinfo_ptr->compact_flash_common_base_addr;
            sys_info_ptr->compact_flash_attribute_base_addr = cvmx_bootinfo_ptr->compact_flash_attribute_base_addr;
            sys_info_ptr->led_display_base_addr = cvmx_bootinfo_ptr->led_display_base_addr;
        }
        else if (sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT3000 ||
                 sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5800 ||
                 sys_info_ptr->board_type == CVMX_BOARD_TYPE_EBT5810)
        {
            /* Default these variables so that users of structure can be the same no
            ** matter what version of boot info block the bootloader passes */
            sys_info_ptr->compact_flash_common_base_addr = 0x1d000000 + 0x800;
            sys_info_ptr->compact_flash_attribute_base_addr = 0x1d010000;
            if (sys_info_ptr->board_rev_major == 1)
                sys_info_ptr->led_display_base_addr = 0x1d020000;
            else
                sys_info_ptr->led_display_base_addr = 0x1d020000 + 0xf8;
        }
        else
        {
            sys_info_ptr->compact_flash_common_base_addr = 0;
            sys_info_ptr->compact_flash_attribute_base_addr = 0;
            sys_info_ptr->led_display_base_addr = 0;
        }
        if (cvmx_bootinfo_ptr->minor_version >= 2)
        {
            sys_info_ptr->dfa_ref_clock_hz = cvmx_bootinfo_ptr->dfa_ref_clock_hz;
            sys_info_ptr->bootloader_config_flags = cvmx_bootinfo_ptr->config_flags;
        }
        else
        {
            /* Older minor versions lack these fields; reconstruct the config
               flags from the boot descriptor's flag bits instead */
            sys_info_ptr->dfa_ref_clock_hz = 0;
            sys_info_ptr->bootloader_config_flags = 0;
            if (app_desc_ptr->flags & OCTEON_BL_FLAG_DEBUG)
                sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_DEBUG;
            if (app_desc_ptr->flags & OCTEON_BL_FLAG_NO_MAGIC)
                sys_info_ptr->bootloader_config_flags |= CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC;
        }
    }
    else
    {
        printf("ERROR: Incompatible CVMX descriptor passed by bootloader: %d.%d\n",
               (int)cvmx_bootinfo_ptr->major_version, (int)cvmx_bootinfo_ptr->minor_version);
        exit(-1);
    }
    /* Flattened device tree support arrived in minor version 3 */
    if ((cvmx_bootinfo_ptr->minor_version >= 3) && (cvmx_bootinfo_ptr->fdt_addr != 0))
    {
        sys_info_ptr->fdt_addr = UNMAPPED_PTR(cvmx_bootinfo_ptr->fdt_addr);
        if (fdt_check_header((const void *)sys_info_ptr->fdt_addr))
        {
            printf("ERROR : Corrupt Device Tree.\n");
            exit(-1);
        }
        printf("Using device tree\n");
    }
    else
    {
        sys_info_ptr->fdt_addr = 0;
    }
}
/**
 * Interrupt handler for calling exit on Control-C interrupts.
 *
 * @param irq_number IRQ interrupt number
 * @param registers CPU registers at the time of the interrupt
 * @param user_arg Unused user argument
 */
static void process_break_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
{
    /* Exclude new functionality when building with older toolchains */
#if OCTEON_APP_INIT_H_VERSION >= 3
    int uart = irq_number - CVMX_IRQ_UART0;
    cvmx_uart_lsr_t lsrval;
    /* Check for a Control-C interrupt from the console. This loop will eat
       all input received on the uart */
    lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
    while (lsrval.s.dr)
    {
        /* '\003' is ASCII ETX, the character produced by Ctrl-C */
        int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(uart));
        if (c == '\003')
        {
            register uint64_t tmp;
            /* Wait for an another Control-C if right now we have no
               access to the console. After this point we hold the
               lock and use a different lock to synchronize between
               the memfile dumps from different cores. As a
               consequence regular printfs *don't* work after this
               point! */
            if (__octeon_uart_trylock () == 1)
                return;
            /* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
               set the MCD0 to be not masked by this core so we know
               the signal is received by someone */
            asm volatile (
                "dmfc0 %0, $22\n"
                "ori %0, %0, 0x1110\n"
                "dmtc0 %0, $22\n"
                : "=r" (tmp));
        }
        /* Re-read the line status register until no data remains */
        lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(uart));
    }
#endif
}
/**
 * Debug exception handler used with "break".  Before calling exit() to
 * dump the profile-feedback output it releases the lock on the console,
 * so that any data still buffered in stdout can be flushed (stdio must
 * be able to flush all output during an fread).
 */
static void exit_on_break(void)
{
#if OCTEON_APP_INIT_H_VERSION >= 4
    unsigned int running_cores = cvmx_sysinfo_get()->core_mask;

    /* Rendezvous every core first, then let only the first core in the
       mask drop the console lock. */
    cvmx_coremask_barrier_sync(running_cores);
    if (cvmx_coremask_first_core(running_cores))
        __octeon_uart_unlock();
#endif
    exit(0);
}
/* Add string signature to applications so that we can easily tell what
** Octeon revision they were compiled for. Don't make static to avoid unused
** variable warning. */
/* Two-level expansion so the *value* of OCTEON_MODEL (not its name) is stringized */
#define xstr(s) str(s)
#define str(s) #s
int octeon_model_version_check(uint32_t chip_id);
#define OMS xstr(OCTEON_MODEL)
char octeon_rev_signature[] =
#ifdef USE_RUNTIME_MODEL_CHECKS
    "Compiled for runtime Octeon model checking";
#else
    "Compiled for Octeon processor id: "OMS;
#endif
/* Boot-descriptor flag: cores are hot-plugged; when set, the coremask
** barriers in __cvmx_app_init are skipped. */
#define OCTEON_BL_FLAG_HPLUG_CORES (1 << 6)
/**
 * Per-core entry point for simple executive applications, called with the
 * boot descriptor address supplied by the bootloader (via crt0/$r16 — see
 * the comment block below).
 *
 * The first core in the coremask initializes the bootmem allocator,
 * validates the boot descriptor version, and sets up the feature map and
 * config.  All cores then verify the chip model, install interrupt
 * handlers, optionally arm break/debug support, clear COP0 Status[BEV],
 * enable MCD0 stopping, and synchronize before returning.
 *
 * @param app_desc_addr Address of the octeon_boot_descriptor_t passed by
 *                      the bootloader.
 */
void __cvmx_app_init(uint64_t app_desc_addr)
{
    /* App descriptor used by bootloader */
    octeon_boot_descriptor_t *app_desc_ptr = CASTPTR(octeon_boot_descriptor_t, app_desc_addr);
    /* app info structure used by the simple exec */
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    int breakflag = 0;
    //printf("coremask=%08x flags=%08x \n", app_desc_ptr->core_mask, app_desc_ptr->flags);
    if (cvmx_coremask_first_core(app_desc_ptr->core_mask))
    {
        /* Initialize the bootmem allocator with the descriptor that was provided by
         * the bootloader
         * IMPORTANT: All printfs must happen after this since PCI console uses named
         * blocks.
         */
        cvmx_bootmem_init(CASTPTR(cvmx_bootinfo_t, app_desc_ptr->cvmx_desc_vaddr)->phy_mem_desc_addr);
        /* do once per application setup */
        if (app_desc_ptr->desc_version < 6)
        {
            printf("Obsolete bootloader, can't run application\n");
            exit(-1);
        }
        else
        {
            /* Handle all newer versions here.... */
            if (app_desc_ptr->desc_version > 7)
            {
                printf("Warning: newer boot descripter version than expected\n");
            }
            process_boot_desc_ver_6(app_desc_ptr,sys_info_ptr);
        }
        /*
         * set up the feature map and config.
         */
        octeon_feature_init();
        __cvmx_helper_cfg_init();
    }
    /* The flags variable gets copied over at some places and tracing the origins
       found that
    ** In octeon_setup_boot_desc_block
       . cvmx_bootinfo_array[core].flags is initialized and the various bits are set
       . cvmx_bootinfo_array[core].flags gets copied to boot_desc[core].flags
       . Then boot_desc then get copied over to the end of the application heap and
         boot_info_block_array[core].boot_descr_addr is set to point to the boot_desc
         in heap.
    ** In start_app boot_vect->boot_info_addr->boot_desc_addr is referenced and passed on
       to octeon_setup_crt0_tlb() and this puts it into r16
    ** In ctr0.S of the toolchain r16 is picked up and passed on as a parameter to
       __cvmx_app_init
       Note : boot_vect->boot_info_addr points to boot_info_block_array[core] and this
       pointer is setup in octeon_setup_boot_vector()
    */
    if (!(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
        cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
    breakflag = sys_info_ptr->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_BREAK;
    /* No need to initialize bootmem, interrupts, interrupt handler and error handler
       if version does not match. */
    if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
    {
        /* Check to make sure the Chip version matches the configured version */
        uint32_t chip_id = cvmx_get_proc_id();
        /* Make sure we can properly run on this chip */
        octeon_model_version_check(chip_id);
    }
    cvmx_interrupt_initialize();
    if (cvmx_coremask_first_core(sys_info_ptr->core_mask))
    {
        int break_uart = 0;
        unsigned int i;
        if (breakflag && cvmx_debug_booted())
        {
            /* NOTE: message text (including its "in/is" typo) kept as-is; it is
               runtime output, not a comment. */
            printf("ERROR: Using debug and break together in not supported.\n");
            while (1)
                ;
        }
        /* Search through the arguments for a break=X or a debug=X. */
        for (i = 0; i < app_desc_ptr->argc; i++)
        {
            const char *argv = CASTPTR(const char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, app_desc_ptr->argv[i]));
            if (strncmp(argv, "break=", 6) == 0)
                break_uart = atoi(argv + 6);
            else if (strncmp(argv, "debug=", 6) == 0)
                cvmx_debug_uart = atoi(argv + 6);
        }
        if (breakflag)
        {
            int32_t *trampoline = CASTPTR(int32_t, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, BOOTLOADER_DEBUG_TRAMPOLINE));
            /* On debug exception, call exit_on_break from all cores. */
            *trampoline = (int32_t)(long)&exit_on_break;
            cvmx_uart_enable_intr(break_uart, process_break_interrupt);
        }
    }
    if ( !(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
        cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
    /* Clear BEV now that we have installed exception handlers.
       (COP0 Status register $12, bit 22) */
    uint64_t tmp;
    asm volatile (
        " .set push \n"
        " .set mips64 \n"
        " .set noreorder \n"
        " .set noat \n"
        " mfc0 %[tmp], $12, 0 \n"
        " li $at, 1 << 22 \n"
        " not $at, $at \n"
        " and %[tmp], $at \n"
        " mtc0 %[tmp], $12, 0 \n"
        " .set pop \n"
        : [tmp] "=&r" (tmp) : );
    /* Set all cores to stop on MCD0 signals (COP0 multicore-debug register $22) */
    asm volatile(
        "dmfc0 %0, $22, 0\n"
        "or %0, %0, 0x1100\n"
        "dmtc0 %0, $22, 0\n" : "=r" (tmp));
    CVMX_SYNC;
    /* Now initialize the debug exception handler as BEV is cleared. */
    if ((!breakflag) && (!(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES)))
        cvmx_debug_init();
    /* Synchronise all cores at this point */
    if ( !(app_desc_ptr->flags & OCTEON_BL_FLAG_HPLUG_CORES))
        cvmx_coremask_barrier_sync(app_desc_ptr->core_mask);
}
/**
 * Per-application hardware setup run from user code: writes a banner to the
 * LED display (when not on the simulator), checks COP0 BIST results, sets up
 * CVMSEG local scratch memory (with CN63xx errata workarounds), optionally
 * installs 1-1 TLB mappings for DRAM, re-initializes bootmem from sysinfo,
 * and runs QLM init on the first core.
 *
 * @return 0 on success (BIST failures are reported via printf but do not
 *         change the return value — note bist_errors is counted but unused).
 */
int cvmx_user_app_init(void)
{
    uint64_t bist_val;
    uint64_t mask;
    int bist_errors = 0;
    uint64_t tmp;
    uint64_t base_addr;
    /* Put message on LED display */
    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
        ebt3000_str_write("CVMX ");
    /* Check BIST results for COP0 registers, some values only meaningful in pass 2 */
    CVMX_MF_CACHE_ERR(bist_val);
    mask = (0x3fULL<<32); // Icache;BHT;AES;HSH/GFM;LRU;register file
    bist_val &= mask;
    if (bist_val)
    {
        printf("BIST FAILURE: COP0_CACHE_ERR: 0x%llx\n", (unsigned long long)bist_val);
        bist_errors++;
    }
    /* Top 6 bits of CVM_MEM_CTL hold additional BIST status */
    mask = 0xfc00000000000000ull;
    CVMX_MF_CVM_MEM_CTL(bist_val);
    bist_val &= mask;
    if (bist_val)
    {
        printf("BIST FAILURE: COP0_CVM_MEM_CTL: 0x%llx\n", (unsigned long long)bist_val);
        bist_errors++;
    }
    /* Set up 4 cache lines of local memory, make available from Kernel space */
    CVMX_MF_CVM_MEM_CTL(tmp);
    tmp &= ~0x1ffull;
    tmp |= 0x104ull;
    /* Set WBTHRESH=4 as per Core-14752 errata in cn63xxp1.X. */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))
    {
        tmp &= ~(0xfull << 11);
        tmp |= 4 << 11;
    }
    CVMX_MT_CVM_MEM_CTL(tmp);
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_X))
    {
        /* Clear the lines of scratch memory configured, for
        ** 63XX pass 2 errata Core-15169. */
        uint64_t addr;
        unsigned num_lines;
        CVMX_MF_CVM_MEM_CTL(tmp);
        num_lines = tmp & 0x3f;
        for (addr = 0; addr < CVMX_CACHE_LINE_SIZE * num_lines; addr += 8)
            cvmx_scratch_write64(addr, 0);
    }
#if CVMX_USE_1_TO_1_TLB_MAPPINGS
    /* Check to see if the bootloader is indicating that the application is outside
    ** of the 0x10000000 0x20000000 range, in which case we can't use 1-1 mappings */
    if (cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING)
    {
        printf("ERROR: 1-1 TLB mappings configured and oversize application loaded.\n");
        printf("ERROR: Either 1-1 TLB mappings must be disabled or application size reduced.\n");
        exit(-1);
    }
    /* Create 1-1 Mappings for all DRAM up to 8 gigs, excluding the low 1 Megabyte. This area
    ** is reserved for the bootloader and exception vectors. By not mapping this area, NULL pointer
    ** dereferences will be caught with TLB exceptions. Exception handlers should be written
    ** using XKPHYS or KSEG0 addresses. */
#if CVMX_NULL_POINTER_PROTECT
    /* Exclude low 1 MByte from mapping to detect NULL pointer accesses.
    ** The only down side of this is it uses more TLB mappings.
    ** (Each call maps an even/odd page pair: args are vaddr, phys of page 0, phys of page 1.) */
    cvmx_core_add_fixed_tlb_mapping_bits(0x0, 0x0, 0x100000 | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_1M);
    cvmx_core_add_fixed_tlb_mapping(0x200000, 0x200000, 0x300000, CVMX_TLB_PAGEMASK_1M);
    cvmx_core_add_fixed_tlb_mapping(0x400000, 0x400000, 0x500000, CVMX_TLB_PAGEMASK_1M);
    cvmx_core_add_fixed_tlb_mapping(0x600000, 0x600000, 0x700000, CVMX_TLB_PAGEMASK_1M);
    cvmx_core_add_fixed_tlb_mapping(0x800000, 0x800000, 0xC00000, CVMX_TLB_PAGEMASK_4M);
    cvmx_core_add_fixed_tlb_mapping(0x1000000, 0x1000000, 0x1400000, CVMX_TLB_PAGEMASK_4M);
    cvmx_core_add_fixed_tlb_mapping(0x1800000, 0x1800000, 0x1c00000, CVMX_TLB_PAGEMASK_4M);
    cvmx_core_add_fixed_tlb_mapping(0x2000000, 0x2000000, 0x3000000, CVMX_TLB_PAGEMASK_16M);
    cvmx_core_add_fixed_tlb_mapping(0x4000000, 0x4000000, 0x5000000, CVMX_TLB_PAGEMASK_16M);
    cvmx_core_add_fixed_tlb_mapping(0x6000000, 0x6000000, 0x7000000, CVMX_TLB_PAGEMASK_16M);
#else
    /* Map entire low 128 Megs, including 0x0 (one entry: two 64M pages) */
    cvmx_core_add_fixed_tlb_mapping(0x0, 0x0, 0x4000000ULL, CVMX_TLB_PAGEMASK_64M);
#endif
    cvmx_core_add_fixed_tlb_mapping(0x8000000ULL, 0x8000000ULL, 0xc000000ULL, CVMX_TLB_PAGEMASK_64M);
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        /* NOTE(review): loop bound adds 0x10000000 to system_dram_size —
           presumably to account for the boot-bus hole below 512MB; confirm. */
        for (base_addr = 0x20000000ULL; base_addr < (cvmx_sysinfo_get()->system_dram_size + 0x10000000ULL); base_addr += 0x20000000ULL)
        {
            if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
            {
                printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
                /* Exit from here, as expected memory mappings aren't set
                   up if this fails */
                exit(-1);
            }
        }
    }
    else
    {
        /* Create 1-1 mapping for next 256 megs
        ** bottom page is not valid */
        cvmx_core_add_fixed_tlb_mapping_bits(0x400000000ULL, 0, 0x410000000ULL | TLB_DIRTY | TLB_VALID | TLB_GLOBAL, CVMX_TLB_PAGEMASK_256M);
        /* Map from 0.5 up to the installed memory size in 512 MByte chunks. If this loop runs out of memory,
        ** the NULL pointer detection can be disabled to free up more TLB entries. */
        if (cvmx_sysinfo_get()->system_dram_size > 0x20000000ULL)
        {
            for (base_addr = 0x20000000ULL; base_addr <= (cvmx_sysinfo_get()->system_dram_size - 0x20000000ULL); base_addr += 0x20000000ULL)
            {
                if (0 > cvmx_core_add_fixed_tlb_mapping(base_addr, base_addr, base_addr + 0x10000000ULL, CVMX_TLB_PAGEMASK_256M))
                {
                    printf("ERROR adding 1-1 TLB mapping for address 0x%llx\n", (unsigned long long)base_addr);
                    /* Exit from here, as expected memory mappings
                       aren't set up if this fails */
                    exit(-1);
                }
            }
        }
    }
#endif
    cvmx_sysinfo_t *sys_info_ptr = cvmx_sysinfo_get();
    cvmx_bootmem_init(sys_info_ptr->phy_mem_desc_addr);
    /* Initialize QLM and JTAG settings. Also apply any erratas. */
    if (cvmx_coremask_first_core(cvmx_sysinfo_get()->core_mask))
        cvmx_qlm_init();
    return(0);
}
void __cvmx_app_exit(void)
{
cvmx_debug_finish();
if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
{
CVMX_BREAK;
}
/* Hang forever, until more appropriate stand alone simple executive
exit() is implemented */
while (1);
}

View File

@ -1,516 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Header file for simple executive application initialization. This defines
* part of the ABI between the bootloader and the application.
* <hr>$Revision: 70327 $<hr>
*
*/
#ifndef __CVMX_APP_INIT_H__
#define __CVMX_APP_INIT_H__
#ifdef __cplusplus
extern "C" {
#endif
/* Current major and minor versions of the CVMX bootinfo block that is passed
** from the bootloader to the application. This is versioned so that applications
** can properly handle multiple bootloader versions. */
#define CVMX_BOOTINFO_MAJ_VER 1
#define CVMX_BOOTINFO_MIN_VER 3
#if (CVMX_BOOTINFO_MAJ_VER == 1)
/* Length in bytes of the board serial number field in the bootinfo block */
#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
/* This structure is populated by the bootloader. For binary
** compatibility the only changes that should be made are
** adding members to the end of the structure, and the minor
** version should be incremented at that time.
** If an incompatible change is made, the major version
** must be incremented, and the minor version should be reset
** to 0.
*/
struct cvmx_bootinfo {
#ifdef __BIG_ENDIAN_BITFIELD
    uint32_t major_version;             /**< Bootinfo block major version (see CVMX_BOOTINFO_MAJ_VER) */
    uint32_t minor_version;             /**< Bootinfo block minor version (see CVMX_BOOTINFO_MIN_VER) */
    uint64_t stack_top;
    uint64_t heap_base;
    uint64_t heap_end;
    uint64_t desc_vaddr;
    uint32_t exception_base_addr;
    uint32_t stack_size;
    uint32_t flags;
    uint32_t core_mask;
    uint32_t dram_size;                 /**< DRAM size in megabytes */
    uint32_t phy_mem_desc_addr;         /**< physical address of free memory descriptor block*/
    uint32_t debugger_flags_base_addr;  /**< used to pass flags from app to debugger */
    uint32_t eclock_hz;                 /**< CPU clock speed, in hz */
    uint32_t dclock_hz;                 /**< DRAM clock speed, in hz */
    uint32_t reserved0;
    uint16_t board_type;                /**< One of enum cvmx_board_types_enum */
    uint8_t board_rev_major;
    uint8_t board_rev_minor;
    uint16_t reserved1;
    uint8_t reserved2;
    uint8_t reserved3;
    char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
    uint8_t mac_addr_base[6];
    uint8_t mac_addr_count;
#if (CVMX_BOOTINFO_MIN_VER >= 1)
    /* Several boards support compact flash on the Octeon boot bus. The CF
    ** memory spaces may be mapped to different addresses on different boards.
    ** These are the physical addresses, so care must be taken to use the correct
    ** XKPHYS/KSEG0 addressing depending on the application's ABI.
    ** These values will be 0 if CF is not present */
    uint64_t compact_flash_common_base_addr;
    uint64_t compact_flash_attribute_base_addr;
    /* Base address of the LED display (as on EBT3000 board)
    ** This will be 0 if LED display not present. */
    uint64_t led_display_base_addr;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 2)
    uint32_t dfa_ref_clock_hz;  /**< DFA reference clock in hz (if applicable)*/
    uint32_t config_flags;      /**< flags indicating various configuration options. These flags supercede
                                ** the 'flags' variable and should be used instead if available */
#if defined(OCTEON_VENDOR_GEFES)
    uint32_t dfm_size;          /**< DFA Size */
#endif
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 3)
    uint64_t fdt_addr;          /**< Address of the OF Flattened Device Tree structure describing the board. */
#endif
#else /* __BIG_ENDIAN */
    /*
     * Little-Endian: When the CPU mode is switched to
     * little-endian, the view of the structure has some of the
     * fields swapped.  Each pair of 32-bit fields within a 64-bit
     * word appears in the opposite order from the big-endian view.
     *
     * NOTE(review): this view does not include the OCTEON_VENDOR_GEFES
     * dfm_size field present in the big-endian view above, so the two
     * layouts diverge when that vendor macro is defined — confirm.
     */
    uint32_t minor_version;
    uint32_t major_version;
    uint64_t stack_top;
    uint64_t heap_base;
    uint64_t heap_end;
    uint64_t desc_vaddr;
    uint32_t stack_size;
    uint32_t exception_base_addr;
    uint32_t core_mask;
    uint32_t flags;
    uint32_t phy_mem_desc_addr;
    uint32_t dram_size;
    uint32_t eclock_hz;
    uint32_t debugger_flags_base_addr;
    uint32_t reserved0;
    uint32_t dclock_hz;
    uint8_t reserved3;
    uint8_t reserved2;
    uint16_t reserved1;
    uint8_t board_rev_minor;
    uint8_t board_rev_major;
    uint16_t board_type;
    union cvmx_bootinfo_scramble {
        /* Must byteswap these four words so that...*/
        uint64_t s[4];
        /* ... this structure has the proper data arrangement. */
        struct {
            char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
            uint8_t mac_addr_base[6];
            uint8_t mac_addr_count;
            uint8_t pad[5];
        } le;
    } scramble1;
#if (CVMX_BOOTINFO_MIN_VER >= 1)
    uint64_t compact_flash_common_base_addr;
    uint64_t compact_flash_attribute_base_addr;
    uint64_t led_display_base_addr;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 2)
    uint32_t config_flags;
    uint32_t dfa_ref_clock_hz;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 3)
    uint64_t fdt_addr;
#endif
#endif
};
typedef struct cvmx_bootinfo cvmx_bootinfo_t;
#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/* This flag is set if the TLB mappings are not contained in the
** 0x10000000 - 0x20000000 boot bus region. */
#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
/* Type defines for board and chip types.
** These values are part of the bootloader/application ABI (reported in
** cvmx_bootinfo.board_type), so existing values must never be renumbered. */
enum cvmx_board_types_enum {
    CVMX_BOARD_TYPE_NULL = 0,
    CVMX_BOARD_TYPE_SIM = 1,
    CVMX_BOARD_TYPE_EBT3000 = 2,
    CVMX_BOARD_TYPE_KODAMA = 3,
    CVMX_BOARD_TYPE_NIAGARA = 4,    /* Obsolete, no longer supported */
    CVMX_BOARD_TYPE_NAC38 = 5,      /* Obsolete, no longer supported */
    CVMX_BOARD_TYPE_THUNDER = 6,
    CVMX_BOARD_TYPE_TRANTOR = 7,    /* Obsolete, no longer supported */
    CVMX_BOARD_TYPE_EBH3000 = 8,
    CVMX_BOARD_TYPE_EBH3100 = 9,
    CVMX_BOARD_TYPE_HIKARI = 10,
    CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
    CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
/* Values 13-16 are vendor-dependent: GEFES builds reuse numbers assigned
** to other boards in the stock SDK. */
#if defined(OCTEON_VENDOR_GEFES)
    CVMX_BOARD_TYPE_TNPA3804 = 13,
    CVMX_BOARD_TYPE_AT5810 = 14,
    CVMX_BOARD_TYPE_WNPA3850 = 15,
    CVMX_BOARD_TYPE_W3860 = 16,
#else
    CVMX_BOARD_TYPE_KBP = 13,
    CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14, /* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
    CVMX_BOARD_TYPE_EBT5800 = 15,
    CVMX_BOARD_TYPE_NICPRO2 = 16,
#endif
    CVMX_BOARD_TYPE_EBH5600 = 17,
    CVMX_BOARD_TYPE_EBH5601 = 18,
    CVMX_BOARD_TYPE_EBH5200 = 19,
    CVMX_BOARD_TYPE_BBGW_REF = 20,
    CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
    CVMX_BOARD_TYPE_EBT5600 = 22,
    CVMX_BOARD_TYPE_EBH5201 = 23,
    CVMX_BOARD_TYPE_EBT5200 = 24,
    CVMX_BOARD_TYPE_CB5600 = 25,
    CVMX_BOARD_TYPE_CB5601 = 26,
    CVMX_BOARD_TYPE_CB5200 = 27,
    CVMX_BOARD_TYPE_GENERIC = 28, /* Special 'generic' board type, supports many boards */
    CVMX_BOARD_TYPE_EBH5610 = 29,
    CVMX_BOARD_TYPE_LANAI2_A = 30,
    CVMX_BOARD_TYPE_LANAI2_U = 31,
    CVMX_BOARD_TYPE_EBB5600 = 32,
    CVMX_BOARD_TYPE_EBB6300 = 33,
    CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
    CVMX_BOARD_TYPE_LANAI2_G = 35,
    CVMX_BOARD_TYPE_EBT5810 = 36,
    CVMX_BOARD_TYPE_NIC10E = 37,
    CVMX_BOARD_TYPE_EP6300C = 38,
    CVMX_BOARD_TYPE_EBB6800 = 39,
    CVMX_BOARD_TYPE_NIC4E = 40,
    CVMX_BOARD_TYPE_NIC2E = 41,
    CVMX_BOARD_TYPE_EBB6600 = 42,
    CVMX_BOARD_TYPE_REDWING = 43,
    CVMX_BOARD_TYPE_NIC68_4 = 44,
    CVMX_BOARD_TYPE_NIC10E_66 = 45,
    CVMX_BOARD_TYPE_EBB6100 = 46,
    CVMX_BOARD_TYPE_EVB7100 = 47,
    CVMX_BOARD_TYPE_MAX,
    /* NOTE: 256-257 are being used by a customer. */
    /* The range from CVMX_BOARD_TYPE_MAX to CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved
    ** for future SDK use. */
    /* Set aside a range for customer boards. These numbers are managed
    ** by Cavium.
    */
    CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
    CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
    CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
    CVMX_BOARD_TYPE_CUST_NB5 = 10003,
    CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
    CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
    CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
    CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
#if !defined(OCTEON_VENDOR_LANNER)
    CVMX_BOARD_TYPE_CUST_GST104 = 10008,
#else
    CVMX_BOARD_TYPE_CUST_LANNER_MR955= 10008,
#endif
    CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
    CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
    CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
    CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
    CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
    CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
    CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
    CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER= 10016,
    CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
    CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
    CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX= 10019,
    CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX= 10020,
#if defined(OCTEON_VENDOR_LANNER)
    CVMX_BOARD_TYPE_CUST_LANNER_MR730 = 10021,
#else
    CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
#endif
    CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
    /* Set aside a range for customer private use. The SDK won't
    ** use any numbers in this range. */
    CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
#if defined(OCTEON_VENDOR_LANNER)
    CVMX_BOARD_TYPE_CUST_LANNER_MR320= 20002,
    CVMX_BOARD_TYPE_CUST_LANNER_MR321X=20007,
#endif
#if defined(OCTEON_VENDOR_UBIQUITI)
    CVMX_BOARD_TYPE_CUST_UBIQUITI_E100=20002,
    CVMX_BOARD_TYPE_CUST_UBIQUITI_E120= 20004,
#endif
#if defined(OCTEON_VENDOR_RADISYS)
    CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE=20002,
#endif
#if defined(OCTEON_VENDOR_GEFES)
    CVMX_BOARD_TYPE_CUST_TNPA5804 = 20005,
    CVMX_BOARD_TYPE_CUST_W5434 = 20006,
    CVMX_BOARD_TYPE_CUST_W5650 = 20007,
    CVMX_BOARD_TYPE_CUST_W5800 = 20008,
    CVMX_BOARD_TYPE_CUST_W5651X = 20009,
    CVMX_BOARD_TYPE_CUST_TNPA5651X = 20010,
    CVMX_BOARD_TYPE_CUST_TNPA56X4 = 20011,
    CVMX_BOARD_TYPE_CUST_W63XX = 20013,
#endif
    CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
    /* Range for IO modules */
    CVMX_BOARD_TYPE_MODULE_MIN = 30001,
    CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X = 30002,
    CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X = 30003,
    CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL = 30004,
    CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM = 30005,
    CVMX_BOARD_TYPE_MODULE_SRIO = 30006,
    CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0 = 30007,
    CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1 = 30008,
    CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2 = 30009,
    CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3 = 30010,
    CVMX_BOARD_TYPE_MODULE_MAX = 31000
    /* The remaining range is reserved for future use. */
};
/* Identifiers for Octeon chip variants (see cvmx_chip_type_to_string()) */
enum cvmx_chip_types_enum {
    CVMX_CHIP_TYPE_NULL = 0,
    CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,  /* Note: does not follow the CVMX_CHIP_TYPE_ prefix convention */
    CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
    CVMX_CHIP_TYPE_MAX
};
/* Compatibility alias for NAC38 name change, planned to be removed from SDK 1.7 */
#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
/* Functions to return string based on type */
/* Stringize the enumerator and return a pointer past the 16-character
** "CVMX_BOARD_TYPE_" prefix */
#define ENUM_BRD_TYPE_CASE(x) case x: return(&#x[16]); /* Skip CVMX_BOARD_TYPE_ */
/**
 * Convert a board type enumerator to its human-readable name (the
 * enumerator name with the "CVMX_BOARD_TYPE_" prefix stripped).
 *
 * @param type Board type to look up
 * @return Static string name, or "Unsupported Board" for unknown values
 */
static inline const char *cvmx_board_type_to_string(enum cvmx_board_types_enum type)
{
    switch (type)
    {
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5)
/* Vendor-conditional cases mirror the enum's vendor-conditional members */
#if defined(OCTEON_VENDOR_GEFES)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TNPA3804)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_AT5810)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_WNPA3850)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_W3860)
#else
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2)
#endif
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6100)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EVB7100)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX)
        /* Customer boards listed here */
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103)
#if !defined(OCTEON_VENDOR_LANNER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104)
#else
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR955)
#endif
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX)
#if defined(OCTEON_VENDOR_LANNER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR730)
#else
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL)
#endif
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX)
        /* Customer private range */
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN)
#if defined(OCTEON_VENDOR_LANNER)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR320)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_LANNER_MR321X)
#endif
#if defined(OCTEON_VENDOR_UBIQUITI)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_UBIQUITI_E100)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_UBIQUITI_E120)
#endif
#if defined(OCTEON_VENDOR_RADISYS)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
#endif
#if defined(OCTEON_VENDOR_GEFES)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA5804)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5434)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5650)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5800)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W5651X)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA5651X)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_TNPA56X4)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_W63XX)
#endif
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
        /* Module range */
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MIN)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_RC_4X)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_PCIE_EP_4X)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SGMII_MARVEL)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SFPPLUS_BCM)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_SRIO)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM0)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM1)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM2)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_EBB5600_QLM3)
        ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MODULE_MAX)
    }
    return "Unsupported Board";
}
/* Stringize the enumerator and return a pointer past the 15-character
** "CVMX_CHIP_TYPE_" prefix.  Only valid for enumerators that actually
** carry that prefix. */
#define ENUM_CHIP_TYPE_CASE(x) case x: return(&#x[15]); /* Skip CVMX_CHIP_TYPE */
/**
 * Convert a chip type enumerator to its human-readable name.
 *
 * @param type Chip type to look up
 * @return Static string name, or "Unsupported Chip" for unknown values
 */
static inline const char *cvmx_chip_type_to_string(enum cvmx_chip_types_enum type)
{
    switch (type)
    {
        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL)
        /* BUG FIX: CVMX_CHIP_SIM_TYPE_DEPRECATED does not start with the
        ** "CVMX_CHIP_TYPE_" prefix, so the generic 15-character skip used
        ** by ENUM_CHIP_TYPE_CASE returned the garbled string
        ** "YPE_DEPRECATED".  Name it explicitly instead. */
        case CVMX_CHIP_SIM_TYPE_DEPRECATED: return "SIM_TYPE_DEPRECATED";
        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE)
        ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX)
    }
    return "Unsupported Chip";
}
extern int cvmx_debug_uart;
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_APP_INIT_H__ */

View File

@ -1,667 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
 * This file defines ASM primitives for the executive.
* <hr>$Revision: 70030 $<hr>
*
*
*/
#ifndef __CVMX_ASM_H__
#define __CVMX_ASM_H__
/* Coprocessor-0 register aliases in "$reg,sel" form, for use with the
   CVMX_MF_COP0()/CVMX_MT_COP0() macros below and from assembly sources. */
#define CVMX_MAX_CORES (32)
#define COP0_INDEX $0,0 /* TLB read/write index */
#define COP0_RANDOM $1,0 /* TLB random index */
#define COP0_ENTRYLO0 $2,0 /* TLB entryLo0 */
#define COP0_ENTRYLO1 $3,0 /* TLB entryLo1 */
#define COP0_CONTEXT $4,0 /* Context */
#define COP0_PAGEMASK $5,0 /* TLB pagemask */
#define COP0_PAGEGRAIN $5,1 /* TLB config for max page sizes */
#define COP0_WIRED $6,0 /* TLB number of wired entries */
#define COP0_HWRENA $7,0 /* rdhwr instruction enable per register */
#define COP0_BADVADDR $8,0 /* Bad virtual address */
#define COP0_COUNT $9,0 /* Mips count register */
#define COP0_CVMCOUNT $9,6 /* Cavium count register */
#define COP0_CVMCTL $9,7 /* Cavium control */
#define COP0_ENTRYHI $10,0 /* TLB entryHi */
#define COP0_COMPARE $11,0 /* Mips compare register */
#define COP0_POWTHROTTLE $11,6 /* Power throttle register */
#define COP0_CVMMEMCTL $11,7 /* Cavium memory control */
#define COP0_STATUS $12,0 /* Mips status register */
#define COP0_INTCTL $12,1 /* Useless (Vectored interrupts) */
#define COP0_SRSCTL $12,2 /* Useless (Shadow registers) */
#define COP0_CAUSE $13,0 /* Mips cause register */
#define COP0_EPC $14,0 /* Exception program counter */
#define COP0_PRID $15,0 /* Processor ID */
#define COP0_EBASE $15,1 /* Exception base */
#define COP0_CONFIG $16,0 /* Misc config options */
#define COP0_CONFIG1 $16,1 /* Misc config options */
#define COP0_CONFIG2 $16,2 /* Misc config options */
#define COP0_CONFIG3 $16,3 /* Misc config options */
#define COP0_WATCHLO0 $18,0 /* Address watch registers */
#define COP0_WATCHLO1 $18,1 /* Address watch registers */
#define COP0_WATCHHI0 $19,0 /* Address watch registers */
#define COP0_WATCHHI1 $19,1 /* Address watch registers */
#define COP0_XCONTEXT $20,0 /* OS context */
#define COP0_MULTICOREDEBUG $22,0 /* Cavium debug */
#define COP0_DEBUG $23,0 /* Debug status */
#define COP0_DEPC $24,0 /* Debug PC */
#define COP0_PERFCONTROL0 $25,0 /* Performance counter control */
#define COP0_PERFCONTROL1 $25,2 /* Performance counter control */
#define COP0_PERFVALUE0 $25,1 /* Performance counter */
#define COP0_PERFVALUE1 $25,3 /* Performance counter */
#define COP0_CACHEERRI $27,0 /* I cache error status */
#define COP0_CACHEERRD $27,1 /* D cache error status */
#define COP0_TAGLOI $28,0 /* I cache tagLo */
#define COP0_TAGLOD $28,2 /* D cache tagLo */
#define COP0_DATALOI $28,1 /* I cache dataLo */
#define COP0_DATALOD $28,3 /* D cache dataLo */
#define COP0_TAGHI $29,2 /* ? */
#define COP0_DATAHII $29,1 /* ? */
#define COP0_DATAHID $29,3 /* ? */
#define COP0_ERROREPC $30,0 /* Error PC */
#define COP0_DESAVE $31,0 /* Debug scratch area */
/* This header file can be included from a .S file. Keep non-preprocessor
things under !__ASSEMBLER__. */
#ifndef __ASSEMBLER__
#ifdef __cplusplus
extern "C" {
#endif
/* turn the variable name into a string */
/* Two-level expansion so that macro arguments are expanded before being
   stringized by the # operator. */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
/* Since sync is required for Octeon2. */
#ifdef _MIPS_ARCH_OCTEON2
#define CVMX_CAVIUM_OCTEON2 1
#endif
/* other useful stuff */
#define CVMX_BREAK asm volatile ("break")
#define CVMX_SYNC asm volatile ("sync" : : :"memory")
/* String version of SYNCW macro for using in inline asm constructs */
/* Octeon2 needs only a single syncw; older parts use two (see the
   Core-401 errata note further down). */
#define CVMX_SYNCW_STR_OCTEON2 "syncw\n"
#ifdef CVMX_CAVIUM_OCTEON2
#define CVMX_SYNCW_STR CVMX_SYNCW_STR_OCTEON2
#else
#define CVMX_SYNCW_STR "syncw\nsyncw\n"
#endif /* CVMX_CAVIUM_OCTEON2 */
/* Memory-barrier macro selection.  Three regimes:
   - __OCTEON__ with an OS (VXWORKS/Linux): map the "s" (SMP-scoped)
     variants onto the plain ones.
   - __OCTEON__ bare-metal / toolchain build: use the real syncs/syncws
     encodings (doubled on pre-Octeon2 parts, see errata note below).
   - non-Octeon compiler: everything degrades to plain "sync". */
#ifdef __OCTEON__
#define CVMX_SYNCIOBDMA asm volatile ("synciobdma" : : :"memory")
/* We actually use two syncw instructions in a row when we need a write
   memory barrier. This is because the CN3XXX series of Octeons have
   errata Core-401. This can cause a single syncw to not enforce
   ordering under very rare conditions. Even if it is rare, better safe
   than sorry */
#define CVMX_SYNCW_OCTEON2 asm volatile ("syncw\n" : : :"memory")
#ifdef CVMX_CAVIUM_OCTEON2
#define CVMX_SYNCW CVMX_SYNCW_OCTEON2
#else
#define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : :"memory")
#endif /* CVMX_CAVIUM_OCTEON2 */
#if defined(VXWORKS) || defined(__linux__)
/* Define new sync instructions to be normal SYNC instructions for
   operating systems that use threads */
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
#define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
#else
#if defined(CVMX_BUILD_FOR_TOOLCHAIN)
/* While building simple exec toolchain, always use syncw to
   support all Octeon models. */
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW_OCTEON2
#define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR_OCTEON2
#else
/* Again, just like syncw, we may need two syncws instructions in a row due
   errata Core-401. Only one syncws is required for Octeon2 models */
#define CVMX_SYNCS asm volatile ("syncs" : : :"memory")
#define CVMX_SYNCWS_OCTEON2 asm volatile ("syncws\n" : : :"memory")
#define CVMX_SYNCWS_STR_OCTEON2 "syncws\n"
#ifdef CVMX_CAVIUM_OCTEON2
#define CVMX_SYNCWS CVMX_SYNCWS_OCTEON2
#define CVMX_SYNCWS_STR CVMX_SYNCWS_STR_OCTEON2
#else
#define CVMX_SYNCWS asm volatile ("syncws\nsyncws\n" : : :"memory")
#define CVMX_SYNCWS_STR "syncws\nsyncws\n"
#endif /* CVMX_CAVIUM_OCTEON2 */
#endif
#endif
#else /* !__OCTEON__ */
/* Not using a Cavium compiler, always use the slower sync so the assembler stays happy */
#define CVMX_SYNCIOBDMA asm volatile ("sync" : : :"memory")
#define CVMX_SYNCW asm volatile ("sync" : : :"memory")
#define CVMX_SYNCWS CVMX_SYNCW
#define CVMX_SYNCS CVMX_SYNC
#define CVMX_SYNCWS_STR CVMX_SYNCW_STR
#define CVMX_SYNCWS_OCTEON2 CVMX_SYNCW
#define CVMX_SYNCWS_STR_OCTEON2 CVMX_SYNCW_STR
#endif
/* Instruction-cache sync and prefetch helpers.  The pref "type" operand
   selects the hint (0/1 = load into L1, 4/5 = bypass L2, 28 = L2 only,
   29/30 = cache-management hints as commented below). */
#define CVMX_SYNCI(address, offset) asm volatile ("synci " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
#define CVMX_PREFETCH0(address) CVMX_PREFETCH(address, 0)
#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)
// a normal prefetch
#define CVMX_PREFETCH(address, offset) CVMX_PREFETCH_PREF0(address, offset)
// normal prefetches that use the pref instruction
#define CVMX_PREFETCH_PREFX(X, address, offset) asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
#define CVMX_PREFETCH_PREF1(address, offset) CVMX_PREFETCH_PREFX(1, address, offset)
#define CVMX_PREFETCH_PREF6(address, offset) CVMX_PREFETCH_PREFX(6, address, offset)
#define CVMX_PREFETCH_PREF7(address, offset) CVMX_PREFETCH_PREFX(7, address, offset)
// prefetch into L1, do not put the block in the L2
#define CVMX_PREFETCH_NOTL2(address, offset) CVMX_PREFETCH_PREFX(4, address, offset)
#define CVMX_PREFETCH_NOTL22(address, offset) CVMX_PREFETCH_PREFX(5, address, offset)
// prefetch into L2, do not put the block in the L1
#define CVMX_PREFETCH_L2(address, offset) CVMX_PREFETCH_PREFX(28, address, offset)
// CVMX_PREPARE_FOR_STORE makes each byte of the block unpredictable (actually old value or zero) until
// that byte is stored to (by this or another processor. Note that the value of each byte is not only
// unpredictable, but may also change again - up until the point when one of the cores stores to the
// byte.
#define CVMX_PREPARE_FOR_STORE(address, offset) CVMX_PREFETCH_PREFX(30, address, offset)
// This is a command headed to the L2 controller to tell it to clear its dirty bit for a
// block. Basically, SW is telling HW that the current version of the block will not be
// used.
#define CVMX_DONT_WRITE_BACK(address, offset) CVMX_PREFETCH_PREFX(29, address, offset)
#define CVMX_ICACHE_INVALIDATE { CVMX_SYNC; asm volatile ("synci 0($0)" : : ); } // flush stores, invalidate entire icache
#define CVMX_ICACHE_INVALIDATE2 { CVMX_SYNC; asm volatile ("cache 0, 0($0)" : : ); } // flush stores, invalidate entire icache
#define CVMX_DCACHE_INVALIDATE { CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); } // complete prefetches, invalidate entire dcache
#define CVMX_CACHE(op, address, offset) asm volatile ("cache " CVMX_TMP_STR(op) ", " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address) )
#define CVMX_CACHE_LCKL2(address, offset) CVMX_CACHE(31, address, offset) // fetch and lock the state.
#define CVMX_CACHE_WBIL2(address, offset) CVMX_CACHE(23, address, offset) // unlock the state.
#define CVMX_CACHE_WBIL2I(address, offset) CVMX_CACHE(3, address, offset) // invalidate the cache block and clear the USED bits for the block
#define CVMX_CACHE_LTGL2I(address, offset) CVMX_CACHE(7, address, offset) // load virtual tag and data for the L2 cache block into L2C_TAD0_TAG register
/* new instruction to make RC4 run faster */
#define CVMX_BADDU(result, input1, input2) asm ("baddu %[rd],%[rs],%[rt]" : [rd] "=d" (result) : [rs] "d" (input1) , [rt] "d" (input2))
// misc v2 stuff
/* Rotate and byte/halfword swap primitives.  The *R variants take an
   immediate shift count (must be a constant); the *RV variants take the
   count in a register. */
#define CVMX_ROTR(result, input1, shiftconst) asm ("rotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
#define CVMX_ROTRV(result, input1, input2) asm ("rotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
#define CVMX_DROTR(result, input1, shiftconst) asm ("drotr %[rd],%[rs]," CVMX_TMP_STR(shiftconst) : [rd] "=d" (result) : [rs] "d" (input1))
#define CVMX_DROTRV(result, input1, input2) asm ("drotrv %[rd],%[rt],%[rs]" : [rd] "=d" (result) : [rt] "d" (input1) , [rs] "d" (input2))
#define CVMX_SEB(result, input1) asm ("seb %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_SEH(result, input1) asm ("seh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_DSBH(result, input1) asm ("dsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_DSHD(result, input1) asm ("dshd %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
#define CVMX_WSBH(result, input1) asm ("wsbh %[rd],%[rt]" : [rd] "=d" (result) : [rt] "d" (input1))
// Endian swap
/* 64-bit: swap bytes within halfwords, then swap halfwords within the
   doubleword.  32-bit: swap bytes within halfwords, then rotate by 16. */
#define CVMX_ES64(result, input) \
        do {\
        CVMX_DSBH(result, input); \
        CVMX_DSHD(result, result); \
        } while (0)
#define CVMX_ES32(result, input) \
        do {\
        CVMX_WSBH(result, input); \
        CVMX_ROTR(result, result, 16); \
        } while (0)
/* extract and insert - NOTE that pos and len variables must be constants! */
/* the P variants take len rather than lenm1 */
/* the M1 variants take lenm1 rather than len */
#define CVMX_EXTS(result,input,pos,lenm1) asm ("exts %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_EXTSP(result,input,pos,len) CVMX_EXTS(result,input,pos,(len)-1)
#define CVMX_DEXT(result,input,pos,len) asm ("dext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_DEXTM1(result,input,pos,lenm1) CVMX_DEXT(result,input,pos,(lenm1)+1)
#define CVMX_EXT(result,input,pos,len) asm ("ext %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_EXTM1(result,input,pos,lenm1) CVMX_EXT(result,input,pos,(lenm1)+1)
// removed
// #define CVMX_EXTU(result,input,pos,lenm1) asm ("extu %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
// #define CVMX_EXTUP(result,input,pos,len) CVMX_EXTU(result,input,pos,(len)-1)
/* The insert macros list "result" both as output and (via "[rt]") as input,
   since ins/dins merge the new field into the existing register value. */
#define CVMX_CINS(result,input,pos,lenm1) asm ("cins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : [rs] "d" (input))
#define CVMX_CINSP(result,input,pos,len) CVMX_CINS(result,input,pos,(len)-1)
#define CVMX_DINS(result,input,pos,len) asm ("dins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
#define CVMX_DINSM1(result,input,pos,lenm1) CVMX_DINS(result,input,pos,(lenm1)+1)
#define CVMX_DINSC(result,pos,len) asm ("dins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
#define CVMX_DINSCM1(result,pos,lenm1) CVMX_DINSC(result,pos,(lenm1)+1)
#define CVMX_INS(result,input,pos,len) asm ("ins %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
#define CVMX_INSM1(result,input,pos,lenm1) CVMX_INS(result,input,pos,(lenm1)+1)
#define CVMX_INSC(result,pos,len) asm ("ins %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(len): [rt] "=d" (result): "[rt]" (result))
#define CVMX_INSCM1(result,pos,lenm1) CVMX_INSC(result,pos,(lenm1)+1)
// removed
// #define CVMX_INS0(result,input,pos,lenm1) asm("ins0 %[rt],%[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1): [rt] "=d" (result): [rs] "d" (input), "[rt]" (result))
// #define CVMX_INS0P(result,input,pos,len) CVMX_INS0(result,input,pos,(len)-1)
// #define CVMX_INS0C(result,pos,lenm1) asm ("ins0 %[rt],$0," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(lenm1) : [rt] "=d" (result) : "[rt]" (result))
// #define CVMX_INS0CP(result,pos,len) CVMX_INS0C(result,pos,(len)-1)
/* Count leading zeros/ones and population count (32- and 64-bit). */
#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DCLZ(result, input) asm ("dclz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_CLO(result, input) asm ("clo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DCLO(result, input) asm ("dclo %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_POP(result, input) asm ("pop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#define CVMX_DPOP(result, input) asm ("dpop %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
#ifdef CVMX_ABI_O32
/* rdhwr $31 is the 64 bit cvmcount register, it needs to be split
   into one or two (depending on the width of the result) properly
   sign extended registers. All other registers are 32 bits wide
   and already properly sign extended. */
/* Under O32 a 64-bit result lives in a register pair: %L0 is the low
   half, %M0 the high half, hence the rdhwr/dsra/sll sequence. */
# define CVMX_RDHWRX(result, regstr, ASM_STMT) ({ \
        if (regstr == 31) { \
                if (sizeof(result) == 8) { \
                        ASM_STMT (".set\tpush\n" \
                                  "\t.set\tmips64r2\n" \
                                  "\trdhwr\t%L0,$31\n" \
                                  "\tdsra\t%M0,%L0,32\n" \
                                  "\tsll\t%L0,%L0,0\n" \
                                  "\t.set\tpop": "=d"(result)); \
                } else { \
                        unsigned long _v; \
                        ASM_STMT ("rdhwr\t%0,$31\n" \
                                  "\tsll\t%0,%0,0" : "=d"(_v)); \
                        result = (__typeof(result))_v; \
                } \
        } else { \
                unsigned long _v; \
                ASM_STMT ("rdhwr\t%0,$" CVMX_TMP_STR(regstr) : "=d"(_v)); \
                result = (__typeof(result))_v; \
        }})
/* RDHWR is volatile (re-read every time); RDHWRNV may be CSE'd. */
# define CVMX_RDHWR(result, regstr) CVMX_RDHWRX(result, regstr, asm volatile)
# define CVMX_RDHWRNV(result, regstr) CVMX_RDHWRX(result, regstr, asm)
#else
# define CVMX_RDHWR(result, regstr) asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
# define CVMX_RDHWRNV(result, regstr) asm ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#endif
// some new cop0-like stuff
/* di/ei disable/enable interrupts; the non-NULL forms return the prior
   Status register value in "result". */
#define CVMX_DI(result) asm volatile ("di %[rt]" : [rt] "=d" (result))
#define CVMX_DI_NULL asm volatile ("di")
#define CVMX_EI(result) asm volatile ("ei %[rt]" : [rt] "=d" (result))
#define CVMX_EI_NULL asm volatile ("ei")
#define CVMX_EHB asm volatile ("ehb")
/* mul stuff */
/* Load the multiplier-unit M and P registers, then run wide multiplies. */
#define CVMX_MTM0(m) asm volatile ("mtm0 %[rs]" : : [rs] "d" (m))
#define CVMX_MTM1(m) asm volatile ("mtm1 %[rs]" : : [rs] "d" (m))
#define CVMX_MTM2(m) asm volatile ("mtm2 %[rs]" : : [rs] "d" (m))
#define CVMX_MTP0(p) asm volatile ("mtp0 %[rs]" : : [rs] "d" (p))
#define CVMX_MTP1(p) asm volatile ("mtp1 %[rs]" : : [rs] "d" (p))
#define CVMX_MTP2(p) asm volatile ("mtp2 %[rs]" : : [rs] "d" (p))
#define CVMX_VMULU(dest,mpcand,accum) asm volatile ("vmulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
#define CVMX_VMM0(dest,mpcand,accum) asm volatile ("vmm0 %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
#define CVMX_V3MULU(dest,mpcand,accum) asm volatile ("v3mulu %[rd],%[rs],%[rt]" : [rd] "=d" (dest) : [rs] "d" (mpcand), [rt] "d" (accum))
/* branch stuff */
// these are hard to make work because the compiler does not realize that the
// instruction is a branch so may optimize away the label
// the labels to these next two macros must not include a ":" at the end
#define CVMX_BBIT1(var, pos, label) asm volatile ("bbit1 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
#define CVMX_BBIT0(var, pos, label) asm volatile ("bbit0 %[rs]," CVMX_TMP_STR(pos) "," CVMX_TMP_STR(label) : : [rs] "d" (var))
// the label to this macro must include a ":" at the end
#define CVMX_ASM_LABEL(label) label \
                             asm volatile (CVMX_TMP_STR(label) : : )
//
// Low-latency memory stuff
//
// set can be 0-1
#define CVMX_MT_LLM_READ_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0400+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_WRITE_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0401+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_READ64_ADDR(set,val) asm volatile ("dmtc2 %[rt],0x0404+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_WRITE64_ADDR_INTERNAL(set,val) asm volatile ("dmtc2 %[rt],0x0405+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MT_LLM_DATA(set,val) asm volatile ("dmtc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : : [rt] "d" (val))
#define CVMX_MF_LLM_DATA(set,val) asm volatile ("dmfc2 %[rt],0x0402+(8*(" CVMX_TMP_STR(set) "))" : [rt] "=d" (val) : )
// load linked, store conditional
/* SC/SCD write srcdest back with the success flag (1 = stored). */
#define CVMX_LL(dest, address, offset) asm volatile ("ll %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
#define CVMX_LLD(dest, address, offset) asm volatile ("lld %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (dest) : [rbase] "d" (address) )
#define CVMX_SC(srcdest, address, offset) asm volatile ("sc %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_SCD(srcdest, address, offset) asm volatile ("scd %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
// load/store word left/right
#define CVMX_LWR(srcdest, address, offset) asm volatile ("lwr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LWL(srcdest, address, offset) asm volatile ("lwl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LDR(srcdest, address, offset) asm volatile ("ldr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_LDL(srcdest, address, offset) asm volatile ("ldl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : [rt] "=d" (srcdest) : [rbase] "d" (address), "[rt]" (srcdest) )
#define CVMX_SWR(src, address, offset) asm volatile ("swr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SWL(src, address, offset) asm volatile ("swl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SDR(src, address, offset) asm volatile ("sdr %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
#define CVMX_SDL(src, address, offset) asm volatile ("sdl %[rt], " CVMX_TMP_STR(offset) "(%[rbase])" : : [rbase] "d" (address), [rt] "d" (src) )
//
// Useful crypto ASM's
//
// CRC
/* CRC coprocessor accessors.  MT_* feed data/config in via dmtc2;
   MF_* read results back via dmfc2.  The *_REFLECT forms use the
   bit-reflected variants of the same registers. */
#define CVMX_MT_CRC_POLYNOMIAL(val) asm volatile ("dmtc2 %[rt],0x4200" : : [rt] "d" (val))
#define CVMX_MT_CRC_IV(val) asm volatile ("dmtc2 %[rt],0x0201" : : [rt] "d" (val))
#define CVMX_MT_CRC_LEN(val) asm volatile ("dmtc2 %[rt],0x1202" : : [rt] "d" (val))
#define CVMX_MT_CRC_BYTE(val) asm volatile ("dmtc2 %[rt],0x0204" : : [rt] "d" (val))
#define CVMX_MT_CRC_HALF(val) asm volatile ("dmtc2 %[rt],0x0205" : : [rt] "d" (val))
#define CVMX_MT_CRC_WORD(val) asm volatile ("dmtc2 %[rt],0x0206" : : [rt] "d" (val))
#define CVMX_MT_CRC_DWORD(val) asm volatile ("dmtc2 %[rt],0x1207" : : [rt] "d" (val))
#define CVMX_MT_CRC_VAR(val) asm volatile ("dmtc2 %[rt],0x1208" : : [rt] "d" (val))
#define CVMX_MT_CRC_POLYNOMIAL_REFLECT(val) asm volatile ("dmtc2 %[rt],0x4210" : : [rt] "d" (val))
#define CVMX_MT_CRC_IV_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0211" : : [rt] "d" (val))
#define CVMX_MT_CRC_BYTE_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0214" : : [rt] "d" (val))
#define CVMX_MT_CRC_HALF_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0215" : : [rt] "d" (val))
#define CVMX_MT_CRC_WORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x0216" : : [rt] "d" (val))
#define CVMX_MT_CRC_DWORD_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1217" : : [rt] "d" (val))
#define CVMX_MT_CRC_VAR_REFLECT(val) asm volatile ("dmtc2 %[rt],0x1218" : : [rt] "d" (val))
#define CVMX_MF_CRC_POLYNOMIAL(val) asm volatile ("dmfc2 %[rt],0x0200" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_IV(val) asm volatile ("dmfc2 %[rt],0x0201" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_IV_REFLECT(val) asm volatile ("dmfc2 %[rt],0x0203" : [rt] "=d" (val) : )
#define CVMX_MF_CRC_LEN(val) asm volatile ("dmfc2 %[rt],0x0202" : [rt] "=d" (val) : )
// MD5 and SHA-1
/* Hash-unit accessors: DAT/DATW load message words, IV/IVW hold the
   chaining state, and the START* writes kick off a block round. */
// pos can be 0-6
#define CVMX_MT_HSH_DAT(val,pos) asm volatile ("dmtc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_HSH_DATZ(pos) asm volatile ("dmtc2 $0,0x0040+" CVMX_TMP_STR(pos) : : )
// pos can be 0-14
#define CVMX_MT_HSH_DATW(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_HSH_DATWZ(pos) asm volatile ("dmtc2 $0,0x0240+" CVMX_TMP_STR(pos) : : )
#define CVMX_MT_HSH_STARTMD5(val) asm volatile ("dmtc2 %[rt],0x4047" : : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA(val) asm volatile ("dmtc2 %[rt],0x4057" : : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA256(val) asm volatile ("dmtc2 %[rt],0x404f" : : [rt] "d" (val))
#define CVMX_MT_HSH_STARTSHA512(val) asm volatile ("dmtc2 %[rt],0x424f" : : [rt] "d" (val))
// pos can be 0-3
#define CVMX_MT_HSH_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-7
#define CVMX_MT_HSH_IVW(val,pos) asm volatile ("dmtc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-6
#define CVMX_MF_HSH_DAT(val,pos) asm volatile ("dmfc2 %[rt],0x0040+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-14
#define CVMX_MF_HSH_DATW(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-3
#define CVMX_MF_HSH_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0048+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-7
#define CVMX_MF_HSH_IVW(val,pos) asm volatile ("dmfc2 %[rt],0x0250+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// 3DES
/* 3DES engine accessors; KASUMI below reuses the same key/result
   registers with its own start opcodes. */
// pos can be 0-2
#define CVMX_MT_3DES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_3DES_IV(val) asm volatile ("dmtc2 %[rt],0x0084" : : [rt] "d" (val))
#define CVMX_MT_3DES_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4088" : : [rt] "d" (val))
#define CVMX_MT_3DES_ENC(val) asm volatile ("dmtc2 %[rt],0x408a" : : [rt] "d" (val))
#define CVMX_MT_3DES_DEC_CBC(val) asm volatile ("dmtc2 %[rt],0x408c" : : [rt] "d" (val))
#define CVMX_MT_3DES_DEC(val) asm volatile ("dmtc2 %[rt],0x408e" : : [rt] "d" (val))
#define CVMX_MT_3DES_RESULT(val) asm volatile ("dmtc2 %[rt],0x0098" : : [rt] "d" (val))
// pos can be 0-2
#define CVMX_MF_3DES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0080+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MF_3DES_IV(val) asm volatile ("dmfc2 %[rt],0x0084" : [rt] "=d" (val) : )
#define CVMX_MF_3DES_RESULT(val) asm volatile ("dmfc2 %[rt],0x0088" : [rt] "=d" (val) : )
// KASUMI
// pos can be 0-1
#define CVMX_MT_KAS_KEY(val,pos) CVMX_MT_3DES_KEY(val,pos)
#define CVMX_MT_KAS_ENC_CBC(val) asm volatile ("dmtc2 %[rt],0x4089" : : [rt] "d" (val))
#define CVMX_MT_KAS_ENC(val) asm volatile ("dmtc2 %[rt],0x408b" : : [rt] "d" (val))
#define CVMX_MT_KAS_RESULT(val) CVMX_MT_3DES_RESULT(val)
// pos can be 0-1
#define CVMX_MF_KAS_KEY(val,pos) CVMX_MF_3DES_KEY(val,pos)
#define CVMX_MF_KAS_RESULT(val) CVMX_MF_3DES_RESULT(val)
// AES
/* AES engine accessors.  The *0 write stages the first 64 bits of a
   block; the *1 write supplies the second 64 bits and starts the
   operation (hence the different opcode prefix). */
#define CVMX_MT_AES_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
#define CVMX_MT_AES_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3109" : : [rt] "d" (val))
#define CVMX_MT_AES_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
#define CVMX_MT_AES_ENC1(val) asm volatile ("dmtc2 %[rt],0x310b" : : [rt] "d" (val))
#define CVMX_MT_AES_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
#define CVMX_MT_AES_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x310d" : : [rt] "d" (val))
#define CVMX_MT_AES_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
#define CVMX_MT_AES_DEC1(val) asm volatile ("dmtc2 %[rt],0x310f" : : [rt] "d" (val))
// pos can be 0-3
#define CVMX_MT_AES_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_AES_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_AES_KEYLENGTH(val) asm volatile ("dmtc2 %[rt],0x0110" : : [rt] "d" (val)) // write the keylen
// pos can be 0-1
#define CVMX_MT_AES_RESULT(val,pos) asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MF_AES_RESULT(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MF_AES_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-3
#define CVMX_MF_AES_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MF_AES_KEYLENGTH(val) asm volatile ("dmfc2 %[rt],0x0110" : [rt] "=d" (val) : ) // read the keylen
#define CVMX_MF_AES_DAT0(val) asm volatile ("dmfc2 %[rt],0x0111" : [rt] "=d" (val) : ) // first piece of input data
// GFM
/* Galois-field multiplier (GHASH) accessors. */
// pos can be 0-1
#define CVMX_MF_GFM_MUL(val,pos) asm volatile ("dmfc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MF_GFM_POLY(val) asm volatile ("dmfc2 %[rt],0x025e" : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MF_GFM_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MF_GFM_RESINP_REFLECT(val,pos) asm volatile ("dmfc2 %[rt],0x005a+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MT_GFM_MUL(val,pos) asm volatile ("dmtc2 %[rt],0x0258+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_GFM_POLY(val) asm volatile ("dmtc2 %[rt],0x025e" : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_GFM_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x025a+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_GFM_XOR0(val) asm volatile ("dmtc2 %[rt],0x025c" : : [rt] "d" (val))
#define CVMX_MT_GFM_XORMUL1(val) asm volatile ("dmtc2 %[rt],0x425d" : : [rt] "d" (val))
// pos can be 0-1
/* Move "val" TO the reflected GFM multiplier register.
 * Bug fix: dmtc2 reads the GP register, so "val" must be an input
 * operand.  The previous constraint list (": [rt] \"=d\" (val) : ")
 * declared it as an output — copied from the MF (read) form — which
 * transferred an undefined value and clobbered "val".  Every sibling
 * MT_GFM_* macro uses the input form written here. */
#define CVMX_MT_GFM_MUL_REFLECT(val,pos) asm volatile ("dmtc2 %[rt],0x0058+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_GFM_XOR0_REFLECT(val) asm volatile ("dmtc2 %[rt],0x005c" : : [rt] "d" (val))
#define CVMX_MT_GFM_XORMUL1_REFLECT(val) asm volatile ("dmtc2 %[rt],0x405d" : : [rt] "d" (val))
// SNOW 3G
/* SNOW 3G stream-cipher accessors: LFSR state (8 regs), FSM state
   (3 regs), plus result/start/more control writes. */
// pos can be 0-7
#define CVMX_MF_SNOW3G_LFSR(val,pos) asm volatile ("dmfc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-2
#define CVMX_MF_SNOW3G_FSM(val,pos) asm volatile ("dmfc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MF_SNOW3G_RESULT(val) asm volatile ("dmfc2 %[rt],0x0250" : [rt] "=d" (val) : )
// pos can be 0-7
#define CVMX_MT_SNOW3G_LFSR(val,pos) asm volatile ("dmtc2 %[rt],0x0240+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-2
#define CVMX_MT_SNOW3G_FSM(val,pos) asm volatile ("dmtc2 %[rt],0x0251+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
#define CVMX_MT_SNOW3G_RESULT(val) asm volatile ("dmtc2 %[rt],0x0250" : : [rt] "d" (val))
#define CVMX_MT_SNOW3G_START(val) asm volatile ("dmtc2 %[rt],0x404d" : : [rt] "d" (val))
#define CVMX_MT_SNOW3G_MORE(val) asm volatile ("dmtc2 %[rt],0x404e" : : [rt] "d" (val))
// SMS4
/* SMS4 block-cipher accessors (shares the AES register block).
 * Fix: insert a space between each string literal and CVMX_TMP_STR(...).
 * Without the space, C++11 and later lex `"..."CVMX_TMP_STR` as a
 * user-defined-literal suffix and the header fails to compile when
 * included from C++ (this header is usable from C++ via extern "C").
 * The preprocessed output is identical in C. */
// pos can be 0-1
#define CVMX_MF_SMS4_IV(val,pos) asm volatile ("dmfc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MF_SMS4_KEY(val,pos) asm volatile ("dmfc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
// pos can be 0-1
#define CVMX_MF_SMS4_RESINP(val,pos) asm volatile ("dmfc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : [rt] "=d" (val) : )
#define CVMX_MT_SMS4_DEC_CBC0(val) asm volatile ("dmtc2 %[rt],0x010c" : : [rt] "d" (val))
#define CVMX_MT_SMS4_DEC_CBC1(val) asm volatile ("dmtc2 %[rt],0x311d" : : [rt] "d" (val))
#define CVMX_MT_SMS4_DEC0(val) asm volatile ("dmtc2 %[rt],0x010e" : : [rt] "d" (val))
#define CVMX_MT_SMS4_DEC1(val) asm volatile ("dmtc2 %[rt],0x311f" : : [rt] "d" (val))
#define CVMX_MT_SMS4_ENC_CBC0(val) asm volatile ("dmtc2 %[rt],0x0108" : : [rt] "d" (val))
#define CVMX_MT_SMS4_ENC_CBC1(val) asm volatile ("dmtc2 %[rt],0x3119" : : [rt] "d" (val))
#define CVMX_MT_SMS4_ENC0(val) asm volatile ("dmtc2 %[rt],0x010a" : : [rt] "d" (val))
#define CVMX_MT_SMS4_ENC1(val) asm volatile ("dmtc2 %[rt],0x311b" : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_SMS4_IV(val,pos) asm volatile ("dmtc2 %[rt],0x0102+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_SMS4_KEY(val,pos) asm volatile ("dmtc2 %[rt],0x0104+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
// pos can be 0-1
#define CVMX_MT_SMS4_RESINP(val,pos) asm volatile ("dmtc2 %[rt],0x0100+" CVMX_TMP_STR(pos) : : [rt] "d" (val))
/* check_ordering stuff */
#if 0
#define CVMX_MF_CHORD(dest) asm volatile ("dmfc2 %[rt],0x400" : [rt] "=d" (dest) : )
#else
#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
#if 0
#define CVMX_MF_CYCLE(dest) asm volatile ("dmfc0 %[rt],$9,6" : [rt] "=d" (dest) : ) // Use (64-bit) CvmCount register rather than Count
#else
#define CVMX_MF_CYCLE(dest) CVMX_RDHWR(dest, 31) /* reads the current (64-bit) CvmCount value */
#endif
#define CVMX_MT_CYCLE(src) asm volatile ("dmtc0 %[rt],$9,6" :: [rt] "d" (src))
#define VASTR(...) #__VA_ARGS__
#define CVMX_MF_COP0(val, cop0) asm volatile ("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val));
#define CVMX_MT_COP0(val, cop0) asm volatile ("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val));
#define CVMX_MF_CACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRI)
#define CVMX_MF_DCACHE_ERR(val) CVMX_MF_COP0(val, COP0_CACHEERRD)
#define CVMX_MF_CVM_MEM_CTL(val) CVMX_MF_COP0(val, COP0_CVMMEMCTL)
#define CVMX_MF_CVM_CTL(val) CVMX_MF_COP0(val, COP0_CVMCTL)
#define CVMX_MT_CACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRI)
#define CVMX_MT_DCACHE_ERR(val) CVMX_MT_COP0(val, COP0_CACHEERRD)
#define CVMX_MT_CVM_MEM_CTL(val) CVMX_MT_COP0(val, COP0_CVMMEMCTL)
#define CVMX_MT_CVM_CTL(val) CVMX_MT_COP0(val, COP0_CVMCTL)
/* Macros for TLB */
#define CVMX_TLBWI asm volatile ("tlbwi" : : )
#define CVMX_TLBWR asm volatile ("tlbwr" : : )
#define CVMX_TLBR asm volatile ("tlbr" : : )
#define CVMX_TLBP asm volatile ("tlbp" : : )
/* TLB register accessors — dmtc0/dmfc0 for 64-bit registers,
   mtc0/mfc0 for 32-bit ones. */
#define CVMX_MT_ENTRY_HIGH(val) asm volatile ("dmtc0 %[rt],$10,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_0(val) asm volatile ("dmtc0 %[rt],$2,0" : : [rt] "d" (val))
#define CVMX_MT_ENTRY_LO_1(val) asm volatile ("dmtc0 %[rt],$3,0" : : [rt] "d" (val))
#define CVMX_MT_PAGEMASK(val) asm volatile ("mtc0 %[rt],$5,0" : : [rt] "d" (val))
#define CVMX_MT_PAGEGRAIN(val) asm volatile ("mtc0 %[rt],$5,1" : : [rt] "d" (val))
#define CVMX_MT_TLB_INDEX(val) asm volatile ("mtc0 %[rt],$0,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_CONTEXT(val) asm volatile ("dmtc0 %[rt],$4,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_WIRED(val) asm volatile ("mtc0 %[rt],$6,0" : : [rt] "d" (val))
#define CVMX_MT_TLB_RANDOM(val) asm volatile ("mtc0 %[rt],$1,0" : : [rt] "d" (val))
#define CVMX_MF_ENTRY_LO_0(val) asm volatile ("dmfc0 %[rt],$2,0" : [rt] "=d" (val):)
#define CVMX_MF_ENTRY_LO_1(val) asm volatile ("dmfc0 %[rt],$3,0" : [rt] "=d" (val):)
#define CVMX_MF_ENTRY_HIGH(val) asm volatile ("dmfc0 %[rt],$10,0" : [rt] "=d" (val):)
#define CVMX_MF_PAGEMASK(val) asm volatile ("mfc0 %[rt],$5,0" : [rt] "=d" (val):)
#define CVMX_MF_PAGEGRAIN(val) asm volatile ("mfc0 %[rt],$5,1" : [rt] "=d" (val):)
#define CVMX_MF_TLB_WIRED(val) asm volatile ("mfc0 %[rt],$6,0" : [rt] "=d" (val):)
#define CVMX_MF_TLB_INDEX(val) asm volatile ("mfc0 %[rt],$0,0" : [rt] "=d" (val):)
#define CVMX_MF_TLB_RANDOM(val) asm volatile ("mfc0 %[rt],$1,0" : [rt] "=d" (val):)
/* EntryLo D/V/G bits. */
#define TLB_DIRTY (0x1ULL<<2)
#define TLB_VALID (0x1ULL<<1)
#define TLB_GLOBAL (0x1ULL<<0)
#if !defined(__FreeBSD__) || !defined(_KERNEL)
/* Macros to PUSH and POP Octeon2 ISA. */
#define CVMX_PUSH_OCTEON2 asm volatile (".set push\n.set arch=octeon2")
#define CVMX_POP_OCTEON2 asm volatile (".set pop")
#endif
/* Assembler macros that guarantee byte-safe unaligned loads/stores using the
 * MIPS ulh/ulhu/ush/ulw/usw/uld/usd pseudo-instructions (these expand using
 * the AT register).  The hidden local (__a) plus the per-byte "m" operands
 * make the touched bytes visible to GCC so it orders the access correctly
 * relative to surrounding code. */
#define CVMX_LOADUNA_INT16(result, address, offset) \
{ char *__a = (char *)(address); \
asm ("ulh %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset]), "m"(__a[offset + 1])); }
/* Unsigned (zero-extending) 16-bit variant. */
#define CVMX_LOADUNA_UINT16(result, address, offset) \
{ char *__a = (char *)(address); \
asm ("ulhu %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : [rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1])); }
#define CVMX_STOREUNA_INT16(data, address, offset) \
{ char *__a = (char *)(address); \
asm ("ush %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : "=m"(__a[offset + 0]), "=m"(__a[offset + 1]): [rsrc] "d" (data), [rbase] "d" (__a)); }
#define CVMX_LOADUNA_INT32(result, address, offset) \
{ char *__a = (char *)(address); \
asm ("ulw %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
[rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3])); }
#define CVMX_STOREUNA_INT32(data, address, offset) \
{ char *__a = (char *)(address); \
asm ("usw %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
"=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]) : \
[rsrc] "d" (data), [rbase] "d" (__a)); }
#define CVMX_LOADUNA_INT64(result, address, offset) \
{ char *__a = (char *)(address); \
asm ("uld %[rdest], " CVMX_TMP_STR(offset) "(%[rbase])" : [rdest] "=d" (result) : \
[rbase] "d" (__a), "m"(__a[offset + 0]), "m"(__a[offset + 1]), "m"(__a[offset + 2]), "m"(__a[offset + 3]), \
"m"(__a[offset + 4]), "m"(__a[offset + 5]), "m"(__a[offset + 6]), "m"(__a[offset + 7])); }
#define CVMX_STOREUNA_INT64(data, address, offset) \
{ char *__a = (char *)(address); \
asm ("usd %[rsrc], " CVMX_TMP_STR(offset) "(%[rbase])" : \
"=m"(__a[offset + 0]), "=m"(__a[offset + 1]), "=m"(__a[offset + 2]), "=m"(__a[offset + 3]), \
"=m"(__a[offset + 4]), "=m"(__a[offset + 5]), "=m"(__a[offset + 6]), "=m"(__a[offset + 7]) : \
[rsrc] "d" (data), [rbase] "d" (__a)); }
#ifdef __cplusplus
}
#endif
#endif /* __ASSEMBLER__ */
#endif /* __CVMX_ASM_H__ */

View File

@ -1,142 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* cvmx-asx0-defs.h
*
* Configuration and status register (CSR) type definitions for
* Octeon asx0.
*
* This file is auto generated. Do not edit.
*
* <hr>$Revision$<hr>
*
*/
#ifndef __CVMX_ASX0_DEFS_H__
#define __CVMX_ASX0_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ASX0_DBG_DATA_DRV CVMX_ASX0_DBG_DATA_DRV_FUNC()
/* Checked accessor for the ASX0_DBG_DATA_DRV CSR address: emits a runtime
 * warning when the register is not implemented on the running model, then
 * returns the register's I/O-segment address. */
static inline uint64_t CVMX_ASX0_DBG_DATA_DRV_FUNC(void)
{
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX) && !OCTEON_IS_MODEL(OCTEON_CN58XX))
        cvmx_warn("CVMX_ASX0_DBG_DATA_DRV not supported on this chip\n");
    return CVMX_ADD_IO_SEG(0x00011800B0000208ull);
}
#else
#define CVMX_ASX0_DBG_DATA_DRV (CVMX_ADD_IO_SEG(0x00011800B0000208ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ASX0_DBG_DATA_ENABLE CVMX_ASX0_DBG_DATA_ENABLE_FUNC()
/* Checked accessor for the ASX0_DBG_DATA_ENABLE CSR address: warns when the
 * register does not exist on this model, then returns its address. */
static inline uint64_t CVMX_ASX0_DBG_DATA_ENABLE_FUNC(void)
{
    if (!OCTEON_IS_MODEL(OCTEON_CN38XX) && !OCTEON_IS_MODEL(OCTEON_CN58XX))
        cvmx_warn("CVMX_ASX0_DBG_DATA_ENABLE not supported on this chip\n");
    return CVMX_ADD_IO_SEG(0x00011800B0000200ull);
}
#else
#define CVMX_ASX0_DBG_DATA_ENABLE (CVMX_ADD_IO_SEG(0x00011800B0000200ull))
#endif
/**
* cvmx_asx0_dbg_data_drv
*
* ASX_DBG_DATA_DRV
*
*/
/* Layout of the ASX0_DBG_DATA_DRV register.  Bitfields are declared in both
 * big- and little-endian order so the struct maps the hardware register on
 * either bitfield layout.  Note the per-model variants: CN38xx has a 4-bit
 * pctl field, CN58xx widens it to 5 bits. */
union cvmx_asx0_dbg_data_drv {
	uint64_t u64;
	struct cvmx_asx0_dbg_data_drv_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_9_63 : 55;
	uint64_t pctl : 5; /**< These bits control the driving strength of the dbg
                                                         interface. */
	uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
                                                         interface. */
#else
	uint64_t nctl : 4;
	uint64_t pctl : 5;
	uint64_t reserved_9_63 : 55;
#endif
	} s;
	struct cvmx_asx0_dbg_data_drv_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_8_63 : 56;
	uint64_t pctl : 4; /**< These bits control the driving strength of the dbg
                                                         interface. */
	uint64_t nctl : 4; /**< These bits control the driving strength of the dbg
                                                         interface. */
#else
	uint64_t nctl : 4;
	uint64_t pctl : 4;
	uint64_t reserved_8_63 : 56;
#endif
	} cn38xx;
	struct cvmx_asx0_dbg_data_drv_cn38xx cn38xxp2;
	struct cvmx_asx0_dbg_data_drv_s cn58xx;
	struct cvmx_asx0_dbg_data_drv_s cn58xxp1;
};
typedef union cvmx_asx0_dbg_data_drv cvmx_asx0_dbg_data_drv_t;
/**
* cvmx_asx0_dbg_data_enable
*
* ASX_DBG_DATA_ENABLE
*
*/
/* Layout of the ASX0_DBG_DATA_ENABLE register: a single enable bit; all
 * supported models (CN38xx/CN58xx) share the same layout. */
union cvmx_asx0_dbg_data_enable {
	uint64_t u64;
	struct cvmx_asx0_dbg_data_enable_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_1_63 : 63;
	uint64_t en : 1; /**< A 1->0 transistion, turns the dbg interface OFF. */
#else
	uint64_t en : 1;
	uint64_t reserved_1_63 : 63;
#endif
	} s;
	struct cvmx_asx0_dbg_data_enable_s cn38xx;
	struct cvmx_asx0_dbg_data_enable_s cn38xxp2;
	struct cvmx_asx0_dbg_data_enable_s cn58xx;
	struct cvmx_asx0_dbg_data_enable_s cn58xxp1;
};
typedef union cvmx_asx0_dbg_data_enable cvmx_asx0_dbg_data_enable_t;
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,770 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* This file provides atomic operations
*
* <hr>$Revision: 70030 $<hr>
*
*
*/
#ifndef __CVMX_ATOMIC_H__
#define __CVMX_ATOMIC_H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*/
static inline void cvmx_atomic_add32_nosync(int32_t *ptr, int32_t incr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
    {
        uint32_t tmp;
        /* CN3xxx lacks the Octeon SAA instruction: use a classic MIPS
         * LL/SC retry loop (sc overwrites tmp with the success flag,
         * so beqz retries until the store succeeds). */
        __asm__ __volatile__(
        ".set noreorder \n"
        "1: ll %[tmp], %[val] \n"
        " addu %[tmp], %[inc] \n"
        " sc %[tmp], %[val] \n"
        " beqz %[tmp], 1b \n"
        " nop \n"
        ".set reorder \n"
        : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
        : [inc] "r" (incr)
        : "memory");
    }
    else
    {
        /* SAA: store-atomic-add word, a single fire-and-forget atomic
         * add performed by the memory system — no retry loop needed. */
        __asm__ __volatile__(
        " saa %[inc], (%[base]) \n"
        : "+m" (*ptr)
        : [inc] "r" (incr), [base] "r" (ptr)
        : "memory");
    }
}
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location.
*
* Memory access ordering is enforced before/after the atomic operation,
* so no additional 'sync' instructions are required.
*
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*/
static inline void cvmx_atomic_add32(int32_t *ptr, int32_t incr)
{
    /* SYNCWS barriers before and after make the add usable for
     * reference counting / ordered updates. */
    CVMX_SYNCWS;
    cvmx_atomic_add32_nosync(ptr, incr);
    CVMX_SYNCWS;
}
/**
* Atomically sets a 32 bit (aligned) memory location to a value
*
* @param ptr address of memory to set
* @param value value to set memory location to.
*/
static inline void cvmx_atomic_set32(int32_t *ptr, int32_t value)
{
    /* A plain aligned 32-bit store is atomic on MIPS; the barriers only
     * order it against surrounding memory operations. */
    CVMX_SYNCWS;
    *ptr = value;
    CVMX_SYNCWS;
}
/**
* Returns the current value of a 32 bit (aligned) memory
* location.
*
* @param ptr Address of memory to get
* @return Value of the memory
*/
/* Return the current value at ptr.  The read is done through a volatile
 * lvalue so the compiler performs a real load each call and cannot cache
 * the value in a register. */
static inline int32_t cvmx_atomic_get32(int32_t *ptr)
{
    volatile int32_t *vptr = ptr;
    return *vptr;
}
/**
* Atomically adds a signed value to a 64 bit (aligned) memory location.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*/
static inline void cvmx_atomic_add64_nosync(int64_t *ptr, int64_t incr)
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
    {
        uint64_t tmp;
        /* CN3xxx fallback: 64-bit LL/SC (lld/scd) retry loop. */
        __asm__ __volatile__(
        ".set noreorder \n"
        "1: lld %[tmp], %[val] \n"
        " daddu %[tmp], %[inc] \n"
        " scd %[tmp], %[val] \n"
        " beqz %[tmp], 1b \n"
        " nop \n"
        ".set reorder \n"
        : [val] "+m" (*ptr), [tmp] "=&r" (tmp)
        : [inc] "r" (incr)
        : "memory");
    }
    else
    {
        /* SAAD: store-atomic-add doubleword, single-instruction atomic
         * add performed by the memory system. */
        __asm__ __volatile__(
        " saad %[inc], (%[base]) \n"
        : "+m" (*ptr)
        : [inc] "r" (incr), [base] "r" (ptr)
        : "memory");
    }
}
/**
* Atomically adds a signed value to a 64 bit (aligned) memory location.
*
* Memory access ordering is enforced before/after the atomic operation,
* so no additional 'sync' instructions are required.
*
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*/
static inline void cvmx_atomic_add64(int64_t *ptr, int64_t incr)
{
    /* Ordered variant: SYNCWS fences around the unsynchronized add. */
    CVMX_SYNCWS;
    cvmx_atomic_add64_nosync(ptr, incr);
    CVMX_SYNCWS;
}
/**
* Atomically sets a 64 bit (aligned) memory location to a value
*
* @param ptr address of memory to set
* @param value value to set memory location to.
*/
static inline void cvmx_atomic_set64(int64_t *ptr, int64_t value)
{
    /* Aligned 64-bit stores are atomic on MIPS64; barriers provide the
     * ordering guarantee only. */
    CVMX_SYNCWS;
    *ptr = value;
    CVMX_SYNCWS;
}
/**
* Returns the current value of a 64 bit (aligned) memory
* location.
*
* @param ptr Address of memory to get
* @return Value of the memory
*/
/* Return the current value at ptr.  Reading through a volatile lvalue
 * forces an actual memory load on every call. */
static inline int64_t cvmx_atomic_get64(int64_t *ptr)
{
    volatile int64_t *vptr = ptr;
    return *vptr;
}
/**
* Atomically compares the old value with the value at ptr, and if they match,
* stores new_val to ptr.
* If *ptr and old don't match, function returns failure immediately.
* If *ptr and old match, function spins until *ptr updated to new atomically, or
* until *ptr and old no longer match
*
* Does no memory synchronization.
*
* @return 1 on success (match and store)
* 0 on no match
*/
static inline uint32_t cvmx_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
{
    uint32_t tmp, ret;
    /* LL/SC compare-and-swap.  On mismatch the branch to 2f leaves
     * ret = 0 and returns immediately; on an sc failure (another CPU
     * touched the line) the loop retries from the ll.  ret is set to 1
     * in the branch delay slot of the successful path. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: ll %[tmp], %[val] \n"
    " li %[ret], 0 \n"
    " bne %[tmp], %[old], 2f \n"
    " move %[tmp], %[new_val] \n"
    " sc %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " li %[ret], 1 \n"
    "2: nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [old] "r" (old_val), [new_val] "r" (new_val)
    : "memory");
    return(ret);
}
/**
* Atomically compares the old value with the value at ptr, and if they match,
* stores new_val to ptr.
* If *ptr and old don't match, function returns failure immediately.
* If *ptr and old match, function spins until *ptr updated to new atomically, or
* until *ptr and old no longer match
*
* Does memory synchronization that is required to use this as a locking primitive.
*
* @return 1 on success (match and store)
* 0 on no match
*/
static inline uint32_t cvmx_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val, uint32_t new_val)
{
    uint32_t ret;
    /* Fenced CAS: suitable as a lock primitive because the barriers
     * order memory accesses on both sides of the swap. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_compare_and_store32_nosync(ptr, old_val, new_val);
    CVMX_SYNCWS;
    return ret;
}
/**
* Atomically compares the old value with the value at ptr, and if they match,
* stores new_val to ptr.
* If *ptr and old don't match, function returns failure immediately.
* If *ptr and old match, function spins until *ptr updated to new atomically, or
* until *ptr and old no longer match
*
* Does no memory synchronization.
*
* @return 1 on success (match and store)
* 0 on no match
*/
static inline uint64_t cvmx_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
{
    uint64_t tmp, ret;
    /* 64-bit LL/SC (lld/scd) compare-and-swap; same control flow as the
     * 32-bit version: return 0 on mismatch, retry on scd failure,
     * ret = 1 set in the delay slot on success. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: lld %[tmp], %[val] \n"
    " li %[ret], 0 \n"
    " bne %[tmp], %[old], 2f \n"
    " move %[tmp], %[new_val] \n"
    " scd %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " li %[ret], 1 \n"
    "2: nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [old] "r" (old_val), [new_val] "r" (new_val)
    : "memory");
    return(ret);
}
/**
* Atomically compares the old value with the value at ptr, and if they match,
* stores new_val to ptr.
* If *ptr and old don't match, function returns failure immediately.
* If *ptr and old match, function spins until *ptr updated to new atomically, or
* until *ptr and old no longer match
*
* Does memory synchronization that is required to use this as a locking primitive.
*
* @return 1 on success (match and store)
* 0 on no match
*/
static inline uint64_t cvmx_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val, uint64_t new_val)
{
    uint64_t ret;
    /* Fenced 64-bit CAS, usable as a locking primitive. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_compare_and_store64_nosync(ptr, old_val, new_val);
    CVMX_SYNCWS;
    return ret;
}
/**
* Atomically adds a signed value to a 64 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
/* Fetch-and-add, 64-bit, unfenced.  On Octeon2-class chips (CN6xxx/CNF7xxx)
 * this uses the single-instruction atomic ops laid/ladd/laad (increment,
 * decrement, add), picking the shorter encodings when incr is a compile-time
 * +1/-1; elsewhere it falls back to an lld/scd retry loop.
 * Fix: the Octeon2 branches previously declared the in/out memory operand as
 * "+m" (ptr) — the pointer variable itself — instead of the pointed-to object
 * "+m" (*ptr).  Only the blanket "memory" clobber kept that correct; the
 * constraint now names the actual object so the compiler's dependence
 * information is right even if the clobber is ever narrowed. */
static inline int64_t cvmx_atomic_fetch_and_add64_nosync(int64_t *ptr, int64_t incr)
{
    uint64_t tmp, ret;
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        CVMX_PUSH_OCTEON2;
        if (__builtin_constant_p(incr) && incr == 1)
        {
            /* laid: load atomic, increment doubleword */
            __asm__ __volatile__(
                "laid %0,(%2)"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr) : "memory");
        }
        else if (__builtin_constant_p(incr) && incr == -1)
        {
            /* ladd: load atomic, decrement doubleword */
            __asm__ __volatile__(
                "ladd %0,(%2)"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr) : "memory");
        }
        else
        {
            /* laad: load atomic, add doubleword */
            __asm__ __volatile__(
                "laad %0,(%2),%3"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr), "r" (incr) : "memory");
        }
        CVMX_POP_OCTEON2;
    }
    else
    {
#endif
    /* Generic MIPS64 fallback: lld/scd retry loop, returning the value
     * observed before the add. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: lld %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " daddu %[tmp], %[inc] \n"
    " scd %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [inc] "r" (incr)
    : "memory");
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    }
#endif
    return (ret);
}
/**
* Atomically adds a signed value to a 64 bit (aligned) memory location,
* and returns previous value.
*
* Memory access ordering is enforced before/after the atomic operation,
* so no additional 'sync' instructions are required.
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
static inline int64_t cvmx_atomic_fetch_and_add64(int64_t *ptr, int64_t incr)
{
    uint64_t ret;
    /* Fenced fetch-and-add: barriers order surrounding accesses. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_fetch_and_add64_nosync(ptr, incr);
    CVMX_SYNCWS;
    return ret;
}
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints. (This should NOT be used for reference counting -
* use the standard version instead.)
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
/* Fetch-and-add, 32-bit, unfenced.  On Octeon2-class chips this uses the
 * single-instruction atomics lai/lad/laa (increment, decrement, add);
 * otherwise an ll/sc retry loop.
 * Fix: the Octeon2 branches previously used "+m" (ptr) — marking the pointer
 * variable, not the pointed-to word, as the in/out memory operand.  The
 * constraint now correctly names "+m" (*ptr); behavior was saved only by the
 * "memory" clobber before. */
static inline int32_t cvmx_atomic_fetch_and_add32_nosync(int32_t *ptr, int32_t incr)
{
    uint32_t tmp, ret;
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        CVMX_PUSH_OCTEON2;
        if (__builtin_constant_p(incr) && incr == 1)
        {
            /* lai: load atomic, increment word */
            __asm__ __volatile__(
                "lai %0,(%2)"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr) : "memory");
        }
        else if (__builtin_constant_p(incr) && incr == -1)
        {
            /* lad: load atomic, decrement word */
            __asm__ __volatile__(
                "lad %0,(%2)"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr) : "memory");
        }
        else
        {
            /* laa: load atomic, add word */
            __asm__ __volatile__(
                "laa %0,(%2),%3"
                : "=r" (ret), "+m" (*ptr) : "r" (ptr), "r" (incr) : "memory");
        }
        CVMX_POP_OCTEON2;
    }
    else
    {
#endif
    /* Generic fallback: ll/sc retry loop returning the pre-add value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: ll %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " addu %[tmp], %[inc] \n"
    " sc %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [inc] "r" (incr)
    : "memory");
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    }
#endif
    return (ret);
}
/**
* Atomically adds a signed value to a 32 bit (aligned) memory location,
* and returns previous value.
*
* Memory access ordering is enforced before/after the atomic operation,
* so no additional 'sync' instructions are required.
*
* @param ptr address in memory to add incr to
* @param incr amount to increment memory location by (signed)
*
* @return Value of memory location before increment
*/
static inline int32_t cvmx_atomic_fetch_and_add32(int32_t *ptr, int32_t incr)
{
    uint32_t ret;
    /* Fenced 32-bit fetch-and-add. */
    CVMX_SYNCWS;
    ret = cvmx_atomic_fetch_and_add32_nosync(ptr, incr);
    CVMX_SYNCWS;
    return ret;
}
/**
* Atomically set bits in a 64 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param mask mask of bits to set
*
* @return Value of memory location before setting bits
*/
static inline uint64_t cvmx_atomic_fetch_and_bset64_nosync(uint64_t *ptr, uint64_t mask)
{
    uint64_t tmp, ret;
    /* LL/SC loop: OR the mask into the word, returning the prior value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: lld %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " or %[tmp], %[msk] \n"
    " scd %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (mask)
    : "memory");
    return (ret);
}
/**
* Atomically set bits in a 32 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param mask mask of bits to set
*
* @return Value of memory location before setting bits
*/
static inline uint32_t cvmx_atomic_fetch_and_bset32_nosync(uint32_t *ptr, uint32_t mask)
{
    uint32_t tmp, ret;
    /* 32-bit LL/SC loop: OR the mask in, return the prior value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: ll %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " or %[tmp], %[msk] \n"
    " sc %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [msk] "r" (mask)
    : "memory");
    return (ret);
}
/**
* Atomically clear bits in a 64 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param mask mask of bits to clear
*
* @return Value of memory location before clearing bits
*/
static inline uint64_t cvmx_atomic_fetch_and_bclr64_nosync(uint64_t *ptr, uint64_t mask)
{
    uint64_t tmp, ret;
    /* The mask register is inverted in place (nor with zero) before the
     * loop, then ANDed in — hence the "+r" constraint on msk. */
    __asm__ __volatile__(
    ".set noreorder \n"
    " nor %[msk], 0 \n"
    "1: lld %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " and %[tmp], %[msk] \n"
    " scd %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
    : : "memory");
    return (ret);
}
/**
* Atomically clear bits in a 32 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param mask mask of bits to clear
*
* @return Value of memory location before clearing bits
*/
static inline uint32_t cvmx_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_t mask)
{
    uint32_t tmp, ret;
    /* Invert mask once (nor with zero), then AND it in via an ll/sc loop,
     * returning the prior value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    " nor %[msk], 0 \n"
    "1: ll %[tmp], %[val] \n"
    " move %[ret], %[tmp] \n"
    " and %[tmp], %[msk] \n"
    " sc %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret), [msk] "+r" (mask)
    : : "memory");
    return (ret);
}
/**
* Atomically swaps value in 64 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param new_val new value to write
*
* @return Value of memory location before swap operation
*/
static inline uint64_t cvmx_atomic_swap64_nosync(uint64_t *ptr, uint64_t new_val)
{
    uint64_t tmp, ret;
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    /* Octeon2 single-instruction swaps: lacd (swap in zero), lasd (swap
     * in all-ones), lawd (swap in an arbitrary register value).  The
     * constant-propagation checks pick the shorter two-operand forms. */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        CVMX_PUSH_OCTEON2;
        if (__builtin_constant_p(new_val) && new_val == 0)
        {
            __asm__ __volatile__(
                "lacd %0,(%1)"
                : "=r" (ret) : "r" (ptr) : "memory");
        }
        else if (__builtin_constant_p(new_val) && new_val == ~0ull)
        {
            __asm__ __volatile__(
                "lasd %0,(%1)"
                : "=r" (ret) : "r" (ptr) : "memory");
        }
        else
        {
            __asm__ __volatile__(
                "lawd %0,(%1),%2"
                : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
        }
        CVMX_POP_OCTEON2;
    }
    else
    {
#endif
    /* Generic fallback: lld/scd loop, returning the replaced value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: lld %[ret], %[val] \n"
    " move %[tmp], %[new_val] \n"
    " scd %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [new_val] "r" (new_val)
    : "memory");
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    }
#endif
    return (ret);
}
/**
* Atomically swaps value in 32 bit (aligned) memory location,
* and returns previous value.
*
* This version does not perform 'sync' operations to enforce memory
* operations. This should only be used when there are no memory operation
* ordering constraints.
*
* @param ptr address in memory
* @param new_val new value to write
*
* @return Value of memory location before swap operation
*/
static inline uint32_t cvmx_atomic_swap32_nosync(uint32_t *ptr, uint32_t new_val)
{
    uint32_t tmp, ret;
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    /* Octeon2 single-instruction swaps: lac (swap in zero), las (swap in
     * all-ones), law (swap in an arbitrary value). */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
    {
        CVMX_PUSH_OCTEON2;
        if (__builtin_constant_p(new_val) && new_val == 0)
        {
            __asm__ __volatile__(
                "lac %0,(%1)"
                : "=r" (ret) : "r" (ptr) : "memory");
        }
        else if (__builtin_constant_p(new_val) && new_val == ~0u)
        {
            __asm__ __volatile__(
                "las %0,(%1)"
                : "=r" (ret) : "r" (ptr) : "memory");
        }
        else
        {
            __asm__ __volatile__(
                "law %0,(%1),%2"
                : "=r" (ret) : "r" (ptr), "r" (new_val) : "memory");
        }
        CVMX_POP_OCTEON2;
    }
    else
    {
#endif
    /* Generic fallback: ll/sc loop, returning the replaced value. */
    __asm__ __volatile__(
    ".set noreorder \n"
    "1: ll %[ret], %[val] \n"
    " move %[tmp], %[new_val] \n"
    " sc %[tmp], %[val] \n"
    " beqz %[tmp], 1b \n"
    " nop \n"
    ".set reorder \n"
    : [val] "+m" (*ptr), [tmp] "=&r" (tmp), [ret] "=&r" (ret)
    : [new_val] "r" (new_val)
    : "memory");
#if !defined(__FreeBSD__) || !defined(_KERNEL)
    }
#endif
    return (ret);
}
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_ATOMIC_H__ */

View File

@ -1,152 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifndef __CVMX_BOOTLOADER__
#define __CVMX_BOOTLOADER__
/**
* @file
*
* Bootloader definitions that are shared with other programs
*
* <hr>$Revision: 70030 $<hr>
*/
/* The bootloader_header_t structure defines the header that is present
** at the start of binary u-boot images. This header is used to locate the bootloader
** image in NAND, and also to allow verification of images for normal NOR booting.
** This structure is placed at the beginning of a bootloader binary image, and remains
** in the executable code.
*/
#define BOOTLOADER_HEADER_MAGIC 0x424f4f54 /* "BOOT" in ASCII */
#define BOOTLOADER_HEADER_COMMENT_LEN 64
#define BOOTLOADER_HEADER_VERSION_LEN 64
#define BOOTLOADER_HEADER_MAX_SIZE 0x200 /* limited by the space to the next exception handler */
#define BOOTLOADER_HEADER_CURRENT_MAJOR_REV 1
#define BOOTLOADER_HEADER_CURRENT_MINOR_REV 2
/* Revision history
 * 1.1 Initial released revision. (SDK 1.9)
 * 1.2 TLB based relocatable image (SDK 2.0)
 *
 *
 */
/* offsets to struct bootloader_header fields for assembly use */
#define GOT_ADDRESS_OFFSET 48 /* byte offset of the GOT pointer field within the header; keep in sync with the struct layout */
#define LOOKUP_STEP (64*1024) /* granularity used when scanning flash for a header */
#ifndef __ASSEMBLY__
/* On-media header placed at the start of every binary u-boot image.  Used to
 * locate the bootloader in NAND and to verify images for NOR booting; the
 * leading jump instruction keeps the header part of the executable image.
 * Packed: field offsets are a stable on-media ABI (see GOT_ADDRESS_OFFSET). */
typedef struct bootloader_header
{
    uint32_t jump_instr; /* Jump to executable code following the
                         ** header. This allows this header to
                         ** be (and remain) part of the executable image)
                         */
    uint32_t nop_instr; /* Must be 0x0 */
    uint32_t magic; /* Magic number to identify header */
    uint32_t hcrc; /* CRC of all of header excluding this field */
    uint16_t hlen; /* Length of header in bytes */
    uint16_t maj_rev; /* Major revision */
    uint16_t min_rev; /* Minor revision */
    uint16_t board_type; /* Board type that the image is for */
    uint32_t dlen; /* Length of data (immediately following header) in bytes */
    uint32_t dcrc; /* CRC of data */
    uint64_t address; /* Mips virtual address */
    uint32_t flags; /* BL_HEADER_FLAG_* bits */
    uint16_t image_type; /* Defined in bootloader_image_t enum */
    uint16_t resv0; /* pad */
    uint32_t reserved1;
    uint32_t reserved2;
    uint32_t reserved3;
    uint32_t reserved4;
    char comment_string[BOOTLOADER_HEADER_COMMENT_LEN]; /* Optional, for descriptive purposes */
    char version_string[BOOTLOADER_HEADER_VERSION_LEN]; /* Optional, for descriptive purposes */
} __attribute__((packed)) bootloader_header_t;
/* Defines for flag field */
#define BL_HEADER_FLAG_FAILSAFE (1) /* image is the failsafe/recovery copy */
/* Image types carried in bootloader_header.image_type. */
typedef enum
{
    BL_HEADER_IMAGE_UNKNOWN = 0x0,
    BL_HEADER_IMAGE_STAGE2, /* Binary bootloader stage2 image (NAND boot) */
    BL_HEADER_IMAGE_STAGE3, /* Binary bootloader stage3 image (NAND boot)*/
    BL_HEADER_IMAGE_NOR, /* Binary bootloader for NOR boot */
    BL_HEADER_IMAGE_PCIBOOT, /* Binary bootloader for PCI boot */
    BL_HEADER_IMAGE_UBOOT_ENV, /* Environment for u-boot */
    BL_HEADER_IMAGE_MAX,
    /* Range for customer private use. Will not be used by Cavium Inc. */
    BL_HEADER_IMAGE_CUST_RESERVED_MIN = 0x1000,
    BL_HEADER_IMAGE_CUST_RESERVED_MAX = 0x1fff
} bootloader_image_t;
#endif /* __ASSEMBLY__ */
/* Maximum address searched for NAND boot images and environments. This is used
** by stage1 and stage2. */
#define MAX_NAND_SEARCH_ADDR 0x400000
/* Maximum address to look for start of normal bootloader */
#define MAX_NOR_SEARCH_ADDR 0x200000
/* Defines for RAM based environment set by the host or the previous bootloader
** in a chain boot configuration. */
#define U_BOOT_RAM_ENV_ADDR (0x1000)  /* physical address of the first RAM env copy */
#define U_BOOT_RAM_ENV_SIZE (0x1000)  /* size of each RAM env copy, CRC included */
#define U_BOOT_RAM_ENV_CRC_SIZE (0x4) /* bytes of CRC prefixed to the env data */
#define U_BOOT_RAM_ENV_ADDR_2 (U_BOOT_RAM_ENV_ADDR + U_BOOT_RAM_ENV_SIZE) /* second (redundant) env copy */
#endif /* __CVMX_BOOTLOADER__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,488 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Simple allocate only memory allocator. Used to allocate memory at application
* start time.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_BOOTMEM_H__
#define __CVMX_BOOTMEM_H__
#ifdef __cplusplus
extern "C" {
#endif
#define CVMX_BOOTMEM_NAME_LEN 128 /* Must be multiple of 8, changing breaks ABI */
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64 /* Can change without breaking ABI */
#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull) /* minimum alignment of bootmem alloced blocks */
/* Flags for cvmx_bootmem_phy_mem* functions */
#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0) /* Allocate from end of block instead of beginning */
#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1) /* Don't do any locking. */
/* Real physical addresses of memory regions */
#define OCTEON_DDR0_BASE (0x0ULL)
#define OCTEON_DDR0_SIZE (0x010000000ULL)
#define OCTEON_DDR1_BASE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x20000000ULL : 0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)
#define OCTEON_DDR2_BASE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x30000000ULL : 0x20000000ULL)
#define OCTEON_DDR2_SIZE ((OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 0x7d0000000ULL : 0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE ((OCTEON_IS_MODEL(OCTEON_CN68XX)) ? 128*1024*1024*1024ULL : (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX)) ? 32*1024*1024*1024ull : 16*1024*1024*1024ULL)
/* First bytes of each free physical block of memory contain this structure,
* which is used to maintain the free memory list. Since the bootloader is
* only 32 bits, there is a union providing 64 and 32 bit versions. The
* application init code converts addresses to 64 bit addresses before the
* application starts.
*/
/* Header placed at the start of every free physical memory block; chains the
** free list together (see the comment above about 32/64-bit address use). */
typedef struct
{
/* Note: these are referenced from assembly routines in the bootloader, so this structure
** should not be changed without changing those routines as well. */
uint64_t next_block_addr; /* Address of the next free block; presumably 0 terminates the list — confirm against list-walk code */
uint64_t size; /* Size of this free block in bytes */
} cvmx_bootmem_block_header_t;
/* Structure for named memory blocks
** Number of descriptors
** available can be changed without affecting compatibility,
** but name length changes require a bump in the bootmem
** descriptor version
** Note: This structure must be naturally 64 bit aligned, as a single
** memory image will be used by both 32 and 64 bit programs.
*/
/* Descriptor for one named bootmem block. Layout is shared between 32- and
** 64-bit programs (see versioning notes above): changing name length requires
** a bootmem descriptor version bump. */
struct cvmx_bootmem_named_block_desc
{
uint64_t base_addr; /**< Base address of named block */
uint64_t size; /**< Size actually allocated for named block (may differ from requested) */
char name[CVMX_BOOTMEM_NAME_LEN]; /**< name of named block */
};
/* Convenience typedef for the named block descriptor above */
typedef struct cvmx_bootmem_named_block_desc cvmx_bootmem_named_block_desc_t;
/* Current descriptor versions */
#define CVMX_BOOTMEM_DESC_MAJ_VER 3 /* CVMX bootmem descriptor major version */
#define CVMX_BOOTMEM_DESC_MIN_VER 0 /* CVMX bootmem descriptor minor version */
/* First three members of cvmx_bootmem_desc_t are left in original
** positions for backwards compatibility.
*/
/* Global bootmem descriptor shared between the bootloader and applications.
** A single memory image of this structure is used by both 32- and 64-bit
** programs, so the field layout is ABI; the first three members must stay
** in their original positions (see comment above). */
typedef struct
{
uint32_t lock; /**< spinlock to control access to list */
uint32_t flags; /**< flags for indicating various conditions */
uint64_t head_addr; /* Address of the start of the free-memory list; presumably the first cvmx_bootmem_block_header_t — confirm against list init code */
uint32_t major_version; /**< incremented changed when incompatible changes made */
uint32_t minor_version; /**< incremented changed when compatible changes made, reset to zero when major incremented */
uint64_t app_data_addr; /* Application data region address — semantics not visible here */
uint64_t app_data_size; /* Application data region size — semantics not visible here */
uint32_t named_block_num_blocks; /**< number of elements in named blocks array */
uint32_t named_block_name_len; /**< length of name array in bootmem blocks */
uint64_t named_block_array_addr; /**< address of named memory block descriptors */
} cvmx_bootmem_desc_t;
/**
* Initialize the boot alloc memory structures. This is
* normally called inside of cvmx_user_app_init()
*
* @param mem_desc_addr Address of the free memory list
* @return
*/
extern int cvmx_bootmem_init(uint64_t mem_desc_addr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader.
* This is an allocate-only algorithm, so freeing memory is not possible.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader at a specific
* address. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated at the specified address.
*
* @param size Size in bytes of block to allocate
* @param address Physical address to allocate memory at. If this memory is not
* available, the allocation fails.
* @param alignment Alignment required - must be power of 2
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader within a specified
* address range. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated in the requested range.
*
* @param size Size in bytes of block to allocate
* @param min_addr defines the minimum address of the range
* @param max_addr defines the maximum address of the range
* @param alignment Alignment required - must be power of 2
* @param flags Flags to control options for the allocation.
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_range_flags(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr, uint32_t flags);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader within a specified
* address range. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated in the requested range.
*
* @param size Size in bytes of block to allocate
* @param min_addr defines the minimum address of the range
* @param max_addr defines the maximum address of the range
* @param alignment Alignment required - must be power of 2
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, const char *name);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
* @param flags Flags to control options for the allocation.
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_flags(uint64_t size, uint64_t alignment, const char *name, uint32_t flags);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param address Physical address to allocate memory at. If this memory is not
* available, the allocation fails.
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, const char *name);
/**
* Allocate a block of memory from a specific range of the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
* If request cannot be satisfied within the address range specified, NULL is returned
*
* @param size Size in bytes of block to allocate
* @param min_addr minimum address of range
* @param max_addr maximum address of range
* @param align Alignment of memory to be allocated. (must be a power of 2)
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name);
/**
* Allocate if needed a block of memory from a specific range of the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
* If the requested name block is already allocated, return the pointer to block of memory.
* If request cannot be satisfied within the address range specified, NULL is returned
*
* @param size Size in bytes of block to allocate
* @param min_addr minimum address of range
* @param max_addr maximum address of range
* @param align Alignment of memory to be allocated. (must be a power of 2)
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
* @param init Initialization function
*
* @return pointer to block of memory, NULL on error
*/
extern void *cvmx_bootmem_alloc_named_range_once(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, const char *name, void (*init)(void*));
/**
* Frees a previously allocated named bootmem block.
*
* @param name name of block to free
*
* @return 0 on failure,
* !0 on success
*/
extern int cvmx_bootmem_free_named(const char *name);
/**
* Finds a named bootmem block by name.
*
 * @param name name of block to find
*
* @return pointer to named block descriptor on success
* 0 on failure
*/
const cvmx_bootmem_named_block_desc_t *cvmx_bootmem_find_named_block(const char *name);
/**
* Returns the size of available memory in bytes, only
* counting blocks that are at least as big as the minimum block
* size.
*
* @param min_block_size
* Minimum block size to count in total.
*
* @return Number of bytes available for allocation that meet the block size requirement
*/
uint64_t cvmx_bootmem_available_mem(uint64_t min_block_size);
/**
* Prints out the list of named blocks that have been allocated
* along with their addresses and sizes.
* This is primarily used for debugging purposes
*/
void cvmx_bootmem_print_named(void);
/**
* Allocates a block of physical memory from the free list, at (optional) requested address and alignment.
*
* @param req_size size of region to allocate. All requests are rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
* @param address_min
* Minimum address that block can occupy.
* @param address_max
 *                   Specifies the maximum address (inclusive) that the allocation can use.
* @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
* This must be a power of 2.
* (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
* less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
* @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags);
/**
* Allocates a named block of physical memory from the free list, at (optional) requested address and alignment.
*
* @param size size of region to allocate. All requests are rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
* @param min_addr
* Minimum address that block can occupy.
* @param max_addr
 *                   Specifies the maximum address (inclusive) that the allocation can use.
* @param alignment Requested alignment of the block. If this alignment cannot be met, the allocation fails.
* This must be a power of 2.
* (Note: Alignment of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and internally enforced. Requested alignments of
* less than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to CVMX_BOOTMEM_ALIGNMENT_SIZE.)
* @param name name to assign to named block
* @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, const char *name, uint32_t flags);
/**
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
* @param name Name of memory block to find.
* If NULL pointer given, then finds unused descriptor, if available.
* @param flags Flags to control options for the allocation.
*
* @return Physical address of the memory block descriptor, zero if not
* found. If zero returned when name parameter is NULL, then no
* memory block descriptors are available.
*/
uint64_t cvmx_bootmem_phy_named_block_find(const char *name, uint32_t flags);
/**
* Returns the size of available memory in bytes, only
* counting blocks that are at least as big as the minimum block
* size.
*
* @param min_block_size
* Minimum block size to count in total.
*
* @return Number of bytes available for allocation that meet the block size requirement
*/
uint64_t cvmx_bootmem_phy_available_mem(uint64_t min_block_size);
/**
* Frees a named block.
*
* @param name name of block to free
* @param flags flags for passing options
*
* @return 0 on failure
* 1 on success
*/
int cvmx_bootmem_phy_named_block_free(const char *name, uint32_t flags);
/**
* Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
* frees and initial population of the free memory list.
 *
*
* @param phy_addr physical address of block
* @param size size of block in bytes.
* @param flags flags for passing options
*
* @return 1 on success,
* 0 on failure
*/
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
/**
* Prints the list of currently allocated named blocks
*
*/
void cvmx_bootmem_phy_named_block_print(void);
/**
* Prints the list of available memory.
*
*/
void cvmx_bootmem_phy_list_print(void);
/**
* This function initializes the free memory list used by cvmx_bootmem.
* This must be called before any allocations can be done.
*
* @param mem_size Total memory available, in bytes
* @param low_reserved_bytes
* Number of bytes to reserve (leave out of free list) at address 0x0.
* @param desc_buffer
* Buffer for the bootmem descriptor. This must be a 32 bit addressable
* address.
*
* @return 1 on success
* 0 on failure
*/
int64_t cvmx_bootmem_phy_mem_list_init(uint64_t mem_size, uint32_t low_reserved_bytes, cvmx_bootmem_desc_t *desc_buffer);
/**
* Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_lock(void);
/**
* Unlocks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_unlock(void);
/**
* Internal use function to get the current descriptor pointer */
void *__cvmx_bootmem_internal_get_desc_ptr(void);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_BOOTMEM_H__ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,142 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to Core, IO and DDR Clock.
*
* <hr>$Revision: 45089 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#endif
#include "cvmx.h"
#endif
#ifndef CVMX_BUILD_FOR_UBOOT
static uint64_t rate_eclk = 0;
static uint64_t rate_sclk = 0;
static uint64_t rate_dclk = 0;
#endif
/**
 * Get clock rate based on the clock type.
 *
 * Rates are derived from a fixed 50 MHz reference multiplied by PLL
 * multiplier fields read from chip CSRs; which CSR is used depends on the
 * chip generation (NPEI vs PCIE vs older debug-data register).
 *
 * @param clock - Enumeration of the clock type.
 * @return - return the clock rate in Hz, or 0 for an unknown clock type.
 */
uint64_t cvmx_clock_get_rate(cvmx_clock_t clock)
{
const uint64_t REF_CLOCK = 50000000;
#ifdef CVMX_BUILD_FOR_UBOOT
/* For U-Boot the file-scope static caches above are compiled out, so the
** rates are recomputed on every call using these locals instead. */
uint64_t rate_eclk = 0;
uint64_t rate_sclk = 0;
uint64_t rate_dclk = 0;
#endif
/* rate_eclk == 0 means the rates have not been computed (or cached) yet */
if (cvmx_unlikely(!rate_eclk))
{
/* Note: The order of these checks is important.
** octeon_has_feature(OCTEON_FEATURE_PCIE) is true for both 6XXX
** and 52XX/56XX, so OCTEON_FEATURE_NPEI _must_ be checked first */
if (octeon_has_feature(OCTEON_FEATURE_NPEI))
{
cvmx_npei_dbg_data_t npei_dbg_data;
npei_dbg_data.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DBG_DATA);
rate_eclk = REF_CLOCK * npei_dbg_data.s.c_mul;
rate_sclk = rate_eclk;
}
else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
{
/* Newer parts report separate core and IO multipliers */
cvmx_mio_rst_boot_t mio_rst_boot;
mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
rate_eclk = REF_CLOCK * mio_rst_boot.s.c_mul;
rate_sclk = REF_CLOCK * mio_rst_boot.s.pnr_mul;
}
else
{
cvmx_dbg_data_t dbg_data;
dbg_data.u64 = cvmx_read_csr(CVMX_DBG_DATA);
rate_eclk = REF_CLOCK * dbg_data.s.c_mul;
rate_sclk = rate_eclk;
}
}
switch (clock)
{
case CVMX_CLOCK_SCLK:
case CVMX_CLOCK_TIM:
case CVMX_CLOCK_IPD:
return rate_sclk;
case CVMX_CLOCK_RCLK:
case CVMX_CLOCK_CORE:
return rate_eclk;
case CVMX_CLOCK_DDR:
#if !defined(CVMX_BUILD_FOR_LINUX_HOST) && !defined(CVMX_BUILD_FOR_TOOLCHAIN)
/* DDR rate comes from sysinfo rather than a multiplier; cached lazily */
if (cvmx_unlikely(!rate_dclk))
rate_dclk = cvmx_sysinfo_get()->dram_data_rate_hz;
#endif
return rate_dclk;
}
cvmx_dprintf("cvmx_clock_get_rate: Unknown clock type\n");
return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_clock_get_rate);
#endif

View File

@ -1,139 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to Core, IO and DDR Clock.
*
* <hr>$Revision: 45089 $<hr>
*/
#ifndef __CVMX_CLOCK_H__
#define __CVMX_CLOCK_H__
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-lmcx-defs.h>
#else
#include "cvmx.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
* Enumeration of different Clocks in Octeon.
*/
/* Clock selector; the alias values (CORE/TIM/IPD) are handled identically to
** their canonical counterparts by cvmx_clock_get_count()/cvmx_clock_get_rate(). */
typedef enum{
CVMX_CLOCK_RCLK, /**< Clock used by cores, coherent bus and L2 cache. */
CVMX_CLOCK_SCLK, /**< Clock used by IO blocks. */
CVMX_CLOCK_DDR, /**< Clock used by DRAM */
CVMX_CLOCK_CORE, /**< Alias for CVMX_CLOCK_RCLK */
CVMX_CLOCK_TIM, /**< Alias for CVMX_CLOCK_SCLK */
CVMX_CLOCK_IPD, /**< Alias for CVMX_CLOCK_SCLK */
} cvmx_clock_t;
/**
 * Get cycle count based on the clock type.
 *
 * For the core clock this reads the CPU cycle counter (MIPS hardware
 * register 31 via RDHWR); for SCLK-family clocks it reads the IPD clock
 * count CSR, and for DDR it reads the LMC DCLK counter.
 *
 * @param clock - Enumeration of the clock type.
 * @return - Get the number of cycles executed so far.
 */
static inline uint64_t cvmx_clock_get_count(cvmx_clock_t clock)
{
switch(clock)
{
case CVMX_CLOCK_RCLK:
case CVMX_CLOCK_CORE:
{
#ifndef __mips__
/* Non-MIPS hosts have no cycle hwreg; fall back to the IPD counter CSR */
return cvmx_read_csr(CVMX_IPD_CLK_COUNT);
#elif defined(CVMX_ABI_O32)
/* O32 has no 64-bit GPR access from C, so read the 64-bit cycle
** register in asm and return it as two 32-bit halves. */
uint32_t tmp_low, tmp_hi;
asm volatile (
" .set push \n"
" .set mips64r2 \n"
" .set noreorder \n"
" rdhwr %[tmpl], $31 \n"
" dsrl %[tmph], %[tmpl], 32 \n"
" sll %[tmpl], 0 \n"
" sll %[tmph], 0 \n"
" .set pop \n"
: [tmpl] "=&r" (tmp_low), [tmph] "=&r" (tmp_hi) : );
return(((uint64_t)tmp_hi << 32) + tmp_low);
#else
/* 64-bit ABI: read hardware register 31 (cycle count) directly */
uint64_t cycle;
CVMX_RDHWR(cycle, 31);
return(cycle);
#endif
}
case CVMX_CLOCK_SCLK:
case CVMX_CLOCK_TIM:
case CVMX_CLOCK_IPD:
return cvmx_read_csr(CVMX_IPD_CLK_COUNT);
case CVMX_CLOCK_DDR:
/* 6XXX/CNF7XXX expose a single 64-bit DCLK counter; older chips split
** it across HI/LO CSRs that must be combined. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF7XXX))
return cvmx_read_csr(CVMX_LMCX_DCLK_CNT(0));
else
return ((cvmx_read_csr(CVMX_LMCX_DCLK_CNT_HI(0)) << 32) | cvmx_read_csr(CVMX_LMCX_DCLK_CNT_LO(0)));
}
cvmx_dprintf("cvmx_clock_get_count: Unknown clock type\n");
return 0;
}
extern uint64_t cvmx_clock_get_rate(cvmx_clock_t clock);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_CLOCK_H__ */

View File

@ -1,339 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Support functions for managing command queues used for
* various hardware blocks.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#else
#include "cvmx.h"
#include "cvmx-bootmem.h"
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#endif
#include "cvmx-fpa.h"
#include "cvmx-cmd-queue.h"
#endif
/**
* This application uses this pointer to access the global queue
* state. It points to a bootmem named block.
*/
CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(__cvmx_cmd_queue_state_ptr);
#endif
/**
 * @INTERNAL
 * Initialize the Global queue state pointer.
 *
 * Allocates (or re-attaches to) the named bootmem block that holds the
 * shared command-queue state. Idempotent: returns immediately if the
 * pointer is already set, and if the named block already exists (e.g. set
 * up by another core or a prior boot stage) the existing block is reused
 * without being re-zeroed.
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
/* const: points at a string literal, which must never be written through */
const char *alloc_name = "cvmx_cmd_queues";
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
if (cvmx_likely(__cvmx_cmd_queue_state_ptr))
return CVMX_CMD_QUEUE_SUCCESS;
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
/* Prefer carving the state out of the kernel's reserved 32-bit region
** when one was configured, so 32-bit consumers can reach it. */
if (octeon_reserve32_memory)
__cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
octeon_reserve32_memory,
octeon_reserve32_memory + (CONFIG_CAVIUM_RESERVE32<<20) - 1,
128, alloc_name);
else
#endif
__cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
#else
__cvmx_cmd_queue_state_ptr = cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr), 128, alloc_name);
#endif
if (__cvmx_cmd_queue_state_ptr)
memset(__cvmx_cmd_queue_state_ptr, 0, sizeof(*__cvmx_cmd_queue_state_ptr));
else
{
/* Allocation failed: assume the block already exists and attach to it */
const cvmx_bootmem_named_block_desc_t *block_desc = cvmx_bootmem_find_named_block(alloc_name);
if (block_desc)
__cvmx_cmd_queue_state_ptr = cvmx_phys_to_ptr(block_desc->base_addr);
else
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n", alloc_name);
return CVMX_CMD_QUEUE_NO_MEMORY;
}
}
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * Re-initialization of an already-set-up queue succeeds (returning
 * CVMX_CMD_QUEUE_ALREADY_SETUP) only if all parameters match the
 * original configuration.
 *
 * @param queue_id   Hardware command queue to initialize.
 * @param max_depth  Maximum outstanding commands that can be queued.
 * @param fpa_pool   FPA pool the command queues should come from (0-7).
 * @param pool_size  Size of each buffer in the FPA pool (bytes, 128-65536).
 *
 * @return CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size)
{
cvmx_cmd_queue_result_t rc;
__cvmx_cmd_queue_state_t *state;
cvmx_fpa_ctl_status_t fpa_status;
void *first_buf;
rc = __cvmx_cmd_queue_init_state_ptr();
if (rc != CVMX_CMD_QUEUE_SUCCESS)
return rc;
state = __cvmx_cmd_queue_get_state(queue_id);
if (state == NULL)
return CVMX_CMD_QUEUE_INVALID_PARAM;
/* We artificially limit max_depth to 1<<20 words. It is an arbitrary limit */
if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH)
{
if ((max_depth < 0) || (max_depth > 1<<20))
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
else if (max_depth != 0)
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((fpa_pool < 0) || (fpa_pool > 7))
return CVMX_CMD_QUEUE_INVALID_PARAM;
if ((pool_size < 128) || (pool_size > 65536))
return CVMX_CMD_QUEUE_INVALID_PARAM;
/* See if someone else has already initialized the queue */
if (state->base_ptr_div128)
{
/* Re-initialization is only legal with identical parameters */
if (max_depth != (int)state->max_depth)
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different max_depth (%d).\n", (int)state->max_depth);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if (fpa_pool != state->fpa_pool)
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool (%u).\n", state->fpa_pool);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
if ((pool_size>>3)-1 != state->pool_size_m1)
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Queue already initialized with different FPA pool size (%u).\n", (state->pool_size_m1+1)<<3);
return CVMX_CMD_QUEUE_INVALID_PARAM;
}
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_ALREADY_SETUP;
}
/* Fresh setup: the FPA must be running before we can pull a buffer */
fpa_status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
if (!fpa_status.s.enb)
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: FPA is not enabled.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
first_buf = cvmx_fpa_alloc(fpa_pool);
if (first_buf == NULL)
{
cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: Unable to allocate initial buffer.\n");
return CVMX_CMD_QUEUE_NO_MEMORY;
}
memset(state, 0, sizeof(*state));
state->max_depth = max_depth;
state->fpa_pool = fpa_pool;
state->pool_size_m1 = (pool_size>>3)-1;
state->base_ptr_div128 = cvmx_ptr_to_phys(first_buf) / 128;
/* We zeroed the now serving field so we need to also zero the ticket */
__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
CVMX_SYNCWS;
return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Shutdown a queue a free it's command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @param queue_id Queue to shutdown
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
/*
 * Shut a command queue down, returning its remaining command buffer
 * to the FPA.  The hardware attached to the queue must already be
 * stopped; a queue that still has pending commands is refused.
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
{
    __cvmx_cmd_queue_state_t *state = __cvmx_cmd_queue_get_state(queue_id);

    if (state == NULL)
    {
        cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to get queue information.\n");
        return CVMX_CMD_QUEUE_INVALID_PARAM;
    }

    /* Refuse to tear down a queue that still holds queued commands */
    if (cvmx_cmd_queue_length(queue_id) > 0)
    {
        cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still has data in it.\n");
        return CVMX_CMD_QUEUE_FULL;
    }

    __cvmx_cmd_queue_lock(queue_id, state);
    if (state->base_ptr_div128 != 0)
    {
        /* Give the last command buffer back to its FPA pool */
        cvmx_fpa_free(cvmx_phys_to_ptr((uint64_t)state->base_ptr_div128 << 7),
                      state->fpa_pool, 0);
        state->base_ptr_div128 = 0;
    }
    __cvmx_cmd_queue_unlock(state);

    return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @param queue_id Hardware command queue to query
*
* @return Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    /* The cast is here so gcc will check that all values in the
       cvmx_cmd_queue_id_t enumeration are handled here.  The mask keeps
       only the hardware-unit part of the id (the low 16 bits select the
       individual queue within the unit). */
    switch ((cvmx_cmd_queue_id_t)(queue_id & 0xff0000))
    {
        case CVMX_CMD_QUEUE_PKO_BASE:
            /* FIXME: Need atomic lock on CVMX_PKO_REG_READ_IDX. Right now we
               are normally called with the queue lock, so that is a SLIGHT
               amount of protection */
            /* Select which PKO queue the debug registers report on, then read
               the doorbell count.  Which debug register and field holds the
               doorbell differs per chip model. */
            cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
            if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
            {
                cvmx_pko_mem_debug9_t debug9;
                debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
                return debug9.cn38xx.doorbell;
            }
            else
            {
                cvmx_pko_mem_debug8_t debug8;
                debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
                if (octeon_has_feature(OCTEON_FEATURE_PKND))
                    return debug8.cn68xx.doorbell;
                else
                    return debug8.cn58xx.doorbell;
            }
        case CVMX_CMD_QUEUE_ZIP:
        case CVMX_CMD_QUEUE_DFA:
        case CVMX_CMD_QUEUE_RAID:
            // FIXME: Implement other lengths
            return 0;
        case CVMX_CMD_QUEUE_DMA_BASE:
            /* DMA engines report their doorbell count directly; the register
               block depends on whether the chip has the NPEI interface */
            if (octeon_has_feature(OCTEON_FEATURE_NPEI))
            {
                cvmx_npei_dmax_counts_t dmax_counts;
                dmax_counts.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS(queue_id & 0x7));
                return dmax_counts.s.dbell;
            }
            else
            {
                cvmx_dpi_dmax_counts_t dmax_counts;
                dmax_counts.u64 = cvmx_read_csr(CVMX_DPI_DMAX_COUNTS(queue_id & 0x7));
                return dmax_counts.s.dbell;
            }
        case CVMX_CMD_QUEUE_END:
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    return CVMX_CMD_QUEUE_INVALID_PARAM;
}
/**
* Return the command buffer to be written to. The purpose of this
* function is to allow CVMX routine access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @param queue_id Command queue to query
*
* @return Command buffer or NULL on failure
*/
/*
 * Return a pointer to the queue's current command buffer, or NULL when
 * the queue id is invalid or the queue was never initialized.  Intended
 * for low-level CVMX hardware setup, not for application use.
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
{
    __cvmx_cmd_queue_state_t *state = __cvmx_cmd_queue_get_state(queue_id);

    if (state == NULL || state->base_ptr_div128 == 0)
        return NULL;

    /* The stored value is the physical address shifted right by 7 */
    return cvmx_phys_to_ptr((uint64_t)state->base_ptr_div128 << 7);
}

View File

@ -1,614 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Support functions for managing command queues used for
* various hardware blocks.
*
* The common command queue infrastructure abstracts out the
* software necessary for adding to Octeon's chained queue
* structures. These structures are used for commands to the
* PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
* hardware unit takes commands and CSRs of different types,
* they all use basic linked command buffers to store the
* pending request. In general, users of the CVMX API don't
* call cvmx-cmd-queue functions directly. Instead the hardware
* unit specific wrapper should be used. The wrappers perform
* unit specific validation and CSR writes to submit the
* commands.
*
* Even though most software will never directly interact with
* cvmx-cmd-queue, knowledge of its internal workings can help
* in diagnosing performance problems and help with debugging.
*
* Command queue pointers are stored in a global named block
* called "cvmx_cmd_queues". Except for the PKO queues, each
* hardware queue is stored in its own cache line to reduce SMP
* contention on spin locks. The PKO queues are stored such that
* every 16th queue is next to each other in memory. This scheme
* allows for queues being in separate cache lines when there
* are low number of queues per port. With 16 queues per port,
* the first queue for each port is in the same cache area. The
* second queues for each port are in another area, etc. This
* allows software to implement very efficient lockless PKO with
* 16 queues per port using a minimum of cache lines per core.
* All queues for a given core will be isolated in the same
* cache area.
*
* In addition to the memory pointer layout, cvmx-cmd-queue
* provides an optimized fair ll/sc locking mechanism for the
* queues. The lock uses a "ticket / now serving" model to
* maintain fair order on contended locks. In addition, it uses
* predicted locking time to limit cache contention. When a core
* know it must wait in line for a lock, it spins on the
* internal cycle counter to completely eliminate any causes of
* bus traffic.
*
* <hr> $Revision: 70030 $ <hr>
*/
#ifndef __CVMX_CMD_QUEUE_H__
#define __CVMX_CMD_QUEUE_H__
#if !defined(CVMX_BUILD_FOR_LINUX_KERNEL) && !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx-fpa.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* By default we disable the max depth support. Most programs
* don't use it and it slows down the command queue processing
* significantly.
*/
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
/**
* Enumeration representing all hardware blocks that use command
* queues. Each hardware block has up to 65536 sub identifiers for
* multiple command queues. Not all chips support all hardware
* units.
*/
typedef enum
{
    CVMX_CMD_QUEUE_PKO_BASE = 0x00000,  /* PKO queues; the low 16 bits select the queue */
#define CVMX_CMD_QUEUE_PKO(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_ZIP = 0x10000,       /* ZIP engine queue(s) */
#define CVMX_CMD_QUEUE_ZIP_QUE(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_ZIP + (0xffff&(queue))))
    CVMX_CMD_QUEUE_DFA = 0x20000,       /* DFA engine queue */
    CVMX_CMD_QUEUE_RAID = 0x30000,      /* RAID engine queue */
    CVMX_CMD_QUEUE_DMA_BASE = 0x40000,  /* DMA engine queues; the low 16 bits select the engine */
#define CVMX_CMD_QUEUE_DMA(queue) ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
    CVMX_CMD_QUEUE_END = 0x50000,       /* One past the last valid unit; used as an end marker */
} cvmx_cmd_queue_id_t;
/**
* Command write operations can fail if the command queue needs
* a new buffer and the associated FPA pool is empty. It can also
* fail if the number of queued command words reaches the maximum
* set at initialization.
*/
typedef enum
{
    CVMX_CMD_QUEUE_SUCCESS = 0,        /* Operation completed */
    CVMX_CMD_QUEUE_NO_MEMORY = -1,     /* FPA pool was empty when a buffer was needed */
    CVMX_CMD_QUEUE_FULL = -2,          /* Queue length exceeds the configured max_depth */
    CVMX_CMD_QUEUE_INVALID_PARAM = -3, /* Bad queue id or out-of-range argument */
    CVMX_CMD_QUEUE_ALREADY_SETUP = -4, /* Queue was already initialized (with matching parameters) */
} cvmx_cmd_queue_result_t;
/* Per-queue state packed into bitfields.  base_ptr_div128 and index
   together locate the next free command word in the current buffer;
   now_serving is the release half of the ticket lock. */
typedef struct
{
    uint8_t now_serving; /**< You have lock when this is your ticket */
    uint64_t unused1 : 24;
    uint32_t max_depth; /**< Maximum outstanding command words */
    uint64_t fpa_pool : 3; /**< FPA pool buffers come from */
    uint64_t base_ptr_div128: 29; /**< Top of command buffer pointer shifted 7 */
    uint64_t unused2 : 6;
    uint64_t pool_size_m1 : 13; /**< FPA buffer size in 64bit words minus 1 */
    uint64_t index : 13; /**< Number of commands already used in buffer */
} __cvmx_cmd_queue_state_t;
/**
* This structure contains the global state of all command queues.
* It is stored in a bootmem named block and shared by all
* applications running on Octeon. Tickets are stored in a different
* cache line that queue information to reduce the contention on the
* ll/sc used to get a ticket. If this is not the case, the update
* of queue state causes the ll/sc to fail quite often.
*/
typedef struct
{
    uint64_t ticket[(CVMX_CMD_QUEUE_END>>16) * 256];                 /* Ticket counters, one slot per possible queue */
    __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END>>16) * 256];  /* Queue state, indexed via __cvmx_cmd_queue_get_index() */
} __cvmx_cmd_queue_all_state_t;
extern CVMX_SHARED __cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
/**
* Initialize a command queue for use. The initial FPA buffer is
* allocated and the hardware unit is configured to point to the
* new command queue.
*
* @param queue_id Hardware command queue to initialize.
* @param max_depth Maximum outstanding commands that can be queued.
* @param fpa_pool FPA pool the command queues should come from.
* @param pool_size Size of each buffer in the FPA pool (bytes)
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id, int max_depth, int fpa_pool, int pool_size);
/**
* Shutdown a queue a free it's command buffers to the FPA. The
* hardware connected to the queue must be stopped before this
* function is called.
*
* @param queue_id Queue to shutdown
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
/**
* Return the number of command words pending in the queue. This
* function may be relatively slow for some hardware units.
*
* @param queue_id Hardware command queue to query
*
* @return Number of outstanding commands
*/
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
/**
* Return the command buffer to be written to. The purpose of this
* function is to allow CVMX routine access to the low level buffer
* for initial hardware setup. User applications should not call this
* function directly.
*
* @param queue_id Command queue to query
*
* @return Command buffer or NULL on failure
*/
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
/**
* @INTERNAL
* Get the index into the state arrays for the supplied queue id.
*
* @param queue_id Queue ID to get an index for
*
* @return Index into the state arrays
*/
static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
{
    /* Warning: this layout only supports devices with 256 queues or
       less.  Queues of a unit are interleaved so that every 16th queue
       lands next to the others in memory, which reduces cache thrashing
       when 16 queues per port are used for lockless operation. */
    int hw_unit   = queue_id >> 16;
    int sub_queue = (queue_id >> 4) & 0xf;
    int lane      = queue_id & 0xf;

    return (hw_unit << 8) + (lane << 4) + sub_queue;
}
/**
* @INTERNAL
* Lock the supplied queue so nobody else is updating it at the same
* time as us.
*
* @param queue_id Queue ID to lock
* @param qptr Pointer to the queue's global state
*/
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, __cvmx_cmd_queue_state_t *qptr)
{
    int tmp;
    int my_ticket;
    CVMX_PREFETCH(qptr, 0);
    /* Fair ticket lock: atomically take the next ticket number with ll/sc,
       then spin until now_serving reaches it.  The delay loop scales with
       the number of tickets ahead of us to keep bus traffic down.
       NOTE: uses Octeon-specific opcodes (baddu, cins) — MIPS64/Octeon only. */
    asm volatile (
        ".set push\n"
        ".set noreorder\n"
        "1:\n"
        "ll %[my_ticket], %[ticket_ptr]\n" /* Atomic add one to ticket_ptr */
        "li %[ticket], 1\n" /* and store the original value */
        "baddu %[ticket], %[my_ticket]\n" /* in my_ticket */
        "sc %[ticket], %[ticket_ptr]\n"
        "beqz %[ticket], 1b\n" /* sc failed; retry the ll/sc sequence */
        " nop\n"
        "lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
        "2:\n"
        "beq %[ticket], %[my_ticket], 4f\n" /* Jump out if now_serving == my_ticket */
        " subu %[ticket], %[my_ticket], %[ticket]\n" /* Find out how many tickets are in front of me */
        "subu %[ticket], 1\n" /* Use tickets in front of me minus one to delay */
        "cins %[ticket], %[ticket], 5, 7\n" /* Delay will be ((tickets in front)-1)*32 loops */
        "3:\n"
        "bnez %[ticket], 3b\n" /* Loop here until our ticket might be up */
        " subu %[ticket], 1\n"
        "b 2b\n" /* Jump back up to check out ticket again */
        " lbu %[ticket], %[now_serving]\n" /* Load the current now_serving ticket */
        "4:\n"
        ".set pop\n"
        : [ticket_ptr] "=m" (__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
        [now_serving] "=m" (qptr->now_serving),
        [ticket] "=&r" (tmp),
        [my_ticket] "=&r" (my_ticket)
    );
}
/**
* @INTERNAL
* Unlock the queue, flushing all writes.
*
* @param qptr Queue to unlock
*/
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
    /* Release the ticket lock by handing the lock to the next ticket
       holder.  The first sync orders queue updates before the release;
       the second pushes the release out promptly. */
    uint8_t next_ticket = (uint8_t)(qptr->now_serving + 1);
    CVMX_SYNCWS;
    qptr->now_serving = next_ticket;
    CVMX_SYNCWS;
}
/**
* @INTERNAL
* Get the queue state structure for the given queue id
*
* @param queue_id Queue id to get
*
* @return Queue structure or NULL on failure
*/
static inline __cvmx_cmd_queue_state_t *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
{
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        /* Reject ids past the last unit or with a per-unit queue number
           outside the 256 slots the state arrays provide. */
        if (cvmx_unlikely(queue_id >= CVMX_CMD_QUEUE_END) ||
            cvmx_unlikely((queue_id & 0xffff) >= 256))
            return NULL;
    }
    return &__cvmx_cmd_queue_state_ptr->state[__cvmx_cmd_queue_get_index(queue_id)];
}
/**
* Write an arbitrary number of command words to a command queue.
* This is a generic function; the fixed number of command word
* functions yield higher performance.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd_count Number of command words to write
* @param cmds Array of commands to write
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t queue_id, int use_locking, int cmd_count, uint64_t *cmds)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        /* Only 1..32 command words per call are supported */
        if (cvmx_unlikely((cmd_count < 1) || (cmd_count > 32)))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
        if (cvmx_unlikely(cmds == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);
    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }
    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + cmd_count < qptr->pool_size_m1))
    {
        /* Fast path: copy all command words into the current buffer */
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += cmd_count;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    else
    {
        uint64_t *ptr;
        int count;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        count = qptr->pool_size_m1 - qptr->index;
        ptr += qptr->index;
        cmd_count-=count;
        while (count--)
            *ptr++ = *cmds++;
        /* Last word of the old buffer is a link to the new buffer */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = cmd_count;
        ptr = new_buffer;
        while (cmd_count--)
            *ptr++ = *cmds++;
    }
    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Simple function to write two command words to a command
* queue.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd1 Command
* @param cmd2 Command
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);
    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }
    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 2 < qptr->pool_size_m1))
    {
        /* Fast path: both words fit in the current buffer */
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 2;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        /* cmd1 always fits; cmd2 goes in the old buffer only when a slot
           remains before the link word */
        *ptr++ = cmd1;
        if (cvmx_likely(count))
            *ptr++ = cmd2;
        /* Last word of the old buffer is a link to the new buffer */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        if (cvmx_unlikely(count == 0))
        {
            /* cmd2 did not fit in the old buffer; it starts the new one */
            qptr->index = 1;
            new_buffer[0] = cmd2;
        }
    }
    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
/**
* Simple function to write three command words to a command
* queue.
*
* @param queue_id Hardware command queue to write to
* @param use_locking
* Use internal locking to ensure exclusive access for queue
* updates. If you don't use this locking you must ensure
* exclusivity some other way. Locking is strongly recommended.
* @param cmd1 Command
* @param cmd2 Command
* @param cmd3 Command
*
* @return CVMX_CMD_QUEUE_SUCCESS or a failure code
*/
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t queue_id, int use_locking, uint64_t cmd1, uint64_t cmd2, uint64_t cmd3)
{
    __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
    if (CVMX_ENABLE_PARAMETER_CHECKING)
    {
        if (cvmx_unlikely(qptr == NULL))
            return CVMX_CMD_QUEUE_INVALID_PARAM;
    }
    /* Make sure nobody else is updating the same queue */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_lock(queue_id, qptr);
    /* If a max queue length was specified then make sure we don't
       exceed it. If any part of the command would be below the limit
       we allow it */
    if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && cvmx_unlikely(qptr->max_depth))
    {
        if (cvmx_unlikely(cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_FULL;
        }
    }
    /* Normally there is plenty of room in the current buffer for the command */
    if (cvmx_likely(qptr->index + 3 < qptr->pool_size_m1))
    {
        /* Fast path: all three words fit in the current buffer */
        uint64_t *ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        qptr->index += 3;
        ptr[0] = cmd1;
        ptr[1] = cmd2;
        ptr[2] = cmd3;
    }
    else
    {
        uint64_t *ptr;
        /* Figure out how many command words will fit in this buffer. One
           location will be needed for the next buffer pointer */
        int count = qptr->pool_size_m1 - qptr->index;
        /* We need a new command buffer. Fail if there isn't one available */
        uint64_t *new_buffer = (uint64_t *)cvmx_fpa_alloc(qptr->fpa_pool);
        if (cvmx_unlikely(new_buffer == NULL))
        {
            if (cvmx_likely(use_locking))
                __cvmx_cmd_queue_unlock(qptr);
            return CVMX_CMD_QUEUE_NO_MEMORY;
        }
        count--;
        ptr = (uint64_t *)cvmx_phys_to_ptr((uint64_t)qptr->base_ptr_div128<<7);
        ptr += qptr->index;
        /* Write as many of cmd1..cmd3 as fit before the link word */
        *ptr++ = cmd1;
        if (count)
        {
            *ptr++ = cmd2;
            if (count > 1)
                *ptr++ = cmd3;
        }
        /* Last word of the old buffer is a link to the new buffer */
        *ptr = cvmx_ptr_to_phys(new_buffer);
        /* The current buffer is full and has a link to the next buffer. Time
           to write the rest of the commands into the new buffer */
        qptr->base_ptr_div128 = *ptr >> 7;
        qptr->index = 0;
        ptr = new_buffer;
        /* Spill whichever of cmd2/cmd3 did not fit into the new buffer */
        if (count == 0)
        {
            *ptr++ = cmd2;
            qptr->index++;
        }
        if (count < 2)
        {
            *ptr++ = cmd3;
            qptr->index++;
        }
    }
    /* All updates are complete. Release the lock and return */
    if (cvmx_likely(use_locking))
        __cvmx_cmd_queue_unlock(qptr);
    return CVMX_CMD_QUEUE_SUCCESS;
}
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_CMD_QUEUE_H__ */

View File

@ -1,216 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the EBH-30xx specific devices
*
* <hr>$Revision: 70030 $<hr>
*
*/
#include <time.h>
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-cn3010-evb-hs5.h"
#include "cvmx-twsi.h"
/* Convert a binary value (0-99) to packed BCD: tens digit in the high
   nibble, ones digit in the low nibble. */
static inline uint8_t bin2bcd(uint8_t bin)
{
    uint8_t tens = bin / 10;
    uint8_t ones = bin % 10;
    return (uint8_t)((tens << 4) | ones);
}
/* Convert a packed BCD byte back to binary (inverse of bin2bcd). */
static inline uint8_t bcd2bin(uint8_t bcd)
{
    uint8_t tens = bcd >> 4;
    uint8_t ones = bcd & 0x0f;
    return (uint8_t)(tens * 10 + ones);
}
/*
 * Validate one field of a struct tm: if _expr is true, print a warning
 * via cvmx_dprintf and set the caller's local `rc` to -1.
 *
 * Wrapped in do { } while (0) WITHOUT a trailing semicolon so the macro
 * behaves as exactly one statement and the caller supplies the `;`.
 * (The original definition ended in `while(0);`, which defeats the
 * idiom and breaks use in an unbraced if/else — CERT PRE11-C.)
 */
#define TM_CHECK(_expr, _msg) \
    do { \
        if (_expr) { \
            cvmx_dprintf("Warning: RTC has invalid %s field\n", (_msg)); \
            rc = -1; \
        } \
    } while (0)
/*
 * Sanity-check each field of a broken-down time.  Every out-of-range
 * field prints a warning (via TM_CHECK) and forces the result to -1.
 * Returns 0 when all fields are in range, -1 otherwise (or when tms
 * is NULL).
 */
static int validate_tm_struct(struct tm * tms)
{
    if (!tms)
        return -1;

    int rc = 0;
    TM_CHECK(tms->tm_sec < 0 || tms->tm_sec > 60, "second"); /* + Leap sec */
    TM_CHECK(tms->tm_min < 0 || tms->tm_min > 59, "minute");
    TM_CHECK(tms->tm_hour < 0 || tms->tm_hour > 23, "hour");
    TM_CHECK(tms->tm_mday < 1 || tms->tm_mday > 31, "day");
    TM_CHECK(tms->tm_wday < 0 || tms->tm_wday > 6, "day of week");
    TM_CHECK(tms->tm_mon < 0 || tms->tm_mon > 11, "month");
    TM_CHECK(tms->tm_year < 0 || tms->tm_year > 200, "year");
    return rc;
}
/*
* Board-specifc RTC read
* Time is expressed in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
* and converted internally to calendar format.
*/
uint32_t cvmx_rtc_ds1337_read(void)
{
    int i, retry;
    uint32_t time;
    uint8_t reg[8];     /* raw RTC registers 0..6 (BCD time/date fields) */
    uint8_t sec;
    struct tm tms;
    memset(&reg, 0, sizeof(reg));
    memset(&tms, 0, sizeof(struct tm));
    for(retry=0; retry<2; retry++)
    {
        /* Lockless read: detects the infrequent roll-over and retries */
        reg[0] = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        for(i=1; i<7; i++)
            reg[i] = cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR);
        /* Re-read the seconds register; if its low digit still matches the
           first read, the clock did not tick mid-read */
        sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        if ((sec & 0xf) == (reg[0] & 0xf))
            break; /* Time did not roll-over, value is correct */
    }
    /* Decode the BCD register fields into a struct tm */
    tms.tm_sec = bcd2bin(reg[0] & 0x7f);
    tms.tm_min = bcd2bin(reg[1] & 0x7f);
    tms.tm_hour = bcd2bin(reg[2] & 0x3f);
    if ((reg[2] & 0x40) && (reg[2] & 0x20)) /* AM/PM format and is PM time */
    {
        tms.tm_hour = (tms.tm_hour + 12) % 24;
    }
    tms.tm_wday = (reg[3] & 0x7) - 1; /* Day of week field is 0..6 */
    tms.tm_mday = bcd2bin(reg[4] & 0x3f);
    tms.tm_mon = bcd2bin(reg[5] & 0x1f) - 1; /* Month field is 0..11 */
    /* Bit 7 of the month register is the century flag: add 100 years */
    tms.tm_year = ((reg[5] & 0x80) ? 100 : 0) + bcd2bin(reg[6]);
    if (validate_tm_struct(&tms))
        cvmx_dprintf("Warning: RTC calendar is not configured properly\n");
    /* Convert calendar time to seconds since the epoch */
    time = mktime(&tms);
    return time;
}
/*
* Board-specific RTC write
* Time returned is in seconds from epoch (Jan 1 1970 at 00:00:00 UTC)
*/
int cvmx_rtc_ds1337_write(uint32_t time)
{
    int i, rc, retry;
    struct tm tms;
    uint8_t reg[8];     /* BCD-encoded values for RTC registers 0..6 */
    uint8_t sec;
    time_t time_from_epoch = time;
    /* Convert seconds-since-epoch into calendar fields */
    localtime_r(&time_from_epoch, &tms);
    if (validate_tm_struct(&tms))
    {
        cvmx_dprintf("Error: RTC was passed wrong calendar values, write failed\n");
        goto tm_invalid;
    }
    /* Encode each calendar field as BCD for the RTC registers */
    reg[0] = bin2bcd(tms.tm_sec);
    reg[1] = bin2bcd(tms.tm_min);
    reg[2] = bin2bcd(tms.tm_hour); /* Force 0..23 format even if using AM/PM */
    reg[3] = bin2bcd(tms.tm_wday + 1);  /* Hardware day-of-week is 1..7 */
    reg[4] = bin2bcd(tms.tm_mday);
    reg[5] = bin2bcd(tms.tm_mon + 1);   /* Hardware month is 1..12 */
    if (tms.tm_year >= 100) /* Set century bit*/
    {
        reg[5] |= 0x80;
    }
    reg[6] = bin2bcd(tms.tm_year % 100);
    /* Lockless write: detects the infrequent roll-over and retries */
    for(retry=0; retry<2; retry++)
    {
        rc = 0;
        /* OR together the write statuses so any failure is reported */
        for(i=0; i<7; i++)
        {
            rc |= cvmx_twsi_write8(CVMX_RTC_DS1337_ADDR, i, reg[i]);
        }
        /* If the seconds digit still matches, the clock did not tick
           mid-write and the registers are consistent */
        sec = cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0);
        if ((sec & 0xf) == (reg[0] & 0xf))
            break; /* Time did not roll-over, value is correct */
    }
    return (rc ? -1 : 0);
tm_invalid:
    return -1;
}
#ifdef CVMX_RTC_DEBUG
/* Debug helper: dump the first 16 RTC registers as hex (CVMX_RTC_DEBUG only). */
void cvmx_rtc_ds1337_dump_state(void)
{
    printf("RTC:\n");
    /* Register 0 sets the read address; subsequent reads auto-increment */
    printf("%d : %02X ", 0, cvmx_twsi_read8(CVMX_RTC_DS1337_ADDR, 0x0));
    for (int reg = 1; reg < 16; reg++)
    {
        printf("%02X ", cvmx_twsi_read8_cur_addr(CVMX_RTC_DS1337_ADDR));
    }
    printf("\n");
}
#endif /* CVMX_RTC_DEBUG */

View File

@ -1,71 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifndef __CVMX_CN3010_EVB_HS5_H__
#define __CVMX_CN3010_EVB_HS5_H__
/**
* @file
*
* Interface to the EBH-30xx specific devices
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#define CVMX_RTC_DS1337_ADDR (0x68)
uint32_t cvmx_rtc_ds1337_read(void);
int cvmx_rtc_ds1337_write(uint32_t time);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_CN3010_EVB_HS5_H__ */

View File

@ -1,433 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-compactflash.h"
#ifndef MAX
/* Larger of a and b.  NOTE: both arguments are evaluated twice, so the
   operands must be free of side effects. */
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif
/* Integer division of _Dividend by _Divisor, rounded up.  _Divisor is
   evaluated twice; keep the operands side-effect free. */
#define FLASH_RoundUP(_Dividend, _Divisor) (((_Dividend)+(_Divisor-1))/(_Divisor))
/**
 * Convert a duration in nanoseconds into the value programmed into a
 * boot bus timing register, taking the timing multiple into account.
 *
 * @param tim_mult Timing multiple in effect for the register
 * @param nsecs    Duration in nanoseconds
 *
 * @return Register value: eclock periods for the duration, divided by
 *         the timing multiple, both rounded up
 */
static uint32_t ns_to_tim_reg(int tim_mult, uint32_t nsecs)
{
    /* Sclk cycles per microsecond (kept 64-bit to match the clock API) */
    uint64_t cycles_per_us = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / 1000000;
    /* Whole eclock periods needed to cover nsecs, rounded up */
    uint32_t periods = FLASH_RoundUP(nsecs * cycles_per_us, 1000);

    if (tim_mult == 1)
        return periods;
    return FLASH_RoundUP(periods, tim_mult);
}
/**
 * Compute the boot bus DMA timing register value for a compact flash
 * card from its ATA IDENTIFY DEVICE data.
 *
 * @param tim_mult   Eclock timing multiple to use (1, 2, 4, or 8)
 * @param ident_data IDENTIFY DEVICE response as an array of 16-bit words
 * @param mwdma_mode_ptr
 *                   Optional out-parameter: receives the multiword DMA
 *                   mode that was selected
 *
 * @return Value to write to the DMA timing register; 0 if the card
 *         supports no DMA mode, or (uint64_t)-1 for unsupported
 *         MWDMA modes 0/1.
 */
uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr)
{
cvmx_mio_boot_dma_timx_t dma_tim;
int oe_a;
int oe_n;
int dma_acks;
int dma_ackh;
int dma_arq;
int pause;
int To,Tkr,Td;          /* CF spec timing parameters, in ns */
int mwdma_mode = -1;
uint16_t word53_field_valid;
uint16_t word63_mwdma;
uint16_t word163_adv_timing_info;
if (!ident_data)
return 0;
/* IDENTIFY word 53 flags which fields are valid, word 63 lists the
   basic MWDMA modes, word 163 carries CF advanced timing info. */
word53_field_valid = ident_data[53];
word63_mwdma = ident_data[63];
word163_adv_timing_info = ident_data[163];
dma_tim.u64 = 0;
/* Check for basic MWDMA modes */
if (word53_field_valid & 0x2)
{
if (word63_mwdma & 0x4)
mwdma_mode = 2;
else if (word63_mwdma & 0x2)
mwdma_mode = 1;
else if (word63_mwdma & 0x1)
mwdma_mode = 0;
}
/* Check for advanced MWDMA modes; these override the basic modes */
switch ((word163_adv_timing_info >> 3) & 0x7)
{
case 1:
mwdma_mode = 3;
break;
case 2:
mwdma_mode = 4;
break;
default:
break;
}
/* DMA is not supported by this card */
if (mwdma_mode < 0)
return 0;
/* Now set up the DMA timing */
switch (tim_mult)
{
case 1:
dma_tim.s.tim_mult = 1;
break;
case 2:
dma_tim.s.tim_mult = 2;
break;
case 4:
dma_tim.s.tim_mult = 0; /* register encoding 0 selects multiple of 4 */
break;
case 8:
dma_tim.s.tim_mult = 3;
break;
default:
cvmx_dprintf("ERROR: invalid boot bus dma tim_mult setting\n");
break;
}
/* Per-mode timing constants in ns, from the CF specification, plus
   empirically determined margins noted inline. */
switch (mwdma_mode)
{
case 4:
To = 80;
Td = 55;
Tkr = 20;
oe_a = Td + 20; // Td (Seem to need more margin here....
oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To
// oe_n + oe_h must be >= To (cycle time)
dma_acks = 0; //Ti
dma_ackh = 5; // Tj
dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
break;
case 3:
To = 100;
Td = 65;
Tkr = 20;
oe_a = Td + 20; // Td (Seem to need more margin here....
oe_n = MAX(To - oe_a, Tkr); // Tkr from cf spec, lengthened to meet To
// oe_n + oe_h must be >= To (cycle time)
dma_acks = 0; //Ti
dma_ackh = 5; // Tj
dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
break;
case 2:
// +20 works
// +10 works
// + 10 + 0 fails
// n=40, a=80 works
To = 120;
Td = 70;
Tkr = 25;
// oe_a 0 fudge doesn't work; 10 seems to
oe_a = Td + 20 + 10; // Td (Seem to need more margin here....
oe_n = MAX(To - oe_a, Tkr) + 10; // Tkr from cf spec, lengthened to meet To
// oe_n 0 fudge fails;;; 10 boots
// 20 ns fudge needed on dma_acks
// oe_n + oe_h must be >= To (cycle time)
dma_acks = 0 + 20; //Ti
dma_ackh = 5; // Tj
dma_arq = 8; // not spec'ed, value in eclocks, not affected by tim_mult
pause = 25 - dma_arq * 1000/(cvmx_clock_get_rate(CVMX_CLOCK_SCLK)/1000000); // Tz
// no fudge needed on pause
break;
case 1:
case 0:
default:
/* NOTE: returns (uint64_t)-1 here, distinct from the 0 returned
   for "no DMA support" above. */
cvmx_dprintf("ERROR: Unsupported DMA mode: %d\n", mwdma_mode);
return(-1);
break;
}
if (mwdma_mode_ptr)
*mwdma_mode_ptr = mwdma_mode;
/* Convert the ns figures to register ticks and assemble the register */
dma_tim.s.dmack_pi = 1;
dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);
dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, dma_acks);
dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);
dma_tim.s.dmarq = dma_arq;
dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);
dma_tim.s.rd_dly = 0; /* Sample right on edge */
/* writes only */
dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
#if 0
cvmx_dprintf("ns to ticks (mult %d) of %d is: %d\n", TIM_MULT, 60, ns_to_tim_reg(60));
cvmx_dprintf("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s, dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
#endif
return(dma_tim.u64);
}
/**
 * Setup timing and region config to support a specific IDE PIO
 * mode over the bootbus.
 *
 * @param cs0 Bootbus region number connected to CS0 on the IDE device
 * @param cs1 Bootbus region number connected to CS1 on the IDE device
 * @param pio_mode PIO mode to set (0-6)
 */
void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode)
{
cvmx_mio_boot_reg_cfgx_t mio_boot_reg_cfg;
cvmx_mio_boot_reg_timx_t mio_boot_reg_tim;
int cs;
int clocks_us; /* Number of clock cycles per microsec */
int tim_mult;
int use_iordy; /* Set for PIO0-4, not set for PIO5-6 */
int t1; /* These t names are timing parameters from the ATA spec */
int t2;
int t2i;
int t4;
int t6;
int t6z;
int t9;
/* PIO modes 0-4 all allow the device to deassert IORDY to slow down
the host */
use_iordy = 1;
/* Use the PIO mode to determine timing parameters (values in ns) */
switch(pio_mode) {
case 6:
/* CF spec says IORDY should be ignored in PIO 6 */
use_iordy = 0;
t1 = 10;
t2 = 55;
t2i = 20;
t4 = 5;
t6 = 5;
t6z = 20;
t9 = 10;
break;
case 5:
/* CF spec says IORDY should be ignored in PIO 5 */
use_iordy = 0;
t1 = 15;
t2 = 65;
t2i = 25;
t4 = 5;
t6 = 5;
t6z = 20;
t9 = 10;
break;
case 4:
t1 = 25;
t2 = 70;
t2i = 25;
t4 = 10;
t6 = 5;
t6z = 30;
t9 = 10;
break;
case 3:
t1 = 30;
t2 = 80;
t2i = 70;
t4 = 10;
t6 = 5;
t6z = 30;
t9 = 10;
break;
case 2:
t1 = 30;
t2 = 100;
t2i = 0;
t4 = 15;
t6 = 5;
t6z = 30;
t9 = 10;
break;
case 1:
t1 = 50;
t2 = 125;
t2i = 0;
t4 = 20;
t6 = 5;
t6z = 30;
t9 = 15;
break;
default:
/* PIO mode 0 (and anything unrecognized): slowest timings */
t1 = 70;
t2 = 165;
t2i = 0;
t4 = 30;
t6 = 5;
t6z = 30;
t9 = 20;
break;
}
/* Convert times in ns to clock cycles, rounding up */
clocks_us = FLASH_RoundUP(cvmx_clock_get_rate(CVMX_CLOCK_SCLK), 1000000);
/* Convert times in clock cycles, rounding up. Octeon parameters are in
minus one notation, so take off one after the conversion */
t1 = FLASH_RoundUP(t1 * clocks_us, 1000);
if (t1)
t1--;
t2 = FLASH_RoundUP(t2 * clocks_us, 1000);
if (t2)
t2--;
t2i = FLASH_RoundUP(t2i * clocks_us, 1000);
if (t2i)
t2i--;
t4 = FLASH_RoundUP(t4 * clocks_us, 1000);
if (t4)
t4--;
t6 = FLASH_RoundUP(t6 * clocks_us, 1000);
if (t6)
t6--;
t6z = FLASH_RoundUP(t6z * clocks_us, 1000);
if (t6z)
t6z--;
t9 = FLASH_RoundUP(t9 * clocks_us, 1000);
if (t9)
t9--;
/* Start using a scale factor of one cycle. Keep doubling it until
the parameters fit in their fields. Since t2 is the largest number,
we only need to check it */
tim_mult = 1;
while (t2 >= 1<<6)
{
t1 = FLASH_RoundUP(t1, 2);
t2 = FLASH_RoundUP(t2, 2);
t2i = FLASH_RoundUP(t2i, 2);
t4 = FLASH_RoundUP(t4, 2);
t6 = FLASH_RoundUP(t6, 2);
t6z = FLASH_RoundUP(t6z, 2);
t9 = FLASH_RoundUP(t9, 2);
tim_mult *= 2;
}
/* Program the region config for both chip selects; the loop runs the
   body once for cs0 and once for cs1 */
cs = cs0;
do {
mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
mio_boot_reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
switch(tim_mult) {
case 1:
mio_boot_reg_cfg.s.tim_mult = 1;
break;
case 2:
mio_boot_reg_cfg.s.tim_mult = 2;
break;
case 4:
mio_boot_reg_cfg.s.tim_mult = 0;
break;
case 8:
default:
mio_boot_reg_cfg.s.tim_mult = 3;
break;
}
mio_boot_reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
mio_boot_reg_cfg.s.sam = 0; /* Don't combine write and output enable */
mio_boot_reg_cfg.s.we_ext = 0; /* No write enable extension */
mio_boot_reg_cfg.s.oe_ext = 0; /* No read enable extension */
mio_boot_reg_cfg.s.en = 1; /* Enable this region */
mio_boot_reg_cfg.s.orbit = 0; /* Don't combine with previous region */
mio_boot_reg_cfg.s.width = 1; /* 16 bits wide */
cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), mio_boot_reg_cfg.u64);
if(cs == cs0)
cs = cs1;
else
cs = cs0;
} while(cs != cs0);
mio_boot_reg_tim.u64 = 0;
mio_boot_reg_tim.s.pagem = 0; /* Disable page mode */
mio_boot_reg_tim.s.waitm = use_iordy; /* Enable dynamic timing */
mio_boot_reg_tim.s.pages = 0; /* Pages are disabled */
mio_boot_reg_tim.s.ale = 8; /* If someone uses ALE, this seems to work */
mio_boot_reg_tim.s.page = 0; /* Not used */
mio_boot_reg_tim.s.wait = 0; /* Time after IORDY to continue to assert the data */
mio_boot_reg_tim.s.pause = 0; /* Time after CE that signals stay valid */
mio_boot_reg_tim.s.wr_hld = t9; /* How long to hold after a write */
mio_boot_reg_tim.s.rd_hld = t9; /* How long to wait after a read for device to tristate */
mio_boot_reg_tim.s.we = t2; /* How long write enable is asserted */
mio_boot_reg_tim.s.oe = t2; /* How long read enable is asserted */
mio_boot_reg_tim.s.ce = t1; /* Time after CE that read/write starts */
mio_boot_reg_tim.s.adr = 1; /* Time before CE that address is valid */
/* Program the bootbus region timing for both chip selects */
cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs0), mio_boot_reg_tim.u64);
cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs1), mio_boot_reg_tim.u64);
}

View File

@ -1,78 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifndef __CVMX_COMPACTFLASH_H__
#define __CVMX_COMPACTFLASH_H__
#ifdef __cplusplus
extern "C" {
#endif
/**
 * This function takes values from the compact flash device
 * identify response, and returns the appropriate value to write
 * into the boot bus DMA timing register.
 *
 * @param tim_mult Eclock timing multiple to use
 * @param ident_data Data returned by the 'identify' command. This is used to
 * determine the DMA modes supported by the card, if any.
 * @param mwdma_mode_ptr
 * Optional pointer to return MWDMA mode in
 *
 * @return 64 bit value to write to DMA timing register; 0 if the card
 * supports no DMA mode
 */
extern uint64_t cvmx_compactflash_generate_dma_tim(int tim_mult, uint16_t *ident_data, int *mwdma_mode_ptr);
/**
 * Setup timing and region config to support a specific IDE PIO
 * mode over the bootbus.
 *
 * @param cs0 Bootbus region number connected to CS0 on the IDE device
 * @param cs1 Bootbus region number connected to CS1 on the IDE device
 * @param pio_mode PIO mode to set (0-6)
 */
extern void cvmx_compactflash_set_piomode(int cs0, int cs1, int pio_mode);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_COMPACTFLASH_H__ */

View File

@ -1,162 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Module to support operations on core such as TLB config, etc.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-core.h>
#else
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-core.h"
#endif
/**
 * Adds a wired TLB entry, and returns the index of the entry added.
 * Parameters are written to TLB registers without further processing.
 *
 * @param hi HI register value
 * @param lo0 lo0 register value
 * @param lo1 lo1 register value
 * @param page_mask pagemask register value
 *
 * @return Success: TLB index used (0-31 Octeon, 0-63 Octeon+, or 0-127
 * Octeon2). Failure: -1
 */
int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask)
{
uint32_t index;
/* The new entry goes at the current wired index; fail if every TLB
   entry on this model is already in use. */
CVMX_MF_TLB_WIRED(index);
if (index >= (unsigned int)cvmx_core_get_tlb_entries())
{
return(-1);
}
/* Load the CP0 staging registers, select the target slot, and grow
   the wired region so a TLB refill can never evict this entry.  The
   sequence (moves, hazard barrier, tlbwi, hazard barrier) is
   order-critical. */
CVMX_MT_ENTRY_HIGH(hi);
CVMX_MT_ENTRY_LO_0(lo0);
CVMX_MT_ENTRY_LO_1(lo1);
CVMX_MT_PAGEMASK(page_mask);
CVMX_MT_TLB_INDEX(index);
CVMX_MT_TLB_WIRED(index + 1);
CVMX_EHB;
CVMX_TLBWI;
CVMX_EHB;
return(index);
}
/**
 * Install a fixed (wired) TLB mapping with caller-supplied page bits.
 * The low 3 bits of each physical address carry the DIRTY, VALID, and
 * GLOBAL flags for that page.  Thin wrapper around
 * cvmx_core_add_wired_tlb_entry().
 *
 * @param vaddr      Virtual address to map
 * @param page0_addr Page 0 physical address, plus flag bits
 * @param page1_addr Page 1 physical address, plus flag bits
 * @param page_mask  Page mask selecting the page size
 *
 * @return TLB index used on success, -1 on bad alignment or a full TLB
 */
int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
{
    uint64_t span_mask = page_mask | 0x7ff;

    /* vaddr must be aligned to the even/odd page pair; each physical
       address (with the flag bits stripped) must be aligned to a
       single page. */
    if ((vaddr & span_mask) != 0
        || ((page0_addr & ~0x7ULL) & (span_mask >> 1)) != 0
        || ((page1_addr & ~0x7ULL) & (span_mask >> 1)) != 0)
    {
        cvmx_dprintf("Error adding tlb mapping: invalid address alignment at vaddr: 0x%llx\n", (unsigned long long)vaddr);
        return -1;
    }

    /* EntryLo holds PFN at bit 6 with the flag bits below it */
    return cvmx_core_add_wired_tlb_entry(vaddr,
                                         (page0_addr >> 6) | (page0_addr & 0x7),
                                         (page1_addr >> 6) | (page1_addr & 0x7),
                                         page_mask);
}
/**
 * Install a fixed (wired) TLB mapping with both pages marked dirty,
 * valid, and global.  Use cvmx_core_add_fixed_tlb_mapping_bits() for
 * finer control over the per-page flag bits.
 *
 * @param vaddr      Virtual address to map
 * @param page0_addr Page 0 physical address
 * @param page1_addr Page 1 physical address
 * @param page_mask  Page mask selecting the page size
 *
 * @return TLB index used on success, -1 on failure
 */
int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask)
{
    const uint64_t flags = TLB_DIRTY | TLB_VALID | TLB_GLOBAL;

    return cvmx_core_add_fixed_tlb_mapping_bits(vaddr,
                                                page0_addr | flags,
                                                page1_addr | flags,
                                                page_mask);
}
/**
 * Return the number of TLB entries implemented by the running Octeon
 * model.
 */
int cvmx_core_get_tlb_entries(void)
{
    if (OCTEON_IS_MODEL(OCTEON_CN3XXX))
        return 32;      /* CN3xxx (Octeon) */
    if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
        return 64;      /* CN5xxx (Octeon+) */
    return 128;         /* everything newer */
}

View File

@ -1,188 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Module to support operations on core such as TLB config, etc.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_CORE_H__
#define __CVMX_CORE_H__
#ifdef __cplusplus
extern "C" {
#endif
/**
* The types of performance counters supported per cpu
*/
typedef enum
{
CVMX_CORE_PERF_NONE = 0, /**< Turn off the performance counter */
CVMX_CORE_PERF_CLK = 1, /**< Conditionally clocked cycles (as opposed to count/cvm_count which count even with no clocks) */
CVMX_CORE_PERF_ISSUE = 2, /**< Instructions issued but not retired */
CVMX_CORE_PERF_RET = 3, /**< Instructions retired */
CVMX_CORE_PERF_NISSUE = 4, /**< Cycles no issue */
CVMX_CORE_PERF_SISSUE = 5, /**< Cycles single issue */
CVMX_CORE_PERF_DISSUE = 6, /**< Cycles dual issue */
CVMX_CORE_PERF_IFI = 7, /**< Cycle ifetch issued (but not necessarily commit to pp_mem) */
CVMX_CORE_PERF_BR = 8, /**< Branches retired */
CVMX_CORE_PERF_BRMIS = 9, /**< Branch mispredicts */
CVMX_CORE_PERF_J = 10, /**< Jumps retired */
CVMX_CORE_PERF_JMIS = 11, /**< Jumps mispredicted */
CVMX_CORE_PERF_REPLAY = 12, /**< Mem Replays */
CVMX_CORE_PERF_IUNA = 13, /**< Cycles idle due to unaligned_replays */
CVMX_CORE_PERF_TRAP = 14, /**< trap_6a signal */
CVMX_CORE_PERF_UULOAD = 16, /**< Unexpected unaligned loads (REPUN=1) */
CVMX_CORE_PERF_UUSTORE = 17, /**< Unexpected unaligned store (REPUN=1) */
CVMX_CORE_PERF_ULOAD = 18, /**< Unaligned loads (REPUN=1 or USEUN=1) */
CVMX_CORE_PERF_USTORE = 19, /**< Unaligned store (REPUN=1 or USEUN=1) */
CVMX_CORE_PERF_EC = 20, /**< Exec clocks(must set CvmCtl[DISCE] for accurate timing) */
CVMX_CORE_PERF_MC = 21, /**< Mul clocks(must set CvmCtl[DISCE] for accurate timing) */
CVMX_CORE_PERF_CC = 22, /**< Crypto clocks(must set CvmCtl[DISCE] for accurate timing) */
CVMX_CORE_PERF_CSRC = 23, /**< Issue_csr clocks(must set CvmCtl[DISCE] for accurate timing) */
CVMX_CORE_PERF_CFETCH = 24, /**< Icache committed fetches (demand+prefetch) */
CVMX_CORE_PERF_CPREF = 25, /**< Icache committed prefetches */
CVMX_CORE_PERF_ICA = 26, /**< Icache aliases */
CVMX_CORE_PERF_II = 27, /**< Icache invalidates */
CVMX_CORE_PERF_IP = 28, /**< Icache parity error */
CVMX_CORE_PERF_CIMISS = 29, /**< Cycles idle due to imiss (must set CvmCtl[DISCE] for accurate timing) */
CVMX_CORE_PERF_WBUF = 32, /**< Number of write buffer entries created */
CVMX_CORE_PERF_WDAT = 33, /**< Number of write buffer data cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_WBUFLD = 34, /**< Number of write buffer entries forced out by loads */
CVMX_CORE_PERF_WBUFFL = 35, /**< Number of cycles that there was no available write buffer entry (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
CVMX_CORE_PERF_WBUFTR = 36, /**< Number of stores that found no available write buffer entries */
CVMX_CORE_PERF_BADD = 37, /**< Number of address bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_BADDL2 = 38, /**< Number of address bus cycles not reflected (i.e. destined for L2) (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_BFILL = 39, /**< Number of fill bus cycles used (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_DDIDS = 40, /**< Number of Dstream DIDs created */
CVMX_CORE_PERF_IDIDS = 41, /**< Number of Istream DIDs created */
CVMX_CORE_PERF_DIDNA = 42, /**< Number of cycles that no DIDs were available (may need to set CvmCtl[DISCE] and CvmMemCtl[MCLK] for accurate counts) */
CVMX_CORE_PERF_LDS = 43, /**< Number of load issues */
CVMX_CORE_PERF_LMLDS = 44, /**< Number of local memory load */
CVMX_CORE_PERF_IOLDS = 45, /**< Number of I/O load issues */
CVMX_CORE_PERF_DMLDS = 46, /**< Number of loads that were not prefetches and missed in the cache */
CVMX_CORE_PERF_STS = 48, /**< Number of store issues */
CVMX_CORE_PERF_LMSTS = 49, /**< Number of local memory store issues */
CVMX_CORE_PERF_IOSTS = 50, /**< Number of I/O store issues */
CVMX_CORE_PERF_IOBDMA = 51, /**< Number of IOBDMAs */
CVMX_CORE_PERF_DTLB = 53, /**< Number of dstream TLB refill, invalid, or modified exceptions */
CVMX_CORE_PERF_DTLBAD = 54, /**< Number of dstream TLB address errors */
CVMX_CORE_PERF_ITLB = 55, /**< Number of istream TLB refill, invalid, or address error exceptions */
CVMX_CORE_PERF_SYNC = 56, /**< Number of SYNC stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_SYNCIOB = 57, /**< Number of SYNCIOBDMA stall cycles (may need to set CvmCtl[DISCE] for accurate counts) */
CVMX_CORE_PERF_SYNCW = 58, /**< Number of SYNCWs */
/* Added in CN63XX */
CVMX_CORE_PERF_ERETMIS = 64, /**< D/eret mispredicts */
CVMX_CORE_PERF_LIKMIS = 65, /**< Branch likely mispredicts */
CVMX_CORE_PERF_HAZTR = 66, /**< Hazard traps due to *MTC0 to CvmCtl, Perf counter control, EntryHi, or CvmMemCtl registers */
CVMX_CORE_PERF_MAX /**< This not a counter, just a marker for the highest number */
} cvmx_core_perf_t;
/**
* Bit description of the COP0 counter control register
*/
typedef union
{
uint32_t u32;
struct
{
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t m : 1; /**< Set to 1 for sel 0 and 0 for sel 2, indicating there are two performance counters */
uint32_t w : 1; /**< Set to 1 indicating counters are 64 bit */
uint32_t reserved_11_29 :15;
cvmx_core_perf_t event :10; /**< Selects the event to be counted by the corresponding Counter Register */
uint32_t ie : 1; /**< Interrupt Enable */
uint32_t u : 1; /**< Count in user mode */
uint32_t s : 1; /**< Count in supervisor mode */
uint32_t k : 1; /**< Count in kernel mode */
uint32_t ex : 1; /**< Count in exception context */
#else
uint32_t ex : 1;
uint32_t k : 1;
uint32_t s : 1;
uint32_t u : 1;
uint32_t ie : 1;
uint32_t event :10;
uint32_t reserved_11_29 :15;
uint32_t w : 1;
uint32_t m : 1;
#endif
} s;
} cvmx_core_perf_control_t;
/* MIPS CP0 PageMask register values.  The mask field starts at bit 11;
   each name gives the size of ONE page of the even/odd pair mapped by a
   single TLB entry. */
typedef enum {
CVMX_TLB_PAGEMASK_4K = 0x3 << 11,
CVMX_TLB_PAGEMASK_16K = 0xF << 11,
CVMX_TLB_PAGEMASK_64K = 0x3F << 11,
CVMX_TLB_PAGEMASK_256K = 0xFF << 11,
CVMX_TLB_PAGEMASK_1M = 0x3FF << 11,
CVMX_TLB_PAGEMASK_4M = 0xFFF << 11,
CVMX_TLB_PAGEMASK_16M = 0x3FFF << 11,
CVMX_TLB_PAGEMASK_64M = 0xFFFF << 11,
CVMX_TLB_PAGEMASK_256M = 0x3FFFF << 11,
} cvmx_tlb_pagemask_t;
int cvmx_core_add_wired_tlb_entry(uint64_t hi, uint64_t lo0, uint64_t lo1, cvmx_tlb_pagemask_t page_mask);
int cvmx_core_add_fixed_tlb_mapping(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
int cvmx_core_add_fixed_tlb_mapping_bits(uint64_t vaddr, uint64_t page0_addr, uint64_t page1_addr, cvmx_tlb_pagemask_t page_mask);
/**
* Return number of TLB entries.
*/
int cvmx_core_get_tlb_entries(void);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_CORE_H__ */

View File

@ -1,134 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Module to support operations on bitmap of cores. Coremask can be used to
* select a specific core, a group of cores, or all available cores, for
* initialization and differentiation of roles within a single shared binary
* executable image.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-spinlock.h"
#include "cvmx-coremask.h"
#define CVMX_COREMASK_MAX_SYNCS 20 /* maximum number of coremasks for barrier sync */
/**
 * This structure defines the private state maintained by coremask module.
 *
 * All fields are written under 'lock'; 'exit' is additionally polled
 * outside the lock by cores waiting at a barrier (hence volatile).
 */
CVMX_SHARED static struct {
cvmx_spinlock_t lock; /**< mutex spinlock */
struct {
unsigned int coremask; /**< coremask specified for barrier */
unsigned int checkin; /**< bitmask of cores checking in */
volatile unsigned int exit; /**< variable to poll for exit condition */
} s[CVMX_COREMASK_MAX_SYNCS];
} state = {
{ CVMX_SPINLOCK_UNLOCKED_VAL },
{ { 0, 0, 0 } },
};
/**
 * Wait (stall) until all cores in the given coremask has reached this point
 * in the program execution before proceeding.
 *
 * @param coremask the group of cores performing the barrier sync
 *
 */
void cvmx_coremask_barrier_sync(unsigned int coremask)
{
int i;
unsigned int target;
assert(coremask != 0);
cvmx_spinlock_lock(&state.lock);
for (i = 0; i < CVMX_COREMASK_MAX_SYNCS; i++) {
if (state.s[i].coremask == 0) {
/* end of existing coremask list, create new entry, fall-thru */
state.s[i].coremask = coremask;
}
if (state.s[i].coremask == coremask) {
/* 'exit' is a generation counter; the barrier opens when it
   advances to target.  Wrap-around at 32 bits is harmless
   because only equality is tested. */
target = state.s[i].exit + 1; /* wrap-around at 32b */
state.s[i].checkin |= cvmx_coremask_core(cvmx_get_core_num());
if (state.s[i].checkin == coremask) {
/* last core to arrive: reset check-in and release everyone */
state.s[i].checkin = 0;
state.s[i].exit = target; /* signal exit condition */
}
/* Busy-wait outside the lock so the releasing core can take it;
   'exit' is volatile, so the re-read is not optimized away. */
cvmx_spinlock_unlock(&state.lock);
while (state.s[i].exit != target)
;
return;
}
}
/* error condition - coremask array overflowed */
cvmx_spinlock_unlock(&state.lock);
assert(0);
}

View File

@ -1,319 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Module to support operations on bitmap of cores. Coremask can be used to
* select a specific core, a group of cores, or all available cores, for
* initialization and differentiation of roles within a single shared binary
* executable image.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_COREMASK_H__
#define __CVMX_COREMASK_H__
#include "cvmx-asm.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef uint64_t cvmx_coremask_holder_t; /* basic type to hold the
coremask bits */
#define CVMX_COREMASK_HLDRSZ ((int)(sizeof(cvmx_coremask_holder_t) * 8))
/* bits per holder */
#define CVMX_COREMASK_BMPSZ ((int)(CVMX_MAX_CORES / CVMX_COREMASK_HLDRSZ + 1))
/* bit map size */
/*
 * The macro pair implement a way to iterate active cores in the mask.
 * @param fec_pcm points to the coremask.
 * @param fec_ppid is the active core's id.
 */
/*
 * BEGIN opens the iteration: it scans every holder word (fec_i) and every
 * bit within it (fec_j); the user code between BEGIN and END runs once per
 * set bit, with fec_ppid assigned the corresponding core id.  fec_i and
 * fec_j are declared in a fresh scope so they cannot clash with callers.
 */
#define CVMX_COREMASK_FOR_EACH_CORE_BEGIN(fec_pcm, fec_ppid)           \
    do {                                                               \
        int fec_i, fec_j;                                              \
                                                                       \
        for (fec_i = 0; fec_i < CVMX_COREMASK_BMPSZ; fec_i++)          \
        {                                                              \
            for (fec_j = 0; fec_j < CVMX_COREMASK_HLDRSZ; fec_j++)     \
            {                                                          \
                if (((cvmx_coremask_holder_t)1 << fec_j) &             \
                    (fec_pcm)->coremask_bitmap[fec_i])                 \
                {                                                      \
                    fec_ppid = fec_i * CVMX_COREMASK_HLDRSZ + fec_j;

/*
 * END closes every scope opened by CVMX_COREMASK_FOR_EACH_CORE_BEGIN.
 */
#define CVMX_COREMASK_FOR_EACH_CORE_END                                \
                }                                                      \
            }                                                          \
        }                                                              \
    } while (0)
/* Bitmap of cores, wide enough for CVMX_MAX_CORES core ids. */
struct cvmx_coremask {
    /*
     * Big-endian. Array elems of larger indices represent cores of
     * bigger ids. So do MSBs within a cvmx_coremask_holder_t. Ditto
     * MSbs within a byte.
     */
    cvmx_coremask_holder_t coremask_bitmap[CVMX_COREMASK_BMPSZ];
};
/*
 * Is ``core'' set in the coremask?
 *
 * @param pcm is the pointer to the coremask.
 * @param core core id to test.
 * @return 1 if core is set and 0 if not.
 */
static inline int cvmx_coremask_is_set_core(struct cvmx_coremask *pcm,
                                            int core)
{
    int bit = core % CVMX_COREMASK_HLDRSZ;   /* bit position within the word */
    int word = core / CVMX_COREMASK_HLDRSZ;  /* which holder word */

    return (int)((pcm->coremask_bitmap[word] >> bit) & 1);
}
/*
 * Set ``core'' in the coremask.
 *
 * @param pcm is the pointer to the coremask.
 * @param core core id to set.
 * @return 0.
 */
static inline int cvmx_coremask_set_core(struct cvmx_coremask *pcm,
                                         int core)
{
    int bit = core % CVMX_COREMASK_HLDRSZ;   /* bit position within the word */
    int word = core / CVMX_COREMASK_HLDRSZ;  /* which holder word */

    pcm->coremask_bitmap[word] |= 1ull << bit;
    return 0;
}
/*
 * Clear ``core'' from the coremask.
 *
 * @param pcm is the pointer to the coremask.
 * @param core core id to clear.
 * @return 0.
 */
static inline int cvmx_coremask_clear_core(struct cvmx_coremask *pcm,
                                           int core)
{
    int bit = core % CVMX_COREMASK_HLDRSZ;   /* bit position within the word */
    int word = core / CVMX_COREMASK_HLDRSZ;  /* which holder word */

    pcm->coremask_bitmap[word] &= ~(1ull << bit);
    return 0;
}
/*
 * Clear the coremask.
 *
 * Zeroes every holder word in the bitmap.
 *
 * @param pcm is the pointer to the coremask.
 * @return 0.
 */
static inline int cvmx_coremask_clear_all(struct cvmx_coremask *pcm)
{
    int w = CVMX_COREMASK_BMPSZ;

    while (w-- > 0)
        pcm->coremask_bitmap[w] = 0;
    return 0;
}
/*
 * Is the current core the first (lowest-numbered) core in the coremask?
 *
 * Walks the holder words from low core ids upward; the first non-zero
 * word decides the answer.
 *
 * NOTE(review): appears to assume the current core is a member of the
 * mask -- if the core's bit lies in an all-zero word below the first
 * non-zero word, `n' goes negative and the shift below is undefined
 * behavior.  Confirm callers only use this with masks containing the
 * current core.
 *
 * @param pcm is the pointer to the coremask.
 * @return 1 for yes and 0 for no.
 */
static inline int cvmx_coremask_first_core_bmp(struct cvmx_coremask *pcm)
{
    int n, i;

    /* n = current core id, kept relative to holder word i as we scan */
    n = (int) cvmx_get_core_num();
    for (i = 0; i < CVMX_COREMASK_BMPSZ; i++)
    {
        if (pcm->coremask_bitmap[i])
        {
            /* current core is bit 0 of the first non-zero word: first */
            if (n == 0 && pcm->coremask_bitmap[i] & 1)
                return 1;
            /* current core lives in a later word: some lower core is set */
            if (n >= CVMX_COREMASK_HLDRSZ)
                return 0;
            /* first iff no bit below position n is set in this word */
            return ((((1ull << n) - 1) & pcm->coremask_bitmap[i]) == 0);
        }
        else
            n -= CVMX_COREMASK_HLDRSZ;  /* skip an empty word's worth of ids */
    }
    return 0;
}
/*
 * Is the current core a member of the coremask?
 *
 * @param pcm is the pointer to the coremask.
 * @return 1 for yes and 0 for no.
 */
static inline int cvmx_coremask_is_member_bmp(struct cvmx_coremask *pcm)
{
    int me = (int)cvmx_get_core_num();

    return cvmx_coremask_is_set_core(pcm, me);
}
/*
* coremask is simply unsigned int (32 bits).
*
* NOTE: supports up to 32 cores maximum.
*
* union of coremasks is simply bitwise-or.
* intersection of coremasks is simply bitwise-and.
*
*/
#define CVMX_COREMASK_MAX 0xFFFFFFFFu /* maximum supported mask */
/**
 * Compute coremask for a specific core.
 *
 * @param core_id The core ID
 *
 * @return coremask with only core_id's bit set
 *
 */
static inline unsigned int cvmx_coremask_core(unsigned int core_id)
{
    unsigned int mask = 1u;

    return mask << core_id;
}
/**
 * Compute coremask for num_cores cores starting with core 0.
 *
 * @param num_cores number of cores; must be <= CVMX_MAX_CORES
 *
 * @return coremask for num_cores cores (0 when num_cores is 0)
 *
 */
static inline unsigned int cvmx_coremask_numcores(unsigned int num_cores)
{
    /*
     * Guard num_cores == 0: the expression below would then shift by
     * CVMX_MAX_CORES, i.e. the full 32-bit width of the operand, which
     * is undefined behavior in C.  The empty set is simply mask 0.
     */
    if (num_cores == 0)
        return 0;
    return (CVMX_COREMASK_MAX >> (CVMX_MAX_CORES - num_cores));
}
/**
 * Compute coremask for a range of cores from core low to core high
 * (inclusive).
 *
 * Builds a run of (high - low + 1) one-bits and shifts it to start at
 * bit position low.
 *
 * NOTE(review): assumes low <= high and high < CVMX_MAX_CORES; if
 * low > high the right-shift count exceeds the 32-bit operand width,
 * which is undefined behavior -- confirm callers honor this.
 *
 * @param low first core in the range
 * @param high last core in the range
 *
 * @return coremask for the range of cores
 *
 */
static inline unsigned int cvmx_coremask_range(unsigned int low, unsigned int high)
{
    return ((CVMX_COREMASK_MAX >> (CVMX_MAX_CORES - 1 - high + low)) << low);
}
/**
 * Test to see if current core is a member of coremask.
 *
 * @param coremask the coremask to test against
 *
 * @return 1 if current core is a member of coremask, 0 otherwise
 *
 */
static inline int cvmx_coremask_is_member(unsigned int coremask)
{
    unsigned int my_bit = cvmx_coremask_core(cvmx_get_core_num());

    return (my_bit & coremask) != 0;
}
/**
 * Test to see if current core is first core in coremask.
 *
 * Relies on cvmx_coremask_numcores(n) producing the mask of cores
 * 0..n-1: the current core is first when it is a member and none of
 * those lower-numbered cores appear in the coremask.
 *
 * @param coremask the coremask to test against
 *
 * @return 1 if current core is first core in the coremask, 0 otherwise
 *
 */
static inline int cvmx_coremask_first_core(unsigned int coremask)
{
    /* core 0 is trivially first; otherwise no lower core may be set */
    return cvmx_coremask_is_member(coremask)
           && ((cvmx_get_core_num() == 0) ||
               ((cvmx_coremask_numcores(cvmx_get_core_num()) & coremask) == 0));
}
/**
* Wait (stall) until all cores in the given coremask has reached this point
* in the program execution before proceeding.
*
* @param coremask the group of cores performing the barrier sync
*
*/
extern void cvmx_coremask_barrier_sync(unsigned int coremask);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_COREMASK_H__ */

View File

@ -1,78 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper utilities for enabling crypto.
*
* <hr>$Revision: $<hr>
*/
#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx.h"
/**
 * Enable the dormant crypto functions.  If crypto is not already
 * enabled and the dormant-crypto feature is present, write the enable
 * key to the RNM_EER_KEY register, then report whether crypto is now
 * enabled.
 *
 * @param key The dormant crypto enable key value.
 *
 * @return non-zero if crypto is (or has been) enabled.
 */
int cvmx_crypto_dormant_enable(uint64_t key)
{
    if (octeon_has_feature(OCTEON_FEATURE_CRYPTO))
        return 1;

    if (octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO)) {
        cvmx_rnm_eer_key_t v;

        /*
         * Clear the whole register image before setting the key field:
         * the original wrote v.u64 to the CSR after assigning only
         * v.s.key, so any bits of the union outside that field were
         * uninitialized stack data.  (Harmless only if `key' spans the
         * full 64 bits.)
         */
        v.u64 = 0;
        v.s.key = key;
        cvmx_write_csr(CVMX_RNM_EER_KEY, v.u64);
    }

    return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
}
/**
 * Read the crypto dormant debug value.
 *
 * @return The RNM_EER_DBG.DAT value, or zero if the dormant-crypto
 *         feature is not present.
 */
uint64_t cvmx_crypto_dormant_dbg(void)
{
    cvmx_rnm_eer_dbg_t reg;
    uint64_t result = 0;

    if (octeon_has_feature(OCTEON_FEATURE_DORM_CRYPTO)) {
        reg.u64 = cvmx_read_csr(CVMX_RNM_EER_DBG);
        result = reg.s.dat;
    }
    return result;
}

View File

@ -1,70 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper utilities for enabling crypto.
*
* <hr>$Revision: $<hr>
*/
#ifndef __CVMX_CRYPTO_H__
#define __CVMX_CRYPTO_H__
/**
* Enable the dormant crypto functions. If crypto is not already
* enabled and it is possible to enable it, write the enable key.
*
* @param key The dormant crypto enable key value.
*
* @return true if crypto is (or has been) enabled.
*/
extern int cvmx_crypto_dormant_enable(uint64_t key);
/**
* Read the crypto dormant debug value.
*
* @return The RNM_EER_DBG.DAT value, or zero if the feature is not
* enabled.
*/
extern uint64_t cvmx_crypto_dormant_dbg(void);
#endif /* __CVMX_CRYPTO_H__ */

View File

@ -1,185 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
* Definitions for enumerations used with Octeon CSRs.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_CSR_ENUMS_H__
#define __CVMX_CSR_ENUMS_H__
/**
 * IPD memory-placement modes: select which blocks of incoming data are
 * cached in L2 versus written straight to DRAM (see per-value comments).
 */
typedef enum {
    CVMX_IPD_OPC_MODE_STT = 0LL,       /* All blocks DRAM, not cached in L2 */
    CVMX_IPD_OPC_MODE_STF = 1LL,       /* All blocks into L2 */
    CVMX_IPD_OPC_MODE_STF1_STT = 2LL,  /* 1st block L2, rest DRAM */
    CVMX_IPD_OPC_MODE_STF2_STT = 3LL   /* 1st, 2nd blocks L2, rest DRAM */
} cvmx_ipd_mode_t;
/**
 * Enumeration representing the amount of packet processing
 * and validation performed by the input hardware (PIP port
 * configuration).
 */
typedef enum
{
    CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,  /**< Packet input doesn't perform any
                                              processing of the input packet. */
    CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull, /**< Full packet processing is performed
                                               with pointer starting at the L2
                                               (ethernet MAC) header. */
    CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull  /**< Input packets are assumed to be IP.
                                               Results from non IP packets is
                                               undefined. Pointers reference the
                                               beginning of the IP header. */
} cvmx_pip_port_parse_mode_t;
/**
 * This enumeration controls how a QoS watcher matches a packet.
 *
 * @deprecated This enumeration was used with cvmx_pip_config_watcher which has
 * been deprecated.
 */
typedef enum
{
    CVMX_PIP_QOS_WATCH_DISABLE = 0ull, /**< QoS watcher is disabled */
    CVMX_PIP_QOS_WATCH_PROTNH = 1ull,  /**< QoS watcher will match based on the IP protocol */
    CVMX_PIP_QOS_WATCH_TCP = 2ull,     /**< QoS watcher will match TCP packets to a specific destination port */
    CVMX_PIP_QOS_WATCH_UDP = 3ull      /**< QoS watcher will match UDP packets to a specific destination port */
} cvmx_pip_qos_watch_types;
/**
 * This enumeration is used in PIP tag config to control how
 * POW tags are generated by the hardware (which algorithm --
 * tuple, mask, or a combination -- produces the tag).
 */
typedef enum
{
    CVMX_PIP_TAG_MODE_TUPLE = 0ull,          /**< Always use tuple tag algorithm. This is the only mode supported on Pass 1 */
    CVMX_PIP_TAG_MODE_MASK = 1ull,           /**< Always use mask tag algorithm */
    CVMX_PIP_TAG_MODE_IP_OR_MASK = 2ull,     /**< If packet is IP, use tuple else use mask */
    CVMX_PIP_TAG_MODE_TUPLE_XOR_MASK = 3ull  /**< tuple XOR mask */
} cvmx_pip_tag_mode_t;
/**
 * Tag type definitions
 *
 * POW tag types: each value encodes both an ordering guarantee and the
 * legal tag-switch transitions (see per-value comments).
 */
typedef enum
{
    CVMX_POW_TAG_TYPE_ORDERED = 0L,   /**< Tag ordering is maintained */
    CVMX_POW_TAG_TYPE_ATOMIC = 1L,    /**< Tag ordering is maintained, and at most one PP has the tag */
    CVMX_POW_TAG_TYPE_NULL = 2L,      /**< The work queue entry from the order
                                           - NEVER tag switch from NULL to NULL */
    CVMX_POW_TAG_TYPE_NULL_NULL = 3L  /**< A tag switch to NULL, and there is no space reserved in POW
                                           - NEVER tag switch to NULL_NULL
                                           - NEVER tag switch from NULL_NULL
                                           - NULL_NULL is entered at the beginning of time and on a deschedule.
                                           - NULL_NULL can be exited by a new work request. A NULL_SWITCH load can also switch the state to NULL */
} cvmx_pow_tag_type_t;
/**
 * LCR bits 0 and 1 control the number of bits per character. See the following table for encodings:
 *
 * - 00 = 5 bits (bits 0-4 sent)
 * - 01 = 6 bits (bits 0-5 sent)
 * - 10 = 7 bits (bits 0-6 sent)
 * - 11 = 8 bits (all bits sent)
 */
typedef enum
{
    CVMX_UART_BITS5 = 0,  /* 5 bits per character (bits 0-4 sent) */
    CVMX_UART_BITS6 = 1,  /* 6 bits per character (bits 0-5 sent) */
    CVMX_UART_BITS7 = 2,  /* 7 bits per character (bits 0-6 sent) */
    CVMX_UART_BITS8 = 3   /* 8 bits per character (all bits sent) */
} cvmx_uart_bits_t;
/**
* Interrupt Priority Interrupt Interrupt Interrupt
* ID Level Type Source Reset By
* ---------------------------------------------------------------------------------------------------------------------------------
* 0001 - None None -
*
* 0110 Highest Receiver Line Overrun, parity, or framing errors or break Reading the Line Status Register
* Status interrupt
*
* 0100 Second Received Data Receiver data available (FIFOs disabled) or Reading the Receiver Buffer Register
* Available RX FIFO trigger level reached (FIFOs (FIFOs disabled) or the FIFO drops below
* enabled) the trigger level (FIFOs enabled)
*
* 1100 Second Character No characters in or out of the RX FIFO Reading the Receiver Buffer Register
* Timeout during the last 4 character times and there
* Indication is at least 1 character in it during this
* time
*
* 0010 Third Transmitter Transmitter Holding Register Empty Reading the Interrupt Identity Register
* Holding (Programmable THRE Mode disabled) or TX (if source of interrupt) or writing into
* Register FIFO at or below threshold (Programmable THR (FIFOs or THRE Mode disabled) or TX
* Empty THRE Mode enabled) FIFO above threshold (FIFOs and THRE
* Mode enabled)
*
* 0000 Fourth Modem Status Clear To Send (CTS) or Data Set Ready (DSR) Reading the Modem Status Register
* Changed or Ring Indicator (RI) or Data Center
* Detect (DCD) changed
*
* 0111 Fifth Busy Detect Software has tried to write to the Line Reading the UART Status Register
* Indication Control Register while the BUSY bit of the
* UART Status Register was set
*/
/*
 * UART interrupt identity codes; each value matches the binary
 * "Interrupt ID" column of the table above.
 */
typedef enum
{
    CVMX_UART_IID_NONE = 1,         /* 0001: no interrupt pending */
    CVMX_UART_IID_RX_ERROR = 6,     /* 0110: receiver line status */
    CVMX_UART_IID_RX_DATA = 4,      /* 0100: received data available */
    CVMX_UART_IID_RX_TIMEOUT = 12,  /* 1100: character timeout indication */
    CVMX_UART_IID_TX_EMPTY = 2,     /* 0010: transmitter holding register empty */
    CVMX_UART_IID_MODEM = 0,        /* 0000: modem status changed */
    CVMX_UART_IID_BUSY = 7          /* 0111: busy detect indication */
} cvmx_uart_iid_t;
#endif /* __CVMX_CSR_ENUMS_H__ */

View File

@ -1,117 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Configuration and status register (CSR) address and type definitions for
* Octeon. Include cvmx-csr.h instead of this file directly.
*
* This file is auto generated. Do not edit.
*
* <hr>$Revision: 69515 $<hr>
*
*/
#ifndef __CVMX_CSR_TYPEDEFS_H__
#define __CVMX_CSR_TYPEDEFS_H__
#include "cvmx-agl-defs.h"
#include "cvmx-asxx-defs.h"
#include "cvmx-asx0-defs.h"
#include "cvmx-ciu2-defs.h"
#include "cvmx-ciu-defs.h"
#include "cvmx-dbg-defs.h"
#include "cvmx-dfa-defs.h"
#include "cvmx-dfm-defs.h"
#include "cvmx-dpi-defs.h"
#include "cvmx-endor-defs.h"
#include "cvmx-eoi-defs.h"
#include "cvmx-fpa-defs.h"
#include "cvmx-gmxx-defs.h"
#include "cvmx-gpio-defs.h"
#include "cvmx-ilk-defs.h"
#include "cvmx-iob1-defs.h"
#include "cvmx-iob-defs.h"
#include "cvmx-ipd-defs.h"
#include "cvmx-key-defs.h"
#include "cvmx-l2c-defs.h"
#include "cvmx-l2d-defs.h"
#include "cvmx-l2t-defs.h"
#include "cvmx-led-defs.h"
#include "cvmx-lmcx-defs.h"
#include "cvmx-mio-defs.h"
#include "cvmx-mixx-defs.h"
#include "cvmx-mpi-defs.h"
#include "cvmx-ndf-defs.h"
#include "cvmx-npei-defs.h"
#include "cvmx-npi-defs.h"
#include "cvmx-pci-defs.h"
#include "cvmx-pcieepx-defs.h"
#include "cvmx-pciercx-defs.h"
#include "cvmx-pcmx-defs.h"
#include "cvmx-pcm-defs.h"
#include "cvmx-pcsx-defs.h"
#include "cvmx-pcsxx-defs.h"
#include "cvmx-pemx-defs.h"
#include "cvmx-pescx-defs.h"
#include "cvmx-pip-defs.h"
#include "cvmx-pko-defs.h"
#include "cvmx-pow-defs.h"
#include "cvmx-rad-defs.h"
#include "cvmx-rnm-defs.h"
#include "cvmx-sli-defs.h"
#include "cvmx-smix-defs.h"
#include "cvmx-smi-defs.h"
#include "cvmx-spxx-defs.h"
#include "cvmx-spx0-defs.h"
#include "cvmx-sriox-defs.h"
#include "cvmx-sriomaintx-defs.h"
#include "cvmx-srxx-defs.h"
#include "cvmx-sso-defs.h"
#include "cvmx-stxx-defs.h"
#include "cvmx-tim-defs.h"
#include "cvmx-trax-defs.h"
#include "cvmx-uahcx-defs.h"
#include "cvmx-uctlx-defs.h"
#include "cvmx-usbcx-defs.h"
#include "cvmx-usbnx-defs.h"
#include "cvmx-zip-defs.h"
#include "cvmx-pexp-defs.h"
#endif /* __CVMX_CSR_TYPEDEFS_H__ */

View File

@ -1,223 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Configuration and status register (CSR) address and type definitions for
 * Octeon.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_CSR_H__
#define __CVMX_CSR_H__
#ifndef CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_ENABLE_CSR_ADDRESS_CHECKING 0
#endif
#include "cvmx-platform.h"
#include "cvmx-csr-enums.h"
#include "cvmx-csr-typedefs.h"
/* Map the HW names to the SDK historical names */
typedef cvmx_ciu_intx_en1_t cvmx_ciu_int1_t;
typedef cvmx_ciu_intx_sum0_t cvmx_ciu_intx0_t;
typedef cvmx_ciu_mbox_setx_t cvmx_ciu_mbox_t;
typedef cvmx_fpa_fpfx_marks_t cvmx_fpa_fpf_marks_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que0_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que1_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que2_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que3_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que4_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que5_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que6_page_index_t;
typedef cvmx_fpa_quex_page_index_t cvmx_fpa_que7_page_index_t;
typedef cvmx_ipd_1st_mbuff_skip_t cvmx_ipd_mbuff_first_skip_t;
typedef cvmx_ipd_1st_next_ptr_back_t cvmx_ipd_first_next_ptr_back_t;
typedef cvmx_ipd_packet_mbuff_size_t cvmx_ipd_mbuff_size_t;
typedef cvmx_ipd_qosx_red_marks_t cvmx_ipd_qos_red_marks_t;
typedef cvmx_ipd_wqe_fpa_queue_t cvmx_ipd_wqe_fpa_pool_t;
typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc0_t;
typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc1_t;
typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc2_t;
typedef cvmx_l2c_pfcx_t cvmx_l2c_pfc3_t;
typedef cvmx_lmcx_bist_ctl_t cvmx_lmc_bist_ctl_t;
typedef cvmx_lmcx_bist_result_t cvmx_lmc_bist_result_t;
typedef cvmx_lmcx_comp_ctl_t cvmx_lmc_comp_ctl_t;
typedef cvmx_lmcx_ctl_t cvmx_lmc_ctl_t;
typedef cvmx_lmcx_ctl1_t cvmx_lmc_ctl1_t;
typedef cvmx_lmcx_dclk_cnt_hi_t cvmx_lmc_dclk_cnt_hi_t;
typedef cvmx_lmcx_dclk_cnt_lo_t cvmx_lmc_dclk_cnt_lo_t;
typedef cvmx_lmcx_dclk_ctl_t cvmx_lmc_dclk_ctl_t;
typedef cvmx_lmcx_ddr2_ctl_t cvmx_lmc_ddr2_ctl_t;
typedef cvmx_lmcx_delay_cfg_t cvmx_lmc_delay_cfg_t;
typedef cvmx_lmcx_dll_ctl_t cvmx_lmc_dll_ctl_t;
typedef cvmx_lmcx_dual_memcfg_t cvmx_lmc_dual_memcfg_t;
typedef cvmx_lmcx_ecc_synd_t cvmx_lmc_ecc_synd_t;
typedef cvmx_lmcx_fadr_t cvmx_lmc_fadr_t;
typedef cvmx_lmcx_ifb_cnt_hi_t cvmx_lmc_ifb_cnt_hi_t;
typedef cvmx_lmcx_ifb_cnt_lo_t cvmx_lmc_ifb_cnt_lo_t;
typedef cvmx_lmcx_mem_cfg0_t cvmx_lmc_mem_cfg0_t;
typedef cvmx_lmcx_mem_cfg1_t cvmx_lmc_mem_cfg1_t;
typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_odt_ctl_t;
typedef cvmx_lmcx_ops_cnt_hi_t cvmx_lmc_ops_cnt_hi_t;
typedef cvmx_lmcx_ops_cnt_lo_t cvmx_lmc_ops_cnt_lo_t;
typedef cvmx_lmcx_pll_bwctl_t cvmx_lmc_pll_bwctl_t;
typedef cvmx_lmcx_pll_ctl_t cvmx_lmc_pll_ctl_t;
typedef cvmx_lmcx_pll_status_t cvmx_lmc_pll_status_t;
typedef cvmx_lmcx_read_level_ctl_t cvmx_lmc_read_level_ctl_t;
typedef cvmx_lmcx_read_level_dbg_t cvmx_lmc_read_level_dbg_t;
typedef cvmx_lmcx_read_level_rankx_t cvmx_lmc_read_level_rankx_t;
typedef cvmx_lmcx_rodt_comp_ctl_t cvmx_lmc_rodt_comp_ctl_t;
typedef cvmx_lmcx_rodt_ctl_t cvmx_lmc_rodt_ctl_t;
typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_wodt_ctl_t;
typedef cvmx_lmcx_wodt_ctl0_t cvmx_lmc_wodt_ctl0_t;
typedef cvmx_lmcx_wodt_ctl1_t cvmx_lmc_wodt_ctl1_t;
typedef cvmx_mio_boot_reg_cfgx_t cvmx_mio_boot_reg_cfg0_t;
typedef cvmx_mio_boot_reg_timx_t cvmx_mio_boot_reg_tim0_t;
typedef cvmx_mio_twsx_int_t cvmx_mio_tws_int_t;
typedef cvmx_mio_twsx_sw_twsi_t cvmx_mio_tws_sw_twsi_t;
typedef cvmx_mio_twsx_sw_twsi_ext_t cvmx_mio_tws_sw_twsi_ext_t;
typedef cvmx_mio_twsx_twsi_sw_t cvmx_mio_tws_twsi_sw_t;
typedef cvmx_npi_base_addr_inputx_t cvmx_npi_base_addr_input_t;
typedef cvmx_npi_base_addr_outputx_t cvmx_npi_base_addr_output_t;
typedef cvmx_npi_buff_size_outputx_t cvmx_npi_buff_size_output_t;
typedef cvmx_npi_dma_highp_counts_t cvmx_npi_dma_counts_t;
typedef cvmx_npi_dma_highp_naddr_t cvmx_npi_dma_naddr_t;
typedef cvmx_npi_highp_dbell_t cvmx_npi_dbell_t;
typedef cvmx_npi_highp_ibuff_saddr_t cvmx_npi_dma_ibuff_saddr_t;
typedef cvmx_npi_mem_access_subidx_t cvmx_npi_mem_access_subid_t;
typedef cvmx_npi_num_desc_outputx_t cvmx_npi_num_desc_output_t;
typedef cvmx_npi_px_dbpair_addr_t cvmx_npi_dbpair_addr_t;
typedef cvmx_npi_px_instr_addr_t cvmx_npi_instr_addr_t;
typedef cvmx_npi_px_instr_cnts_t cvmx_npi_instr_cnts_t;
typedef cvmx_npi_px_pair_cnts_t cvmx_npi_pair_cnts_t;
typedef cvmx_npi_size_inputx_t cvmx_npi_size_input_t;
typedef cvmx_pci_dbellx_t cvmx_pci_dbell_t;
typedef cvmx_pci_dma_cntx_t cvmx_pci_dma_cnt_t;
typedef cvmx_pci_dma_int_levx_t cvmx_pci_dma_int_lev_t;
typedef cvmx_pci_dma_timex_t cvmx_pci_dma_time_t;
typedef cvmx_pci_instr_countx_t cvmx_pci_instr_count_t;
typedef cvmx_pci_pkt_creditsx_t cvmx_pci_pkt_credits_t;
typedef cvmx_pci_pkts_sent_int_levx_t cvmx_pci_pkts_sent_int_lev_t;
typedef cvmx_pci_pkts_sent_timex_t cvmx_pci_pkts_sent_time_t;
typedef cvmx_pci_pkts_sentx_t cvmx_pci_pkts_sent_t;
typedef cvmx_pip_prt_cfgx_t cvmx_pip_port_cfg_t;
typedef cvmx_pip_prt_tagx_t cvmx_pip_port_tag_cfg_t;
typedef cvmx_pip_qos_watchx_t cvmx_pip_port_watcher_cfg_t;
typedef cvmx_pko_mem_queue_ptrs_t cvmx_pko_queue_cfg_t;
typedef cvmx_pko_reg_cmd_buf_t cvmx_pko_pool_cfg_t;
typedef cvmx_smix_clk_t cvmx_smi_clk_t;
typedef cvmx_smix_cmd_t cvmx_smi_cmd_t;
typedef cvmx_smix_en_t cvmx_smi_en_t;
typedef cvmx_smix_rd_dat_t cvmx_smi_rd_dat_t;
typedef cvmx_smix_wr_dat_t cvmx_smi_wr_dat_t;
typedef cvmx_tim_reg_flags_t cvmx_tim_control_t;
/* The CSRs for bootbus region zero used to be independent of the
   other 1-7. As of SDK 1.7.0 these were combined. These macros
   are for backwards compatibility. */
#define CVMX_MIO_BOOT_REG_CFG0 CVMX_MIO_BOOT_REG_CFGX(0)
#define CVMX_MIO_BOOT_REG_TIM0 CVMX_MIO_BOOT_REG_TIMX(0)
/* The CN3XXX and CN58XX chips used to not have a LMC number
   passed to the address macros. These are here to supply backwards
   compatibility with old code. Code should really use the new addresses
   with bus arguments for support on other chips */
#define CVMX_LMC_BIST_CTL CVMX_LMCX_BIST_CTL(0)
#define CVMX_LMC_BIST_RESULT CVMX_LMCX_BIST_RESULT(0)
#define CVMX_LMC_COMP_CTL CVMX_LMCX_COMP_CTL(0)
#define CVMX_LMC_CTL CVMX_LMCX_CTL(0)
#define CVMX_LMC_CTL1 CVMX_LMCX_CTL1(0)
#define CVMX_LMC_DCLK_CNT_HI CVMX_LMCX_DCLK_CNT_HI(0)
#define CVMX_LMC_DCLK_CNT_LO CVMX_LMCX_DCLK_CNT_LO(0)
#define CVMX_LMC_DCLK_CTL CVMX_LMCX_DCLK_CTL(0)
#define CVMX_LMC_DDR2_CTL CVMX_LMCX_DDR2_CTL(0)
#define CVMX_LMC_DELAY_CFG CVMX_LMCX_DELAY_CFG(0)
#define CVMX_LMC_DLL_CTL CVMX_LMCX_DLL_CTL(0)
#define CVMX_LMC_DUAL_MEMCFG CVMX_LMCX_DUAL_MEMCFG(0)
#define CVMX_LMC_ECC_SYND CVMX_LMCX_ECC_SYND(0)
#define CVMX_LMC_FADR CVMX_LMCX_FADR(0)
#define CVMX_LMC_IFB_CNT_HI CVMX_LMCX_IFB_CNT_HI(0)
#define CVMX_LMC_IFB_CNT_LO CVMX_LMCX_IFB_CNT_LO(0)
#define CVMX_LMC_MEM_CFG0 CVMX_LMCX_MEM_CFG0(0)
#define CVMX_LMC_MEM_CFG1 CVMX_LMCX_MEM_CFG1(0)
#define CVMX_LMC_OPS_CNT_HI CVMX_LMCX_OPS_CNT_HI(0)
#define CVMX_LMC_OPS_CNT_LO CVMX_LMCX_OPS_CNT_LO(0)
#define CVMX_LMC_PLL_BWCTL CVMX_LMCX_PLL_BWCTL(0)
#define CVMX_LMC_PLL_CTL CVMX_LMCX_PLL_CTL(0)
#define CVMX_LMC_PLL_STATUS CVMX_LMCX_PLL_STATUS(0)
#define CVMX_LMC_READ_LEVEL_CTL CVMX_LMCX_READ_LEVEL_CTL(0)
#define CVMX_LMC_READ_LEVEL_DBG CVMX_LMCX_READ_LEVEL_DBG(0)
#define CVMX_LMC_READ_LEVEL_RANKX CVMX_LMCX_READ_LEVEL_RANKX(0)
#define CVMX_LMC_RODT_COMP_CTL CVMX_LMCX_RODT_COMP_CTL(0)
#define CVMX_LMC_RODT_CTL CVMX_LMCX_RODT_CTL(0)
#define CVMX_LMC_WODT_CTL CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL0 CVMX_LMCX_WODT_CTL0(0)
#define CVMX_LMC_WODT_CTL1 CVMX_LMCX_WODT_CTL1(0)
/* The CN3XXX and CN58XX chips used to not have a TWSI bus number
passed to the address macros. These are here to supply backwards
compatability with old code. Code should really use the new addresses
with bus arguments for support on other chips */
#define CVMX_MIO_TWS_INT CVMX_MIO_TWSX_INT(0)
#define CVMX_MIO_TWS_SW_TWSI CVMX_MIO_TWSX_SW_TWSI(0)
#define CVMX_MIO_TWS_SW_TWSI_EXT CVMX_MIO_TWSX_SW_TWSI_EXT(0)
#define CVMX_MIO_TWS_TWSI_SW CVMX_MIO_TWSX_TWSI_SW(0)
/* The CN3XXX and CN58XX chips used to not have a SMI/MDIO bus number
passed to the address macros. These are here to supply backwards
compatability with old code. Code should really use the new addresses
with bus arguments for support on other chips */
#define CVMX_SMI_CLK CVMX_SMIX_CLK(0)
#define CVMX_SMI_CMD CVMX_SMIX_CMD(0)
#define CVMX_SMI_EN CVMX_SMIX_EN(0)
#define CVMX_SMI_RD_DAT CVMX_SMIX_RD_DAT(0)
#define CVMX_SMI_WR_DAT CVMX_SMIX_WR_DAT(0)
#endif /* __CVMX_CSR_H__ */

View File

@ -1,151 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* cvmx-dbg-defs.h
*
* Configuration and status register (CSR) type definitions for
* Octeon dbg.
*
* This file is auto generated. Do not edit.
*
* <hr>$Revision$<hr>
*
*/
#ifndef __CVMX_DBG_DEFS_H__
#define __CVMX_DBG_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_DBG_DATA CVMX_DBG_DATA_FUNC()
/* Address accessor for DBG_DATA with model checking: warn when the
   running chip is not one of the models that implement this CSR, then
   return the fixed IO-segment address. */
static inline uint64_t CVMX_DBG_DATA_FUNC(void)
{
	int model_ok;

	model_ok = OCTEON_IS_MODEL(OCTEON_CN3XXX) ||
	    OCTEON_IS_MODEL(OCTEON_CN50XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN58XX);
	if (!model_ok)
		cvmx_warn("CVMX_DBG_DATA not supported on this chip\n");
	return CVMX_ADD_IO_SEG(0x00011F00000001E8ull);
}
#else
#define CVMX_DBG_DATA (CVMX_ADD_IO_SEG(0x00011F00000001E8ull))
#endif
/**
 * cvmx_dbg_data
 *
 * DBG_DATA = Debug Data Register
 *
 * Value returned on the debug-data lines from the RSLs.
 *
 * The `s` layout is the common view; the per-model structs (cn30xx,
 * cn38xx, cn58xx) give each chip family's interpretation of bits 23-30.
 * Bitfield order is flipped between big- and little-endian builds so the
 * named fields land on the same hardware bits either way.
 */
union cvmx_dbg_data {
uint64_t u64;
struct cvmx_dbg_data_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_23_63 : 41;
uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
debug select value. */
uint64_t data : 17; /**< Value on the debug data lines. */
#else
uint64_t data : 17;
uint64_t dsel_ext : 1;
uint64_t c_mul : 5;
uint64_t reserved_23_63 : 41;
#endif
} s;
struct cvmx_dbg_data_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_31_63 : 33;
uint64_t pll_mul : 3; /**< pll_mul pins sampled at DCOK assertion */
uint64_t reserved_23_27 : 5;
uint64_t c_mul : 5; /**< Core PLL multiplier sampled at DCOK assertion */
uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
debug select value. */
uint64_t data : 17; /**< Value on the debug data lines. */
#else
uint64_t data : 17;
uint64_t dsel_ext : 1;
uint64_t c_mul : 5;
uint64_t reserved_23_27 : 5;
uint64_t pll_mul : 3;
uint64_t reserved_31_63 : 33;
#endif
} cn30xx;
struct cvmx_dbg_data_cn30xx cn31xx;
struct cvmx_dbg_data_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63 : 35;
uint64_t d_mul : 4; /**< D_MUL pins sampled on DCOK assertion */
uint64_t dclk_mul2 : 1; /**< Should always be set for fast DDR-II operation */
uint64_t cclk_div2 : 1; /**< Should always be clear for fast core clock */
uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
debug select value. */
uint64_t data : 17; /**< Value on the debug data lines. */
#else
uint64_t data : 17;
uint64_t dsel_ext : 1;
uint64_t c_mul : 5;
uint64_t cclk_div2 : 1;
uint64_t dclk_mul2 : 1;
uint64_t d_mul : 4;
uint64_t reserved_29_63 : 35;
#endif
} cn38xx;
struct cvmx_dbg_data_cn38xx cn38xxp2;
struct cvmx_dbg_data_cn30xx cn50xx;
struct cvmx_dbg_data_cn58xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_29_63 : 35;
uint64_t rem : 6; /**< Remaining debug_select pins sampled at DCOK */
uint64_t c_mul : 5; /**< C_MUL pins sampled at DCOK assertion */
uint64_t dsel_ext : 1; /**< Allows changes in the external pins to set the
debug select value. */
uint64_t data : 17; /**< Value on the debug data lines. */
#else
uint64_t data : 17;
uint64_t dsel_ext : 1;
uint64_t c_mul : 5;
uint64_t rem : 6;
uint64_t reserved_29_63 : 35;
#endif
} cn58xx;
struct cvmx_dbg_data_cn58xx cn58xxp1;
};
typedef union cvmx_dbg_data cvmx_dbg_data_t;
#endif

View File

@ -1,278 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#undef __ASSEMBLY__
#define __ASSEMBLY__
#ifdef __linux__
#include <asm/asm.h>
#include <asm/regdef.h>
#else
#include <machine/asm.h>
#include <machine/regdef.h>
#endif
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx-asm.h>
#include <asm/octeon/octeon-boot-info.h>
#else
#include "cvmx-asm.h"
#ifndef _OCTEON_TOOLCHAIN_RUNTIME
#include <octeon_mem_map.h>
#else
#include "cvmx-platform.h"
#include "octeon-boot-info.h"
#endif
#endif
/* The registers saving/restoring is split into two because k0 is stored in the COP0_DESAVE register. */
/* REGS0 covers $0-$25; $26 (k0) lives in COP0_DESAVE and is stored
   separately, so REGS1 resumes at $27 (k1). */
#define REGS0 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25
#define REGS1 27,28,29,30,31
/* Store a register at the save pointer (k0) and advance it by 8. */
#define SAVE_REGISTER(reg) \
sd reg, 0(k0); \
addi k0, 8
/* Inverse of SAVE_REGISTER: pop the last-saved register. */
#define RESTORE_REGISTER(reg) \
ld reg, -8(k0); \
addi k0, -8
/* Save a COP0 register (staged through k1) at the save pointer. */
#define SAVE_COP0(reg) \
dmfc0 k1,reg; \
sd k1, 0(k0); \
addi k0, 8
#define RESTORE_COP0(reg) \
ld k1, -8(k0); \
addi k0, -8; \
dmtc0 k1,reg
/* Save the 64-bit value stored at a fixed memory address. */
#define SAVE_ADDRESS(addr) \
dli k1, addr; \
ld k1, 0(k1); \
sd k1, 0(k0); \
addi k0, 8
/* Restore the 64-bit value back to a fixed address (clobbers t0). */
#define RESTORE_ADDRESS(addr) \
dli t0, addr; \
ld k1, -8(k0); \
sd k1, 0(t0); \
addi k0, -8
#define REG_SAVE_BASE_DIV_8 (BOOTLOADER_DEBUG_REG_SAVE_BASE >> 3)
/* Hardware instruction/data breakpoint register addresses: one status
   register per kind plus 4 breakpoint slots, each slot 0x100 apart. */
#define HW_INSTRUCTION_BREAKPOINT_STATUS (0xFFFFFFFFFF301000)
#define HW_INSTRUCTION_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF301100 + 0x100 * (num))
#define HW_INSTRUCTION_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF301108 + 0x100 * (num))
#define HW_INSTRUCTION_BREAKPOINT_ASID(num) (0xFFFFFFFFFF301110 + 0x100 * (num))
#define HW_INSTRUCTION_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF301118 + 0x100 * (num))
#define HW_DATA_BREAKPOINT_STATUS (0xFFFFFFFFFF302000)
#define HW_DATA_BREAKPOINT_ADDRESS(num) (0xFFFFFFFFFF302100 + 0x100 * (num))
#define HW_DATA_BREAKPOINT_ADDRESS_MASK(num) (0xFFFFFFFFFF302108 + 0x100 * (num))
#define HW_DATA_BREAKPOINT_ASID(num) (0xFFFFFFFFFF302110 + 0x100 * (num))
#define HW_DATA_BREAKPOINT_CONTROL(num) (0xFFFFFFFFFF302118 + 0x100 * (num))
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
/* Kernel build: each symbol is an array addr_all with one slot per core;
   index it by the core number read from COP0 $15 sel 1 (exception base),
   scaled by the caller-supplied shift.  Clobbers $1 (at). */
#define loadaddr(reg, addr, shift) \
dla reg, addr##_all; \
mfc0 $1, $15, 1; \
andi $1, 0xff; \
sll $1, shift; \
add reg, reg, $1
#else
/* Non-kernel build: the symbol is a single object; plain address load. */
#define loadaddr(reg, addr, shift) \
dla reg, addr
#endif
.set noreorder
.set noat
.text
// Detect debug-mode exception, save all registers, create a stack and then
// call the stage3 C function.
.ent __cvmx_debug_handler_stage2
.globl __cvmx_debug_handler_stage2
__cvmx_debug_handler_stage2:
// Save off k0 in COP0_DESAVE
dmtc0 k0, COP0_DESAVE
// Use reserved space in kseg0 to save off some temp regs
mfc0 k0, $15, 1 // read exception base reg.
andi k0, 0xff // mask off core ID
sll k0, 12 // multiply by 4096 (512 dwords) DEBUG_NUMREGS
// Add BOOTLOADER_DEBUG_REG_SAVE_BASE as 8 x (base/8) so each immediate
// fits in addiu's 16-bit signed field.
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
addiu k0, REG_SAVE_BASE_DIV_8
// add base offset - after exception vectors for all cores
rotr k0, k0, 31 // set bit 31 for kseg0 access
addi k0, 1
rotr k0, k0, 1
// save off k1 and at ($1) off to the bootloader reg save area
// at is used by dla
sd $1, 8(k0) // save at for temp usage
sd k1, 216(k0) // save k1 for temp usage
// Detect debug-mode exception.
// If COP0_MULTICOREDEBUG[DExecC] is set,
dmfc0 k1, COP0_MULTICOREDEBUG
bbit0 k1, 16, noexc
nop
// COP0_DEBUG[DINT,DIB,DDBS,DBp,DSS] are not set and
dmfc0 k1, COP0_DEBUG
andi k1, 0x3f
bnez k1, noexc
nop
// COP0_DEBUG[DExecC] is set.
dmfc0 k1, COP0_DEBUG
dext k1,k1,10,5
beqz k1,noexc
nop
// We don't handle debug-mode exceptions in delay-slots so DEBUG[DBD]
// should not be set. If yes spin forever.
dmfc0 k1, COP0_DEBUG
1:
bbit1 k1, 31, 1b
nop
// It's a debug-mode exception. Flag the occurrence. Also if it's
// expected just ignore it but returning the subsequent instruction
// after the fault.
loadaddr (k1, __cvmx_debug_mode_exception_occured, 3)
sd k1, 0(k1)
loadaddr (k1, __cvmx_debug_mode_exception_ignore, 3)
ld k1, 0(k1)
beqz k1, noexc
nop
// Restore k1 and at from the bootloader reg save area
ld $1, 8(k0) // restore at
ld k1, 216(k0) // restore k1
dmfc0 k0, COP0_DEPC
// Skip the faulting instruction.
daddiu k0, 4
jr k0
dmfc0 k0, COP0_DESAVE // delay slot: restore k0
noexc:
// Normal path: spill $0-$25 to the per-core save area.
loadaddr (k1, __cvmx_debug_save_regs_area, 8)
// Restore at
ld $1, 8(k0) // restore at for temp usage
.irp n, REGS0
sd $\n, 0(k1)
addiu k1, 8
.endr
move $25, k1
ld k1, 216(k0) // restore k1 for temp usage
move k0, $25
// Store out k0, we can use $25 here because we just saved it
dmfc0 $25, COP0_DESAVE
sd $25, 0(k0)
addiu k0, 8
.irp n, REGS1
sd $\n, 0(k0)
addiu k0, 8
.endr
loadaddr(sp, __cvmx_debug_stack_top, 3)
// Load the stack pointer as a pointer size.
#ifdef _ABIN32
lw sp,0(sp)
#else
ld sp,0(sp)
#endif
// Pass lo/hi as the first two arguments ($4 = a0, $5 = a1) to stage3.
mflo $4
mfhi $5
jal __cvmx_debug_handler_stage3
nop
// Stage3 returned: reload every register from the save area.
loadaddr(k0, __cvmx_debug_save_regs_area, 8)
.irp n, REGS0
ld $\n, 0(k0)
addiu k0, 8
.endr
// Restore k0 to COP0_DESAVE via k1
ld k1, 0(k0)
addiu k0, 8
dmtc0 k1, COP0_DESAVE
.irp n, REGS1
ld $\n, 0(k0)
addiu k0, 8
.endr
dmfc0 k0, COP0_DESAVE
// Flush the icache; by adding and removing SW breakpoints we change
// the instruction stream.
synci 0($0)
deret
nop
.end __cvmx_debug_handler_stage2

View File

@ -1,94 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-debug.h>
#define cvmx_interrupt_in_isr 0
#else
#include "cvmx.h"
#include "cvmx-debug.h"
#ifndef CVMX_BUILD_FOR_TOOLCHAIN
extern int cvmx_interrupt_in_isr;
#else
#define cvmx_interrupt_in_isr 0
#endif
#endif
/**
 * Park this core until the remote side clears remote_controlled.
 *
 * Exception: while single-stepping with step_isr clear, a step that
 * landed inside an interrupt handler resumes immediately instead of
 * being reported.
 *
 * @param context Per-core debug context; remote_controlled is the
 *                memory flag cleared externally to resume the core.
 * @param state   Global debug state; only step_isr is consulted.
 */
static void cvmx_debug_remote_mem_wait_for_resume(volatile cvmx_debug_core_context_t *context, cvmx_debug_state_t state)
{
//
// If we are stepping and not stepping into an interrupt and the debug
// exception happened in an interrupt, continue the execution.
//
if(!state.step_isr &&
(context->cop0.debug & 0x1) && /* Single stepping */
!(context->cop0.debug & 0x1e) && /* Did not hit a breakpoint */
((context->cop0.status & 0x2) || cvmx_interrupt_in_isr))
return;
// Announce we are stopped, force the store out with SYNCW, then spin
// until the flag is cleared from the other side.
context->remote_controlled = 1;
CVMX_SYNCW;
while (context->remote_controlled)
;
CVMX_SYNCW;
}
/* Focus-core change hook for the memory transport: currently a no-op. */
static void cvmx_debug_memory_change_core(int oldcore, int newcore)
{
/* FIXME, this should change the core on the host side too. */
}
/* Debugger transport backed by shared memory: it does no packet I/O of
   its own (init/getpacket/putpacket are NULL) and needs no proxy; the
   core is controlled through the per-core context's remote_controlled
   flag (see cvmx_debug_remote_mem_wait_for_resume above). */
cvmx_debug_comm_t cvmx_debug_remote_comm =
{
.init = NULL,
.install_break_handler = NULL,
.needs_proxy = 0,
.getpacket = NULL,
.putpacket = NULL,
.wait_for_resume = cvmx_debug_remote_mem_wait_for_resume,
.change_core = cvmx_debug_memory_change_core,
};

View File

@ -1,267 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-debug.h>
#include <asm/octeon/cvmx-uart.h>
#include <asm/octeon/octeon-boot-info.h>
#include <asm/octeon/cvmx-spinlock.h>
int cvmx_debug_uart = 1;
#else
#include <limits.h>
#include "executive-config.h"
#include "cvmx.h"
#include "cvmx-debug.h"
#include "cvmx-uart.h"
#include "cvmx-spinlock.h"
#include "octeon-boot-info.h"
#endif
/*
* NOTE: CARE SHOULD BE TAKEN USING STD C LIBRARY FUNCTIONS IN
* THIS FILE IF SOMEONE PUTS A BREAKPOINT ON THOSE FUNCTIONS
* DEBUGGING WILL FAIL.
*/
#ifdef CVMX_BUILD_FOR_TOOLCHAIN
#pragma weak cvmx_uart_enable_intr
int cvmx_debug_uart = 1;
#endif
/* Default to second uart port for backward compatibility. The default (if
-debug does not set the uart number) can now be overridden with
CVMX_DEBUG_COMM_UART_NUM. */
#ifndef CVMX_DEBUG_COMM_UART_NUM
# define CVMX_DEBUG_COMM_UART_NUM 1
#endif
static CVMX_SHARED cvmx_spinlock_t cvmx_debug_uart_lock;
/**
 * Interrupt handler for debugger Control-C interrupts.
 *
 * Drains every pending byte from the debug UART; whenever a Control-C
 * (0x03) is seen it pulses the MCD0 signal so the cores stop in the
 * debug exception handler.
 *
 * @param irq_number IRQ interrupt number
 * @param registers CPU registers at the time of the interrupt
 * @param user_arg Unused user argument
 */
void cvmx_debug_uart_process_debug_interrupt(int irq_number, uint64_t registers[32], void *user_arg)
{
cvmx_uart_lsr_t lsrval;
/* Check for a Control-C interrupt from the debugger. This loop will eat
all input received on the uart */
lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
while (lsrval.s.dr)
{
int c = cvmx_read_csr(CVMX_MIO_UARTX_RBR(cvmx_debug_uart));
if (c == '\003')
{
register uint64_t tmp;
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
/* Push any buffered program output to the wire before stopping. */
fflush(stderr);
fflush(stdout);
#endif
/* Pulse MCD0 signal on Ctrl-C to stop all the cores. Also
set the MCD0 to be not masked by this core so we know
the signal is received by someone */
asm volatile (
"dmfc0 %0, $22\n"
"ori %0, %0, 0x1110\n"
"dmtc0 %0, $22\n"
: "=r" (tmp));
}
lsrval.u64 = cvmx_read_csr(CVMX_MIO_UARTX_LSR(cvmx_debug_uart));
}
}
/* Choose the UART used for debugging: keep a value that was already
   configured (left != -1), otherwise fall back to the compile-time
   default CVMX_DEBUG_COMM_UART_NUM. */
static void cvmx_debug_uart_init(void)
{
    if (cvmx_debug_uart != -1)
        return;
    cvmx_debug_uart = CVMX_DEBUG_COMM_UART_NUM;
}
/* Hook the debug UART so received bytes raise the Control-C interrupt
   handler above.  In the Linux-kernel build this is a no-op.  In the
   toolchain build cvmx_uart_enable_intr is a weak symbol (see the
   #pragma weak earlier in this file), so only call it when present. */
static void cvmx_debug_uart_install_break_handler(void)
{
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
#ifdef CVMX_BUILD_FOR_TOOLCHAIN
if (cvmx_uart_enable_intr)
#endif
cvmx_uart_enable_intr(cvmx_debug_uart, cvmx_debug_uart_process_debug_interrupt);
#endif
}
/**
 * Convert one ASCII hex digit to its numeric value.
 *
 * @param ch character to convert
 * @return 0-15 for a valid hex digit (either case), -1 otherwise
 */
static inline int cvmx_debug_uart_hex(char ch)
{
    if (ch >= '0' && ch <= '9')
        return ch - '0';
    if (ch >= 'A' && ch <= 'F')
        return ch - 'A' + 10;
    if (ch >= 'a' && ch <= 'f')
        return ch - 'a' + 10;
    return -1;
}
/* Get a packet from the UART, return 0 on failure and 1 on success.
 *
 * Packets use GDB-remote-serial-protocol-style framing:
 *   '$' <payload> '#' <two hex checksum digits>
 * where the checksum is the modulo-256 sum of the payload bytes.
 *
 * Fix: the payload loop now reserves one byte for the NUL terminator.
 * Previously `count` could reach `size`, and `buffer[count] = 0` then
 * wrote one byte past the end of the caller's buffer.  A zero-sized
 * buffer is also rejected up front for the same reason.
 */
static int cvmx_debug_uart_getpacket(char *buffer, size_t size)
{
    if (size == 0)
        return 0;
    while (1)
    {
        unsigned char checksum;
        int timedout = 0;
        size_t count;
        char ch;
        ch = cvmx_uart_read_byte_with_timeout(cvmx_debug_uart, &timedout, __SHRT_MAX__);
        if (timedout)
            return 0;
        /* if this is not the start character, ignore it. */
        if (ch != '$')
            continue;
retry:
        checksum = 0;
        count = 0;
        /* now, read until a '#' is seen or only the terminator slot is left */
        while (count < size - 1)
        {
            ch = cvmx_uart_read_byte(cvmx_debug_uart);
            if (ch == '$')
                goto retry; /* a new packet started mid-stream: resync */
            if (ch == '#')
                break;
            checksum = checksum + ch;
            buffer[count] = ch;
            count = count + 1;
        }
        buffer[count] = 0;
        if (ch == '#')
        {
            char csumchars0, csumchars1;
            unsigned xmitcsum;
            int n0, n1;
            /* Two hex digits follow '#'; compare them to our running sum. */
            csumchars0 = cvmx_uart_read_byte(cvmx_debug_uart);
            csumchars1 = cvmx_uart_read_byte(cvmx_debug_uart);
            n0 = cvmx_debug_uart_hex(csumchars0);
            n1 = cvmx_debug_uart_hex(csumchars1);
            if (n0 == -1 || n1 == -1)
                return 0;
            xmitcsum = (n0 << 4) | n1;
            return checksum == xmitcsum;
        }
    }
    return 0; /* not reached: the loop only exits via return */
}
/* Write the two-digit uppercase hex representation of t into str,
   NUL-terminated; str must have room for at least 3 bytes. */
static void cvmx_debug_uart_strhex(char *str, unsigned char t)
{
    const char digits[] = "0123456789ABCDEF";

    str[0] = digits[t >> 4];
    str[1] = digits[t & 0x0F];
    str[2] = '\0';
}
/* Transmit one framed packet: '$' <payload> '#' <two hex checksum
   digits>.  The spinlock keeps packets from different cores from
   interleaving on the shared UART.  Always returns 0. */
static int cvmx_debug_uart_putpacket(char *packet)
{
size_t i;
unsigned char csum;
unsigned char *ptr = (unsigned char *) packet;
char csumstr[3];
/* Checksum is the modulo-256 sum of the payload bytes. */
for (csum = 0, i = 0; ptr[i]; i++)
csum += ptr[i];
cvmx_debug_uart_strhex(csumstr, csum);
cvmx_spinlock_lock(&cvmx_debug_uart_lock);
cvmx_uart_write_byte(cvmx_debug_uart, '$');
cvmx_uart_write_string(cvmx_debug_uart, packet);
cvmx_uart_write_byte(cvmx_debug_uart, '#');
cvmx_uart_write_string(cvmx_debug_uart, csumstr);
cvmx_spinlock_unlock(&cvmx_debug_uart_lock);
return 0;
}
/* Move the debug-UART interrupt from one core to another: enable the
   uart bit in the new focus core's CIU interrupt-enable register and
   clear it in the old core's.  No-op in the Linux-kernel build. */
static void cvmx_debug_uart_change_core(int oldcore, int newcore)
{
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
cvmx_ciu_intx0_t irq_control;
irq_control.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(newcore * 2));
irq_control.s.uart |= (1u<<cvmx_debug_uart);
cvmx_write_csr(CVMX_CIU_INTX_EN0(newcore * 2), irq_control.u64);
/* Disable interrupts to this core since he is about to die */
irq_control.u64 = cvmx_read_csr(CVMX_CIU_INTX_EN0(oldcore * 2));
irq_control.s.uart &= ~(1u<<cvmx_debug_uart);
cvmx_write_csr(CVMX_CIU_INTX_EN0(oldcore* 2), irq_control.u64);
#endif
}
/* Debugger transport over a UART: framed packet I/O with a proxy on
   the host side (needs_proxy = 1); no wait_for_resume hook. */
cvmx_debug_comm_t cvmx_debug_uart_comm =
{
.init = cvmx_debug_uart_init,
.install_break_handler = cvmx_debug_uart_install_break_handler,
.needs_proxy = 1,
.getpacket = cvmx_debug_uart_getpacket,
.putpacket = cvmx_debug_uart_putpacket,
.wait_for_resume = NULL,
.change_core = cvmx_debug_uart_change_core,
};

File diff suppressed because it is too large Load Diff

View File

@ -1,454 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to debug exception handler
*
* <hr>$Revision: $<hr>
*/
#ifndef __CVMX_DEBUG_H__
#define __CVMX_DEBUG_H__
#include "cvmx-core.h"
#include "cvmx-spinlock.h"
/* Enough room for setting memory of 512 bytes.  The replacement lists
   are parenthesized so the constants expand safely inside larger
   expressions (the previous `1024 + 34` form misexpanded in contexts
   such as `CVMX_DEBUG_MAX_REQUEST_SIZE * 2`). */
#define CVMX_DEBUG_MAX_REQUEST_SIZE (1024 + 34)
#define CVMX_DEBUG_MAX_RESPONSE_SIZE (1024 + 5)
#define CVMX_DEBUG_GLOBALS_BLOCK_NAME "cvmx-debug-globals"
#define CVMX_DEBUG_GLOBALS_VERSION 3
#ifdef __cplusplus
extern "C" {
#endif
void cvmx_debug_init(void);
void cvmx_debug_finish(void);
void cvmx_debug_trigger_exception(void);
#ifdef CVMX_BUILD_FOR_TOOLCHAIN
/* NOTE(review): presumably set by the toolchain runtime at startup when
   the image was booted under debugger control — confirm against the
   runtime sources. */
extern int __octeon_debug_booted;
/* Return nonzero when the image was booted for debugging. */
static inline int cvmx_debug_booted(void)
{
return __octeon_debug_booted;
}
#else
/* Return nonzero when the bootloader config flags carry the DEBUG bit,
   i.e. the image was booted for debugging. */
static inline int cvmx_debug_booted(void)
{
return cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_DEBUG;
}
#endif
/* There are 64 TLB entries in CN5XXX and 32 TLB entries in CN3XXX and
128 TLB entries in CN6XXX. */
#define CVMX_DEBUG_N_TLB_ENTRIES 128
/* Maximum number of hardware breakpoints/watchpoints allowed */
#define CVMX_DEBUG_MAX_OCTEON_HW_BREAKPOINTS 4
/* Per-core register context captured by the debug exception handler.
   The CVMX_DEBUG_*_CONTEXT macros apply a caller-supplied F() macro to
   every field of the corresponding section, so the stub can iterate
   over the whole context without repeating the field list. */
typedef struct
{
volatile uint64_t remote_controlled;
uint64_t regs[32];
uint64_t lo;
uint64_t hi;
#define CVMX_DEBUG_BASIC_CONTEXT \
F(remote_controlled); \
{ int i; \
for (i = 0; i < 32; i++) \
F(regs[i]); \
} \
F(lo); \
F(hi);
/* COP0 registers captured at the debug exception. */
struct {
uint64_t index;
uint64_t entrylo[2];
uint64_t entryhi;
uint64_t pagemask;
uint64_t status;
uint64_t badvaddr;
uint64_t cause;
uint64_t depc;
uint64_t desave;
uint64_t debug;
uint64_t multicoredebug;
uint64_t perfval[2];
uint64_t perfctrl[2];
} cop0;
#define CVMX_DEBUG_COP0_CONTEXT \
F(cop0.index); \
F(cop0.entrylo[0]); \
F(cop0.entrylo[1]); \
F(cop0.entryhi); \
F(cop0.pagemask); \
F(cop0.status); \
F(cop0.badvaddr); \
F(cop0.cause); \
F(cop0.depc); \
F(cop0.desave); \
F(cop0.debug); \
F(cop0.multicoredebug); \
F(cop0.perfval[0]); \
F(cop0.perfval[1]); \
F(cop0.perfctrl[0]); \
F(cop0.perfctrl[1]);
/* Hardware breakpoint state: one status register plus
   CVMX_DEBUG_MAX_OCTEON_HW_BREAKPOINTS slots each for instruction
   (hw_ibp) and data (hw_dbp) breakpoints. */
struct
{
uint64_t status;
uint64_t address[4];
uint64_t address_mask[4];
uint64_t asid[4];
uint64_t control[4];
} hw_ibp, hw_dbp;
/* Hardware Instruction Break Point */
#define CVMX_DEBUG_HW_IBP_CONTEXT \
F(hw_ibp.status); \
F(hw_ibp.address[0]); \
F(hw_ibp.address[1]); \
F(hw_ibp.address[2]); \
F(hw_ibp.address[3]); \
F(hw_ibp.address_mask[0]); \
F(hw_ibp.address_mask[1]); \
F(hw_ibp.address_mask[2]); \
F(hw_ibp.address_mask[3]); \
F(hw_ibp.asid[0]); \
F(hw_ibp.asid[1]); \
F(hw_ibp.asid[2]); \
F(hw_ibp.asid[3]); \
F(hw_ibp.control[0]); \
F(hw_ibp.control[1]); \
F(hw_ibp.control[2]); \
F(hw_ibp.control[3]);
/* Hardware Data Break Point */
#define CVMX_DEBUG_HW_DBP_CONTEXT \
F(hw_dbp.status); \
F(hw_dbp.address[0]); \
F(hw_dbp.address[1]); \
F(hw_dbp.address[2]); \
F(hw_dbp.address[3]); \
F(hw_dbp.address_mask[0]); \
F(hw_dbp.address_mask[1]); \
F(hw_dbp.address_mask[2]); \
F(hw_dbp.address_mask[3]); \
F(hw_dbp.asid[0]); \
F(hw_dbp.asid[1]); \
F(hw_dbp.asid[2]); \
F(hw_dbp.asid[3]); \
F(hw_dbp.control[0]); \
F(hw_dbp.control[1]); \
F(hw_dbp.control[2]); \
F(hw_dbp.control[3]);
/* Snapshot of the TLB (sized for the largest supported chip). */
struct cvmx_debug_tlb_t
{
uint64_t entryhi;
uint64_t pagemask;
uint64_t entrylo[2];
uint64_t reserved;
} tlbs[CVMX_DEBUG_N_TLB_ENTRIES];
#define CVMX_DEBUG_TLB_CONTEXT \
{ int i; \
for (i = 0; i < CVMX_DEBUG_N_TLB_ENTRIES; i++) \
{ \
F(tlbs[i].entryhi); \
F(tlbs[i].pagemask); \
F(tlbs[i].entrylo[0]); \
F(tlbs[i].entrylo[1]); \
} \
}
} cvmx_debug_core_context_t;
/* Expose the struct cvmx_debug_tlb_t tag (declared inside the per-core debug
   context above) as a plain typedef name. */
typedef struct cvmx_debug_tlb_t cvmx_debug_tlb_t;
typedef enum cvmx_debug_comm_type_e
{
COMM_UART,
COMM_REMOTE,
COMM_SIZE
}cvmx_debug_comm_type_t;
/**
 * Command issued to cores sitting in the debug exception handler
 * (stored as a uint32_t in cvmx_debug_state_t::command).
 */
typedef enum
{
    COMMAND_NOP = 0, /**< Core doesn't need to do anything. Just stay in exception handler */
    COMMAND_STEP,    /**< Core needs to perform a single instruction step */
    COMMAND_CONTINUE /**< Core need to start running. Doesn't return until some debug event occurs */
} cvmx_debug_command_t;
/* Debugger state shared by all cores.  Every field in this struct has to be
   uint32_t (it is also stored as raw uint32_t words in
   cvmx_debug_globals_t::state). */
typedef struct
{
    uint32_t known_cores;        /**< Bitmask of cores the debugger knows about -- NOTE(review): confirm */
    uint32_t step_isr;           /**< True if we are going to step into ISR's. */
    uint32_t focus_switch;       /**< Focus can be switched. */
    uint32_t core_finished;      /**< True if a core has finished and not been processed yet. */
    uint32_t command;            /**< Command for all cores (cvmx_debug_command_t) */
    uint32_t step_all;           /**< True if step and continue should affect all cores. False, only the focus core is affected */
    uint32_t focus_core;         /**< Core currently under control of the debugger */
    uint32_t active_cores;       /**< Bitmask of cores that should stop on a breakpoint */
    uint32_t handler_cores;      /**< Bitmask of cores currently running the exception handler */
    uint32_t ever_been_in_debug; /**< True if we have been ever been in the debugger stub at all. */
} __attribute__ ((aligned(sizeof(uint64_t)))) cvmx_debug_state_t;
/* Compile-time assertion (pre-C11 idiom): if cvmx_debug_state_t plus the
   spinlock plus four uint64_t's exceed 128 bytes, the array size becomes -1
   and compilation fails. */
typedef int cvmx_debug_state_t_should_fit_inside_a_cache_block[sizeof(cvmx_debug_state_t)+sizeof(cvmx_spinlock_t)+4*sizeof(uint64_t) > 128 ? -1 : 1];
/**
 * Globals shared by the debug stub across all cores.
 */
typedef struct cvmx_debug_globals_s
{
    uint64_t version;                /* This is always the first element of this struct */
    uint64_t comm_type;              /* cvmx_debug_comm_type_t */
    volatile uint64_t comm_changed;  /* cvmx_debug_comm_type_t+1 when someone wants to change it. */
    volatile uint64_t init_complete; /* Nonzero once initialization finished -- NOTE(review): confirm exact protocol */
    uint32_t tlb_entries;            /* NOTE(review): presumably the number of TLB entries saved per core -- confirm */
    uint32_t state[sizeof(cvmx_debug_state_t)/sizeof(uint32_t)]; /* cvmx_debug_state_t stored as raw words */
    cvmx_spinlock_t lock;            /* Protects this shared structure -- NOTE(review): confirm scope */
    volatile cvmx_debug_core_context_t contextes[CVMX_MAX_CORES]; /* One saved context per core */
} cvmx_debug_globals_t;
/**
 * Decode of the CP0 Debug register used by the EJTAG debug support
 * (see the ejtagver field).  Comment on the ddbl field fixed: it previously
 * repeated the "Store" text from ddbs.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t rsrvd:32;   /**< Unused */
        uint64_t dbd:1;      /**< Indicates whether the last debug exception or
                                  exception in Debug Mode occurred in a branch or
                                  jump delay slot */
        uint64_t dm:1;       /**< Indicates that the processor is operating in Debug
                                  Mode: */
        uint64_t nodcr:1;    /**< Indicates whether the dseg segment is present */
        uint64_t lsnm:1;     /**< Controls access of loads/stores between the dseg
                                  segment and remaining memory when the dseg
                                  segment is present */
        uint64_t doze:1;     /**< Indicates that the processor was in a low-power mode
                                  when a debug exception occurred */
        uint64_t halt:1;     /**< Indicates that the internal processor system bus clock
                                  was stopped when the debug exception occurred */
        uint64_t countdm:1;  /**< Controls or indicates the Count register behavior in
                                  Debug Mode. Implementations can have fixed
                                  behavior, in which case this bit is read-only (R), or
                                  the implementation can allow this bit to control the
                                  behavior, in which case this bit is read/write (R/W).
                                  The reset value of this bit indicates the behavior after
                                  reset, and depends on the implementation.
                                  Encoding of the bit is:
                                  - 0 Count register stopped in Debug Mode Count register is running in Debug
                                  - 1 Mode
                                  This bit is read-only (R) and reads as zero if not implemented. */
        uint64_t ibusep:1;   /**< Indicates if a Bus Error exception is pending from an
                                  instruction fetch. Set when an instruction fetch bus
                                  error event occurs or a 1 is written to the bit by
                                  software. Cleared when a Bus Error exception on an
                                  instruction fetch is taken by the processor. If IBusEP
                                  is set when IEXI is cleared, a Bus Error exception on
                                  an instruction fetch is taken by the processor, and
                                  IBusEP is cleared.
                                  In Debug Mode, a Bus Error exception applies to a
                                  Debug Mode Bus Error exception.
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t mcheckp:1;  /**< Indicates if a Machine Check exception is pending.
                                  Set when a machine check event occurs or a 1 is
                                  written to the bit by software. Cleared when a
                                  Machine Check exception is taken by the processor.
                                  If MCheckP is set when IEXI is cleared, a Machine
                                  Check exception is taken by the processor, and
                                  MCheckP is cleared.
                                  In Debug Mode, a Machine Check exception applies
                                  to a Debug Mode Machine Check exception.
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t cacheep:1;  /**< Indicates if a Cache Error is pending. Set when a
                                  cache error event occurs or a 1 is written to the bit by
                                  software. Cleared when a Cache Error exception is
                                  taken by the processor. If CacheEP is set when IEXI
                                  is cleared, a Cache Error exception is taken by the
                                  processor, and CacheEP is cleared.
                                  In Debug Mode, a Cache Error exception applies to a
                                  Debug Mode Cache Error exception.
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t dbusep:1;   /**< Indicates if a Data Access Bus Error exception is
                                  pending. Set when a data access bus error event
                                  occurs or a 1 is written to the bit by software. Cleared
                                  when a Bus Error exception on data access is taken by
                                  the processor. If DBusEP is set when IEXI is cleared,
                                  a Bus Error exception on data access is taken by the
                                  processor, and DBusEP is cleared.
                                  In Debug Mode, a Bus Error exception applies to a
                                  Debug Mode Bus Error exception.
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t iexi:1;     /**< An Imprecise Error eXception Inhibit (IEXI) controls
                                  exceptions taken due to imprecise error indications.
                                  Set when the processor takes a debug exception or an
                                  exception in Debug Mode occurs. Cleared by
                                  execution of the DERET instruction. Otherwise
                                  modifiable by Debug Mode software.
                                  When IEXI is set, then the imprecise error exceptions
                                  from bus errors on instruction fetches or data
                                  accesses, cache errors, or machine checks are
                                  inhibited and deferred until the bit is cleared.
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t ddbsimpr:1; /**< Indicates that a Debug Data Break Store Imprecise
                                  exception due to a store was the cause of the debug
                                  exception, or that an imprecise data hardware break
                                  due to a store was indicated after another debug
                                  exception occurred. Cleared on exception in Debug
                                  Mode.
                                  - 0 No match of an imprecise data hardware breakpoint on store
                                  - 1 Match of imprecise data hardware breakpoint on store
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t ddblimpr:1; /**< Indicates that a Debug Data Break Load Imprecise
                                  exception due to a load was the cause of the debug
                                  exception, or that an imprecise data hardware break
                                  due to a load was indicated after another debug
                                  exception occurred. Cleared on exception in Debug
                                  Mode.
                                  - 0 No match of an imprecise data hardware breakpoint on load
                                  - 1 Match of imprecise data hardware breakpoint on load
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t ejtagver:3; /**< Provides the EJTAG version.
                                  - 0 Version 1 and 2.0
                                  - 1 Version 2.5
                                  - 2 Version 2.6
                                  - 3-7 Reserved */
        uint64_t dexccode:5; /**< Indicates the cause of the latest exception in Debug
                                  Mode.
                                  The field is encoded as the ExcCode field in the
                                  Cause register for those exceptions that can occur in
                                  Debug Mode (the encoding is shown in MIPS32 and
                                  MIPS64 specifications), with addition of code 30
                                  with the mnemonic CacheErr for cache errors and the
                                  use of code 9 with mnemonic Bp for the SDBBP
                                  instruction.
                                  This value is undefined after a debug exception. */
        uint64_t nosst:1;    /**< Indicates whether the single-step feature controllable
                                  by the SSt bit is available in this implementation:
                                  - 0 Single-step feature available
                                  - 1 No single-step feature available
                                  A minimum number of hardware instruction
                                  breakpoints must be available if no single-step
                                  feature is implemented in hardware. Refer to Section
                                  4.8.1 on page 69 for more information. */
        uint64_t sst:1;      /**< Controls whether single-step feature is enabled:
                                  - 0 No enable of single-step feature
                                  - 1 Single-step feature enabled
                                  This bit is read-only (R) and reads as zero if not
                                  implemented due to no single-step feature (NoSSt is
                                  1). */
        uint64_t rsrvd2:2;   /**< Must be zero */
        uint64_t dint:1;     /**< Indicates that a Debug Interrupt exception occurred.
                                  Cleared on exception in Debug Mode.
                                  - 0 No Debug Interrupt exception
                                  - 1 Debug Interrupt exception
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t dib:1;      /**< Indicates that a Debug Instruction Break exception
                                  occurred. Cleared on exception in Debug Mode.
                                  - 0 No Debug Instruction Break exception
                                  - 1 Debug Instruction Break exception
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t ddbs:1;     /**< Indicates that a Debug Data Break Store exception
                                  occurred on a store due to a precise data hardware
                                  break. Cleared on exception in Debug Mode.
                                  - 0 No Debug Data Break Store Exception
                                  - 1 Debug Data Break Store Exception
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t ddbl:1;     /**< Indicates that a Debug Data Break Load exception
                                  occurred on a load due to a precise data hardware
                                  break. Cleared on exception in Debug Mode.
                                  - 0 No Debug Data Break Load Exception
                                  - 1 Debug Data Break Load Exception
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
        uint64_t dbp:1;      /**< Indicates that a Debug Breakpoint exception
                                  occurred. Cleared on exception in Debug Mode.
                                  - 0 No Debug Breakpoint exception
                                  - 1 Debug Breakpoint exception */
        uint64_t dss:1;      /**< Indicates that a Debug Single Step exception
                                  occurred. Cleared on exception in Debug Mode.
                                  - 0 No debug single-step exception
                                  - 1 Debug single-step exception
                                  This bit is read-only (R) and reads as zero if not
                                  implemented. */
    } s;
} cvmx_debug_register_t;
/**
 * Function table implemented by each debugger transport
 * (presumably one per cvmx_debug_comm_type_t value -- confirm).
 */
typedef struct
{
    void (*init)(void);                  /**< One-time transport initialization */
    void (*install_break_handler)(void); /**< Install the transport's break handler */
    int needs_proxy;                     /**< Nonzero if this transport needs a proxy -- NOTE(review): confirm semantics */
    int (*getpacket)(char *, size_t);    /**< Receive a packet into the buffer of the given size; NOTE(review): confirm return convention */
    int (*putpacket)(char *);            /**< Transmit a packet; NOTE(review): confirm return convention */
    void (*wait_for_resume)(volatile cvmx_debug_core_context_t *, cvmx_debug_state_t); /**< Block until the debugger resumes this core */
    void (*change_core)(int, int);       /**< Focus-core change notification -- NOTE(review): confirm argument order (old, new) */
} cvmx_debug_comm_t;
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_DEBUG_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,122 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Support library for the CN31XX, CN38XX, and CN58XX hardware DFA engine.
*
* <hr>$Revision: 70030 $<hr>
*/
#include "executive-config.h"
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-fau.h"
#include "cvmx-dfa.h"
/**
* Initialize the DFA hardware before use
*/
int cvmx_dfa_initialize(void)
{
cvmx_dfa_difctl_t control;
void *initial_base_address;
cvmx_dfa_state_t initial_state;
if (!octeon_has_feature(OCTEON_FEATURE_DFA))
{
cvmx_dprintf("ERROR: attempting to initialize DFA when no DFA hardware present\n.");
return -1;
}
control.u64 = 0;
control.s.dwbcnt = CVMX_FPA_DFA_POOL_SIZE / 128;
control.s.pool = CVMX_FPA_DFA_POOL;
control.s.size = (CVMX_FPA_DFA_POOL_SIZE - 8) / sizeof(cvmx_dfa_command_t);
CVMX_SYNCWS;
cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
initial_base_address = cvmx_fpa_alloc(CVMX_FPA_DFA_POOL);
initial_state.u64 = 0;
initial_state.s.base_address_div16 = (CAST64(initial_base_address))/16;
cvmx_fau_atomic_write64(CVMX_FAU_DFA_STATE, initial_state.u64);
CVMX_SYNCWS;
cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
return 0;
}
/**
 * Shutdown and cleanup resources used by the DFA.
 *
 * Reads the final instruction-queue state out of the FAU, frees the last
 * queue chunk back to the DFA FPA pool (if one is present), and clears the
 * FAU-held state.  Mirrors cvmx_dfa_initialize().
 */
void cvmx_dfa_shutdown(void)
{
    void *final_base_address;
    cvmx_dfa_state_t final_state;

    CVMX_SYNCWS; /* memory barrier before sampling the shared queue state */
    final_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 0);
    /* Use the s2 view (base_address_div32) so the ticket carry bit is not
       interpreted as part of the address -- make sure the carry is clear. */
    final_base_address = CASTPTR(void, (final_state.s2.base_address_div32 * 32ull));
    if (final_base_address)
    {
        cvmx_fpa_free(final_base_address, CVMX_FPA_DFA_POOL, 0);
    }
    CVMX_SYNCWS;
    final_state.u64 = 0;
    cvmx_fau_atomic_write64(CVMX_FAU_DFA_STATE, final_state.u64);
}
#endif

View File

@ -1,802 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the CN31XX, CN38XX, and CN58XX hardware DFA engine.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_DFA_H__
#define __CVMX_DFA_H__
#include "cvmx-llm.h"
#include "cvmx-wqe.h"
#include "cvmx-fpa.h"
#include "executive-config.h"
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
#include "cvmx-config.h"
#endif
#define ENABLE_DEPRECATED /* Set to enable the old 18/36 bit names */
#ifdef __cplusplus
extern "C" {
#endif
/* Maximum nodes available in a small encoding */
#define CVMX_DFA_NODESM_MAX_NODES ((OCTEON_IS_MODEL(OCTEON_CN31XX)) ? 0x8000 : 0x20000)
#define CVMX_DFA_NODESM_SIZE 512  /* Size of each node for small encoding */
#define CVMX_DFA_NODELG_SIZE 1024 /* Size of each node for large encoding */
#define CVMX_DFA_NODESM_LAST_TERMINAL (CVMX_DFA_NODESM_MAX_NODES-1)
#ifdef ENABLE_DEPRECATED
/* These defines are for compatibility with old code. They are deprecated:
   the 18/36-bit names map onto the small/large encoding names above. */
#define CVMX_DFA_NODE18_SIZE CVMX_DFA_NODESM_SIZE
#define CVMX_DFA_NODE36_SIZE CVMX_DFA_NODELG_SIZE
#define CVMX_DFA_NODE18_MAX_NODES CVMX_DFA_NODESM_MAX_NODES
#define CVMX_DFA_NODE18_LAST_TERMINAL CVMX_DFA_NODESM_LAST_TERMINAL
#endif
/**
* Which type of memory encoding is this graph using. Make sure you setup
* the LLM to match.
*/
typedef enum
{
CVMX_DFA_GRAPH_TYPE_SM = 0,
CVMX_DFA_GRAPH_TYPE_LG = 1,
#ifdef ENABLE_DEPRECATED
CVMX_DFA_GRAPH_TYPE_18b = 0, /* Deprecated */
CVMX_DFA_GRAPH_TYPE_36b = 1 /* Deprecated */
#endif
} cvmx_dfa_graph_type_t;
/**
 * The possible node types.
 */
typedef enum
{
    CVMX_DFA_NODE_TYPE_NORMAL   = 0, /**< Node is a branch */
    CVMX_DFA_NODE_TYPE_MARKED   = 1, /**< Node is marked special */
    CVMX_DFA_NODE_TYPE_TERMINAL = 2  /**< Node is a terminal leaf */
} cvmx_dfa_node_type_t;
/**
 * The possible reasons the DFA stopped processing.
 */
typedef enum
{
    CVMX_DFA_STOP_REASON_DATA_GONE    = 0, /**< DFA ran out of data */
    CVMX_DFA_STOP_REASON_PARITY_ERROR = 1, /**< DFA encountered a memory error */
    CVMX_DFA_STOP_REASON_FULL         = 2, /**< DFA is full */
    CVMX_DFA_STOP_REASON_TERMINAL     = 3  /**< DFA hit a terminal */
} cvmx_dfa_stop_reason_t;
/**
 * This format describes the DFA pointers in small mode.
 * (Comment typos fixed; the w36nrepl_* next_node0 comments previously said
 * "odd" where the even-character field was meant, cf. w32/w36.)
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t mbz        :32;/**< Must be zero */
        uint64_t p1         : 1;/**< Set if next_node1 is odd parity */
        uint64_t next_node1 :15;/**< Next node if an odd character match */
        uint64_t p0         : 1;/**< Set if next_node0 is odd parity */
        uint64_t next_node0 :15;/**< Next node if an even character match */
    } w32;
    struct
    {
        uint64_t mbz        :28;/**< Must be zero */
        uint64_t p1         : 1;/**< Set if next_node1 is odd parity */
        uint64_t next_node1 :17;/**< Next node if an odd character match */
        uint64_t p0         : 1;/**< Set if next_node0 is odd parity */
        uint64_t next_node0 :17;/**< Next node if an even character match */
    } w36;
    struct /**< This structure only applies starting in CN58XX and if DFA_CFG[NRPL_ENA] == 1 and IWORD0[NREPLEN] == 1. */
    {
        uint64_t mbz             :28;/**< Must be zero */
        uint64_t p1              : 1;/**< Set if next_node1 is odd parity */
        uint64_t per_node_repl1  : 1;/**< enable for extra replication for next node (CN58XX) */
        uint64_t next_node_repl1 : 2;/**< extra replication for next node (CN58XX) (if per_node_repl1 is set) */
        uint64_t next_node1      :14;/**< Next node if an odd character match - IWORD3[Msize], if per_node_repl1==1. */
        uint64_t p0              : 1;/**< Set if next_node0 is odd parity */
        uint64_t per_node_repl0  : 1;/**< enable for extra replication for next node (CN58XX) */
        uint64_t next_node_repl0 : 2;/**< extra replication for next node (CN58XX) (if per_node_repl0 is set) */
        uint64_t next_node0      :14;/**< Next node if an even character match - IWORD3[Msize], if per_node_repl0==1. */
    } w36nrepl_en; /**< use when next_node_repl[01] is 1. */
    struct /**< This structure only applies starting in CN58XX and if DFA_CFG[NRPL_ENA] == 1 and IWORD0[NREPLEN] == 1. */
    {
        uint64_t mbz            :28;/**< Must be zero */
        uint64_t p1             : 1;/**< Set if next_node1 is odd parity */
        uint64_t per_node_repl1 : 1;/**< enable for extra replication for next node (CN58XX) */
        uint64_t next_node1     :16;/**< Next node if an odd character match, if per_node_repl1==0. */
        uint64_t p0             : 1;/**< Set if next_node0 is odd parity */
        uint64_t per_node_repl0 : 1;/**< enable for extra replication for next node (CN58XX) */
        uint64_t next_node0     :16;/**< Next node if an even character match, if per_node_repl0==0. */
    } w36nrepl_dis; /**< use when next_node_repl[01] is 0. */
#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz        :32;/**< Must be zero */
        uint64_t p1         : 1;/**< Set if next_node1 is odd parity */
        uint64_t next_node1 :15;/**< Next node if an odd character match */
        uint64_t p0         : 1;/**< Set if next_node0 is odd parity */
        uint64_t next_node0 :15;/**< Next node if an even character match */
    };
#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz        :28;/**< Must be zero */
        uint64_t p1         : 1;/**< Set if next_node1 is odd parity */
        uint64_t next_node1 :17;/**< Next node if an odd character match */
        uint64_t p0         : 1;/**< Set if next_node0 is odd parity */
        uint64_t next_node0 :17;/**< Next node if an even character match */
    };
#else
    /* Other chips don't support the deprecated unnamed unions */
#endif
#endif
} cvmx_dfa_node_next_sm_t;
/**
 * This format describes the DFA pointers in large mode.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t mbz              :32;/**< Must be zero */
        uint64_t ecc              : 7;/**< ECC checksum on the rest of the bits */
        cvmx_dfa_node_type_t type : 2;/**< Node type */
        uint64_t mbz2             : 3;/**< Must be zero */
        uint64_t next_node        :20;/**< Next node */
    } w32;
    struct
    {
        uint64_t mbz              :28;/**< Must be zero */
        uint64_t ecc              : 7;/**< ECC checksum on the rest of the bits */
        cvmx_dfa_node_type_t type : 2;/**< Node type */
        uint64_t extra_bits       : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node_repl   : 2;/**< extra replication for next node (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node        :20;/**< Next node ID, Note, combine with next_node_repl to use as start_node
                                           for continuation, as in cvmx_dfa_node_next_lgb_t. */
    } w36;
#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz              :32;/**< Must be zero */
        uint64_t ecc              : 7;/**< ECC checksum on the rest of the bits */
        cvmx_dfa_node_type_t type : 2;/**< Node type */
        uint64_t mbz2             : 3;/**< Must be zero */
        uint64_t next_node        :20;/**< Next node */
    };
#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz              :28;/**< Must be zero */
        uint64_t ecc              : 7;/**< ECC checksum on the rest of the bits */
        cvmx_dfa_node_type_t type : 2;/**< Node type */
        uint64_t extra_bits       : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node_repl   : 2;/**< extra replication for next node (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node        :20;/**< Next node ID, Note, combine with next_node_repl to use as start_node
                                           for continuation, as in cvmx_dfa_node_next_lgb_t. */
    };
#else
    /* Other chips don't support the deprecated unnamed unions */
#endif
#endif
} cvmx_dfa_node_next_lg_t;
/**
 * This format describes the DFA pointers in large mode, another way
 * (node type split into separate terminal/marked flags, and the next-node
 * ID and replication viewed as one field for continuations).
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t mbz           :32;/**< Must be zero */
        uint64_t ecc           : 7;/**< ECC checksum on the rest of the bits */
        uint64_t type_terminal : 1;/**< Node type */
        uint64_t type_marked   : 1;/**< Node type */
        uint64_t mbz2          : 3;/**< Must be zero */
        uint64_t next_node     :20;/**< Next node */
    } w32;
    struct
    {
        uint64_t mbz                   :28;/**< Must be zero */
        uint64_t ecc                   : 7;/**< ECC checksum on the rest of the bits */
        uint64_t type_terminal         : 1;/**< Node type */
        uint64_t type_marked           : 1;/**< Node type */
        uint64_t extra_bits            : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node_id_and_repl :22;/**< Next node ID (and repl for PASS3/CN58XX or repl=0 if not),
                                                use this as start node for continuation. */
    } w36;
#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz           :32;/**< Must be zero */
        uint64_t ecc           : 7;/**< ECC checksum on the rest of the bits */
        uint64_t type_terminal : 1;/**< Node type */
        uint64_t type_marked   : 1;/**< Node type */
        uint64_t mbz2          : 3;/**< Must be zero */
        uint64_t next_node     :20;/**< Next node */
    };
#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz                   :28;/**< Must be zero */
        uint64_t ecc                   : 7;/**< ECC checksum on the rest of the bits */
        uint64_t type_terminal         : 1;/**< Node type */
        uint64_t type_marked           : 1;/**< Node type */
        uint64_t extra_bits            : 5;/**< bits copied to report (PASS3/CN58XX), Must be zero previously */
        uint64_t next_node_id_and_repl :22;/**< Next node ID (and repl for PASS3/CN58XX or repl=0 if not),
                                                use this as start node for continuation. */
    };
#else
    /* Other chips don't support the deprecated unnamed unions */
#endif
#endif
} cvmx_dfa_node_next_lgb_t;
/**
 * Raw read view of a DFA pointer word: parity/XOR bit plus the LLM data
 * payload (32-bit or 36-bit view).
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t mbz      :27;/**< Must be zero */
        uint64_t x0       : 1;/**< XOR of the rest of the bits */
        uint64_t reserved : 4;/**< Must be zero */
        uint64_t data     :32;/**< LLM Data */
    } w32;
    struct
    {
        uint64_t mbz  :27;/**< Must be zero */
        uint64_t x0   : 1;/**< XOR of the rest of the bits */
        uint64_t data :36;/**< LLM Data */
    } w36;
#if defined(ENABLE_DEPRECATED) && !OCTEON_IS_COMMON_BINARY()
#if CVMX_COMPILED_FOR(OCTEON_CN31XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz      :27;/**< Must be zero */
        uint64_t x0       : 1;/**< XOR of the rest of the bits */
        uint64_t reserved : 4;/**< Must be zero */
        uint64_t data     :32;/**< LLM Data */
    };
#elif CVMX_COMPILED_FOR(OCTEON_CN38XX)
    struct /**< @deprecated unnamed reference to members */
    {
        uint64_t mbz  :27;/**< Must be zero */
        uint64_t x0   : 1;/**< XOR of the rest of the bits */
        uint64_t data :36;/**< LLM Data */
    };
#else
    /* Other chips don't support the deprecated unnamed unions */
#endif
#endif
} cvmx_dfa_node_next_read_t;
/**
 * This structure defines the data format in the low-latency memory:
 * one 64-bit word viewable through each of the pointer encodings above.
 */
typedef union
{
    uint64_t u64;
    cvmx_dfa_node_next_sm_t   sm;   /**< This format describes the DFA pointers in small mode */
    cvmx_dfa_node_next_lg_t   lg;   /**< This format describes the DFA pointers in large mode */
    cvmx_dfa_node_next_lgb_t  lgb;  /**< This format describes the DFA pointers in large mode, another way */
    cvmx_dfa_node_next_read_t read; /**< Raw read view of the pointer word */
#ifdef ENABLE_DEPRECATED
    cvmx_dfa_node_next_sm_t   s18;  /**< Deprecated alias for sm */
    cvmx_dfa_node_next_lg_t   s36;  /**< Deprecated alias for lg */
    cvmx_dfa_node_next_lgb_t  s36b; /**< Deprecated alias for lgb */
#endif
} cvmx_dfa_node_next_t;
/**
 * These structures define a DFA instruction: four 64-bit instruction words
 * (IWORD0..IWORD3) as submitted to the DFA instruction queue.
 */
typedef union
{
    uint64_t u64[4];
    uint32_t u32; /* NOTE(review): aliases only the first 32 bits of WORD 0 -- confirm intended use */
    struct
    {
        // WORD 0
        uint64_t gxor                       : 8;  /**< Graph XOR value (PASS3/CN58XX), Must be zero for other chips
                                                       or if DFA_CFG[GXOR_ENA] == 0. */
        uint64_t nxoren                     : 1;  /**< Node XOR enable (PASS3/CN58XX), Must be zero for other chips
                                                       or if DFA_CFG[NXOR_ENA] == 0. */
        uint64_t nreplen                    : 1;  /**< Node Replication mode enable (PASS3/CN58XX), Must be zero for other chips
                                                       or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0. */
#if 0
        /* Disabled alternative layout: start node split into snrepl + ID. */
        uint64_t snrepl                     : 2;  /**< Start_Node Replication (PASS3/CN58XX), Must be zero for other chips
                                                       or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0 or IWORD0[NREPLEN] == 0. */
        uint64_t start_node_id              : 20; /**< Node to start the walk from */
#else
        uint64_t start_node                 : 22; /**< Node to start the walk from, includes ID and snrepl, see notes above. */
#endif
        uint64_t unused02                   : 2;  /**< Must be zero */
        cvmx_llm_replication_t replication  : 2;  /**< Type of memory replication to use */
        uint64_t unused03                   : 3;  /**< Must be zero */
        cvmx_dfa_graph_type_t type          : 1;  /**< Type of graph */
        uint64_t unused04                   : 4;  /**< Must be zero */
        uint64_t base                       : 20; /**< All tables start on 1KB boundary */
        // WORD 1
        uint64_t input_length               : 16; /**< In bytes, # pointers in gather case */
        uint64_t use_gather                 : 1;  /**< Set to use gather */
        uint64_t no_L2_alloc                : 1;  /**< Set to disable loading of the L2 cache by the DFA */
        uint64_t full_block_write           : 1;  /**< If set, HW can write entire cache blocks @ result_ptr */
        uint64_t little_endian              : 1;  /**< Affects only packet data, not instruction, gather list, or result */
        uint64_t unused1                    : 8;  /**< Must be zero */
        uint64_t data_ptr                   : 36; /**< Either directly points to data or the gather list. If gather list,
                                                       data_ptr<2:0> must be zero (i.e. 8B aligned) */
        // WORD 2
        uint64_t max_results                : 16; /**< in 64-bit quantities, mbz for store */
        uint64_t unused2                    : 12; /**< Must be zero */
        uint64_t result_ptr                 : 36; /**< must be 128 byte aligned */
        // WORD 3
        uint64_t tsize                      : 8;  /**< tsize*256 is the number of terminal nodes for GRAPH_TYPE_SM */
        uint64_t msize                      : 16; /**< msize is the number of marked nodes for GRAPH_TYPE_SM */
        uint64_t unused3                    : 4;  /**< Must be zero */
        uint64_t wq_ptr                     : 36; /**< 0 for no work queue entry creation */
    } s;
} cvmx_dfa_command_t;
/**
 * Format of the first result word written by the hardware.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        cvmx_dfa_stop_reason_t reas : 2;/**< Reason the DFA stopped */
        uint64_t mbz                :44;/**< Zero */
        uint64_t last_marked        : 1;/**< Set if the last entry written is marked */
        uint64_t done               : 1;/**< Set to 1 when the DFA completes */
        uint64_t num_entries        :16;/**< Number of result words written */
    } s;
} cvmx_dfa_result0_t;
/**
 * Format of the second result word and subsequent result words written by
 * the hardware.  The s2 view merges current node ID and replication into
 * one field suitable for use as a continuation start_node.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t byte_offset     : 16; /**< Number of bytes consumed */
        uint64_t extra_bits_high : 4;  /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
                                            then set to <27:24> of the last next-node pointer. Else set to 0x0. */
        uint64_t prev_node       : 20; /**< Index of the previous node */
        uint64_t extra_bits_low  : 2;  /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
                                            then set to <23:22> of the last next-node pointer. Else set to 0x0. */
        uint64_t next_node_repl  : 2;  /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1, then set
                                            to next_node_repl (<21:20>) of the last next-node pointer. Else set to 0x0. */
        uint64_t current_node    : 20; /**< Index of the current node */
    } s;
    struct
    {
        uint64_t byte_offset     : 16; /**< Number of bytes consumed */
        uint64_t extra_bits_high : 4;  /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
                                            then set to <27:24> of the last next-node pointer. Else set to 0x0. */
        uint64_t prev_node       : 20; /**< Index of the previous node */
        uint64_t extra_bits_low  : 2;  /**< If PASS3 or CN58XX and DFA_CFG[NRPL_ENA] == 1 and IWORD0[Ty] == 1,
                                            then set to <23:22> of the last next-node pointer. Else set to 0x0. */
        uint64_t curr_id_and_repl: 22; /**< Use this as start_node for continuation. */
    } s2;
} cvmx_dfa_result1_t;
/**
 * Abstract DFA graph.
 */
typedef struct
{
    cvmx_llm_replication_t replication; /**< Level of memory replication to use. Must match the LLM setup */
    cvmx_dfa_graph_type_t type;         /**< Type of graph */
    uint64_t base_address;              /**< LLM start address of the graph */
    union {
        struct {
            uint64_t gxor          : 8;  /**< Graph XOR value (PASS3/CN58XX), Must be zero for other chips
                                              or if DFA_CFG[GXOR_ENA] == 0. */
            uint64_t nxoren        : 1;  /**< Node XOR enable (PASS3/CN58XX), Must be zero for other chips
                                              or if DFA_CFG[NXOR_ENA] == 0. */
            uint64_t nreplen       : 1;  /**< Node Replication mode enable (PASS3/CN58XX), Must be zero for other chips
                                              or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0. */
            uint64_t snrepl        : 2;  /**< Start_Node Replication (PASS3/CN58XX), Must be zero for other chips
                                              or if DFA_CFG[NRPL_ENA] == 0 or IWORD0[Ty] == 0 or IWORD0[NREPLEN] == 0.*/
            uint64_t start_node_id : 20; /**< Start node index for the root of the graph */
        };
        uint32_t start_node; /**< Start node index for the root of the graph, incl. snrepl (PASS3/CN58XX)
                                  NOTE: for backwards compatibility this name includes the
                                  gxor, nxoren, nreplen, and snrepl fields which will all be
                                  zero in applications existing before the introduction of these
                                  fields, so that existing applications do not need to change. */
    };
    int num_terminal_nodes; /**< Number of terminal nodes in the graph. Only needed for small graphs. */
    int num_marked_nodes;   /**< Number of marked nodes in the graph. Only needed for small graphs. */
} cvmx_dfa_graph_t;
/**
 * DFA internal global state -- stored in 8 bytes of FAU.
 *
 * Implements a ticket lock plus the current instruction-queue write
 * position, all packed into one 64-bit FAU word so a single atomic
 * fetch-and-add can take a ticket and read the whole state.
 */
typedef union
{
    uint64_t u64;
    struct {
#define CVMX_DFA_STATE_TICKET_BIT_POS 16
#ifdef __BIG_ENDIAN_BITFIELD
        // NOTE: must clear LSB of base_address_div16 due to ticket overflow
        uint32_t base_address_div16; /**< Current DFA instruction queue chunk base address/16 (clear LSB). */
        uint8_t ticket_loops; /**< bits [15:8] of total number of tickets requested. */
        uint8_t ticket; /**< bits [7:0] of total number of tickets requested (current ticket held). */
        // NOTE: index and now_serving are written together
        uint8_t now_serving; /**< current ticket being served (or ready to be served). */
        uint8_t index; /**< index into current chunk: (base_address_div16*16)[index] = next entry. */
#else // NOTE: little endian mode probably won't work
        uint8_t index;
        uint8_t now_serving;
        uint8_t ticket;
        uint8_t ticket_loops;
        uint32_t base_address_div16;
#endif
    } s;
    struct { // a bitfield version of the same thing to extract base address while clearing carry.
#ifdef __BIG_ENDIAN_BITFIELD
        uint64_t base_address_div32 : 31; /**< Current DFA instruction queue chunk base address/32. */
        uint64_t carry : 1; /**< Carry out from total_tickets. */
        uint64_t total_tickets : 16; /**< Total tickets. */
        uint64_t now_serving : 8 ; /**< current ticket being served (or ready to be served). */
        uint64_t index : 8 ; /**< index into current chunk. */
#else // NOTE: little endian mode probably won't work
        uint64_t index : 8 ;
        uint64_t now_serving : 8 ;
        uint64_t total_tickets : 16;
        uint64_t carry : 1;
        uint64_t base_address_div32 : 31;
#endif
    } s2;
} cvmx_dfa_state_t;
/* CSR typedefs have been moved to cvmx-dfa-defs.h */
/**
* Write a small node edge to LLM.
*
* @param graph Graph to modify
* @param source_node
* Source node for this edge
* @param match_index
* Index into the node edge table. This is the match character/2.
* @param destination_node0
* Destination if the character matches (match_index*2).
* @param destination_node1
* Destination if the character matches (match_index*2+1).
*/
static inline void cvmx_dfa_write_edge_sm(const cvmx_dfa_graph_t *graph,
                                          uint64_t source_node, uint64_t match_index,
                                          uint64_t destination_node0, uint64_t destination_node1)
{
    cvmx_dfa_node_next_t edge;
    cvmx_llm_address_t where;

    /* Each edge pair is 4 bytes past the small node's base in LLM. */
    where.u64 = graph->base_address + source_node * CVMX_DFA_NODESM_SIZE + match_index * 4;
    edge.u64 = 0;
    if (!OCTEON_IS_MODEL(OCTEON_CN31XX))
    {
        /* 36-bit wide LLM word layout. */
        edge.sm.w36.next_node0 = destination_node0;
        edge.sm.w36.p0 = cvmx_llm_parity(destination_node0);
        edge.sm.w36.next_node1 = destination_node1;
        edge.sm.w36.p1 = cvmx_llm_parity(destination_node1);
    }
    else
    {
        /* CN31XX uses the 32-bit wide LLM word layout. */
        edge.sm.w32.next_node0 = destination_node0;
        edge.sm.w32.p0 = cvmx_llm_parity(destination_node0);
        edge.sm.w32.next_node1 = destination_node1;
        edge.sm.w32.p1 = cvmx_llm_parity(destination_node1);
    }
    cvmx_llm_write36(where, edge.u64, 0);
}
#ifdef ENABLE_DEPRECATED
#define cvmx_dfa_write_edge18 cvmx_dfa_write_edge_sm
#endif
/**
* Write a large node edge to LLM.
*
* @param graph Graph to modify
* @param source_node
* Source node for this edge
* @param match Character to match before taking this edge.
* @param destination_node
* Destination node of the edge.
* @param destination_type
* Node type at the end of this edge.
*/
static inline void cvmx_dfa_write_node_lg(const cvmx_dfa_graph_t *graph,
                                          uint64_t source_node, unsigned char match,
                                          uint64_t destination_node, cvmx_dfa_node_type_t destination_type)
{
    cvmx_dfa_node_next_t edge;
    cvmx_llm_address_t where;

    /* Each large-node edge is 4 bytes, indexed directly by match char. */
    where.u64 = graph->base_address + source_node * CVMX_DFA_NODELG_SIZE + (uint64_t)match * 4;
    edge.u64 = 0;
    if (!OCTEON_IS_MODEL(OCTEON_CN31XX))
    {
        /* 36-bit wide LLM word layout; ECC covers the assembled word. */
        edge.lg.w36.type = destination_type;
        edge.lg.w36.next_node = destination_node;
        edge.lg.w36.ecc = cvmx_llm_ecc(edge.u64);
    }
    else
    {
        /* CN31XX uses the 32-bit wide LLM word layout. */
        edge.lg.w32.type = destination_type;
        edge.lg.w32.next_node = destination_node;
        edge.lg.w32.ecc = cvmx_llm_ecc(edge.u64);
    }
    cvmx_llm_write36(where, edge.u64, 0);
}
#ifdef ENABLE_DEPRECATED
#define cvmx_dfa_write_node36 cvmx_dfa_write_node_lg
#endif
/**
* Ring the DFA doorbell telling it that new commands are
* available.
*
* @param num_commands
* Number of new commands
*/
static inline void cvmx_dfa_write_doorbell(uint64_t num_commands)
{
    /* Flush pending memory writes so the DFA sees the queued commands
       before the doorbell CSR write lands. */
    CVMX_SYNCWS;
    cvmx_write_csr(CVMX_DFA_DBELL, num_commands);
}
/**
* @INTERNAL
* Write a new command to the DFA. Calls to this function
* are internally synchronized across all processors, and
* the doorbell is rung during this function.
*
* @param command Command to write
*/
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
static inline void __cvmx_dfa_write_command(cvmx_dfa_command_t *command)
{
    cvmx_dfa_state_t cvmx_dfa_state;
    uint64_t my_ticket; // needs to wrap to 8 bits
    uint64_t index;
    cvmx_dfa_command_t *head;
    CVMX_PREFETCH0(command);
    // Take a ticket: the fetch-and-add atomically increments the ticket
    // field in the shared FAU word and returns the whole prior state.
    cvmx_dfa_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 1ull<<CVMX_DFA_STATE_TICKET_BIT_POS);
    my_ticket = cvmx_dfa_state.s.ticket;
    // see if it is our turn
    while (my_ticket != cvmx_dfa_state.s.now_serving) {
        int delta = my_ticket - cvmx_dfa_state.s.now_serving;
        if (delta < 0) delta += 256; // tickets are 8 bits wide and wrap
        cvmx_wait(10*delta); // reduce polling load on system
        cvmx_dfa_state.u64 = cvmx_fau_fetch_and_add64(CVMX_FAU_DFA_STATE, 0); // poll for my_ticket==now_serving
    }
    // compute index and instruction queue head pointer
    index = cvmx_dfa_state.s.index;
    // NOTE: the DFA only supports 36-bit addressing
    head = &((CASTPTR(cvmx_dfa_command_t, (cvmx_dfa_state.s2.base_address_div32 * 32ull))[index]));
    head = (cvmx_dfa_command_t*)cvmx_phys_to_ptr(CAST64(head)); // NOTE: since we are not storing bit 63 of address, we must set it now
    // copy the command to the instruction queue
    *head++ = *command;
    // If the chunk is full, link in a fresh FPA buffer as the next chunk.
    if (cvmx_unlikely((++index >= ((CVMX_FPA_DFA_POOL_SIZE-8)/sizeof(cvmx_dfa_command_t))))) {
        uint64_t *new_base = (uint64_t*)cvmx_fpa_alloc(CVMX_FPA_DFA_POOL); // could make this async
        if (new_base) {
            // put the link into the instruction queue's "Next Chunk Buffer Ptr"
            *(uint64_t *)head = cvmx_ptr_to_phys(new_base);
            // update our state (note 32-bit write to not disturb other fields)
            cvmx_fau_atomic_write32((cvmx_fau_reg_32_t)(CVMX_FAU_DFA_STATE + (CAST64(&cvmx_dfa_state.s.base_address_div16)-CAST64(&cvmx_dfa_state))),
                                    (CAST64(new_base))/16);
        }
        else {
            cvmx_dprintf("__cvmx_dfa_write_command: Out of memory. Expect crashes.\n");
        }
        index=0;
    }
    cvmx_dfa_write_doorbell(1);
    // Release the lock: update index and now_serving together in the DFA
    // state FAU location (NOTE: this write16 updates two 8-bit values.)
    // NOTE: my_ticket+1 carry out is lost due to write16 and index has already been wrapped to fit in uint8.
    cvmx_fau_atomic_write16((cvmx_fau_reg_16_t)(CVMX_FAU_DFA_STATE+(CAST64(&cvmx_dfa_state.s.now_serving) - CAST64(&cvmx_dfa_state))),
                            ((my_ticket+1)<<8) | index);
}
/**
* Submit work to the DFA units for processing
*
* @param graph Graph to process
* @param start_node
* The node to start (or continue) walking from
includes start_node_id and snrepl (PASS3/CN58XX), but gxor,
* nxoren, and nreplen are taken from the graph structure
* @param input The input to match against
* @param input_length
* The length of the input in bytes
* @param use_gather
* The input and input_length are of a gather list
* @param is_little_endian
* Set to 1 if the input is in little endian format and must
* be swapped before compare.
* @param result Location the DFA should put the results in. This must be
* an area sized in multiples of a cache line.
* @param max_results
* The maximum number of 64-bit result1 words after result0.
* That is, "size of the result area in 64-bit words" - 1.
* max_results must be at least 1.
* @param work Work queue entry to submit when DFA completes. Can be NULL.
*/
static inline void cvmx_dfa_submit(const cvmx_dfa_graph_t *graph, int start_node,
                                   void *input, int input_length, int use_gather, int is_little_endian,
                                   cvmx_dfa_result0_t *result, int max_results, cvmx_wqe_t *work)
{
    cvmx_dfa_command_t command;
    /* Make sure the result's first 64bit word is zero so we can tell when the
       DFA is done. */
    result->u64 = 0;
    // WORD 0: graph identity and location
    command.u64[0] = 0;
    command.s.gxor = graph->gxor; // (PASS3/CN58XX)
    command.s.nxoren = graph->nxoren; // (PASS3/CN58XX)
    command.s.nreplen = graph->nreplen; // (PASS3/CN58XX)
    command.s.start_node = start_node; // includes snrepl (PASS3/CN58XX)
    command.s.replication = graph->replication;
    command.s.type = graph->type;
    command.s.base = graph->base_address>>10; // base is stored in 1KB units
    // WORD 1: input description
    command.u64[1] = 0;
    command.s.input_length = input_length;
    command.s.use_gather = use_gather;
    command.s.no_L2_alloc = 0;
    command.s.full_block_write = 1;
    command.s.little_endian = is_little_endian;
    command.s.data_ptr = cvmx_ptr_to_phys(input);
    // WORD 2: result buffer
    command.u64[2] = 0;
    command.s.max_results = max_results;
    command.s.result_ptr = cvmx_ptr_to_phys(result);
    // WORD 3: small-graph sizing and completion work queue entry
    command.u64[3] = 0;
    if (graph->type == CVMX_DFA_GRAPH_TYPE_SM)
    {
        command.s.tsize = (graph->num_terminal_nodes + 255) / 256;
        command.s.msize = graph->num_marked_nodes;
    }
    command.s.wq_ptr = cvmx_ptr_to_phys(work);
    __cvmx_dfa_write_command(&command); // NOTE: this does synchronization and rings doorbell
}
#endif
/**
 * DFA gather list element.
 *
 * One entry of a scatter/gather list passed as DFA input when the
 * command's use_gather flag is set.
 */
typedef struct {
    uint64_t length : 16; /**< length of piece of data at addr */
    uint64_t reserved : 12; /**< reserved, set to 0 */
    uint64_t addr : 36; /**< pointer to piece of data */
} cvmx_dfa_gather_entry_t;
/**
 * Check if a DFA has completed processing.
 *
 * Non-blocking poll; safe to call repeatedly while waiting.
 *
 * @param result_ptr Result area the DFA is using
 * @return Non zero if the DFA is done
 */
static inline uint64_t cvmx_dfa_is_done(cvmx_dfa_result0_t *result_ptr)
{
    /* DFA sets the first result 64bit word to non zero when it's done.
       The volatile cast forces a fresh read each call. */
    return ((volatile cvmx_dfa_result0_t *)result_ptr)->s.done;
}
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
/**
* Initialize the DFA hardware before use
* Returns 0 on success, -1 on failure
*/
int cvmx_dfa_initialize(void);
/**
* Shutdown and cleanup resources used by the DFA
*/
void cvmx_dfa_shutdown(void);
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_DFA_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,553 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the PCI / PCIe DMA engines. These are only available
* on chips with PCI / PCIe.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/octeon-model.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-cmd-queue.h>
#include <asm/octeon/cvmx-dma-engine.h>
#include <asm/octeon/octeon-feature.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-npei-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-cmd-queue.h"
#include "cvmx-dma-engine.h"
#include "cvmx-helper-cfg.h"
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
* Return the number of DMA engines supported by this chip
*
* @return Number of DMA engines
*/
int cvmx_dma_engine_get_num(void)
{
    int count;

    /* The engine count depends on the chip's host-interface block. */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
        count = OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) ? 4 : 5;
    else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
        count = 8;
    else
        count = 2;
    return count;
}
/**
* Initialize the DMA engines for use
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_initialize(void)
{
    int engine;
    /* Give every engine its own command queue backed by the output buffer
       pool, then point the hardware's instruction-buffer start address at
       that queue's first chunk. */
    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        cvmx_cmd_queue_result_t result;
        result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DMA(engine),
                                           0, CVMX_FPA_OUTPUT_BUFFER_POOL,
                                           CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
        if (result != CVMX_CMD_QUEUE_SUCCESS)
            return -1;
        if (octeon_has_feature(OCTEON_FEATURE_NPEI))
        {
            cvmx_npei_dmax_ibuff_saddr_t dmax_ibuff_saddr;
            dmax_ibuff_saddr.u64 = 0;
            /* saddr is a 128-byte-aligned physical address (stored >>7) */
            dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
            cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), dmax_ibuff_saddr.u64);
        }
        else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
        {
            cvmx_dpi_dmax_ibuff_saddr_t dpi_dmax_ibuff_saddr;
            dpi_dmax_ibuff_saddr.u64 = 0;
            dpi_dmax_ibuff_saddr.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8; /* chunk size in 8-byte words */
            dpi_dmax_ibuff_saddr.s.saddr = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine))) >> 7;
            cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), dpi_dmax_ibuff_saddr.u64);
        }
        else
        {
            /* Legacy PCI (NPI): only two queues, high and low priority */
            uint64_t address = cvmx_ptr_to_phys(cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DMA(engine)));
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, address);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, address);
        }
    }
    /* Global DMA control setup, per host-interface block */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_dma_control_t dma_control;
        dma_control.u64 = 0;
        if (cvmx_dma_engine_get_num() >= 5)
            dma_control.s.dma4_enb = 1;
        dma_control.s.dma3_enb = 1;
        dma_control.s.dma2_enb = 1;
        dma_control.s.dma1_enb = 1;
        dma_control.s.dma0_enb = 1;
        dma_control.s.o_mode = 1; /* Pull NS and RO from this register, not the pointers */
        //dma_control.s.dwb_denb = 1;
        //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
        dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
        dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
        cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
        /* As a workaround for errata PCIE-811 we only allow a single
            outstanding DMA read over PCIe at a time. This limits performance,
            but works in all cases. If you need higher performance, remove
            this code and implement the more complicated workaround documented
            in the errata. This only affects CN56XX pass 2.0 chips */
        if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_0))
        {
            cvmx_npei_dma_pcie_req_num_t pcie_req_num;
            pcie_req_num.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM);
            pcie_req_num.s.dma_cnt = 1;
            cvmx_write_csr(CVMX_PEXP_NPEI_DMA_PCIE_REQ_NUM, pcie_req_num.u64);
        }
    }
    else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
    {
        cvmx_dpi_engx_buf_t dpi_engx_buf;
        cvmx_dpi_dma_engx_en_t dpi_dma_engx_en;
        cvmx_dpi_dma_control_t dma_control;
        cvmx_dpi_ctl_t dpi_ctl;
        /* Give engine 0-4 1KB, and 5 3KB. This gives the packet engines better
            performance. Total must not exceed 8KB */
        dpi_engx_buf.u64 = 0;
        dpi_engx_buf.s.blks = 2;
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(0), dpi_engx_buf.u64);
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(1), dpi_engx_buf.u64);
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(2), dpi_engx_buf.u64);
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(3), dpi_engx_buf.u64);
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(4), dpi_engx_buf.u64);
        dpi_engx_buf.s.blks = 6;
        cvmx_write_csr(CVMX_DPI_ENGX_BUF(5), dpi_engx_buf.u64);
        dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
        dma_control.s.pkt_hp = 1;
        dma_control.s.pkt_en = 1;
        dma_control.s.dma_enb = 0x1f;
        dma_control.s.dwb_denb = cvmx_helper_cfg_opt_get(CVMX_HELPER_CFG_OPT_USE_DWB);
        dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
        dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
        dma_control.s.o_mode = 1;
        cvmx_write_csr(CVMX_DPI_DMA_CONTROL, dma_control.u64);
        /* When dma_control[pkt_en] = 1, engine 5 is used for packets and is not
            available for DMA. */
        dpi_dma_engx_en.u64 = cvmx_read_csr(CVMX_DPI_DMA_ENGX_EN(5));
        dpi_dma_engx_en.s.qen = 0;
        cvmx_write_csr(CVMX_DPI_DMA_ENGX_EN(5), dpi_dma_engx_en.u64);
        dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
        dpi_ctl.s.en = 1;
        cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);
    }
    else
    {
        cvmx_npi_dma_control_t dma_control;
        dma_control.u64 = 0;
        //dma_control.s.dwb_denb = 1;
        //dma_control.s.dwb_ichk = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/128;
        dma_control.s.o_add1 = 1;
        dma_control.s.fpa_que = CVMX_FPA_OUTPUT_BUFFER_POOL;
        dma_control.s.hp_enb = 1;
        dma_control.s.lp_enb = 1;
        dma_control.s.csize = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE/8;
        cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
    }
    return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_dma_engine_initialize);
#endif
/**
* Shutdown all DMA engines. The engines must be idle when this
* function is called.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_shutdown(void)
{
    int engine;
    /* Refuse to shut down while any engine still has queued commands. */
    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        if (cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DMA(engine)))
        {
            cvmx_dprintf("ERROR: cvmx_dma_engine_shutdown: Engine not idle.\n");
            return -1;
        }
    }
    /* Disable all engines; the trailing CSR read forces the disable write
       to complete before we tear down the queues. */
    if (octeon_has_feature(OCTEON_FEATURE_NPEI))
    {
        cvmx_npei_dma_control_t dma_control;
        dma_control.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
        if (cvmx_dma_engine_get_num() >= 5)
            dma_control.s.dma4_enb = 0;
        dma_control.s.dma3_enb = 0;
        dma_control.s.dma2_enb = 0;
        dma_control.s.dma1_enb = 0;
        dma_control.s.dma0_enb = 0;
        cvmx_write_csr(CVMX_PEXP_NPEI_DMA_CONTROL, dma_control.u64);
        /* Make sure the disable completes */
        cvmx_read_csr(CVMX_PEXP_NPEI_DMA_CONTROL);
    }
    else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
    {
        cvmx_dpi_dma_control_t dma_control;
        dma_control.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
        dma_control.s.dma_enb = 0;
        cvmx_write_csr(CVMX_DPI_DMA_CONTROL, dma_control.u64);
        /* Make sure the disable completes */
        cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
    }
    else
    {
        cvmx_npi_dma_control_t dma_control;
        dma_control.u64 = cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
        dma_control.s.hp_enb = 0;
        dma_control.s.lp_enb = 0;
        cvmx_write_csr(CVMX_NPI_DMA_CONTROL, dma_control.u64);
        /* Make sure the disable completes */
        cvmx_read_csr(CVMX_NPI_DMA_CONTROL);
    }
    /* Free the command queues and clear the hardware's buffer pointers. */
    for (engine=0; engine < cvmx_dma_engine_get_num(); engine++)
    {
        cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DMA(engine));
        if (octeon_has_feature(OCTEON_FEATURE_NPEI))
            cvmx_write_csr(CVMX_PEXP_NPEI_DMAX_IBUFF_SADDR(engine), 0);
        else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
            cvmx_write_csr(CVMX_DPI_DMAX_IBUFF_SADDR(engine), 0);
        else
        {
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_IBUFF_SADDR, 0);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_IBUFF_SADDR, 0);
        }
    }
    return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_dma_engine_shutdown);
#endif
/**
* Submit a series of DMA command to the DMA engines.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header Command header
* @param num_buffers
* The number of data pointers
* @param buffers Command data pointers
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[])
{
    cvmx_cmd_queue_result_t result;
    int cmd_count = 1;
    /* NOTE(review): VLA sized by caller-supplied num_buffers -- callers are
       expected to keep this small (cvmx_dma_engine_transfer uses <= 32);
       confirm no untrusted caller can pass a large value. */
    uint64_t cmds[num_buffers + 1];
    if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
    {
        /* Check for Errata PCIe-604 */
        if ((header.s.nfst > 11) || (header.s.nlst > 11) || (header.s.nfst + header.s.nlst > 15))
        {
            cvmx_dprintf("DMA engine submit too large\n");
            return -1;
        }
    }
    /* Flatten the header plus pointer words into one contiguous command. */
    cmds[0] = header.u64;
    while (num_buffers--)
    {
        cmds[cmd_count++] = buffers->u64;
        buffers++;
    }
    /* Due to errata PCIE-13315, it is necessary to have the queue lock while we
        ring the doorbell for the DMA engines. This prevents doorbells from
        possibly arriving out of order with respect to the command queue
        entries */
    __cvmx_cmd_queue_lock(CVMX_CMD_QUEUE_DMA(engine), __cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
    result = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DMA(engine), 0, cmd_count, cmds);
    /* This SYNCWS is needed since the command queue didn't do locking, which
        normally implies the SYNCWS. This one makes sure the command queue
        updates make it to L2 before we ring the doorbell */
    CVMX_SYNCWS;
    /* A syncw isn't needed here since the command queue did one as part of the queue unlock */
    if (cvmx_likely(result == CVMX_CMD_QUEUE_SUCCESS))
    {
        if (octeon_has_feature(OCTEON_FEATURE_NPEI))
        {
            /* DMA doorbells are 32bit writes in little endian space. This means we need to xor the address with 4 */
            cvmx_write64_uint32(CVMX_PEXP_NPEI_DMAX_DBELL(engine)^4, cmd_count);
        }
        else if (octeon_has_feature(OCTEON_FEATURE_PCIE))
            cvmx_write_csr(CVMX_DPI_DMAX_DBELL(engine), cmd_count);
        else
        {
            if (engine)
                cvmx_write_csr(CVMX_NPI_HIGHP_DBELL, cmd_count);
            else
                cvmx_write_csr(CVMX_NPI_LOWP_DBELL, cmd_count);
        }
    }
    /* Here is the unlock for the above errata workaround */
    __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_get_state(CVMX_CMD_QUEUE_DMA(engine)));
    return result;
}
/**
* @INTERNAL
* Function used by cvmx_dma_engine_transfer() to build the
* internal address list.
*
* @param buffers Location to store the list
* @param address Address to build list for
* @param size Length of the memory pointed to by address
*
* @return Number of internal pointer chunks created
*/
static inline int __cvmx_dma_engine_build_internal_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
{
    int count = 0;
    int remaining = size;

    while (remaining)
    {
        /* Hardware limit: one internal pointer covers at most 8191 bytes. */
        int piece = (remaining > 8191) ? 8191 : remaining;
        buffers[count].u64 = 0;
        buffers[count].internal.size = piece;
        buffers[count].internal.addr = address;
        address += piece;
        remaining -= piece;
        count++;
    }
    return count;
}
/**
* @INTERNAL
* Function used by cvmx_dma_engine_transfer() to build the PCI / PCIe address
* list.
* @param buffers Location to store the list
* @param address Address to build list for
* @param size Length of the memory pointed to by address
*
* @return Number of PCI / PCIe address chunks created. The number of words used
* will be segments + (segments-1)/4 + 1.
*/
static inline int __cvmx_dma_engine_build_external_pointers(cvmx_dma_engine_buffer_t *buffers, uint64_t address, int size)
{
    /* Hardware limit: one PCI/PCIe pointer covers at most 65535 bytes. */
    const int MAX_SIZE = 65535;
    int segments = 0;
    while (size)
    {
        /* Each block of 4 PCI / PCIe pointers uses one dword for lengths followed by
            up to 4 addresses. This then repeats if more data is needed.
            The lengths live in distinct named bitfields (len0..len3), hence
            the unrolled cases below. */
        buffers[0].u64 = 0;
        if (size <= MAX_SIZE)
        {
            /* Only one more segment needed */
            buffers[0].pcie_length.len0 = size;
            buffers[1].u64 = address;
            segments++;
            break;
        }
        else if (size <= MAX_SIZE * 2)
        {
            /* Two more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = size - MAX_SIZE;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            segments+=2;
            break;
        }
        else if (size <= MAX_SIZE * 3)
        {
            /* Three more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = size - MAX_SIZE * 2;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            segments+=3;
            break;
        }
        else if (size <= MAX_SIZE * 4)
        {
            /* Four more segments needed */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = MAX_SIZE;
            buffers[0].pcie_length.len3 = size - MAX_SIZE * 3;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            address += MAX_SIZE;
            buffers[4].u64 = address;
            segments+=4;
            break;
        }
        else
        {
            /* Five or more segments are needed: emit a full block of four
               and loop for the remainder. */
            buffers[0].pcie_length.len0 = MAX_SIZE;
            buffers[0].pcie_length.len1 = MAX_SIZE;
            buffers[0].pcie_length.len2 = MAX_SIZE;
            buffers[0].pcie_length.len3 = MAX_SIZE;
            buffers[1].u64 = address;
            address += MAX_SIZE;
            buffers[2].u64 = address;
            address += MAX_SIZE;
            buffers[3].u64 = address;
            address += MAX_SIZE;
            buffers[4].u64 = address;
            address += MAX_SIZE;
            size -= MAX_SIZE*4;
            buffers += 5;
            segments+=4;
        }
    }
    return segments;
}
/**
* Build the first and last pointers based on a DMA engine header
* and submit them to the engine. The purpose of this function is
* to simplify the building of DMA engine commands by automatically
* converting a simple address and size into the appropriate internal
* or PCI / PCIe address list. This function does not support gather lists,
* so you will need to build your own lists in that case.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header DMA Command header. Note that the nfst and nlst fields do not
* need to be filled in. All other fields must be set properly.
* @param first_address
* Address to use for the first pointers. In the case of INTERNAL,
* INBOUND, and OUTBOUND this is an Octeon memory address. In the
* case of EXTERNAL, this is the source PCI / PCIe address.
* @param last_address
* Address to use for the last pointers. In the case of EXTERNAL,
* INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
* case of INTERNAL, this is the Octeon memory destination address.
* @param size Size of the transfer to perform.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
                             uint64_t first_address, uint64_t last_address,
                             int size)
{
    cvmx_dma_engine_buffer_t buffers[32];
    int words = 0;

    if (header.s.type == CVMX_DMA_ENGINE_TRANSFER_INTERNAL)
    {
        /* Both pointer lists are Octeon-internal addresses. */
        header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
        words += header.s.nfst;
        header.s.nlst = __cvmx_dma_engine_build_internal_pointers(buffers + words, last_address, size);
        words += header.s.nlst;
    }
    else if (header.s.type == CVMX_DMA_ENGINE_TRANSFER_EXTERNAL)
    {
        /* Both lists are PCI/PCIe addresses; each block of up to four
           pointers needs an extra length word. */
        header.s.nfst = __cvmx_dma_engine_build_external_pointers(buffers, first_address, size);
        words += header.s.nfst + ((header.s.nfst-1) >> 2) + 1;
        header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
        words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
    }
    else
    {
        /* INBOUND / OUTBOUND: internal first list, PCI/PCIe last list. */
        header.s.nfst = __cvmx_dma_engine_build_internal_pointers(buffers, first_address, size);
        words += header.s.nfst;
        header.s.nlst = __cvmx_dma_engine_build_external_pointers(buffers + words, last_address, size);
        words += header.s.nlst + ((header.s.nlst-1) >> 2) + 1;
    }
    return cvmx_dma_engine_submit(engine, header, words, buffers);
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_dma_engine_transfer);
#endif
#endif

View File

@ -1,378 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the PCI / PCIe DMA engines. These are only available
* on chips with PCI / PCIe.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_DMA_ENGINES_H__
#define __CVMX_DMA_ENGINES_H__
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx-dpi-defs.h>
#else
#include "cvmx-dpi-defs.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
/**
 * Transfer direction for a PCI / PCIe DMA instruction (the HDR.Type field).
 */
typedef enum
{
    CVMX_DMA_ENGINE_TRANSFER_OUTBOUND = 0,  /**< OUTBOUND (read from L2/DRAM, write into PCI / PCIe memory space) */
    CVMX_DMA_ENGINE_TRANSFER_INBOUND = 1,   /**< INBOUND (read from PCI / PCIe memory space, write into L2/DRAM) */
    CVMX_DMA_ENGINE_TRANSFER_INTERNAL = 2,  /**< INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM). Only available on chips with PCIe */
    CVMX_DMA_ENGINE_TRANSFER_EXTERNAL = 3,  /**< EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). Only available on chips with PCIe */
} cvmx_dma_engine_transfer_t;
/**
 * DMA engine instruction header (HDR): the first 64-bit word of a DMA
 * instruction.  It selects the transfer type, completion notification,
 * counter/interrupt updates, buffer freeing behavior, and the number of
 * FIRST/LAST pointers that follow.  The bitfield layout matches the
 * hardware instruction format exactly and must not be reordered.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t reserved_60_63 : 4;    /**< Must be zero */
        uint64_t fport : 2;     /**< First port. FPort indicates the physical PCIe port used for the
                                     PCIe memory space pointers in the FIRST POINTERS block in the
                                     EXTERNAL-ONLY case. Must be zero in the OUTBOUND, INBOUND and
                                     INTERNAL-ONLY cases. Must be zero on chips with PCI */
        uint64_t lport : 2;     /**< Last port. LPort indicates the physical PCIe port used for the
                                     PCIe memory space pointers in the LAST POINTERS block in the
                                     OUTBOUND, INBOUND, and EXTERNAL-ONLY cases. Must be zero in the
                                     INTERNAL-ONLY case. Must be zero on chips with PCI */
        cvmx_dma_engine_transfer_t type : 2; /**< Type - A given PCI DMA transfer is either OUTBOUND (read from L2/DRAM,
                                     write into PCI / PCIe memory space), INBOUND (read from PCI / PCIe memory space, write
                                     into L2/DRAM), INTERNAL-ONLY (read from L2/DRAM, write into L2/DRAM), or
                                     EXTERNAL-ONLY (read from PCIe memory space, write into PCIe memory space). */
        uint64_t wqp : 1;       /**< Work-queue pointer. When WQP = 1, PTR (if non-zero) is a pointer to a
                                     work-queue entry that is submitted by the hardware after completing the DMA;
                                     when WQP = 0, PTR (if non-zero) is a pointer to a byte in local memory that
                                     is written to 0 by the hardware after completing the DMA. */
        uint64_t c : 1;         /**< C - Counter. 1 = use counter 1, 0 = use counter 0.
                                     The C bit selects between the two counters (NPEI_DMA_CNTS[DMA0,DMA1])
                                     that can optionally be updated after an OUTBOUND or EXTERNAL-ONLY
                                     transfer, and also selects between the two forced-interrupt bits
                                     (NPEI_INT_SUMn[DMA0_FI, DMA1_FI]) that can optionally be set after an
                                     OUTBOUND or EXTERNAL-ONLY transfer. C must be zero for INBOUND or
                                     INTERNAL-ONLY transfers. */
        uint64_t ca : 1;        /**< CA - Counter add.
                                     When CA = 1, the hardware updates the selected counter after it completes the
                                     PCI DMA OUTBOUND or EXTERNAL-ONLY Instruction.
                                     - If C = 0, PCIE_DMA_CNT0 is updated
                                     - If C = 1, PCIE_DMA_CNT1 is updated.
                                     Note that this update may indirectly cause
                                     NPEI_INT_SUM[DCNT0,DCNT1,DTIME0,DTIME1] to become set (depending
                                     on the NPEI_DMA*_INT_LEVEL settings), so may cause interrupts to occur on a
                                     remote PCI host.
                                     - If NPEI_DMA_CONTROL[O_ADD1] = 1, the counter is updated by 1.
                                     - If NPEI_DMA_CONTROL[O_ADD1] = 0, the counter is updated by the total
                                       bytes in the transfer.
                                     When CA = 0, the hardware does not update any counters.
                                     For an INBOUND or INTERNAL-ONLY PCI DMA transfer, CA must never be
                                     set, and the hardware never adds to the counters. */
        uint64_t fi : 1;        /**< FI - Force interrupt.
                                     When FI is set for an OUTBOUND or EXTERNAL-ONLY transfer, the hardware
                                     sets a forced interrupt bit after it completes the PCI DMA Instruction. If C = 0,
                                     NPEI_INT_SUMn[DMA0_FI] is set, else NPEI_INT_SUMn[DMA1_FI] is set. For
                                     an INBOUND or INTERNAL-ONLY PCI DMA operation, FI must never be set,
                                     and the hardware never generates interrupts. */
        uint64_t ii : 1;        /**< II - Ignore the I bit (i.e. the I bit of the PCI DMA instruction local pointer).
                                     For OUTBOUND transfers when II = 1, ignore the I bit and the FL bit in the
                                     DMA HDR alone determines whether the hardware frees any/all of the local
                                     buffers in the FIRST POINTERS area:
                                     - when FL = 1, the hardware frees the local buffer when II=1.
                                     - when FL = 0, the hardware does not free the local buffer when II=1.
                                     For OUTBOUND transfers when II = 0, the I bit in the local pointer selects
                                     whether local buffers are freed on a pointer-by-pointer basis:
                                     - when (FL ^ I) is true, the hardware frees the local buffer when II=0.
                                     For INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY PCI DMA transfers,
                                     II must never be set, and local buffers are never freed. */
        uint64_t fl : 1;        /**< FL - Free local buffer.
                                     When FL = 1, for an OUTBOUND operation, it indicates that the local buffers in
                                     the FIRST BUFFERS area should be freed.
                                     If II = 1, the FL bit alone indicates whether the local buffer should be freed:
                                     - when FL = 1, the hardware frees the local buffer when II=1.
                                     - when FL = 0, the hardware does not free the local buffer when II=1.
                                     If II = 0, the I bit in the local pointer (refer to Section 9.5.2) determines whether
                                     the local buffer is freed:
                                     - when (FL ^ I) is true, the hardware frees the local buffer when II=0.
                                     For an INBOUND, INTERNAL-ONLY, or EXTERNAL-ONLY PCI DMA transfer,
                                     FL must never be set, and local buffers are never freed. */
        uint64_t nlst : 4;      /**< NLST - Number Last pointers.
                                     The number of pointers in the LAST POINTERS area.
                                     In the INBOUND, OUTBOUND, and EXTERNAL-ONLY cases, the LAST
                                     POINTERS area contains PCI components, and the number of 64-bit words
                                     required in the LAST POINTERS area is:
                                     - HDR.NLST + ((HDR.NLST + 3)/4) where the division removes the fraction.
                                     In the INTERNAL-ONLY case, the LAST POINTERS area contains local
                                     pointers, and the number of 64-bit words required in the LAST POINTERS area is:
                                     - HDR.NLST
                                     Note that the sum of the number of 64-bit words in the LAST POINTERS and
                                     FIRST POINTERS area must never exceed 31. */
        uint64_t nfst : 4;      /**< NFST - Number First pointers.
                                     The number of pointers in the FIRST POINTERS area.
                                     In the INBOUND, OUTBOUND, and INTERNAL-ONLY cases, the FIRST
                                     POINTERS area contains local pointers, and the number of 64-bit words required
                                     in the FIRST POINTERS area is:
                                     - HDR.NFST
                                     In the EXTERNAL-ONLY case, the FIRST POINTERS area contains PCI
                                     components, and the number of 64-bit words required in the FIRST POINTERS
                                     area is:
                                     - HDR.NFST + ((HDR.NFST + 3)/4) where the division removes the fraction. */
        uint64_t addr : 40;     /**< PTR - Pointer, either a work-queue-entry pointer (when WQP = 1) or a local
                                     memory pointer (WQP = 0).
                                     When WQP = 1 and PTR != 0x0, the hardware inserts the work-queue entry
                                     indicated by PTR into a POW input queue after the PCI DMA operation is
                                     complete. (Section 5.4 describes the work queue entry requirements in this
                                     case.) When WQP = 1, PTR<2:0> must be 0x0.
                                     When WQP = 0 and PTR != 0x0, the hardware writes the single byte in local
                                     memory indicated by PTR to 0x0 after the PCI DMA operation is complete.
                                     NPEI_DMA_CONTROL[B0_LEND] selects the endian-ness of PTR in this
                                     case.
                                     When PTR = 0x0, the hardware performs no operation after the PCI DMA
                                     operation is complete. */
    } s;
} cvmx_dma_engine_header_t;
/**
 * DMA engine pointer word.  Interpreted either as a local (L2/DRAM) pointer
 * with free/size/endian attributes ("internal" view), or as a packed set of
 * four PCI / PCIe fragment lengths ("pcie_length" view).  The bitfield layout
 * matches the hardware instruction format exactly and must not be reordered.
 */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t i : 1;         /**< I - Invert free.
                                     This bit gives the software the ability to free buffers independently for an
                                     OUTBOUND PCI DMA transfer. I is not used by the hardware when II is set. I
                                     must not be set, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
                                     and EXTERNAL-ONLY PCI DMA transfers. */
        uint64_t back : 4;      /**< Back - Backup amount.
                                     Allows the start of a buffer that is to be freed during an OUTBOUND transfer to
                                     be different from the ptr value. Back specifies the amount to subtract from the
                                     pointer to reach the start when freeing a buffer.
                                     The address that is the start of the buffer being freed is:
                                     - Buffer start address = ((ptr >> 7) - Back) << 7.
                                     Back is only used by the hardware when the buffer corresponding to ptr is freed.
                                     Back must be 0x0, and buffers are never freed, for INBOUND, INTERNAL-ONLY,
                                     and EXTERNAL-ONLY PCI DMA transfers. */
        uint64_t pool : 3;      /**< Pool - Free pool.
                                     Specifies which pool (of the eight hardware-managed FPA free pools) receives the
                                     buffer associated with ptr when freed during an OUTBOUND transfer.
                                     Pool is only used when the buffer corresponding to ptr is freed. Pool must be 0x0,
                                     and buffers are never freed, for INBOUND, INTERNAL-ONLY, and EXTERNAL-ONLY
                                     PCI DMA transfers. */
        uint64_t f : 1;         /**< F - Full-block writes are allowed.
                                     When set, the hardware is permitted to write all the bytes in the cache blocks
                                     covered by ptr, ptr + Size - 1. This can improve memory system performance
                                     when the write misses in the L2 cache.
                                     F can only be set for local pointers that can be written to:
                                     - The local pointers in the FIRST POINTERS area that are write pointers for
                                       INBOUND transfers.
                                     - The local pointers in the LAST POINTERS area that are always write
                                       pointers (when present for INTERNAL-ONLY transfers).
                                     F must not be set for local pointers that are not written to:
                                     - The local pointers in the FIRST POINTERS area for OUTBOUND and
                                       INTERNAL-ONLY transfers. */
        uint64_t a : 1;         /**< A - Allocate L2.
                                     This is a hint to the hardware that the cache blocks should be allocated in the L2
                                     cache (if they were not already). */
        uint64_t l : 1;         /**< L - Little-endian.
                                     When L is set, the data at ptr is in little-endian format rather than big-endian. */
        uint64_t size : 13;     /**< Size - Size in bytes of the contiguous space specified by ptr. A Size value of 0 is
                                     illegal. Note that the sum of the sizes in the FIRST POINTERS area must always
                                     exactly equal the sum of the sizes/lengths in the LAST POINTERS area:
                                     - In the OUTBOUND and INBOUND cases, the HDR.NFST size fields in the
                                       local pointers in the FIRST POINTERS area must exactly equal the lengths
                                       of the HDR.NLST fragments in the PCI components in the LAST POINTERS
                                       area.
                                     - In the INTERNAL-ONLY case, the HDR.NFST size fields in the local
                                       pointers in the FIRST POINTERS area must equal the HDR.NLST size
                                       fields in the local pointers in the LAST POINTERS area. */
        uint64_t reserved_36_39 : 4;    /**< Must be zero */
        uint64_t addr : 36;     /**< L2/DRAM byte pointer. Points to where the packet data starts.
                                     Ptr can be any byte alignment. Note that ptr is interpreted as a big-endian byte
                                     pointer when L is clear, a little-endian byte pointer when L is set. */
    } internal;
    struct
    {
        uint64_t len0 : 16;     /**< Length of PCI / PCIe memory for address 0 */
        uint64_t len1 : 16;     /**< Length of PCI / PCIe memory for address 1 */
        uint64_t len2 : 16;     /**< Length of PCI / PCIe memory for address 2 */
        uint64_t len3 : 16;     /**< Length of PCI / PCIe memory for address 3 */
    } pcie_length;
} cvmx_dma_engine_buffer_t;
/**
* Initialize the DMA engines for use
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_initialize(void);
/**
 * Shutdown all DMA engines. The engines must be idle when this
* function is called.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_shutdown(void);
/**
 * Return the number of DMA engines supported by this chip
*
* @return Number of DMA engines
*/
int cvmx_dma_engine_get_num(void);
/**
* Submit a series of DMA command to the DMA engines.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header Command header
* @param num_buffers
* The number of data pointers
* @param buffers Command data pointers
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_submit(int engine, cvmx_dma_engine_header_t header, int num_buffers, cvmx_dma_engine_buffer_t buffers[]);
/**
* Build the first and last pointers based on a DMA engine header
* and submit them to the engine. The purpose of this function is
* to simplify the building of DMA engine commands by automatically
 * converting a simple address and size into the appropriate internal
* or PCI / PCIe address list. This function does not support gather lists,
* so you will need to build your own lists in that case.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param header DMA Command header. Note that the nfst and nlst fields do not
* need to be filled in. All other fields must be set properly.
* @param first_address
* Address to use for the first pointers. In the case of INTERNAL,
* INBOUND, and OUTBOUND this is an Octeon memory address. In the
* case of EXTERNAL, this is the source PCI / PCIe address.
* @param last_address
* Address to use for the last pointers. In the case of EXTERNAL,
* INBOUND, and OUTBOUND this is a PCI / PCIe address. In the
* case of INTERNAL, this is the Octeon memory destination address.
* @param size Size of the transfer to perform.
*
* @return Zero on success, negative on failure
*/
int cvmx_dma_engine_transfer(int engine, cvmx_dma_engine_header_t header,
uint64_t first_address, uint64_t last_address,
int size);
/**
* Simplified interface to the DMA engines to emulate memcpy()
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
* used to turn this into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param source Pointer to the source memory.
* cvmx_ptr_to_phys() will be used to turn this
* into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param length Number of bytes to copy
*
* @return Zero on success, negative on failure
*/
static inline int cvmx_dma_engine_memcpy(int engine, void *dest, void *source, int length)
{
cvmx_dma_engine_header_t header;
header.u64 = 0;
header.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
return cvmx_dma_engine_transfer(engine, header, cvmx_ptr_to_phys(source),
cvmx_ptr_to_phys(dest), length);
}
/**
* Simplified interface to the DMA engines to emulate memcpy()
* When dici_mode is enabled, send zero byte.
*
* @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
* @param dest Pointer to the destination memory. cvmx_ptr_to_phys() will be
* used to turn this into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param source Pointer to the source memory.
* cvmx_ptr_to_phys() will be used to turn this
* into a physical address. It cannot be a local
* or CVMX_SHARED block.
* @param length Number of bytes to copy
* @param core core number for zero byte write
*
* @return Zero on success, negative on failure
*/
/**
 * DMA-engine-backed memcpy() emulation that, when DICI mode is enabled in
 * DPI_DMA_CONTROL, also requests a zero-byte write credited to a core.
 *
 * @param engine Engine to submit to (0 to cvmx_dma_engine_get_num()-1)
 * @param dest   Destination memory; converted with cvmx_ptr_to_phys(). It
 *               cannot be a local or CVMX_SHARED block.
 * @param source Source memory; same conversion and restrictions as dest.
 * @param length Number of bytes to copy
 * @param core   Core number for the zero-byte write
 *
 * @return Zero on success, negative on failure
 */
static inline int cvmx_dma_engine_memcpy_zero_byte(int engine, void *dest, void *source, int length, int core)
{
    cvmx_dma_engine_header_t hdr;

    hdr.u64 = 0;
    hdr.s.type = CVMX_DMA_ENGINE_TRANSFER_INTERNAL;
    /* When dici_mode is set, DPI increments DPI_DMA_PPn_CNT[CNT], where the
       value of core n is PTR<5:0>-1 when WQP=0 and PTR != 0 && PTR < 64. */
    if (octeon_has_feature(OCTEON_FEATURE_DICI_MODE)) {
        cvmx_dpi_dma_control_t ctl;

        ctl.u64 = cvmx_read_csr(CVMX_DPI_DMA_CONTROL);
        if (ctl.s.dici_mode) {
            hdr.s.wqp = 0;         /* PTR is a local memory pointer */
            hdr.s.addr = core + 1; /* selects the per-core counter */
        }
    }
    return cvmx_dma_engine_transfer(engine, hdr, cvmx_ptr_to_phys(source),
                                    cvmx_ptr_to_phys(dest), length);
}
#ifdef __cplusplus
}
#endif
#endif // __CVMX_DMA_ENGINES_H__

File diff suppressed because it is too large Load Diff

View File

@ -1,117 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the EBT3000 specific devices
*
* <hr>$Revision: 70030 $<hr>
*
*/
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-ebt3000.h"
#include "cvmx-sysinfo.h"
/**
 * Write a single character to the EBT3000 LED display.
 *
 * @param char_position Character cell to write; masked to the display width
 *                      (4 cells on rev 1 boards, 8 on rev 2 and later).
 * @param val           Character to display.
 */
void ebt3000_char_write(int char_position, char val)
{
    char *led_base;

    /* Bail out when no LED display is configured. The raw physical address
       must be tested (as ebt3000_str_write() does) BEFORE building the KSEG0
       pointer: CVMX_ADD_SEG32() of address 0 still yields a non-zero pointer,
       so the old "if (!led_base)" check could never fire. */
    if (!cvmx_sysinfo_get()->led_display_base_addr)
        return;
    /* Note: phys_to_ptr won't work here, as we are most likely going to
       access the boot bus, so build the KSEG0 pointer by hand. */
    led_base = CASTPTR(char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_sysinfo_get()->led_display_base_addr));
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 && cvmx_sysinfo_get()->board_rev_major == 1)
    {
        /* Rev 1 board: 4 characters, stored right-to-left at offset 4 */
        char *ptr = (char *)(led_base + 4);
        char_position &= 0x3;   /* only 4 chars */
        ptr[3 - char_position] = val;
    }
    else
    {
        /* Rev 2 or later board: 8 characters, stored in order */
        char *ptr = (char *)(led_base);
        char_position &= 0x7;   /* only 8 chars */
        ptr[char_position] = val;
    }
}
/**
 * Write a string to the EBT3000 LED display, space-padding to the full
 * display width (4 characters on rev 1 boards, 8 on rev 2 and later).
 *
 * @param str NUL-terminated string to display; extra characters are ignored.
 */
void ebt3000_str_write(const char *str)
{
    /* The LED display sits on the boot bus, so phys_to_ptr won't work here;
       build the KSEG0 pointer by hand instead. */
    char *led_base;
    int i;

    if (!cvmx_sysinfo_get()->led_display_base_addr)
        return;
    led_base = CASTPTR(char, CVMX_ADD_SEG32(CVMX_MIPS32_SPACE_KSEG0, cvmx_sysinfo_get()->led_display_base_addr));
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_EBT3000 &&
        cvmx_sysinfo_get()->board_rev_major == 1)
    {
        /* Rev 1 board: 4 characters, stored right-to-left at offset 4 */
        char *disp = (char *)(led_base + 4);

        for (i = 0; i < 4; i++)
            disp[3 - i] = *str ? *str++ : ' ';
    }
    else
    {
        /* Rev 2 or later board: 8 characters, stored in order */
        char *disp = (char *)(led_base);

        for (i = 0; i < 8; i++)
            disp[i] = *str ? *str++ : ' ';
    }
}

View File

@ -1,69 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#ifndef __CVMX_EBT3000_H__
#define __CVMX_EBT3000_H__
/**
* @file
*
* Interface to the EBT3000 specific devices
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifdef __cplusplus
extern "C" {
#endif
void ebt3000_str_write(const char *str);
void ebt3000_char_write(int char_position, char val);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_EBT3000_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,689 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* cvmx-eoi-defs.h
*
* Configuration and status register (CSR) type definitions for
* Octeon eoi.
*
* This file is auto generated. Do not edit.
*
* <hr>$Revision: 69515 $<hr>
*
*/
#ifndef __CVMX_EOI_DEFS_H__
#define __CVMX_EOI_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_BIST_CTL_STA CVMX_EOI_BIST_CTL_STA_FUNC()
static inline uint64_t CVMX_EOI_BIST_CTL_STA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_BIST_CTL_STA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000118ull);
}
#else
#define CVMX_EOI_BIST_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000118ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_CTL_STA CVMX_EOI_CTL_STA_FUNC()
static inline uint64_t CVMX_EOI_CTL_STA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_CTL_STA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000000ull);
}
#else
#define CVMX_EOI_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000000ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_DEF_STA0 CVMX_EOI_DEF_STA0_FUNC()
static inline uint64_t CVMX_EOI_DEF_STA0_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_DEF_STA0 not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000020ull);
}
#else
#define CVMX_EOI_DEF_STA0 (CVMX_ADD_IO_SEG(0x0001180013000020ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_DEF_STA1 CVMX_EOI_DEF_STA1_FUNC()
static inline uint64_t CVMX_EOI_DEF_STA1_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_DEF_STA1 not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000028ull);
}
#else
#define CVMX_EOI_DEF_STA1 (CVMX_ADD_IO_SEG(0x0001180013000028ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_DEF_STA2 CVMX_EOI_DEF_STA2_FUNC()
static inline uint64_t CVMX_EOI_DEF_STA2_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_DEF_STA2 not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000030ull);
}
#else
#define CVMX_EOI_DEF_STA2 (CVMX_ADD_IO_SEG(0x0001180013000030ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_ECC_CTL CVMX_EOI_ECC_CTL_FUNC()
static inline uint64_t CVMX_EOI_ECC_CTL_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_ECC_CTL not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000110ull);
}
#else
#define CVMX_EOI_ECC_CTL (CVMX_ADD_IO_SEG(0x0001180013000110ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_ENDOR_BISTR_CTL_STA CVMX_EOI_ENDOR_BISTR_CTL_STA_FUNC()
static inline uint64_t CVMX_EOI_ENDOR_BISTR_CTL_STA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_ENDOR_BISTR_CTL_STA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000120ull);
}
#else
#define CVMX_EOI_ENDOR_BISTR_CTL_STA (CVMX_ADD_IO_SEG(0x0001180013000120ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_ENDOR_CLK_CTL CVMX_EOI_ENDOR_CLK_CTL_FUNC()
static inline uint64_t CVMX_EOI_ENDOR_CLK_CTL_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_ENDOR_CLK_CTL not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000038ull);
}
#else
#define CVMX_EOI_ENDOR_CLK_CTL (CVMX_ADD_IO_SEG(0x0001180013000038ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_ENDOR_CTL CVMX_EOI_ENDOR_CTL_FUNC()
static inline uint64_t CVMX_EOI_ENDOR_CTL_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_ENDOR_CTL not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000100ull);
}
#else
#define CVMX_EOI_ENDOR_CTL (CVMX_ADD_IO_SEG(0x0001180013000100ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_INT_ENA CVMX_EOI_INT_ENA_FUNC()
static inline uint64_t CVMX_EOI_INT_ENA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_INT_ENA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000010ull);
}
#else
#define CVMX_EOI_INT_ENA (CVMX_ADD_IO_SEG(0x0001180013000010ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_INT_STA CVMX_EOI_INT_STA_FUNC()
static inline uint64_t CVMX_EOI_INT_STA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_INT_STA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000008ull);
}
#else
#define CVMX_EOI_INT_STA (CVMX_ADD_IO_SEG(0x0001180013000008ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_IO_DRV CVMX_EOI_IO_DRV_FUNC()
static inline uint64_t CVMX_EOI_IO_DRV_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_IO_DRV not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000018ull);
}
#else
#define CVMX_EOI_IO_DRV (CVMX_ADD_IO_SEG(0x0001180013000018ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_EOI_THROTTLE_CTL CVMX_EOI_THROTTLE_CTL_FUNC()
static inline uint64_t CVMX_EOI_THROTTLE_CTL_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_EOI_THROTTLE_CTL not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x0001180013000108ull);
}
#else
#define CVMX_EOI_THROTTLE_CTL (CVMX_ADD_IO_SEG(0x0001180013000108ull))
#endif
/**
* cvmx_eoi_bist_ctl_sta
*
* EOI_BIST_CTL_STA = EOI BIST Status Register
*
* Description:
* This register control EOI memory BIST and contains the bist result of EOI memories.
*/
union cvmx_eoi_bist_ctl_sta {
uint64_t u64;
struct cvmx_eoi_bist_ctl_sta_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_18_63 : 46;
uint64_t clear_bist : 1; /**< Clear BIST on the HCLK memories */
uint64_t start_bist : 1; /**< Starts BIST on the HCLK memories during 0-to-1
transition. */
uint64_t reserved_3_15 : 13;
uint64_t stdf : 1; /**< STDF Bist Status. */
uint64_t ppaf : 1; /**< PPAF Bist Status. */
uint64_t lddf : 1; /**< LDDF Bist Status. */
#else
uint64_t lddf : 1;
uint64_t ppaf : 1;
uint64_t stdf : 1;
uint64_t reserved_3_15 : 13;
uint64_t start_bist : 1;
uint64_t clear_bist : 1;
uint64_t reserved_18_63 : 46;
#endif
} s;
struct cvmx_eoi_bist_ctl_sta_s cnf71xx;
};
typedef union cvmx_eoi_bist_ctl_sta cvmx_eoi_bist_ctl_sta_t;
/**
* cvmx_eoi_ctl_sta
*
 * EOI_CTL_STA = EOI Configure Control Register
* This register configures EOI.
*/
/* NOTE(review): this register description comes from an auto-generated file
   ("Do not edit"); only comment text is corrected here. */
union cvmx_eoi_ctl_sta {
    uint64_t u64;
    struct cvmx_eoi_ctl_sta_s {
#ifdef __BIG_ENDIAN_BITFIELD
    uint64_t reserved_13_63 : 51;
    uint64_t ppaf_wm : 5;   /**< Number of entries when PP Access FIFO will assert
                                 full (back pressure) */
    uint64_t reserved_5_7 : 3;
    uint64_t busy : 1;      /**< 1: EOI is busy; 0: EOI is idle */
    uint64_t rwam : 2;      /**< Read/Write Arbitration Mode:
                                 - 10: Reads have higher priority
                                 - 01: Writes have higher priority
                                 00,11: Round-Robin between Reads and Writes */
    uint64_t ena : 1;       /**< When reset, all the inbound DMA accesses will be
                                 dropped and all the outbound read responses and write
                                 commits will be dropped. It must be set to 1'b1 for
                                 normal access. */
    uint64_t reset : 1;     /**< EOI block Software Reset. */
#else
    uint64_t reset : 1;
    uint64_t ena : 1;
    uint64_t rwam : 2;
    uint64_t busy : 1;
    uint64_t reserved_5_7 : 3;
    uint64_t ppaf_wm : 5;
    uint64_t reserved_13_63 : 51;
#endif
    } s;
    struct cvmx_eoi_ctl_sta_s cnf71xx;
};
typedef union cvmx_eoi_ctl_sta cvmx_eoi_ctl_sta_t;
/**
* cvmx_eoi_def_sta0
*
* Note: Working settings tabulated for each corner.
* ================================
* Corner pctl nctl
* ===============================
* 1 26 22
* 2 30 28
* 3 32 31
* 4 23 19
* 5 27 24
* 6 29 27
* 7 21 17
* 8 25 22
* 9 27 24
* 10 29 24
* 11 34 31
* 12 36 35
* 13 26 21
* 14 31 27
* 15 33 30
* 16 23 18
* 17 28 24
* 18 30 27
* 19 21 17
* 20 27 25
* 21 29 28
* 22 21 17
* 23 25 22
* 24 27 25
* 25 19 15
* 26 23 20
* 27 25 22
* 28 24 24
* 29 28 31
* 30 30 35
* 31 21 21
* 32 25 27
* 33 27 30
* 34 19 18
* 35 23 24
* 36 25 27
* 37 29 19
* 38 33 25
* 39 36 28
* 40 25 17
* 41 30 22
* 42 32 25
* 43 23 15
* 44 27 20
* 45 29 22
* ===============================
*
* EOI_DEF_STA0 = EOI Defect Status Register 0
*
* Register to hold repairout 0/1/2
*/
union cvmx_eoi_def_sta0 {
uint64_t u64;
struct cvmx_eoi_def_sta0_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_54_63 : 10;
uint64_t rout2 : 18; /**< Repairout2 */
uint64_t rout1 : 18; /**< Repairout1 */
uint64_t rout0 : 18; /**< Repairout0 */
#else
uint64_t rout0 : 18;
uint64_t rout1 : 18;
uint64_t rout2 : 18;
uint64_t reserved_54_63 : 10;
#endif
} s;
struct cvmx_eoi_def_sta0_s cnf71xx;
};
typedef union cvmx_eoi_def_sta0 cvmx_eoi_def_sta0_t;
/**
* cvmx_eoi_def_sta1
*
* EOI_DEF_STA1 = EOI Defect Status Register 1
*
* Register to hold repairout 3/4/5
*/
union cvmx_eoi_def_sta1 {
uint64_t u64;
struct cvmx_eoi_def_sta1_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_54_63 : 10;
uint64_t rout5 : 18; /**< Repairout5 */
uint64_t rout4 : 18; /**< Repairout4 */
uint64_t rout3 : 18; /**< Repairout3 */
#else
uint64_t rout3 : 18;
uint64_t rout4 : 18;
uint64_t rout5 : 18;
uint64_t reserved_54_63 : 10;
#endif
} s;
struct cvmx_eoi_def_sta1_s cnf71xx;
};
typedef union cvmx_eoi_def_sta1 cvmx_eoi_def_sta1_t;
/**
 * cvmx_eoi_def_sta2
 *
 * EOI_DEF_STA2 = EOI Defect Status Register 2
 *
 * Register to hold repair output 6 and the "too many defects" flag.
 */
union cvmx_eoi_def_sta2 {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_def_sta2_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_25_63 : 39;
	uint64_t toomany : 1; /**< Toomanydefects */
	uint64_t reserved_18_23 : 6;
	uint64_t rout6 : 18; /**< Repairout6 */
#else /* little endian: same fields, declaration order reversed */
	uint64_t rout6 : 18; /**< Repairout6 */
	uint64_t reserved_18_23 : 6;
	uint64_t toomany : 1; /**< Toomanydefects */
	uint64_t reserved_25_63 : 39;
#endif
	} s;
	struct cvmx_eoi_def_sta2_s cnf71xx;   /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_def_sta2 cvmx_eoi_def_sta2_t;
/**
 * cvmx_eoi_ecc_ctl
 *
 * EOI_ECC_CTL = EOI ECC Control Register
 *
 * Description:
 * This register enables ECC for each individual internal memory that requires ECC. For debug purpose, it can also
 * control 1 or 2 bits be flipped in the ECC data.
 */
union cvmx_eoi_ecc_ctl {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_ecc_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_3_63 : 61;
	uint64_t rben : 1; /**< 1: ECC Enable for read buffer
                            - 0: ECC Enable for instruction buffer
                            NOTE(review): description looks inconsistent
                            (0 reads like a second "enable") — confirm
                            against the hardware manual. */
	uint64_t rbsf : 2; /**< read buffer ecc syndrome flip (debug)
                            2'b00 : No Error Generation
                            2'b10, 2'b01: Flip 1 bit
                            2'b11 : Flip 2 bits */
#else /* little endian: same fields, declaration order reversed */
	uint64_t rbsf : 2;
	uint64_t rben : 1;
	uint64_t reserved_3_63 : 61;
#endif
	} s;
	struct cvmx_eoi_ecc_ctl_s cnf71xx;    /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_ecc_ctl cvmx_eoi_ecc_ctl_t;
/**
 * cvmx_eoi_endor_bistr_ctl_sta
 *
 * EOI_ENDOR_BISTR_CTL_STA = EOI BIST/BISR Control Status Register
 *
 * Description:
 * This register holds the BIST result of the EOI memories.
 */
union cvmx_eoi_endor_bistr_ctl_sta {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_endor_bistr_ctl_sta_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_10_63 : 54;
	uint64_t bisr_done : 1; /**< Endor DSP Memory BISR Done Status: 1 - done;
                                 0 - Not done. */
	uint64_t failed : 1; /**< Bist/Bisr Status: 1 - failed; 0 - Not failed. */
	uint64_t reserved_3_7 : 5;
	uint64_t bisr_hr : 1; /**< BISR Hardrepair */
	uint64_t bisr_dir : 1; /**< BISR Direction: 0 = input repair packets;
                                1 = output defect packets. */
	uint64_t start_bist : 1; /**< Start Bist */
#else /* little endian: same fields, declaration order reversed */
	uint64_t start_bist : 1;
	uint64_t bisr_dir : 1;
	uint64_t bisr_hr : 1;
	uint64_t reserved_3_7 : 5;
	uint64_t failed : 1;
	uint64_t bisr_done : 1;
	uint64_t reserved_10_63 : 54;
#endif
	} s;
	struct cvmx_eoi_endor_bistr_ctl_sta_s cnf71xx; /* CNF71XX alias */
};
typedef union cvmx_eoi_endor_bistr_ctl_sta cvmx_eoi_endor_bistr_ctl_sta_t;
/**
 * cvmx_eoi_endor_clk_ctl
 *
 * EOI_ENDOR_CLK_CTL = EOI Endor Clock Control
 *
 * Register controlling the generation of the Endor DSP and HAB clocks
 * (PLL configuration plus the two postscalar dividers).
 */
union cvmx_eoi_endor_clk_ctl {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_endor_clk_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_28_63 : 36;
	uint64_t habclk_sel : 1; /**< HAB CLK select
                                  0x0: HAB CLK select from PHY_PLL output from HAB PS
                                  0x1: HAB CLK select from DDR_PLL output from HAB PS */
	uint64_t reserved_26_26 : 1;
	uint64_t dsp_div_reset : 1; /**< DSP postscalar divider reset */
	uint64_t dsp_ps_en : 3; /**< DSP postscalar divide ratio
                                 Determines the DSP CK speed.
                                 0x0 : Divide DSP PLL output by 1
                                 0x1 : Divide DSP PLL output by 2
                                 0x2 : Divide DSP PLL output by 3
                                 0x3 : Divide DSP PLL output by 4
                                 0x4 : Divide DSP PLL output by 6
                                 0x5 : Divide DSP PLL output by 8
                                 0x6 : Divide DSP PLL output by 12
                                 0x7 : Divide DSP PLL output by 12
                                 DSP_PS_EN is not used when DSP_DIV_RESET = 1 */
	uint64_t hab_div_reset : 1; /**< HAB postscalar divider reset */
	uint64_t hab_ps_en : 3; /**< HAB postscalar divide ratio
                                 Determines the LMC CK speed.
                                 0x0 : Divide HAB PLL output by 1
                                 0x1 : Divide HAB PLL output by 2
                                 0x2 : Divide HAB PLL output by 3
                                 0x3 : Divide HAB PLL output by 4
                                 0x4 : Divide HAB PLL output by 6
                                 0x5 : Divide HAB PLL output by 8
                                 0x6 : Divide HAB PLL output by 12
                                 0x7 : Divide HAB PLL output by 12
                                 HAB_PS_EN is not used when HAB_DIV_RESET = 1 */
	uint64_t diffamp : 4; /**< PLL diffamp input transconductance */
	uint64_t cps : 3; /**< PLL charge-pump current */
	uint64_t cpb : 3; /**< PLL charge-pump current */
	uint64_t reset_n : 1; /**< PLL reset (active low) */
	uint64_t clkf : 7; /**< Multiply reference by CLKF
                            32 <= CLKF <= 64
                            PHY PLL frequency = 50 * CLKF
                            min = 1.6 GHz, max = 3.2 GHz */
#else /* little endian: same fields, declaration order reversed */
	uint64_t clkf : 7;
	uint64_t reset_n : 1;
	uint64_t cpb : 3;
	uint64_t cps : 3;
	uint64_t diffamp : 4;
	uint64_t hab_ps_en : 3;
	uint64_t hab_div_reset : 1;
	uint64_t dsp_ps_en : 3;
	uint64_t dsp_div_reset : 1;
	uint64_t reserved_26_26 : 1;
	uint64_t habclk_sel : 1;
	uint64_t reserved_28_63 : 36;
#endif
	} s;
	struct cvmx_eoi_endor_clk_ctl_s cnf71xx; /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_endor_clk_ctl cvmx_eoi_endor_clk_ctl_t;
/**
 * cvmx_eoi_endor_ctl
 *
 * EOI_ENDOR_CTL_STA = Endor Control Register
 * This register controls Endor phy reset and access (endianness of L2C
 * transfers and CSR address-bit inversion for debug).
 */
union cvmx_eoi_endor_ctl {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_endor_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_12_63 : 52;
	uint64_t r_emod : 2; /**< Endian format for data read from the L2C.
                              IN:   A-B-C-D-E-F-G-H
                              OUT0: A-B-C-D-E-F-G-H
                              OUT1: H-G-F-E-D-C-B-A
                              OUT2: D-C-B-A-H-G-F-E
                              OUT3: E-F-G-H-A-B-C-D */
	uint64_t w_emod : 2; /**< Endian format for data written the L2C.
                              IN:   A-B-C-D-E-F-G-H
                              OUT0: A-B-C-D-E-F-G-H
                              OUT1: H-G-F-E-D-C-B-A
                              OUT2: D-C-B-A-H-G-F-E
                              OUT3: E-F-G-H-A-B-C-D */
	uint64_t inv_rsl_ra2 : 1; /**< Invert RSL CSR read address bit 2. */
	uint64_t inv_rsl_wa2 : 1; /**< Invert RSL CSR write address bit 2. */
	uint64_t inv_pp_ra2 : 1; /**< Invert PP CSR read address bit 2. */
	uint64_t inv_pp_wa2 : 1; /**< Invert PP CSR write address bit 2. */
	uint64_t reserved_1_3 : 3;
	uint64_t reset : 1; /**< Endor block software reset. After hardware reset,
                             this bit is set to 1'b1 which puts Endor into reset
                             state. Software must clear this bit to use Endor. */
#else /* little endian: same fields, declaration order reversed */
	uint64_t reset : 1;
	uint64_t reserved_1_3 : 3;
	uint64_t inv_pp_wa2 : 1;
	uint64_t inv_pp_ra2 : 1;
	uint64_t inv_rsl_wa2 : 1;
	uint64_t inv_rsl_ra2 : 1;
	uint64_t w_emod : 2;
	uint64_t r_emod : 2;
	uint64_t reserved_12_63 : 52;
#endif
	} s;
	struct cvmx_eoi_endor_ctl_s cnf71xx;  /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_endor_ctl cvmx_eoi_endor_ctl_t;
/**
 * cvmx_eoi_int_ena
 *
 * EOI_INT_ENA = EOI Interrupt Enable Register
 *
 * Register to enable each individual interrupt source corresponding to EOI_INT_STA.
 */
union cvmx_eoi_int_ena {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_int_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t rb_dbe : 1; /**< Read Buffer ECC DBE (double-bit error) */
	uint64_t rb_sbe : 1; /**< Read Buffer ECC SBE (single-bit error) */
#else /* little endian: same fields, declaration order reversed */
	uint64_t rb_sbe : 1;
	uint64_t rb_dbe : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_eoi_int_ena_s cnf71xx;    /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_int_ena cvmx_eoi_int_ena_t;
/**
 * cvmx_eoi_int_sta
 *
 * EOI_INT_STA = EOI Interrupt Status Register
 *
 * Summary of different bits of RSL interrupt status.
 * Layout matches EOI_INT_ENA bit-for-bit.
 */
union cvmx_eoi_int_sta {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_int_sta_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_2_63 : 62;
	uint64_t rb_dbe : 1; /**< Read Buffer ECC DBE (double-bit error) */
	uint64_t rb_sbe : 1; /**< Read Buffer ECC SBE (single-bit error) */
#else /* little endian: same fields, declaration order reversed */
	uint64_t rb_sbe : 1;
	uint64_t rb_dbe : 1;
	uint64_t reserved_2_63 : 62;
#endif
	} s;
	struct cvmx_eoi_int_sta_s cnf71xx;    /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_int_sta cvmx_eoi_int_sta_t;
/**
 * cvmx_eoi_io_drv
 *
 * EOI_IO_DRV = EOI Endor IO Drive Control
 *
 * Register to control the Endor Phy IO output drivers
 * (P-MOS/N-MOS drive strength for the RFIF and GPO pins).
 */
union cvmx_eoi_io_drv {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_io_drv_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_24_63 : 40;
	uint64_t rfif_p : 6; /**< RFIF output driver P-Mos control */
	uint64_t rfif_n : 6; /**< RFIF output driver N-Mos control */
	uint64_t gpo_p : 6; /**< GPO output driver P-Mos control */
	uint64_t gpo_n : 6; /**< GPO output driver N-Mos control */
#else /* little endian: same fields, declaration order reversed */
	uint64_t gpo_n : 6;
	uint64_t gpo_p : 6;
	uint64_t rfif_n : 6;
	uint64_t rfif_p : 6;
	uint64_t reserved_24_63 : 40;
#endif
	} s;
	struct cvmx_eoi_io_drv_s cnf71xx;     /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_io_drv cvmx_eoi_io_drv_t;
/**
 * cvmx_eoi_throttle_ctl
 *
 * EOI_THROTTLE_CTL = EOI THROTTLE Control Register
 * This register controls the number of outstanding EOI loads/stores to the
 * L2C. It is in the phy_clock domain.
 */
union cvmx_eoi_throttle_ctl {
	uint64_t u64;                         /* whole-register access */
	struct cvmx_eoi_throttle_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
	uint64_t reserved_21_63 : 43;
	uint64_t std : 5; /**< Number of outstanding store data accepted by EOI on
                           AXI before backpressure ADMA. The value must be
                           from 16 to 31 inclusively. */
	uint64_t reserved_10_15 : 6;
	uint64_t stc : 2; /**< Number of outstanding L2C store command accepted by
                           EOI on AXI before backpressure ADMA. The value must be
                           from 1 to 3 inclusively. */
	uint64_t reserved_4_7 : 4;
	uint64_t ldc : 4; /**< Number of outstanding L2C loads. The value must be
                           from 1 to 8 inclusively. */
#else /* little endian: same fields, declaration order reversed */
	uint64_t ldc : 4;
	uint64_t reserved_4_7 : 4;
	uint64_t stc : 2;
	uint64_t reserved_10_15 : 6;
	uint64_t std : 5;
	uint64_t reserved_21_63 : 43;
#endif
	} s;
	struct cvmx_eoi_throttle_ctl_s cnf71xx; /* CNF71XX alias; identical layout */
};
typedef union cvmx_eoi_throttle_ctl cvmx_eoi_throttle_ctl_t;
#endif

View File

@ -1,604 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the hardware Fetch and Add Unit.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_FAU_H__
#define __CVMX_FAU_H__
#ifndef CVMX_DONT_INCLUDE_CONFIG
#include "cvmx-config.h"
#else
typedef int cvmx_fau_reg_64_t;
typedef int cvmx_fau_reg_32_t;
typedef int cvmx_fau_reg_16_t;
typedef int cvmx_fau_reg_8_t;
#endif
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Octeon Fetch and Add Unit (FAU)
 */
/* Base I/O address used for FAU load/store operations. */
#define CVMX_FAU_LOAD_IO_ADDRESS    cvmx_build_io_address(0x1e, 0)
/* (high,low) bit positions of the fields encoded in a FAU I/O address;
   consumed as paired arguments by cvmx_build_bits(). */
#define CVMX_FAU_BITS_SCRADDR       63,56
#define CVMX_FAU_BITS_LEN           55,48
#define CVMX_FAU_BITS_INEVAL        35,14
#define CVMX_FAU_BITS_TAGWAIT       13,13
/* NOADD shares bit 13 with TAGWAIT: stores use it as "no add",
   loads use it as "tag wait" (see __cvmx_fau_store_address /
   __cvmx_fau_atomic_address below). */
#define CVMX_FAU_BITS_NOADD         13,13
#define CVMX_FAU_BITS_SIZE          12,11
#define CVMX_FAU_BITS_REGISTER      10,0

/* Operand width selector encoded into the SIZE field. */
typedef enum {
	CVMX_FAU_OP_SIZE_8  = 0,
	CVMX_FAU_OP_SIZE_16 = 1,
	CVMX_FAU_OP_SIZE_32 = 2,
	CVMX_FAU_OP_SIZE_64 = 3
} cvmx_fau_op_size_t;
/**
 * Tagwait return definition. If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
	uint64_t error : 1;  /**< 1 = tag-switch timeout; value is unusable */
	int64_t value : 63;  /**< register value before the update */
} cvmx_fau_tagwait64_t;

/**
 * Tagwait return definition (32 bit). If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
	uint64_t error : 1;  /**< 1 = tag-switch timeout; value is unusable */
	int32_t value : 31;  /**< register value before the update */
} cvmx_fau_tagwait32_t;

/**
 * Tagwait return definition (16 bit). If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
	uint64_t error : 1;  /**< 1 = tag-switch timeout; value is unusable */
	int16_t value : 15;  /**< register value before the update */
} cvmx_fau_tagwait16_t;

/**
 * Tagwait return definition (8 bit). If a timeout occurs, the error
 * bit will be set. Otherwise the value of the register before
 * the update will be returned.
 */
typedef struct
{
	uint64_t error : 1;  /**< 1 = tag-switch timeout; value is unusable */
	int8_t value : 7;    /**< register value before the update */
} cvmx_fau_tagwait8_t;

/**
 * Asynchronous tagwait return definition. If a timeout occurs,
 * the error bit will be set. Otherwise the value of the
 * register before the update will be returned.
 */
typedef union {
	uint64_t u64;
	struct {
		uint64_t invalid: 1; /**< 1 = timeout; data is unpredictable */
		uint64_t data :63; /* unpredictable if invalid is set */
	} s;
} cvmx_fau_async_tagwait_result_t;
/**
 * @INTERNAL
 * Build the store I/O address used to write a FAU register.
 *
 * @param noadd 0 = store value is atomically added to the current value
 *              1 = store value atomically overwrites the current value
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 *              - Step by 4 for 32 bit access.
 *              - Step by 8 for 64 bit access.
 * @return Address to store to for the atomic update
 */
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
{
	uint64_t address = CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS);
	address |= cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd);
	address |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return address;
}
/**
 * @INTERNAL
 * Build the load I/O address used to perform an atomic FAU add.
 *
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @return Address to read from for the atomic update
 */
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg, int64_t value)
{
	uint64_t address = CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS);
	address |= cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value);
	address |= cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait);
	address |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return address;
}
/**
 * Atomically add a signed value to a 64 bit FAU register.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add. Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	uint64_t address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int64(address);
}
/**
 * Atomically add a signed value to a 32 bit FAU register.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add. Note: Only the low 22 bits are available.
 * @return Value of the register before the update
 */
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	uint64_t address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int32(address);
}
/**
 * Atomically add a signed value to a 16 bit FAU register.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 * @return Value of the register before the update
 */
static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	uint64_t address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int16(address);
}
/**
 * Atomically add a signed value to an 8 bit FAU register.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 * @return Value of the register before the update
 */
static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t address = __cvmx_fau_atomic_address(0, reg, value);
	return cvmx_read64_int8(address);
}
/**
 * Atomic 64 bit add performed after the current tag switch completes.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add. Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be returned
 */
static inline cvmx_fau_tagwait64_t cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	/* Reinterpret the raw 64 bit load result as the tagwait struct. */
	union {
		uint64_t raw;
		cvmx_fau_tagwait64_t tw;
	} ret;
	ret.raw = cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
	return ret.tw;
}
/**
 * Atomic 32 bit add performed after the current tag switch completes.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add. Note: Only the low 22 bits are available.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be returned
 */
static inline cvmx_fau_tagwait32_t cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	union {
		uint64_t raw;
		cvmx_fau_tagwait32_t tw;
	} ret;
	ret.raw = cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
	return ret.tw;
}
/**
 * Atomic 16 bit add performed after the current tag switch completes.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be returned
 */
static inline cvmx_fau_tagwait16_t cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	union {
		uint64_t raw;
		cvmx_fau_tagwait16_t tw;
	} ret;
	ret.raw = cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
	return ret.tw;
}
/**
 * Atomic 8 bit add performed after the current tag switch completes.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 * @return If a timeout occurs, the error bit will be set. Otherwise
 *         the value of the register before the update will be returned
 */
static inline cvmx_fau_tagwait8_t cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	union {
		uint64_t raw;
		cvmx_fau_tagwait8_t tw;
	} ret;
	ret.raw = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
	return ret.tw;
}
/**
 * @INTERNAL
 * Build the IOBDMA command word for asynchronous FAU operations.
 *
 * @param scraddr Scratch pad byte address to write to. Must be 8 byte aligned
 * @param value   Signed value to add.
 *                Note: When performing 32 and 64 bit access, only the low
 *                22 bits are available.
 * @param tagwait Should the atomic add wait for the current tag switch
 *                operation to complete.
 *                - 0 = Don't wait
 *                - 1 = Wait for tag switch to complete
 * @param size    The size of the operation:
 *                - CVMX_FAU_OP_SIZE_8  (0) = 8 bits
 *                - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
 *                - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
 *                - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 *                - Step by 4 for 32 bit access.
 *                - Step by 8 for 64 bit access.
 * @return Data to write using cvmx_send_single
 */
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value, uint64_t tagwait,
                                              cvmx_fau_op_size_t size, uint64_t reg)
{
	uint64_t data = CVMX_FAU_LOAD_IO_ADDRESS;
	/* SCRADDR is encoded as a 64 bit word index, hence the >> 3. */
	data |= cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3);
	data |= cvmx_build_bits(CVMX_FAU_BITS_LEN, 1);
	data |= cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value);
	data |= cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait);
	data |= cvmx_build_bits(CVMX_FAU_BITS_SIZE, size);
	data |= cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
	return data;
}
/**
 * Asynchronous atomic 64 bit add. The old value is placed in the
 * scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add. Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 32 bit add. The old value is placed in the
 * scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add. Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 16 bit add. The old value is placed in the
 * scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 8 bit add. The old value is placed in the
 * scratch memory at byte address scraddr.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 64 bit add, deferred until the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 8 for 64 bit access.
 * @param value   Signed value to add. Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr, cvmx_fau_reg_64_t reg, int64_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 32 bit add, deferred until the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 4 for 32 bit access.
 * @param value   Signed value to add. Note: Only the low 22 bits are available.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr, cvmx_fau_reg_32_t reg, int32_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 16 bit add, deferred until the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 *                - Step by 2 for 16 bit access.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr, cvmx_fau_reg_16_t reg, int16_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg);
	cvmx_send_single(cmd);
}
/**
 * Asynchronous atomic 8 bit add, deferred until the current tag
 * switch completes.
 *
 * @param scraddr Scratch memory byte address to put the response in.
 *                Must be 8 byte aligned.
 *                If a timeout occurs, the error bit (63) will be set. Otherwise
 *                the value of the register before the update will be returned
 * @param reg     FAU atomic register to access. 0 <= reg < 2048.
 * @param value   Signed value to add.
 * @return Placed in the scratch pad register
 */
static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr, cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t cmd = __cvmx_fau_iobdma_data(scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg);
	cvmx_send_single(cmd);
}
/**
 * Atomic 64 bit add with no result returned.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
{
	uint64_t address = __cvmx_fau_store_address(0, reg);
	cvmx_write64_int64(address, value);
}
/**
 * Atomic 32 bit add with no result returned.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
{
	uint64_t address = __cvmx_fau_store_address(0, reg);
	cvmx_write64_int32(address, value);
}
/**
 * Atomic 16 bit add with no result returned.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to add.
 */
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
{
	uint64_t address = __cvmx_fau_store_address(0, reg);
	cvmx_write64_int16(address, value);
}
/**
 * Atomic 8 bit add with no result returned.
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to add.
 */
static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t address = __cvmx_fau_store_address(0, reg);
	cvmx_write64_int8(address, value);
}
/**
 * Atomic 64 bit write (overwrites the register, no add).
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 8 for 64 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
{
	uint64_t address = __cvmx_fau_store_address(1, reg);
	cvmx_write64_int64(address, value);
}
/**
 * Atomic 32 bit write (overwrites the register, no add).
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 4 for 32 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
{
	uint64_t address = __cvmx_fau_store_address(1, reg);
	cvmx_write64_int32(address, value);
}
/**
 * Atomic 16 bit write (overwrites the register, no add).
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 *              - Step by 2 for 16 bit access.
 * @param value Signed value to write.
 */
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
{
	uint64_t address = __cvmx_fau_store_address(1, reg);
	cvmx_write64_int16(address, value);
}
/**
 * Atomic 8 bit write (overwrites the register, no add).
 *
 * @param reg   FAU atomic register to access. 0 <= reg < 2048.
 * @param value Signed value to write.
 */
static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
{
	uint64_t address = __cvmx_fau_store_address(1, reg);
	cvmx_write64_int8(address, value);
}
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_FAU_H__ */

View File

@ -1,674 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* This file provides bootbus flash operations
*
* <hr>$Revision: 70030 $<hr>
*
*
*/
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-spinlock.h"
#include "cvmx-flash.h"
#define MAX_NUM_FLASH_CHIPS 8 /* Maximum number of flash chips */
#define MAX_NUM_REGIONS 8 /* Maximum number of block regions per chip */
#define DEBUG 1 /* NOTE(review): debug support compiled in unconditionally — confirm intended for production builds */
/* CFI primary vendor command-set IDs (as reported by the chip's CFI query). */
#define CFI_CMDSET_NONE 0
#define CFI_CMDSET_INTEL_EXTENDED 1
#define CFI_CMDSET_AMD_STANDARD 2
#define CFI_CMDSET_INTEL_STANDARD 3
#define CFI_CMDSET_AMD_EXTENDED 4
#define CFI_CMDSET_MITSU_STANDARD 256
#define CFI_CMDSET_MITSU_EXTENDED 257
#define CFI_CMDSET_SST 258
/* Per-chip state gathered from the CFI query. One entry per probed chip. */
typedef struct
{
	void * base_ptr; /**< Memory pointer to start of flash; NULL if no chip */
	int is_16bit; /**< Chip is 16bits wide in 8bit mode (used as a 0/1 shift count) */
	uint16_t vendor; /**< Vendor ID of Chip */
	int size; /**< Size of the chip in bytes */
	uint64_t erase_timeout; /**< Erase timeout in cycles */
	uint64_t write_timeout; /**< Write timeout in cycles */
	int num_regions; /**< Number of block regions */
	cvmx_flash_region_t region[MAX_NUM_REGIONS];
} cvmx_flash_t;
/* Chip table shared across cores; guarded by flash_lock for updates. */
static CVMX_SHARED cvmx_flash_t flash_info[MAX_NUM_FLASH_CHIPS];
static CVMX_SHARED cvmx_spinlock_t flash_lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER;
/**
 * @INTERNAL
 * Read a byte from flash
 *
 * @param chip_id Chip to read from
 * @param offset  Byte offset into the chip
 * @return Value read
 */
static uint8_t __cvmx_flash_read8(int chip_id, int offset)
{
	/* Cast before indexing: arithmetic on a void * is a GNU extension,
	   not ISO C. Behavior is identical to the old
	   *(volatile uint8_t *)(base_ptr + offset). */
	const volatile uint8_t *base = (const volatile uint8_t *)flash_info[chip_id].base_ptr;
	return base[offset];
}
/**
 * @INTERNAL
 * Read a byte from flash at a command offset.
 *
 * @param chip_id Chip to read from
 * @param offset  Command offset into the chip (doubled for 16 bit chips)
 * @return Value read
 */
static uint8_t __cvmx_flash_read_cmd(int chip_id, int offset)
{
	int byte_offset = flash_info[chip_id].is_16bit ? (offset << 1) : offset;
	return __cvmx_flash_read8(chip_id, byte_offset);
}
/**
 * @INTERNAL
 * Read a little-endian 16 bit value from two consecutive command offsets.
 *
 * @param chip_id Chip to read from
 * @param offset  Command offset of the low byte
 * @return Value read
 */
static uint16_t __cvmx_flash_read_cmd16(int chip_id, int offset)
{
	uint16_t low = __cvmx_flash_read_cmd(chip_id, offset);
	uint16_t high = __cvmx_flash_read_cmd(chip_id, offset + 1);
	return (uint16_t)(low | (high << 8));
}
/**
 * @INTERNAL
 * Write a byte to flash
 *
 * @param chip_id Chip to write to
 * @param offset  Byte offset into the chip
 * @param data    Value to write
 */
static void __cvmx_flash_write8(int chip_id, int offset, uint8_t data)
{
	volatile uint8_t *base = (volatile uint8_t *)flash_info[chip_id].base_ptr;
	*(base + offset) = data;
}
/**
 * @INTERNAL
 * Write a byte to flash at a command offset.
 *
 * @param chip_id Chip to write to
 * @param offset  Command offset into the chip (doubled for 16 bit chips)
 * @param data    Value to write
 */
static void __cvmx_flash_write_cmd(int chip_id, int offset, uint8_t data)
{
	volatile uint8_t *base = (volatile uint8_t *)flash_info[chip_id].base_ptr;
	/* is_16bit is 0 or 1, so this shift doubles the offset for 16 bit chips */
	int byte_offset = offset << flash_info[chip_id].is_16bit;
	base[byte_offset] = data;
}
/**
 * @INTERNAL
 * Query a address and see if a CFI flash chip is there.
 *
 * NOTE(review): the "queury" spelling is historical; kept so the caller in
 * cvmx_flash_initialize() still resolves.
 *
 * @param chip_id Chip ID data to fill in if the chip is there
 * @param base_ptr Memory pointer to the start address to query
 * @return Zero on success, Negative on failure
 */
static int __cvmx_flash_queury_cfi(int chip_id, void *base_ptr)
{
    int region;
    cvmx_flash_t *flash = flash_info + chip_id;

    /* Set the minimum needed for the read and write primitives to work */
    flash->base_ptr = base_ptr;
    flash->is_16bit = 1; /* FIXME: Currently assumes the chip is 16bits */

    /* Put flash in CFI query mode */
    __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
    __cvmx_flash_write_cmd(chip_id, 0x55, 0x98);

    /* Make sure we get the QRY response we should */
    if ((__cvmx_flash_read_cmd(chip_id, 0x10) != 'Q') ||
        (__cvmx_flash_read_cmd(chip_id, 0x11) != 'R') ||
        (__cvmx_flash_read_cmd(chip_id, 0x12) != 'Y'))
    {
        /* No CFI response: clear base_ptr so the slot reads as empty */
        flash->base_ptr = NULL;
        return -1;
    }

    /* Read the 16bit vendor ID */
    flash->vendor = __cvmx_flash_read_cmd16(chip_id, 0x13);
    /* Read the write timeout. The timeout is microseconds(us) is 2^0x1f
       typically. The worst case is this value time 2^0x23 */
    flash->write_timeout = 1ull << (__cvmx_flash_read_cmd(chip_id, 0x1f) +
                                    __cvmx_flash_read_cmd(chip_id, 0x23));
    /* Read the erase timeout. The timeout is milliseconds(ms) is 2^0x21
       typically. The worst case is this value time 2^0x25 */
    flash->erase_timeout = 1ull << (__cvmx_flash_read_cmd(chip_id, 0x21) +
                                    __cvmx_flash_read_cmd(chip_id, 0x25));
    /* Get the flash size. This is 2^0x27 */
    flash->size = 1<<__cvmx_flash_read_cmd(chip_id, 0x27);
    /* Get the number of different sized block regions from 0x2c */
    flash->num_regions = __cvmx_flash_read_cmd(chip_id, 0x2c);

    int start_offset = 0;
    /* Loop through all regions get information about each */
    for (region=0; region<flash->num_regions; region++)
    {
        cvmx_flash_region_t *rgn_ptr = flash->region + region;
        rgn_ptr->start_offset = start_offset;
        /* The number of blocks in each region is a 16 bit little endian
           endian field. It is encoded at 0x2d + region*4 as (blocks-1) */
        uint16_t blocks = __cvmx_flash_read_cmd16(chip_id, 0x2d + region*4);
        rgn_ptr->num_blocks = 1u + blocks;
        /* The size of each block is a 16 bit little endian endian field. It
           is encoded at 0x2d + region*4 + 2 as (size/256). Zero is a special
           case representing 128 */
        uint16_t size = __cvmx_flash_read_cmd16(chip_id, 0x2d + region*4 + 2);
        if (size == 0)
            rgn_ptr->block_size = 128;
        else
            rgn_ptr->block_size = 256u * size;
        start_offset += rgn_ptr->block_size * rgn_ptr->num_blocks;
    }

    /* Take the chip out of CFI query mode */
    switch (flash_info[chip_id].vendor)
    {
        case CFI_CMDSET_AMD_STANDARD:
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0);
            /* NOTE(review): no break here, so AMD parts also receive the
               Intel 0xff reset below. Possibly deliberate belt-and-braces,
               possibly a missing break -- confirm against the CFI spec
               before changing. */
        case CFI_CMDSET_INTEL_STANDARD:
        case CFI_CMDSET_INTEL_EXTENDED:
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xff);
            break;
    }

    /* Convert the timeouts to cycles */
    flash->write_timeout *= cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000000;
    flash->erase_timeout *= cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 1000;

#if DEBUG
    /* Print the information about the chip */
    cvmx_dprintf("cvmx-flash: Base pointer: %p\n"
                 " Vendor: 0x%04x\n"
                 " Size: %d bytes\n"
                 " Num regions: %d\n"
                 " Erase timeout: %llu cycles\n"
                 " Write timeout: %llu cycles\n",
                 flash->base_ptr,
                 (unsigned int)flash->vendor,
                 flash->size,
                 flash->num_regions,
                 (unsigned long long)flash->erase_timeout,
                 (unsigned long long)flash->write_timeout);
    for (region=0; region<flash->num_regions; region++)
    {
        cvmx_dprintf(" Region %d: offset 0x%x, %d blocks, %d bytes/block\n",
                     region,
                     flash->region[region].start_offset,
                     flash->region[region].num_blocks,
                     flash->region[region].block_size);
    }
#endif
    return 0;
}
/**
 * Initialize the flash access library.
 *
 * Probes every enabled boot bus chip select for a CFI flash part and
 * records any chips found in flash_info[].
 */
void cvmx_flash_initialize(void)
{
    int boot_region;
    int chips_found = 0;

    memset(flash_info, 0, sizeof(flash_info));

    /* Loop through each boot bus chip select region */
    for (boot_region = 0; boot_region < MAX_NUM_FLASH_CHIPS; boot_region++)
    {
        cvmx_mio_boot_reg_cfgx_t region_cfg;
        void *base_ptr;

        region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFG0 + boot_region*8);
        /* Skip chip selects the bootloader left disabled. This assumes the
           bootloader already setup the flash */
        if (!region_cfg.s.en)
            continue;

        /* Convert the hardware address to a pointer. Note that the bootbus,
           unlike memory, isn't 1:1 mapped in the simple exec */
        base_ptr = cvmx_phys_to_ptr((region_cfg.s.base<<16) | 0xffffffff80000000ull);
        if (__cvmx_flash_queury_cfi(chips_found, base_ptr) == 0)
            chips_found++; /* Valid CFI flash chip found */
    }

    if (chips_found == 0)
        cvmx_dprintf("cvmx-flash: No CFI chips found\n");
}
/**
* Return a pointer to the flash chip
*
* @param chip_id Chip ID to return
* @return NULL if the chip doesn't exist
*/
void *cvmx_flash_get_base(int chip_id)
{
return flash_info[chip_id].base_ptr;
}
/**
* Return the number of erasable regions on the chip
*
* @param chip_id Chip to return info for
* @return Number of regions
*/
int cvmx_flash_get_num_regions(int chip_id)
{
return flash_info[chip_id].num_regions;
}
/**
 * Return information about a flash chip's region.
 *
 * @param chip_id Chip to get info for
 * @param region  Region to get info for
 * @return Region information
 */
const cvmx_flash_region_t *cvmx_flash_get_region_info(int chip_id, int region)
{
    return &flash_info[chip_id].region[region];
}
/**
 * Erase a block on the flash chip.
 *
 * Takes flash_lock for the full duration, so only one erase/write is in
 * flight per system at a time.
 *
 * @param chip_id Chip to erase a block on
 * @param region Region to erase a block in
 * @param block Block number to erase
 * @return Zero on success. Negative on failure
 */
int cvmx_flash_erase_block(int chip_id, int region, int block)
{
    cvmx_spinlock_lock(&flash_lock);
#if DEBUG
    cvmx_dprintf("cvmx-flash: Erasing chip %d, region %d, block %d\n",
                 chip_id, region, block);
#endif
    /* Byte offset of the target block from the start of the chip */
    int offset = flash_info[chip_id].region[region].start_offset +
                 block * flash_info[chip_id].region[region].block_size;
    switch (flash_info[chip_id].vendor)
    {
        case CFI_CMDSET_AMD_STANDARD:
        {
            /* Send the erase sector command sequence */
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
            __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
            __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
            __cvmx_flash_write_cmd(chip_id, 0x555, 0x80);
            __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
            __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
            __cvmx_flash_write8(chip_id, offset, 0x30);
            /* Loop checking status (AMD toggle-bit algorithm: DQ6 toggles on
               successive reads while the erase is still in progress) */
            uint8_t status = __cvmx_flash_read8(chip_id, offset);
            uint64_t start_cycle = cvmx_get_cycle();
            while (1)
            {
                /* Read the status and xor it with the old status so we can
                   find toggling bits */
                uint8_t old_status = status;
                status = __cvmx_flash_read8(chip_id, offset);
                uint8_t toggle = status ^ old_status;
                /* Check if the erase in progress bit is toggling */
                if (toggle & (1<<6))
                {
                    /* Check hardware timeout */
                    if (status & (1<<5))
                    {
                        /* Chip has signalled a timeout. Reread the status */
                        old_status = __cvmx_flash_read8(chip_id, offset);
                        status = __cvmx_flash_read8(chip_id, offset);
                        toggle = status ^ old_status;
                        /* Check if the erase in progress bit is toggling */
                        if (toggle & (1<<6))
                        {
                            cvmx_dprintf("cvmx-flash: Hardware timeout erasing block\n");
                            cvmx_spinlock_unlock(&flash_lock);
                            return -1;
                        }
                        else
                            break; /* Not toggling, erase complete */
                    }
                }
                else
                    break; /* Not toggling, erase complete */
                /* Software timeout guard, measured in core clock cycles
                   (erase_timeout was converted to cycles at probe time) */
                if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)
                {
                    cvmx_dprintf("cvmx-flash: Timeout erasing block\n");
                    cvmx_spinlock_unlock(&flash_lock);
                    return -1;
                }
            }
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
            cvmx_spinlock_unlock(&flash_lock);
            return 0;
        }
        case CFI_CMDSET_INTEL_STANDARD:
        case CFI_CMDSET_INTEL_EXTENDED:
        {
            /* Send the erase sector command sequence */
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
            __cvmx_flash_write8(chip_id, offset, 0x20);
            __cvmx_flash_write8(chip_id, offset, 0xd0);
            /* Loop checking status (Intel: bit 7 of the status register is
               "ready"; any of bits 0-6 set afterwards indicates an error) */
            uint8_t status = __cvmx_flash_read8(chip_id, offset);
            uint64_t start_cycle = cvmx_get_cycle();
            while ((status & 0x80) == 0)
            {
                if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].erase_timeout)
                {
                    cvmx_dprintf("cvmx-flash: Timeout erasing block\n");
                    cvmx_spinlock_unlock(&flash_lock);
                    return -1;
                }
                status = __cvmx_flash_read8(chip_id, offset);
            }
            /* Check the final status */
            if (status & 0x7f)
            {
                cvmx_dprintf("cvmx-flash: Hardware failure erasing block\n");
                cvmx_spinlock_unlock(&flash_lock);
                return -1;
            }
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
            cvmx_spinlock_unlock(&flash_lock);
            return 0;
        }
    }
    cvmx_dprintf("cvmx-flash: Unsupported flash vendor\n");
    cvmx_spinlock_unlock(&flash_lock);
    return -1;
}
/**
 * Write (program) one block on the flash chip.
 *
 * The block must already be erased; flash programming can only clear bits.
 * Takes flash_lock for the full duration.
 *
 * @param chip_id Chip to write a block on
 * @param region Region to write a block in
 * @param block Block number to write
 * @param data Data to write (one full block's worth is consumed)
 * @return Zero on success. Negative on failure
 */
int cvmx_flash_write_block(int chip_id, int region, int block, const void *data)
{
    cvmx_spinlock_lock(&flash_lock);
#if DEBUG
    cvmx_dprintf("cvmx-flash: Writing chip %d, region %d, block %d\n",
                 chip_id, region, block);
#endif
    /* Byte offset of the target block and its length */
    int offset = flash_info[chip_id].region[region].start_offset +
                 block * flash_info[chip_id].region[region].block_size;
    int len = flash_info[chip_id].region[region].block_size;
    const uint8_t *ptr = (const uint8_t *)data;
    switch (flash_info[chip_id].vendor)
    {
        case CFI_CMDSET_AMD_STANDARD:
        {
            /* Loop through one byte at a time */
            while (len--)
            {
                /* Send the program sequence */
                __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
                __cvmx_flash_write_cmd(chip_id, 0x555, 0xaa);
                __cvmx_flash_write_cmd(chip_id, 0x2aa, 0x55);
                __cvmx_flash_write_cmd(chip_id, 0x555, 0xa0);
                __cvmx_flash_write8(chip_id, offset, *ptr);
                /* Loop polling for status: DQ7 reads as the complement of
                   the written data until the program completes */
                uint64_t start_cycle = cvmx_get_cycle();
                while (1)
                {
                    uint8_t status = __cvmx_flash_read8(chip_id, offset);
                    if (((status ^ *ptr) & (1<<7)) == 0)
                        break; /* Data matches, this byte is done */
                    else if (status & (1<<5))
                    {
                        /* Hardware timeout, recheck status */
                        status = __cvmx_flash_read8(chip_id, offset);
                        if (((status ^ *ptr) & (1<<7)) == 0)
                            break; /* Data matches, this byte is done */
                        else
                        {
                            cvmx_dprintf("cvmx-flash: Hardware write timeout\n");
                            cvmx_spinlock_unlock(&flash_lock);
                            return -1;
                        }
                    }
                    if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)
                    {
                        cvmx_dprintf("cvmx-flash: Timeout writing block\n");
                        cvmx_spinlock_unlock(&flash_lock);
                        return -1;
                    }
                }
                /* Increment to the next byte */
                ptr++;
                offset++;
            }
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xf0); /* Reset the flash chip */
            cvmx_spinlock_unlock(&flash_lock);
            return 0;
        }
        case CFI_CMDSET_INTEL_STANDARD:
        case CFI_CMDSET_INTEL_EXTENDED:
        {
#if DEBUG
            /* Was an unconditional leftover debug printf; keep it, but only
               in DEBUG builds to match the rest of this file */
            cvmx_dprintf("%s:%d len=%d\n", __FUNCTION__, __LINE__, len);
#endif
            /* Loop through one byte at a time */
            while (len--)
            {
                /* Send the program sequence */
                __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
                __cvmx_flash_write8(chip_id, offset, 0x40);
                __cvmx_flash_write8(chip_id, offset, *ptr);
                /* Loop polling for status (bit 7 = ready) */
                uint8_t status = __cvmx_flash_read8(chip_id, offset);
                uint64_t start_cycle = cvmx_get_cycle();
                while ((status & 0x80) == 0)
                {
                    if (cvmx_get_cycle() > start_cycle + flash_info[chip_id].write_timeout)
                    {
                        cvmx_dprintf("cvmx-flash: Timeout writing block\n");
                        cvmx_spinlock_unlock(&flash_lock);
                        return -1;
                    }
                    status = __cvmx_flash_read8(chip_id, offset);
                }
                /* Check the final status */
                if (status & 0x7f)
                {
                    /* Fixed message: this is the write path, not erase */
                    cvmx_dprintf("cvmx-flash: Hardware failure writing block\n");
                    cvmx_spinlock_unlock(&flash_lock);
                    return -1;
                }
                /* Increment to the next byte */
                ptr++;
                offset++;
            }
#if DEBUG
            cvmx_dprintf("%s:%d\n", __FUNCTION__, __LINE__);
#endif
            __cvmx_flash_write_cmd(chip_id, 0x00, 0xff); /* Reset the flash chip */
            cvmx_spinlock_unlock(&flash_lock);
            return 0;
        }
    }
    cvmx_dprintf("cvmx-flash: Unsupported flash vendor\n");
    cvmx_spinlock_unlock(&flash_lock);
    return -1;
}
/**
 * Erase and write data to a flash.
 *
 * The write must start on an erase-block boundary and must not span
 * multiple chips; it may span regions.
 *
 * @param address Memory address to write to (inside a chip's mapped window)
 * @param data Data to write
 * @param len Length of the data
 * @return Zero on success. Negative on failure
 */
int cvmx_flash_write(void *address, const void *data, int len)
{
    int chip_id;
    /* Use char* locals: pointer arithmetic/comparison on void* is a GNU
       extension and undefined in standard C */
    char *addr = (char *)address;
    const char *src = (const char *)data;

    /* Find which chip controls this address. Don't allow the write to span
       multiple chips */
    for (chip_id=0; chip_id<MAX_NUM_FLASH_CHIPS; chip_id++)
    {
        char *base = (char *)flash_info[chip_id].base_ptr;
        /* Skip empty slots (base_ptr == NULL after initialize's memset) */
        if (base && (base <= addr) &&
            (base + flash_info[chip_id].size >= addr + len))
            break;
    }
    if (chip_id == MAX_NUM_FLASH_CHIPS)
    {
        cvmx_dprintf("cvmx-flash: Unable to find chip that contains address %p\n", address);
        return -1;
    }

    cvmx_flash_t *flash = flash_info + chip_id;
    /* Determine which block region we need to start writing to */
    char *region_base = (char *)flash->base_ptr;
    int region = 0;
    while (region_base + flash->region[region].num_blocks * flash->region[region].block_size <= addr)
    {
        region++;
        region_base = (char *)flash->base_ptr + flash->region[region].start_offset;
    }
    /* Determine which block in the region to start at */
    int block = (addr - region_base) / flash->region[region].block_size;
    /* Require all writes to start on block boundries */
    if (addr != region_base + block*flash->region[region].block_size)
    {
        cvmx_dprintf("cvmx-flash: Write address not aligned on a block boundry\n");
        return -1;
    }

    /* Loop until we're out of data */
    while (len > 0)
    {
        /* Erase the current block */
        if (cvmx_flash_erase_block(chip_id, region, block))
            return -1;
        /* Write the new data */
        if (cvmx_flash_write_block(chip_id, region, block, src))
            return -1;
        /* Increment to the next block */
        src += flash->region[region].block_size;
        len -= flash->region[region].block_size;
        block++;
        if (block >= flash->region[region].num_blocks)
        {
            block = 0;
            region++;
        }
    }
    return 0;
}

View File

@ -1,136 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* This file provides bootbus flash operations
*
* <hr>$Revision: 70030 $<hr>
*
*
*/
#ifndef __CVMX_FLASH_H__
#define __CVMX_FLASH_H__
#ifdef __cplusplus
extern "C" {
#endif
/* Describes one run of equally-sized erase blocks on a flash chip */
typedef struct
{
    int start_offset;   /* Byte offset of this region from the chip base */
    int block_size;     /* Size in bytes of each erase block in the region */
    int num_blocks;     /* Number of erase blocks in the region */
} cvmx_flash_region_t;
/**
* Initialize the flash access library
*/
void cvmx_flash_initialize(void);
/**
* Return a pointer to the flash chip
*
* @param chip_id Chip ID to return
* @return NULL if the chip doesn't exist
*/
void *cvmx_flash_get_base(int chip_id);
/**
* Return the number of erasable regions on the chip
*
* @param chip_id Chip to return info for
* @return Number of regions
*/
int cvmx_flash_get_num_regions(int chip_id);
/**
* Return information about a flash chips region
*
* @param chip_id Chip to get info for
* @param region Region to get info for
* @return Region information
*/
const cvmx_flash_region_t *cvmx_flash_get_region_info(int chip_id, int region);
/**
* Erase a block on the flash chip
*
* @param chip_id Chip to erase a block on
* @param region Region to erase a block in
* @param block Block number to erase
* @return Zero on success. Negative on failure
*/
int cvmx_flash_erase_block(int chip_id, int region, int block);
/**
* Write a block on the flash chip
*
* @param chip_id Chip to write a block on
* @param region Region to write a block in
* @param block Block number to write
* @param data Data to write
* @return Zero on success. Negative on failure
*/
int cvmx_flash_write_block(int chip_id, int region, int block, const void *data);
/**
* Erase and write data to a flash
*
* @param address Memory address to write to
* @param data Data to write
* @param len Length of the data
* @return Zero on success. Negative on failure
*/
int cvmx_flash_write(void *address, const void *data, int len);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_FLASH_H__ */

File diff suppressed because it is too large Load Diff

View File

@ -1,207 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Support library for the hardware Free Pool Allocator.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#include "cvmx.h"
#include "cvmx-fpa.h"
#include "cvmx-ipd.h"
/**
* Current state of all the pools. Use access functions
* instead of using it directly.
*/
CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
/**
 * Setup a FPA pool to control a new block of memory. The
 * buffer pointer must be a physical address.
 *
 * @param pool Pool to initialize
 *             0 <= pool < 8
 * @param name Constant character string to name this pool.
 *             String is not copied.
 * @param buffer Pointer to the block of memory to use. This must be
 *               accessable by all processors and external hardware.
 * @param block_size Size for each block controlled by the FPA
 * @param num_blocks Number of blocks
 *
 * @return 0 on Success,
 *         -1 on failure
 */
int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
                        uint64_t block_size, uint64_t num_blocks)
{
    char *block;
    uint64_t i;

    /* Validate all arguments before touching the pool table */
    if (buffer == NULL)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
        return -1;
    }
    if (pool >= CVMX_FPA_NUM_POOLS)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
        return -1;
    }
    if (block_size < CVMX_FPA_MIN_BLOCK_SIZE)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
        return -1;
    }
    if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT-1)) != 0)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
        return -1;
    }

    /* Record the pool's bookkeeping info for later shutdown validation */
    cvmx_fpa_pool_info[pool].name = name;
    cvmx_fpa_pool_info[pool].size = block_size;
    cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
    cvmx_fpa_pool_info[pool].base = buffer;

    /* Carve the buffer into blocks and hand each one to the hardware */
    block = (char *)buffer;
    for (i = 0; i < num_blocks; i++)
    {
        cvmx_fpa_free(block, pool, 0);
        block += block_size;
    }
    return 0;
}
/**
 * Shutdown a Memory pool and validate that it had all of
 * the buffers originally placed in it. This should only be
 * called by one processor after all hardware has finished
 * using the pool. Most like you will want to have called
 * cvmx_helper_shutdown_packet_io_global() before this
 * function to make sure all FPA buffers are out of the packet
 * IO hardware.
 *
 * NOTE(review): the return type is uint64_t but the error path returns
 * -errors; callers must interpret "huge" values as negative. Kept as-is
 * since the header declares it this way.
 *
 * @param pool Pool to shutdown
 *
 * @return Zero on success
 *         - Positive is count of missing buffers
 *         - Negative is too many buffers or corrupted pointers
 */
uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
{
    int errors = 0;
    int count = 0;
    int expected_count = cvmx_fpa_pool_info[pool].starting_element_count;
    /* Physical address range the pool's buffers must fall inside */
    uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
    uint64_t finish = base + cvmx_fpa_pool_info[pool].size * expected_count;

    /* Drain the pool, validating every pointer the hardware hands back */
    count = 0;
    while (1)
    {
        uint64_t address;
        void *ptr = cvmx_fpa_alloc(pool);
        if (!ptr)
            break;
        address = cvmx_ptr_to_phys(ptr);
        /* A valid buffer lies in [base, finish) at a block-size multiple */
        if ((address >= base) && (address < finish) &&
            (((address - base) % cvmx_fpa_pool_info[pool].size) == 0))
        {
            count++;
        }
        else
        {
            cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
                         (unsigned long long)address, cvmx_fpa_pool_info[pool].name, (int)pool);
            errors++;
        }
    }

    if (count < expected_count)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) missing %d buffers\n",
                     cvmx_fpa_pool_info[pool].name, (int)pool, expected_count - count);
    }
    else if (count > expected_count)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) had %d duplicate buffers\n",
                     cvmx_fpa_pool_info[pool].name, (int)pool, count - expected_count);
    }

    if (errors)
    {
        cvmx_dprintf("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%x\n",
                     cvmx_fpa_pool_info[pool].name, (int)pool, (unsigned long long)base, (unsigned long long)finish, (int)cvmx_fpa_pool_info[pool].size);
        return -errors;
    }
    else
        return expected_count - count;
}
uint64_t cvmx_fpa_get_block_size(uint64_t pool)
{
switch (pool)
{
case 0:
return CVMX_FPA_POOL_0_SIZE;
case 1:
return CVMX_FPA_POOL_1_SIZE;
case 2:
return CVMX_FPA_POOL_2_SIZE;
case 3:
return CVMX_FPA_POOL_3_SIZE;
case 4:
return CVMX_FPA_POOL_4_SIZE;
case 5:
return CVMX_FPA_POOL_5_SIZE;
case 6:
return CVMX_FPA_POOL_6_SIZE;
case 7:
return CVMX_FPA_POOL_7_SIZE;
default:
return 0;
}
}

View File

@ -1,337 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the hardware Free Pool Allocator.
*
* <hr>$Revision: 70030 $<hr>
*
*/
#ifndef __CVMX_FPA_H__
#define __CVMX_FPA_H__
#include "cvmx-scratch.h"
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include "cvmx-fpa-defs.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define CVMX_FPA_NUM_POOLS 8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
#define CVMX_FPA_ALIGNMENT 128
/**
* Structure describing the data format used for stores to the FPA.
*/
typedef union
{
uint64_t u64;
struct {
uint64_t scraddr : 8; /**< the (64-bit word) location in scratchpad to write to (if len != 0) */
uint64_t len : 8; /**< the number of words in the response (0 => no response) */
uint64_t did : 8; /**< the ID of the device on the non-coherent bus */
uint64_t addr :40; /**< the address that will appear in the first tick on the NCB bus */
} s;
} cvmx_fpa_iobdma_data_t;
/**
* Structure describing the current state of a FPA pool.
*/
typedef struct
{
const char *name; /**< Name it was created under */
uint64_t size; /**< Size of each block */
void * base; /**< The base memory address of whole block */
uint64_t starting_element_count; /**< The number of elements in the pool at creation */
} cvmx_fpa_pool_info_t;
/**
* Current state of all the pools. Use access functions
* instead of using it directly.
*/
extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
/* CSR typedefs have been moved to cvmx-fpa-defs.h */
/**
* Return the name of the pool
*
* @param pool Pool to get the name of
* @return The name
*/
static inline const char *cvmx_fpa_get_name(uint64_t pool)
{
return cvmx_fpa_pool_info[pool].name;
}
/**
* Return the base of the pool
*
* @param pool Pool to get the base of
* @return The base
*/
static inline void *cvmx_fpa_get_base(uint64_t pool)
{
return cvmx_fpa_pool_info[pool].base;
}
/**
 * Check if a pointer belongs to an FPA pool. Return non-zero
 * if the supplied pointer is inside the memory controlled by
 * an FPA pool.
 *
 * @param pool Pool to check
 * @param ptr Pointer to check
 * @return Non-zero if pointer is in the pool. Zero if not
 */
static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
{
    const char *base = (const char *)cvmx_fpa_pool_info[pool].base;
    const char *end = base + cvmx_fpa_pool_info[pool].size *
                      cvmx_fpa_pool_info[pool].starting_element_count;
    const char *p = (const char *)ptr;
    return (p >= base) && (p < end);
}
/**
 * Enable the FPA for use. Must be performed after any CSR
 * configuration but before any other FPA functions.
 */
static inline void cvmx_fpa_enable(void)
{
    cvmx_fpa_ctl_status_t ctl;

    ctl.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
    /*
     * CN68XXP1 should not reset the FPA (doing so may break the
     * SSO), so we may end up enabling it more than once. Just
     * return and don't spew messages.
     */
    if (ctl.s.enb)
        return;

    ctl.u64 = 0;
    ctl.s.enb = 1;
    cvmx_write_csr(CVMX_FPA_CTL_STATUS, ctl.u64);
}
/**
 * Reset FPA to disable. Make sure buffers from all FPA pools are freed
 * before disabling FPA.
 */
static inline void cvmx_fpa_disable(void)
{
    cvmx_fpa_ctl_status_t ctl;

    ctl.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
    ctl.s.reset = 1;
    cvmx_write_csr(CVMX_FPA_CTL_STATUS, ctl.u64);
}
/**
 * Get a new block from the FPA.
 *
 * @param pool Pool to get the block from
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_alloc(uint64_t pool)
{
    while (1)
    {
        uint64_t address = cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool)));
        if (cvmx_likely(address))
            return cvmx_phys_to_ptr(address);
        /* Nothing returned: give up once the hardware reports the pool is
           truly empty, otherwise back off briefly and retry */
        if (cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool)) == 0)
            return NULL;
        cvmx_wait(50);
    }
}
/**
 * Asynchronously get a new block from the FPA.
 *
 * The result of cvmx_fpa_async_alloc() may be retrieved using
 * cvmx_fpa_async_alloc_finish().
 *
 * @param scr_addr Local scratch address to put response in. This is a byte address,
 *                 but must be 8 byte aligned.
 * @param pool Pool to get the block from
 */
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
{
    cvmx_fpa_iobdma_data_t cmd;

    /* Hardware only uses 64 bit aligned locations, so convert from byte
       address to 64-bit index */
    cmd.s.scraddr = scr_addr >> 3;
    cmd.s.len = 1;
    cmd.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool);
    cmd.s.addr = 0;
    cvmx_send_single(cmd.u64);
}
/**
 * Retrieve the result of cvmx_fpa_async_alloc.
 *
 * @param scr_addr The Local scratch address. Must be the same value
 *                 passed to cvmx_fpa_async_alloc().
 *
 * @param pool Pool the block came from. Must be the same value
 *             passed to cvmx_fpa_async_alloc.
 *
 * @return Pointer to the block or NULL on failure
 */
static inline void *cvmx_fpa_async_alloc_finish(uint64_t scr_addr, uint64_t pool)
{
    uint64_t phys;

    CVMX_SYNCIOBDMA;
    phys = cvmx_scratch_read64(scr_addr);
    /* If the async request came back empty, fall back to a synchronous
       allocation attempt */
    if (!phys)
        return cvmx_fpa_alloc(pool);
    return cvmx_phys_to_ptr(phys);
}
/**
 * Free a block allocated with a FPA pool.
 * Does NOT provide memory ordering in cases where the memory block was modified by the core.
 *
 * @param ptr Block to free
 * @param pool Pool to put it in
 * @param num_cache_lines
 *                Cache lines to invalidate
 */
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool, uint64_t num_cache_lines)
{
    cvmx_addr_t newptr;
    newptr.u64 = cvmx_ptr_to_phys(ptr);
    newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
    /* Compiler-only barrier: keeps GCC from moving accesses past the free,
       but issues no hardware sync (hence "nosync") */
    asm volatile ("" : : : "memory"); /* Prevent GCC from reordering around free */
    /* value written is number of cache lines not written back */
    cvmx_write_io(newptr.u64, num_cache_lines);
}
/**
 * Free a block allocated with a FPA pool. Provides required memory
 * ordering in cases where memory block was modified by core.
 *
 * @param ptr Block to free
 * @param pool Pool to put it in
 * @param num_cache_lines
 *                Cache lines to invalidate
 */
static inline void cvmx_fpa_free(void *ptr, uint64_t pool, uint64_t num_cache_lines)
{
    cvmx_addr_t newptr;
    newptr.u64 = cvmx_ptr_to_phys(ptr);
    newptr.sfilldidspace.didspace = CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA,pool));
    /* Make sure that any previous writes to memory go out before we free this buffer.
    ** This also serves as a barrier to prevent GCC from reordering operations to after
    ** the free. */
    CVMX_SYNCWS;
    /* value written is number of cache lines not written back */
    cvmx_write_io(newptr.u64, num_cache_lines);
}
/**
* Setup a FPA pool to control a new block of memory.
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
* @param pool Pool to initialize
* 0 <= pool < 8
* @param name Constant character string to name this pool.
* String is not copied.
* @param buffer Pointer to the block of memory to use. This must be
* accessable by all processors and external hardware.
* @param block_size Size for each block controlled by the FPA
* @param num_blocks Number of blocks
*
* @return 0 on Success,
* -1 on failure
*/
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
uint64_t block_size, uint64_t num_blocks);
/**
* Shutdown a Memory pool and validate that it had all of
* the buffers originally placed in it. This should only be
* called by one processor after all hardware has finished
* using the pool. Most like you will want to have called
* cvmx_helper_shutdown_packet_io_global() before this
* function to make sure all FPA buffers are out of the packet
* IO hardware.
*
* @param pool Pool to shutdown
*
* @return Zero on success
* - Positive is count of missing buffers
* - Negative is too many buffers or corrupted pointers
*/
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
/**
* Get the size of blocks controlled by the pool
* This is resolved to a constant at compile time.
*
* @param pool Pool to access
* @return Size of the block in bytes
*/
uint64_t cvmx_fpa_get_block_size(uint64_t pool);
#ifdef __cplusplus
}
#endif
#endif /* __CVM_FPA_H__ */

View File

@ -1,96 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the GMX hardware.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_GMX_H__
#define __CVMX_GMX_H__
#ifdef __cplusplus
extern "C" {
#endif
/* CSR typedefs have been moved to cvmx-gmx-defs.h */
/**
* Disables the sending of flow control (pause) frames on the specified
* RGMII port(s).
*
* @param interface Which interface (0 or 1)
* @param port_mask Mask (4bits) of which ports on the interface to disable
* backpressure on.
* 1 => disable backpressure
* 0 => enable backpressure
*
* @return 0 on success
* -1 on error
*/
static inline int cvmx_gmx_set_backpressure_override(uint32_t interface, uint32_t port_mask)
{
cvmx_gmxx_tx_ovr_bp_t ovr_bp;

/* Guard clauses: port_mask is a 4 bit field, interface a 1 bit field */
if (port_mask & ~0xf)
    return -1;
if (interface & ~0x1)
    return -1;

ovr_bp.u64 = 0;
ovr_bp.s.en = port_mask;        /* Per port enable back pressure override */
ovr_bp.s.ign_full = port_mask;  /* Ignore the RX FIFO full when computing BP */
cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), ovr_bp.u64);
return 0;
}
#ifdef __cplusplus
}
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,842 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2012 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* cvmx-gpio-defs.h
*
* Configuration and status register (CSR) type definitions for
* Octeon gpio.
*
* This file is auto generated. Do not edit.
*
* <hr>$Revision$<hr>
*
*/
#ifndef __CVMX_GPIO_DEFS_H__
#define __CVMX_GPIO_DEFS_H__
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* CSR address of GPIO_BIT_CFG(offset); warns when 'offset' is out of
   range (>15) or the register does not exist on the detected chip model. */
static inline uint64_t CVMX_GPIO_BIT_CFGX(unsigned long offset)
{
if (!(
(OCTEON_IS_MODEL(OCTEON_CN30XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN31XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN38XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN50XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN58XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 15))) ||
(OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 15)))))
cvmx_warn("CVMX_GPIO_BIT_CFGX(%lu) is invalid on this chip\n", offset);
return CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8;
}
#else
#define CVMX_GPIO_BIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000800ull) + ((offset) & 15) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_GPIO_BOOT_ENA CVMX_GPIO_BOOT_ENA_FUNC()
/* CSR address of GPIO_BOOT_ENA; warns if the chip model lacks this register. */
static inline uint64_t CVMX_GPIO_BOOT_ENA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
cvmx_warn("CVMX_GPIO_BOOT_ENA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x00010700000008A8ull);
}
#else
#define CVMX_GPIO_BOOT_ENA (CVMX_ADD_IO_SEG(0x00010700000008A8ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* CSR address of GPIO_CLK_GEN(offset); warns when 'offset' is out of
   range (>3) or the register does not exist on the detected chip model. */
static inline uint64_t CVMX_GPIO_CLK_GENX(unsigned long offset)
{
if (!(
(OCTEON_IS_MODEL(OCTEON_CN52XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CN56XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 3))) ||
(OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 3)))))
cvmx_warn("CVMX_GPIO_CLK_GENX(%lu) is invalid on this chip\n", offset);
return CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8;
}
#else
#define CVMX_GPIO_CLK_GENX(offset) (CVMX_ADD_IO_SEG(0x00010700000008C0ull) + ((offset) & 3) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* CSR address of GPIO_CLK_QLM(offset); warns when 'offset' is out of
   range (>1) or the register does not exist on the detected chip model. */
static inline uint64_t CVMX_GPIO_CLK_QLMX(unsigned long offset)
{
if (!(
(OCTEON_IS_MODEL(OCTEON_CN61XX) && ((offset <= 1))) ||
(OCTEON_IS_MODEL(OCTEON_CN63XX) && ((offset <= 1))) ||
(OCTEON_IS_MODEL(OCTEON_CN66XX) && ((offset <= 1))) ||
(OCTEON_IS_MODEL(OCTEON_CN68XX) && ((offset <= 1))) ||
(OCTEON_IS_MODEL(OCTEON_CNF71XX) && ((offset <= 1)))))
cvmx_warn("CVMX_GPIO_CLK_QLMX(%lu) is invalid on this chip\n", offset);
return CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8;
}
#else
#define CVMX_GPIO_CLK_QLMX(offset) (CVMX_ADD_IO_SEG(0x00010700000008E0ull) + ((offset) & 1) * 8)
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_GPIO_DBG_ENA CVMX_GPIO_DBG_ENA_FUNC()
/* CSR address of GPIO_DBG_ENA; warns if the chip model lacks this register. */
static inline uint64_t CVMX_GPIO_DBG_ENA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)))
cvmx_warn("CVMX_GPIO_DBG_ENA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x00010700000008A0ull);
}
#else
#define CVMX_GPIO_DBG_ENA (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
#endif
#define CVMX_GPIO_INT_CLR (CVMX_ADD_IO_SEG(0x0001070000000898ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_GPIO_MULTI_CAST CVMX_GPIO_MULTI_CAST_FUNC()
/* CSR address of GPIO_MULTI_CAST; warns if the chip model lacks this register. */
static inline uint64_t CVMX_GPIO_MULTI_CAST_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)))
cvmx_warn("CVMX_GPIO_MULTI_CAST not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x00010700000008B0ull);
}
#else
#define CVMX_GPIO_MULTI_CAST (CVMX_ADD_IO_SEG(0x00010700000008B0ull))
#endif
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_GPIO_PIN_ENA CVMX_GPIO_PIN_ENA_FUNC()
/* CSR address of GPIO_PIN_ENA; warns if the chip model lacks this register. */
static inline uint64_t CVMX_GPIO_PIN_ENA_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CN66XX)))
cvmx_warn("CVMX_GPIO_PIN_ENA not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x00010700000008B8ull);
}
#else
#define CVMX_GPIO_PIN_ENA (CVMX_ADD_IO_SEG(0x00010700000008B8ull))
#endif
#define CVMX_GPIO_RX_DAT (CVMX_ADD_IO_SEG(0x0001070000000880ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
#define CVMX_GPIO_TIM_CTL CVMX_GPIO_TIM_CTL_FUNC()
/* CSR address of GPIO_TIM_CTL; warns if the chip model lacks this register. */
static inline uint64_t CVMX_GPIO_TIM_CTL_FUNC(void)
{
if (!(OCTEON_IS_MODEL(OCTEON_CN68XX)))
cvmx_warn("CVMX_GPIO_TIM_CTL not supported on this chip\n");
return CVMX_ADD_IO_SEG(0x00010700000008A0ull);
}
#else
#define CVMX_GPIO_TIM_CTL (CVMX_ADD_IO_SEG(0x00010700000008A0ull))
#endif
#define CVMX_GPIO_TX_CLR (CVMX_ADD_IO_SEG(0x0001070000000890ull))
#define CVMX_GPIO_TX_SET (CVMX_ADD_IO_SEG(0x0001070000000888ull))
#if CVMX_ENABLE_CSR_ADDRESS_CHECKING
/* CSR address of GPIO_XBIT_CFG(offset) for the extended GPIO pins; the
   valid offset window (16-23 or 16-19) depends on the chip model. */
static inline uint64_t CVMX_GPIO_XBIT_CFGX(unsigned long offset)
{
if (!(
(OCTEON_IS_MODEL(OCTEON_CN30XX) && (((offset >= 16) && (offset <= 23)))) ||
(OCTEON_IS_MODEL(OCTEON_CN31XX) && (((offset >= 16) && (offset <= 23)))) ||
(OCTEON_IS_MODEL(OCTEON_CN50XX) && (((offset >= 16) && (offset <= 23)))) ||
(OCTEON_IS_MODEL(OCTEON_CN61XX) && (((offset >= 16) && (offset <= 19)))) ||
(OCTEON_IS_MODEL(OCTEON_CN66XX) && (((offset >= 16) && (offset <= 19)))) ||
(OCTEON_IS_MODEL(OCTEON_CNF71XX) && (((offset >= 16) && (offset <= 19))))))
cvmx_warn("CVMX_GPIO_XBIT_CFGX(%lu) is invalid on this chip\n", offset);
return CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16;
}
#else
#define CVMX_GPIO_XBIT_CFGX(offset) (CVMX_ADD_IO_SEG(0x0001070000000900ull) + ((offset) & 31) * 8 - 8*16)
#endif
/**
 * cvmx_gpio_bit_cfg#
 *
 * Per-pin GPIO configuration register layout; available fields vary by
 * chip model (see the per-chip struct variants below).
 *
 * Notes:
 * Only first 16 GPIO pins can introduce interrupts, GPIO_XBIT_CFG16(17,18,19)[INT_EN] and [INT_TYPE]
 * will not be used, read out always zero.
 */
union cvmx_gpio_bit_cfgx {
uint64_t u64;
struct cvmx_gpio_bit_cfgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63 : 47;
uint64_t synce_sel : 2; /**< Selects the QLM clock output
x0=Normal GPIO output
01=GPIO QLM clock selected by CSR GPIO_CLK_QLM0
11=GPIO QLM clock selected by CSR GPIO_CLK_QLM1 */
uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
uint64_t int_type : 1; /**< Type of interrupt
0 = level (default)
1 = rising edge */
uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
uint64_t rx_xor : 1; /**< Invert the GPIO pin */
uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
#else
uint64_t tx_oe : 1;
uint64_t rx_xor : 1;
uint64_t int_en : 1;
uint64_t int_type : 1;
uint64_t fil_cnt : 4;
uint64_t fil_sel : 4;
uint64_t clk_sel : 2;
uint64_t clk_gen : 1;
uint64_t synce_sel : 2;
uint64_t reserved_17_63 : 47;
#endif
} s;
struct cvmx_gpio_bit_cfgx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63 : 52;
uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
uint64_t int_type : 1; /**< Type of interrupt
0 = level (default)
1 = rising edge */
uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
uint64_t rx_xor : 1; /**< Invert the GPIO pin */
uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
#else
uint64_t tx_oe : 1;
uint64_t rx_xor : 1;
uint64_t int_en : 1;
uint64_t int_type : 1;
uint64_t fil_cnt : 4;
uint64_t fil_sel : 4;
uint64_t reserved_12_63 : 52;
#endif
} cn30xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn31xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn38xxp2;
struct cvmx_gpio_bit_cfgx_cn30xx cn50xx;
struct cvmx_gpio_bit_cfgx_cn52xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_15_63 : 49;
uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
uint64_t int_type : 1; /**< Type of interrupt
0 = level (default)
1 = rising edge */
uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
uint64_t rx_xor : 1; /**< Invert the GPIO pin */
uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
#else
uint64_t tx_oe : 1;
uint64_t rx_xor : 1;
uint64_t int_en : 1;
uint64_t int_type : 1;
uint64_t fil_cnt : 4;
uint64_t fil_sel : 4;
uint64_t clk_sel : 2;
uint64_t clk_gen : 1;
uint64_t reserved_15_63 : 49;
#endif
} cn52xx;
struct cvmx_gpio_bit_cfgx_cn52xx cn52xxp1;
struct cvmx_gpio_bit_cfgx_cn52xx cn56xx;
struct cvmx_gpio_bit_cfgx_cn52xx cn56xxp1;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xx;
struct cvmx_gpio_bit_cfgx_cn30xx cn58xxp1;
struct cvmx_gpio_bit_cfgx_s cn61xx;
struct cvmx_gpio_bit_cfgx_s cn63xx;
struct cvmx_gpio_bit_cfgx_s cn63xxp1;
struct cvmx_gpio_bit_cfgx_s cn66xx;
struct cvmx_gpio_bit_cfgx_s cn68xx;
struct cvmx_gpio_bit_cfgx_s cn68xxp1;
struct cvmx_gpio_bit_cfgx_s cnf71xx;
};
typedef union cvmx_gpio_bit_cfgx cvmx_gpio_bit_cfgx_t;
/**
 * cvmx_gpio_boot_ena
 *
 * Boot bus chip-enable routing onto GPIO pins; present only on the
 * cn30xx/cn31xx/cn50xx models listed below.
 */
union cvmx_gpio_boot_ena {
uint64_t u64;
struct cvmx_gpio_boot_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63 : 52;
uint64_t boot_ena : 4; /**< Drive boot bus chip enables [7:4] on gpio [11:8] */
uint64_t reserved_0_7 : 8;
#else
uint64_t reserved_0_7 : 8;
uint64_t boot_ena : 4;
uint64_t reserved_12_63 : 52;
#endif
} s;
struct cvmx_gpio_boot_ena_s cn30xx;
struct cvmx_gpio_boot_ena_s cn31xx;
struct cvmx_gpio_boot_ena_s cn50xx;
};
typedef union cvmx_gpio_boot_ena cvmx_gpio_boot_ena_t;
/**
 * cvmx_gpio_clk_gen#
 *
 * GPIO clock generator frequency control; see the N field formula below.
 */
union cvmx_gpio_clk_genx {
uint64_t u64;
struct cvmx_gpio_clk_genx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_32_63 : 32;
uint64_t n : 32; /**< Determines the frequency of the GPIO clk generator
NOTE: Fgpio_clk = Feclk * N / 2^32
N = (Fgpio_clk / Feclk) * 2^32
NOTE: writing N == 0 stops the clock generator
N should be <= 2^31-1. */
#else
uint64_t n : 32;
uint64_t reserved_32_63 : 32;
#endif
} s;
struct cvmx_gpio_clk_genx_s cn52xx;
struct cvmx_gpio_clk_genx_s cn52xxp1;
struct cvmx_gpio_clk_genx_s cn56xx;
struct cvmx_gpio_clk_genx_s cn56xxp1;
struct cvmx_gpio_clk_genx_s cn61xx;
struct cvmx_gpio_clk_genx_s cn63xx;
struct cvmx_gpio_clk_genx_s cn63xxp1;
struct cvmx_gpio_clk_genx_s cn66xx;
struct cvmx_gpio_clk_genx_s cn68xx;
struct cvmx_gpio_clk_genx_s cn68xxp1;
struct cvmx_gpio_clk_genx_s cnf71xx;
};
typedef union cvmx_gpio_clk_genx cvmx_gpio_clk_genx_t;
/**
 * cvmx_gpio_clk_qlm#
 *
 * Notes:
 * QLM0(A) and QLM1(B) can be configured to source any of QLM0 or QLM2 as clock source.
 * Clock speed output for different modes ...
 *
 * Speed With Speed with
 * SERDES speed (Gbaud) DIV=0 (MHz) DIV=1 (MHz)
 * **********************************************************
 * 1.25 62.5 31.25
 * 2.5 125 62.5
 * 3.125 156.25 78.125
 * 5.0 250 125
 * 6.25 312.5 156.25
 */
union cvmx_gpio_clk_qlmx {
uint64_t u64;
struct cvmx_gpio_clk_qlmx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_11_63 : 53;
uint64_t qlm_sel : 3; /**< Selects which DLM to select from
x0 = select DLM0 as clock source
x1 = Disabled */
uint64_t reserved_3_7 : 5;
uint64_t div : 1; /**< Internal clock divider
0=DIV2
1=DIV4 */
uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLMx to use as
the GPIO internal QLMx clock. The GPIO block can
support upto two unique clocks to send out any
GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL]
The clock can either be a divided by 2 or divide
by 4 of the selected RX lane clock. */
#else
uint64_t lane_sel : 2;
uint64_t div : 1;
uint64_t reserved_3_7 : 5;
uint64_t qlm_sel : 3;
uint64_t reserved_11_63 : 53;
#endif
} s;
struct cvmx_gpio_clk_qlmx_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_10_63 : 54;
uint64_t qlm_sel : 2; /**< Selects which QLM to select from
01 = select QLM0 as clock source
1x = select QLM2 as clock source
0 = Disabled */
uint64_t reserved_3_7 : 5;
uint64_t div : 1; /**< Internal clock divider
0=DIV2
1=DIV4 */
uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLMx to use as
the GPIO internal QLMx clock. The GPIO block can
support upto two unique clocks to send out any
GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL]
The clock can either be a divided by 2 or divide
by 4 of the selected RX lane clock. */
#else
uint64_t lane_sel : 2;
uint64_t div : 1;
uint64_t reserved_3_7 : 5;
uint64_t qlm_sel : 2;
uint64_t reserved_10_63 : 54;
#endif
} cn61xx;
struct cvmx_gpio_clk_qlmx_cn63xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_3_63 : 61;
uint64_t div : 1; /**< Internal clock divider
0=DIV2
1=DIV4 */
uint64_t lane_sel : 2; /**< Selects which RX lane clock from QLM2 to use as
the GPIO internal QLMx clock. The GPIO block can
support upto two unique clocks to send out any
GPIO pin as configured by $GPIO_BIT_CFG[SYNCE_SEL]
The clock can either be a divided by 2 or divide
by 4 of the selected RX lane clock. */
#else
uint64_t lane_sel : 2;
uint64_t div : 1;
uint64_t reserved_3_63 : 61;
#endif
} cn63xx;
struct cvmx_gpio_clk_qlmx_cn63xx cn63xxp1;
struct cvmx_gpio_clk_qlmx_cn61xx cn66xx;
struct cvmx_gpio_clk_qlmx_s cn68xx;
struct cvmx_gpio_clk_qlmx_s cn68xxp1;
struct cvmx_gpio_clk_qlmx_cn61xx cnf71xx;
};
typedef union cvmx_gpio_clk_qlmx cvmx_gpio_clk_qlmx_t;
/**
 * cvmx_gpio_dbg_ena
 *
 * Debug port output enable on GPIO pins; present only on the
 * cn30xx/cn31xx/cn50xx models listed below.
 */
union cvmx_gpio_dbg_ena {
uint64_t u64;
struct cvmx_gpio_dbg_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_21_63 : 43;
uint64_t dbg_ena : 21; /**< Enable the debug port to be driven on the gpio */
#else
uint64_t dbg_ena : 21;
uint64_t reserved_21_63 : 43;
#endif
} s;
struct cvmx_gpio_dbg_ena_s cn30xx;
struct cvmx_gpio_dbg_ena_s cn31xx;
struct cvmx_gpio_dbg_ena_s cn50xx;
};
typedef union cvmx_gpio_dbg_ena cvmx_gpio_dbg_ena_t;
/**
 * cvmx_gpio_int_clr
 *
 * Notes:
 * Only 16 out of 20 GPIOs support interrupts. GPIO_INT_CLR only applies to GPIO0-GPIO15.
 *
 */
union cvmx_gpio_int_clr {
uint64_t u64;
struct cvmx_gpio_int_clr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63 : 48;
uint64_t type : 16; /**< Clear the interrupt rising edge detector */
#else
uint64_t type : 16;
uint64_t reserved_16_63 : 48;
#endif
} s;
struct cvmx_gpio_int_clr_s cn30xx;
struct cvmx_gpio_int_clr_s cn31xx;
struct cvmx_gpio_int_clr_s cn38xx;
struct cvmx_gpio_int_clr_s cn38xxp2;
struct cvmx_gpio_int_clr_s cn50xx;
struct cvmx_gpio_int_clr_s cn52xx;
struct cvmx_gpio_int_clr_s cn52xxp1;
struct cvmx_gpio_int_clr_s cn56xx;
struct cvmx_gpio_int_clr_s cn56xxp1;
struct cvmx_gpio_int_clr_s cn58xx;
struct cvmx_gpio_int_clr_s cn58xxp1;
struct cvmx_gpio_int_clr_s cn61xx;
struct cvmx_gpio_int_clr_s cn63xx;
struct cvmx_gpio_int_clr_s cn63xxp1;
struct cvmx_gpio_int_clr_s cn66xx;
struct cvmx_gpio_int_clr_s cn68xx;
struct cvmx_gpio_int_clr_s cn68xxp1;
struct cvmx_gpio_int_clr_s cnf71xx;
};
typedef union cvmx_gpio_int_clr cvmx_gpio_int_clr_t;
/**
 * cvmx_gpio_multi_cast
 *
 * Enables per-core (multicast) routing of GPIO<7:4> interrupts; see notes.
 *
 * Notes:
 * GPIO<7:4> have the option of operating in GPIO Interrupt Multicast mode. In
 * this mode, the PP GPIO interrupts (CIU_INT<0-7>_SUM0/CIU_INT<0-3>_SUM4[GPIO<7:4>] values are
 * stored per cnMIPS core.
 * For GPIO<7:4> (x=4-7):
 * When GPIO_MULTI_CAST[EN] = 1:
 * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
 * * Reads to CIU_INT<0-7>_SUM0/<0-3>_SUM4[GPIO<x>] will return a unique interrupt state per
 * cnMIPS core.
 * * Reads to CIU_INT32/33_SUM0/4[GPIO<x>] will return the common GPIO<x>
 * interrupt state.
 * * Write of '1' to CIU_INT<0-7>_SUM0/<0-3>_SUM4[GPIO<x>] will clear the individual
 * interrupt associated with the cnMIPS core.
 * * Write of '1' to CIU_INT32/33_SUM0/4[GPIO<x>] will clear the common GPIO<x>
 * interrupt state.
 * * Write of '1' to GPIO_INT_CLR[TYPE<x>] will clear all
 * CIU_INT*_SUM0/4[GPIO<x>] state across all cnMIPS cores and common GPIO<x> interrupt states.
 * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
 * * either leveled interrupt or interrupt not enabled, write of '1' to CIU_INT*_SUM0/4[GPIO<x>]
 * will have no effects.
 * When GPIO_MULTI_CAST[EN] = 0:
 * * Write of '1' to CIU_INT_SUM0/4[GPIO<x>] will have no effects, as this field is RO,
 * backward compatible with o63.
 * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
 * * Reads to CIU_INT*_SUM0/4[GPIO<x>] will return the common GPIO<X> interrupt state.
 * * Write of '1' to GPIO_INT_CLR[TYPE<x>] will clear all
 * CIU_INT*_SUM0/4[GPIO<x>] state across all cnMIPS cores and common GPIO<x> interrupt states.
 * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
 * * either leveled interrupt or interrupt not enabled, write of '1' to CIU_INT*_SUM0/4[GPIO<x>]
 * will have no effects.
 *
 * GPIO<15:8> and GPIO<3:0> will never be in multicast mode as those don't have per cnMIPS capabilities.
 * For GPIO<y> (y=0-3,8-15):
 * When GPIO_BIT_CFGx[INT_EN]==1 & GPIO_BIT_CFGx[INT_TYPE]==1 (edge detection and interrupt enabled):
 * * Reads to CIU_INT*_SUM0/4[GPIO<y>] will return the common GPIO<y> interrupt state.
 * * Write of '1' to GPIO_INT_CLR[TYPE<y>] will clear all CIU_INT*_SUM0/4[GPIO<y>] common GPIO<y>
 * interrupt states.
 * When GPIO_MULTI_CAST[EN] = 1:
 * * Write of '1' to CIU_INT*_SUM0/4[GPIO<y>] will clear the common GPIO<y> interrupt state.
 * When GPIO_MULTI_CAST[EN] = 0:
 * * Write of '1' to CIU_INT*_SUM0/4[GPIO<y>] has no effect, as this field is RO,
 * backward compatible to o63.
 * When GPIO_BIT_CFGx[INT_EN]==0 or GPIO_BIT_CFGx[INT_TYPE]==0,
 * * either leveled interrupt or interrupt not enabled, write of '1' to CIU_INT*_SUM0/4[GPIO<y>]
 * will have no effects.
 *
 * Whenever there is mode change, (GPIO_BIT_CFGx[INT_EN] or GPIO_BIT_CFGx[INT_TYPE] or GPIO_MULTI_CAST[EN])
 * software needs to write to $GPIO_INT_CLR to clear up all pending/stale interrupts.
 */
union cvmx_gpio_multi_cast {
uint64_t u64;
struct cvmx_gpio_multi_cast_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_1_63 : 63;
uint64_t en : 1; /**< Enable GPIO Interrupt Multicast mode
When EN is set, GPIO<7:4> will function in
multicast mode allowing these four GPIOs to
interrupt multi-cores.
Multicast functionality allows the GPIO to exist
as per cnMIPS interrupts as opposed to a global
interrupt. */
#else
uint64_t en : 1;
uint64_t reserved_1_63 : 63;
#endif
} s;
struct cvmx_gpio_multi_cast_s cn61xx;
struct cvmx_gpio_multi_cast_s cnf71xx;
};
typedef union cvmx_gpio_multi_cast cvmx_gpio_multi_cast_t;
/**
 * cvmx_gpio_pin_ena
 *
 * Notes:
 * GPIO0-GPIO17 has dedicated pins.
 * GPIO18 share pin with UART (UART0_CTS_L/GPIO_18), GPIO18 enabled when $GPIO_PIN_ENA[ENA18]=1
 * GPIO19 share pin with UART (UART1_CTS_L/GPIO_19), GPIO19 enabled when $GPIO_PIN_ENA[ENA19]=1
 */
union cvmx_gpio_pin_ena {
uint64_t u64;
struct cvmx_gpio_pin_ena_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63 : 44;
uint64_t ena19 : 1; /**< If 0, UART1_CTS_L/GPIO_19 pin is UART pin
If 1, UART1_CTS_L/GPIO_19 pin is GPIO19 pin */
uint64_t ena18 : 1; /**< If 0, UART0_CTS_L/GPIO_18 pin is UART pin
If 1, UART0_CTS_L/GPIO_18 pin is GPIO18 pin */
uint64_t reserved_0_17 : 18;
#else
uint64_t reserved_0_17 : 18;
uint64_t ena18 : 1;
uint64_t ena19 : 1;
uint64_t reserved_20_63 : 44;
#endif
} s;
struct cvmx_gpio_pin_ena_s cn66xx;
};
typedef union cvmx_gpio_pin_ena cvmx_gpio_pin_ena_t;
/**
 * cvmx_gpio_rx_dat
 *
 * GPIO input data register; the DAT field width varies per chip
 * (16, 20 or 24 bits — see the per-chip variants below).
 */
union cvmx_gpio_rx_dat {
uint64_t u64;
struct cvmx_gpio_rx_dat_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63 : 40;
uint64_t dat : 24; /**< GPIO Read Data */
#else
uint64_t dat : 24;
uint64_t reserved_24_63 : 40;
#endif
} s;
struct cvmx_gpio_rx_dat_s cn30xx;
struct cvmx_gpio_rx_dat_s cn31xx;
struct cvmx_gpio_rx_dat_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63 : 48;
uint64_t dat : 16; /**< GPIO Read Data */
#else
uint64_t dat : 16;
uint64_t reserved_16_63 : 48;
#endif
} cn38xx;
struct cvmx_gpio_rx_dat_cn38xx cn38xxp2;
struct cvmx_gpio_rx_dat_s cn50xx;
struct cvmx_gpio_rx_dat_cn38xx cn52xx;
struct cvmx_gpio_rx_dat_cn38xx cn52xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn56xx;
struct cvmx_gpio_rx_dat_cn38xx cn56xxp1;
struct cvmx_gpio_rx_dat_cn38xx cn58xx;
struct cvmx_gpio_rx_dat_cn38xx cn58xxp1;
struct cvmx_gpio_rx_dat_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63 : 44;
uint64_t dat : 20; /**< GPIO Read Data */
#else
uint64_t dat : 20;
uint64_t reserved_20_63 : 44;
#endif
} cn61xx;
struct cvmx_gpio_rx_dat_cn38xx cn63xx;
struct cvmx_gpio_rx_dat_cn38xx cn63xxp1;
struct cvmx_gpio_rx_dat_cn61xx cn66xx;
struct cvmx_gpio_rx_dat_cn38xx cn68xx;
struct cvmx_gpio_rx_dat_cn38xx cn68xxp1;
struct cvmx_gpio_rx_dat_cn61xx cnf71xx;
};
typedef union cvmx_gpio_rx_dat cvmx_gpio_rx_dat_t;
/**
 * cvmx_gpio_tim_ctl
 *
 * Routes a GPIO pin to the TIM unit; present only on the cn68xx
 * models listed below.
 */
union cvmx_gpio_tim_ctl {
uint64_t u64;
struct cvmx_gpio_tim_ctl_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_4_63 : 60;
uint64_t sel : 4; /**< Selects the GPIO pin to route to TIM */
#else
uint64_t sel : 4;
uint64_t reserved_4_63 : 60;
#endif
} s;
struct cvmx_gpio_tim_ctl_s cn68xx;
struct cvmx_gpio_tim_ctl_s cn68xxp1;
};
typedef union cvmx_gpio_tim_ctl cvmx_gpio_tim_ctl_t;
/**
 * cvmx_gpio_tx_clr
 *
 * Write-one-to-clear mask for the GPIO_TX_DAT output storage; the CLR
 * field width varies per chip (see variants below).
 */
union cvmx_gpio_tx_clr {
uint64_t u64;
struct cvmx_gpio_tx_clr_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63 : 40;
uint64_t clr : 24; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
to '0'. When read, CLR returns the GPIO_TX_DAT
storage. */
#else
uint64_t clr : 24;
uint64_t reserved_24_63 : 40;
#endif
} s;
struct cvmx_gpio_tx_clr_s cn30xx;
struct cvmx_gpio_tx_clr_s cn31xx;
struct cvmx_gpio_tx_clr_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63 : 48;
uint64_t clr : 16; /**< Bit mask to indicate which bits to drive to '0'. */
#else
uint64_t clr : 16;
uint64_t reserved_16_63 : 48;
#endif
} cn38xx;
struct cvmx_gpio_tx_clr_cn38xx cn38xxp2;
struct cvmx_gpio_tx_clr_s cn50xx;
struct cvmx_gpio_tx_clr_cn38xx cn52xx;
struct cvmx_gpio_tx_clr_cn38xx cn52xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn56xx;
struct cvmx_gpio_tx_clr_cn38xx cn56xxp1;
struct cvmx_gpio_tx_clr_cn38xx cn58xx;
struct cvmx_gpio_tx_clr_cn38xx cn58xxp1;
struct cvmx_gpio_tx_clr_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63 : 44;
uint64_t clr : 20; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
to '0'. When read, CLR returns the GPIO_TX_DAT
storage. */
#else
uint64_t clr : 20;
uint64_t reserved_20_63 : 44;
#endif
} cn61xx;
struct cvmx_gpio_tx_clr_cn38xx cn63xx;
struct cvmx_gpio_tx_clr_cn38xx cn63xxp1;
struct cvmx_gpio_tx_clr_cn61xx cn66xx;
struct cvmx_gpio_tx_clr_cn38xx cn68xx;
struct cvmx_gpio_tx_clr_cn38xx cn68xxp1;
struct cvmx_gpio_tx_clr_cn61xx cnf71xx;
};
typedef union cvmx_gpio_tx_clr cvmx_gpio_tx_clr_t;
/**
 * cvmx_gpio_tx_set
 *
 * Write-one-to-set mask for the GPIO_TX_DAT output storage; the SET
 * field width varies per chip (see variants below).
 */
union cvmx_gpio_tx_set {
uint64_t u64;
struct cvmx_gpio_tx_set_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_24_63 : 40;
uint64_t set : 24; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
to '1'. When read, SET returns the GPIO_TX_DAT
storage. */
#else
uint64_t set : 24;
uint64_t reserved_24_63 : 40;
#endif
} s;
struct cvmx_gpio_tx_set_s cn30xx;
struct cvmx_gpio_tx_set_s cn31xx;
struct cvmx_gpio_tx_set_cn38xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_16_63 : 48;
uint64_t set : 16; /**< Bit mask to indicate which bits to drive to '1'. */
#else
uint64_t set : 16;
uint64_t reserved_16_63 : 48;
#endif
} cn38xx;
struct cvmx_gpio_tx_set_cn38xx cn38xxp2;
struct cvmx_gpio_tx_set_s cn50xx;
struct cvmx_gpio_tx_set_cn38xx cn52xx;
struct cvmx_gpio_tx_set_cn38xx cn52xxp1;
struct cvmx_gpio_tx_set_cn38xx cn56xx;
struct cvmx_gpio_tx_set_cn38xx cn56xxp1;
struct cvmx_gpio_tx_set_cn38xx cn58xx;
struct cvmx_gpio_tx_set_cn38xx cn58xxp1;
struct cvmx_gpio_tx_set_cn61xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_20_63 : 44;
uint64_t set : 20; /**< Bit mask to indicate which GPIO_TX_DAT bits to set
to '1'. When read, SET returns the GPIO_TX_DAT
storage. */
#else
uint64_t set : 20;
uint64_t reserved_20_63 : 44;
#endif
} cn61xx;
struct cvmx_gpio_tx_set_cn38xx cn63xx;
struct cvmx_gpio_tx_set_cn38xx cn63xxp1;
struct cvmx_gpio_tx_set_cn61xx cn66xx;
struct cvmx_gpio_tx_set_cn38xx cn68xx;
struct cvmx_gpio_tx_set_cn38xx cn68xxp1;
struct cvmx_gpio_tx_set_cn61xx cnf71xx;
};
typedef union cvmx_gpio_tx_set cvmx_gpio_tx_set_t;
/**
 * cvmx_gpio_xbit_cfg#
 *
 * Configuration for the extended GPIO pins (offsets 16 and up — see
 * CVMX_GPIO_XBIT_CFGX for the per-chip offset ranges).
 *
 * Notes:
 * Only first 16 GPIO pins can introduce interrupts, GPIO_XBIT_CFG16(17,18,19)[INT_EN] and [INT_TYPE]
 * will not be used, read out always zero.
 */
union cvmx_gpio_xbit_cfgx {
uint64_t u64;
struct cvmx_gpio_xbit_cfgx_s {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_17_63 : 47;
uint64_t synce_sel : 2; /**< Selects the QLM clock output
x0=Normal GPIO output
01=GPIO QLM clock selected by CSR GPIO_CLK_QLM0
11=GPIO QLM clock selected by CSR GPIO_CLK_QLM1 */
uint64_t clk_gen : 1; /**< When TX_OE is set, GPIO pin becomes a clock */
uint64_t clk_sel : 2; /**< Selects which of the 4 GPIO clock generators */
uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
uint64_t int_type : 1; /**< Type of interrupt
0 = level (default)
1 = rising edge */
uint64_t int_en : 1; /**< Bit mask to indicate which bits to raise interrupt */
uint64_t rx_xor : 1; /**< Invert the GPIO pin */
uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
#else
uint64_t tx_oe : 1;
uint64_t rx_xor : 1;
uint64_t int_en : 1;
uint64_t int_type : 1;
uint64_t fil_cnt : 4;
uint64_t fil_sel : 4;
uint64_t clk_sel : 2;
uint64_t clk_gen : 1;
uint64_t synce_sel : 2;
uint64_t reserved_17_63 : 47;
#endif
} s;
struct cvmx_gpio_xbit_cfgx_cn30xx {
#ifdef __BIG_ENDIAN_BITFIELD
uint64_t reserved_12_63 : 52;
uint64_t fil_sel : 4; /**< Global counter bit-select (controls sample rate) */
uint64_t fil_cnt : 4; /**< Number of consecutive samples to change state */
uint64_t reserved_2_3 : 2;
uint64_t rx_xor : 1; /**< Invert the GPIO pin */
uint64_t tx_oe : 1; /**< Drive the GPIO pin as an output pin */
#else
uint64_t tx_oe : 1;
uint64_t rx_xor : 1;
uint64_t reserved_2_3 : 2;
uint64_t fil_cnt : 4;
uint64_t fil_sel : 4;
uint64_t reserved_12_63 : 52;
#endif
} cn30xx;
struct cvmx_gpio_xbit_cfgx_cn30xx cn31xx;
struct cvmx_gpio_xbit_cfgx_cn30xx cn50xx;
struct cvmx_gpio_xbit_cfgx_s cn61xx;
struct cvmx_gpio_xbit_cfgx_s cn66xx;
struct cvmx_gpio_xbit_cfgx_s cnf71xx;
};
typedef union cvmx_gpio_xbit_cfgx cvmx_gpio_xbit_cfgx_t;
#endif

View File

@ -1,185 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* General Purpose IO interface.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_GPIO_H__
#define __CVMX_GPIO_H__
#ifdef __cplusplus
extern "C" {
#endif
/* CSR typedefs have been moved to cvmx-gpio-defs.h */
/**
 * Clear the interrupt rising edge detector for the supplied
 * pins in the mask. Chips which have more than 16 GPIO pins
 * can't use them for interrupts.
 *
 * @param clear_mask Mask of pins to clear
 */
static inline void cvmx_gpio_interrupt_clear(uint16_t clear_mask)
{
    cvmx_gpio_int_clr_t int_clr;

    if (OCTEON_IS_MODEL(OCTEON_CN61XX))
    {
        cvmx_gpio_multi_cast_t mcast;
        cvmx_gpio_bit_cfgx_t bit_cfg;
        int corenum = cvmx_get_core_num();

        mcast.u64 = cvmx_read_csr(CVMX_GPIO_MULTI_CAST);
        bit_cfg.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(corenum));
        /* If Multicast mode is enabled, and GPIO interrupt is enabled for
           edge detection, then GPIO<4..7> interrupts are per core */
        if (mcast.s.en && bit_cfg.s.int_en && bit_cfg.s.int_type)
        {
            cvmx_ciu_intx_sum0_t sum0;

            /* Clear GPIO<4..7> per core */
            sum0.u64 = cvmx_read_csr(CVMX_CIU_INTX_SUM0(corenum * 2));
            sum0.s.gpio = clear_mask & 0xf0;
            cvmx_write_csr(CVMX_CIU_INTX_SUM0(corenum * 2), sum0.u64);
            /* Clear other GPIO pins for all cores. */
            cvmx_write_csr(CVMX_GPIO_INT_CLR, (clear_mask & ~0xf0));
            return;
        }
    }
    /* Clear GPIO pins state across all cores and common interrupt states. */
    int_clr.u64 = 0;
    int_clr.s.type = clear_mask;
    cvmx_write_csr(CVMX_GPIO_INT_CLR, int_clr.u64);
}
/**
 * GPIO Output Pin
 *
 * Configures a GPIO pin's output driver.  Pins 0-15 exist on all
 * models; pins 16-19 exist only on CN61XX/CN66XX (20-pin parts) and
 * use the separate XBIT_CFG registers.
 *
 * @param bit  The GPIO to use (valid range 0-19)
 * @param mode Non-zero to drive GPIO as an output pin, zero to not.
 */
static inline void cvmx_gpio_cfg(int bit, int mode)
{
    if (bit < 0 || bit > 19)
    {
        /* Previously such values fell through to the BIT_CFGX path and
           accessed an out-of-range CSR index; reject them instead. */
        cvmx_dprintf("cvmx_gpio_cfg: Invalid GPIO bit(%d)\n", bit);
    }
    else if (bit > 15)
    {
        /* CN61XX/CN66XX has 20 GPIO pins and only 16 are interruptable. */
        if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
        {
            cvmx_gpio_xbit_cfgx_t gpio_xbit;

            gpio_xbit.u64 = cvmx_read_csr(CVMX_GPIO_XBIT_CFGX(bit));
            gpio_xbit.s.tx_oe = (mode != 0);
            cvmx_write_csr(CVMX_GPIO_XBIT_CFGX(bit), gpio_xbit.u64);
        }
        else
            cvmx_dprintf("cvmx_gpio_cfg: Invalid GPIO bit(%d)\n", bit);
    }
    else
    {
        cvmx_gpio_bit_cfgx_t gpio_bit;

        gpio_bit.u64 = cvmx_read_csr(CVMX_GPIO_BIT_CFGX(bit));
        gpio_bit.s.tx_oe = (mode != 0);
        cvmx_write_csr(CVMX_GPIO_BIT_CFGX(bit), gpio_bit.u64);
    }
}
/**
 * GPIO Read Data
 *
 * Samples the current input levels of the GPIO pins.
 *
 * @return Status of the GPIO pins
 */
static inline uint32_t cvmx_gpio_read(void)
{
    cvmx_gpio_rx_dat_t rx_dat;

    rx_dat.u64 = cvmx_read_csr(CVMX_GPIO_RX_DAT);
    return rx_dat.s.dat;
}
/**
 * GPIO Clear pin
 *
 * Drives the selected output pins to '0' via the TX_CLR register.
 *
 * @param clear_mask Bit mask to indicate which bits to drive to '0'.
 */
static inline void cvmx_gpio_clear(uint32_t clear_mask)
{
    cvmx_gpio_tx_clr_t tx_clr;

    tx_clr.u64 = 0;
    tx_clr.s.clr = clear_mask;
    cvmx_write_csr(CVMX_GPIO_TX_CLR, tx_clr.u64);
}
/**
 * GPIO Set pin
 *
 * Drives the selected output pins to '1' via the TX_SET register.
 *
 * @param set_mask Bit mask to indicate which bits to drive to '1'.
 */
static inline void cvmx_gpio_set(uint32_t set_mask)
{
    cvmx_gpio_tx_set_t tx_set;

    tx_set.u64 = 0;
    tx_set.s.set = set_mask;
    cvmx_write_csr(CVMX_GPIO_TX_SET, tx_set.u64);
}
#ifdef __cplusplus
}
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,222 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper functions to abstract board specific data about
* network ports from the rest of the cvmx-helper files.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_BOARD_H__
#define __CVMX_HELPER_BOARD_H__
#ifdef __cplusplus
extern "C" {
#endif
/* USB block reference clock configurations; returned by
   __cvmx_helper_board_usb_get_clock_type().  Names encode the
   source/frequency (REF = external reference, CRYSTAL = crystal;
   number is MHz per the name — confirm against board schematic). */
typedef enum {
    USB_CLOCK_TYPE_REF_12,
    USB_CLOCK_TYPE_REF_24,
    USB_CLOCK_TYPE_REF_48,
    USB_CLOCK_TYPE_CRYSTAL_12,
} cvmx_helper_board_usb_clock_types_t;
/* Broad PHY families the board-support code distinguishes between. */
typedef enum {
    BROADCOM_GENERIC_PHY,
    MARVELL_GENERIC_PHY,
} cvmx_phy_type_t;
/* Flag bits for cvmx_helper_board_link_set_phy(): bit 0 enables
   autonegotiation; bits <2:1> form a 2-bit flow-control field. */
typedef enum {
    set_phy_link_flags_autoneg = 0x1,
    set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
    set_phy_link_flags_flow_control_enable = 0x1 << 1,
    set_phy_link_flags_flow_control_disable = 0x2 << 1,
    set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
} cvmx_helper_board_set_phy_link_flags_types_t;
/* Fake IPD port, the RGMII/MII interface may use different PHY, use this
macro to return appropriate MIX address to read the PHY. */
#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* cvmx_override_board_link_get(int ipd_port) is a function
* pointer. It is meant to allow customization of the process of
* talking to a PHY to determine link speed. It is called every
* time a PHY must be polled for link status. Users should set
* this pointer to a function before calling any cvmx-helper
* operations.
*/
extern cvmx_helper_link_info_t (*cvmx_override_board_link_get)(int ipd_port);
/**
* Return the MII PHY address associated with the given IPD
* port. A result of -1 means there isn't a MII capable PHY
* connected to this port. On chips supporting multiple MII
* busses the bus number is encoded in bits <15:8>.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param ipd_port Octeon IPD port to get the MII address for.
*
* @return MII PHY address and bus number or -1.
*/
extern int cvmx_helper_board_get_mii_address(int ipd_port);
/**
 * This function is the board specific method of changing the PHY
* speed, duplex, and autonegotiation. This programs the PHY and
* not Octeon. This can be used to force Octeon's links to
* specific settings.
*
* @param phy_addr The address of the PHY to program
* @param link_flags
* Flags to control autonegotiation. Bit 0 is autonegotiation
* enable/disable to maintain backward compatibility.
* @param link_info Link speed to program. If the speed is zero and autonegotiation
* is enabled, all possible negotiation speeds are advertised.
*
* @return Zero on success, negative on failure
*/
int cvmx_helper_board_link_set_phy(int phy_addr, cvmx_helper_board_set_phy_link_flags_types_t link_flags,
cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* This function is the board specific method of determining an
* ethernet ports link speed. Most Octeon boards have Marvell PHYs
* and are handled by the fall through case. This function must be
* updated for boards that don't have the normal Marvell PHYs.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param ipd_port IPD input port associated with the port we want to get link
* status for.
*
* @return The ports link status. If the link isn't fully resolved, this must
* return zero.
*/
extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
/**
* @INTERNAL
* This function is called by cvmx_helper_interface_probe() after it
* determines the number of ports Octeon can support on a specific
* interface. This function is the per board location to override
* this value. It is called with the number of ports Octeon might
* support and should return the number of actual ports on the
* board.
*
* This function must be modified for every new Octeon board.
* Internally it uses switch statements based on the cvmx_sysinfo
* data to determine board types and revisions. It relies on the
* fact that every Octeon board receives a unique board type
* enumeration from the bootloader.
*
* @param interface Interface to probe
* @param supported_ports
* Number of ports Octeon supports.
*
* @return Number of ports the actual board supports. Many times this will
 *         simply be "supported_ports".
*/
extern int __cvmx_helper_board_interface_probe(int interface, int supported_ports);
/**
* @INTERNAL
* Enable packet input/output from the hardware. This function is
* called after by cvmx_helper_packet_hardware_enable() to
* perform board specific initialization. For most boards
* nothing is needed.
*
* @param interface Interface to enable
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_board_hardware_enable(int interface);
/**
* @INTERNAL
* Gets the clock type used for the USB block based on board type.
* Used by the USB code for auto configuration of clock type.
*
* @return USB clock type enumeration
*/
cvmx_helper_board_usb_clock_types_t __cvmx_helper_board_usb_get_clock_type(void);
/**
* @INTERNAL
* Adjusts the number of available USB ports on Octeon based on board
* specifics.
*
* @param supported_ports expected number of ports based on chip type;
*
*
* @return number of available usb ports, based on board specifics.
* Return value is supported_ports if function does not
* override.
*/
int __cvmx_helper_board_usb_get_num_ports(int supported_ports);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_HELPER_BOARD_H__ */

View File

@ -1,717 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper Functions for the Configuration Framework
*
* <hr>$Revision: 0 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-util.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#include <asm/octeon/cvmx-helper-ilk.h>
#include <asm/octeon/cvmx-ilk.h>
#include <asm/octeon/cvmx-config.h>
#else
#include "cvmx.h"
#include "cvmx-bootmem.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-ilk.h"
#include "cvmx-helper-ilk.h"
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "cvmx-config.h"
#include "executive-config.h"
#endif
#endif
/* Fallback min() if the environment does not provide one.  The whole
 * expansion is parenthesized so it binds correctly inside larger
 * expressions (the old form broke under e.g. "2 * min(a, b)"). */
#if defined(min)
#else
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
/* #define CVMX_HELPER_CFG_DEBUG */
/*
 * Per physical port configuration: IPD port-kind, backpressure id,
 * and the range of PKO output ports mapped to it.
 */
struct cvmx_cfg_port_param {
	int8_t	ccpp_pknd;           /* IPD port-kind */
	int8_t	ccpp_bpid;           /* backpressure id */
	int8_t	ccpp_pko_port_base;  /* first PKO port for this physical port */
	int8_t	ccpp_pko_num_ports;  /* number of PKO ports assigned */
	uint8_t	ccpp_pko_nqueues;    /*
				      * When the user explicitly
				      * assigns queues,
				      * cvmx_cfg_pko_nqueue_pool[
				      *     ccpp_pko_nqueues ...
				      *     ccpp_pko_nqueues +
				      *     ccpp_pko_num_ports - 1]
				      * are the numbers of PKO queues
				      * assigned to the PKO ports for
				      * this physical port.
				      */
};
/*
 * Per pko_port: the contiguous range of PKO queues assigned to it.
 */
struct cvmx_cfg_pko_port_param {
	int16_t	ccppp_queue_base;  /* first queue number */
	int16_t	ccppp_num_queues;  /* queue count */
};

/*
 * A map from pko_port to
 *     interface,
 *     index, and
 *     pko engine id
 */
struct cvmx_cfg_pko_port_map {
	int16_t	ccppl_interface;  /* owning interface number */
	int16_t	ccppl_index;      /* port index within the interface */
	int16_t	ccppl_eid;        /* PKO DMA engine id */
};

/*
 * This is for looking up pko_base_port and pko_nport for ipd_port
 */
struct cvmx_cfg_pko_port_pair {
	int8_t	ccppp_base_port;  /* first pko_port for the ipd_port */
	int8_t	ccppp_nports;     /* number of pko_ports */
};
/* Per (interface, index) port parameters; every field starts out as
   CVMX_HELPER_CFG_INVALID_VALUE until __cvmx_helper_cfg_init() runs.
   (Uses the GNU [first ... last] range-initializer extension.) */
static CVMX_SHARED struct cvmx_cfg_port_param cvmx_cfg_port
    [CVMX_HELPER_CFG_MAX_IFACE][CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] =
    {[0 ... CVMX_HELPER_CFG_MAX_IFACE - 1] =
        {[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] =
            {CVMX_HELPER_CFG_INVALID_VALUE,
             CVMX_HELPER_CFG_INVALID_VALUE,
             CVMX_HELPER_CFG_INVALID_VALUE,
             CVMX_HELPER_CFG_INVALID_VALUE,
             CVMX_HELPER_CFG_INVALID_VALUE}}};
/*
 * Indexed by the pko_port number
 */
static CVMX_SHARED struct cvmx_cfg_pko_port_param cvmx_cfg_pko_port
    [CVMX_HELPER_CFG_MAX_PKO_PORT] =
    {[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] =
        {CVMX_HELPER_CFG_INVALID_VALUE,
         CVMX_HELPER_CFG_INVALID_VALUE}};
/* pko_port -> (interface, index, engine id); filled in by
   cvmx_helper_cfg_init_pko_port_map(). */
static CVMX_SHARED struct cvmx_cfg_pko_port_map cvmx_cfg_pko_port_map
    [CVMX_HELPER_CFG_MAX_PKO_PORT] =
    {[0 ... CVMX_HELPER_CFG_MAX_PKO_PORT - 1] =
        {CVMX_HELPER_CFG_INVALID_VALUE,
         CVMX_HELPER_CFG_INVALID_VALUE,
         CVMX_HELPER_CFG_INVALID_VALUE}};
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/*
 * This array assists translation from ipd_port to pko_port.
 * The ``16'' is the rounded value for the 3rd 4-bit value of
 * ipd_port, used to differentiate ``interfaces.''
 */
static CVMX_SHARED struct cvmx_cfg_pko_port_pair ipd2pko_port_cache[16]
    [CVMX_HELPER_CFG_MAX_PORT_PER_IFACE] =
    {[0 ... 15] =
        {[0 ... CVMX_HELPER_CFG_MAX_PORT_PER_IFACE - 1] =
            {CVMX_HELPER_CFG_INVALID_VALUE,
             CVMX_HELPER_CFG_INVALID_VALUE}}};
#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
/* Queue count used for pko_ports the user did not assign explicitly. */
static CVMX_SHARED int cvmx_cfg_default_pko_nqueues = 1;
/*
 * A pool for holding the pko_nqueues for the pko_ports assigned to a
 * physical port.
 */
static CVMX_SHARED uint8_t cvmx_cfg_pko_nqueue_pool
    [CVMX_HELPER_CFG_MAX_PKO_QUEUES] =
    {[0 ... CVMX_HELPER_CFG_MAX_PKO_QUEUES - 1] = 1};
#endif
#endif
/*
 * Options
 *
 * Each array-elem's initial value is also the option's default value.
 */
static CVMX_SHARED uint64_t cvmx_cfg_opts[CVMX_HELPER_CFG_OPT_MAX] =
    {[0 ... CVMX_HELPER_CFG_OPT_MAX - 1] = 1};
/*
 * MISC
 */
static CVMX_SHARED int cvmx_cfg_max_pko_engines; /* # of PKO DMA engines
                                                    allocated */
/* @INTERNAL Return the configured pknd for (interface, index). */
int __cvmx_helper_cfg_pknd(int interface, int index)
{
    return cvmx_cfg_port[interface][index].ccpp_pknd;
}
/* @INTERNAL Return the configured bpid for (interface, index). */
int __cvmx_helper_cfg_bpid(int interface, int index)
{
    return cvmx_cfg_port[interface][index].ccpp_bpid;
}
/* @INTERNAL Return the first pko_port assigned to (interface, index). */
int __cvmx_helper_cfg_pko_port_base(int interface, int index)
{
    return cvmx_cfg_port[interface][index].ccpp_pko_port_base;
}
/* @INTERNAL Return the number of pko_ports assigned to (interface, index). */
int __cvmx_helper_cfg_pko_port_num(int interface, int index)
{
    return cvmx_cfg_port[interface][index].ccpp_pko_num_ports;
}
/* @INTERNAL Return the number of queues assigned to pko_port. */
int __cvmx_helper_cfg_pko_queue_num(int pko_port)
{
    return cvmx_cfg_pko_port[pko_port].ccppp_num_queues;
}
/* @INTERNAL Return the first queue assigned to pko_port. */
int __cvmx_helper_cfg_pko_queue_base(int pko_port)
{
    return cvmx_cfg_pko_port[pko_port].ccppp_queue_base;
}
/*
 * @INTERNAL
 * Return one past the highest PKO queue number in use, found by
 * scanning the pko_port table from the top for the last configured
 * entry.  Asserts (without aborting) if nothing is configured.
 */
int __cvmx_helper_cfg_pko_max_queue(void)
{
    int port;

    for (port = CVMX_HELPER_CFG_MAX_PKO_PORT - 1; port >= 0; port--)
    {
        if (cvmx_cfg_pko_port[port].ccppp_queue_base ==
            CVMX_HELPER_CFG_INVALID_VALUE)
            continue;
        cvmx_helper_cfg_assert(cvmx_cfg_pko_port[port].ccppp_num_queues > 0);
        return (cvmx_cfg_pko_port[port].ccppp_queue_base +
            cvmx_cfg_pko_port[port].ccppp_num_queues);
    }

    cvmx_helper_cfg_assert(0); /* shouldn't get here */
    return 0;
}
/* @INTERNAL Return the number of PKO DMA engines allocated
   (set by cvmx_helper_cfg_init_pko_port_map()). */
int __cvmx_helper_cfg_pko_max_engine(void)
{
    return cvmx_cfg_max_pko_engines;
}
/*
 * Set a configuration option's value.  Must be called before the
 * hw-setup routines that consume the options.
 *
 * @param opt the option to set
 * @param val the value to store
 * @return 0 on success, -1 if opt is out of range
 */
int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val)
{
    /* Guard both ends: a negative value would index before the
       cvmx_cfg_opts array just as badly as one >= OPT_MAX. */
    if ((int)opt < 0 || opt >= CVMX_HELPER_CFG_OPT_MAX)
        return -1;

    cvmx_cfg_opts[opt] = val;
    return 0;
}
/*
 * Return a configuration option's current value.
 *
 * @param opt the option to query
 * @return the stored value, or CVMX_HELPER_CFG_INVALID_VALUE (cast to
 *         uint64_t) if opt is out of range
 */
uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt)
{
    /* Reject negative values too; they would read before the array. */
    if ((int)opt < 0 || opt >= CVMX_HELPER_CFG_OPT_MAX)
        return (uint64_t)CVMX_HELPER_CFG_INVALID_VALUE;

    return cvmx_cfg_opts[opt];
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(__cvmx_helper_cfg_init);
EXPORT_SYMBOL(__cvmx_helper_cfg_pknd);
EXPORT_SYMBOL(__cvmx_helper_cfg_bpid);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_base);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_num);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_queue_base);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_queue_num);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_max_queue);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_interface);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_index);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_port_eid);
EXPORT_SYMBOL(__cvmx_helper_cfg_pko_max_engine);
EXPORT_SYMBOL(cvmx_helper_cfg_opt_get);
EXPORT_SYMBOL(cvmx_helper_cfg_opt_set);
EXPORT_SYMBOL(cvmx_helper_cfg_ipd2pko_port_base);
EXPORT_SYMBOL(cvmx_helper_cfg_ipd2pko_port_num);
#endif
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
#ifdef CVMX_HELPER_CFG_DEBUG
/*
 * @INTERNAL
 * Debug dump: print the per-interface port parameters and the
 * per-pko_port queue assignments via cvmx_dprintf().  Compiled only
 * when CVMX_HELPER_CFG_DEBUG is defined.
 */
void cvmx_helper_cfg_show_cfg(void)
{
    int i, j;

    /* Per-interface / per-port parameters. */
    for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
    {
        cvmx_dprintf(
	    "cvmx_helper_cfg_show_cfg: interface%d mode %10s nports%4d\n", i,
	    cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(i)),
	    cvmx_helper_interface_enumerate(i));
        for (j = 0; j < cvmx_helper_interface_enumerate(i); j++)
        {
	    cvmx_dprintf("\tpknd[%i][%d]%d", i, j,
	        __cvmx_helper_cfg_pknd(i, j));
	    cvmx_dprintf(" pko_port_base[%i][%d]%d", i, j,
	        __cvmx_helper_cfg_pko_port_base(i, j));
	    cvmx_dprintf(" pko_port_num[%i][%d]%d\n", i, j,
	        __cvmx_helper_cfg_pko_port_num(i, j));
	}
    }
    /* Per-pko_port queue assignments (configured entries only). */
    for (i = 0; i < CVMX_HELPER_CFG_MAX_PKO_PORT; i++)
    {
        if (__cvmx_helper_cfg_pko_queue_base(i) !=
	    CVMX_HELPER_CFG_INVALID_VALUE)
        {
	    cvmx_dprintf("cvmx_helper_cfg_show_cfg: pko_port%d qbase%d nqueues%d "
	        "interface%d index%d\n", i,
		__cvmx_helper_cfg_pko_queue_base(i),
		__cvmx_helper_cfg_pko_queue_num(i),
		__cvmx_helper_cfg_pko_port_interface(i),
		__cvmx_helper_cfg_pko_port_index(i));
	}
    }
}
#endif
/*
* initialize cvmx_cfg_pko_port_map
*/
/*
 * @INTERNAL
 * Populate cvmx_cfg_pko_port_map: for every pko_port, record its
 * (interface, index) owner and allocate a PKO DMA engine id.  Also
 * sets cvmx_cfg_max_pko_engines.  Relies on cvmx_cfg_port[][] having
 * been filled in by __cvmx_helper_cfg_init() first.
 */
static void cvmx_helper_cfg_init_pko_port_map(void)
{
    int i, j, k;
    int pko_eid;
    int pko_port_base, pko_port_max;
    cvmx_helper_interface_mode_t mode;
    /*
     * one pko_eid is allocated to each port except for ILK, NPI, and
     * LOOP. Each of the three has one eid.
     */
    pko_eid = 0;
    for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
    {
        mode = cvmx_helper_interface_get_mode(i);
        for (j = 0; j < cvmx_helper_interface_enumerate(i); j++)
        {
	    pko_port_base = cvmx_cfg_port[i][j].ccpp_pko_port_base;
	    pko_port_max = pko_port_base +
	        cvmx_cfg_port[i][j].ccpp_pko_num_ports;
	    cvmx_helper_cfg_assert(pko_port_base !=
	        CVMX_HELPER_CFG_INVALID_VALUE);
	    cvmx_helper_cfg_assert(pko_port_max >= pko_port_base);
	    /* Record the owner and engine for each pko_port in range. */
	    for (k = pko_port_base; k < pko_port_max; k++)
	    {
	        cvmx_cfg_pko_port_map[k].ccppl_interface = i;
		cvmx_cfg_pko_port_map[k].ccppl_index = j;
		cvmx_cfg_pko_port_map[k].ccppl_eid = pko_eid;
	    }
#if 0
	    /*
	     * For a physical port that is not configured a PKO port,
	     * pko_port_base here equals to pko_port_max. In this
	     * case, the physical port does not take a DMA engine.
	     */
	    if (pko_port_base > pko_port_max)
#endif
	        /* Non-ILK/NPI/LOOP interfaces: one engine per port. */
	        if (!(mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
		      mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
		      mode == CVMX_HELPER_INTERFACE_MODE_ILK))
		    pko_eid ++;
	}
	/* ILK/NPI/LOOP: a single engine for the whole interface. */
	if (mode == CVMX_HELPER_INTERFACE_MODE_NPI ||
	    mode == CVMX_HELPER_INTERFACE_MODE_LOOP ||
	    mode == CVMX_HELPER_INTERFACE_MODE_ILK)
	    pko_eid ++;
    }
    /*
     * Legal pko_eids [0, 0x13] should not be exhausted.
     */
    cvmx_helper_cfg_assert(pko_eid <= 0x14);
    cvmx_cfg_max_pko_engines = pko_eid;
}
#endif
/* @INTERNAL Return the interface that owns pko_port (from the map). */
int __cvmx_helper_cfg_pko_port_interface(int pko_port)
{
    return cvmx_cfg_pko_port_map[pko_port].ccppl_interface;
}
/* @INTERNAL Return pko_port's index within its interface. */
int __cvmx_helper_cfg_pko_port_index(int pko_port)
{
    return cvmx_cfg_pko_port_map[pko_port].ccppl_index;
}
/* @INTERNAL Return the PKO DMA engine id assigned to pko_port. */
int __cvmx_helper_cfg_pko_port_eid(int pko_port)
{
    return cvmx_cfg_pko_port_map[pko_port].ccppl_eid;
}
/**
* Perform common init tasks for all chips.
* @return 1 for the caller to continue init and 0 otherwise.
*
* Note: ``common'' means this function is executed regardless of
* - chip, and
* - CVMX_ENABLE_HELPER_FUNCTIONS.
*
* This function decides based on these conditions if the
* configuration stage of the init process should continue.
*
* This is only meant to be called by __cvmx_helper_cfg_init().
*/
static int __cvmx_helper_cfg_init_common(void)
{
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
    /* Continue configuration only on chips with the port-kind scheme. */
    return octeon_has_feature(OCTEON_FEATURE_PKND);
#else
    /* Without the helper functions there is nothing to configure. */
    return 0;
#endif
}
/* Split an ipd_port into ipd2pko_port_cache coordinates: Y is the
 * high byte (interface group), X the low byte (port within it).
 * Outer parentheses added so the macros expand safely inside larger
 * expressions (the old forms broke under e.g. "2 * IPD2PKO_CACHE_Y(p)"
 * because >> and & bind loosely). */
#define IPD2PKO_CACHE_Y(ipd_port)	((ipd_port) >> 8)
#define IPD2PKO_CACHE_X(ipd_port)	((ipd_port) & 0xff)
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/*
* ipd_port to pko_port translation cache
*/
/*
 * @INTERNAL
 * Fill the ipd_port -> pko_port translation cache from the per-port
 * configuration.  Always returns 0.
 */
static int __cvmx_helper_cfg_init_ipd2pko_cache(void)
{
    int i, j, n;
    int ipd_y, ipd_x, ipd_port;
    for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
    {
        n = cvmx_helper_interface_enumerate(i);
        for (j = 0; j < n; j++)
        {
	    ipd_port = cvmx_helper_get_ipd_port(i, j);
	    ipd_y = IPD2PKO_CACHE_Y(ipd_port);
	    ipd_x = IPD2PKO_CACHE_X(ipd_port);
	    /* Ports with bit 11 set collapse into 4 cache slots keyed
	       by bits <5:4> — presumably sparse port ranges; matches
	       the lookup in cvmx_helper_cfg_ipd2pko_port_base(). */
	    ipd2pko_port_cache[ipd_y]
	        [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x] =
	        (struct cvmx_cfg_pko_port_pair)
	        {__cvmx_helper_cfg_pko_port_base(i, j),
	         __cvmx_helper_cfg_pko_port_num(i, j)};
        }
    }
    return 0;
}
/* Return the first pko_port mapped to ipd_port, via the cache filled
   by __cvmx_helper_cfg_init_ipd2pko_cache(); the index expression
   must mirror the one used when populating the cache. */
int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port)
{
    int ipd_y, ipd_x;
    ipd_y = IPD2PKO_CACHE_Y(ipd_port);
    ipd_x = IPD2PKO_CACHE_X(ipd_port);
    return ipd2pko_port_cache[ipd_y]
        [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x].ccppp_base_port;
}
/* Return how many pko_ports are mapped to ipd_port (same lookup). */
int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port)
{
    int ipd_y, ipd_x;
    ipd_y = IPD2PKO_CACHE_Y(ipd_port);
    ipd_x = IPD2PKO_CACHE_X(ipd_port);
    return ipd2pko_port_cache[ipd_y]
        [(ipd_port & 0x800) ? ((ipd_x >> 4) & 3) : ipd_x].ccppp_nports;
}
#endif
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
/**
* Return the number of queues assigned to this pko_port by user
*
* @param pko_port
* @return the number of queues for this pko_port
*
* Note: Called after the pko_port map is set up.
*/
static int __cvmx_ucfg_nqueues(int pko_port)
{
    int interface, index;
    int i, k;
    interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
    index = __cvmx_helper_cfg_pko_port_index(pko_port);
    /*
     * pko_port belongs to no physical port,
     * don't assign a queue to it.
     */
    if (interface == CVMX_HELPER_CFG_INVALID_VALUE ||
        index == CVMX_HELPER_CFG_INVALID_VALUE)
        return 0;
    /*
     * Assign the default number of queues to those pko_ports not
     * assigned explicitly.
     */
    i = cvmx_cfg_port[interface][index].ccpp_pko_nqueues;
    if (i == (uint8_t)CVMX_HELPER_CFG_INVALID_VALUE)
        return cvmx_cfg_default_pko_nqueues;
    /*
     * The user has assigned nqueues to this pko_port,
     * recorded in the pool.
     */
    /* ccpp_pko_nqueues is an offset into the pool; k is this
       pko_port's position within its physical port's range. */
    k = pko_port - cvmx_cfg_port[interface][index].ccpp_pko_port_base;
    cvmx_helper_cfg_assert(k <
        cvmx_cfg_port[interface][index].ccpp_pko_num_ports);
    return cvmx_cfg_pko_nqueue_pool[i + k];
}
#else
/**
* Return the number of queues to be assigned to this pko_port
*
* @param pko_port
* @return the number of queues for this pko_port
*
* Note: This function exists for backward compatibility.
* CVMX_PKO_QUEUES_PER_PORT_XXXX defines no of queues per HW port.
* pko_port is equivalent in pre-o68 SDK.
*/
static int cvmx_helper_cfg_dft_nqueues(int pko_port)
{
    cvmx_helper_interface_mode_t mode;
    int interface;
    int n;
/* NOTE(review): these fallbacks define CVMX_HELPER_PKO_QUEUES_PER_PORT_*
   but the branches below test CVMX_PKO_QUEUES_PER_PORT_* (no HELPER_),
   so the fallbacks appear unused — confirm which spelling is intended. */
#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0
#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0 1
#endif
#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1
#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1 1
#endif
#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE2
#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE2 1
#endif
#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE3
#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE3 1
#endif
#ifndef CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE4
#define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE4 1
#endif
    /* Default: one queue per port unless a config macro overrides it. */
    n = 1;
    interface = __cvmx_helper_cfg_pko_port_interface(pko_port);
    if (interface == 0)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE0
        n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
#endif
    }
    if (interface == 1)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE1
        n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
#endif
    }
    if (interface == 2)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE2
        n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE2;
#endif
    }
    if (interface == 3)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE3
        n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE3;
#endif
    }
    if (interface == 4)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_INTERFACE4
        n = CVMX_PKO_QUEUES_PER_PORT_INTERFACE4;
#endif
    }
    /* LOOP/NPI overrides are keyed by interface mode, not number. */
    mode = cvmx_helper_interface_get_mode(interface);
    if (mode == CVMX_HELPER_INTERFACE_MODE_LOOP)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_LOOP
        n = CVMX_PKO_QUEUES_PER_PORT_LOOP;
#endif
    }
    if (mode == CVMX_HELPER_INTERFACE_MODE_NPI)
    {
#ifdef CVMX_PKO_QUEUES_PER_PORT_PCI
        n = CVMX_PKO_QUEUES_PER_PORT_PCI;
#endif
    }
    return n;
}
#endif /* CVMX_USER_DEFINED_HELPER_CONFIG_INIT */
#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
/*
 * Establish the configuration: assign pknd/bpid/pko_port ranges to
 * every physical port, build the pko_port map, and assign PKO queues.
 * Returns 0 always; does nothing on chips without the PKND feature
 * (see __cvmx_helper_cfg_init_common()).
 */
int __cvmx_helper_cfg_init(void)
{
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
    struct cvmx_cfg_port_param *pport;
    int cvmx_cfg_default_pko_nports;
    int pknd, bpid, pko_port_base;
    int qbase;
    int i, j, n;
    cvmx_cfg_default_pko_nports = 1;
#endif
    if (!__cvmx_helper_cfg_init_common())
        return 0;
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
    /* User overrides are textually included here so they can seed
       cvmx_cfg_port[][] before the defaults are applied below. */
    {
        int cvmx_ucfg_nq;
        cvmx_ucfg_nq = 0;
#include "cvmx-helper-cfg-init.c"
    }
#endif
    /*
     * per-port parameters
     */
    /* Allocate pknd/bpid sequentially and carve out each port's
       pko_port range (user-specified count wins over the default). */
    pknd = 0;
    bpid = 0;
    pko_port_base = 0;
    for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++)
    {
        n = cvmx_helper_interface_enumerate(i);
        pport = cvmx_cfg_port[i];
        for (j = 0; j < n; j++, pport++)
        {
	    int t;
	    t = cvmx_cfg_default_pko_nports;
	    if (pport->ccpp_pko_num_ports != CVMX_HELPER_CFG_INVALID_VALUE)
	        t = pport->ccpp_pko_num_ports;
	    *pport = (struct cvmx_cfg_port_param) {
	        pknd++,
	        bpid++,
	        pko_port_base,
	        t,
	        pport->ccpp_pko_nqueues};
	    pko_port_base += t;
	}
    }
    cvmx_helper_cfg_assert(pknd <= CVMX_HELPER_CFG_MAX_PIP_PKND);
    cvmx_helper_cfg_assert(bpid <= CVMX_HELPER_CFG_MAX_PIP_BPID);
    cvmx_helper_cfg_assert(pko_port_base <= CVMX_HELPER_CFG_MAX_PKO_PORT);
    /*
     * pko_port map
     */
    cvmx_helper_cfg_init_pko_port_map();
    /*
     * per-pko_port parameters
     */
    /* Assign each pko_port a contiguous queue range. */
    qbase = 0;
    for (i = 0; i < pko_port_base; i++)
    {
#ifdef CVMX_USER_DEFINED_HELPER_CONFIG_INIT
        n = __cvmx_ucfg_nqueues(i);
#else
        n = cvmx_helper_cfg_dft_nqueues(i);
#endif
        cvmx_cfg_pko_port[i] = (struct cvmx_cfg_pko_port_param) {qbase, n};
        qbase += n;
        cvmx_helper_cfg_assert(qbase <= CVMX_HELPER_CFG_MAX_PKO_QUEUES);
    }
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
    __cvmx_helper_cfg_init_ipd2pko_cache();
#endif
#ifdef CVMX_HELPER_CFG_DEBUG
    cvmx_helper_cfg_show_cfg();
#endif /* CVMX_HELPER_CFG_DEBUG */
#endif
    return 0;
}

View File

@ -1,282 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper Functions for the Configuration Framework
*
* OCTEON_CN68XX introduces a flexible hw interface configuration
* scheme. To cope with this change and the requirements of
* configurability for other system resources, e.g., IPD/PIP pknd and
* PKO ports and queues, a configuration framework for the SDK is
* designed. It has two goals: first to recognize and establish the
* default configuration and, second, to allow the user to define key
* parameters in a high-level language.
*
* The helper functions query the QLM setup to help achieving the
* first goal.
*
* The second goal is accomplished by generating
 * cvmx_helper_cfg_init() from a high-level language.
*
* <hr>$Revision: 0 $<hr>
*/
#ifndef __CVMX_HELPER_CFG_H__
#define __CVMX_HELPER_CFG_H__
#define CVMX_HELPER_CFG_MAX_IFACE 9
#define CVMX_HELPER_CFG_MAX_PKO_PORT 128
#define CVMX_HELPER_CFG_MAX_PIP_BPID 64
#define CVMX_HELPER_CFG_MAX_PIP_PKND 64
#define CVMX_HELPER_CFG_MAX_PKO_QUEUES 256
#define CVMX_HELPER_CFG_MAX_PORT_PER_IFACE 256
#define CVMX_HELPER_CFG_INVALID_VALUE -1 /* The default return
* value upon failure
*/
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Soft assertion for configuration invariants: if the condition is false,
 * print the stringized condition with file/line via cvmx_dprintf().
 * NOTE: execution continues after the message -- this does not halt,
 * unlike a standard assert().
 */
#define cvmx_helper_cfg_assert(cond) \
do { \
if (!(cond)) \
{ \
cvmx_dprintf("cvmx_helper_cfg_assert (%s) at %s:%d\n", \
#cond, __FILE__, __LINE__); \
} \
} while (0)
/*
* Config Options
*
* These options have to be set via cvmx_helper_cfg_opt_set() before calling the
* routines that set up the hw. These routines process the options and set them
* correctly to take effect at runtime.
*/
/*
 * Config-time option identifiers. Values are written with
 * cvmx_helper_cfg_opt_set() and read with cvmx_helper_cfg_opt_get();
 * they must be set before the hw setup routines run (see block comment
 * above).
 */
enum cvmx_helper_cfg_option {
CVMX_HELPER_CFG_OPT_USE_DWB, /*
* Global option to control if
* the SDK configures units (DMA,
* SSO, and PKO) to send don't
* write back (DWB) requests for
* freed buffers. Set to 1/0 to
* enable/disable DWB.
*
* For programs that fit inside
* L2, sending DWB just causes
* more L2 operations without
* benefit.
*/
/* Sentinel: number of defined options, not a valid option itself. */
CVMX_HELPER_CFG_OPT_MAX
};
typedef enum cvmx_helper_cfg_option cvmx_helper_cfg_option_t;
/*
* @INTERNAL
* Return configured pknd for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the pknd
*/
extern int __cvmx_helper_cfg_pknd(int interface, int index);
/*
* @INTERNAL
* Return the configured bpid for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the bpid
*/
extern int __cvmx_helper_cfg_bpid(int interface, int index);
/*
* @INTERNAL
* Return the configured pko_port base for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the pko_port base
*/
extern int __cvmx_helper_cfg_pko_port_base(int interface, int index);
/*
* @INTERNAL
* Return the configured number of pko_ports for the port
*
* @param interface the interface number
* @param index the port's index number
* @return the number of pko_ports
*/
extern int __cvmx_helper_cfg_pko_port_num(int interface, int index);
/*
* @INTERNAL
* Return the configured pko_queue base for the pko_port
*
* @param pko_port
* @return the pko_queue base
*/
extern int __cvmx_helper_cfg_pko_queue_base(int pko_port);
/*
* @INTERNAL
* Return the configured number of pko_queues for the pko_port
*
* @param pko_port
* @return the number of pko_queues
*/
extern int __cvmx_helper_cfg_pko_queue_num(int pko_port);
/*
* @INTERNAL
* Return the interface the pko_port is configured for
*
* @param pko_port
* @return the interface for the pko_port
*/
extern int __cvmx_helper_cfg_pko_port_interface(int pko_port);
/*
* @INTERNAL
* Return the index of the port the pko_port is configured for
*
* @param pko_port
* @return the index of the port
*/
extern int __cvmx_helper_cfg_pko_port_index(int pko_port);
/*
* @INTERNAL
* Return the pko_eid of the pko_port
*
* @param pko_port
* @return the pko_eid
*/
extern int __cvmx_helper_cfg_pko_port_eid(int pko_port);
/*
* @INTERNAL
* Return the max# of pko queues allocated.
*
* @return the max# of pko queues
*
* Note: there might be holes in the queue space depending on user
* configuration. The function returns the highest queue's index in
* use.
*/
extern int __cvmx_helper_cfg_pko_max_queue(void);
/*
* @INTERNAL
* Return the max# of PKO DMA engines allocated.
*
* @return the max# of DMA engines
*
* NOTE: the DMA engines are allocated contiguously and starting from
* 0.
*/
extern int __cvmx_helper_cfg_pko_max_engine(void);
/*
* Get the value set for the config option ``opt''.
*
* @param opt is the config option.
* @return the value set for the option
*/
extern uint64_t cvmx_helper_cfg_opt_get(cvmx_helper_cfg_option_t opt);
/*
* Set the value for a config option.
*
* @param opt is the config option.
* @param val is the value to set for the opt.
* @return 0 for success and -1 on error
*
* Note an option here is a config-time parameter and this means that
* it has to be set before calling the corresponding setup functions
* that actually sets the option in hw.
*/
extern int cvmx_helper_cfg_opt_set(cvmx_helper_cfg_option_t opt, uint64_t val);
/*
* Retrieve the pko_port base given ipd_port.
*
* @param ipd_port is the IPD eport
* @return the corresponding PKO port base for the physical port
* represented by the IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
*/
extern int cvmx_helper_cfg_ipd2pko_port_base(int ipd_port);
/*
* Retrieve the number of pko_ports given ipd_port.
*
* @param ipd_port is the IPD eport
* @return the corresponding number of PKO ports for the physical port
* represented by IPD eport or CVMX_HELPER_CFG_INVALID_VALUE.
*/
extern int cvmx_helper_cfg_ipd2pko_port_num(int ipd_port);
/*
* @INTERNAL
* The init function
*
* @param none
* @return 0 for success.
*
* Note: this function is meant to be called to set the ``configured
* parameters,'' e.g., pknd, bpid, etc. and therefore should be before
* any of the corresponding cvmx_helper_cfg_xxxx() functions are
* called.
*/
extern int __cvmx_helper_cfg_init(void);
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_HELPER_CFG_H__ */

View File

@ -1,97 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Validate defines required by cvmx-helper. This header file
* validates a number of defines required for cvmx-helper to
* function properly. It either supplies a default or fails
* compile if a define is incorrect.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_CHECK_DEFINES_H__
#define __CVMX_HELPER_CHECK_DEFINES_H__
/* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve before
the beginning of the packet. Override in executive-config.h */
#ifndef CVMX_HELPER_FIRST_MBUFF_SKIP
#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
#warning WARNING: default CVMX_HELPER_FIRST_MBUFF_SKIP used. Defaults deprecated, please set in executive-config.h
#endif
/* CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve in each
chained packet element. Override in executive-config.h */
#ifndef CVMX_HELPER_NOT_FIRST_MBUFF_SKIP
#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
#warning WARNING: default CVMX_HELPER_NOT_FIRST_MBUFF_SKIP used. Defaults deprecated, please set in executive-config.h
#endif
/* CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper
function. Once it is enabled the hardware starts accepting packets. You
might want to skip the IPD enable if configuration changes are need
from the default helper setup. Override in executive-config.h */
#ifndef CVMX_HELPER_ENABLE_IPD
#define CVMX_HELPER_ENABLE_IPD 1
#warning WARNING: default CVMX_HELPER_ENABLE_IPD used. Defaults deprecated, please set in executive-config.h
#endif
/* Set default (defaults are deprecated) input tag type */
#ifndef CVMX_HELPER_INPUT_TAG_TYPE
#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
#warning WARNING: default CVMX_HELPER_INPUT_TAG_TYPE used. Defaults deprecated, please set in executive-config.h
#endif
#ifndef CVMX_HELPER_INPUT_PORT_SKIP_MODE
#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
#warning WARNING: default CVMX_HELPER_INPUT_PORT_SKIP_MODE used. Defaults deprecated, please set in executive-config.h
#endif
#if defined(CVMX_ENABLE_HELPER_FUNCTIONS) && !defined(CVMX_HELPER_INPUT_TAG_INPUT_PORT)
#error CVMX_HELPER_INPUT_TAG_* values for determining tag hash inputs must be defined in executive-config.h
#endif
#endif

View File

@ -1,329 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Fixes and workaround for Octeon chip errata. This file
* contains functions called by cvmx-helper to workaround known
* chip errata. For the most part, code doesn't need to call
* these functions directly.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-jtag.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-fpa.h"
#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-ipd.h"
#include "cvmx-gmx.h"
#include "cvmx-spi.h"
#include "cvmx-pow.h"
#include "cvmx-sysinfo.h"
#include "cvmx-helper.h"
#include "cvmx-helper-jtag.h"
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * Function to adjust internal IPD pointer alignments
 *
 * Works by looping test packets through an internal RGMII loopback on
 * port FIX_IPD_OUTPORT until the IPD WQE/packet pointer counters come
 * back into alignment (num_segs == 0). All CSRs touched are saved on
 * entry and restored on exit.
 *
 * @return 0 on success
 * !0 on failure
 */
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
#define FIX_IPD_OUTPORT 0
#define INTERFACE(port) (port >> 4) /* Ports 0-15 are interface 0, 16-31 are interface 1 */
#define INDEX(port) (port & 0xf)
uint64_t *p64;
cvmx_pko_command_word0_t pko_command;
cvmx_buf_ptr_t g_buffer, pkt_buffer;
cvmx_wqe_t *work;
int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
cvmx_gmxx_prtx_cfg_t gmx_cfg;
int retry_cnt;
int retry_loop_cnt;
int i;
cvmx_helper_link_info_t link_info;
/* Save values for restore at end */
uint64_t prtx_cfg = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t tx_ptr_en = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rx_ptr_en = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
uint64_t rxx_jabber = cvmx_read_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
uint64_t frame_max = cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
/* Configure port to gig FDX as required for loopback mode */
cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
/* Disable reception on all ports so if traffic is present it will not interfere. */
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
/* Let any in-flight traffic drain before probing the pointer counters. */
cvmx_wait(100000000ull);
/* Each pass sends one multi-segment packet through loopback to nudge the
   IPD pointers; bail out as soon as the counters are aligned. */
for (retry_loop_cnt = 0;retry_loop_cnt < 10;retry_loop_cnt++)
{
retry_cnt = 100000;
/* Extract the WQE and packet pointer counts from CVMX_IPD_PTR_COUNT;
   their difference (mod 4) is the misalignment to correct. */
wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
wqe_pcnt &= 0x7f;
num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
if (num_segs == 0)
goto fix_ipd_exit;
num_segs += 1;
size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES + ((num_segs-1)*FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
(FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
/* Put the output port into internal loopback so the packet comes back. */
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
CVMX_SYNC;
/* Gather list: one WQE-pool buffer holding num_segs buffer pointers. */
g_buffer.u64 = 0;
g_buffer.s.addr = cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
if (g_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT buffer allocation failure.\n");
goto fix_ipd_exit;
}
g_buffer.s.pool = CVMX_FPA_WQE_POOL;
g_buffer.s.size = num_segs;
pkt_buffer.u64 = 0;
pkt_buffer.s.addr = cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
if (pkt_buffer.s.addr == 0) {
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT buffer allocation failure.\n");
goto fix_ipd_exit;
}
pkt_buffer.s.i = 1;
pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
/* Fill in a minimal frame header so the looped-back packet parses;
   NOTE(review): constants appear to encode an Ethernet + IPv4 header
   with the length patched in -- confirm against the HRM if modifying. */
p64 = (uint64_t*) cvmx_phys_to_ptr(pkt_buffer.s.addr);
p64[0] = 0xffffffffffff0000ull;
p64[1] = 0x08004510ull;
p64[2] = ((uint64_t)(size-14) << 48) | 0x5ae740004000ull;
p64[3] = 0x3a5fc0a81073c0a8ull;
/* All gather entries reuse the same physical buffer; only the last
   entry clears the "i" (invert free) bit. */
for (i=0;i<num_segs;i++)
{
if (i>0)
pkt_buffer.s.size = FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
if (i==(num_segs-1))
pkt_buffer.s.i = 0;
*(uint64_t*)cvmx_phys_to_ptr(g_buffer.s.addr + 8*i) = pkt_buffer.u64;
}
/* Build the PKO command */
pko_command.u64 = 0;
pko_command.s.segs = num_segs;
pko_command.s.total_bytes = size;
pko_command.s.dontfree = 0;
pko_command.s.gather = 1;
/* Enable the port and open RX/TX plus oversized-frame acceptance for
   the duration of the test send. */
gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
gmx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 1 << INDEX(FIX_IPD_OUTPORT));
cvmx_write_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), 65392-14-4);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), 65392-14-4);
cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT, cvmx_pko_get_base_queue(FIX_IPD_OUTPORT), CVMX_PKO_LOCK_CMD_QUEUE);
cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT, cvmx_pko_get_base_queue(FIX_IPD_OUTPORT), pko_command, g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
CVMX_SYNC;
/* Poll for the looped-back packet to arrive as work (bounded spin). */
do {
work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
retry_cnt--;
} while ((work == NULL) && (retry_cnt > 0));
if (!retry_cnt)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT get_work() timeout occurred.\n");
/* Free packet */
if (work)
cvmx_helper_free_packet_data(work);
}
fix_ipd_exit:
/* Return CSR configs to saved values */
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), prtx_cfg);
cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), tx_ptr_en);
cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), rx_ptr_en);
cvmx_write_csr(CVMX_GMXX_RXX_JABBER(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), rxx_jabber);
cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)), frame_max);
cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
link_info.u64 = 0; /* Set link to down so autonegotiation will set it up again */
cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
/* Bring the link back up as autonegotiation is not done in user applications. */
cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
CVMX_SYNC;
/* num_segs is still non-zero here only if all 10 passes failed. */
if (num_segs)
cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
return(!!num_segs);
}
/**
 * This function needs to be called on all Octeon chips with
 * errata PKI-100.
 *
 * The Size field is 8 too large in WQE and next pointers
 *
 * The Size field generated by IPD is 8 larger than it should
 * be. The Size field is <55:40> of both:
 *     - WORD3 in the work queue entry, and
 *     - the next buffer pointer (which precedes the packet data
 *       in each buffer).
 *
 * @param work   Work queue entry to fix; its packet_ptr and every chained
 *               buffer pointer are modified in place.
 * @return Zero on success. Negative on failure
 */
int cvmx_helper_fix_ipd_packet_chain(cvmx_wqe_t *work)
{
uint64_t number_buffers = work->word2.s.bufs;
/* We only need to do this if the work has buffers */
if (number_buffers)
{
cvmx_buf_ptr_t buffer_ptr = work->packet_ptr;
/* Check for errata PKI-100: the fix only applies when the first buffer
   came from the packet pool (pool 0) and its size+offset accounting
   lands exactly 8 bytes past the pool buffer size. */
if ( (buffer_ptr.s.pool == 0) && (((uint64_t)buffer_ptr.s.size +
((uint64_t)buffer_ptr.s.back << 7) + ((uint64_t)buffer_ptr.s.addr & 0x7F))
!= (CVMX_FPA_PACKET_POOL_SIZE+8))) {
/* fix is not needed */
return 0;
}
/* Decrement the work packet pointer */
buffer_ptr.s.size -= 8;
work->packet_ptr = buffer_ptr;
/* Now loop through decrementing the size for each additional buffer */
while (--number_buffers)
{
/* Chain pointers are 8 bytes before the data */
cvmx_buf_ptr_t *ptr = (cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
buffer_ptr = *ptr;
buffer_ptr.s.size -= 8;
*ptr = buffer_ptr;
}
}
/* Make sure that these writes go out before other operations such as FPA frees */
CVMX_SYNCWS;
return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
/**
 * Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
 * 1 doesn't work properly. Disable the 2nd order CDR for the given
 * QLM by shifting a configuration chain into each of its four lanes
 * over JTAG.
 *
 * @param qlm QLM to disable 2nd order CDR for.
 */
void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm)
{
	cvmx_ciu_qlm_jtgd_t jtgd;
	int lane_idx;

	/* Apply the workaround only once. */
	jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
	if (jtgd.s.select != 0)
		return;

	cvmx_helper_qlm_jtag_init();

	/*
	 * Each lane carries a 268-bit chain (4 lanes = 1072 bits total).
	 * Only cfg_cdr_incx<67:64>=3 and cfg_cdr_secord<77>=1 are set;
	 * every other bit is zero. Bits are shifted in LSB first.
	 */
	for (lane_idx = 0; lane_idx < 4; lane_idx++)
	{
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 64);	/* bits <63:0> */
		cvmx_helper_qlm_jtag_shift(qlm, 4, 3);		/* cfg_cdr_incx<67:64> = 3 */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 9);	/* bits <76:68> */
		cvmx_helper_qlm_jtag_shift(qlm, 1, 1);		/* cfg_cdr_secord<77> = 1 */
		cvmx_helper_qlm_jtag_shift_zeros(qlm, 190);	/* bits <267:78> */
	}
	cvmx_helper_qlm_jtag_update(qlm);
}

View File

@ -1,93 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Fixes and workaround for Octeon chip errata. This file
* contains functions called by cvmx-helper to workaround known
* chip errata. For the most part, code doesn't need to call
* these functions directly.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_ERRATA_H__
#define __CVMX_HELPER_ERRATA_H__
/**
* @INTERNAL
* Function to adjust internal IPD pointer alignments
*
* @return 0 on success
* !0 on failure
*/
extern int __cvmx_helper_errata_fix_ipd_ptr_alignment(void);
/**
* This function needs to be called on all Octeon chips with
* errata PKI-100.
*
* The Size field is 8 too large in WQE and next pointers
*
* The Size field generated by IPD is 8 larger than it should
* be. The Size field is <55:40> of both:
* - WORD3 in the work queue entry, and
* - the next buffer pointer (which precedes the packet data
* in each buffer).
*
* @param work Work queue entry to fix
* @return Zero on success. Negative on failure
*/
extern int cvmx_helper_fix_ipd_packet_chain(cvmx_wqe_t *work);
/**
* Due to errata G-720, the 2nd order CDR circuit on CN52XX pass
* 1 doesn't work properly. The following code disables 2nd order
* CDR for the specified QLM.
*
* @param qlm QLM to disable 2nd order CDR for.
*/
extern void __cvmx_helper_errata_qlm_disable_2nd_order_cdr(int qlm);
#endif

View File

@ -1,246 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper functions for FPA setup.
*
* <hr>$Revision: 70030 $<hr>
*/
#include "cvmx.h"
#include "cvmx-bootmem.h"
#include "cvmx-fpa.h"
#include "cvmx-helper-fpa.h"
/**
 * @INTERNAL
 * Allocate backing memory for one FPA pool and hand it to the FPA.
 *
 * A pool that already reports available buffers, or a request for zero
 * buffers, is left untouched and treated as success.
 *
 * @param pool        Pool to initialize
 * @param buffer_size Size of buffers to allocate in bytes
 * @param buffers     Number of buffers to put in the pool. Zero is allowed
 * @param name        String name of the pool for debugging purposes
 * @return Zero on success, non-zero on failure
 */
static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
                                             uint64_t buffers, const char *name)
{
	uint64_t avail;
	uint64_t alignment;
	void *base;

	if (buffers == 0)
		return 0;

	/* Round the alignment up to the next power of two >= buffer_size so
	   that power-of-2 sized buffers come out naturally aligned. */
	for (alignment = CVMX_CACHE_LINE_SIZE; alignment < buffer_size; alignment <<= 1)
		;

	/* Leave pools alone that were already populated (e.g. by a prior core). */
	avail = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
	if (avail != 0)
	{
		cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. Skipping setup.\n",
			     pool, name, (unsigned long long)avail);
		return 0;
	}

	base = cvmx_bootmem_alloc(buffer_size * buffers, alignment);
	if (base == NULL)
	{
		cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n", pool, name);
		return -1;
	}

	cvmx_fpa_setup_pool(pool, name, base, buffer_size, buffers);
	return 0;
}
/**
 * @INTERNAL
 * Allocate memory and initialize the FPA pools using memory
 * from cvmx-bootmem. Specifying zero for the number of
 * buffers will cause that FPA pool to not be setup. This is
 * useful if you aren't using some of the hardware and want
 * to save memory. Use cvmx_helper_initialize_fpa instead of
 * this function directly.
 *
 * A negative pool number disables that pool entirely.
 *
 * @param pip_pool Should always be CVMX_FPA_PACKET_POOL
 * @param pip_size Should always be CVMX_FPA_PACKET_POOL_SIZE
 * @param pip_buffers
 *                 Number of packet buffers.
 * @param wqe_pool Should always be CVMX_FPA_WQE_POOL
 * @param wqe_size Should always be CVMX_FPA_WQE_POOL_SIZE
 * @param wqe_entries
 *                 Number of work queue entries
 * @param pko_pool Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
 * @param pko_size Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
 * @param pko_buffers
 *                 PKO Command buffers. You should at minimum have two per
 *                 each PKO queue.
 * @param tim_pool Should always be CVMX_FPA_TIMER_POOL
 * @param tim_size Should always be CVMX_FPA_TIMER_POOL_SIZE
 * @param tim_buffers
 *                 TIM ring buffer command queues. At least two per timer bucket
 *                 is recommended.
 * @param dfa_pool Should always be CVMX_FPA_DFA_POOL
 * @param dfa_size Should always be CVMX_FPA_DFA_POOL_SIZE
 * @param dfa_buffers
 *                 DFA command buffer. A relatively small (32 for example)
 *                 number should work.
 * @return Zero on success, non-zero if out of memory
 */
static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size, int pip_buffers,
                                        int wqe_pool, int wqe_size, int wqe_entries,
                                        int pko_pool, int pko_size, int pko_buffers,
                                        int tim_pool, int tim_size, int tim_buffers,
                                        int dfa_pool, int dfa_size, int dfa_buffers)
{
	/* Table-driven setup: replaces five copy-pasted if-blocks, one per
	   pool, with a single loop. Order matches the original code. */
	struct pool_cfg {
		int pool;		/* FPA pool number, < 0 means "skip" */
		int size;		/* bytes per buffer */
		int buffers;		/* buffer count, 0 allowed */
		const char *name;	/* debug name */
	};
	const struct pool_cfg pools[5] = {
		{ pip_pool, pip_size, pip_buffers, "Packet Buffers" },
		{ wqe_pool, wqe_size, wqe_entries, "Work Queue Entries" },
		{ pko_pool, pko_size, pko_buffers, "PKO Command Buffers" },
		{ tim_pool, tim_size, tim_buffers, "TIM Command Buffers" },
		{ dfa_pool, dfa_size, dfa_buffers, "DFA Command Buffers" }
	};
	int i;
	int status;

	cvmx_fpa_enable();

	/* The hardware prefetches packet buffers; warn if the count is low
	   enough that prefetch could exhaust the pool. */
	if ((pip_buffers > 0) && (pip_buffers <= 64))
		cvmx_dprintf("Warning: %d packet buffers may not be enough for hardware"
			     " prefetch. 65 or more is recommended.\n", pip_buffers);

	for (i = 0; i < 5; i++)
	{
		if (pools[i].pool < 0)
			continue;
		status = __cvmx_helper_initialize_fpa_pool(pools[i].pool, pools[i].size,
							   pools[i].buffers, pools[i].name);
		if (status)
			return status;
	}
	return 0;
}
/**
 * Allocate memory and initialize the FPA pools using memory
 * from cvmx-bootmem. Sizes of each element in the pools is
 * controlled by the cvmx-config.h header file. Specifying
 * zero for any parameter will cause that FPA pool to not be
 * setup. This is useful if you aren't using some of the
 * hardware and want to save memory.
 *
 * @param packet_buffers
 *                 Number of packet buffers to allocate
 * @param work_queue_entries
 *                 Number of work queue entries
 * @param pko_buffers
 *                 PKO Command buffers. You should at minimum have two per
 *                 each PKO queue.
 * @param tim_buffers
 *                 TIM ring buffer command queues. At least two per timer bucket
 *                 is recommended.
 * @param dfa_buffers
 *                 DFA command buffer. A relatively small (32 for example)
 *                 number should work.
 * @return Zero on success, non-zero if out of memory
 */
int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
int pko_buffers, int tim_buffers, int dfa_buffers)
{
/* Pool numbers/sizes come from cvmx-config.h. Any pool macro the
   application did not configure falls back to -1/0 here, which makes
   __cvmx_helper_initialize_fpa skip that pool entirely. */
#ifndef CVMX_FPA_PACKET_POOL
#define CVMX_FPA_PACKET_POOL -1
#define CVMX_FPA_PACKET_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_WQE_POOL
#define CVMX_FPA_WQE_POOL -1
#define CVMX_FPA_WQE_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_TIMER_POOL
#define CVMX_FPA_TIMER_POOL -1
#define CVMX_FPA_TIMER_POOL_SIZE 0
#endif
#ifndef CVMX_FPA_DFA_POOL
#define CVMX_FPA_DFA_POOL -1
#define CVMX_FPA_DFA_POOL_SIZE 0
#endif
return __cvmx_helper_initialize_fpa(
CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE, packet_buffers,
CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE, work_queue_entries,
CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, pko_buffers,
CVMX_FPA_TIMER_POOL, CVMX_FPA_TIMER_POOL_SIZE, tim_buffers,
CVMX_FPA_DFA_POOL, CVMX_FPA_DFA_POOL_SIZE, dfa_buffers);
}

View File

@ -1,83 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper functions for FPA setup.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_H_FPA__
#define __CVMX_HELPER_H_FPA__
/**
* Allocate memory and initialize the FPA pools using memory
* from cvmx-bootmem. Sizes of each element in the pools is
* controlled by the cvmx-config.h header file. Specifying
* zero for any parameter will cause that FPA pool to not be
* setup. This is useful if you aren't using some of the
* hardware and want to save memory.
*
* @param packet_buffers
* Number of packet buffers to allocate
* @param work_queue_entries
* Number of work queue entries
* @param pko_buffers
* PKO Command buffers. You should at minimum have two per
* each PKO queue.
* @param tim_buffers
* TIM ring buffer command queues. At least two per timer bucket
* is recommended.
* @param dfa_buffers
* DFA command buffer. A relatively small (32 for example)
* number should work.
* @return Zero on success, non-zero if out of memory
*/
extern int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
int pko_buffers, int tim_buffers,
int dfa_buffers);
#endif /* __CVMX_HELPER_H__ */

View File

@ -1,442 +0,0 @@
/***********************license start***************
* Copyright (c) 2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for ILK initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 41586 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#include <asm/octeon/cvmx-ilk.h>
#include <asm/octeon/cvmx-bootmem.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-qlm.h>
#include <asm/octeon/cvmx-ilk-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-helper.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-ilk.h"
#include "cvmx-bootmem.h"
#include "cvmx-pko.h"
#include "cvmx-qlm.h"
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/*
 * Report the number of channels configured on an ILK interface.
 * The caller passes the global interface number; the ILK-local index is
 * obtained by subtracting CVMX_ILK_GBL_BASE before indexing the
 * per-interface channel table.
 */
int __cvmx_helper_ilk_enumerate(int interface)
{
    return cvmx_ilk_chans[interface - CVMX_ILK_GBL_BASE];
}
/**
* @INTERNAL
 * Probe an ILK interface and determine the number of ports
* connected to it. The ILK interface should still be down
* after this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
int __cvmx_helper_ilk_probe(int interface)
{
    int i, j, res = -1;
    /* pipe_base/pknd_base and the three tables persist across calls so the
     * global pipe/pkind allocation advances as each ILK interface is probed.
     * NOTE(review): this makes the function non-reentrant — confirm it is
     * only called from single-threaded init. */
    static int pipe_base = 0, pknd_base = 0;
    static cvmx_ilk_pipe_chan_t *pch = NULL, *tmp;
    static cvmx_ilk_chan_pknd_t *chpknd = NULL, *tmp1;
    static cvmx_ilk_cal_entry_t *calent = NULL, *tmp2;

    /* ILK hardware exists only on CN68XX */
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 0;

    /* Convert global interface number to ILK-local index */
    interface -= CVMX_ILK_GBL_BASE;
    if (interface >= CVMX_NUM_ILK_INTF)
        return 0;

    /* the configuration should be done only once */
    if (cvmx_ilk_get_intf_ena (interface))
        return cvmx_ilk_chans[interface];

    /* configure lanes and enable the link */
    res = cvmx_ilk_start_interface (interface, cvmx_ilk_lane_mask[interface]);
    if (res < 0)
        return 0;

    /* set up the group of pipes available to ilk */
    if (pipe_base == 0)
        pipe_base = __cvmx_pko_get_pipe (interface + CVMX_ILK_GBL_BASE, 0);
    if (pipe_base == -1)
    {
        /* no pipe available; reset so a later call can retry */
        pipe_base = 0;
        return 0;
    }

    res = cvmx_ilk_set_pipe (interface, pipe_base, cvmx_ilk_chans[interface]);
    if (res < 0)
        return 0;

    /* set up pipe to channel mapping */
    i = pipe_base;
    if (pch == NULL)
    {
        /* Allocated once and reused for every interface.  In the
         * non-Linux case the bootmem allocation is never freed (see the
         * error labels below). */
        pch = (cvmx_ilk_pipe_chan_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t),
                            sizeof(cvmx_ilk_pipe_chan_t));
#endif
        if (pch == NULL)
            return 0;
    }

    memset (pch, 0, CVMX_MAX_ILK_CHANS * sizeof(cvmx_ilk_pipe_chan_t));
    tmp = pch;
    /* consecutive pipes starting at pipe_base, one per channel */
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp->pipe = i++;
        tmp->chan = cvmx_ilk_chan_map[interface][j];
        tmp++;
    }
    res = cvmx_ilk_tx_set_channel (interface, pch, cvmx_ilk_chans[interface]);
    if (res < 0)
    {
        res = 0;
        goto err_free_pch;
    }
    /* commit the pipe range consumed by this interface */
    pipe_base += cvmx_ilk_chans[interface];

    /* set up channel to pkind mapping */
    if (pknd_base == 0)
        pknd_base = cvmx_helper_get_pknd (interface + CVMX_ILK_GBL_BASE, 0);
    i = pknd_base;
    if (chpknd == NULL)
    {
        chpknd = (cvmx_ilk_chan_pknd_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t),
                            sizeof(cvmx_ilk_chan_pknd_t));
#endif
        if (chpknd == NULL)
        {
            /* undo the pipe_base advance before bailing out */
            pipe_base -= cvmx_ilk_chans[interface];
            res = 0;
            goto err_free_pch;
        }
    }

    memset (chpknd, 0, CVMX_MAX_ILK_PKNDS * sizeof(cvmx_ilk_chan_pknd_t));
    tmp1 = chpknd;
    /* consecutive pkinds starting at pknd_base, one per channel */
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp1->chan = cvmx_ilk_chan_map[interface][j];
        tmp1->pknd = i++;
        tmp1++;
    }
    res = cvmx_ilk_rx_set_pknd (interface, chpknd, cvmx_ilk_chans[interface]);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_chpknd;
    }
    pknd_base += cvmx_ilk_chans[interface];

    /* Set up tx calendar */
    if (calent == NULL)
    {
        calent = (cvmx_ilk_cal_entry_t *)
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        kmalloc(CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t), GFP_KERNEL);
#else
        cvmx_bootmem_alloc (CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t),
                            sizeof(cvmx_ilk_cal_entry_t));
#endif
        if (calent == NULL)
        {
            /* roll back both allocations' bookkeeping */
            pipe_base -= cvmx_ilk_chans[interface];
            pknd_base -= cvmx_ilk_chans[interface];
            res = 0;
            goto err_free_chpknd;
        }
    }

    /* TX calendar entries mirror the channel->pknd table */
    memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
    tmp1 = chpknd;
    tmp2 = calent;
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp2->pipe_bpid = tmp1->pknd;
        tmp2->ent_ctrl = PIPE_BPID;
        tmp1++;
        tmp2++;
    }
    res = cvmx_ilk_cal_setup_tx (interface, cvmx_ilk_chans[interface],
                                 calent, 1);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        pknd_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_calent;
    }

    /* set up rx calendar. allocated memory can be reused.
     * this is because max pkind is always less than max pipe */
    memset (calent, 0, CVMX_MAX_ILK_PIPES * sizeof(cvmx_ilk_cal_entry_t));
    tmp = pch;
    tmp2 = calent;
    /* RX calendar entries mirror the pipe->channel table */
    for (j = 0; j < cvmx_ilk_chans[interface]; j++)
    {
        tmp2->pipe_bpid = tmp->pipe;
        tmp2->ent_ctrl = PIPE_BPID;
        tmp++;
        tmp2++;
    }
    res = cvmx_ilk_cal_setup_rx (interface, cvmx_ilk_chans[interface],
                                 calent, CVMX_ILK_RX_FIFO_WM, 1);
    if (res < 0)
    {
        pipe_base -= cvmx_ilk_chans[interface];
        pknd_base -= cvmx_ilk_chans[interface];
        res = 0;
        goto err_free_calent;
    }
    /* success: report the number of ports (channels) on this interface */
    res = __cvmx_helper_ilk_enumerate(interface + CVMX_ILK_GBL_BASE);
    goto out;

    /* Error unwinding: frees only exist for the Linux-kernel build; the
     * bootmem allocator has no free, so the buffers simply leak there. */
err_free_calent:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (calent);
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif

err_free_chpknd:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (chpknd);
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif

err_free_pch:
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
    kfree (pch);
#else
    /* no free() for cvmx_bootmem_alloc() */
#endif
out:
    return res;
}
/**
* @INTERNAL
* Bringup and enable ILK interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
/*
 * Bring up and enable an ILK interface for packet I/O.  Translates the
 * global interface number to the ILK-local index and delegates to the
 * low-level enable routine.  Returns whatever cvmx_ilk_enable() returns.
 */
int __cvmx_helper_ilk_enable(int interface)
{
    return cvmx_ilk_enable(interface - CVMX_ILK_GBL_BASE);
}
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by ILK link status.
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port)
{
    cvmx_helper_link_info_t result;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int retry_count = 0;
    cvmx_ilk_rxx_cfg1_t ilk_rxx_cfg1;
    cvmx_ilk_rxx_int_t ilk_rxx_int;
    int lanes = 0;

    /* result starts as "link down"; it is only filled in once lane
     * alignment has completed */
    result.u64 = 0;
    interface -= CVMX_ILK_GBL_BASE;

retry:
    /* Bounded retry loop: each pass advances the RX bring-up state
     * machine (boundary lock -> word sync -> lane alignment) */
    retry_count++;
    if (retry_count > 10)
        goto out;

    ilk_rxx_cfg1.u64 = cvmx_read_csr (CVMX_ILK_RXX_CFG1(interface));
    ilk_rxx_int.u64 = cvmx_read_csr (CVMX_ILK_RXX_INT(interface));

    /* Clear all RX status bits (write-1-to-clear) */
    if (ilk_rxx_int.u64)
        cvmx_write_csr(CVMX_ILK_RXX_INT(interface), ilk_rxx_int.u64);

    if (ilk_rxx_cfg1.s.rx_bdry_lock_ena == 0)
    {
        /* We need to start looking for word boundary lock */
        ilk_rxx_cfg1.s.rx_bdry_lock_ena = cvmx_ilk_get_intf_ln_msk(interface);
        ilk_rxx_cfg1.s.rx_align_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK%d: Looking for word boundary lock\n", interface);
        goto retry;
    }

    if (ilk_rxx_cfg1.s.rx_align_ena == 0)
    {
        /* boundary lock is being sought; enable alignment only after
         * word sync has completed */
        if (ilk_rxx_int.s.word_sync_done)
        {
            ilk_rxx_cfg1.s.rx_align_ena = 1;
            cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
            //printf("ILK%d: Looking for lane alignment\n", interface);
            goto retry;
        }
        goto out;
    }

    if (ilk_rxx_int.s.lane_align_fail)
    {
        /* alignment failed: restart the whole bring-up sequence on the
         * next call by disabling both stages */
        ilk_rxx_cfg1.s.rx_bdry_lock_ena = 0;
        ilk_rxx_cfg1.s.rx_align_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        cvmx_dprintf("ILK%d: Lane alignment failed\n", interface);
        goto out;
    }

    if (ilk_rxx_int.s.lane_align_done)
    {
        //cvmx_dprintf("ILK%d: Lane alignment complete\n", interface);
    }

    /* link is up: count locked lanes and compute aggregate speed.
     * The 64/67 factor matches the Interlaken 64B/67B framing overhead. */
    lanes = cvmx_pop(ilk_rxx_cfg1.s.rx_bdry_lock_ena);
    result.s.link_up = 1;
    result.s.full_duplex = 1;
    result.s.speed = cvmx_qlm_get_gbaud_mhz(1+interface) * 64 / 67;
    result.s.speed *= lanes;

out:
    /* If the link is down we will force disable the RX path. If it is up,
       we'll set it to match the TX state set by the if_enable call */
    if (result.s.link_up)
    {
        cvmx_ilk_txx_cfg1_t ilk_txx_cfg1;
        ilk_txx_cfg1.u64 = cvmx_read_csr(CVMX_ILK_TXX_CFG1(interface));
        ilk_rxx_cfg1.s.pkt_ena = ilk_txx_cfg1.s.pkt_ena;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK%d: link up, %d Mbps, Full duplex mode, %d lanes\n", interface, result.s.speed, lanes);
    }
    else
    {
        ilk_rxx_cfg1.s.pkt_ena = 0;
        cvmx_write_csr(CVMX_ILK_RXX_CFG1(interface), ilk_rxx_cfg1.u64);
        //cvmx_dprintf("ILK link down\n");
    }
    return result;
}
/**
* @INTERNAL
* Set the link state of an IPD/PKO port.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
/*
 * Set the link state of an ILK IPD/PKO port.  ILK link state is driven
 * entirely by the hardware bring-up in __cvmx_helper_ilk_link_get(), so
 * there is nothing for this hook to do; it exists to satisfy the common
 * helper interface and always reports success.
 */
int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    (void)ipd_port;
    (void)link_info;
    return 0;
}
/**
* Display ilk interface statistics.
*
*/
/*
 * Display statistics for every ILK interface by copying each interface's
 * channel list into a scratch array and handing it to the low-level
 * stats printer with clear-on-read disabled.
 */
void __cvmx_helper_ilk_show_stats (void)
{
    int i, j;
    unsigned char *pchans, num_chans;
    unsigned int chan_tmp[CVMX_MAX_ILK_CHANS];
    cvmx_ilk_stats_ctrl_t ilk_stats_ctrl;

    for (i = 0; i < CVMX_NUM_ILK_INTF; i++)
    {
        /* pchans points at the interface's channel table; num_chans is
         * its length */
        cvmx_ilk_get_chan_info (i, &pchans, &num_chans);

        /* widen the byte-sized channel ids into the unsigned int scratch
         * array (sizeof(int) == sizeof(unsigned int), so the memset
         * covers the whole array) */
        memset (chan_tmp, 0, CVMX_MAX_ILK_CHANS * sizeof (int));
        for (j = 0; j < num_chans; j++)
            chan_tmp[j] = pchans[j];

        /* clr_on_rd = 0: reading stats must not reset the counters */
        ilk_stats_ctrl.chan_list = chan_tmp;
        ilk_stats_ctrl.num_chans = num_chans;
        ilk_stats_ctrl.clr_on_rd = 0;
        cvmx_ilk_show_stats (i, &ilk_stats_ctrl);
    }
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,110 +0,0 @@
/***********************license start***************
* Copyright (c) 2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for ILK initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 41586 $<hr>
*/
#ifndef __CVMX_HELPER_ILK_H__
#define __CVMX_HELPER_ILK_H__
extern int __cvmx_helper_ilk_enumerate(int interface);
/**
* @INTERNAL
 * Probe an ILK interface and determine the number of ports
* connected to it. The ILK interface should still be down after
* this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_ilk_probe(int interface);
/**
* @INTERNAL
* Bringup and enable a ILK interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_ilk_enable(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by ILK link status.
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_ilk_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_ilk_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
extern void __cvmx_helper_ilk_show_stats (void);
#endif

View File

@ -1,230 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper utilities for qlm_jtag.
*
* <hr>$Revision: 42480 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-helper-jtag.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#if defined(__FreeBSD__) && defined(_KERNEL)
#include "cvmx-helper-jtag.h"
#endif
#endif
/**
* Initialize the internal QLM JTAG logic to allow programming
* of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
* These functions should only be used at the direction of Cavium
* Networks. Programming incorrect values into the JTAG chain
* can cause chip damage.
*/
/*
 * Initialize the internal QLM JTAG logic so the cvmx_helper_qlm_jtag_*()
 * functions can drive the JTAG chain.  Only use at Cavium's direction:
 * shifting bad values into the chain can damage the chip.
 */
void cvmx_helper_qlm_jtag_init(void)
{
    cvmx_ciu_qlm_jtgc_t ctl;
    /* Target JTAG clock: 10 MHz on CN68XX, 25 MHz elsewhere */
    int target_mhz = OCTEON_IS_MODEL(OCTEON_CN68XX) ? 10 : 25;
    int ratio = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / (1000000 * target_mhz);
    int shift = 0;

    /* Convert the divisor into a power of 2 shift; hardware divides
     * sclk by 2^(CLK_DIV + 2) */
    for (ratio = (ratio - 1) >> 2; ratio != 0; ratio >>= 1)
        shift++;

    ctl.u64 = 0;
    ctl.s.clk_div = shift;
    ctl.s.mux_sel = 0;
    /* Bypass mask width tracks the number of QLMs on each model */
    if (OCTEON_IS_MODEL(OCTEON_CN52XX))
        ctl.s.bypass = 0x3;
    else if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
        ctl.s.bypass = 0x7;
    else
        ctl.s.bypass = 0xf;
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        ctl.s.bypass_ext = 1;
    cvmx_write_csr(CVMX_CIU_QLM_JTGC, ctl.u64);
    /* Read back to make sure the write has taken effect */
    cvmx_read_csr(CVMX_CIU_QLM_JTGC);
}
/**
* Write up to 32bits into the QLM jtag chain. Bits are shifted
* into the MSB and out the LSB, so you should shift in the low
* order bits followed by the high order bits. The JTAG chain for
* CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
* for CN63XX is 4 * 300 bits long, or 1200.
*
* @param qlm QLM to shift value into
* @param bits Number of bits to shift in (1-32).
* @param data Data to shift in. Bit 0 enters the chain first, followed by
* bit 1, etc.
*
* @return The low order bits of the JTAG chain that shifted out of the
* circle.
*/
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
    cvmx_ciu_qlm_jtgc_t jtgc;
    cvmx_ciu_qlm_jtgd_t jtgd;

    /* Point the JTAG mux at the requested QLM; older chips also need the
     * per-QLM bypass bit set */
    jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
    jtgc.s.mux_sel = qlm;
    if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        jtgc.s.bypass = 1<<qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
    /* read back to flush the write before starting the shift */
    cvmx_read_csr(CVMX_CIU_QLM_JTGC);

    /* Kick off a shift of 'bits' bits of 'data' into the chain */
    jtgd.u64 = 0;
    jtgd.s.shift = 1;
    jtgd.s.shft_cnt = bits-1;
    jtgd.s.shft_reg = data;
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
        jtgd.s.select = 1 << qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
    /* busy-wait until the hardware clears the shift bit */
    do
    {
        jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
    } while (jtgd.s.shift);
    /* The shifted-out bits land in the top of shft_reg; right-justify them */
    return jtgd.s.shft_reg >> (32-bits);
}
/**
* Shift long sequences of zeros into the QLM JTAG chain. It is
* common to need to shift more than 32 bits of zeros into the
 * chain. This function is a convenience wrapper around
* cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
* zeros at a time.
*
* @param qlm QLM to shift zeros into
* @param bits
*/
/*
 * Shift a long run of zero bits into the QLM JTAG chain.  Convenience
 * wrapper around cvmx_helper_qlm_jtag_shift(), which handles at most
 * 32 bits per call, so the request is issued in 32-bit chunks.
 */
void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits)
{
    int remaining;

    for (remaining = bits; remaining > 0; remaining -= 32)
    {
        int chunk = (remaining < 32) ? remaining : 32;
        cvmx_helper_qlm_jtag_shift(qlm, chunk, 0);
    }
}
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in the proper number of bits into the
* JTAG chain. Updating invalid values can possibly cause chip damage.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_update(int qlm)
{
    cvmx_ciu_qlm_jtgc_t jtgc;
    cvmx_ciu_qlm_jtgd_t jtgd;

    /* Select the target QLM on the JTAG mux (older chips also need the
     * per-QLM bypass bit) */
    jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
    jtgc.s.mux_sel = qlm;
    if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        jtgc.s.bypass = 1<<qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
    /* read back to flush the write */
    cvmx_read_csr(CVMX_CIU_QLM_JTGC);

    /* Update the new data */
    jtgd.u64 = 0;
    jtgd.s.update = 1;
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
        jtgd.s.select = 1 << qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
    /* busy-wait until the hardware clears the update bit */
    do
    {
        jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
    } while (jtgd.s.update);
}
/**
* Load the QLM JTAG chain with data from all lanes of the QLM.
*
* @param qlm QLM to program
*/
void cvmx_helper_qlm_jtag_capture(int qlm)
{
    cvmx_ciu_qlm_jtgc_t jtgc;
    cvmx_ciu_qlm_jtgd_t jtgd;

    /* Select the target QLM on the JTAG mux (older chips also need the
     * per-QLM bypass bit) */
    jtgc.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGC);
    jtgc.s.mux_sel = qlm;
    if (!OCTEON_IS_MODEL(OCTEON_CN6XXX) && !OCTEON_IS_MODEL(OCTEON_CNF7XXX))
        jtgc.s.bypass = 1<<qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGC, jtgc.u64);
    /* read back to flush the write */
    cvmx_read_csr(CVMX_CIU_QLM_JTGC);

    /* Trigger a capture of all lanes into the JTAG chain */
    jtgd.u64 = 0;
    jtgd.s.capture = 1;
    if (!OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
        jtgd.s.select = 1 << qlm;
    cvmx_write_csr(CVMX_CIU_QLM_JTGD, jtgd.u64);
    /* busy-wait until the hardware clears the capture bit */
    do
    {
        jtgd.u64 = cvmx_read_csr(CVMX_CIU_QLM_JTGD);
    } while (jtgd.s.capture);
}

View File

@ -1,106 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper utilities for qlm_jtag.
*
* <hr>$Revision: 41586 $<hr>
*/
#ifndef __CVMX_HELPER_JTAG_H__
#define __CVMX_HELPER_JTAG_H__
/**
* Initialize the internal QLM JTAG logic to allow programming
* of the JTAG chain by the cvmx_helper_qlm_jtag_*() functions.
* These functions should only be used at the direction of Cavium
* Networks. Programming incorrect values into the JTAG chain
* can cause chip damage.
*/
extern void cvmx_helper_qlm_jtag_init(void);
/**
* Write up to 32bits into the QLM jtag chain. Bits are shifted
* into the MSB and out the LSB, so you should shift in the low
* order bits followed by the high order bits. The JTAG chain for
* CN52XX and CN56XX is 4 * 268 bits long, or 1072. The JTAG chain
* for CN63XX is 4 * 300 bits long, or 1200.
*
* @param qlm QLM to shift value into
* @param bits Number of bits to shift in (1-32).
* @param data Data to shift in. Bit 0 enters the chain first, followed by
* bit 1, etc.
*
* @return The low order bits of the JTAG chain that shifted out of the
* circle.
*/
extern uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data);
/**
* Shift long sequences of zeros into the QLM JTAG chain. It is
* common to need to shift more than 32 bits of zeros into the
 * chain. This function is a convenience wrapper around
* cvmx_helper_qlm_jtag_shift() to shift more than 32 bits of
* zeros at a time.
*
* @param qlm QLM to shift zeros into
* @param bits
*/
extern void cvmx_helper_qlm_jtag_shift_zeros(int qlm, int bits);
/**
* Program the QLM JTAG chain into all lanes of the QLM. You must
* have already shifted in the proper number of bits into the
* JTAG chain. Updating invalid values can possibly cause chip damage.
*
* @param qlm QLM to program
*/
extern void cvmx_helper_qlm_jtag_update(int qlm);
/**
* Load the QLM JTAG chain with data from all lanes of the QLM.
*
* @param qlm QLM to program
*/
extern void cvmx_helper_qlm_jtag_capture(int qlm);
#endif /* __CVMX_HELPER_JTAG_H__ */

View File

@ -1,146 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for LOOP initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-helper.h>
#endif
#include <asm/octeon/cvmx-pip-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-helper.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-helper.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/*
 * Report the number of loopback ports on an interface: 8 on CN68XX,
 * 4 on every other model.
 */
int __cvmx_helper_loop_enumerate(int interface)
{
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 8;
    return 4;
}
/**
* @INTERNAL
* Probe a LOOP interface and determine the number of ports
* connected to it. The LOOP interface should still be down
* after this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
/*
 * Probe a LOOP interface: the loopback port count is fixed per chip
 * model, so probing is just enumeration.  The interface stays down.
 */
int __cvmx_helper_loop_probe(int interface)
{
    int num_ports = __cvmx_helper_loop_enumerate(interface);

    return num_ports;
}
/**
* @INTERNAL
* Bringup and enable a LOOP interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_loop_enable(int interface)
{
    cvmx_pip_prt_cfgx_t port_cfg;
    int num_ports, index;
    unsigned long offset;

    num_ports = __cvmx_helper_get_num_ipd_ports(interface);
    /*
     * We need to disable length checking so packet < 64 bytes and jumbo
     * frames don't get errors
     */
    for (index = 0; index < num_ports; index++) {
        /* PIP_PRT_CFG is indexed by pkind on chips with the PKND
         * feature, by IPD port number otherwise */
        offset = ((octeon_has_feature(OCTEON_FEATURE_PKND)) ?
                  cvmx_helper_get_pknd(interface, index) :
                  cvmx_helper_get_ipd_port(interface, index));
        port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(offset));
        port_cfg.s.maxerr_en = 0;
        port_cfg.s.minerr_en = 0;
        cvmx_write_csr(CVMX_PIP_PRT_CFGX(offset), port_cfg.u64);
    }

    /*
     * Disable FCS stripping for loopback ports
     */
    if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
        cvmx_ipd_sub_port_fcs_t ipd_sub_port_fcs;
        ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
        /* port_bit2 covers the loopback port range on pre-PKND chips */
        ipd_sub_port_fcs.s.port_bit2 = 0;
        cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
    }
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,83 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for LOOP initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_LOOP_H__
#define __CVMX_HELPER_LOOP_H__

/**
 * @INTERNAL
 * Probe a LOOP interface and determine the number of ports
 * connected to it. The LOOP interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
extern int __cvmx_helper_loop_probe(int interface);

/* NOTE(review): unlike the NPI/RGMII helpers, enumerate is a separate
 * out-of-line function here rather than an inline wrapper around the
 * probe routine; its contract is presumably the same (return the port
 * count for the interface) -- confirm against the .c implementation. */
extern int __cvmx_helper_loop_enumerate(int interface);

/**
 * @INTERNAL
 * Bringup and enable a LOOP interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_loop_enable(int interface);

#endif  /* __CVMX_HELPER_LOOP_H__ */

View File

@@ -1,179 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for NPI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-sli-defs.h>
#endif
#include <asm/octeon/cvmx-pip-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-pko.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx-helper.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * Probe a NPI interface and determine the number of ports
 * connected to it. The NPI interface should still be down
 * after this call.
 *
 * The port count is purely a function of the Octeon model; no
 * hardware registers are read here.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_npi_probe(int interface)
{
#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
    /* CN68XX reports 32 ports on its NPI/SLI interface. */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        return 32;
#if 0
    /* Technically CN30XX, CN31XX, and CN50XX contain packet engines, but
       nobody ever uses them. Since this is the case, we disable them here */
    else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
        return 2;
    else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
        return 1;
#endif
    else if (!(OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)))
        return 4; /* The packet engines didn't exist before cn56xx pass 2 */
#endif
    /* Either no PCI PKO queues are configured at compile time, or this
       model has no usable packet engines. */
    return 0;
}
/**
 * @INTERNAL
 * Bringup and enable a NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * Note there is no explicit "enable" CSR write here: actual packet
 * enables are controlled by the remote PCI/PCIe host (see comment at
 * the end of the function).
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_npi_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    /* On CN50XX, CN52XX, and CN56XX we need to disable length checking
       so packet < 64 bytes and jumbo frames don't get errors */
    if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) && !OCTEON_IS_MODEL(OCTEON_CN58XX))
    {
        int port;
        for (port=0; port<num_ports; port++)
        {
            cvmx_pip_prt_cfgx_t port_cfg;
            /* On CN68XX the PIP config is indexed by pknd rather than
               by IPD port number. */
            int ipd_port = (OCTEON_IS_MODEL(OCTEON_CN68XX)) ?
                cvmx_helper_get_pknd(interface, port) :
                cvmx_helper_get_ipd_port(interface, port);
            /* Read-modify-write: clear only the three length-error
               enables, leaving the rest of the port config intact. */
            port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
            port_cfg.s.lenerr_en = 0;
            port_cfg.s.maxerr_en = 0;
            port_cfg.s.minerr_en = 0;
            cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
            if (OCTEON_IS_MODEL(OCTEON_CN68XX))
            {
                /*
                 * Set up pknd and bpid
                 */
                cvmx_sli_portx_pkind_t config;
                config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_PORTX_PKIND(port));
                config.s.bpkind = cvmx_helper_get_bpid(interface, port);
                config.s.pkind = cvmx_helper_get_pknd(interface, port);
                cvmx_write_csr(CVMX_PEXP_SLI_PORTX_PKIND(port), config.u64);
            }
        }
    }
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        /*
         * Set up pko pipes.
         */
        cvmx_sli_tx_pipe_t config;
        config.u64 = cvmx_read_csr(CVMX_PEXP_SLI_TX_PIPE);
        config.s.base = __cvmx_pko_get_pipe (interface, 0);
#ifdef CVMX_HELPER_NPI_MAX_PIPES
        /* Compile-time override for the number of PKO pipes. */
        config.s.nump = CVMX_HELPER_NPI_MAX_PIPES;
#else
        config.s.nump = num_ports;
#endif
        cvmx_write_csr(CVMX_PEXP_SLI_TX_PIPE, config.u64);
    }
    /* Enables are controlled by the remote host, so nothing to do here */
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@@ -1,86 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for NPI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_NPI_H__
#define __CVMX_HELPER_NPI_H__

/**
 * @INTERNAL
 * Probe a NPI interface and determine the number of ports
 * connected to it. The NPI interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
extern int __cvmx_helper_npi_probe(int interface);

/* Enumeration and probing are the same operation for NPI: the port
 * count is determined by the Octeon model alone, so enumerate simply
 * forwards to probe. */
static inline int __cvmx_helper_npi_enumerate(int interface)
{
    return __cvmx_helper_npi_probe(interface);
}

/**
 * @INTERNAL
 * Bringup and enable a NPI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_npi_enable(int interface);

#endif  /* __CVMX_HELPER_NPI_H__ */

View File

@@ -1,557 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#endif
#include <asm/octeon/cvmx-asxx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
#include <asm/octeon/cvmx-dbg-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-mdio.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-mdio.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * Probe RGMII ports and determine the number present
 *
 * @param interface Interface to probe
 *
 * @return Number of RGMII/GMII/MII ports (0-4).
 */
int __cvmx_helper_rgmii_probe(int interface)
{
    cvmx_gmxx_inf_mode_t mode;
    int num_ports = 0;
    int cn38_family;
    int cn30_family;

    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));

    /* Two chip families are handled here: CN38XX/CN58XX and
       CN30XX/CN31XX/CN50XX.  The mode "type" bit means SPI on the
       former and GMII/MII on the latter. */
    cn38_family = OCTEON_IS_MODEL(OCTEON_CN38XX) ||
                  OCTEON_IS_MODEL(OCTEON_CN58XX);
    cn30_family = OCTEON_IS_MODEL(OCTEON_CN31XX) ||
                  OCTEON_IS_MODEL(OCTEON_CN30XX) ||
                  OCTEON_IS_MODEL(OCTEON_CN50XX);

    if (mode.s.type) {
        if (cn38_family) {
            cvmx_dprintf("ERROR: RGMII initialize called in SPI interface\n");
        } else if (cn30_family) {
            /* On these chips "type" says we're in GMII/MII mode. This
               limits us to 2 ports */
            num_ports = 2;
        } else {
            cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", __FUNCTION__);
        }
    } else {
        if (cn38_family) {
            num_ports = 4;
        } else if (cn30_family) {
            num_ports = 3;
        } else {
            cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n", __FUNCTION__);
        }
    }

    return num_ports;
}
/**
 * Put an RGMII interface in loopback mode. Internal packets sent
 * out will be received back again on the same port. Externally
 * received packets will echo back out.
 *
 * @param port IPD port number to loop.
 */
void cvmx_helper_rgmii_internal_loopback(int port)
{
    /* Decode interface and index from the IPD port number: ports
       0-15 map to interface 0, 16-31 to interface 1. */
    int interface = (port >> 4) & 1;
    int index = port & 0xf;
    uint64_t tmp;
    cvmx_gmxx_prtx_cfg_t gmx_cfg;
    /* Force 1Gbps full-duplex timing parameters while looped back
       (same CLK/SLOT/BURST values used for gigabit elsewhere in this
       file). */
    gmx_cfg.u64 = 0;
    gmx_cfg.s.duplex = 1;
    gmx_cfg.s.slottime = 1;
    gmx_cfg.s.speed = 1;
    cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
    cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
    cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
    /* Set the ASX loop bit for this port and enable both the TX and
       RX directions (read-modify-write so other ports are untouched). */
    tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
    cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
    tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
    tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
    /* Enable GMX last, after all other configuration is in place. */
    gmx_cfg.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
}
/**
 * @INTERNAL
 * Configure all of the ASX, GMX, and PKO regsiters required
 * to get RGMII to function on the supplied interface.
 *
 * @param interface PKO Interface to configure (0 or 1)
 *
 * @return Zero on success
 */
int __cvmx_helper_rgmii_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int port;
    cvmx_gmxx_inf_mode_t mode;
    cvmx_asxx_tx_prt_en_t asx_tx;
    cvmx_asxx_rx_prt_en_t asx_rx;
    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
    /* Bail out if the interface is not enabled at all. */
    if (mode.s.en == 0)
        return -1;
    if ((OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1) /* Ignore SPI interfaces */
        return -1;
    /* Configure the ASX registers needed to use the RGMII ports */
    asx_tx.u64 = 0;
    asx_tx.s.prt_en = cvmx_build_mask(num_ports);
    cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
    asx_rx.u64 = 0;
    asx_rx.s.prt_en = cvmx_build_mask(num_ports);
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
    /* Configure the GMX registers needed to use the RGMII ports */
    for (port=0; port<num_ports; port++)
    {
        /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
           __cvmx_helper_setup_gmx() */
        /* Configure more flexible RGMII preamble checking. Pass 1 doesn't
           support this feature. */
        cvmx_gmxx_rxx_frm_ctl_t frm_ctl;
        frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface));
        frm_ctl.s.pre_free = 1; /* New field, so must be compile time */
        cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface), frm_ctl.u64);
        /* Each pause frame transmitted will ask for about 10M bit times
           before resume. If buffer space comes available before that time
           has expired, an XON pause frame (0 time) will be transmitted to
           restart the flow. */
        cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface), 20000);
        cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(port, interface), 19000);
        /*
         * Board types we have to know at compile-time.  The TX/RX clock
         * skew settings below are board/chip specific delay tunings.
         */
#if defined(OCTEON_BOARD_CAPK_0100ND)
        cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 26);
        cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 26);
#else
        /*
         * Vendor-defined board types.
         */
#if defined(OCTEON_VENDOR_LANNER)
        switch (cvmx_sysinfo_get()->board_type) {
        case CVMX_BOARD_TYPE_CUST_LANNER_MR320:
        case CVMX_BOARD_TYPE_CUST_LANNER_MR321X:
            if (port == 0) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 4);
            } else {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 7);
            }
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 0);
            break;
        }
#else
        /*
         * For board types we can determine at runtime.
         */
        if (OCTEON_IS_MODEL(OCTEON_CN50XX))
        {
            cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 16);
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 16);
        }
        else
        {
            cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface), 24);
            cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface), 24);
        }
#endif
#endif
    }
    /* Shared GMX setup (TX thresholds etc.) lives in the common helper. */
    __cvmx_helper_setup_gmx(interface, num_ports);
    /* enable the ports now */
    for (port=0; port<num_ports; port++)
    {
        cvmx_gmxx_prtx_cfg_t gmx_cfg;
        /* Autoconfigure the link (speed/duplex) before enabling GMX. */
        cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, port));
        gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
        gmx_cfg.s.en = 1;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface), gmx_cfg.u64);
    }
    return 0;
}
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    cvmx_helper_link_info_t result;
    cvmx_asxx_prt_loop_t prt_loop;

    prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));

    /* If the port is not in internal loopback, the real PHY state
       comes from the board support code. */
    if (!(prt_loop.s.int_loop & (1 << index)))
        return __cvmx_helper_board_link_get(ipd_port);

    /* Force 1Gbps full duplex on internal loopback */
    result.u64 = 0;
    result.s.full_duplex = 1;
    result.s.link_up = 1;
    result.s.speed = 1000;
    return result;
}
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * Sequence: quiesce RX/TX (disable ASX RX, zero the PKO queue QOS
 * masks, override backpressure), wait for GMX to idle, reprogram
 * duplex/speed/clocks with the port disabled, then restore everything
 * that was saved.
 *
 * @param ipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    int result = 0;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    cvmx_gmxx_prtx_cfg_t original_gmx_cfg;
    cvmx_gmxx_prtx_cfg_t new_gmx_cfg;
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos;
    /* Saved per-queue QOS state; 16 entries assumed sufficient for the
       number of queues on one port -- NOTE(review): no bounds check
       against cvmx_pko_get_num_queues() below, confirm max is <= 16. */
    cvmx_pko_mem_queue_qos_t pko_mem_queue_qos_save[16];
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp_save;
    int i;
    /* Ignore speed sets in the simulator */
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
        return 0;
    /* Read the current settings so we know the current enable state */
    original_gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    new_gmx_cfg = original_gmx_cfg;
    /* Disable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) & ~(1<<index));
    memset(pko_mem_queue_qos_save, 0, sizeof(pko_mem_queue_qos_save));
    /* Disable all queues so that TX should become idle */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        /* Indirect CSR access: select the queue, then read/write the
           QOS register for it. */
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
        pko_mem_queue_qos.s.pid = ipd_port;
        pko_mem_queue_qos.s.qid = queue;
        /* Save the original value, then clear the QOS mask to stop
           this queue from being serviced. */
        pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
        pko_mem_queue_qos.s.qos_mask = 0;
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
    }
    /* Disable backpressure */
    gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
    gmx_tx_ovr_bp.s.bp &= ~(1<<index);
    gmx_tx_ovr_bp.s.en |= 1<<index;
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
    /* Read back -- presumably to ensure the write has completed before
       polling below; confirm against the CSR access model. */
    cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    /* Poll the GMX state machine waiting for it to become idle. Preferably we
       should only change speed when it is idle. If it doesn't become idle we
       will still do the speed change, but there is a slight chance that GMX
       will lockup */
    cvmx_write_csr(CVMX_NPI_DBG_SELECT, interface*0x800 + index*0x100 + 0x880);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&7, ==, 0, 10000);
    CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, cvmx_dbg_data_t, data&0xf, ==, 0, 10000);
    /* Disable the port before we make any changes */
    new_gmx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    /* Set full/half duplex */
    if (!link_info.s.link_up)
        new_gmx_cfg.s.duplex = 1; /* Force full duplex on down links */
    else
        new_gmx_cfg.s.duplex = link_info.s.full_duplex;
    /* Set the link speed. Anything unknown is set to 1Gbps.  Note the
       GMX speed bit distinguishes only gigabit (1) from 10/100 (0);
       the actual 10-vs-100 rate is set via the clock divider below. */
    if (link_info.s.speed == 10)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else if (link_info.s.speed == 100)
    {
        new_gmx_cfg.s.slottime = 0;
        new_gmx_cfg.s.speed = 0;
    }
    else
    {
        new_gmx_cfg.s.slottime = 1;
        new_gmx_cfg.s.speed = 1;
    }
    /* Adjust the clocks */
    if (link_info.s.speed == 10)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else if (link_info.s.speed == 100)
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
    }
    else
    {
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    }
    if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
    {
        if ((link_info.s.speed == 10) || (link_info.s.speed == 100))
        {
            cvmx_gmxx_inf_mode_t mode;
            mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
            /*
            ** Port  .en  .type  .p0mii  Configuration
            ** ----  ---  -----  ------  -----------------------------------------
            **  X      0     X      X    All links are disabled.
            **  0      1     X      0    Port 0 is RGMII
            **  0      1     X      1    Port 0 is MII
            **  1      1     0      X    Ports 1 and 2 are configured as RGMII ports.
            **  1      1     1      X    Port 1: GMII/MII; Port 2: disabled. GMII or
            **                           MII port is selected by GMX_PRT1_CFG[SPEED].
            */
            /* In MII mode, CLK_CNT = 1. */
            if (((index == 0) && (mode.s.p0mii == 1)) || ((index != 0) && (mode.s.type == 1)))
            {
                cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
            }
        }
    }
    /* Do a read to make sure all setup stuff is complete */
    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    /* Save the new GMX setting without enabling the port */
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
    /* Enable the lowest level RX */
    cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
                   cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1<<index));
    /* Re-enable the TX path */
    for (i=0; i<cvmx_pko_get_num_queues(ipd_port); i++)
    {
        int queue = cvmx_pko_get_base_queue(ipd_port) + i;
        cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
        cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos_save[i].u64);
    }
    /* Restore backpressure */
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
    /* Restore the GMX enable state. Port config is complete */
    new_gmx_cfg.s.en = original_gmx_cfg.s.en;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
    return result;
}
/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    int original_enable;
    cvmx_gmxx_prtx_cfg_t gmx_cfg;
    cvmx_asxx_prt_loop_t asxx_prt_loop;
    /* Read the current enable state and save it */
    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    original_enable = gmx_cfg.s.en;
    /* Force port to be disabled */
    gmx_cfg.s.en = 0;
    if (enable_internal)
    {
        /* Force speed if we're doing internal loopback */
        gmx_cfg.s.duplex = 1;
        gmx_cfg.s.slottime = 1;
        gmx_cfg.s.speed = 1;
        cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
        cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
        cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
    }
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
    /* Set the loopback bits.  Both the internal and external bits are
       updated unconditionally so disabling works too. */
    asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
    if (enable_internal)
        asxx_prt_loop.s.int_loop |= 1<<index;
    else
        asxx_prt_loop.s.int_loop &= ~(1<<index);
    if (enable_external)
        asxx_prt_loop.s.ext_loop |= 1<<index;
    else
        asxx_prt_loop.s.ext_loop &= ~(1<<index);
    cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
    /* Force enables in internal loopback */
    if (enable_internal)
    {
        uint64_t tmp;
        tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
        cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
        tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
        cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
        /* Internal loopback implies the port must be enabled, so the
           restore below turns it on regardless of the saved state. */
        original_enable = 1;
    }
    /* Restore the enable state */
    gmx_cfg.s.en = original_enable;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@@ -1,135 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for RGMII/GMII/MII initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_RGMII_H__
#define __CVMX_HELPER_RGMII_H__

/**
 * @INTERNAL
 * Probe RGMII ports and determine the number present
 *
 * @param interface Interface to probe
 *
 * @return Number of RGMII/GMII/MII ports (0-4).
 */
extern int __cvmx_helper_rgmii_probe(int interface);

/* Enumeration and probing are the same operation for RGMII: the port
 * count depends only on the chip model and interface mode registers. */
static inline int __cvmx_helper_rgmii_enumerate(int interface)
{
    return __cvmx_helper_rgmii_probe(interface);
}

/**
 * Put an RGMII interface in loopback mode. Internal packets sent
 * out will be received back again on the same port. Externally
 * received packets will echo back out.
 *
 * @param port IPD port number to loop.
 */
extern void cvmx_helper_rgmii_internal_loopback(int port);

/**
 * @INTERNAL
 * Configure all of the ASX, GMX, and PKO regsiters required
 * to get RGMII to function on the supplied interface.
 *
 * @param interface PKO Interface to configure (0 or 1)
 *
 * @return Zero on success
 */
extern int __cvmx_helper_rgmii_enable(int interface);

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_rgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);

/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to sent out again.
 *
 * @param ipd_port IPD/PKO port to loopback.
 * @param enable_internal
 *                 Non zero if you want internal loopback
 * @param enable_external
 *                 Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);

#endif  /* __CVMX_HELPER_RGMII_H__ */

View File

@@ -1,778 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SGMII initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-qlm.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#endif
#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-ciu-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-mdio.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-qlm.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-sysinfo.h"
#include "cvmx-mdio.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#include "cvmx-qlm.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * Perform initialization required only once for an SGMII port.
 *
 * With GMX disabled, programs the PCS link timer for the mode in use
 * (1000BASE-X vs SGMII) and writes the auto-negotiation advertisement
 * register appropriate for that mode.
 *
 * @param interface Interface to init
 * @param index     Index of port on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
    /* SCLK rate in MHz; used below to convert the link timer intervals
       (specified in time) into clock counts. */
    const uint64_t clock_mhz = cvmx_clock_get_rate(CVMX_CLOCK_SCLK) / 1000000;
    cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
    cvmx_pcsx_linkx_timer_count_reg_t pcsx_linkx_timer_count_reg;
    cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
    /* Disable GMX while the PCS registers are reprogrammed */
    gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    gmxx_prtx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
    /* Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the appropriate
       value. 1000BASE-X specifies a 10ms interval. SGMII specifies a 1.6ms
       interval. */
    pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
    pcsx_linkx_timer_count_reg.u64 = cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
    /* MISCX_CTL[MODE] set means 1000BASE-X; clear means SGMII. */
    if (pcsx_miscx_ctl_reg.s.mode
#if defined(OCTEON_VENDOR_GEFES)
        /* GEF Fiber SFP testing on W5650 showed this to cause link issues for 1000BASE-X*/
        && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CUST_W5650)
        && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_CUST_W63XX)
#endif
        )
    {
        /* 1000BASE-X: 10ms interval. The >>10 divides by 1024, the
           timer's tick granularity. */
        pcsx_linkx_timer_count_reg.s.count = (10000ull * clock_mhz) >> 10;
    }
    else
    {
        /* SGMII: 1.6ms interval */
        pcsx_linkx_timer_count_reg.s.count = (1600ull * clock_mhz) >> 10;
    }
    cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface), pcsx_linkx_timer_count_reg.u64);
    /* Write the advertisement register to be used as the
       tx_Config_Reg<D15:D0> of the autonegotiation.
       In 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
       In SGMII PHY mode, tx_Config_Reg<D15:D0> is PCS*_SGM*_AN_ADV_REG.
       In SGMII MAC mode, tx_Config_Reg<D15:D0> is the fixed value 0x4001, so
       this step can be skipped. */
    if (pcsx_miscx_ctl_reg.s.mode)
    {
        /* 1000BASE-X: advertise full/half duplex, symmetric+asymmetric
           pause, no remote fault. */
        cvmx_pcsx_anx_adv_reg_t pcsx_anx_adv_reg;
        pcsx_anx_adv_reg.u64 = cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
        pcsx_anx_adv_reg.s.rem_flt = 0;
        pcsx_anx_adv_reg.s.pause = 3;
        pcsx_anx_adv_reg.s.hfd = 1;
        pcsx_anx_adv_reg.s.fd = 1;
        cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface), pcsx_anx_adv_reg.u64);
    }
    else
    {
#ifdef CVMX_HELPER_CONFIG_NO_PHY
        /* If the interface does not have PHY, then set explicitly in PHY mode
           so that link will be set during auto negotiation. */
        if (!pcsx_miscx_ctl_reg.s.mac_phy)
        {
            cvmx_dprintf("SGMII%d%d: Forcing PHY mode as PHY address is not set\n", interface, index);
            pcsx_miscx_ctl_reg.s.mac_phy = 1;
            cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
        }
#endif
        if (pcsx_miscx_ctl_reg.s.mac_phy)
        {
            /* PHY Mode: advertise full duplex, 1000Mbps (speed encoding 2) */
            cvmx_pcsx_sgmx_an_adv_reg_t pcsx_sgmx_an_adv_reg;
            pcsx_sgmx_an_adv_reg.u64 = cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface));
            pcsx_sgmx_an_adv_reg.s.dup = 1;
            pcsx_sgmx_an_adv_reg.s.speed= 2;
            cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG(index, interface), pcsx_sgmx_an_adv_reg.u64);
        }
        else
        {
            /* MAC Mode - Nothing to do (advertisement is fixed at 0x4001) */
        }
    }
    return 0;
}
/**
 * @INTERNAL
 * Check whether errata G-15618 applies to the current chip/board.
 *
 * The errata requires skipping the PCS soft reset sequence on early
 * CN63XX/CN66XX/CN68XX silicon passes (and on the simulator).
 *
 * @return Non-zero if the G-15618 workaround is required, zero otherwise.
 */
static int __cvmx_helper_need_g15618(void)
{
    /* Return the predicate directly instead of an if/else returning 1/0 */
    return (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM
            || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)
            || OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1)
            || OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)
            || OCTEON_IS_MODEL(OCTEON_CN68XX_PASS1_X));
}
/**
 * @INTERNAL
 * Initialize the SERTES link for the first time or after a loss
 * of link.
 *
 * Takes the PCS block through a soft reset (unless errata G-15618
 * forbids it), then restarts auto negotiation and waits for it to
 * complete.
 *
 * @param interface Interface to init
 * @param index     Index of port on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
{
    cvmx_pcsx_mrx_control_reg_t control_reg;
    uint64_t link_timeout;
#if defined(OCTEON_VENDOR_GEFES)
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA5651X) {
        return 0; /* no auto-negotiation */
    }
#endif
    /* Take PCS through a reset sequence.
       PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
       Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the value of
       the other PCS*_MR*_CONTROL_REG bits).
       Read PCS*_MR*_CONTROL_REG[RESET] until it changes value to zero. */
    control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
    /* Errata G-15618 requires disabling PCS soft reset in CN63XX pass upto 2.1. */
    if (!__cvmx_helper_need_g15618())
    {
        link_timeout = 200000;
#if defined(OCTEON_VENDOR_GEFES)
        /* TNPA56X4 interface 0 needs a much longer reset timeout */
        if( (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA56X4) && (interface == 0) )
        {
            link_timeout = 5000000;
        }
#endif
        control_reg.s.reset = 1;
        cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
        if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_CONTROL_REG(index, interface), cvmx_pcsx_mrx_control_reg_t, reset, ==, 0, link_timeout))
        {
            cvmx_dprintf("SGMII%d: Timeout waiting for port %d to finish reset\n", interface, index);
            return -1;
        }
    }
    /* Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh sgmii negotiation starts. */
    control_reg.s.rst_an = 1;
    control_reg.s.an_en = 1;
    control_reg.s.pwr_dn = 0;
    cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
    /* Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating that
       sgmii autonegotiation is complete. In MAC mode this isn't an ethernet
       link, but a link between Octeon and the PHY */
    if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
        CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface), cvmx_pcsx_mrx_status_reg_t, an_cpt, ==, 1, 10000))
    {
        /* Timeout is reported to the caller only; printing here would be
           noisy when a cable is simply unplugged. */
        //cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index);
        return -1;
    }
    return 0;
}
/**
 * @INTERNAL
 * Configure an SGMII link to the specified speed after the SERTES
 * link is up.
 *
 * GMX is quiesced (disabled, drained to idle) before the speed/duplex
 * dependent registers are rewritten, then restored to its prior
 * enable state.
 *
 * @param interface Interface to init
 * @param index     Index of port on the interface
 * @param link_info Link state to configure
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface, int index, cvmx_helper_link_info_t link_info)
{
    int is_enabled;
    cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
    cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
#if defined(OCTEON_VENDOR_GEFES)
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_TNPA5651X)
        return 0; /* no auto-negotiation */
#endif
    /* Disable GMX before we make any changes. Remember the enable state */
    gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    is_enabled = gmxx_prtx_cfg.s.en;
    gmxx_prtx_cfg.s.en = 0;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
    /* Wait for GMX to be idle */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000) ||
        CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
    {
        cvmx_dprintf("SGMII%d: Timeout waiting for port %d to be idle\n", interface, index);
        return -1;
    }
    /* Read GMX CFG again to make sure the disable completed */
    gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    /* Get the misc control for PCS. We will need to set the duplication amount */
    pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
    /* Use GMXENO to force the link down if the status we get says it should be down */
    pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
    /* Only change the duplex setting if the link is up */
    if (link_info.s.link_up)
        gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
    /* Do speed based setting for GMX: speed/speed_msb encode the rate,
       slottime and the TXX_SLOT/TXX_BURST values follow the 802.3
       half-duplex timing for the selected speed, samp_pt sets the PCS
       sample point. */
    switch (link_info.s.speed)
    {
        case 10:
            gmxx_prtx_cfg.s.speed = 0;
            gmxx_prtx_cfg.s.speed_msb = 1;
            gmxx_prtx_cfg.s.slottime = 0;
            pcsx_miscx_ctl_reg.s.samp_pt = 25; /* Setting from GMX-603 */
            cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
            cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
            break;
        case 100:
            gmxx_prtx_cfg.s.speed = 0;
            gmxx_prtx_cfg.s.speed_msb = 0;
            gmxx_prtx_cfg.s.slottime = 0;
            pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
            cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
            cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
            break;
        case 1000:
            gmxx_prtx_cfg.s.speed = 1;
            gmxx_prtx_cfg.s.speed_msb = 0;
            gmxx_prtx_cfg.s.slottime = 1;
            pcsx_miscx_ctl_reg.s.samp_pt = 1;
            cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
            if (gmxx_prtx_cfg.s.duplex)
                cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0); // full duplex
            else
                cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192); // half duplex
            break;
        default:
            /* Unknown speed: leave speed-dependent registers untouched */
            break;
    }
    /* Write the new misc control for PCS */
    cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
    /* Write the new GMX settings with the port still disabled */
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
    /* Read GMX CFG again to make sure the config completed */
    gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
    /* Restore the enabled / disabled state */
    gmxx_prtx_cfg.s.en = is_enabled;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
    return 0;
}
/**
 * @INTERNAL
 * Bring up the SGMII interface to be ready for packet I/O but
 * leave I/O disabled using the GMX override. This function
 * follows the bringup documented in 10.6.3 of the manual.
 *
 * Applies CN63XX QLM de-emphasis errata workarounds, sets up GMX,
 * then performs per-port one-time init and (outside the Linux kernel
 * build) an initial link set.
 *
 * @param interface Interface to bringup
 * @param num_ports Number of ports on the interface
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
{
    int index;
    int do_link_set = 1;
    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        cvmx_ciu_qlm2_t ciu_qlm;
        ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
        ciu_qlm.s.txbypass = 1;
        ciu_qlm.s.txdeemph = 0xf;
        ciu_qlm.s.txmargin = 0xd;
        cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
    }
    /* CN63XX Pass 2.0 and 2.1 errata G-15273 requires the QLM De-emphasis be
       programmed when using a 156.25Mhz ref clock */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) ||
        OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
    {
        /* Read the QLM speed pins */
        cvmx_mio_rst_boot_t mio_rst_boot;
        mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
        /* qlm2_spd == 4 indicates the 156.25Mhz ref clock case */
        if (mio_rst_boot.cn63xx.qlm2_spd == 4)
        {
            cvmx_ciu_qlm2_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 0x0;
            ciu_qlm.s.txmargin = 0xf;
            cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
        }
    }
    __cvmx_helper_setup_gmx(interface, num_ports);
    for (index=0; index<num_ports; index++)
    {
        int ipd_port = cvmx_helper_get_ipd_port(interface, index);
        __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
        /* Linux kernel driver will call ....link_set with the proper link
           state. In the simulator there is no link state polling and
           hence it is set from here. */
        if (!(cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM))
            do_link_set = 0;
#endif
        if (do_link_set)
            __cvmx_helper_sgmii_link_set(ipd_port, __cvmx_helper_sgmii_link_get(ipd_port));
    }
    return 0;
}
/**
 * @INTERNAL
 * Report the number of SGMII ports on an interface.
 *
 * @param interface Interface to enumerate (unused except by model check)
 *
 * @return Port count: 2 on CNF71XX, 4 on all other supported models.
 */
int __cvmx_helper_sgmii_enumerate(int interface)
{
    /* CNF71XX exposes only two SGMII ports per interface */
    return OCTEON_IS_MODEL(OCTEON_CNF71XX) ? 2 : 4;
}
/**
 * @INTERNAL
 * Probe a SGMII interface and determine the number of ports
 * connected to it. The SGMII interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_sgmii_probe(int interface)
{
    cvmx_gmxx_inf_mode_t mode;
    /* Check if QLM is configured correctly for SGMII, verify the speed
       as well as mode */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        int qlm = cvmx_qlm_interface(interface);
        /* Status 1 is the only QLM configuration usable for SGMII here;
           anything else disables the interface. */
        if (cvmx_qlm_get_status(qlm) != 1)
            return 0;
    }
    /* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the interface
       needs to be enabled before IPD otherwise per port backpressure
       may not work properly */
    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
    mode.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
    return __cvmx_helper_sgmii_enumerate(interface);
}
/**
 * @INTERNAL
 * Bringup and enable a SGMII interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * Sets up PKIND/BPID mapping on chips that support it, runs the
 * hardware bringup, applies the CN68XX pad/FCS-in-PKO quirk, then
 * enables each port in GMX.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_sgmii_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int index;
    /* Setup PKND and BPID */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        for (index = 0; index < num_ports; index++)
        {
            cvmx_gmxx_bpid_msk_t bpid_msk;
            cvmx_gmxx_bpid_mapx_t bpid_map;
            cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
            /* Setup PKIND */
            gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
            gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, index);
            cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
            /* Setup BPID */
            bpid_map.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MAPX(index, interface));
            bpid_map.s.val = 1;
            bpid_map.s.bpid = cvmx_helper_get_bpid(interface, index);
            cvmx_write_csr(CVMX_GMXX_BPID_MAPX(index, interface), bpid_map.u64);
            /* Include this port in the OR mask and exclude it from the
               AND mask of the backpressure ID mask register */
            bpid_msk.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MSK(interface));
            bpid_msk.s.msk_or |= (1<<index);
            bpid_msk.s.msk_and &= ~(1<<index);
            cvmx_write_csr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);
        }
    }
    __cvmx_helper_sgmii_hardware_init(interface, num_ports);
    /* CN68XX adds the padding and FCS in PKO, not GMX */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        cvmx_gmxx_txx_append_t gmxx_txx_append_cfg;
        for (index = 0; index < num_ports; index++)
        {
            gmxx_txx_append_cfg.u64 = cvmx_read_csr(
                CVMX_GMXX_TXX_APPEND(index, interface));
            gmxx_txx_append_cfg.s.fcs = 0;
            gmxx_txx_append_cfg.s.pad = 0;
            cvmx_write_csr(CVMX_GMXX_TXX_APPEND(index, interface),
                gmxx_txx_append_cfg.u64);
        }
    }
    for (index=0; index<num_ports; index++)
    {
        cvmx_gmxx_txx_append_t append_cfg;
        cvmx_gmxx_txx_sgmii_ctl_t sgmii_ctl;
        cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
        /* Clear the align bit if preamble is set to attain maximum tx rate. */
        append_cfg.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(index, interface));
        sgmii_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TXX_SGMII_CTL(index, interface));
        sgmii_ctl.s.align = append_cfg.s.preamble ? 0 : 1;
        cvmx_write_csr(CVMX_GMXX_TXX_SGMII_CTL(index, interface), sgmii_ctl.u64);
        /* Finally enable the port in GMX */
        gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
        gmxx_prtx_cfg.s.en = 1;
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
    }
    return 0;
}
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
{
    cvmx_helper_link_info_t result;
    cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    cvmx_pcsx_mrx_control_reg_t pcsx_mrx_control_reg;
    /* Default wire speed in Mbps; adjusted below for 2.5G-capable parts */
    int speed = 1000;
    int qlm;
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
    {
        /* The simulator gives you a simulated 1Gbps full duplex link */
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        result.s.speed = speed;
        return result;
    }
    if (OCTEON_IS_MODEL(OCTEON_CN66XX))
    {
        /* On CN66XX the per-index rate bit selects 2.5Gbps vs 1Gbps */
        cvmx_gmxx_inf_mode_t inf_mode;
        inf_mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
        if (inf_mode.s.rate & (1<<index))
            speed = 2500;
        else
            speed = 1000;
    }
    else if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        /* Derive the data rate from the QLM baud rate (8b/10b coding) */
        qlm = cvmx_qlm_interface(interface);
        speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
    }
    result.u64 = 0;
    pcsx_mrx_control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
    if (pcsx_mrx_control_reg.s.loopbck1)
    {
        /* Force 1Gbps full duplex link for internal loopback */
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        result.s.speed = speed;
        return result;
    }
    pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
    if (pcsx_miscx_ctl_reg.s.mode)
    {
        /* 1000BASE-X mode. NOTE(review): without OCTEON_VENDOR_GEFES this
           branch returns the zeroed result (link down) — confirm that is
           intended for non-GEFES 1000BASE-X configurations. */
#if defined(OCTEON_VENDOR_GEFES)
        /* 1000BASE-X */
        int interface = cvmx_helper_get_interface_num(ipd_port);
        int index = cvmx_helper_get_interface_index_num(ipd_port);
        cvmx_pcsx_miscx_ctl_reg_t mode_type;
        cvmx_pcsx_anx_results_reg_t inband_status;
        cvmx_pcsx_mrx_status_reg_t mrx_status;
        cvmx_pcsx_anx_adv_reg_t anxx_adv;
        anxx_adv.u64 = cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
        mrx_status.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
        mode_type.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
        /* Read Octeon's inband status */
        inband_status.u64 = cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG(index, interface));
        result.s.link_up = inband_status.s.link_ok;/* this is only accurate for 1000-base x */
        result.s.full_duplex = inband_status.s.dup;
        switch (inband_status.s.spd)
        {
        case 0: /* 10 Mbps */
            result.s.speed = 10;
            break;
        case 1: /* 100 Mbps */
            result.s.speed = 100;
            break;
        case 2: /* 1 Gbps */
            result.s.speed = 1000;
            break;
        case 3: /* Illegal */
            result.s.speed = 0;
            result.s.link_up = 0;
            break;
        }
#endif /* Actually not 100% this is GEFES specific */
    }
    else
    {
        if (pcsx_miscx_ctl_reg.s.mac_phy)
        {
            /* PHY Mode */
            cvmx_pcsx_mrx_status_reg_t pcsx_mrx_status_reg;
            cvmx_pcsx_anx_results_reg_t pcsx_anx_results_reg;
            /* Don't bother continuing if the SERTES low level link is down */
            pcsx_mrx_status_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
            if (pcsx_mrx_status_reg.s.lnk_st == 0)
            {
                /* Try to re-establish the low level link; on failure
                   return the zeroed (down) result */
                if (__cvmx_helper_sgmii_hardware_init_link(interface, index) != 0)
                    return result;
            }
            /* Read the autoneg results */
            pcsx_anx_results_reg.u64 = cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG(index, interface));
            if (pcsx_anx_results_reg.s.an_cpt)
            {
                /* Auto negotiation is complete. Set status accordingly.
                   The spd field encodes a divisor of the base rate:
                   0 => /100, 1 => /10, 2 => full rate. */
                result.s.full_duplex = pcsx_anx_results_reg.s.dup;
                result.s.link_up = pcsx_anx_results_reg.s.link_ok;
                switch (pcsx_anx_results_reg.s.spd)
                {
                case 0:
                    result.s.speed = speed / 100;
                    break;
                case 1:
                    result.s.speed = speed / 10;
                    break;
                case 2:
                    result.s.speed = speed;
                    break;
                default:
                    result.s.speed = 0;
                    result.s.link_up = 0;
                    break;
                }
            }
            else
            {
                /* Auto negotiation isn't complete. Return link down */
                result.s.speed = 0;
                result.s.link_up = 0;
            }
        }
        else /* MAC Mode: ask the board/PHY layer for the link state */
        {
            result = __cvmx_helper_board_link_get(ipd_port);
        }
    }
    return result;
}
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    if (link_info.s.link_up || !__cvmx_helper_need_g15618()) {
        __cvmx_helper_sgmii_hardware_init_link(interface, index);
    } else {
        /* Link is down and errata G-15618 forbids the PCS reset path:
           disable autonegotiation and force the link down via GMXENO. */
        cvmx_pcsx_mrx_control_reg_t control_reg;
        cvmx_pcsx_miscx_ctl_reg_t pcsx_miscx_ctl_reg;
        control_reg.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
        control_reg.s.an_en = 0;
        cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), control_reg.u64);
        /* Read back to flush the write before touching MISCX_CTL */
        cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
        /* BUG FIX: the original code set gmxeno on an *uninitialized*
           local and wrote it to the CSR, which reads indeterminate memory
           (undefined behavior) and clobbers every other field of
           PCS*_MISC*_CTL_REG. Read-modify-write instead. */
        pcsx_miscx_ctl_reg.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
        /* Use GMXENO to force the link down it will get reenabled later... */
        pcsx_miscx_ctl_reg.s.gmxeno = 1;
        cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), pcsx_miscx_ctl_reg.u64);
        cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
        return 0;
    }
    return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index, link_info);
}
/**
 * @INTERNAL
 * Configure a port for internal and/or external loopback. Internal loopback
 * causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
 *
 * @param ipd_port        IPD/PKO port to loopback.
 * @param enable_internal Non zero if you want internal loopback
 * @param enable_external Non zero if you want external loopback
 *
 * @return Zero on success, negative on failure.
 */
int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external)
{
    cvmx_pcsx_mrx_control_reg_t mrx_ctl;
    cvmx_pcsx_miscx_ctl_reg_t misc_ctl;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);

    /* Internal loopback lives in the LOOPBCK1 bit of MR*_CONTROL */
    mrx_ctl.u64 = cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
    mrx_ctl.s.loopbck1 = enable_internal;
    cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface), mrx_ctl.u64);

    /* External loopback lives in the LOOPBCK2 bit of MISC*_CTL */
    misc_ctl.u64 = cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
    misc_ctl.s.loopbck2 = enable_external;
    cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface), misc_ctl.u64);

    /* Re-establish the SERTES link with the new loopback settings */
    __cvmx_helper_sgmii_hardware_init_link(interface, index);
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,126 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SGMII initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_SGMII_H__
#define __CVMX_HELPER_SGMII_H__
/**
* @INTERNAL
* Probe a SGMII interface and determine the number of ports
* connected to it. The SGMII interface should still be down after
* this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_sgmii_probe(int interface);
extern int __cvmx_helper_sgmii_enumerate(int interface);
/**
* @INTERNAL
* Bringup and enable a SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_sgmii_enable(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_sgmii_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
 * causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#endif

View File

@ -1,277 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SPI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-helper.h>
#endif
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-pip-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-spi.h"
#include "cvmx-sysinfo.h"
#include "cvmx-helper.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-spi.h"
#include "cvmx-sysinfo.h"
#include "cvmx-helper.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI initialization
routines wait for SPI training. You can override the value using
executive-config.h if necessary */
#ifndef CVMX_HELPER_SPI_TIMEOUT
#define CVMX_HELPER_SPI_TIMEOUT 10
#endif
/**
 * @INTERNAL
 * Report the number of SPI ports on an interface, accounting for
 * vendor-specific boards and the SPI4000 expansion card.
 *
 * @param interface Interface to enumerate
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_spi_enumerate(int interface)
{
#if defined(OCTEON_VENDOR_LANNER)
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_LANNER_MR955)
    {
        /* Clear this interface's half of the PKO CRC enable mask */
        cvmx_pko_reg_crc_enable_t enable;
        enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
        enable.s.enable &= 0xffff << (16 - (interface*16));
        cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
        if (interface == 1)
            return 12;
        /* XXX This is not entirely true. */
        return 0;
    }
#endif
#if defined(OCTEON_VENDOR_RADISYS)
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
        /* RSYS4GBE has a fixed, asymmetric port layout */
        if (interface == 0)
            return 13;
        if (interface == 1)
            return 8;
        return 0;
    }
#endif
    /* SPI4000 cards expose 10 ports; generic SPI uses all 16.
       The SPI4000 probe is skipped on the simulator. */
    if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
        cvmx_spi4000_is_present(interface))
        return 10;
    else
        return 16;
}
/**
 * @INTERNAL
 * Probe a SPI interface and determine the number of ports
 * connected to it. The SPI interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_spi_probe(int interface)
{
    int num_ports = __cvmx_helper_spi_enumerate(interface);

    if (num_ports == 16) {
        /*
         * Unlike the SPI4000, most SPI devices don't automatically
         * append the L2 CRC, so for everything except the SPI4000
         * (which enumerates as 10 ports) have PKO add it.
         */
        cvmx_pko_reg_crc_enable_t crc_enable;
        crc_enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
        crc_enable.s.enable |= 0xffff << (interface*16);
        cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, crc_enable.u64);
    }
    __cvmx_helper_setup_gmx(interface, num_ports);
    return num_ports;
}
/**
 * @INTERNAL
 * Bringup and enable a SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_spi_enable(int interface)
{
    /* Normally the ethernet L2 CRC is checked and stripped in the GMX block.
       When you are using SPI, this isn't the case and IPD needs to check
       the L2 CRC */
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int ipd_port;
    for (ipd_port=interface*16; ipd_port<interface*16+num_ports; ipd_port++)
    {
        cvmx_pip_prt_cfgx_t port_config;
        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
        port_config.s.crc_en = 1;
#ifdef OCTEON_VENDOR_RADISYS
        /*
         * Incoming packets on the RSYS4GBE have the FCS stripped.
         */
        if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE)
            port_config.s.crc_en = 0;
#endif
        cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
    }
    /* Train and start the SPI link; skip on the simulator where there
       is no real SPI hardware */
    if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM)
    {
        cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX, CVMX_HELPER_SPI_TIMEOUT, num_ports);
        if (cvmx_spi4000_is_present(interface))
            cvmx_spi4000_initialize(interface);
    }
    return 0;
}
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
{
    cvmx_helper_link_info_t result;
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int index = cvmx_helper_get_interface_index_num(ipd_port);
    result.u64 = 0;
    if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
    {
        /* The simulator gives you a simulated full duplex link */
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        result.s.speed = 10000;
    }
    else if (cvmx_spi4000_is_present(interface))
    {
        /* SPI4000 reports per-port status in-band; decode it */
        cvmx_gmxx_rxx_rx_inbnd_t inband = cvmx_spi4000_check_speed(interface, index);
        result.s.link_up = inband.s.status;
        result.s.full_duplex = inband.s.duplex;
        switch (inband.s.speed)
        {
        case 0: /* 10 Mbps */
            result.s.speed = 10;
            break;
        case 1: /* 100 Mbps */
            result.s.speed = 100;
            break;
        case 2: /* 1 Gbps */
            result.s.speed = 1000;
            break;
        case 3: /* Illegal */
            result.s.speed = 0;
            result.s.link_up = 0;
            break;
        }
    }
    else
    {
        /* For generic SPI we can't determine the link, just return some
           sane results */
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        result.s.speed = 10000;
    }
    return result;
}
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    /*
     * There is nothing to program here. If a SPI4000 is present the
     * setup was already done by cvmx_spi4000_check_speed(); generic
     * SPI carries no link information at all.
     */
    (void)ipd_port;
    (void)link_info;
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,110 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SPI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_SPI_H__
#define __CVMX_HELPER_SPI_H__

/**
 * @INTERNAL
 * Probe a SPI interface and determine the number of ports
 * connected to it. The SPI interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
extern int __cvmx_helper_spi_probe(int interface);

/**
 * @INTERNAL
 * Enumerate the ports on a SPI interface. See cvmx-helper-spi.c
 * for the relationship to __cvmx_helper_spi_probe().
 *
 * @param interface Interface to enumerate
 *
 * @return Number of ports on the interface. Zero to disable.
 */
extern int __cvmx_helper_spi_enumerate(int interface);

/**
 * @INTERNAL
 * Bringup and enable a SPI interface. After this call packet I/O
 * should be fully functional. This is called with IPD enabled but
 * PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_spi_enable(int interface);

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info);

#endif /* __CVMX_HELPER_SPI_H__ */

View File

@ -1,357 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SRIO initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 41586 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-qlm.h>
#include <asm/octeon/cvmx-srio.h>
#include <asm/octeon/cvmx-pip-defs.h>
#include <asm/octeon/cvmx-sriox-defs.h>
#include <asm/octeon/cvmx-sriomaintx-defs.h>
#include <asm/octeon/cvmx-dpi-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-helper.h"
#include "cvmx-srio.h"
#endif
#include "cvmx-qlm.h"
#else
#include "cvmx.h"
#include "cvmx-helper.h"
#include "cvmx-qlm.h"
#include "cvmx-srio.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
/**
 * @INTERNAL
 * Probe a SRIO interface and determine the number of ports
 * connected to it. The SRIO interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_srio_probe(int interface)
{
    cvmx_sriox_status_reg_t srio0_status_reg;
    cvmx_sriox_status_reg_t srio1_status_reg;

    if (!octeon_has_feature(OCTEON_FEATURE_SRIO))
        return 0;

    /* Read MIO_QLMX_CFG CSRs to find SRIO status. */
    if (OCTEON_IS_MODEL(OCTEON_CN66XX))
    {
        /* QLM0 status value that enables each SRIO port:
           port 0 needs 4 (1x4 lane), port 2 needs 5 (2x2 lane),
           ports 1 and 3 need 6 (4x1 long/short). */
        static const int required_status[4] = { 4, 6, 5, 6 };
        int qlm_status = cvmx_qlm_get_status(0);
        int srio_port = interface - 4;      /* SRIO interfaces start at 4 */

        if (srio_port >= 0 && srio_port <= 3 &&
            qlm_status == required_status[srio_port])
            return 2;
        return 0;
    }

    srio0_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
    srio1_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
    return (srio0_status_reg.s.srio || srio1_status_reg.s.srio) ? 2 : 0;
}
/**
 * @INTERNAL
 * Bringup and enable SRIO interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_srio_enable(int interface)
{
    int num_ports = cvmx_helper_ports_on_interface(interface);
    int index;
    cvmx_sriomaintx_core_enables_t sriomaintx_core_enables;
    cvmx_sriox_imsg_ctrl_t sriox_imsg_ctrl;
    cvmx_sriox_status_reg_t srio_status_reg;
    cvmx_dpi_ctl_t dpi_ctl;
    int srio_port = interface - 4;  /* SRIO interfaces are numbered from 4 */
    /* All SRIO ports have a cvmx_srio_rx_message_header_t header
       on them that must be skipped by IPD */
    for (index=0; index<num_ports; index++)
    {
        cvmx_pip_prt_cfgx_t port_config;
        cvmx_sriox_omsg_portx_t sriox_omsg_portx;
        cvmx_sriox_omsg_sp_mrx_t sriox_omsg_sp_mrx;
        cvmx_sriox_omsg_fmp_mrx_t sriox_omsg_fmp_mrx;
        cvmx_sriox_omsg_nmp_mrx_t sriox_omsg_nmp_mrx;
        int ipd_port = cvmx_helper_get_ipd_port(interface, index);
        port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
        /* Only change the skip if the user hasn't already set it */
        if (!port_config.s.skip)
        {
            port_config.s.skip = sizeof(cvmx_srio_rx_message_header_t);
            cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
        }
        /* Enable TX with PKO */
        sriox_omsg_portx.u64 = cvmx_read_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port));
        sriox_omsg_portx.s.port = (srio_port) * 2 + index;
        sriox_omsg_portx.s.enable = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_PORTX(index, srio_port), sriox_omsg_portx.u64);
        /* Allow OMSG controller to send regardless of the state of any other
           controller. Allow messages to different IDs and MBOXes to go in
           parallel */
        sriox_omsg_sp_mrx.u64 = 0;
        sriox_omsg_sp_mrx.s.xmbox_sp = 1;
        sriox_omsg_sp_mrx.s.ctlr_sp = 1;
        sriox_omsg_sp_mrx.s.ctlr_fmp = 1;
        sriox_omsg_sp_mrx.s.ctlr_nmp = 1;
        sriox_omsg_sp_mrx.s.id_sp = 1;
        sriox_omsg_sp_mrx.s.id_fmp = 1;
        sriox_omsg_sp_mrx.s.id_nmp = 1;
        sriox_omsg_sp_mrx.s.mbox_sp = 1;
        sriox_omsg_sp_mrx.s.mbox_fmp = 1;
        sriox_omsg_sp_mrx.s.mbox_nmp = 1;
        sriox_omsg_sp_mrx.s.all_psd = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_SP_MRX(index, srio_port), sriox_omsg_sp_mrx.u64);
        /* Allow OMSG controller to send regardless of the state of any other
           controller. Allow messages to different IDs and MBOXes to go in
           parallel */
        sriox_omsg_fmp_mrx.u64 = 0;
        sriox_omsg_fmp_mrx.s.ctlr_sp = 1;
        sriox_omsg_fmp_mrx.s.ctlr_fmp = 1;
        sriox_omsg_fmp_mrx.s.ctlr_nmp = 1;
        sriox_omsg_fmp_mrx.s.id_sp = 1;
        sriox_omsg_fmp_mrx.s.id_fmp = 1;
        sriox_omsg_fmp_mrx.s.id_nmp = 1;
        sriox_omsg_fmp_mrx.s.mbox_sp = 1;
        sriox_omsg_fmp_mrx.s.mbox_fmp = 1;
        sriox_omsg_fmp_mrx.s.mbox_nmp = 1;
        sriox_omsg_fmp_mrx.s.all_psd = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_FMP_MRX(index, srio_port), sriox_omsg_fmp_mrx.u64);
        /* Once the first part of a message is accepted, always accept the rest
           of the message */
        sriox_omsg_nmp_mrx.u64 = 0;
        sriox_omsg_nmp_mrx.s.all_sp = 1;
        sriox_omsg_nmp_mrx.s.all_fmp = 1;
        sriox_omsg_nmp_mrx.s.all_nmp = 1;
        cvmx_write_csr(CVMX_SRIOX_OMSG_NMP_MRX(index, srio_port), sriox_omsg_nmp_mrx.u64);
    }
    /* Choose the receive controller based on the mailbox */
    sriox_imsg_ctrl.u64 = cvmx_read_csr(CVMX_SRIOX_IMSG_CTRL(srio_port));
    sriox_imsg_ctrl.s.prt_sel = 0;
    sriox_imsg_ctrl.s.mbox = 0xa;
    cvmx_write_csr(CVMX_SRIOX_IMSG_CTRL(srio_port), sriox_imsg_ctrl.u64);
    /* DPI must be enabled for us to RX messages */
    dpi_ctl.u64 = cvmx_read_csr(CVMX_DPI_CTL);
    dpi_ctl.s.clk = 1;
    dpi_ctl.s.en = 1;
    cvmx_write_csr(CVMX_DPI_CTL, dpi_ctl.u64);
    /* Make sure register access is allowed */
    srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
    if (!srio_status_reg.s.access)
        return 0;
    /* Enable RX: turn on both inbound message units. The config read
       returns non-zero on failure, in which case we leave RX alone. */
    if (!cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
        CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), &sriomaintx_core_enables.u32))
    {
        sriomaintx_core_enables.s.imsg0 = 1;
        sriomaintx_core_enables.s.imsg1 = 1;
        cvmx_srio_config_write32(srio_port, 0, -1, 0, 0,
            CVMX_SRIOMAINTX_CORE_ENABLES(srio_port), sriomaintx_core_enables.u32);
    }
    return 0;
}
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by SRIO link status.
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state (all zero when the link is down or CSRs are unreadable)
 */
cvmx_helper_link_info_t __cvmx_helper_srio_link_get(int ipd_port)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    int srio_port = interface - 4;  /* SRIO interfaces are numbered from 4 */
    cvmx_helper_link_info_t result;
    cvmx_sriox_status_reg_t srio_status_reg;
    cvmx_sriomaintx_port_0_err_stat_t sriomaintx_port_0_err_stat;
    cvmx_sriomaintx_port_0_ctl_t sriomaintx_port_0_ctl;
    cvmx_sriomaintx_port_0_ctl2_t sriomaintx_port_0_ctl2;
    result.u64 = 0;
    /* Make sure register access is allowed */
    srio_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(srio_port));
    if (!srio_status_reg.s.access)
        return result;
    /* Read the port link status; a non-zero return means the config
       read itself failed, so report link down. */
    if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
        CVMX_SRIOMAINTX_PORT_0_ERR_STAT(srio_port),
        &sriomaintx_port_0_err_stat.u32))
        return result;
    /* Return if link is down */
    if (!sriomaintx_port_0_err_stat.s.pt_ok)
        return result;
    /* Read the port link width and speed */
    if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
        CVMX_SRIOMAINTX_PORT_0_CTL(srio_port),
        &sriomaintx_port_0_ctl.u32))
        return result;
    if (cvmx_srio_config_read32(srio_port, 0, -1, 0, 0,
        CVMX_SRIOMAINTX_PORT_0_CTL2(srio_port),
        &sriomaintx_port_0_ctl2.u32))
        return result;
    /* Link is up */
    result.s.full_duplex = 1;
    result.s.link_up = 1;
    /* Per-lane baud rate in Mbps from the selected baud encoding */
    switch (sriomaintx_port_0_ctl2.s.sel_baud)
    {
        case 1:
            result.s.speed = 1250;
            break;
        case 2:
            result.s.speed = 2500;
            break;
        case 3:
            result.s.speed = 3125;
            break;
        case 4:
            result.s.speed = 5000;
            break;
        case 5:
            result.s.speed = 6250;
            break;
        default:
            result.s.speed = 0;
            break;
    }
    /* Encode the negotiated lane count into the 10000s digit of speed */
    switch (sriomaintx_port_0_ctl.s.it_width)
    {
        case 2: /* Four lanes */
            result.s.speed += 40000;
            break;
        case 3: /* Two lanes */
            result.s.speed += 20000;
            break;
        default: /* One lane */
            result.s.speed += 10000;
            break;
    }
    return result;
}
/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_srio_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
    /* SRIO link parameters come from the hardware; nothing to program. */
    (void)ipd_port;
    (void)link_info;
    return 0;
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,111 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for SRIO initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 41586 $<hr>
*/
#ifndef __CVMX_HELPER_SRIO_H__
#define __CVMX_HELPER_SRIO_H__

/**
 * @INTERNAL
 * Probe a SRIO interface and determine the number of ports
 * connected to it. The SRIO interface should still be down after
 * this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
extern int __cvmx_helper_srio_probe(int interface);

/**
 * @INTERNAL
 * Enumerate the ports on a SRIO interface; implemented directly in
 * terms of __cvmx_helper_srio_probe().
 *
 * @param interface Interface to enumerate
 *
 * @return Number of ports on the interface. Zero to disable.
 */
static inline int __cvmx_helper_srio_enumerate(int interface)
{
    return __cvmx_helper_srio_probe(interface);
}

/**
 * @INTERNAL
 * Bringup and enable a SRIO interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_srio_enable(int interface);

/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by SRIO link status.
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
extern cvmx_helper_link_info_t __cvmx_helper_srio_link_get(int ipd_port);

/**
 * @INTERNAL
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param ipd_port  IPD/PKO port to configure
 * @param link_info The new link state
 *
 * @return Zero on success, negative on failure
 */
extern int __cvmx_helper_srio_link_set(int ipd_port, cvmx_helper_link_info_t link_info);

#endif /* __CVMX_HELPER_SRIO_H__ */

View File

@ -1,864 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Small helper utilities.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-sli-defs.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx.h"
#include "cvmx-bootmem.h"
#include "cvmx-fpa.h"
#include "cvmx-pip.h"
#include "cvmx-pko.h"
#include "cvmx-ilk.h"
#include "cvmx-ipd.h"
#include "cvmx-gmx.h"
#include "cvmx-spi.h"
#include "cvmx-sysinfo.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#include "cvmx-version.h"
#include "cvmx-helper-ilk.h"
#include "cvmx-helper-cfg.h"
#endif
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
/* Per-interface bookkeeping kept by the helper code. */
struct cvmx_iface {
    int cvif_ipd_nports;                /* number of IPD ports on this interface */
    int cvif_has_fcs;                   /* PKO fcs for this interface. */
    enum cvmx_pko_padding cvif_padding; /* padding PKO applies on this interface */
    cvmx_helper_link_info_t *cvif_ipd_port_link_info; /* per-port link info;
        NOTE(review): allocation/ownership not visible in this chunk — verify. */
};
/*
* This has to be static as u-boot expects to probe an interface and
* gets the number of its ports.
*/
static CVMX_SHARED struct cvmx_iface cvmx_interfaces[CVMX_HELPER_MAX_IFACE];
#ifndef CVMX_BUILD_FOR_LINUX_KERNEL
/**
 * Get the version of the CVMX libraries.
 *
 * @return Version string. Note this buffer is allocated statically
 *         and will be shared by all callers.
 */
const char *cvmx_helper_get_version(void)
{
    /* OCTEON_SDK_VERSION_STRING comes from cvmx-version.h (included above). */
    return OCTEON_SDK_VERSION_STRING;
}
#endif
/**
 * Convert a interface mode into a human readable string
 *
 * @param mode Mode to convert
 *
 * @return String ("UNKNOWN" for unrecognized values)
 */
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
{
    const char *name = "UNKNOWN";

    switch (mode)
    {
        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
            name = "DISABLED";
            break;
        case CVMX_HELPER_INTERFACE_MODE_RGMII:
            name = "RGMII";
            break;
        case CVMX_HELPER_INTERFACE_MODE_GMII:
            name = "GMII";
            break;
        case CVMX_HELPER_INTERFACE_MODE_SPI:
            name = "SPI";
            break;
        case CVMX_HELPER_INTERFACE_MODE_PCIE:
            name = "PCIE";
            break;
        case CVMX_HELPER_INTERFACE_MODE_XAUI:
            name = "XAUI";
            break;
        case CVMX_HELPER_INTERFACE_MODE_RXAUI:
            name = "RXAUI";
            break;
        case CVMX_HELPER_INTERFACE_MODE_SGMII:
            name = "SGMII";
            break;
        case CVMX_HELPER_INTERFACE_MODE_PICMG:
            name = "PICMG";
            break;
        case CVMX_HELPER_INTERFACE_MODE_NPI:
            name = "NPI";
            break;
        case CVMX_HELPER_INTERFACE_MODE_LOOP:
            name = "LOOP";
            break;
        case CVMX_HELPER_INTERFACE_MODE_SRIO:
            name = "SRIO";
            break;
        case CVMX_HELPER_INTERFACE_MODE_ILK:
            name = "ILK";
            break;
    }
    return name;
}
/**
 * Debug routine to dump the packet structure to the console
 *
 * @param work Work queue entry containing the packet to dump
 * @return Zero (always)
 */
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
    uint64_t count;
    uint64_t remaining_bytes;
    cvmx_buf_ptr_t buffer_ptr;
    uint64_t start_of_buffer;
    uint8_t * data_address;
    uint8_t * end_of_data;
    cvmx_dprintf("Packet Length: %u\n", cvmx_wqe_get_len(work));
    cvmx_dprintf(" Input Port: %u\n", cvmx_wqe_get_port(work));
    cvmx_dprintf(" QoS: %u\n", cvmx_wqe_get_qos(work));
    cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
    if (work->word2.s.bufs == 0)
    {
        /* No buffers: the packet data is inline in the WQE itself.
           Synthesize a buffer pointer that addresses it. */
        cvmx_ipd_wqe_fpa_queue_t wqe_pool;
        wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
        buffer_ptr.u64 = 0;
        buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
        buffer_ptr.s.size = 128;
        buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
        if (cvmx_likely(!work->word2.s.not_IP))
        {
            /* IP packet: account for the configured PIP IP offset */
            cvmx_pip_ip_offset_t pip_ip_offset;
            pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
            buffer_ptr.s.addr += (pip_ip_offset.s.offset<<3) - work->word2.s.ip_offset;
            buffer_ptr.s.addr += (work->word2.s.is_v6^1)<<2;
        }
        else
        {
            /* WARNING: This code assume that the packet is not RAW. If it was,
               we would use PIP_GBL_CFG[RAW_SHF] instead of
               PIP_GBL_CFG[NIP_SHF] */
            cvmx_pip_gbl_cfg_t pip_gbl_cfg;
            pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
            buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
        }
    }
    else
        buffer_ptr = work->packet_ptr;
    /* Walk the buffer chain and hex-dump each buffer's bytes. */
    remaining_bytes = cvmx_wqe_get_len(work);
    while (remaining_bytes)
    {
        /* Buffer start is "back" 128-byte blocks before the data address */
        start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
        cvmx_dprintf(" Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
        cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
        cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
        cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
        cvmx_dprintf(" Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
        cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
        cvmx_dprintf("\t\t");
        data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
        end_of_data = data_address + buffer_ptr.s.size;
        count = 0;
        while (data_address < end_of_data)
        {
            if (remaining_bytes == 0)
                break;
            else
                remaining_bytes--;
            cvmx_dprintf("%02x", (unsigned int)*data_address);
            data_address++;
            /* Break the hex dump into 8-byte groups per line */
            if (remaining_bytes && (count == 7))
            {
                cvmx_dprintf("\n\t\t");
                count = 0;
            }
            else
                count++;
        }
        cvmx_dprintf("\n");
        /* The next buffer pointer is stored 8 bytes before the data */
        if (remaining_bytes)
            buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
    }
    return 0;
}
/**
 * Setup Random Early Drop on a specific input queue
 *
 * @param queue Input queue to setup RED on (0-7)
 * @param pass_thresh
 *               Packets will begin slowly dropping when there are less than
 *               this many packet buffers free in FPA 0. Must be strictly
 *               greater than drop_thresh.
 * @param drop_thresh
 *               All incoming packets will be dropped when there are less
 *               than this many free packet buffers in FPA 0.
 * @return Zero on success. Negative on failure
 */
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
{
    cvmx_ipd_qosx_red_marks_t red_marks;
    cvmx_ipd_red_quex_param_t red_param;
    /* Validate arguments: only queues 0-7 exist, and the probability
       constant below divides by (pass - drop), so pass_thresh ==
       drop_thresh would be a division by zero (undefined behavior). */
    if (queue < 0 || queue > 7)
        return -1;
    if (pass_thresh <= drop_thresh)
        return -1;
    /* Set RED to begin dropping packets when there are pass_thresh buffers
       left. It will linearly drop more packets until reaching drop_thresh
       buffers */
    red_marks.u64 = 0;
    red_marks.s.drop = drop_thresh;
    red_marks.s.pass = pass_thresh;
    cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
    /* Use the actual queue 0 counter, not the average */
    red_param.u64 = 0;
    red_param.s.prb_con = (255ul<<24) / (red_marks.s.pass - red_marks.s.drop);
    red_param.s.avg_con = 1;
    red_param.s.new_con = 255;
    red_param.s.use_pcnt = 1;
    cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
    return 0;
}
/**
 * Setup Random Early Drop to automatically begin dropping packets.
 *
 * @param pass_thresh
 *               Packets will begin slowly dropping when there are less than
 *               this many packet buffers free in FPA 0.
 * @param drop_thresh
 *               All incoming packets will be dropped when there are less
 *               than this many free packet buffers in FPA 0.
 * @return Zero on success. Negative on failure
 */
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
    int queue;
    int interface;
    int port;
    /*
     * Disable backpressure based on queued buffers. It needs SW support
     */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        /* PKND chips identify ports by backpressure ID (bpid) */
        int bpid;
        for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
        {
            int num_ports;
            num_ports = cvmx_helper_ports_on_interface(interface);
            for (port = 0; port < num_ports; port++) {
                bpid = cvmx_helper_get_bpid(interface, port);
                if (bpid == CVMX_INVALID_BPID)
                    cvmx_dprintf(
                        "setup_red: cvmx_helper_get_bpid(%d, %d) = %d\n",
                        interface, port, cvmx_helper_get_bpid(interface, port));
                else
                    cvmx_write_csr(CVMX_IPD_BPIDX_MBUF_TH(bpid), 0);
            }
        }
    }
    else
    {
        /* Older chips use per-IPD-port backpressure page counters */
        cvmx_ipd_portx_bp_page_cnt_t page_cnt;
        page_cnt.u64 = 0;
        page_cnt.s.bp_enb = 0;
        page_cnt.s.page_cnt = 100;
        for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
        {
            for (port = cvmx_helper_get_first_ipd_port(interface);
                 port < cvmx_helper_get_last_ipd_port(interface); port++)
                cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port), page_cnt.u64);
        }
    }
    /* Program the same RED thresholds on all eight QoS input queues */
    for (queue = 0; queue < 8; queue++)
        cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
    /*
     * Shutoff the dropping based on the per port page count. SW isn't
     * decrementing it right now
     */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
        cvmx_write_csr(CVMX_IPD_ON_BP_DROP_PKTX(0), 0);
    else
        cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, 0);
#define IPD_RED_AVG_DLY 1000
#define IPD_RED_PRB_DLY 1000
    /*
     * Setting up avg_dly and prb_dly, enable bits
     */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        cvmx_ipd_red_delay_t red_delay;
        cvmx_ipd_red_bpid_enablex_t red_bpid_enable;
        red_delay.u64 = 0;
        red_delay.s.avg_dly = IPD_RED_AVG_DLY;
        red_delay.s.prb_dly = IPD_RED_PRB_DLY;
        cvmx_write_csr(CVMX_IPD_RED_DELAY, red_delay.u64);
        /*
         * Only enable the gmx ports
         */
        red_bpid_enable.u64 = 0;
        for (interface = 0; interface < CVMX_HELPER_MAX_GMX; interface++)
        {
            int num_ports = cvmx_helper_ports_on_interface(interface);
            for (port = 0; port < num_ports; port++)
                red_bpid_enable.u64 |= (((uint64_t) 1) <<
                    cvmx_helper_get_bpid(interface, port));
        }
        cvmx_write_csr(CVMX_IPD_RED_BPID_ENABLEX(0), red_bpid_enable.u64);
    }
    else
    {
        cvmx_ipd_red_port_enable_t red_port_enable;
        red_port_enable.u64 = 0;
        red_port_enable.s.prt_enb = 0xfffffffffull;
        red_port_enable.s.avg_dly = IPD_RED_AVG_DLY;
        red_port_enable.s.prb_dly = IPD_RED_PRB_DLY;
        cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
        /*
         * Shutoff the dropping of packets based on RED for SRIO ports
         */
        if (octeon_has_feature(OCTEON_FEATURE_SRIO))
        {
            cvmx_ipd_red_port_enable2_t red_port_enable2;
            red_port_enable2.u64 = 0;
            red_port_enable2.s.prt_enb = 0xf0;
            cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE2, red_port_enable2.u64);
        }
    }
    return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_setup_red);
#endif
/**
 * @INTERNAL
 * Setup the common GMX settings that determine the number of
 * ports. These setting apply to almost all configurations of all
 * chips.
 *
 * @param interface Interface to configure
 * @param num_ports Number of ports on the interface
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_setup_gmx(int interface, int num_ports)
{
    cvmx_gmxx_tx_prts_t gmx_tx_prts;
    cvmx_gmxx_rx_prts_t gmx_rx_prts;
    cvmx_pko_reg_gmx_port_mode_t pko_mode;
    cvmx_gmxx_txx_thresh_t gmx_tx_thresh;
    int index;

    /*
     * Tell GMX the number of TX ports on this interface
     */
    gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
    gmx_tx_prts.s.prts = num_ports;
    cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);

    /*
     * Tell GMX the number of RX ports on this interface. This only applies
     * to GMII and XAUI ports
     */
    if (cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_RGMII
        || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_SGMII
        || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_GMII
        || cvmx_helper_interface_get_mode(interface) == CVMX_HELPER_INTERFACE_MODE_XAUI)
    {
        if (num_ports > 4)
        {
            /* These interface modes carry at most 4 RX ports */
            cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal num_ports\n");
            return(-1);
        }
        gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
        gmx_rx_prts.s.prts = num_ports;
        cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
    }

    /*
     * Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, 50XX,
     * and 68XX.
     */
    if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX) &&
        !OCTEON_IS_MODEL(OCTEON_CN50XX) && !OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        /* Tell PKO the number of ports on this interface */
        pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
        /*
         * The mode field buckets the port count: 1 port -> 4,
         * 2 -> 3, up to 4 -> 2, up to 8 -> 1, more -> 0
         * (smaller mode values cover more ports).
         */
        if (interface == 0)
        {
            if (num_ports == 1)
                pko_mode.s.mode0 = 4;
            else if (num_ports == 2)
                pko_mode.s.mode0 = 3;
            else if (num_ports <= 4)
                pko_mode.s.mode0 = 2;
            else if (num_ports <= 8)
                pko_mode.s.mode0 = 1;
            else
                pko_mode.s.mode0 = 0;
        }
        else
        {
            if (num_ports == 1)
                pko_mode.s.mode1 = 4;
            else if (num_ports == 2)
                pko_mode.s.mode1 = 3;
            else if (num_ports <= 4)
                pko_mode.s.mode1 = 2;
            else if (num_ports <= 8)
                pko_mode.s.mode1 = 1;
            else
                pko_mode.s.mode1 = 0;
        }
        cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
    }

    /*
     * Set GMX to buffer as much data as possible before starting
     * transmit. This reduces the chances that we have a TX under run
     * due to memory contention. Any packet that fits entirely in the
     * GMX FIFO can never have an under run regardless of memory load.
     */
    gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
    if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX) ||
        OCTEON_IS_MODEL(OCTEON_CN50XX))
        /* These chips have a fixed max threshold of 0x40 */
        gmx_tx_thresh.s.cnt = 0x40;
    else
    {
        /* ccn - common cnt numerator, split evenly among the ports */
        int ccn = 0x100;
        /* Choose the max value for the number of ports */
        if (num_ports <= 1)
            gmx_tx_thresh.s.cnt = ccn / 1;
        else if (num_ports == 2)
            gmx_tx_thresh.s.cnt = ccn / 2;
        else
            gmx_tx_thresh.s.cnt = ccn / 4;
    }
    /*
     * SPI and XAUI can have lots of ports but the GMX hardware only ever has
     * a max of 4
     */
    if (num_ports > 4)
        num_ports = 4;
    for (index = 0; index < num_ports; index++)
        cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
                       gmx_tx_thresh.u64);
    /*
     * For o68, we need to setup the pipes
     */
    if (OCTEON_IS_MODEL(OCTEON_CN68XX) && interface < CVMX_HELPER_MAX_GMX)
    {
        cvmx_gmxx_txx_pipe_t config;
        for (index = 0; index < num_ports; index++)
        {
            config.u64 = 0;
            /* Only program the pipe when this port has a PKO port base */
            if (__cvmx_helper_cfg_pko_port_base(interface, index) >= 0)
            {
                config.u64 = cvmx_read_csr(
                    CVMX_GMXX_TXX_PIPE(index, interface));
                config.s.nump = __cvmx_helper_cfg_pko_port_num(interface, index);
                config.s.base = __cvmx_helper_cfg_pko_port_base(interface, index);
                cvmx_write_csr(CVMX_GMXX_TXX_PIPE(index, interface),
                               config.u64);
            }
        }
    }
    return 0;
}
/**
 * Return the base PKO port for a given interface/port pair.
 * Simply forwards to the PKO layer's lookup.
 */
int cvmx_helper_get_pko_port(int interface, int port)
{
    int pko_port;

    pko_port = cvmx_pko_get_base_pko_port(interface, port);
    return pko_port;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_pko_port);
#endif
/**
 * Map an (interface, port) pair to its IPD port number.
 * PKND-capable chips use a sparse numbering with a 0x100 block per
 * interface; older chips use small fixed offsets per interface.
 */
int cvmx_helper_get_ipd_port(int interface, int port)
{
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        if (interface >= 0 && interface <= 4)
        {
            cvmx_helper_interface_mode_t mode;

            mode = cvmx_helper_interface_get_mode(interface);
            if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI ||
                mode == CVMX_HELPER_INTERFACE_MODE_RXAUI)
                return 0x840 + (interface * 0x100);
            return 0x800 + (interface * 0x100) + (port * 16);
        }
        if (interface == 5 || interface == 6)
            return 0x400 + (interface - 5) * 0x100 + port;
        if (interface == 7)
            return 0x100 + port;
        if (interface == 8)
            return port;
        return -1;
    }
    else
    {
        /* Fixed starting IPD port for interfaces 0..6 on older chips */
        static const int ipd_base[] = { 0, 16, 32, 36, 40, 42, 44 };

        if (interface >= 0 && interface <= 6)
            return ipd_base[interface] + port;
        return -1;
    }
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_ipd_port);
#endif
int __cvmx_helper_get_num_ipd_ports(int interface)
{
struct cvmx_iface *piface;
if (interface >= cvmx_helper_get_number_of_interfaces())
return -1;
piface = &cvmx_interfaces[interface];
return piface->cvif_ipd_nports;
}
enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int interface)
{
struct cvmx_iface *piface;
if (interface >= cvmx_helper_get_number_of_interfaces())
return CVMX_PKO_PADDING_NONE;
piface = &cvmx_interfaces[interface];
return piface->cvif_padding;
}
int __cvmx_helper_init_interface(int interface, int num_ipd_ports, int has_fcs, enum cvmx_pko_padding pad)
{
struct cvmx_iface *piface;
int sz;
if (interface >= cvmx_helper_get_number_of_interfaces())
return -1;
piface = &cvmx_interfaces[interface];
piface->cvif_ipd_nports = num_ipd_ports;
piface->cvif_padding = pad;
piface->cvif_has_fcs = has_fcs;
/*
* allocate the per-ipd_port link_info structure
*/
sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
if (sz == 0)
sz = sizeof(cvmx_helper_link_info_t);
piface->cvif_ipd_port_link_info = (cvmx_helper_link_info_t *)kmalloc(sz, GFP_KERNEL);
if (ZERO_OR_NULL_PTR(piface->cvif_ipd_port_link_info))
panic("Cannot allocate memory in __cvmx_helper_init_interface.");
#else
piface->cvif_ipd_port_link_info = (cvmx_helper_link_info_t *)cvmx_bootmem_alloc(sz, sizeof(cvmx_helper_link_info_t));
#endif
if (!piface->cvif_ipd_port_link_info)
return -1;
/* Initialize 'em */ {
int i;
cvmx_helper_link_info_t *p;
p = piface->cvif_ipd_port_link_info;
for (i = 0; i < piface->cvif_ipd_nports; i++)
{
(*p).u64 = 0;
p++;
}
}
return 0;
}
/*
 * Shut down the interfaces; free the resources.
 * @INTERNAL
 */
void __cvmx_helper_shutdown_interfaces(void)
{
    int i;
    int nifaces;		/* number of interfaces */
    struct cvmx_iface *piface;

    nifaces = cvmx_helper_get_number_of_interfaces();
    for (i = 0; i < nifaces; i++)
    {
        piface = cvmx_interfaces + i;
        /*
         * Brace the conditional explicitly: previously the #ifdef made
         * the unbraced if-body be kfree() in kernel builds but the
         * pointer-clear in SE builds, so which statement the test
         * governed depended on the build configuration.
         */
        if (piface->cvif_ipd_port_link_info)
        {
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
            kfree(piface->cvif_ipd_port_link_info);
#else
            /*
             * For SE apps, bootmem was meant to be allocated and never
             * freed.
             */
#endif
        }
        piface->cvif_ipd_port_link_info = 0;
    }
}
/*
 * @INTERNAL
 * Store the cached link state for one port of an interface.
 * Fails (-1) for an out-of-range interface or when the link_info
 * array was never allocated.
 */
int __cvmx_helper_set_link_info(int interface, int port,
                    cvmx_helper_link_info_t link_info)
{
    struct cvmx_iface *piface;

    if (interface >= cvmx_helper_get_number_of_interfaces())
        return -1;
    piface = &cvmx_interfaces[interface];
    if (!piface->cvif_ipd_port_link_info)
        return -1;
    piface->cvif_ipd_port_link_info[port] = link_info;
    return 0;
}
/*
 * @INTERNAL
 * Fetch the cached link state for one port of an interface.
 * Returns an all-zero link_info when the interface is out of range
 * or the link_info array was never allocated.
 */
cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port)
{
    struct cvmx_iface *piface;
    cvmx_helper_link_info_t zero;

    zero.u64 = 0;
    if (interface >= cvmx_helper_get_number_of_interfaces())
        return zero;
    piface = &cvmx_interfaces[interface];
    if (!piface->cvif_ipd_port_link_info)
        return zero;
    return piface->cvif_ipd_port_link_info[port];
}
int __cvmx_helper_get_has_fcs(int interface)
{
return cvmx_interfaces[interface].cvif_has_fcs;
}
/**
 * Return the port kind (pknd) for a port on an interface, or
 * CVMX_INVALID_PKND on chips without the PKND feature.
 */
int cvmx_helper_get_pknd(int interface, int port)
{
    if (!octeon_has_feature(OCTEON_FEATURE_PKND))
        return CVMX_INVALID_PKND;
    return __cvmx_helper_cfg_pknd(interface, port);
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_pknd);
#endif
/**
 * Return the backpressure ID (bpid) for a port on an interface, or
 * CVMX_INVALID_BPID on chips without the PKND feature.
 */
int cvmx_helper_get_bpid(int interface, int port)
{
    if (!octeon_has_feature(OCTEON_FEATURE_PKND))
        return CVMX_INVALID_BPID;
    return __cvmx_helper_cfg_bpid(interface, port);
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_bpid);
#endif
/**
 * Display interface statistics.
 *
 * @param port IPD/PKO port number
 *
 * @return none
 */
void cvmx_helper_show_stats(int port)
{
    cvmx_pip_port_status_t pip_stats;
    cvmx_pko_port_status_t pko_stats;

    /* ILK stats */
    if (octeon_has_feature(OCTEON_FEATURE_ILK))
        __cvmx_helper_ilk_show_stats();

    /* PIP stats */
    cvmx_pip_get_port_status(port, 0, &pip_stats);
    cvmx_dprintf("port %d: the number of packets - ipd: %d\n", port, (int)pip_stats.packets);

    /* PKO stats */
    cvmx_pko_get_port_status(port, 0, &pko_stats);
    cvmx_dprintf("port %d: the number of packets - pko: %d\n", port, (int)pko_stats.packets);

    /* TODO: other stats */
}
#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * @return Interface number
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        /* Interfaces 0-4 each own one 0x100 block starting at 0x800 */
        if (ipd_port >= 0x800 && ipd_port < 0xd00)
            return (ipd_port - 0x800) >> 8;
        /* Interfaces 5 and 6 own the 0x400 and 0x500 blocks */
        if (ipd_port >= 0x400 && ipd_port < 0x600)
            return 5 + ((ipd_port - 0x400) >> 8);
        if (ipd_port >= 0x100 && ipd_port < 0x120)
            return 7;
        if (ipd_port < 8)
            return 8;
    }
    else
    {
        /* Older chips: first IPD port not belonging to interface i */
        static const int upper[] = { 16, 32, 36, 40, 42, 44, 46 };
        int i;

        for (i = 0; i < (int)(sizeof(upper) / sizeof(upper[0])); i++)
        {
            if (ipd_port < upper[i])
                return i;
        }
    }
    cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD port number %d\n", ipd_port);
    return -1;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_interface_num);
#endif
/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * @return Interface index number
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        if (ipd_port >= 0x800 && ipd_port < 0xd00)
        {
            int idx = (ipd_port & 0xff) >> 6;

            if (idx)
                return idx - 1;
            return (ipd_port & 0xff) >> 4;
        }
        if (ipd_port >= 0x400 && ipd_port < 0x600)
            return ipd_port & 0xff;
        if (ipd_port >= 0x100 && ipd_port < 0x120)
            return ipd_port & 0xff;
        if (ipd_port < 8)
            return ipd_port;
        cvmx_dprintf("cvmx_helper_get_interface_index_num: Illegal IPD port number %d\n", ipd_port);
        return -1;
    }
    if (ipd_port < 32)
        return ipd_port & 15;
    if (ipd_port < 40)
        return ipd_port & 3;
    /* 40-43 and 44-45 both mask with 1, so they share one branch */
    if (ipd_port < 46)
        return ipd_port & 1;
    cvmx_dprintf("cvmx_helper_get_interface_index_num: Illegal IPD port number\n");
    return -1;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_helper_get_interface_index_num);
#endif

View File

@ -1,354 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Small helper utilities.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_UTIL_H__
#define __CVMX_HELPER_UTIL_H__
#include "cvmx.h"
#include "cvmx-mio-defs.h"
#ifdef CVMX_ENABLE_HELPER_FUNCTIONS
typedef char cvmx_pknd_t;
typedef char cvmx_bpid_t;
#define CVMX_INVALID_PKND ((cvmx_pknd_t) -1)
#define CVMX_INVALID_BPID ((cvmx_bpid_t) -1)
#define CVMX_MAX_PKND ((cvmx_pknd_t) 64)
#define CVMX_MAX_BPID ((cvmx_bpid_t) 64)
#define CVMX_HELPER_MAX_IFACE 9
/**
* Convert a interface mode into a human readable string
*
* @param mode Mode to convert
*
* @return String
*/
extern const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
/**
* Debug routine to dump the packet structure to the console
*
* @param work Work queue entry containing the packet to dump
* @return
*/
extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
/**
* Setup Random Early Drop on a specific input queue
*
* @param queue Input queue to setup RED on (0-7)
* @param pass_thresh
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @param drop_thresh
 * All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh);
/**
* Setup Random Early Drop to automatically begin dropping packets.
*
* @param pass_thresh
* Packets will begin slowly dropping when there are less than
* this many packet buffers free in FPA 0.
* @param drop_thresh
 * All incoming packets will be dropped when there are less
* than this many free packet buffers in FPA 0.
* @return Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
/**
* Get the version of the CVMX libraries.
*
* @return Version string. Note this buffer is allocated statically
* and will be shared by all callers.
*/
extern const char *cvmx_helper_get_version(void);
/**
* @INTERNAL
* Setup the common GMX settings that determine the number of
* ports. These setting apply to almost all configurations of all
* chips.
*
* @param interface Interface to configure
* @param num_ports Number of ports on the interface
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
/**
* @INTERNAL
* Get the number of ipd_ports on an interface.
*
* @param interface
*
* @return the number of ipd_ports on the interface and -1 for error.
*/
extern int __cvmx_helper_get_num_ipd_ports(int interface);
/**
* @INTERNAL
* Get the number of pko_ports on an interface.
*
* @param interface
*
* @return the number of pko_ports on the interface.
*/
extern int __cvmx_helper_get_num_pko_ports(int interface);
/*
* @INTERNAL
*
* @param interface
* @param port
* @param link_info
*
* @return 0 for success and -1 for failure
*/
extern int __cvmx_helper_set_link_info(int interface, int port,
cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
*
* @param interface
* @param port
*
* @return valid link_info on success or -1 on failure
*/
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface,
int port);
enum cvmx_pko_padding {
CVMX_PKO_PADDING_NONE = 0,
CVMX_PKO_PADDING_60 = 1,
};
/**
* @INTERNAL
*
* @param interface
* @param num_ipd_ports is the number of ipd_ports on the interface
* @param has_fcs indicates if PKO does FCS for the ports on this
* @param pad The padding that PKO should apply.
* interface.
*
* @return 0 for success and -1 for failure
*/
extern int __cvmx_helper_init_interface(int interface, int num_ipd_ports, int has_fcs, enum cvmx_pko_padding pad);
/**
* @INTERNAL
*
* @param interface
*
* @return 0 if PKO does not do FCS and 1 otherwise.
*/
extern int __cvmx_helper_get_has_fcs(int interface);
extern enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int interface);
/**
* Returns the IPD port number for a port on the given
* interface.
*
* @param interface Interface to use
* @param port Port on the interface
*
* @return IPD port number
*/
extern int cvmx_helper_get_ipd_port(int interface, int port);
/**
* Returns the PKO port number for a port on the given interface,
* This is the base pko_port for o68 and ipd_port for older models.
*
* @param interface Interface to use
* @param port Port on the interface
*
* @return PKO port number and -1 on error.
*/
extern int cvmx_helper_get_pko_port(int interface, int port);
/**
 * Returns the IPD/PKO port number for the first port on the given
 * interface.
 *
 * @param interface Interface to use
 *
 * @return IPD/PKO port number
 */
static inline int cvmx_helper_get_first_ipd_port(int interface)
{
    /* Port index 0 is by definition the first port */
    return cvmx_helper_get_ipd_port(interface, 0);
}
/**
 * Returns the IPD/PKO port number for the last port on the given
 * interface.
 *
 * @param interface Interface to use
 *
 * @return IPD/PKO port number
 */
static inline int cvmx_helper_get_last_ipd_port (int interface)
{
    int first = cvmx_helper_get_first_ipd_port(interface);
    int count = cvmx_helper_ports_on_interface(interface);

    return first + count - 1;
}
/**
 * Free the packet buffers contained in a work queue entry.
 * The work queue entry is not freed.
 *
 * Walks the chained buffer list starting at work->packet_ptr,
 * returning each buffer to its FPA pool. The next-buffer pointer
 * is read from the 8 bytes immediately preceding each buffer's data.
 *
 * @param work Work queue entry with packet to free
 */
static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
{
    uint64_t number_buffers;
    cvmx_buf_ptr_t buffer_ptr;
    cvmx_buf_ptr_t next_buffer_ptr;
    uint64_t start_of_buffer;

    number_buffers = work->word2.s.bufs;
    if (number_buffers == 0)
        return;
    buffer_ptr = work->packet_ptr;

    /* Since the number of buffers is not zero, we know this is not a dynamic
       short packet. We need to check if it is a packet received with
       IPD_CTL_STATUS[NO_WPTR]. If this is true, we need to free all buffers
       except for the first one. The caller doesn't expect their WQE pointer
       to be freed */
    start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
    if (cvmx_ptr_to_phys(work) == start_of_buffer)
    {
        /* WQE shares the first buffer: skip it so the WQE stays valid */
        next_buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
        buffer_ptr = next_buffer_ptr;
        number_buffers--;
    }
    while (number_buffers--)
    {
        /* Remember the back pointer is in cache lines, not 64bit words */
        start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
        /* Read pointer to next buffer before we free the current buffer. */
        next_buffer_ptr = *(cvmx_buf_ptr_t*)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
        cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer), buffer_ptr.s.pool, 0);
        buffer_ptr = next_buffer_ptr;
    }
}
#endif /* CVMX_ENABLE_HELPER_FUNCTIONS */
/**
* Returns the interface number for an IPD/PKO port number.
*
* @param ipd_port IPD/PKO port number
*
* @return Interface number
*/
extern int cvmx_helper_get_interface_num(int ipd_port);
/**
* Returns the interface index number for an IPD/PKO port
* number.
*
* @param ipd_port IPD/PKO port number
*
* @return Interface index number
*/
extern int cvmx_helper_get_interface_index_num(int ipd_port);
/**
* Get port kind for a given port in an interface.
*
* @param interface Interface
* @param port index of the port in the interface
*
 * @return port kind on success and -1 on failure
*/
extern int cvmx_helper_get_pknd(int interface, int port);
/**
* Get bpid for a given port in an interface.
*
* @param interface Interface
* @param port index of the port in the interface
*
 * @return bpid on success and -1 on failure
*/
extern int cvmx_helper_get_bpid(int interface, int port);
/**
* Internal functions.
*/
extern int __cvmx_helper_post_init_interfaces(void);
extern void __cvmx_helper_shutdown_interfaces(void);
extern void cvmx_helper_show_stats(int port);
#endif /* __CVMX_HELPER_UTIL_H__ */

View File

@ -1,476 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for XAUI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include <asm/octeon/cvmx-qlm.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-cfg.h>
#endif
#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-pko-defs.h>
#include <asm/octeon/cvmx-pcsx-defs.h>
#include <asm/octeon/cvmx-pcsxx-defs.h>
#include <asm/octeon/cvmx-ciu-defs.h>
#else
#if !defined(__FreeBSD__) || !defined(_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx.h"
#include "cvmx-helper.h"
#include "cvmx-helper-cfg.h"
#include "cvmx-qlm.h"
#endif
#else
#include "cvmx.h"
#include "cvmx-helper.h"
#include "cvmx-qlm.h"
#endif
#endif
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
int __cvmx_helper_xaui_enumerate(int interface)
{
union cvmx_gmxx_hg2_control gmx_hg2_control;
/* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
if (gmx_hg2_control.s.hg2tx_en)
return 16;
else
return 1;
}
/**
 * @INTERNAL
 * Probe a XAUI interface and determine the number of ports
 * connected to it. The XAUI interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_xaui_probe(int interface)
{
    int i;
    cvmx_gmxx_inf_mode_t mode;

    /* CN63XX Pass 1.0 errata G-14395 requires the QLM De-emphasis be programmed */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_0))
    {
        cvmx_ciu_qlm2_t ciu_qlm;
        ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
        ciu_qlm.s.txbypass = 1;
        ciu_qlm.s.txdeemph = 0x5;
        ciu_qlm.s.txmargin = 0x1a;
        cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
    }
    /* CN63XX Pass 2.0 and 2.1 errata G-15273 requires the QLM De-emphasis be
       programmed when using a 156.25Mhz ref clock */
    if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0) ||
        OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1))
    {
        /* Read the QLM speed pins */
        cvmx_mio_rst_boot_t mio_rst_boot;
        mio_rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
        if (mio_rst_boot.cn63xx.qlm2_spd == 0xb)
        {
            cvmx_ciu_qlm2_t ciu_qlm;
            ciu_qlm.u64 = cvmx_read_csr(CVMX_CIU_QLM2);
            ciu_qlm.s.txbypass = 1;
            ciu_qlm.s.txdeemph = 0xa;
            ciu_qlm.s.txmargin = 0x1f;
            cvmx_write_csr(CVMX_CIU_QLM2, ciu_qlm.u64);
        }
    }
    /* Check if QLM is configured correct for XAUI/RXAUI, verify the
       speed as well as mode */
    if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
    {
        int qlm, status;
        qlm = cvmx_qlm_interface(interface);
        /* NOTE(review): status values 2 and 10 appear to select the
           XAUI/RXAUI QLM configurations — confirm against
           cvmx_qlm_get_status() documentation. */
        status = cvmx_qlm_get_status(qlm);
        if (status != 2 && status != 10)
            return 0;
    }
    /* Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the interface
       needs to be enabled before IPD otherwise per port backpressure
       may not work properly */
    mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
    mode.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);

    /* XAUI presents as a single GMX port */
    __cvmx_helper_setup_gmx(interface, 1);

    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        /* Setup PKO to support 16 ports for HiGig2 virtual ports. We're pointing
           all of the PKO packet ports for this interface to the XAUI. This allows
           us to use HiGig2 backpressure per port */
        for (i=0; i<16; i++)
        {
            cvmx_pko_mem_port_ptrs_t pko_mem_port_ptrs;
            pko_mem_port_ptrs.u64 = 0;
            /* We set each PKO port to have equal priority in a round robin
               fashion */
            pko_mem_port_ptrs.s.static_p = 0;
            pko_mem_port_ptrs.s.qos_mask = 0xff;
            /* All PKO ports map to the same XAUI hardware port */
            pko_mem_port_ptrs.s.eid = interface*4;
            pko_mem_port_ptrs.s.pid = interface*16 + i;
            cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
        }
    }
    return __cvmx_helper_xaui_enumerate(interface);
}
/**
 * @INTERNAL
 * Bringup XAUI interface. After this call packet I/O should be
 * fully functional.
 *
 * Follows the documented bring-up order: disable GMX and interrupts,
 * configure the reconciliation layer, reset the PCS, wait for
 * alignment/link, program GMX, then re-enable everything.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
static int __cvmx_helper_xaui_link_init(int interface)
{
    cvmx_gmxx_prtx_cfg_t gmx_cfg;
    cvmx_pcsxx_control1_reg_t xauiCtl;
    cvmx_pcsxx_misc_ctl_reg_t xauiMiscCtl;
    cvmx_gmxx_tx_xaui_ctl_t gmxXauiTxCtl;

    /* (1) Interface has already been enabled. */

    /* (2) Disable GMX. */
    xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
    xauiMiscCtl.s.gmxeno = 1;
    cvmx_write_csr (CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);

    /* (3) Disable GMX and PCSX interrupts. */
    cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0,interface), 0x0);
    cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
    cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);

    /* (4) Bring up the PCSX and GMX reconciliation layer. */
    /* (4)a Set polarity and lane swapping. */
    /* (4)b */
    gmxXauiTxCtl.u64 = cvmx_read_csr (CVMX_GMXX_TX_XAUI_CTL(interface));
    gmxXauiTxCtl.s.dic_en = 1; /* Enable better IFG packing and improves performance */
    gmxXauiTxCtl.s.uni_en = 0;
    cvmx_write_csr (CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);

    /* (4)c Apply reset sequence */
    xauiCtl.u64 = cvmx_read_csr (CVMX_PCSXX_CONTROL1_REG(interface));
    xauiCtl.s.lo_pwr = 0;

    /* Errata G-15618 requires disabling PCS soft reset in some OCTEON II models. */
    if (!OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)
        && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)
        && !OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_1)
        && !OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_X)
        && !OCTEON_IS_MODEL(OCTEON_CN68XX))
        xauiCtl.s.reset = 1;
    cvmx_write_csr (CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);

    /* Wait for PCS to come out of reset */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_CONTROL1_REG(interface), cvmx_pcsxx_control1_reg_t, reset, ==, 0, 10000))
        return -1;
    /* Wait for PCS to be aligned */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_10GBX_STATUS_REG(interface), cvmx_pcsxx_10gbx_status_reg_t, alignd, ==, 1, 10000))
        return -1;
    /* Wait for RX to be ready */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_RX_XAUI_CTL(interface), cvmx_gmxx_rx_xaui_ctl_t, status, ==, 0, 10000))
        return -1;

    /* (6) Configure GMX */

    /* Wait for GMX RX to be idle */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface), cvmx_gmxx_prtx_cfg_t, rx_idle, ==, 1, 10000))
        return -1;
    /* Wait for GMX TX to be idle */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(0, interface), cvmx_gmxx_prtx_cfg_t, tx_idle, ==, 1, 10000))
        return -1;

    /* GMX configure */
    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
    gmx_cfg.s.speed = 1;
    gmx_cfg.s.speed_msb = 0;
    gmx_cfg.s.slottime = 1;
    cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
    cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
    cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

    /* Wait for receive link */
    if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS1_REG(interface), cvmx_pcsxx_status1_reg_t, rcv_lnk, ==, 1, 10000))
        return -1;
    if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface), cvmx_pcsxx_status2_reg_t, xmtflt, ==, 0, 10000))
        return -1;
    if (CVMX_WAIT_FOR_FIELD64(CVMX_PCSXX_STATUS2_REG(interface), cvmx_pcsxx_status2_reg_t, rcvflt, ==, 0, 10000))
        return -1;

    /* (8) Enable packet reception */
    xauiMiscCtl.s.gmxeno = 0;
    cvmx_write_csr (CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);

    /* Clear all error interrupts before enabling the interface. */
    cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0,interface), ~0x0ull);
    cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface), ~0x0ull);
    cvmx_write_csr(CVMX_PCSXX_INT_REG(interface), ~0x0ull);

    /* Enable GMX */
    gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
    gmx_cfg.s.en = 1;
    cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);

    return 0;
}
/**
 * @INTERNAL
 * Bringup and enable a XAUI interface. After this call packet
 * I/O should be fully functional. This is called with IPD
 * enabled but PKO disabled.
 *
 * On PKND-capable chips (o68) this first programs the port kind,
 * backpressure ID, and disables GMX pad/FCS insertion (PKO does it
 * there); then it runs the common link bring-up sequence.
 *
 * @param interface Interface to bring up
 *
 * @return Zero on success, negative on failure
 */
int __cvmx_helper_xaui_enable(int interface)
{
    /* Setup PKND and BPID */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
    {
        cvmx_gmxx_bpid_msk_t bpid_msk;
        cvmx_gmxx_bpid_mapx_t bpid_map;
        cvmx_gmxx_prtx_cfg_t gmxx_prtx_cfg;
        cvmx_gmxx_txx_append_t gmxx_txx_append_cfg;

        /* Setup PKIND */
        gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
        gmxx_prtx_cfg.s.pknd = cvmx_helper_get_pknd(interface, 0);
        cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmxx_prtx_cfg.u64);

        /* Setup BPID */
        bpid_map.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MAPX(0, interface));
        bpid_map.s.val = 1;
        bpid_map.s.bpid = cvmx_helper_get_bpid(interface, 0);
        cvmx_write_csr(CVMX_GMXX_BPID_MAPX(0, interface), bpid_map.u64);

        bpid_msk.u64 = cvmx_read_csr(CVMX_GMXX_BPID_MSK(interface));
        bpid_msk.s.msk_or |= 1;
        bpid_msk.s.msk_and &= ~1;
        cvmx_write_csr(CVMX_GMXX_BPID_MSK(interface), bpid_msk.u64);

        /* CN68XX adds the padding and FCS in PKO, not GMX */
        gmxx_txx_append_cfg.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(0, interface));
        gmxx_txx_append_cfg.s.fcs = 0;
        gmxx_txx_append_cfg.s.pad = 0;
        cvmx_write_csr(CVMX_GMXX_TXX_APPEND(0, interface), gmxx_txx_append_cfg.u64);
    }

    /* NOTE(review): the return value of the link init is discarded, so a
       failed bring-up still reports success here. */
    __cvmx_helper_xaui_link_init(interface);

    return 0;
}
/**
 * @INTERNAL
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param ipd_port IPD/PKO port to query
 *
 * @return Link state
 */
cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
{
    int interface = cvmx_helper_get_interface_num(ipd_port);
    cvmx_gmxx_tx_xaui_ctl_t gmxx_tx_xaui_ctl;
    cvmx_gmxx_rx_xaui_ctl_t gmxx_rx_xaui_ctl;
    cvmx_pcsxx_status1_reg_t pcsxx_status1_reg;
    cvmx_helper_link_info_t result;

    gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
    gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
    pcsxx_status1_reg.u64 = cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
    result.u64 = 0;

    /* Only return a link if both RX and TX are happy */
    if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
        (pcsxx_status1_reg.s.rcv_lnk == 1))
    {
        cvmx_pcsxx_misc_ctl_reg_t misc_ctl;
        result.s.link_up = 1;
        result.s.full_duplex = 1;
        if (OCTEON_IS_MODEL(OCTEON_CN68XX))
        {
            cvmx_mio_qlmx_cfg_t qlm_cfg;
            int lanes;
            int qlm = (interface == 1) ? 0 : interface;

            /* Derive link speed from the QLM baud rate: 8/10 accounts
               for 8b/10b line coding, times the number of lanes */
            qlm_cfg.u64 = cvmx_read_csr(CVMX_MIO_QLMX_CFG(qlm));
            result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
            lanes = (qlm_cfg.s.qlm_cfg == 7) ? 2 : 4;
            result.s.speed *= lanes;
        }
        else if (OCTEON_IS_MODEL(OCTEON_CN6XXX))
        {
            int qlm = cvmx_qlm_interface(interface);

            /* XAUI always uses 4 lanes here */
            result.s.speed = cvmx_qlm_get_gbaud_mhz(qlm) * 8 / 10;
            result.s.speed *= 4;
        }
        else
            result.s.speed = 10000;
        misc_ctl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
        /* If GMX is still disabled, rerun the bring-up sequence */
        if (misc_ctl.s.gmxeno)
            __cvmx_helper_xaui_link_init(interface);
    }
    else
    {
        /* Disable GMX and PCSX interrupts. */
        cvmx_write_csr (CVMX_GMXX_RXX_INT_EN(0,interface), 0x0);
        cvmx_write_csr (CVMX_GMXX_TX_INT_EN(interface), 0x0);
        cvmx_write_csr (CVMX_PCSXX_INT_EN_REG(interface), 0x0);
    }
    return result;
}
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	cvmx_gmxx_tx_xaui_ctl_t tx_ctl;
	cvmx_gmxx_rx_xaui_ctl_t rx_ctl;

	/* Snapshot the current TX/RX XAUI state before deciding. */
	tx_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
	rx_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));

	/* Caller asked for the link to be down: nothing to configure. */
	if (!link_info.s.link_up)
		return 0;

	/* Both directions already report a healthy link: leave it alone. */
	if (tx_ctl.s.ls == 0 && rx_ctl.s.status == 0)
		return 0;

	/* Otherwise (re)initialize the interface to bring the link up. */
	return __cvmx_helper_xaui_link_init(interface);
}
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
/* 'extern' on a function definition is redundant; dropped. The prototype in
 * cvmx-helper-xaui.h remains the public declaration. */
int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal, int enable_external)
{
	int interface = cvmx_helper_get_interface_num(ipd_port);
	cvmx_pcsxx_control1_reg_t pcsxx_control1_reg;
	cvmx_gmxx_xaui_ext_loopback_t gmxx_xaui_ext_loopback;

	/* Set the internal loop.  Normalize to 0/1 first: the contract is
	 * "non zero enables", but these CSR fields are narrow bitfields, so
	 * assigning e.g. 2 directly would silently truncate to 0 and disable
	 * loopback instead of enabling it. */
	pcsxx_control1_reg.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
	pcsxx_control1_reg.s.loopbck1 = !!enable_internal;
	cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), pcsxx_control1_reg.u64);

	/* Set the external loop, with the same 0/1 normalization. */
	gmxx_xaui_ext_loopback.u64 = cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
	gmxx_xaui_ext_loopback.s.en = !!enable_external;
	cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface), gmxx_xaui_ext_loopback.u64);

	/* Take the link through a reset so the new loopback setting applies. */
	return __cvmx_helper_xaui_link_init(interface);
}
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */

View File

@ -1,127 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions for XAUI initialization, configuration,
* and monitoring.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_XAUI_H__
#define __CVMX_HELPER_XAUI_H__
/**
* @INTERNAL
* Probe a XAUI interface and determine the number of ports
* connected to it. The XAUI interface should still be down
* after this call.
*
* @param interface Interface to probe
*
* @return Number of ports on the interface. Zero to disable.
*/
extern int __cvmx_helper_xaui_probe(int interface);
extern int __cvmx_helper_xaui_enumerate(int interface);
/**
* @INTERNAL
* Bringup and enable a XAUI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @param interface Interface to bring up
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_xaui_enable(int interface);
/**
* @INTERNAL
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
/**
* @INTERNAL
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
extern int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* @INTERNAL
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
extern int __cvmx_helper_xaui_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -1,373 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Helper functions for common, but complicated tasks.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HELPER_H__
#define __CVMX_HELPER_H__
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#elif !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL)
#include "executive-config.h"
#include "cvmx-config.h"
#endif
#include "cvmx-fpa.h"
#include "cvmx-wqe.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Max number of GMXX */
#define CVMX_HELPER_MAX_GMX (OCTEON_IS_MODEL(OCTEON_CN68XX) ? 5 : 2)
#define CVMX_HELPER_CSR_INIT0 0 /* Do not change as
CVMX_HELPER_WRITE_CSR()
assumes it */
#define CVMX_HELPER_CSR_INIT_READ -1
/*
* CVMX_HELPER_WRITE_CSR--set a field in a CSR with a value.
*
* @param chcsr_init initial value of the csr (CVMX_HELPER_CSR_INIT_READ
* means to use the existing csr value as the
* initial value.)
* @param chcsr_csr the name of the csr
* @param chcsr_type the type of the csr (see the -defs.h)
* @param chcsr_chip the chip for the csr/field
* @param chcsr_fld the field in the csr
* @param chcsr_val the value for field
*/
#define CVMX_HELPER_WRITE_CSR(chcsr_init, chcsr_csr, chcsr_type, \
chcsr_chip, chcsr_fld, chcsr_val) \
do { \
chcsr_type csr; \
if ((chcsr_init) == CVMX_HELPER_CSR_INIT_READ) \
csr.u64 = cvmx_read_csr(chcsr_csr); \
else \
csr.u64 = (chcsr_init); \
csr.chcsr_chip.chcsr_fld = (chcsr_val); \
cvmx_write_csr((chcsr_csr), csr.u64); \
} while(0)
/*
* CVMX_HELPER_WRITE_CSR0--set a field in a CSR with the initial value of 0
*/
#define CVMX_HELPER_WRITE_CSR0(chcsr_csr, chcsr_type, chcsr_chip, \
chcsr_fld, chcsr_val) \
CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT0, chcsr_csr, \
chcsr_type, chcsr_chip, chcsr_fld, chcsr_val)
/*
* CVMX_HELPER_WRITE_CSR1--set a field in a CSR with the initial value of
* the CSR's current value.
*/
#define CVMX_HELPER_WRITE_CSR1(chcsr_csr, chcsr_type, chcsr_chip, \
chcsr_fld, chcsr_val) \
CVMX_HELPER_WRITE_CSR(CVMX_HELPER_CSR_INIT_READ, chcsr_csr, \
chcsr_type, chcsr_chip, chcsr_fld, chcsr_val)
/* Operating mode of a packet interface, as reported by
 * cvmx_helper_interface_get_mode().  Unknown/unsupported interfaces
 * are reported as DISABLED. */
typedef enum
{
    CVMX_HELPER_INTERFACE_MODE_DISABLED,
    CVMX_HELPER_INTERFACE_MODE_RGMII,
    CVMX_HELPER_INTERFACE_MODE_GMII,
    CVMX_HELPER_INTERFACE_MODE_SPI,
    CVMX_HELPER_INTERFACE_MODE_PCIE,
    CVMX_HELPER_INTERFACE_MODE_XAUI,
    CVMX_HELPER_INTERFACE_MODE_SGMII,
    CVMX_HELPER_INTERFACE_MODE_PICMG,
    CVMX_HELPER_INTERFACE_MODE_NPI,
    CVMX_HELPER_INTERFACE_MODE_LOOP,
    CVMX_HELPER_INTERFACE_MODE_SRIO,
    CVMX_HELPER_INTERFACE_MODE_ILK,
    CVMX_HELPER_INTERFACE_MODE_RXAUI,
} cvmx_helper_interface_mode_t;
/* Packed link state returned by the cvmx_helper_link_*() functions.
 * The u64 view allows the whole state to be copied/compared atomically. */
typedef union
{
    uint64_t u64;
    struct
    {
        uint64_t reserved_20_63 : 44;
        uint64_t link_up : 1; /**< Is the physical link up? */
        uint64_t full_duplex : 1; /**< 1 if the link is full duplex */
        uint64_t speed : 18; /**< Speed of the link in Mbps */
    } s;
} cvmx_helper_link_info_t;
#include "cvmx-helper-fpa.h"
#ifdef CVMX_ENABLE_PKO_FUNCTIONS
#include "cvmx-helper-errata.h"
#include "cvmx-helper-ilk.h"
#include "cvmx-helper-loop.h"
#include "cvmx-helper-npi.h"
#include "cvmx-helper-rgmii.h"
#include "cvmx-helper-sgmii.h"
#include "cvmx-helper-spi.h"
#include "cvmx-helper-srio.h"
#include "cvmx-helper-xaui.h"
/**
* cvmx_override_pko_queue_priority(int ipd_port, uint64_t
* priorities[16]) is a function pointer. It is meant to allow
* customization of the PKO queue priorities based on the port
* number. Users should set this pointer to a function before
* calling any cvmx-helper operations.
*/
extern CVMX_SHARED void (*cvmx_override_pko_queue_priority)(int ipd_port, uint64_t *priorities);
/**
* cvmx_override_ipd_port_setup(int ipd_port) is a function
* pointer. It is meant to allow customization of the IPD port/port kind
* setup before packet input/output comes online. It is called
* after cvmx-helper does the default IPD configuration, but
* before IPD is enabled. Users should set this pointer to a
* function before calling any cvmx-helper operations.
*/
extern CVMX_SHARED void (*cvmx_override_ipd_port_setup)(int ipd_port);
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
* IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* @return 0 on success
* -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
/**
* Initialize and allocate memory for the SSO.
*
* @param wqe_entries The maximum number of work queue entries to be
* supported.
*
* @return Zero on success, non-zero on failure.
*/
extern int cvmx_helper_initialize_sso(int wqe_entries);
/**
* Undo the effect of cvmx_helper_initialize_sso().
*
* Warning: since cvmx_bootmem_alloc() memory cannot be freed, the
* memory allocated by cvmx_helper_initialize_sso() will be leaked.
*
* @return Zero on success, non-zero on failure.
*/
extern int cvmx_helper_uninitialize_sso(void);
/**
* Initialize the PIP, IPD, and PKO hardware to support
* simple priority based queues for the ethernet ports. Each
* port is configured with a number of priority queues based
* on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
* priority than the previous.
*
* @return Zero on success, non-zero on failure
*/
extern int cvmx_helper_initialize_packet_io_global(void);
/**
* Does core local initialization for packet io
*
* @return Zero on success, non-zero on failure
*/
extern int cvmx_helper_initialize_packet_io_local(void);
/**
* Undo the initialization performed in
* cvmx_helper_initialize_packet_io_global(). After calling this routine and the
* local version on each core, packet IO for Octeon will be disabled and placed
* in the initial reset state. It will then be safe to call the initialize
* later on. Note that this routine does not empty the FPA pools. It frees all
* buffers used by the packet IO hardware to the FPA so a function emptying the
* FPA after shutdown should find all packet buffers in the FPA.
*
* @return Zero on success, negative on failure.
*/
extern int cvmx_helper_shutdown_packet_io_global(void);
/**
* Does core local shutdown of packet io
*
* @return Zero on success, non-zero on failure
*/
extern int cvmx_helper_shutdown_packet_io_local(void);
/**
* Returns the number of ports on the given interface.
* The interface must be initialized before the port count
* can be returned.
*
* @param interface Which interface to return port count for.
*
* @return Port count for interface
* -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
/**
* Return the number of interfaces the chip has. Each interface
* may have multiple ports. Most chips support two interfaces,
* but the CNX0XX and CNX1XX are exceptions. These only support
* one interface.
*
* @return Number of interfaces on chip
*/
extern int cvmx_helper_get_number_of_interfaces(void);
/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
*
* @param interface Interface to probe
*
* @return Mode of the interface. Unknown or unsupported interfaces return
* DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface);
/**
* Auto configure an IPD/PKO port link state and speed. This
* function basically does the equivalent of:
* cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
*
* @param ipd_port IPD/PKO port to auto configure
*
* @return Link state after configure
*/
extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
/**
* Return the link state of an IPD/PKO port as returned by
* auto negotiation. The result of this function may not match
* Octeon's link config if auto negotiation has changed since
* the last call to cvmx_helper_link_set().
*
* @param ipd_port IPD/PKO port to query
*
* @return Link state
*/
extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
/**
* Configure an IPD/PKO port for the specified link state. This
* function does not influence auto negotiation at the PHY level.
* The passed link state must always match the link state returned
* by cvmx_helper_link_get(). It is normally best to use
* cvmx_helper_link_autoconf() instead.
*
* @param ipd_port IPD/PKO port to configure
* @param link_info The new link state
*
* @return Zero on success, negative on failure
*/
extern int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info);
/**
* This function probes an interface to determine the actual number of
* hardware ports connected to it. It does some setup the ports but
* doesn't enable them. The main goal here is to set the global
* interface_port_count[interface] correctly. Final hardware setup of
* the ports will be performed later.
*
* @param interface Interface to probe
*
* @return Zero on success, negative on failure
*/
extern int cvmx_helper_interface_probe(int interface);
/**
* Determine the actual number of hardware ports connected to an
* interface. It doesn't setup the ports or enable them.
*
* @param interface Interface to enumerate
*
* @return Zero on success, negative on failure
*/
extern int cvmx_helper_interface_enumerate(int interface);
/**
* Configure a port for internal and/or external loopback. Internal loopback
* causes packets sent by the port to be received by Octeon. External loopback
* causes packets received from the wire to be sent out again.
*
* @param ipd_port IPD/PKO port to loopback.
* @param enable_internal
* Non zero if you want internal loopback
* @param enable_external
* Non zero if you want external loopback
*
* @return Zero on success, negative on failure.
*/
extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal, int enable_external);
#include "cvmx-helper-util.h"
#endif /* CVMX_ENABLE_PKO_FUNCTIONS */
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_HELPER_H__ */

View File

@ -1,174 +0,0 @@
/***********************license start***************
* Copyright (c) 2011 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Support library for the CN63XX, CN68XX hardware HFA engine.
*
*/
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-clock.h>
#include <asm/octeon/cvmx-dfa-defs.h>
#include <asm/octeon/cvmx-hfa.h>
#else
#include "executive-config.h"
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
#include "cvmx-config.h"
#include "cvmx.h"
#include "cvmx-fau.h"
#include "cvmx-cmd-queue.h"
#include "cvmx-hfa.h"
#endif
#endif
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
/**
* Initialize the DFA block
*
* @return Zero on success, negative on failure
*/
int cvmx_hfa_initialize(void)
{
	cvmx_dfa_difctl_t control;
	cvmx_cmd_queue_result_t result;
	void *initial_base_address;
	int cmdsize;

	/* Largest whole number of DFA commands that fits in a pool buffer,
	 * reserving 8 bytes (presumably for the next-buffer link word --
	 * TODO confirm against cvmx-cmd-queue). */
	cmdsize = ((CVMX_FPA_DFA_POOL_SIZE - 8) / sizeof (cvmx_dfa_command_t)) *
		sizeof (cvmx_dfa_command_t);
	result = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_DFA, 0,
					   CVMX_FPA_DFA_POOL, cmdsize + 8);
	if (result != CVMX_CMD_QUEUE_SUCCESS)
		return -1;

	/* Tell the DFA block which FPA pool feeds the instruction queue and
	 * how many commands each buffer holds. */
	control.u64 = 0;
	control.s.dwbcnt = CVMX_FPA_DFA_POOL_SIZE / 128;
	control.s.pool = CVMX_FPA_DFA_POOL;
	control.s.size = cmdsize / sizeof(cvmx_dfa_command_t);
	CVMX_SYNCWS;	/* order the queue setup stores before the CSR write */
	cvmx_write_csr(CVMX_DFA_DIFCTL, control.u64);
	initial_base_address = cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_DFA);
	CVMX_SYNCWS;	/* order buffer setup before publishing the read ptr */
	cvmx_write_csr(CVMX_DFA_DIFRDPTR, cvmx_ptr_to_phys(initial_base_address));
	cvmx_read_csr(CVMX_DFA_DIFRDPTR); /* Read to make sure setup is complete */
	return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_hfa_initialize);
#endif
/**
* Shutdown the DFA block. DFA must be idle when
* this function is called.
*
* @return Zero on success, negative on failure
*/
int cvmx_hfa_shutdown(void)
{
	/* Refuse to tear down the command queue while work is still pending. */
	int pending = cvmx_cmd_queue_length(CVMX_CMD_QUEUE_DFA);

	if (pending) {
		cvmx_dprintf("ERROR: cvmx_hfa_shutdown: DFA not idle.\n");
		return -1;
	}

	cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_DFA);
	return 0;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_hfa_shutdown);
#endif
/**
* Submit a command to the DFA block
*
* @param command DFA command to submit
*
* @return Zero on success, negative on failure
*/
int cvmx_hfa_submit(cvmx_dfa_command_t *command)
{
	cvmx_cmd_queue_result_t rc;

	/* Queue the four command words; on success, ring the doorbell so the
	 * DFA block notices the new work. */
	rc = cvmx_cmd_queue_write(CVMX_CMD_QUEUE_DFA, 1, 4, command->u64);
	if (rc != CVMX_CMD_QUEUE_SUCCESS)
		return rc;

	cvmx_write_csr(CVMX_DFA_DBELL, 1);
	return rc;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(cvmx_hfa_submit);
#endif
void *hfa_bootmem_alloc (uint64_t size, uint64_t alignment)
{
	/* A positive return is a physical address; otherwise the bootmem
	 * allocator failed. */
	int64_t phys = cvmx_bootmem_phy_alloc(size, 0, 0, alignment, 0);

	return (phys > 0) ? cvmx_phys_to_ptr(phys) : NULL;
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(hfa_bootmem_alloc);
#endif
int hfa_bootmem_free (void *ptr, uint64_t size)
{
	/* Translate back to the physical address the allocator handed out,
	 * then return the block to the bootmem free list. */
	return __cvmx_bootmem_phy_free (cvmx_ptr_to_phys (ptr), size, 0);
}
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
EXPORT_SYMBOL(hfa_bootmem_free);
#endif
#endif

View File

@ -1,437 +0,0 @@
/***********************license start***************
* Copyright (c) 2011 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Interface to the CN63XX, CN68XX hardware HFA engine.
*
* <hr>$Revision: 49448 $<hr>
*/
#ifndef __CVMX_HFA_H__
#define __CVMX_HFA_H__
#ifndef CVMX_BUILD_FOR_LINUX_USER
#include "cvmx-llm.h"
#include "cvmx-wqe.h"
#include "cvmx-fpa.h"
#include "cvmx-bootmem.h"
#ifdef CVMX_BUILD_FOR_LINUX_KERNEL
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-config.h>
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
/* DFA queue cmd buffers */
#define CVMX_FPA_DFA_POOL (4) /**< DFA command buffers */
#define CVMX_FPA_DFA_POOL_SIZE (2 * CVMX_CACHE_LINE_SIZE)
#endif
#else
#include "executive-config.h"
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
#include "cvmx-config.h"
#endif
#endif
#endif
#define ENABLE_DEPRECATED /* Set to enable the old 18/36 bit names */
#ifdef __cplusplus
extern "C" {
#endif
#define CVMX_DFA_ITYPE_MEMLOAD 0x0
#define CVMX_DFA_ITYPE_CACHELOAD 0x1
#define CVMX_DFA_ITYPE_GRAPHFREE 0x3
#define CVMX_DFA_ITYPE_GRAPHWALK 0x4
/* One DFA gather-list entry: a 40-bit address plus a 24-bit size
 * (units presumably bytes -- TODO confirm against the HFA spec).
 * Field order flips with endianness so the u64 image matches hardware. */
typedef union {
	uint64_t u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t size:24;
		uint64_t addr:40;
#else
		uint64_t addr:40;
		uint64_t size:24;
#endif
	} s;
} cvmx_dfa_gather_entry_t;
/* Word 0 of a DFA/HFA command.  One variant per instruction type; the
 * itype field (CVMX_DFA_ITYPE_*) selects which layout applies.  These are
 * hardware bit layouts -- do not reorder fields; the #ifdef pair keeps the
 * in-memory image identical on both endiannesses. */
typedef union {
	/* CVMX_DFA_ITYPE_GRAPHWALK layout */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t f1:3;
		uint64_t unused1:2;
		uint64_t snode:27;
		uint64_t gather_mode:1;
		uint64_t little_endian:1;
		uint64_t store_full:1;
		uint64_t load_through:1;
		uint64_t small:1;
		uint64_t itype:3;
		uint64_t unused0:2;
		uint64_t mbase:22;
#else
		uint64_t mbase:22;
		uint64_t unused0:2;
		uint64_t itype:3;
		uint64_t small:1;
		uint64_t load_through:1;
		uint64_t store_full:1;
		uint64_t little_endian:1;
		uint64_t gather_mode:1;
		uint64_t snode:27;
		uint64_t unused1:2;
		uint64_t f1:3;
#endif
	} walk;
	/* CVMX_DFA_ITYPE_CACHELOAD layout */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused4:7;
		uint64_t dbase:9;
		uint64_t unused3:2;
		uint64_t cbase:14;
		uint64_t gather_mode:1;
		uint64_t little_endian:1;
		uint64_t store_full:1;
		uint64_t load_through:1;
		uint64_t unused2:1;
		uint64_t itype:3;
		uint64_t unused1:6;
		uint64_t dsize:10;
		uint64_t unused0:2;
		uint64_t pgid:6;
#else
		uint64_t pgid:6;
		uint64_t unused0:2;
		uint64_t dsize:10;
		uint64_t unused1:6;
		uint64_t itype:3;
		uint64_t unused2:1;
		uint64_t load_through:1;
		uint64_t store_full:1;
		uint64_t little_endian:1;
		uint64_t gather_mode:1;
		uint64_t cbase:14;
		uint64_t unused3:2;
		uint64_t dbase:9;
		uint64_t unused4:7;
#endif
	} cload;
	/* CVMX_DFA_ITYPE_MEMLOAD layout */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused2:32;
		uint64_t gather_mode:1;
		uint64_t little_endian:1;
		uint64_t store_full:1;
		uint64_t load_through:1;
		uint64_t unused1:1;
		uint64_t itype:3;
		uint64_t unused0:2;
		uint64_t mbase:22;
#else
		uint64_t mbase:22;
		uint64_t unused0:2;
		uint64_t itype:3;
		uint64_t unused1:1;
		uint64_t load_through:1;
		uint64_t store_full:1;
		uint64_t little_endian:1;
		uint64_t gather_mode:1;
		uint64_t unused2:32;
#endif
	} mload;
	/* CVMX_DFA_ITYPE_GRAPHFREE layout */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused2:34;
		uint64_t store_full:1;
		uint64_t unused1:2;
		uint64_t itype:3;
		uint64_t unused0:24;
#else
		uint64_t unused0:24;
		uint64_t itype:3;
		uint64_t unused1:2;
		uint64_t store_full:1;
		uint64_t unused2:34;
#endif
	} free;
} cvmx_dfa_word0_t;
/* Word 1 of a DFA/HFA command.  rptr is a 40-bit address (presumably the
 * result/read pointer -- TODO confirm) and rmax a per-itype result limit.
 * Hardware bit layout: do not reorder fields. */
typedef union {
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t rmax:16;
		uint64_t f2:8;
		uint64_t rptr:40;
#else
		uint64_t rptr:40;
		uint64_t f2:8;
		uint64_t rmax:16;
#endif
	} walk;	/* CVMX_DFA_ITYPE_GRAPHWALK */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:13;
		uint64_t rmax:3;
		uint64_t unused0:8;
		uint64_t rptr:40;
#else
		uint64_t rptr:40;
		uint64_t unused0:8;
		uint64_t rmax:3;
		uint64_t unused1:13;
#endif
	} cload;	/* CVMX_DFA_ITYPE_CACHELOAD */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:4;
		uint64_t rmax:12;
		uint64_t unused0:8;
		uint64_t rptr:40;
#else
		uint64_t rptr:40;
		uint64_t unused0:8;
		uint64_t rmax:12;
		uint64_t unused1:4;
#endif
	} mload;	/* CVMX_DFA_ITYPE_MEMLOAD */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused:24;
		uint64_t rptr:40;
#else
		uint64_t rptr:40;
		uint64_t unused:24;
#endif
	} free;	/* CVMX_DFA_ITYPE_GRAPHFREE */
} cvmx_dfa_word1_t;
/* Word 2 of a DFA/HFA command.  dptr/dlen describe a 40-bit data address
 * and length; clmsk appears in every variant (cluster mask, presumably --
 * TODO confirm).  Hardware bit layout: do not reorder fields. */
typedef union {
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t dlen:16;
		uint64_t srepl:2;
		uint64_t unused:2;
		uint64_t clmsk:4;
		uint64_t dptr:40;
#else
		uint64_t dptr:40;
		uint64_t clmsk:4;
		uint64_t unused:2;
		uint64_t srepl:2;
		uint64_t dlen:16;
#endif
	} walk;	/* CVMX_DFA_ITYPE_GRAPHWALK */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t dlen:16;
		uint64_t unused:4;
		uint64_t clmsk:4;
		uint64_t dptr:40;
#else
		uint64_t dptr:40;
		uint64_t clmsk:4;
		uint64_t unused:4;
		uint64_t dlen:16;
#endif
	} cload;	/* CVMX_DFA_ITYPE_CACHELOAD */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t dlen:16;
		uint64_t repl:2;
		uint64_t unused:2;
		uint64_t clmsk:4;
		uint64_t dptr:40;
#else
		uint64_t dptr:40;
		uint64_t clmsk:4;
		uint64_t unused:2;
		uint64_t repl:2;
		uint64_t dlen:16;
#endif
	} mload;	/* CVMX_DFA_ITYPE_MEMLOAD */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:20;
		uint64_t clmsk:4;
		uint64_t unused0:40;
#else
		uint64_t unused0:40;
		uint64_t clmsk:4;
		uint64_t unused1:20;
#endif
	} free;	/* CVMX_DFA_ITYPE_GRAPHFREE */
} cvmx_dfa_word2_t;
/* Word 3 of a DFA/HFA command.  wqptr is a 40-bit address (presumably the
 * work-queue entry submitted on completion -- TODO confirm) and vgid a
 * virtual group id.  Hardware bit layout: do not reorder fields. */
typedef union {
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:2;
		uint64_t vgid:8;
		uint64_t unused0:5;
		uint64_t f3:9;
		uint64_t wqptr:40;
#else
		uint64_t wqptr:40;
		uint64_t f3:9;
		uint64_t unused0:5;
		uint64_t vgid:8;
		uint64_t unused1:2;
#endif
	} walk;	/* CVMX_DFA_ITYPE_GRAPHWALK */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:2;
		uint64_t vgid:8;
		uint64_t unused0:7;
		uint64_t f4:7;
		uint64_t wqptr:40;
#else
		uint64_t wqptr:40;
		uint64_t f4:7;
		uint64_t unused0:7;
		uint64_t vgid:8;
		uint64_t unused1:2;
#endif
	} cload;	/* CVMX_DFA_ITYPE_CACHELOAD */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:2;
		uint64_t vgid:8;
		uint64_t unused0:7;
		uint64_t f4:7;
		uint64_t wqptr:40;
#else
		uint64_t wqptr:40;
		uint64_t f4:7;
		uint64_t unused0:7;
		uint64_t vgid:8;
		uint64_t unused1:2;
#endif
	} mload;	/* CVMX_DFA_ITYPE_MEMLOAD (same layout as cload) */
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		uint64_t unused1:2;
		uint64_t vgid:8;
		uint64_t unused0:14;
		uint64_t wqptr:40;
#else
		uint64_t wqptr:40;
		uint64_t unused0:14;
		uint64_t vgid:8;
		uint64_t unused1:2;
#endif
	} free;	/* CVMX_DFA_ITYPE_GRAPHFREE */
} cvmx_dfa_word3_t;
/* A complete 4-word (32-byte) DFA/HFA command.  The u64[4] view is what
 * cvmx_hfa_submit() hands to the command queue; the anonymous struct gives
 * per-word access via the layouts above. */
typedef union {
	uint64_t u64[4];
	struct {
		cvmx_dfa_word0_t word0;
		cvmx_dfa_word1_t word1;
		cvmx_dfa_word2_t word2;
		cvmx_dfa_word3_t word3;
	};
} cvmx_dfa_command_t;
#ifdef CVMX_ENABLE_DFA_FUNCTIONS
/**
 * Initialize the DFA hardware before use.
 *
 * @return Zero on success, -1 on failure.
 */
int cvmx_hfa_initialize(void);
/**
 * Shutdown and cleanup resources used by the DFA.
 *
 * @return Presumably zero on success, negative on failure --
 *         TODO confirm against the implementation.
 */
int cvmx_hfa_shutdown(void);
/**
 * Submit a command to the HFA block.
 *
 * @param command HFA command to submit
 *
 * @return Zero on success, negative on failure.
 */
int cvmx_hfa_submit(cvmx_dfa_command_t *command);
/**
 * Allocate a block of memory from the free list that was passed
 * to the application by the bootloader.
 *
 * @param size      Size in bytes of block to allocate
 * @param alignment Alignment required - must be power of 2
 *
 * @return Pointer to block of memory, NULL on error.  Ownership of the
 *         block passes to the caller; release it with hfa_bootmem_free().
 */
void *hfa_bootmem_alloc (uint64_t size, uint64_t alignment);
/**
 * Frees a block to the bootmem allocator list.
 *
 * @param ptr  address of block (memory pointer (void*))
 * @param size size of block in bytes.
 *
 * @return 1 on success,
 *         0 on failure
 *         (note: the opposite of the usual 0-on-success convention)
 */
int hfa_bootmem_free (void *ptr, uint64_t size);
#endif
#ifdef __cplusplus
}
#endif
#endif /* __CVMX_HFA_H__ */

View File

@ -1,418 +0,0 @@
/***********************license start***************
* Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com). All rights
* reserved.
*
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Cavium Inc. nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
* This Software, including technical data, may be subject to U.S. export control
* laws, including the U.S. Export Administration Act and its associated
* regulations, and may be subject to export or import regulations in other
* countries.
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
* THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
* DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
* SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
* MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
* VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
* CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
/**
* @file
*
* Functions and typedefs for using Octeon in HiGig/HiGig+/HiGig2 mode over
* XAUI.
*
* <hr>$Revision: 70030 $<hr>
*/
#ifndef __CVMX_HIGIG_H__
#define __CVMX_HIGIG_H__
#include "cvmx-wqe.h"
#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * HiGig/HiGig+ frame header: three 32-bit words prepended to packets
 * carried over XAUI in HiGig mode.  Word 2 has two overlays (o1/o2)
 * selected by the header_type field in word 1.
 */
typedef struct
{
    union
    {
        uint32_t u32;
        struct
        {
            uint32_t start          : 8; /**< 8-bits of Preamble indicating start of frame */
            uint32_t hgi            : 2; /**< HiGig interface format indicator
                                            00 = Reserved
                                            01 = Pure preamble - IEEE standard framing of 10GE
                                            10 = XGS header - framing based on XGS family definition In this
                                                format, the default length of the header is 12 bytes and additional
                                                bytes are indicated by the HDR_EXT_LEN field
                                            11 = Reserved */
            uint32_t cng_high       : 1; /**< Congestion Bit High flag */
            uint32_t hdr_ext_len    : 3; /**< This field is valid only if the HGI field is a b'10' and it indicates the extension
                                            to the standard 12-bytes of XGS HiGig header. Each unit represents 4
                                            bytes, giving a total of 16 additional extension bytes. Value of b'101', b'110'
                                            and b'111' are reserved. For HGI field value of b'01' this field should be
                                            b'01'. For all other values of HGI it is don't care. */
            uint32_t src_modid_6    : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
                                            SRC_MODID (bits 4:0 are in Byte 4 and bit 5 is in Byte 9). For HGI field
                                            value of b'01' this field should be b'0'. For all other values of HGI it is don't
                                            care. */
            uint32_t dst_modid_6    : 1; /**< This field is valid only if the HGI field is a b'10' and it represents Bit 6 of
                                            DST_MODID (bits 4:0 are in Byte 7 and bit 5 is in Byte 9). For HGI field
                                            value of b'01' this field should be b'1'. For all other values of HGI it is don't
                                            care. */
            uint32_t vid_high       : 8; /**< 8-bits MSB of the VLAN tag information */
            uint32_t vid_low        : 8; /**< 8-bits LSB of the VLAN tag information */
        } s;
    } dw0;
    union
    {
        uint32_t u32;
        struct
        {
            uint32_t src_modid_low  : 5; /**< Bits 4:0 of Module ID of the source module on which the packet ingress (bit
                                            5 is in Byte 9 and bit 6 is in Byte 1) */
            uint32_t opcode         : 3; /**< XGS HiGig op-code, indicating the type of packet
                                            000 = Control frames used for CPU to CPU communications
                                            001 = Unicast packet with destination resolved; The packet can be
                                                either Layer 2 unicast packet or L3 unicast packet that was
                                                routed in the ingress chip.
                                            010 = Broadcast or unknown Unicast packet or unknown multicast,
                                                destined to all members of the VLAN
                                            011 = L2 Multicast packet, destined to all ports of the group indicated
                                                in the L2MC_INDEX which is overlayed on DST_PORT/DST_MODID fields
                                            100 = IP Multicast packet, destined to all ports of the group indicated
                                                in the IPMC_INDEX which is overlayed on DST_PORT/DST_MODID fields
                                            101 = Reserved
                                            110 = Reserved
                                            111 = Reserved */
            uint32_t pfm            : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registered/unregistered
                                            multicast (unknown L2 multicast and IPMC) packets. This field is used
                                            when OPCODE is 011 or 100. Semantics of PFM bits are as follows:
                                            For registered L2 multicast packets:
                                                PFM= 0 - Flood to VLAN
                                                PFM= 1 or 2 - Send to group members in the L2MC table
                                            For unregistered L2 multicast packets:
                                                PFM= 0 or 1 - Flood to VLAN
                                                PFM= 2 - Drop the packet */
            uint32_t src_port_tgid  : 6; /**< If the MSB of this field is set, then it indicates the LAG the packet ingressed
                                            on, else it represents the physical port the packet ingressed on. */
            uint32_t dst_port       : 5; /**< Port number of destination port on which the packet needs to egress. */
            uint32_t priority       : 3; /**< This is the internal priority of the packet. This internal priority will go through
                                            COS_SEL mapping registers to map to the actual MMU queues. */
            uint32_t header_type    : 2; /**< Indicates the format of the next 4 bytes of the XGS HiGig header
                                            00 = Overlay 1 (default)
                                            01 = Overlay 2 (Classification Tag)
                                            10 = Reserved
                                            11 = Reserved */
            uint32_t cng_low        : 1; /**< Semantics of CNG_HIGH and CNG_LOW are as follows: The following
                                            encodings are to make it backward compatible:
                                            [CNG_HIGH, CNG_LOW] - COLOR
                                            [0, 0] - Packet is green
                                            [0, 1] - Packet is red
                                            [1, 1] - Packet is yellow
                                            [1, 0] - Undefined */
            uint32_t dst_modid_low  : 5; /**< Bits 4:0 of Module ID of the destination port on which the packet needs to egress. */
        } s;
    } dw1;
    union
    {
        uint32_t u32;
        /* Overlay 1 (header_type == 00): switching/mirroring control. */
        struct
        {
            uint32_t dst_t          : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
                                            group. */
            uint32_t dst_tgid       : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
                                            DO_NOT_LEARN bit is overlaid on the second bit of this field. */
            uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
                                            ingressed the system. */
            uint32_t mirror_only    : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
                                            needs to be mirrored. */
            uint32_t mirror_done    : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
                                            may still need to be switched. */
            uint32_t mirror         : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
                                            packet was switched and only needs to be mirrored. */
            uint32_t src_modid_5    : 1; /**< Source Module ID: Bit 5 of Src_ModID (bits 4:0 are in byte 4 and bit 6 is in
                                            byte 1) */
            uint32_t dst_modid_5    : 1; /**< Destination Module ID: Bit 5 of Dst_ModID (bits 4:0 are in byte 7 and bit 6
                                            is in byte 1) */
            uint32_t l3             : 1; /**< L3: Indicates that the packet is L3 switched */
            uint32_t label_present  : 1; /**< Label Present: Indicates that header contains a 20-bit VC label: HiGig+
                                            added field. */
            uint32_t vc_label_16_19 : 4; /**< VC Label: Bits 19:16 of VC label: HiGig+ added field */
            uint32_t vc_label_0_15  : 16;/**< VC Label: Bits 15:0 of VC label: HiGig+ added field */
        } o1;
        /* Overlay 2 (header_type == 01): classification tag. */
        struct
        {
            uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
            uint32_t reserved_0_15  : 16;
        } o2;
    } dw2;
} cvmx_higig_header_t;
/**
 * HiGig2 frame header: four 32-bit words prepended to packets carried
 * over XAUI in HiGig2 mode.  Word 2 has two overlays (o1/o2) selected
 * by the PPD_TYPE field in word 1.
 */
typedef struct
{
    union
    {
        uint32_t u32;
        struct
        {
            uint32_t k_sop          : 8; /**< The delimiter indicating the start of a packet transmission */
            uint32_t reserved_21_23 : 3;
            uint32_t mcst           : 1; /**< MCST indicates whether the packet should be unicast or
                                            multicast forwarded through the XGS switching fabric
                                            - 0: Unicast
                                            - 1: Multicast */
            uint32_t tc             : 4; /**< Traffic Class [3:0] indicates the distinctive Quality of Service (QoS)
                                            the switching fabric will provide when forwarding the packet
                                            through the fabric */
            uint32_t dst_modid_mgid : 8; /**< When MCST=0, this field indicates the destination XGS module to
                                            which the packet will be delivered. When MCST=1, this field indicates
                                            higher order bits of the Multicast Group ID. */
            uint32_t dst_pid_mgid   : 8; /**< When MCST=0, this field indicates a port associated with the
                                            module indicated by the DST_MODID, through which the packet
                                            will exit the system. When MCST=1, this field indicates lower order
                                            bits of the Multicast Group ID */
        } s;
    } dw0;
    union
    {
        uint32_t u32;
        struct
        {
            uint32_t src_modid      : 8; /**< Source Module ID indicates the source XGS module from which
                                            the packet is originated. (It can also be used for the fabric multicast
                                            load balancing purpose.) */
            uint32_t src_pid        : 8; /**< Source Port ID indicates a port associated with the module
                                            indicated by the SRC_MODID, through which the packet has
                                            entered the system */
            uint32_t lbid           : 8; /**< Load Balancing ID indicates a packet flow hashing index
                                            computed by the ingress XGS module for statistical distribution of
                                            packet flows through a multipath fabric */
            uint32_t dp             : 2; /**< Drop Precedence indicates the traffic rate violation status of the
                                            packet measured by the ingress module.
                                            - 00: GREEN
                                            - 01: RED
                                            - 10: Reserved
                                            - 11: Yellow */
            uint32_t reserved_3_5   : 3;
            uint32_t ppd_type       : 3; /**< Packet Processing Descriptor Type
                                            - 000: PPD Overlay1
                                            - 001: PPD Overlay2
                                            - 010~111: Reserved */
        } s;
    } dw1;
    union
    {
        uint32_t u32;
        /* PPD Overlay 1 (ppd_type == 000): switching/mirroring control. */
        struct
        {
            uint32_t dst_t          : 1; /**< Destination Trunk: Indicates that the destination port is a member of a trunk
                                            group. */
            uint32_t dst_tgid       : 3; /**< Destination Trunk Group ID: Trunk group ID of the destination port. The
                                            DO_NOT_LEARN bit is overlaid on the second bit of this field. */
            uint32_t ingress_tagged : 1; /**< Ingress Tagged: Indicates whether the packet was tagged when it originally
                                            ingressed the system. */
            uint32_t mirror_only    : 1; /**< Mirror Only: XGS 1/2 mode: Indicates that the packet was switched and only
                                            needs to be mirrored. */
            uint32_t mirror_done    : 1; /**< Mirroring Done: XGS1/2 mode: Indicates that the packet was mirrored and
                                            may still need to be switched. */
            uint32_t mirror         : 1; /**< Mirror: XGS3 mode: a mirror copy packet. XGS1/2 mode: Indicates that the
                                            packet was switched and only needs to be mirrored. */
            uint32_t reserved_22_23 : 2;
            uint32_t l3             : 1; /**< L3: Indicates that the packet is L3 switched */
            uint32_t label_present  : 1; /**< Label Present: Indicates that header contains a 20-bit VC label: HiGig+
                                            added field. */
            uint32_t vc_label       : 20; /**< Refer to the HiGig+ Architecture Specification */
        } o1;
        /* PPD Overlay 2 (ppd_type == 001): classification tag. */
        struct
        {
            uint32_t classification : 16; /**< Classification tag information from the HiGig device FFP */
            uint32_t reserved_0_15  : 16;
        } o2;
    } dw2;
    union
    {
        uint32_t u32;
        struct
        {
            uint32_t vid            : 16; /**< VLAN tag information */
            uint32_t pfm            : 2; /**< Three Port Filtering Modes (0, 1, 2) used in handling registered/unregistered
                                            multicast (unknown L2 multicast and IPMC) packets. This field is used
                                            when OPCODE is 011 or 100. Semantics of PFM bits are as follows:
                                            For registered L2 multicast packets:
                                                PFM= 0 - Flood to VLAN
                                                PFM= 1 or 2 - Send to group members in the L2MC table
                                            For unregistered L2 multicast packets:
                                                PFM= 0 or 1 - Flood to VLAN
                                                PFM= 2 - Drop the packet */
            uint32_t src_t          : 1; /**< If the MSB of this field is set, then it indicates the LAG the packet ingressed
                                            on, else it represents the physical port the packet ingressed on. */
            uint32_t reserved_11_12 : 2;
            uint32_t opcode         : 3; /**< XGS HiGig op-code, indicating the type of packet
                                            000 = Control frames used for CPU to CPU communications
                                            001 = Unicast packet with destination resolved; The packet can be
                                                either Layer 2 unicast packet or L3 unicast packet that was
                                                routed in the ingress chip.
                                            010 = Broadcast or unknown Unicast packet or unknown multicast,
                                                destined to all members of the VLAN
                                            011 = L2 Multicast packet, destined to all ports of the group indicated
                                                in the L2MC_INDEX which is overlayed on DST_PORT/DST_MODID fields
                                            100 = IP Multicast packet, destined to all ports of the group indicated
                                                in the IPMC_INDEX which is overlayed on DST_PORT/DST_MODID fields
                                            101 = Reserved
                                            110 = Reserved
                                            111 = Reserved */
            uint32_t hdr_ext_len    : 3; /**< This field is valid only if the HGI field is a b'10' and it indicates the extension
                                            to the standard 12-bytes of XGS HiGig header. Each unit represents 4
                                            bytes, giving a total of 16 additional extension bytes. Value of b'101', b'110'
                                            and b'111' are reserved. For HGI field value of b'01' this field should be
                                            b'01'. For all other values of HGI it is don't care. */
            uint32_t reserved_0_4   : 5;
        } s;
    } dw3;
} cvmx_higig2_header_t;
/**
 * Initialize the HiGig aspects of a XAUI interface. This function
 * should be called before the cvmx-helper generic init.
 *
 * Configures PIP to parse/skip the HiGig header, GMX RX to treat the
 * header as user data, GMX TX padding/preamble/IFG, disables GMX
 * backpressure, optionally enables HiGig2, and finally enables HiGig
 * on the XAUI control register.  The register write order below is
 * deliberate -- do not reorder.
 *
 * @param interface Interface to initialize HiGig on (0-1)
 * @param enable_higig2
 *                  Non zero to enable HiGig2 support. Zero to support HiGig
 *                  and HiGig+.
 *
 * @return Zero on success, negative on failure (the current
 *         implementation always returns 0).
 */
static inline int cvmx_higig_initialize(int interface, int enable_higig2)
{
    cvmx_pip_prt_cfgx_t pip_prt_cfg;
    cvmx_gmxx_rxx_udd_skp_t gmx_rx_udd_skp;
    cvmx_gmxx_txx_min_pkt_t gmx_tx_min_pkt;
    cvmx_gmxx_txx_append_t gmx_tx_append;
    cvmx_gmxx_tx_ifg_t gmx_tx_ifg;
    cvmx_gmxx_tx_ovr_bp_t gmx_tx_ovr_bp;
    cvmx_gmxx_rxx_frm_ctl_t gmx_rx_frm_ctl;
    cvmx_gmxx_tx_xaui_ctl_t gmx_tx_xaui_ctl;
    int i, pknd;
    /* HiGig2 uses a 16-byte header; HiGig/HiGig+ use 12 bytes */
    int header_size = (enable_higig2) ? 16 : 12;
    /* Setup PIP to handle HiGig */
    if (octeon_has_feature(OCTEON_FEATURE_PKND))
        pknd = cvmx_helper_get_pknd(interface, 0);
    else
        pknd = interface*16; /* non-PKND chips: ports numbered 16 per interface */
    pip_prt_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(pknd));
    pip_prt_cfg.s.dsa_en = 0;       /* DSA and HiGig parsing are mutually exclusive */
    pip_prt_cfg.s.higig_en = 1;
    pip_prt_cfg.s.hg_qos = 1;       /* derive QoS from the HiGig header */
    pip_prt_cfg.s.skip = header_size;
    cvmx_write_csr(CVMX_PIP_PRT_CFGX(pknd), pip_prt_cfg.u64);
    /* Setup some sample QoS defaults. These can be changed later */
    if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
    {
        for (i=0; i<64; i++)
        {
            cvmx_pip_hg_pri_qos_t pip_hg_pri_qos;
            pip_hg_pri_qos.u64 = 0;
            /* One write per priority entry; the register appears to be
               indexed internally by the pri field -- NOTE(review): confirm
               against the PIP_HG_PRI_QOS register description */
            pip_hg_pri_qos.s.up_qos = 1;
            pip_hg_pri_qos.s.pri = i;
            pip_hg_pri_qos.s.qos = i&7;
            cvmx_write_csr(CVMX_PIP_HG_PRI_QOS, pip_hg_pri_qos.u64);
        }
    }
    /* Setup GMX RX to treat the HiGig header as user data to ignore */
    gmx_rx_udd_skp.u64 = cvmx_read_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface));
    gmx_rx_udd_skp.s.len = header_size;
    gmx_rx_udd_skp.s.fcssel = 0;    /* HiGig header bytes included in FCS */
    cvmx_write_csr(CVMX_GMXX_RXX_UDD_SKP(0, interface), gmx_rx_udd_skp.u64);
    /* Disable GMX preamble checking (HiGig replaces the standard preamble) */
    gmx_rx_frm_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface));
    gmx_rx_frm_ctl.s.pre_chk = 0;
    cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(0, interface), gmx_rx_frm_ctl.u64);
    /* Setup GMX TX to pad properly min sized packets */
    gmx_tx_min_pkt.u64 = cvmx_read_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface));
    /* 59 + header bytes: presumably the pre-FCS Ethernet minimum plus the
       HiGig header -- TODO confirm the off-by-one convention in the HRM */
    gmx_tx_min_pkt.s.min_size = 59 + header_size;
    cvmx_write_csr(CVMX_GMXX_TXX_MIN_PKT(0, interface), gmx_tx_min_pkt.u64);
    /* Setup GMX TX to not add a preamble */
    gmx_tx_append.u64 = cvmx_read_csr(CVMX_GMXX_TXX_APPEND(0, interface));
    gmx_tx_append.s.preamble = 0;
    cvmx_write_csr(CVMX_GMXX_TXX_APPEND(0, interface), gmx_tx_append.u64);
    /* Reduce the inter frame gap to 8 bytes (ifg1 + ifg2 = 4 + 4) */
    gmx_tx_ifg.u64 = cvmx_read_csr(CVMX_GMXX_TX_IFG(interface));
    gmx_tx_ifg.s.ifg1 = 4;
    gmx_tx_ifg.s.ifg2 = 4;
    cvmx_write_csr(CVMX_GMXX_TX_IFG(interface), gmx_tx_ifg.u64);
    /* Disable GMX backpressure */
    gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
    gmx_tx_ovr_bp.s.bp = 0;
    gmx_tx_ovr_bp.s.en = 0xf;       /* override enabled on all 4 ports */
    gmx_tx_ovr_bp.s.ign_full = 0xf; /* ignore FIFO-full backpressure */
    cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
    if (enable_higig2)
    {
        /* Enable HiGig2 support and forwarding of virtual port backpressure
            to PKO */
        cvmx_gmxx_hg2_control_t gmx_hg2_control;
        gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
        gmx_hg2_control.s.hg2rx_en = 1;
        gmx_hg2_control.s.hg2tx_en = 1;
        gmx_hg2_control.s.logl_en = 0xffff; /* all 16 logical channels */
        gmx_hg2_control.s.phys_en = 1;
        cvmx_write_csr(CVMX_GMXX_HG2_CONTROL(interface), gmx_hg2_control.u64);
    }
    /* Enable HiGig -- done last, after all framing parameters are set */
    gmx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
    gmx_tx_xaui_ctl.s.hg_en = 1;
    cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmx_tx_xaui_ctl.u64);
    return 0;
}
#ifdef __cplusplus
}
#endif
#endif // __CVMX_HIGIG_H__

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More