MIPS XLP platform code update.

* Update the hardware access register definitions and functions to bring
  them in line with other Netlogic software.
* Update the platform bus to use PCI even for on-chip devices. Add a dummy
  PCI driver to ignore on-chip devices which do not need a driver (see the
  sketch after this list).
* Provide memory and IRQ resource allocation code for on-chip devices
  whose resources cannot be obtained from PCI config space.
* Add support for on-chip PCI and USB interfaces.
* Update conf files, enable PCI, and retain the old MAXCPU until we can
  support more than 32 CPUs.
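
The dummy driver mentioned in the bullet list boils down to a newbus
probe/attach pair that claims leftover Netlogic on-chip PCI functions and
does nothing with them, so the PCI bus code stops reporting them as
unattached. A minimal sketch, assuming the standard FreeBSD PCI/newbus
interfaces; the driver name, vendor ID, and probe priority are illustrative
and not taken from this commit:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>

#include <dev/pci/pcivar.h>

#define PCI_VENDOR_NETLOGIC	0x184e		/* illustrative vendor ID */

static int
xlp_devnone_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC)
		return (ENXIO);
	device_set_desc(dev, "XLP on-chip device (no driver needed)");
	/* lowest priority, so any real driver for the function wins */
	return (BUS_PROBE_HOOVER);
}

static int
xlp_devnone_attach(device_t dev)
{
	return (0);		/* nothing to set up */
}

static device_method_t xlp_devnone_methods[] = {
	DEVMETHOD(device_probe,		xlp_devnone_probe),
	DEVMETHOD(device_attach,	xlp_devnone_attach),
	{ 0, 0 }
};

static driver_t xlp_devnone_driver = {
	"xlp_devnone",
	xlp_devnone_methods,
	0,
};
static devclass_t xlp_devnone_devclass;

DRIVER_MODULE(xlp_devnone, pci, xlp_devnone_driver, xlp_devnone_devclass, 0, 0);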

Approved by:	re(kib), jmallett
Jayachandran C. 2011-09-05 10:45:29 +00:00
parent 0fe082e7d5
commit cd4c8d64ff
39 changed files with 3954 additions and 2515 deletions

View File

@ -36,7 +36,7 @@ options SCHED_ULE # ULE scheduler
options SMP
options PREEMPTION # Enable kernel thread preemption
#options FULL_PREEMPTION # Enable kernel thread preemption
options MAXCPU=128 # XLP can probe 128 CPUs
#options MAXCPU=128 # XLP can probe 128 CPUs
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options FFS # Berkeley Fast Filesystem
@ -89,6 +89,7 @@ device bpf
# UART
device uart
device pci
# Network
device ether

View File

@ -21,7 +21,7 @@ machine mips mips64eb
ident XLP64
options ISA_MIPS64
makeoptions ARCH_FLAGS="-march=mips64 -mabi=64"
makeoptions ARCH_FLAGS="-march=mips64r2 -mabi=64"
makeoptions KERNLOADADDR=0xffffffff80100000
include "../nlm/std.xlp"
@ -38,7 +38,7 @@ options SCHED_ULE # ULE scheduler
options SMP
options PREEMPTION # Enable kernel thread preemption
#options FULL_PREEMPTION # Enable kernel thread preemption
options MAXCPU=128 # XLP can probe 128 CPUs
#options MAXCPU=128 # XLP can probe 128 CPUs
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options FFS # Berkeley Fast Filesystem
@ -91,6 +91,7 @@ device bpf
# UART
device uart
device pci
# Network
device ether

View File

@ -37,7 +37,7 @@ options SCHED_ULE # ULE scheduler
options SMP
options PREEMPTION # Enable kernel thread preemption
#options FULL_PREEMPTION # Enable kernel thread preemption
options MAXCPU=128 # XLP can probe 128 CPUs
#options MAXCPU=128 # XLP can probe 128 CPUs
options INET # InterNETworking
options INET6 # IPv6 communications protocols
options FFS # Berkeley Fast Filesystem
@ -90,6 +90,7 @@ device bpf
# UART
device uart
device pci
# Network
device ether

View File

@ -29,7 +29,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
@ -38,7 +37,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/fmn.h>
#include <mips/nlm/hal/pic.h>

View File

@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_BOARD_H__
#define __NLM_BOARD_H__

View File

@ -401,14 +401,14 @@ static u_int8_t
rmi_bus_space_read_1(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
return (u_int8_t) (*(volatile u_int32_t *)(handle + offset));
return (u_int8_t) (*(volatile u_int8_t *)(handle + offset));
}
static u_int16_t
rmi_bus_space_read_2(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
return (u_int16_t)(*(volatile u_int32_t *)(handle + offset));
return (u_int16_t)(*(volatile u_int16_t *)(handle + offset));
}
static u_int32_t
@ -453,14 +453,14 @@ static void
rmi_bus_space_write_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int8_t value)
{
*(volatile u_int32_t *)(handle + offset) = (u_int32_t)value;
*(volatile u_int8_t *)(handle + offset) = value;
}
static void
rmi_bus_space_write_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int16_t value)
{
*(volatile u_int32_t *)(handle + offset) = (u_int32_t)value;
*(volatile u_int16_t *)(handle + offset) = value;
}
static void

View File

@ -0,0 +1,768 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <machine/bus.h>
#include <machine/cache.h>
static int
rmi_pci_bus_space_map(void *t, bus_addr_t addr,
bus_size_t size, int flags,
bus_space_handle_t * bshp);
static void
rmi_pci_bus_space_unmap(void *t, bus_space_handle_t bsh,
bus_size_t size);
static int
rmi_pci_bus_space_subregion(void *t,
bus_space_handle_t bsh,
bus_size_t offset, bus_size_t size,
bus_space_handle_t * nbshp);
static u_int8_t
rmi_pci_bus_space_read_1(void *t,
bus_space_handle_t handle,
bus_size_t offset);
static u_int16_t
rmi_pci_bus_space_read_2(void *t,
bus_space_handle_t handle,
bus_size_t offset);
static u_int32_t
rmi_pci_bus_space_read_4(void *t,
bus_space_handle_t handle,
bus_size_t offset);
static void
rmi_pci_bus_space_read_multi_1(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int8_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_multi_2(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_multi_4(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int32_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_region_1(void *t,
bus_space_handle_t bsh,
bus_size_t offset, u_int8_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_region_2(void *t,
bus_space_handle_t bsh,
bus_size_t offset, u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_region_4(void *t,
bus_space_handle_t bsh,
bus_size_t offset, u_int32_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_1(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int8_t value);
static void
rmi_pci_bus_space_write_2(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int16_t value);
static void
rmi_pci_bus_space_write_4(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int32_t value);
static void
rmi_pci_bus_space_write_multi_1(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int8_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_multi_2(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_multi_4(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int32_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_region_2(void *t,
bus_space_handle_t bsh,
bus_size_t offset,
const u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_region_4(void *t,
bus_space_handle_t bsh,
bus_size_t offset,
const u_int32_t * addr,
size_t count);
static void
rmi_pci_bus_space_set_region_2(void *t,
bus_space_handle_t bsh,
bus_size_t offset, u_int16_t value,
size_t count);
static void
rmi_pci_bus_space_set_region_4(void *t,
bus_space_handle_t bsh,
bus_size_t offset, u_int32_t value,
size_t count);
static void
rmi_pci_bus_space_barrier(void *tag __unused, bus_space_handle_t bsh __unused,
bus_size_t offset __unused, bus_size_t len __unused, int flags);
static void
rmi_pci_bus_space_copy_region_2(void *t,
bus_space_handle_t bsh1,
bus_size_t off1,
bus_space_handle_t bsh2,
bus_size_t off2, size_t count);
u_int8_t
rmi_pci_bus_space_read_stream_1(void *t, bus_space_handle_t handle,
bus_size_t offset);
static u_int16_t
rmi_pci_bus_space_read_stream_2(void *t, bus_space_handle_t handle,
bus_size_t offset);
static u_int32_t
rmi_pci_bus_space_read_stream_4(void *t, bus_space_handle_t handle,
bus_size_t offset);
static void
rmi_pci_bus_space_read_multi_stream_1(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int8_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_multi_stream_2(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_read_multi_stream_4(void *t,
bus_space_handle_t handle,
bus_size_t offset, u_int32_t * addr,
size_t count);
void
rmi_pci_bus_space_write_stream_1(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int8_t value);
static void
rmi_pci_bus_space_write_stream_2(void *t, bus_space_handle_t handle,
bus_size_t offset, u_int16_t value);
static void
rmi_pci_bus_space_write_stream_4(void *t, bus_space_handle_t handle,
bus_size_t offset, u_int32_t value);
static void
rmi_pci_bus_space_write_multi_stream_1(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int8_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_multi_stream_2(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int16_t * addr,
size_t count);
static void
rmi_pci_bus_space_write_multi_stream_4(void *t,
bus_space_handle_t handle,
bus_size_t offset,
const u_int32_t * addr,
size_t count);
#define TODO() printf("XLR memory bus space function '%s' unimplemented\n", __func__)
static struct bus_space local_rmi_pci_bus_space = {
/* cookie */
(void *)0,
/* mapping/unmapping */
rmi_pci_bus_space_map,
rmi_pci_bus_space_unmap,
rmi_pci_bus_space_subregion,
/* allocation/deallocation */
NULL,
NULL,
/* barrier */
rmi_pci_bus_space_barrier,
/* read (single) */
rmi_pci_bus_space_read_1,
rmi_pci_bus_space_read_2,
rmi_pci_bus_space_read_4,
NULL,
/* read multiple */
rmi_pci_bus_space_read_multi_1,
rmi_pci_bus_space_read_multi_2,
rmi_pci_bus_space_read_multi_4,
NULL,
/* read region */
rmi_pci_bus_space_read_region_1,
rmi_pci_bus_space_read_region_2,
rmi_pci_bus_space_read_region_4,
NULL,
/* write (single) */
rmi_pci_bus_space_write_1,
rmi_pci_bus_space_write_2,
rmi_pci_bus_space_write_4,
NULL,
/* write multiple */
rmi_pci_bus_space_write_multi_1,
rmi_pci_bus_space_write_multi_2,
rmi_pci_bus_space_write_multi_4,
NULL,
/* write region */
NULL,
rmi_pci_bus_space_write_region_2,
rmi_pci_bus_space_write_region_4,
NULL,
/* set multiple */
NULL,
NULL,
NULL,
NULL,
/* set region */
NULL,
rmi_pci_bus_space_set_region_2,
rmi_pci_bus_space_set_region_4,
NULL,
/* copy */
NULL,
rmi_pci_bus_space_copy_region_2,
NULL,
NULL,
/* read (single) stream */
rmi_pci_bus_space_read_stream_1,
rmi_pci_bus_space_read_stream_2,
rmi_pci_bus_space_read_stream_4,
NULL,
/* read multiple stream */
rmi_pci_bus_space_read_multi_stream_1,
rmi_pci_bus_space_read_multi_stream_2,
rmi_pci_bus_space_read_multi_stream_4,
NULL,
/* read region stream */
rmi_pci_bus_space_read_region_1,
rmi_pci_bus_space_read_region_2,
rmi_pci_bus_space_read_region_4,
NULL,
/* write (single) stream */
rmi_pci_bus_space_write_stream_1,
rmi_pci_bus_space_write_stream_2,
rmi_pci_bus_space_write_stream_4,
NULL,
/* write multiple stream */
rmi_pci_bus_space_write_multi_stream_1,
rmi_pci_bus_space_write_multi_stream_2,
rmi_pci_bus_space_write_multi_stream_4,
NULL,
/* write region stream */
NULL,
rmi_pci_bus_space_write_region_2,
rmi_pci_bus_space_write_region_4,
NULL,
};
/* generic bus_space tag */
bus_space_tag_t rmi_pci_bus_space = &local_rmi_pci_bus_space;
/*
* Map a region of device bus space into CPU virtual address space.
*/
static int
rmi_pci_bus_space_map(void *t __unused, bus_addr_t addr,
bus_size_t size __unused, int flags __unused,
bus_space_handle_t * bshp)
{
*bshp = addr;
return (0);
}
/*
* Unmap a region of device bus space.
*/
static void
rmi_pci_bus_space_unmap(void *t __unused, bus_space_handle_t bsh __unused,
bus_size_t size __unused)
{
}
/*
* Get a new handle for a subregion of an already-mapped area of bus space.
*/
static int
rmi_pci_bus_space_subregion(void *t __unused, bus_space_handle_t bsh,
bus_size_t offset, bus_size_t size __unused,
bus_space_handle_t * nbshp)
{
*nbshp = bsh + offset;
return (0);
}
/*
* Read a 1, 2, 4, or 8 byte quantity from bus space
* described by tag/handle/offset.
*/
static u_int8_t
rmi_pci_bus_space_read_1(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
return (u_int8_t) (*(volatile u_int8_t *)(handle + offset));
}
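/*
 * PCI bus space is little-endian while the XLP kernels here run
 * big-endian (mips64eb), so the 16- and 32-bit single and multi
 * accessors below byte-swap their data; the 8-bit, region, set and
 * "_stream" accessors pass data through unswapped.
 */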
static u_int16_t
rmi_pci_bus_space_read_2(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
u_int16_t value;
value = *(volatile u_int16_t *)(handle + offset);
return bswap16(value);
}
static u_int32_t
rmi_pci_bus_space_read_4(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
uint32_t value;
value = *(volatile u_int32_t *)(handle + offset);
return bswap32(value);
}
/*
* Read `count' 1, 2, 4, or 8 byte quantities from bus space
* described by tag/handle/offset and copy into buffer provided.
*/
static void
rmi_pci_bus_space_read_multi_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int8_t * addr, size_t count)
{
while (count--) {
*addr = *(volatile u_int8_t *)(handle + offset);
addr++;
}
}
static void
rmi_pci_bus_space_read_multi_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int16_t * addr, size_t count)
{
while (count--) {
*addr = *(volatile u_int16_t *)(handle + offset);
*addr = bswap16(*addr);
addr++;
}
}
static void
rmi_pci_bus_space_read_multi_4(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int32_t * addr, size_t count)
{
while (count--) {
*addr = *(volatile u_int32_t *)(handle + offset);
*addr = bswap32(*addr);
addr++;
}
}
/*
* Write the 1, 2, 4, or 8 byte value `value' to bus space
* described by tag/handle/offset.
*/
static void
rmi_pci_bus_space_write_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int8_t value)
{
mips_sync();
*(volatile u_int8_t *)(handle + offset) = value;
}
static void
rmi_pci_bus_space_write_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int16_t value)
{
mips_sync();
*(volatile u_int16_t *)(handle + offset) = bswap16(value);
}
static void
rmi_pci_bus_space_write_4(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int32_t value)
{
mips_sync();
*(volatile u_int32_t *)(handle + offset) = bswap32(value);
}
/*
* Write `count' 1, 2, 4, or 8 byte quantities from the buffer
* provided to bus space described by tag/handle/offset.
*/
static void
rmi_pci_bus_space_write_multi_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int8_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int8_t *)(handle + offset)) = *addr;
addr++;
}
}
static void
rmi_pci_bus_space_write_multi_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int16_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int16_t *)(handle + offset)) = bswap16(*addr);
addr++;
}
}
static void
rmi_pci_bus_space_write_multi_4(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int32_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int32_t *)(handle + offset)) = bswap32(*addr);
addr++;
}
}
/*
* Write `count' 1, 2, 4, or 8 byte value `val' to bus space described
* by tag/handle starting at `offset'.
*/
static void
rmi_pci_bus_space_set_region_2(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int16_t value, size_t count)
{
bus_addr_t addr = bsh + offset;
for (; count != 0; count--, addr += 2)
(*(volatile u_int16_t *)(addr)) = value;
}
static void
rmi_pci_bus_space_set_region_4(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int32_t value, size_t count)
{
bus_addr_t addr = bsh + offset;
for (; count != 0; count--, addr += 4)
(*(volatile u_int32_t *)(addr)) = value;
}
/*
* Copy `count' 1, 2, 4, or 8 byte values from bus space starting
* at tag/bsh1/off1 to bus space starting at tag/bsh2/off2.
*/
static void
rmi_pci_bus_space_copy_region_2(void *t, bus_space_handle_t bsh1,
bus_size_t off1, bus_space_handle_t bsh2,
bus_size_t off2, size_t count)
{
TODO();
}
/*
* Read `count' 1, 2, 4, or 8 byte quantities from bus space
* described by tag/handle/offset and copy into buffer provided.
*/
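/* The "_stream" accessors below access the bus without byte swapping. */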
u_int8_t
rmi_pci_bus_space_read_stream_1(void *t, bus_space_handle_t handle,
bus_size_t offset)
{
return *((volatile u_int8_t *)(handle + offset));
}
static u_int16_t
rmi_pci_bus_space_read_stream_2(void *t, bus_space_handle_t handle,
bus_size_t offset)
{
return *(volatile u_int16_t *)(handle + offset);
}
static u_int32_t
rmi_pci_bus_space_read_stream_4(void *t, bus_space_handle_t handle,
bus_size_t offset)
{
return (*(volatile u_int32_t *)(handle + offset));
}
static void
rmi_pci_bus_space_read_multi_stream_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int8_t * addr, size_t count)
{
while (count--) {
*addr = (*(volatile u_int8_t *)(handle + offset));
addr++;
}
}
static void
rmi_pci_bus_space_read_multi_stream_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int16_t * addr, size_t count)
{
while (count--) {
*addr = (*(volatile u_int16_t *)(handle + offset));
addr++;
}
}
static void
rmi_pci_bus_space_read_multi_stream_4(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int32_t * addr, size_t count)
{
while (count--) {
*addr = (*(volatile u_int32_t *)(handle + offset));
addr++;
}
}
/*
* Read `count' 1, 2, 4, or 8 byte quantities from bus space
* described by tag/handle and starting at `offset' and copy into
* buffer provided.
*/
void
rmi_pci_bus_space_read_region_1(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int8_t * addr, size_t count)
{
bus_addr_t baddr = bsh + offset;
while (count--) {
*addr++ = (*(volatile u_int8_t *)(baddr));
baddr += 1;
}
}
void
rmi_pci_bus_space_read_region_2(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int16_t * addr, size_t count)
{
bus_addr_t baddr = bsh + offset;
while (count--) {
*addr++ = (*(volatile u_int16_t *)(baddr));
baddr += 2;
}
}
void
rmi_pci_bus_space_read_region_4(void *t, bus_space_handle_t bsh,
bus_size_t offset, u_int32_t * addr, size_t count)
{
bus_addr_t baddr = bsh + offset;
while (count--) {
*addr++ = (*(volatile u_int32_t *)(baddr));
baddr += 4;
}
}
void
rmi_pci_bus_space_write_stream_1(void *t, bus_space_handle_t handle,
bus_size_t offset, u_int8_t value)
{
mips_sync();
*(volatile u_int8_t *)(handle + offset) = value;
}
static void
rmi_pci_bus_space_write_stream_2(void *t, bus_space_handle_t handle,
bus_size_t offset, u_int16_t value)
{
mips_sync();
*(volatile u_int16_t *)(handle + offset) = value;
}
static void
rmi_pci_bus_space_write_stream_4(void *t, bus_space_handle_t handle,
bus_size_t offset, u_int32_t value)
{
mips_sync();
*(volatile u_int32_t *)(handle + offset) = value;
}
static void
rmi_pci_bus_space_write_multi_stream_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int8_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int8_t *)(handle + offset)) = *addr;
addr++;
}
}
static void
rmi_pci_bus_space_write_multi_stream_2(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int16_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int16_t *)(handle + offset)) = *addr;
addr++;
}
}
static void
rmi_pci_bus_space_write_multi_stream_4(void *tag, bus_space_handle_t handle,
bus_size_t offset, const u_int32_t * addr, size_t count)
{
mips_sync();
while (count--) {
(*(volatile u_int32_t *)(handle + offset)) = *addr;
addr++;
}
}
void
rmi_pci_bus_space_write_region_2(void *t,
bus_space_handle_t bsh,
bus_size_t offset,
const u_int16_t * addr,
size_t count)
{
bus_addr_t baddr = (bus_addr_t) bsh + offset;
while (count--) {
(*(volatile u_int16_t *)(baddr)) = *addr;
addr++;
baddr += 2;
}
}
void
rmi_pci_bus_space_write_region_4(void *t, bus_space_handle_t bsh,
bus_size_t offset, const u_int32_t * addr, size_t count)
{
bus_addr_t baddr = bsh + offset;
while (count--) {
(*(volatile u_int32_t *)(baddr)) = *addr;
addr++;
baddr += 4;
}
}
static void
rmi_pci_bus_space_barrier(void *tag __unused, bus_space_handle_t bsh __unused,
bus_size_t offset __unused, bus_size_t len __unused, int flags)
{
}

View File

@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef _RMI_CLOCK_H_
#define _RMI_CLOCK_H_

View File

@ -56,9 +56,8 @@ __FBSDID("$FreeBSD$");
#include <machine/intr_machdep.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/cop0.h>
#include <mips/nlm/hal/cop2.h>
#include <mips/nlm/hal/fmn.h>
#include <mips/nlm/hal/pic.h>
@ -108,7 +107,7 @@ xlp_msgring_config(void)
int i;
/* TODO: Add other nodes */
xlp_cms_base = nlm_regbase_cms(0);
xlp_cms_base = nlm_get_cms_regbase(0);
mtx_init(&msgmap_lock, "msgring", NULL, MTX_SPIN);
if (xlp_threads_per_core < xlp_msg_threads_per_core)
@ -147,62 +146,58 @@ xlp_msgring_iodi_config(void)
void
nlm_cms_credit_setup(int credit)
{
int src, qid, i;
int src, qid, i;
#if 0
/* there are a total of 18 src stations on XLP. */
/* there are a total of 18 src stations on XLP. */
printf("Setting up CMS credits!\n");
for(src=0; src<18; src++) {
for(qid=0; qid<1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
}
for (src=0; src<18; src++) {
for(qid=0; qid<1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
}
#endif
printf("Setting up CMS credits!\n");
/* CPU Credits */
for(i = 1; i < 8; i++) {
for (i = 1; i < 8; i++) {
src = (i << 4);
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
}
for (qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
/* PCIE Credits */
for(i = 0; i < 4; i++) {
for(i = 0; i < 4; i++) {
src = (256 + (i * 2));
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
}
for(qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
/* DTE Credits */
src = 264;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for (qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
/* RSA Credits */
src = 272;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for (qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
/* Crypto Credits */
src = 281;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for (qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
/* CMP Credits */
src = 298;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for (qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
/* POE Credits */
src = 384;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for(qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
/* NAE Credits */
src = 476;
for(qid = 0; qid < 1024; qid++) {
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
for(qid = 0; qid < 1024; qid++)
nlm_cms_setup_credits(xlp_cms_base, qid, src, credit);
}
void
@ -210,7 +205,7 @@ xlp_msgring_cpu_init(uint32_t cpuid)
{
int queue,i;
queue = XLP_CMS_CPU_PUSHQ(0, ((cpuid >> 2) & 0x7), (cpuid & 0x3), 0);
queue = CMS_CPU_PUSHQ(0, ((cpuid >> 2) & 0x7), (cpuid & 0x3), 0);
/* temp allocate 4 segments to each output queue */
nlm_cms_alloc_onchip_q(xlp_cms_base, queue, 4);
/* Enable high watermark and non empty interrupt */
@ -247,9 +242,9 @@ xlp_handle_msg_vc(int vc, int max_msgs)
uint32_t mflags, status;
for (i = 0; i < max_msgs; i++) {
mflags = nlm_fmn_saveflags();
mflags = nlm_save_flags_cop2();
status = nlm_fmn_msgrcv(vc, &srcid, &size, &code, &msg);
nlm_fmn_restoreflags(mflags);
nlm_restore_flags(mflags);
if (status != 0) /* If there is no msg or error */
break;
if (srcid < 0 && srcid >= 1024) {
@ -273,27 +268,27 @@ xlp_handle_msg_vc(int vc, int max_msgs)
static int
msgring_process_fast_intr(void *arg)
{
struct msgring_thread *mthd;
struct thread *td;
int cpu;
struct msgring_thread *mthd;
struct thread *td;
int cpu;
cpu = nlm_cpuid();
mthd = &msgring_threads[cpu];
td = mthd->thread;
mthd = &msgring_threads[cpu];
td = mthd->thread;
/* clear pending interrupts */
nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
/* clear pending interrupts */
nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
/* wake up the target thread */
mthd->needed = 1;
thread_lock(td);
if (TD_AWAITING_INTR(td)) {
TD_CLR_IWAIT(td);
sched_add(td, SRQ_INTR);
}
/* wake up the target thread */
mthd->needed = 1;
thread_lock(td);
if (TD_AWAITING_INTR(td)) {
TD_CLR_IWAIT(td);
sched_add(td, SRQ_INTR);
}
thread_unlock(td);
return (FILTER_HANDLED);
thread_unlock(td);
return (FILTER_HANDLED);
}
u_int fmn_msgcount[32][4];
@ -302,31 +297,31 @@ u_int fmn_loops[32];
static void
msgring_process(void * arg)
{
volatile struct msgring_thread *mthd;
struct thread *td;
uint32_t mflags;
volatile struct msgring_thread *mthd;
struct thread *td;
uint32_t mflags;
int hwtid, vc, handled, nmsgs;
hwtid = (intptr_t)arg;
mthd = &msgring_threads[hwtid];
td = mthd->thread;
KASSERT(curthread == td,
("%s:msg_ithread and proc linkage out of sync", __func__));
mthd = &msgring_threads[hwtid];
td = mthd->thread;
KASSERT(curthread == td,
("%s:msg_ithread and proc linkage out of sync", __func__));
/* First bind this thread to the right CPU */
thread_lock(td);
sched_bind(td, xlp_hwtid_to_cpuid[hwtid]);
thread_unlock(td);
/* First bind this thread to the right CPU */
thread_lock(td);
sched_bind(td, xlp_hwtid_to_cpuid[hwtid]);
thread_unlock(td);
if (hwtid != nlm_cpuid())
printf("Misscheduled hwtid %d != cpuid %d\n", hwtid, nlm_cpuid());
mflags = nlm_fmn_saveflags();
nlm_fmn_cpu_init(IRQ_MSGRING, 0, 0, 0, 0, 0);
nlm_fmn_restoreflags(mflags);
mflags = nlm_save_flags_cop2();
nlm_fmn_cpu_init(IRQ_MSGRING, 0, 0, 0, 0, 0);
nlm_restore_flags(mflags);
/* start processing messages */
for( ; ; ) {
/*atomic_store_rel_int(&mthd->needed, 0);*/
/* start processing messages */
for( ; ; ) {
/*atomic_store_rel_int(&mthd->needed, 0);*/
/* enable cop2 access */
do {
@ -338,22 +333,22 @@ msgring_process(void * arg)
}
} while (handled);
/* sleep */
/* sleep */
#if 0
thread_lock(td);
if (mthd->needed) {
thread_unlock(td);
continue;
}
sched_class(td, PRI_ITHD);
TD_SET_IWAIT(td);
mi_switch(SW_VOL, NULL);
thread_unlock(td);
thread_lock(td);
if (mthd->needed) {
thread_unlock(td);
continue;
}
sched_class(td, PRI_ITHD);
TD_SET_IWAIT(td);
mi_switch(SW_VOL, NULL);
thread_unlock(td);
#else
pause("wmsg", 1);
#endif
fmn_loops[hwtid]++;
}
}
}
static void
@ -396,7 +391,6 @@ register_msgring_handler(int startb, int endb, msgring_handler action,
msgmap[i].arg = arg;
}
mtx_unlock_spin(&msgmap_lock);
return (0);
}

View File

@ -3,10 +3,13 @@ mips/nlm/hal/fmn.c standard
mips/nlm/xlp_machdep.c standard
mips/nlm/intr_machdep.c standard
mips/nlm/tick.c standard
mips/nlm/iodi.c standard
mips/nlm/board.c standard
mips/nlm/cms.c standard
mips/nlm/bus_space_rmi.c standard
mips/nlm/bus_space_rmi.c standard
mips/nlm/bus_space_rmi_pci.c standard
mips/nlm/mpreset.S standard
mips/nlm/uart_bus_xlp_iodi.c optional uart
mips/nlm/uart_cpu_mips_xlp.c optional uart
mips/nlm/xlp_pci.c optional pci
mips/nlm/intern_dev.c optional pci
mips/nlm/uart_pci_xlp.c optional uart
mips/nlm/uart_cpu_xlp.c optional uart
mips/nlm/usb_init.c optional usb

View File

@ -25,11 +25,12 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_BRIDGE_H__
#define __NLM_BRIDGE_H__
#ifndef __NLM_HAL_BRIDGE_H__
#define __NLM_HAL_BRIDGE_H__
/**
* @file_name mio.h
@ -37,141 +38,147 @@
* @brief Basic definitions of XLP memory and io subsystem
*/
/* BRIDGE specific registers */
#define XLP_BRIDGE_MODE_REG 0x40
#define XLP_BRIDGE_PCI_CFG_BASE_REG 0x41
#define XLP_BRIDGE_PCI_CFG_LIMIT_REG 0x42
#define XLP_BRIDGE_PCIE_CFG_BASE_REG 0x43
#define XLP_BRIDGE_PCIE_CFG_LIMIT_REG 0x44
#define XLP_BRIDGE_BUSNUM_BAR0_REG 0x45
#define XLP_BRIDGE_BUSNUM_BAR1_REG 0x46
#define XLP_BRIDGE_BUSNUM_BAR2_REG 0x47
#define XLP_BRIDGE_BUSNUM_BAR3_REG 0x48
#define XLP_BRIDGE_BUSNUM_BAR4_REG 0x49
#define XLP_BRIDGE_BUSNUM_BAR5_REG 0x4a
#define XLP_BRIDGE_BUSNUM_BAR6_REG 0x4b
#define XLP_BRIDGE_FLASH_BAR0_REG 0x4c
#define XLP_BRIDGE_FLASH_BAR1_REG 0x4d
#define XLP_BRIDGE_FLASH_BAR2_REG 0x4e
#define XLP_BRIDGE_FLASH_BAR3_REG 0x4f
#define XLP_BRIDGE_FLASH_LIMIT0_REG 0x50
#define XLP_BRIDGE_FLASH_LIMIT1_REG 0x51
#define XLP_BRIDGE_FLASH_LIMIT2_REG 0x52
#define XLP_BRIDGE_FLASH_LIMIT3_REG 0x53
/*
* BRIDGE specific registers
*
* These registers start after the PCIe header, which has 0x40
* standard entries
*/
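/*
 * Note: the offsets below are relative to nlm_get_bridge_regbase(),
 * which already skips the 0x40-entry PCIe config header; add 0x40 to
 * get the register index from the start of the device's config space.
 */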
#define BRIDGE_MODE 0x00
#define BRIDGE_PCI_CFG_BASE 0x01
#define BRIDGE_PCI_CFG_LIMIT 0x02
#define BRIDGE_PCIE_CFG_BASE 0x03
#define BRIDGE_PCIE_CFG_LIMIT 0x04
#define BRIDGE_BUSNUM_BAR0 0x05
#define BRIDGE_BUSNUM_BAR1 0x06
#define BRIDGE_BUSNUM_BAR2 0x07
#define BRIDGE_BUSNUM_BAR3 0x08
#define BRIDGE_BUSNUM_BAR4 0x09
#define BRIDGE_BUSNUM_BAR5 0x0a
#define BRIDGE_BUSNUM_BAR6 0x0b
#define BRIDGE_FLASH_BAR0 0x0c
#define BRIDGE_FLASH_BAR1 0x0d
#define BRIDGE_FLASH_BAR2 0x0e
#define BRIDGE_FLASH_BAR3 0x0f
#define BRIDGE_FLASH_LIMIT0 0x10
#define BRIDGE_FLASH_LIMIT1 0x11
#define BRIDGE_FLASH_LIMIT2 0x12
#define BRIDGE_FLASH_LIMIT3 0x13
#define XLP_BRIDGE_DRAM_BAR_REG(i) (0x54 + (i))
#define XLP_BRIDGE_DRAM_BAR0_REG 0x54
#define XLP_BRIDGE_DRAM_BAR1_REG 0x55
#define XLP_BRIDGE_DRAM_BAR2_REG 0x56
#define XLP_BRIDGE_DRAM_BAR3_REG 0x57
#define XLP_BRIDGE_DRAM_BAR4_REG 0x58
#define XLP_BRIDGE_DRAM_BAR5_REG 0x59
#define XLP_BRIDGE_DRAM_BAR6_REG 0x5a
#define XLP_BRIDGE_DRAM_BAR7_REG 0x5b
#define BRIDGE_DRAM_BAR(i) (0x14 + (i))
#define BRIDGE_DRAM_BAR0 0x14
#define BRIDGE_DRAM_BAR1 0x15
#define BRIDGE_DRAM_BAR2 0x16
#define BRIDGE_DRAM_BAR3 0x17
#define BRIDGE_DRAM_BAR4 0x18
#define BRIDGE_DRAM_BAR5 0x19
#define BRIDGE_DRAM_BAR6 0x1a
#define BRIDGE_DRAM_BAR7 0x1b
#define XLP_BRIDGE_DRAM_LIMIT_REG(i) (0x5c + (i))
#define XLP_BRIDGE_DRAM_LIMIT0_REG 0x5c
#define XLP_BRIDGE_DRAM_LIMIT1_REG 0x5d
#define XLP_BRIDGE_DRAM_LIMIT2_REG 0x5e
#define XLP_BRIDGE_DRAM_LIMIT3_REG 0x5f
#define XLP_BRIDGE_DRAM_LIMIT4_REG 0x60
#define XLP_BRIDGE_DRAM_LIMIT5_REG 0x61
#define XLP_BRIDGE_DRAM_LIMIT6_REG 0x62
#define XLP_BRIDGE_DRAM_LIMIT7_REG 0x63
#define BRIDGE_DRAM_LIMIT(i) (0x1c + (i))
#define BRIDGE_DRAM_LIMIT0 0x1c
#define BRIDGE_DRAM_LIMIT1 0x1d
#define BRIDGE_DRAM_LIMIT2 0x1e
#define BRIDGE_DRAM_LIMIT3 0x1f
#define BRIDGE_DRAM_LIMIT4 0x20
#define BRIDGE_DRAM_LIMIT5 0x21
#define BRIDGE_DRAM_LIMIT6 0x22
#define BRIDGE_DRAM_LIMIT7 0x23
#define XLP_BRIDGE_DRAM_NODE_TRANSLN0_REG 0x64
#define XLP_BRIDGE_DRAM_NODE_TRANSLN1_REG 0x65
#define XLP_BRIDGE_DRAM_NODE_TRANSLN2_REG 0x66
#define XLP_BRIDGE_DRAM_NODE_TRANSLN3_REG 0x67
#define XLP_BRIDGE_DRAM_NODE_TRANSLN4_REG 0x68
#define XLP_BRIDGE_DRAM_NODE_TRANSLN5_REG 0x69
#define XLP_BRIDGE_DRAM_NODE_TRANSLN6_REG 0x6a
#define XLP_BRIDGE_DRAM_NODE_TRANSLN7_REG 0x6b
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN0_REG 0x6c
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN1_REG 0x6d
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN2_REG 0x6e
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN3_REG 0x6f
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN4_REG 0x70
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN5_REG 0x71
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN6_REG 0x72
#define XLP_BRIDGE_DRAM_CHNL_TRANSLN7_REG 0x73
#define XLP_BRIDGE_PCIEMEM_BASE0_REG 0x74
#define XLP_BRIDGE_PCIEMEM_BASE1_REG 0x75
#define XLP_BRIDGE_PCIEMEM_BASE2_REG 0x76
#define XLP_BRIDGE_PCIEMEM_BASE3_REG 0x77
#define XLP_BRIDGE_PCIEMEM_LIMIT0_REG 0x78
#define XLP_BRIDGE_PCIEMEM_LIMIT1_REG 0x79
#define XLP_BRIDGE_PCIEMEM_LIMIT2_REG 0x7a
#define XLP_BRIDGE_PCIEMEM_LIMIT3_REG 0x7b
#define XLP_BRIDGE_PCIEIO_BASE0_REG 0x7c
#define XLP_BRIDGE_PCIEIO_BASE1_REG 0x7d
#define XLP_BRIDGE_PCIEIO_BASE2_REG 0x7e
#define XLP_BRIDGE_PCIEIO_BASE3_REG 0x7f
#define XLP_BRIDGE_PCIEIO_LIMIT0_REG 0x80
#define XLP_BRIDGE_PCIEIO_LIMIT1_REG 0x81
#define XLP_BRIDGE_PCIEIO_LIMIT2_REG 0x82
#define XLP_BRIDGE_PCIEIO_LIMIT3_REG 0x83
#define XLP_BRIDGE_PCIEMEM_BASE4_REG 0x84
#define XLP_BRIDGE_PCIEMEM_BASE5_REG 0x85
#define XLP_BRIDGE_PCIEMEM_BASE6_REG 0x86
#define XLP_BRIDGE_PCIEMEM_LIMIT4_REG 0x87
#define XLP_BRIDGE_PCIEMEM_LIMIT5_REG 0x88
#define XLP_BRIDGE_PCIEMEM_LIMIT6_REG 0x89
#define XLP_BRIDGE_PCIEIO_BASE4_REG 0x8a
#define XLP_BRIDGE_PCIEIO_BASE5_REG 0x8b
#define XLP_BRIDGE_PCIEIO_BASE6_REG 0x8c
#define XLP_BRIDGE_PCIEIO_LIMIT4_REG 0x8d
#define XLP_BRIDGE_PCIEIO_LIMIT5_REG 0x8e
#define XLP_BRIDGE_PCIEIO_LIMIT6_REG 0x8f
#define XLP_BRIDGE_NBU_EVENT_CNT_CTL_REG 0x90
#define XLP_BRIDGE_EVNTCTR1_LOW_REG 0x91
#define XLP_BRIDGE_EVNTCTR1_HI_REG 0x92
#define XLP_BRIDGE_EVNT_CNT_CTL2_REG 0x93
#define XLP_BRIDGE_EVNTCTR2_LOW_REG 0x94
#define XLP_BRIDGE_EVNTCTR2_HI_REG 0x95
#define XLP_BRIDGE_TRACEBUF_MATCH_REG0 0x96
#define XLP_BRIDGE_TRACEBUF_MATCH_REG1 0x97
#define XLP_BRIDGE_TRACEBUF_MATCH_LOW_REG 0x98
#define XLP_BRIDGE_TRACEBUF_MATCH_HI_REG 0x99
#define XLP_BRIDGE_TRACEBUF_CTRL_REG 0x9a
#define XLP_BRIDGE_TRACEBUF_INIT_REG 0x9b
#define XLP_BRIDGE_TRACEBUF_ACCESS_REG 0x9c
#define XLP_BRIDGE_TRACEBUF_READ_DATA_REG0 0x9d
#define XLP_BRIDGE_TRACEBUF_READ_DATA_REG1 0x9d
#define XLP_BRIDGE_TRACEBUF_READ_DATA_REG2 0x9f
#define XLP_BRIDGE_TRACEBUF_READ_DATA_REG3 0xa0
#define XLP_BRIDGE_TRACEBUF_STATUS_REG 0xa1
#define XLP_BRIDGE_ADDRESS_ERROR0_REG 0xa2
#define XLP_BRIDGE_ADDRESS_ERROR1_REG 0xa3
#define XLP_BRIDGE_ADDRESS_ERROR2_REG 0xa4
#define XLP_BRIDGE_TAG_ECC_ADDR_ERROR0_REG 0xa5
#define XLP_BRIDGE_TAG_ECC_ADDR_ERROR1_REG 0xa6
#define XLP_BRIDGE_TAG_ECC_ADDR_ERROR2_REG 0xa7
#define XLP_BRIDGE_LINE_FLUSH_REG0 0xa8
#define XLP_BRIDGE_LINE_FLUSH_REG1 0xa9
#define XLP_BRIDGE_NODE_ID_REG 0xaa
#define XLP_BRIDGE_ERROR_INTERRUPT_EN_REG 0xab
#define XLP_BRIDGE_PCIE0_WEIGHT_REG 0x300
#define XLP_BRIDGE_PCIE1_WEIGHT_REG 0x301
#define XLP_BRIDGE_PCIE2_WEIGHT_REG 0x302
#define XLP_BRIDGE_PCIE3_WEIGHT_REG 0x303
#define XLP_BRIDGE_USB_WEIGHT_REG 0x304
#define XLP_BRIDGE_NET_WEIGHT_REG 0x305
#define XLP_BRIDGE_POE_WEIGHT_REG 0x306
#define XLP_BRIDGE_CMS_WEIGHT_REG 0x307
#define XLP_BRIDGE_DMAENG_WEIGHT_REG 0x308
#define XLP_BRIDGE_SEC_WEIGHT_REG 0x309
#define XLP_BRIDGE_COMP_WEIGHT_REG 0x30a
#define XLP_BRIDGE_GIO_WEIGHT_REG 0x30b
#define XLP_BRIDGE_FLASH_WEIGHT_REG 0x30c
#define BRIDGE_DRAM_NODE_TRANSLN0 0x24
#define BRIDGE_DRAM_NODE_TRANSLN1 0x25
#define BRIDGE_DRAM_NODE_TRANSLN2 0x26
#define BRIDGE_DRAM_NODE_TRANSLN3 0x27
#define BRIDGE_DRAM_NODE_TRANSLN4 0x28
#define BRIDGE_DRAM_NODE_TRANSLN5 0x29
#define BRIDGE_DRAM_NODE_TRANSLN6 0x2a
#define BRIDGE_DRAM_NODE_TRANSLN7 0x2b
#define BRIDGE_DRAM_CHNL_TRANSLN0 0x2c
#define BRIDGE_DRAM_CHNL_TRANSLN1 0x2d
#define BRIDGE_DRAM_CHNL_TRANSLN2 0x2e
#define BRIDGE_DRAM_CHNL_TRANSLN3 0x2f
#define BRIDGE_DRAM_CHNL_TRANSLN4 0x30
#define BRIDGE_DRAM_CHNL_TRANSLN5 0x31
#define BRIDGE_DRAM_CHNL_TRANSLN6 0x32
#define BRIDGE_DRAM_CHNL_TRANSLN7 0x33
#define BRIDGE_PCIEMEM_BASE0 0x34
#define BRIDGE_PCIEMEM_BASE1 0x35
#define BRIDGE_PCIEMEM_BASE2 0x36
#define BRIDGE_PCIEMEM_BASE3 0x37
#define BRIDGE_PCIEMEM_LIMIT0 0x38
#define BRIDGE_PCIEMEM_LIMIT1 0x39
#define BRIDGE_PCIEMEM_LIMIT2 0x3a
#define BRIDGE_PCIEMEM_LIMIT3 0x3b
#define BRIDGE_PCIEIO_BASE0 0x3c
#define BRIDGE_PCIEIO_BASE1 0x3d
#define BRIDGE_PCIEIO_BASE2 0x3e
#define BRIDGE_PCIEIO_BASE3 0x3f
#define BRIDGE_PCIEIO_LIMIT0 0x40
#define BRIDGE_PCIEIO_LIMIT1 0x41
#define BRIDGE_PCIEIO_LIMIT2 0x42
#define BRIDGE_PCIEIO_LIMIT3 0x43
#define BRIDGE_PCIEMEM_BASE4 0x44
#define BRIDGE_PCIEMEM_BASE5 0x45
#define BRIDGE_PCIEMEM_BASE6 0x46
#define BRIDGE_PCIEMEM_LIMIT4 0x47
#define BRIDGE_PCIEMEM_LIMIT5 0x48
#define BRIDGE_PCIEMEM_LIMIT6 0x49
#define BRIDGE_PCIEIO_BASE4 0x4a
#define BRIDGE_PCIEIO_BASE5 0x4b
#define BRIDGE_PCIEIO_BASE6 0x4c
#define BRIDGE_PCIEIO_LIMIT4 0x4d
#define BRIDGE_PCIEIO_LIMIT5 0x4e
#define BRIDGE_PCIEIO_LIMIT6 0x4f
#define BRIDGE_NBU_EVENT_CNT_CTL 0x50
#define BRIDGE_EVNTCTR1_LOW 0x51
#define BRIDGE_EVNTCTR1_HI 0x52
#define BRIDGE_EVNT_CNT_CTL2 0x53
#define BRIDGE_EVNTCTR2_LOW 0x54
#define BRIDGE_EVNTCTR2_HI 0x55
#define BRIDGE_TRACEBUF_MATCH0 0x56
#define BRIDGE_TRACEBUF_MATCH1 0x57
#define BRIDGE_TRACEBUF_MATCH_LOW 0x58
#define BRIDGE_TRACEBUF_MATCH_HI 0x59
#define BRIDGE_TRACEBUF_CTRL 0x5a
#define BRIDGE_TRACEBUF_INIT 0x5b
#define BRIDGE_TRACEBUF_ACCESS 0x5c
#define BRIDGE_TRACEBUF_READ_DATA0 0x5d
#define BRIDGE_TRACEBUF_READ_DATA1 0x5d
#define BRIDGE_TRACEBUF_READ_DATA2 0x5f
#define BRIDGE_TRACEBUF_READ_DATA3 0x60
#define BRIDGE_TRACEBUF_STATUS 0x61
#define BRIDGE_ADDRESS_ERROR0 0x62
#define BRIDGE_ADDRESS_ERROR1 0x63
#define BRIDGE_ADDRESS_ERROR2 0x64
#define BRIDGE_TAG_ECC_ADDR_ERROR0 0x65
#define BRIDGE_TAG_ECC_ADDR_ERROR1 0x66
#define BRIDGE_TAG_ECC_ADDR_ERROR2 0x67
#define BRIDGE_LINE_FLUSH0 0x68
#define BRIDGE_LINE_FLUSH1 0x69
#define BRIDGE_NODE_ID 0x6a
#define BRIDGE_ERROR_INTERRUPT_EN 0x6b
#define BRIDGE_PCIE0_WEIGHT 0x2c0
#define BRIDGE_PCIE1_WEIGHT 0x2c1
#define BRIDGE_PCIE2_WEIGHT 0x2c2
#define BRIDGE_PCIE3_WEIGHT 0x2c3
#define BRIDGE_USB_WEIGHT 0x2c4
#define BRIDGE_NET_WEIGHT 0x2c5
#define BRIDGE_POE_WEIGHT 0x2c6
#define BRIDGE_CMS_WEIGHT 0x2c7
#define BRIDGE_DMAENG_WEIGHT 0x2c8
#define BRIDGE_SEC_WEIGHT 0x2c9
#define BRIDGE_COMP_WEIGHT 0x2ca
#define BRIDGE_GIO_WEIGHT 0x2cb
#define BRIDGE_FLASH_WEIGHT 0x2cc
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#define nlm_rdreg_bridge(b, r) nlm_read_reg_kseg(b, r)
#define nlm_wreg_bridge(b, r, v) nlm_write_reg_kseg(b, r, v)
#define nlm_pcibase_bridge(node) nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
#define nlm_regbase_bridge(node) nlm_pcibase_bridge(node)
#define nlm_read_bridge_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_bridge_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_bridge_pcibase(node) \
nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
#define nlm_get_bridge_regbase(node) \
(nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif
#endif
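
As a usage note for the accessors above: register indices such as
BRIDGE_DRAM_BAR(i) are passed to nlm_read_bridge_reg() together with the
base returned by nlm_get_bridge_regbase(). A hedged sketch only; the dump
function, loop bound, and printf are illustrative and assume the usual
kernel headers plus this one:

static void
xlp_dump_dram_bars(void)
{
	uint64_t base;
	int i;

	base = nlm_get_bridge_regbase(0);	/* bridge block on node 0 */
	for (i = 0; i < 8; i++)
		printf("DRAM BAR%d: 0x%x limit 0x%x\n", i,
		    nlm_read_bridge_reg(base, BRIDGE_DRAM_BAR(i)),
		    nlm_read_bridge_reg(base, BRIDGE_DRAM_LIMIT(i)));
}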

View File

@ -1,280 +0,0 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
* NETLOGIC_BSD */
#ifndef __NLM_COP0_H__
#define __NLM_COP0_H__
#define NLM_C0_INDEX 0
#define NLM_C0_RANDOM 1
#define NLM_C0_ENTRYLO0 2
#define NLM_C0_ENTRYLO1 3
#define NLM_C0_CONTEXT 4
#define NLM_C0_USERLOCAL 4
#define NLM_C0_PAGEMASK 5
#define NLM_C0_WIRED 6
#define NLM_C0_BADVADDR 8
#define NLM_C0_COUNT 9
#define NLM_C0_EIRR 9
#define NLM_C0_EIMR 9
#define NLM_C0_ENTRYHI 10
#define NLM_C0_COMPARE 11
#define NLM_C0_STATUS 12
#define NLM_C0_INTCTL 12
#define NLM_C0_SRSCTL 12
#define NLM_C0_CAUSE 13
#define NLM_C0_EPC 14
#define NLM_C0_PRID 15
#define NLM_C0_EBASE 15
#define NLM_C0_CONFIG 16
#define NLM_C0_CONFIG0 16
#define NLM_C0_CONFIG1 16
#define NLM_C0_CONFIG2 16
#define NLM_C0_CONFIG3 16
#define NLM_C0_CONFIG4 16
#define NLM_C0_CONFIG5 16
#define NLM_C0_CONFIG6 16
#define NLM_C0_CONFIG7 16
#define NLM_C0_WATCHLO 18
#define NLM_C0_WATCHHI 19
#define NLM_C0_XCONTEXT 20
#define NLM_C0_SCRATCH 22
#define NLM_C0_SCRATCH0 22
#define NLM_C0_SCRATCH1 22
#define NLM_C0_SCRATCH2 22
#define NLM_C0_SCRATCH3 22
#define NLM_C0_SCRATCH4 22
#define NLM_C0_SCRATCH5 22
#define NLM_C0_SCRATCH6 22
#define NLM_C0_SCRATCH7 22
#define NLM_C0_DEBUG 23
#define NLM_C0_DEPC 24
#define NLM_C0_PERFCNT 25
#define NLM_C0_PERFCNT0 25
#define NLM_C0_PERFCNT1 25
#define NLM_C0_TAGLO 28
#define NLM_C0_DATALO 28
#define NLM_C0_TAGHI 29
#define NLM_C0_DATAHI 29
#define NLM_C0_ERROREPC 30
#define NLM_C0_DESAVE 31
/* cop0 status bits */
#define NLM_STATUS_CP0_EN (1<<28)
#define NLM_STATUS_CP1_EN (1<<29)
#define NLM_STATUS_CP2_EN (1<<30)
#define NLM_STATUS_KX_EN (1<<7)
#define NLM_STATUS_UX_EN (1<<5)
#ifndef LOCORE
#define nlm_memory_barrier() \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
" sync\n\t" \
".set pop" \
::: "memory")
#define NLM_DEFINE_ACCESSORS32(name, reg, sel) \
static __inline__ uint32_t nlm_read_c0_##name(void) \
{ \
uint32_t __rv; \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"mfc0 %0, $%1, %2\n" \
".set pop\n" \
: "=r" (__rv) \
: "i" (reg), "i" (sel) \
); \
return __rv; \
} \
\
static __inline__ void nlm_write_c0_##name(uint32_t val) \
{ \
__asm__ __volatile__( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"mtc0 %0, $%1, %2\n" \
".set pop\n" \
:: "r" (val), "i" (reg), "i" (sel) \
); \
} struct __hack
/* struct __hack above swallows a semicolon - otherwise the macro
* usage below cannot have the terminating semicolon */
#if (__mips == 64)
#define NLM_DEFINE_ACCESSORS64(name, reg, sel) \
static __inline__ uint64_t nlm_read_c0_##name(void) \
{ \
uint64_t __rv; \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"dmfc0 %0,$%1,%2\n" \
".set pop\n" \
: "=r" (__rv) \
: "i" (reg), "i" (sel) ); \
return __rv; \
} \
\
static __inline__ void nlm_write_c0_##name(uint64_t val) \
{ \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"dmtc0 %0,$%1,%2\n" \
".set pop\n" \
:: "r" (val), "i" (reg), "i" (sel) ); \
} struct __hack
#else
#define NLM_DEFINE_ACCESSORS64(name, reg, sel) \
static __inline__ uint64_t nlm_read_c0_##name(void) \
{ \
uint32_t __high, __low; \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"dmfc0 $8, $%2, %3\n" \
"dsra32 %0, $8, 0\n" \
"sll %1, $8, 0\n" \
".set pop\n" \
: "=r"(__high), "=r"(__low) \
: "i"(reg), "i"(sel) \
: "$8" ); \
\
return (((uint64_t)__high << 32) | __low); \
} \
\
static __inline__ void nlm_write_c0_##name(uint64_t val) \
{ \
uint32_t __high = val >> 32; \
uint32_t __low = val & 0xffffffff; \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
".set mips64\n" \
"dsll32 $8, %1, 0\n" \
"dsll32 $9, %0, 0\n" \
"dsrl32 $8, $8, 0\n" \
"or $8, $8, $9\n" \
"dmtc0 $8, $%2, %3\n" \
".set pop\n" \
:: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
: "$8", "$9"); \
} struct __hack
#endif
NLM_DEFINE_ACCESSORS32(index, 0, 0);
NLM_DEFINE_ACCESSORS32(random, 1, 0);
NLM_DEFINE_ACCESSORS64(entrylo0, 2, 0);
NLM_DEFINE_ACCESSORS64(entrylo1, 3, 0);
NLM_DEFINE_ACCESSORS64(context, 4, 0);
NLM_DEFINE_ACCESSORS64(userlocal, 4, 0);
NLM_DEFINE_ACCESSORS32(pagemask, 5, 0);
NLM_DEFINE_ACCESSORS32(wired, 6, 0);
NLM_DEFINE_ACCESSORS64(badvaddr, 8, 0);
NLM_DEFINE_ACCESSORS32(count, 9, 0);
NLM_DEFINE_ACCESSORS64(eirr, 9, 6);
NLM_DEFINE_ACCESSORS64(eimr, 9, 7);
NLM_DEFINE_ACCESSORS64(entryhi, 10, 0);
NLM_DEFINE_ACCESSORS32(compare, 11, 0);
NLM_DEFINE_ACCESSORS32(status, 12, 0);
NLM_DEFINE_ACCESSORS32(intctl, 12, 1);
NLM_DEFINE_ACCESSORS32(srsctl, 12, 2);
NLM_DEFINE_ACCESSORS32(cause, 13, 0);
NLM_DEFINE_ACCESSORS64(epc, 14, 0);
NLM_DEFINE_ACCESSORS32(prid, 15, 0);
NLM_DEFINE_ACCESSORS32(ebase, 15, 1);
NLM_DEFINE_ACCESSORS32(config0, 16, 0);
NLM_DEFINE_ACCESSORS32(config1, 16, 1);
NLM_DEFINE_ACCESSORS32(config2, 16, 2);
NLM_DEFINE_ACCESSORS32(config3, 16, 3);
NLM_DEFINE_ACCESSORS32(config6, 16, 6);
NLM_DEFINE_ACCESSORS32(config7, 16, 7);
NLM_DEFINE_ACCESSORS64(watchlo0, 18, 0);
NLM_DEFINE_ACCESSORS32(watchhi0, 19, 0);
NLM_DEFINE_ACCESSORS64(xcontext, 20, 0);
NLM_DEFINE_ACCESSORS64(scratch0, 22, 0);
NLM_DEFINE_ACCESSORS64(scratch1, 22, 1);
NLM_DEFINE_ACCESSORS64(scratch2, 22, 2);
NLM_DEFINE_ACCESSORS64(scratch3, 22, 3);
NLM_DEFINE_ACCESSORS64(scratch4, 22, 4);
NLM_DEFINE_ACCESSORS64(scratch5, 22, 5);
NLM_DEFINE_ACCESSORS64(scratch6, 22, 6);
NLM_DEFINE_ACCESSORS64(scratch7, 22, 7);
NLM_DEFINE_ACCESSORS32(debug, 23, 0);
NLM_DEFINE_ACCESSORS32(depc, 24, 0);
NLM_DEFINE_ACCESSORS32(perfctrl0, 25, 0);
NLM_DEFINE_ACCESSORS64(perfcntr0, 25, 1);
NLM_DEFINE_ACCESSORS32(perfctrl1, 25, 2);
NLM_DEFINE_ACCESSORS64(perfcntr1, 25, 3);
NLM_DEFINE_ACCESSORS32(perfctrl2, 25, 4);
NLM_DEFINE_ACCESSORS64(perfcntr2, 25, 5);
NLM_DEFINE_ACCESSORS32(perfctrl3, 25, 6);
NLM_DEFINE_ACCESSORS64(perfcntr3, 25, 7);
NLM_DEFINE_ACCESSORS64(taglo0, 28, 0);
NLM_DEFINE_ACCESSORS64(taglo2, 28, 2);
NLM_DEFINE_ACCESSORS64(taghi0, 29, 0);
NLM_DEFINE_ACCESSORS64(taghi2, 29, 2);
NLM_DEFINE_ACCESSORS64(errorepc, 30, 0);
NLM_DEFINE_ACCESSORS64(desave, 31, 0);
static __inline__ int nlm_nodeid(void)
{
return (nlm_read_c0_ebase() >> 5) & 0x3;
}
static __inline__ int nlm_cpuid(void)
{
return nlm_read_c0_ebase() & 0x1f;
}
static __inline__ int nlm_threadid(void)
{
return nlm_read_c0_ebase() & 0x3;
}
static __inline__ int nlm_coreid(void)
{
return (nlm_read_c0_ebase() >> 2) & 0x7;
}
#endif
#endif

View File

@ -25,27 +25,28 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_COP2_H__
#define __NLM_COP2_H__
#ifndef __NLM_HAL_COP2_H__
#define __NLM_HAL_COP2_H__
#define XLP_COP2_TX_BUF_REG 0
#define XLP_COP2_RX_BUF_REG 1
#define XLP_COP2_TXMSGSTATUS_REG 2
#define XLP_COP2_RXMSGSTATUS_REG 3
#define XLP_COP2_MSGSTATUS1_REG 4
#define XLP_COP2_MSGCONFIG_REG 5
#define XLP_COP2_MSGCONFIG1_REG 6
#define COP2_TX_BUF 0
#define COP2_RX_BUF 1
#define COP2_TXMSGSTATUS 2
#define COP2_RXMSGSTATUS 3
#define COP2_MSGSTATUS1 4
#define COP2_MSGCONFIG 5
#define COP2_MSGCONFIG1 6
#define CROSSTHR_POPQ_EN 0x01
#define VC0_POPQ_EN 0x02
#define VC1_POPQ_EN 0x04
#define VC2_POPQ_EN 0x08
#define VC3_POPQ_EN 0x10
#define ALL_VC_POPQ_EN 0x1E
#define ALL_VC_CT_POPQ_EN 0x1F
#define CROSSTHR_POPQ_EN 0x01
#define VC0_POPQ_EN 0x02
#define VC1_POPQ_EN 0x04
#define VC2_POPQ_EN 0x08
#define VC3_POPQ_EN 0x10
#define ALL_VC_POPQ_EN 0x1E
#define ALL_VC_CT_POPQ_EN 0x1F
struct nlm_fmn_msg {
uint64_t msg[4];
@ -62,8 +63,7 @@ static inline uint32_t nlm_read_c2_##name(void) \
"mfc2 %0, $%1, %2\n" \
".set pop\n" \
: "=r" (__rv) \
: "i" (reg), "i" (sel) \
); \
: "i" (reg), "i" (sel)); \
return __rv; \
} \
\
@ -75,8 +75,7 @@ static inline void nlm_write_c2_##name(uint32_t val) \
".set mips64\n" \
"mtc2 %0, $%1, %2\n" \
".set pop\n" \
:: "r" (val), "i" (reg), "i" (sel) \
); \
: : "r" (val), "i" (reg), "i" (sel)); \
} struct __hack
#if (__mips == 64)
@ -91,7 +90,7 @@ static inline uint64_t nlm_read_c2_##name(void) \
"dmfc2 %0, $%1, %2\n" \
".set pop\n" \
: "=r" (__rv) \
: "i" (reg), "i" (sel) ); \
: "i" (reg), "i" (sel)); \
return __rv; \
} \
\
@ -103,7 +102,7 @@ static inline void nlm_write_c2_##name(uint64_t val) \
".set mips64\n" \
"dmtc2 %0, $%1, %2\n" \
".set pop\n" \
:: "r" (val), "i" (reg), "i" (sel) ); \
: : "r" (val), "i" (reg), "i" (sel)); \
} struct __hack
#else
@ -122,15 +121,15 @@ static inline uint64_t nlm_read_c2_##name(void) \
".set pop\n" \
: "=r"(__high), "=r"(__low) \
: "i"(reg), "i"(sel) \
: "$8" ); \
: "$8"); \
\
return (((uint64_t)__high << 32) | __low); \
return ((uint64_t)__high << 32) | __low; \
} \
\
static inline void nlm_write_c2_##name(uint64_t val) \
{ \
uint32_t __high = val >> 32; \
uint32_t __low = val & 0xffffffff; \
uint32_t __high = val >> 32; \
uint32_t __low = val & 0xffffffff; \
__asm__ __volatile__ ( \
".set push\n" \
".set noreorder\n" \
@ -141,113 +140,100 @@ static inline void nlm_write_c2_##name(uint64_t val) \
"or $8, $8, $9\n" \
"dmtc2 $8, $%2, %3\n" \
".set pop\n" \
:: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
:"$8", "$9"); \
: : "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
: "$8", "$9"); \
} struct __hack
#endif
NLM_DEFINE_COP2_ACCESSORS64(txbuf0, XLP_COP2_TX_BUF_REG, 0);
NLM_DEFINE_COP2_ACCESSORS64(txbuf1, XLP_COP2_TX_BUF_REG, 1);
NLM_DEFINE_COP2_ACCESSORS64(txbuf2, XLP_COP2_TX_BUF_REG, 2);
NLM_DEFINE_COP2_ACCESSORS64(txbuf3, XLP_COP2_TX_BUF_REG, 3);
NLM_DEFINE_COP2_ACCESSORS64(txbuf0, COP2_TX_BUF, 0);
NLM_DEFINE_COP2_ACCESSORS64(txbuf1, COP2_TX_BUF, 1);
NLM_DEFINE_COP2_ACCESSORS64(txbuf2, COP2_TX_BUF, 2);
NLM_DEFINE_COP2_ACCESSORS64(txbuf3, COP2_TX_BUF, 3);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf0, XLP_COP2_RX_BUF_REG, 0);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf1, XLP_COP2_RX_BUF_REG, 1);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf2, XLP_COP2_RX_BUF_REG, 2);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf3, XLP_COP2_RX_BUF_REG, 3);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf0, COP2_RX_BUF, 0);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf1, COP2_RX_BUF, 1);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf2, COP2_RX_BUF, 2);
NLM_DEFINE_COP2_ACCESSORS64(rxbuf3, COP2_RX_BUF, 3);
NLM_DEFINE_COP2_ACCESSORS32(txmsgstatus, XLP_COP2_TXMSGSTATUS_REG, 0);
NLM_DEFINE_COP2_ACCESSORS32(rxmsgstatus, XLP_COP2_RXMSGSTATUS_REG, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgstatus1, XLP_COP2_MSGSTATUS1_REG, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgconfig, XLP_COP2_MSGCONFIG_REG, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgconfig1, XLP_COP2_MSGCONFIG1_REG, 0);
NLM_DEFINE_COP2_ACCESSORS32(txmsgstatus, COP2_TXMSGSTATUS, 0);
NLM_DEFINE_COP2_ACCESSORS32(rxmsgstatus, COP2_RXMSGSTATUS, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgstatus1, COP2_MSGSTATUS1, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgconfig, COP2_MSGCONFIG, 0);
NLM_DEFINE_COP2_ACCESSORS32(msgconfig1, COP2_MSGCONFIG1, 0);
/* successful completion returns 1, else 0 */
static __inline__ int nlm_msgsend(int val)
static inline int
nlm_msgsend(int val)
{
int result;
__asm__ volatile (
".set push \n"
".set noreorder \n"
".set mips64 \n"
"move $8, %1 \n"
"sync \n"
"/* msgsnds $9, $8 */ \n"
".word 0x4a084801 \n"
"move %0, $9 \n"
".set pop \n"
".set push\n"
".set noreorder\n"
".set mips64\n"
"move $8, %1\n"
"sync\n"
"/* msgsnds $9, $8 */\n"
".word 0x4a084801\n"
"move %0, $9\n"
".set pop\n"
: "=r" (result)
: "r" (val)
: "$8", "$9"
);
: "$8", "$9");
return result;
}
static __inline__ int nlm_msgld(int vc)
static inline int
nlm_msgld(int vc)
{
int val;
__asm__ volatile (
".set push \n"
".set noreorder \n"
".set mips64 \n"
"move $8, %1 \n"
"/* msgld $9, $8 */ \n"
".word 0x4a084802 \n"
"move %0, $9 \n"
".set pop \n"
".set push\n"
".set noreorder\n"
".set mips64\n"
"move $8, %1\n"
"/* msgld $9, $8 */\n"
".word 0x4a084802\n"
"move %0, $9\n"
".set pop\n"
: "=r" (val)
: "r" (vc)
: "$8", "$9"
);
: "$8", "$9");
return val;
}
static __inline__ void nlm_msgwait(int vc)
static inline void
nlm_msgwait(int vc)
{
__asm__ volatile (
".set push \n"
".set noreorder \n"
".set mips64 \n"
"move $8, %0 \n"
"/* msgwait $8 */ \n"
".word 0x4a080003 \n"
".set pop \n"
:: "r" (vc)
: "$8"
);
".set push\n"
".set noreorder\n"
".set mips64\n"
"move $8, %0\n"
"/* msgwait $8 */\n"
".word 0x4a080003\n"
".set pop\n"
: : "r" (vc)
: "$8");
}
/* TODO this is not needed in n32 and n64 */
static __inline uint32_t
nlm_fmn_saveflags(void)
{
uint32_t sr = mips_rd_status();
mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_COP_2_BIT);
return (sr);
}
static __inline void
nlm_fmn_restoreflags(uint32_t sr)
{
mips_wr_status(sr);
}
static __inline__ int nlm_fmn_msgsend(int dstid, int size, int swcode,
struct nlm_fmn_msg *m)
static inline int
nlm_fmn_msgsend(int dstid, int size, int swcode, struct nlm_fmn_msg *m)
{
uint32_t flags, status;
int rv;
size -= 1;
flags = nlm_fmn_saveflags();
switch(size) {
case 3: nlm_write_c2_txbuf3(m->msg[3]);
case 2: nlm_write_c2_txbuf2(m->msg[2]);
case 1: nlm_write_c2_txbuf1(m->msg[1]);
case 0: nlm_write_c2_txbuf0(m->msg[0]);
flags = nlm_save_flags_cop2();
switch (size) {
case 3:
nlm_write_c2_txbuf3(m->msg[3]);
case 2:
nlm_write_c2_txbuf2(m->msg[2]);
case 1:
nlm_write_c2_txbuf1(m->msg[1]);
case 0:
nlm_write_c2_txbuf0(m->msg[0]);
}
dstid |= ((swcode << 24) | (size << 16));
@ -255,19 +241,19 @@ static __inline__ int nlm_fmn_msgsend(int dstid, int size, int swcode,
rv = !status;
if (rv != 0)
rv = nlm_read_c2_txmsgstatus();
nlm_fmn_restoreflags(flags);
nlm_restore_flags(flags);
return (rv);
return rv;
}
static __inline__ int nlm_fmn_msgrcv(int vc, int *srcid, int *size, int *code,
struct nlm_fmn_msg *m)
static inline int
nlm_fmn_msgrcv(int vc, int *srcid, int *size, int *code, struct nlm_fmn_msg *m)
{
uint32_t status;
uint32_t msg_status, flags;
int tmp_sz, rv;
flags = nlm_fmn_saveflags();
flags = nlm_save_flags_cop2();
status = nlm_msgld(vc); /* will return 0, if error */
rv = !status;
if (rv == 0) {
@ -276,38 +262,24 @@ static __inline__ int nlm_fmn_msgrcv(int vc, int *srcid, int *size, int *code,
*code = (msg_status >> 18) & 0xff;
*srcid = (msg_status >> 4) & 0xfff;
tmp_sz = *size - 1;
switch(tmp_sz) {
case 3: m->msg[3] = nlm_read_c2_rxbuf3();
case 2: m->msg[2] = nlm_read_c2_rxbuf2();
case 1: m->msg[1] = nlm_read_c2_rxbuf1();
case 0: m->msg[0] = nlm_read_c2_rxbuf0();
switch (tmp_sz) {
case 3:
m->msg[3] = nlm_read_c2_rxbuf3();
case 2:
m->msg[2] = nlm_read_c2_rxbuf2();
case 1:
m->msg[1] = nlm_read_c2_rxbuf1();
case 0:
m->msg[0] = nlm_read_c2_rxbuf0();
}
}
nlm_fmn_restoreflags(flags);
nlm_restore_flags(flags);
return rv;
}
/**
* nlm_fmn_cpu_init() initializes the per-h/w thread cop2 w.r.t the following
* configuration parameters. It needs to be individually setup on each
* hardware thread.
*
* int_vec - interrupt vector getting placed into msgconfig reg
* ctpe - cross thread message pop enable. When set to 1, the thread (h/w cpu)
* associated where this cop2 register is setup, can pop messages
* intended for any other thread in the same core.
* v0pe - VC0 pop message request mode enable. When set to 1, the thread
* can send pop requests to vc0.
* v1pe - VC1 pop message request mode enable. When set to 1, the thread
* can send pop requests to vc1.
* v2pe - VC2 pop message request mode enable. When set to 1, the thread
* can send pop requests to vc2.
* v3pe - VC3 pop message request mode enable. When set to 1, the thread
* can send pop requests to vc3.
*/
static __inline__ void nlm_fmn_cpu_init(int int_vec, int ctpe, int v0pe,
int v1pe, int v2pe, int v3pe)
static inline void
nlm_fmn_cpu_init(int int_vec, int ctpe, int v0pe, int v1pe, int v2pe, int v3pe)
{
uint32_t val = nlm_read_c2_msgconfig();
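
To illustrate the reworked FMN helpers above, a hedged example of sending a
single-entry message; the wrapper function and its name are illustrative,
not part of this commit:

static int
xlp_fmn_send_one(int dstid, int swcode, uint64_t data)
{
	struct nlm_fmn_msg m;

	m.msg[0] = data;
	/* size counts 64-bit message words; a nonzero return means failure */
	return (nlm_fmn_msgsend(dstid, 1, swcode, &m));
}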

View File

@ -25,46 +25,168 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_CPUCONTROL_H__
#define __NLM_CPUCONTROL_H__
#ifndef __NLM_HAL_CPUCONTROL_H__
#define __NLM_HAL_CPUCONTROL_H__
#define XLP_CPU_BLOCKID_IFU 0
#define XLP_CPU_BLOCKID_ICU 1
#define CPU_BLOCKID_IFU 0
#define CPU_BLOCKID_ICU 1
#define CPU_BLOCKID_IEU 2
#define CPU_BLOCKID_LSU 3
#define CPU_BLOCKID_MMU 4
#define CPU_BLOCKID_PRF 5
#define CPU_BLOCKID_SCH 7
#define CPU_BLOCKID_SCU 8
#define CPU_BLOCKID_FPU 9
#define CPU_BLOCKID_MAP 10
#define XLP_CPU_BLOCKID_IEU 2
#define XLP_CPU_BLOCKID_LSU 3
#define XLP_LSU_DEFEATURE 0x304
#define XLP_LSU_CERRLOG_REGID 0x09
#define XLP_CPU_BLOCKID_MMU 4
#define XLP_CPU_BLOCKID_PRF 5
#define XLP_CPU_BLOCKID_SCH 7
#define XLP_SCHED_DEFEATURE 0x700
#define XLP_CPU_BLOCKID_SCU 8
#define XLP_CPU_BLOCKID_FPU 9
#define XLP_CPU_BLOCKID_MAP 10
#define LSU_DEFEATURE 0x304
#define LSU_CERRLOG_REGID 0x09
#define SCHED_DEFEATURE 0x700
/* Offsets of interest from the 'MAP' Block */
#define XLP_BLKID_MAP_THREADMODE 0x00
#define XLP_BLKID_MAP_EXT_EBASE_ENABLE 0x04
#define XLP_BLKID_MAP_CCDI_CONFIG 0x08
#define XLP_BLKID_MAP_THRD0_CCDI_STATUS 0x0c
#define XLP_BLKID_MAP_THRD1_CCDI_STATUS 0x10
#define XLP_BLKID_MAP_THRD2_CCDI_STATUS 0x14
#define XLP_BLKID_MAP_THRD3_CCDI_STATUS 0x18
#define XLP_BLKID_MAP_THRD0_DEBUG_MODE 0x1c
#define XLP_BLKID_MAP_THRD1_DEBUG_MODE 0x20
#define XLP_BLKID_MAP_THRD2_DEBUG_MODE 0x24
#define XLP_BLKID_MAP_THRD3_DEBUG_MODE 0x28
#define XLP_BLKID_MAP_MISC_STATE 0x60
#define XLP_BLKID_MAP_DEBUG_READ_CTL 0x64
#define XLP_BLKID_MAP_DEBUG_READ_REG0 0x68
#define XLP_BLKID_MAP_DEBUG_READ_REG1 0x6c
#define MAP_THREADMODE 0x00
#define MAP_EXT_EBASE_ENABLE 0x04
#define MAP_CCDI_CONFIG 0x08
#define MAP_THRD0_CCDI_STATUS 0x0c
#define MAP_THRD1_CCDI_STATUS 0x10
#define MAP_THRD2_CCDI_STATUS 0x14
#define MAP_THRD3_CCDI_STATUS 0x18
#define MAP_THRD0_DEBUG_MODE 0x1c
#define MAP_THRD1_DEBUG_MODE 0x20
#define MAP_THRD2_DEBUG_MODE 0x24
#define MAP_THRD3_DEBUG_MODE 0x28
#define MAP_MISC_STATE 0x60
#define MAP_DEBUG_READ_CTL 0x64
#define MAP_DEBUG_READ_REG0 0x68
#define MAP_DEBUG_READ_REG1 0x6c
#define MMU_SETUP 0x400
#define MMU_LFSRSEED 0x401
#define MMU_HPW_NUM_PAGE_LVL 0x410
#define MMU_PGWKR_PGDBASE 0x411
#define MMU_PGWKR_PGDSHFT 0x412
#define MMU_PGWKR_PGDMASK 0x413
#define MMU_PGWKR_PUDSHFT 0x414
#define MMU_PGWKR_PUDMASK 0x415
#define MMU_PGWKR_PMDSHFT 0x416
#define MMU_PGWKR_PMDMASK 0x417
#define MMU_PGWKR_PTESHFT 0x418
#define MMU_PGWKR_PTEMASK 0x419
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#if defined(__mips_n64) || defined(__mips_n32)
static __inline uint64_t
nlm_mfcr(uint32_t reg)
{
uint64_t res;
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
"move $9, %1\n\t"
".word 0x71280018\n\t" /* mfcr $8, $9 */
"move %0, $8\n\t"
".set pop\n"
: "=r" (res) : "r"(reg)
: "$8", "$9"
);
return (res);
}
static __inline void
nlm_mtcr(uint32_t reg, uint64_t value)
{
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
"move $8, %0\n"
"move $9, %1\n"
".word 0x71280019\n" /* mtcr $8, $9 */
".set pop\n"
:
: "r" (value), "r" (reg)
: "$8", "$9"
);
}
#else /* !(defined(__mips_n64) || defined(__mips_n32)) */
static __inline__ uint64_t
nlm_mfcr(uint32_t reg)
{
uint32_t hi, lo;
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"move $8, %2\n"
".word 0x71090018\n"
"nop \n"
"dsra32 %0, $9, 0\n"
"sll %1, $9, 0\n"
".set pop\n"
: "=r"(hi), "=r"(lo)
: "r"(reg) : "$8", "$9");
return (((uint64_t)hi) << 32) | lo;
}
static __inline__ void
nlm_mtcr(uint32_t reg, uint64_t val)
{
uint32_t hi, lo;
hi = val >> 32;
lo = val & 0xffffffff;
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"move $9, %0\n"
"dsll32 $9, %1, 0\n"
"dsll32 $8, %0, 0\n"
"dsrl32 $9, $9, 0\n"
"or $9, $9, $8\n"
"move $8, %2\n"
".word 0x71090019\n"
"nop \n"
".set pop\n"
: :"r"(hi), "r"(lo), "r"(reg)
: "$8", "$9");
}
#endif /* (defined(__mips_n64) || defined(__mips_n32)) */
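
As a hedged illustration of the mfcr/mtcr pair, a read-modify-write of an internal CPU register could look like the following; LSU_DEFEATURE is defined above, but the bit being set is a placeholder:

static void
xlp_defeature_example(void)
{
        uint64_t lsu;

        lsu = nlm_mfcr(LSU_DEFEATURE);                  /* read internal LSU register */
        nlm_mtcr(LSU_DEFEATURE, lsu | (1ULL << 14));    /* bit 14 is illustrative */
}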
/* hashindex_en = 1 to enable hash mode, hashindex_en=0 to disable
* global_mode = 1 to enable global mode, global_mode=0 to disable
* clk_gating = 0 to enable clock gating, clk_gating=1 to disable
*/
static __inline__ void nlm_mmu_setup(int hashindex_en, int global_mode,
int clk_gating)
{
uint32_t mmusetup = 0;
mmusetup |= (hashindex_en << 13);
mmusetup |= (clk_gating << 3);
mmusetup |= (global_mode << 0);
nlm_mtcr(MMU_SETUP, mmusetup);
}
static __inline__ void nlm_mmu_lfsr_seed (int thr0_seed, int thr1_seed,
int thr2_seed, int thr3_seed)
{
uint32_t seed = nlm_mfcr(MMU_LFSRSEED);
seed |= ((thr3_seed & 0x7f) << 23);
seed |= ((thr2_seed & 0x7f) << 16);
seed |= ((thr1_seed & 0x7f) << 7);
seed |= ((thr0_seed & 0x7f) << 0);
nlm_mtcr(MMU_LFSRSEED, seed);
}
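
A possible bring-up sequence using the two helpers above, with all values illustrative: hashed TLB indexing on, global mode off, clock gating left enabled, and a distinct 7-bit LFSR seed per thread.

static void
xlp_mmu_init_example(void)
{
        nlm_mmu_setup(1, 0, 0);                 /* hash index on, global off, gating on */
        nlm_mmu_lfsr_seed(0x11, 0x22, 0x33, 0x44);
}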
#endif /* __ASSEMBLY__ */
#endif /* __NLM_CPUCONTROL_H__ */

View File

@ -25,20 +25,19 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
* NETLOGIC_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/fmn.h>
#include <sys/systm.h>
uint32_t bad_xlp_num_nodes = 4;
#include <machine/cpufunc.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/fmn.h>
/* XLP can take up to 16K FMN messages per hardware queue, as spill.
* But configuring all 16K causes the total spill memory required
* to blow up to 192MB for a single chip configuration, and 768MB in four
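As a rough check on those figures, using the 12-byte worst-case message size and 1024 output queues defined in the fmnv2.h changes below:

16384 msgs/queue x 12 bytes/msg x 1024 queues = ~192 MB per chip
192 MB x 4 chips = 768 MB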
@ -68,27 +67,7 @@ uint64_t nlm_cms_spill_total_messages = 1 * 1024;
* For all 4 nodes, there are 18*4 = 72 FMN stations
*/
uint32_t nlm_cms_total_stations = 18 * 4 /*xlp_num_nodes*/;
uint32_t cms_onchip_seg_availability[XLP_CMS_ON_CHIP_PER_QUEUE_SPACE];
int nlm_cms_verify_credit_config (int spill_en, int tot_credit)
{
/* Note: In XLP there seem to be no mechanism to read back
* the credit count that has been programmed into a sid / did pair;
* since we have only one register 0x2000 to read.
* Hence it looks like all credit mgmt/verification needs to
* be done by software. Software could keep track of total credits
* getting programmed and verify it from this function.
*/
if (spill_en) {
/* TODO */
}
if (tot_credit > (XLP_CMS_ON_CHIP_MESG_SPACE*bad_xlp_num_nodes))
return 1; /* credits overflowed - should not happen */
return 0;
}
uint32_t cms_onchip_seg_availability[CMS_ON_CHIP_PER_QUEUE_SPACE];
/**
* Takes inputs as node, queue_size and maximum number of queues.
@ -163,7 +142,7 @@ void nlm_cms_setup_credits(uint64_t base, int destid, int srcid, int credit)
uint32_t val;
val = ((credit << 24) | (destid << 12) | (srcid << 0));
nlm_wreg_cms(base, XLP_CMS_OUTPUTQ_CREDIT_CFG_REG, val);
nlm_write_cms_reg(base, CMS_OUTPUTQ_CREDIT_CFG, val);
}
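
For reference, a single credit-programming call mirrors the disabled block that follows; the queue/station pairing here is illustrative, the macros are the ones from fmnv2.h:

        nlm_cms_setup_credits(base, CMS_POPQ_QID(0), CMS_POE_SRC_STID,
            CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
            nlm_cms_spill_total_messages));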
@ -182,93 +161,93 @@ int nlm_cms_config_onchip_queue (uint64_t base, uint64_t spill_base,
#if 0
/* configure credits for src cpu0, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU0_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU0_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu1, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU1_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU1_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu2, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU2_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU2_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu3, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU3_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU3_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu4, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU4_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU4_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu5, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU5_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU5_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu6, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU6_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU6_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cpu7, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CPU7_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CPU7_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src pcie0, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_PCIE0_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_PCIE0_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src pcie1, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_PCIE1_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_PCIE1_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src pcie2, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_PCIE2_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_PCIE2_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src pcie3, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_PCIE3_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_PCIE3_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src dte, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_DTE_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_DTE_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src rsa_ecc, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_RSA_ECC_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_RSA_ECC_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src crypto, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CRYPTO_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CRYPTO_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src cmp, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_CMP_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_CMP_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src poe, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_POE_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_POE_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
/* configure credits for src nae, on this queue */
nlm_cms_setup_credits(base, qid, XLP_CMS_NAE_SRC_STID,
XLP_CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_setup_credits(base, qid, CMS_NAE_SRC_STID,
CMS_DEFAULT_CREDIT(nlm_cms_total_stations,
nlm_cms_spill_total_messages));
#endif
@ -289,20 +268,20 @@ int nlm_cms_alloc_spill_q(uint64_t base, int qid, uint64_t spill_base,
uint64_t queue_config;
uint32_t spill_start;
if(nsegs > XLP_CMS_MAX_SPILL_SEGMENTS_PER_QUEUE) {
if(nsegs > CMS_MAX_SPILL_SEGMENTS_PER_QUEUE) {
return 1;
}
queue_config = nlm_rdreg_cms(base,(XLP_CMS_OUTPUTQ_CONFIG_REG(qid)));
queue_config = nlm_read_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)));
spill_start = ((spill_base >> 12) & 0x3F);
/* Spill configuration */
queue_config = (((uint64_t)XLP_CMS_SPILL_ENA << 62) |
queue_config = (((uint64_t)CMS_SPILL_ENA << 62) |
(((spill_base >> 18) & 0x3FFFFF) << 27) |
(spill_start + nsegs - 1) << 21 |
(spill_start << 15));
nlm_wreg_cms(base,(XLP_CMS_OUTPUTQ_CONFIG_REG(qid)),queue_config);
nlm_write_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)),queue_config);
return 0;
}
@ -320,8 +299,8 @@ int nlm_cms_alloc_onchip_q(uint64_t base, int qid, int nsegs)
int onchipbase, start, last;
uint8_t i;
if( ((curr_end + nsegs) > XLP_CMS_MAX_ONCHIP_SEGMENTS) ||
(nsegs > XLP_CMS_ON_CHIP_PER_QUEUE_SPACE) ) {
if( ((curr_end + nsegs) > CMS_MAX_ONCHIP_SEGMENTS) ||
(nsegs > CMS_ON_CHIP_PER_QUEUE_SPACE) ) {
/* Invalid configuration */
return 1;
}
@ -347,15 +326,15 @@ int nlm_cms_alloc_onchip_q(uint64_t base, int qid, int nsegs)
cms_onchip_seg_availability[onchipbase] |= (1 << i);
}
queue_config = nlm_rdreg_cms(base,(XLP_CMS_OUTPUTQ_CONFIG_REG(qid)));
queue_config = nlm_read_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)));
/* On chip configuration */
queue_config = (((uint64_t)XLP_CMS_QUEUE_ENA << 63) |
queue_config = (((uint64_t)CMS_QUEUE_ENA << 63) |
((onchipbase & 0x1f) << 10) |
((last & 0x1f) << 5) |
(start & 0x1f));
nlm_wreg_cms(base,(XLP_CMS_OUTPUTQ_CONFIG_REG(qid)),queue_config);
nlm_write_cms_reg(base,(CMS_OUTPUTQ_CONFIG(qid)),queue_config);
return 0;
}
@ -367,85 +346,86 @@ void nlm_cms_default_setup(int node, uint64_t spill_base, int spill_en,
int queue;
uint64_t base;
base = nlm_regbase_cms(node);
base = nlm_get_cms_regbase(node);
for(j=0; j<1024; j++) {
printf("Qid:0x%04d Val:0x%016jx\n",j, (uintmax_t)nlm_cms_get_onchip_queue (base, j));
printf("Qid:0x%04d Val:0x%016jx\n",j,
(uintmax_t)nlm_cms_get_onchip_queue (base, j));
}
/* Enable all cpu push queues */
for (j=0; j<XLP_MAX_CORES; j++)
for (k=0; k<XLP_MAX_THREADS; k++)
for (vc=0; vc<XLP_CMS_MAX_VCPU_VC; vc++) {
for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
/* TODO : remove this once SMP works */
if( (j == 0) && (k == 0) )
continue;
queue = XLP_CMS_CPU_PUSHQ(node, j, k, vc);
queue = CMS_CPU_PUSHQ(node, j, k, vc);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable pcie 0 push queue */
for (j=XLP_CMS_PCIE0_QID(0); j<XLP_CMS_PCIE0_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable pcie 1 push queue */
for (j=XLP_CMS_PCIE1_QID(0); j<XLP_CMS_PCIE1_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable pcie 2 push queue */
for (j=XLP_CMS_PCIE2_QID(0); j<XLP_CMS_PCIE2_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable pcie 3 push queue */
for (j=XLP_CMS_PCIE3_QID(0); j<XLP_CMS_PCIE3_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable DTE push queue */
for (j=XLP_CMS_DTE_QID(0); j<XLP_CMS_DTE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable RSA/ECC push queue */
for (j=XLP_CMS_RSA_ECC_QID(0); j<XLP_CMS_RSA_ECC_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable crypto push queue */
for (j=XLP_CMS_CRYPTO_QID(0); j<XLP_CMS_CRYPTO_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable CMP push queue */
for (j=XLP_CMS_CMP_QID(0); j<XLP_CMS_CMP_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable POE push queue */
for (j=XLP_CMS_POE_QID(0); j<XLP_CMS_POE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable NAE push queue */
for (j=XLP_CMS_NAE_QID(0); j<XLP_CMS_NAE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue, spill_en);
}
/* Enable all pop queues */
if (popq_en) {
for (j=XLP_CMS_POPQ_QID(0); j<XLP_CMS_POPQ_MAXQID; j++) {
queue = XLP_CMS_POPQ(node, j);
for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
queue = CMS_POPQ(node, j);
nlm_cms_config_onchip_queue(base, spill_base, queue,
spill_en);
}
@ -454,16 +434,16 @@ void nlm_cms_default_setup(int node, uint64_t spill_base, int spill_en,
uint64_t nlm_cms_get_onchip_queue (uint64_t base, int qid)
{
return nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
return nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
}
void nlm_cms_set_onchip_queue (uint64_t base, int qid, uint64_t val)
{
uint64_t rdval;
rdval = nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
rdval = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
rdval |= val;
nlm_wreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid), rdval);
nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), rdval);
}
void nlm_cms_per_queue_level_intr(uint64_t base, int qid, int sub_type,
@ -471,12 +451,12 @@ void nlm_cms_per_queue_level_intr(uint64_t base, int qid, int sub_type,
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
val |= (((uint64_t)sub_type<<54) |
((uint64_t)intr_val<<56));
nlm_wreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid), val);
nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), val);
}
void nlm_cms_level_intr(int node, int sub_type, int intr_val)
@ -485,78 +465,78 @@ void nlm_cms_level_intr(int node, int sub_type, int intr_val)
int queue;
uint64_t base;
base = nlm_regbase_cms(node);
base = nlm_get_cms_regbase(node);
/* setup level intr config on all cpu push queues */
for (j=0; j<XLP_MAX_CORES; j++)
for (k=0; k<XLP_MAX_THREADS; k++)
for (vc=0; vc<XLP_CMS_MAX_VCPU_VC; vc++) {
queue = XLP_CMS_CPU_PUSHQ(node, j, k, vc);
for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
queue = CMS_CPU_PUSHQ(node, j, k, vc);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all pcie 0 push queue */
for (j=XLP_CMS_PCIE0_QID(0); j<XLP_CMS_PCIE0_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all pcie 1 push queue */
for (j=XLP_CMS_PCIE1_QID(0); j<XLP_CMS_PCIE1_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all pcie 2 push queue */
for (j=XLP_CMS_PCIE2_QID(0); j<XLP_CMS_PCIE2_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all pcie 3 push queue */
for (j=XLP_CMS_PCIE3_QID(0); j<XLP_CMS_PCIE3_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all DTE push queue */
for (j=XLP_CMS_DTE_QID(0); j<XLP_CMS_DTE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all RSA/ECC push queue */
for (j=XLP_CMS_RSA_ECC_QID(0); j<XLP_CMS_RSA_ECC_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all crypto push queue */
for (j=XLP_CMS_CRYPTO_QID(0); j<XLP_CMS_CRYPTO_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all CMP push queue */
for (j=XLP_CMS_CMP_QID(0); j<XLP_CMS_CMP_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all POE push queue */
for (j=XLP_CMS_POE_QID(0); j<XLP_CMS_POE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all NAE push queue */
for (j=XLP_CMS_NAE_QID(0); j<XLP_CMS_NAE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
/* setup level intr config on all pop queues */
for (j=XLP_CMS_POPQ_QID(0); j<XLP_CMS_POPQ_MAXQID; j++) {
queue = XLP_CMS_POPQ(node, j);
for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
queue = CMS_POPQ(node, j);
nlm_cms_per_queue_level_intr(base, queue, sub_type, intr_val);
}
}
@ -566,12 +546,12 @@ void nlm_cms_per_queue_timer_intr(uint64_t base, int qid, int sub_type,
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
val |= (((uint64_t)sub_type<<49) |
((uint64_t)intr_val<<51));
nlm_wreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid), val);
nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), val);
}
void nlm_cms_timer_intr(int node, int en, int sub_type, int intr_val)
@ -580,78 +560,78 @@ void nlm_cms_timer_intr(int node, int en, int sub_type, int intr_val)
int queue;
uint64_t base;
base = nlm_regbase_cms(node);
base = nlm_get_cms_regbase(node);
/* setup timer intr config on all cpu push queues */
for (j=0; j<XLP_MAX_CORES; j++)
for (k=0; k<XLP_MAX_THREADS; k++)
for (vc=0; vc<XLP_CMS_MAX_VCPU_VC; vc++) {
queue = XLP_CMS_CPU_PUSHQ(node, j, k, vc);
for (vc=0; vc<CMS_MAX_VCPU_VC; vc++) {
queue = CMS_CPU_PUSHQ(node, j, k, vc);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all pcie 0 push queue */
for (j=XLP_CMS_PCIE0_QID(0); j<XLP_CMS_PCIE0_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE0_QID(0); j<CMS_PCIE0_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all pcie 1 push queue */
for (j=XLP_CMS_PCIE1_QID(0); j<XLP_CMS_PCIE1_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE1_QID(0); j<CMS_PCIE1_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all pcie 2 push queue */
for (j=XLP_CMS_PCIE2_QID(0); j<XLP_CMS_PCIE2_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE2_QID(0); j<CMS_PCIE2_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all pcie 3 push queue */
for (j=XLP_CMS_PCIE3_QID(0); j<XLP_CMS_PCIE3_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_PCIE3_QID(0); j<CMS_PCIE3_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all DTE push queue */
for (j=XLP_CMS_DTE_QID(0); j<XLP_CMS_DTE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_DTE_QID(0); j<CMS_DTE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all RSA/ECC push queue */
for (j=XLP_CMS_RSA_ECC_QID(0); j<XLP_CMS_RSA_ECC_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_RSA_ECC_QID(0); j<CMS_RSA_ECC_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all crypto push queue */
for (j=XLP_CMS_CRYPTO_QID(0); j<XLP_CMS_CRYPTO_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CRYPTO_QID(0); j<CMS_CRYPTO_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all CMP push queue */
for (j=XLP_CMS_CMP_QID(0); j<XLP_CMS_CMP_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_CMP_QID(0); j<CMS_CMP_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all POE push queue */
for (j=XLP_CMS_POE_QID(0); j<XLP_CMS_POE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_POE_QID(0); j<CMS_POE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all NAE push queue */
for (j=XLP_CMS_NAE_QID(0); j<XLP_CMS_NAE_MAXQID; j++) {
queue = XLP_CMS_IO_PUSHQ(node, j);
for (j=CMS_NAE_QID(0); j<CMS_NAE_MAXQID; j++) {
queue = CMS_IO_PUSHQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
/* setup timer intr config on all pop queues */
for (j=XLP_CMS_POPQ_QID(0); j<XLP_CMS_POPQ_MAXQID; j++) {
queue = XLP_CMS_POPQ(node, j);
for (j=CMS_POPQ_QID(0); j<CMS_POPQ_MAXQID; j++) {
queue = CMS_POPQ(node, j);
nlm_cms_per_queue_timer_intr(base, queue, sub_type, intr_val);
}
}
@ -660,7 +640,7 @@ void nlm_cms_timer_intr(int node, int en, int sub_type, int intr_val)
int nlm_cms_outputq_intr_check(uint64_t base, int qid)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
return ((val >> 59) & 0x1);
}
@ -668,77 +648,77 @@ int nlm_cms_outputq_intr_check(uint64_t base, int qid)
void nlm_cms_outputq_clr_intr(uint64_t base, int qid)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid));
val = nlm_read_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid));
val |= (1ULL<<59);
nlm_wreg_cms(base, XLP_CMS_OUTPUTQ_CONFIG_REG(qid), val);
nlm_write_cms_reg(base, CMS_OUTPUTQ_CONFIG(qid), val);
}
void nlm_cms_illegal_dst_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<8);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_timeout_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<7);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_biu_error_resp_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<6);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_spill_uncorrectable_ecc_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<5) | (en<<3);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_spill_correctable_ecc_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<4) | (en<<2);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_outputq_uncorrectable_ecc_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<1);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_outputq_correctable_ecc_error_intr(uint64_t base, int en)
{
uint64_t val;
val = nlm_rdreg_cms(base, XLP_CMS_MSG_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_MSG_CONFIG);
val |= (en<<0);
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
uint64_t nlm_cms_network_error_status(uint64_t base)
{
return nlm_rdreg_cms(base, XLP_CMS_MSG_ERR_REG);
return nlm_read_cms_reg(base, CMS_MSG_ERR);
}
int nlm_cms_get_net_error_code(uint64_t err)
@ -770,20 +750,20 @@ void nlm_cms_trace_setup(uint64_t base, int en, uint64_t trace_base,
{
uint64_t val;
nlm_wreg_cms(base, XLP_CMS_TRACE_BASE_ADDR_REG, trace_base);
nlm_wreg_cms(base, XLP_CMS_TRACE_LIMIT_ADDR_REG, trace_limit);
nlm_write_cms_reg(base, CMS_TRACE_BASE_ADDR, trace_base);
nlm_write_cms_reg(base, CMS_TRACE_LIMIT_ADDR, trace_limit);
val = nlm_rdreg_cms(base, XLP_CMS_TRACE_CONFIG_REG);
val = nlm_read_cms_reg(base, CMS_TRACE_CONFIG);
val |= (((uint64_t)match_dstid_en << 39) |
((dst_id & 0xfff) << 24) |
(match_srcid_en << 23) |
((src_id & 0xfff) << 8) |
(wrap << 1) |
(en << 0));
nlm_wreg_cms(base, XLP_CMS_MSG_CONFIG_REG, val);
nlm_write_cms_reg(base, CMS_MSG_CONFIG, val);
}
void nlm_cms_endian_byte_swap (uint64_t base, int en)
{
nlm_wreg_cms(base, XLP_CMS_MSG_ENDIAN_SWAP_REG, en);
nlm_write_cms_reg(base, CMS_MSG_ENDIAN_SWAP, en);
}

View File

@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_FMNV2_H__
#define __NLM_FMNV2_H__
@ -38,156 +39,156 @@
*/
/* FMN configuration registers */
#define XLP_CMS_OUTPUTQ_CONFIG_REG(i) ((i)*2)
#define XLP_CMS_MAX_OUTPUTQ 1024
#define XLP_CMS_OUTPUTQ_CREDIT_CFG_REG (0x2000/4)
#define XLP_CMS_MSG_CONFIG_REG (0x2008/4)
#define XLP_CMS_MSG_ERR_REG (0x2010/4)
#define XLP_CMS_TRACE_CONFIG_REG (0x2018/4)
#define XLP_CMS_TRACE_BASE_ADDR_REG (0x2020/4)
#define XLP_CMS_TRACE_LIMIT_ADDR_REG (0x2028/4)
#define XLP_CMS_TRACE_CURRENT_ADDR_REG (0x2030/4)
#define XLP_CMS_MSG_ENDIAN_SWAP_REG (0x2038/4)
#define CMS_OUTPUTQ_CONFIG(i) ((i)*2)
#define CMS_MAX_OUTPUTQ 1024
#define CMS_OUTPUTQ_CREDIT_CFG (0x2000/4)
#define CMS_MSG_CONFIG (0x2008/4)
#define CMS_MSG_ERR (0x2010/4)
#define CMS_TRACE_CONFIG (0x2018/4)
#define CMS_TRACE_BASE_ADDR (0x2020/4)
#define CMS_TRACE_LIMIT_ADDR (0x2028/4)
#define CMS_TRACE_CURRENT_ADDR (0x2030/4)
#define CMS_MSG_ENDIAN_SWAP (0x2038/4)
#define XLP_CMS_CPU_PUSHQ(node, core, thread, vc) \
#define CMS_CPU_PUSHQ(node, core, thread, vc) \
(((node)<<10) | ((core)<<4) | ((thread)<<2) | ((vc)<<0))
#define XLP_CMS_POPQ(node, queue) (((node)<<10) | (queue))
#define XLP_CMS_IO_PUSHQ(node, queue) (((node)<<10) | (queue))
#define CMS_POPQ(node, queue) (((node)<<10) | (queue))
#define CMS_IO_PUSHQ(node, queue) (((node)<<10) | (queue))
#define XLP_CMS_POPQ_QID(i) (128+(i))
#define XLP_CMS_POPQ_MAXQID 255
#define XLP_CMS_PCIE0_QID(i) (256+(i))
#define XLP_CMS_PCIE0_MAXQID 257
#define XLP_CMS_PCIE1_QID(i) (258+(i))
#define XLP_CMS_PCIE1_MAXQID 259
#define XLP_CMS_PCIE2_QID(i) (260+(i))
#define XLP_CMS_PCIE2_MAXQID 261
#define XLP_CMS_PCIE3_QID(i) (262+(i))
#define XLP_CMS_PCIE3_MAXQID 263
#define XLP_CMS_DTE_QID(i) (264+(i))
#define XLP_CMS_DTE_MAXQID 267
#define XLP_CMS_RSA_ECC_QID(i) (272+(i))
#define XLP_CMS_RSA_ECC_MAXQID 280
#define XLP_CMS_CRYPTO_QID(i) (281+(i))
#define XLP_CMS_CRYPTO_MAXQID 296
#define CMS_POPQ_QID(i) (128+(i))
#define CMS_POPQ_MAXQID 255
#define CMS_PCIE0_QID(i) (256+(i))
#define CMS_PCIE0_MAXQID 257
#define CMS_PCIE1_QID(i) (258+(i))
#define CMS_PCIE1_MAXQID 259
#define CMS_PCIE2_QID(i) (260+(i))
#define CMS_PCIE2_MAXQID 261
#define CMS_PCIE3_QID(i) (262+(i))
#define CMS_PCIE3_MAXQID 263
#define CMS_DTE_QID(i) (264+(i))
#define CMS_DTE_MAXQID 267
#define CMS_RSA_ECC_QID(i) (272+(i))
#define CMS_RSA_ECC_MAXQID 280
#define CMS_CRYPTO_QID(i) (281+(i))
#define CMS_CRYPTO_MAXQID 296
/* TODO PCI header register 0x3C says CMP starts at 297(0x129) VERIFY */
#define XLP_CMS_CMP_QID(i) (298+(i))
#define XLP_CMS_CMP_MAXQID 305
#define XLP_CMS_POE_QID(i) (384+(i))
#define XLP_CMS_POE_MAXQID 391
#define XLP_CMS_NAE_QID(i) (476+(i))
#define XLP_CMS_NAE_MAXQID 1023
#define CMS_CMP_QID(i) (298+(i))
#define CMS_CMP_MAXQID 305
#define CMS_POE_QID(i) (384+(i))
#define CMS_POE_MAXQID 391
#define CMS_NAE_QID(i) (476+(i))
#define CMS_NAE_MAXQID 1023
#define XLP_CMS_NAE_TX_VC_BASE 476
#define XLP_CMS_NAE_TX_VC_LIMIT 999
#define XLP_CMS_NAE_RX_VC_BASE 1000
#define XLP_CMS_NAE_RX_VC_LIMIT 1019
#define CMS_NAE_TX_VC_BASE 476
#define CMS_NAE_TX_VC_LIMIT 999
#define CMS_NAE_RX_VC_BASE 1000
#define CMS_NAE_RX_VC_LIMIT 1019
#define XLP_MAX_CMS_QUEUES 1024
#define MAX_CMS_QUEUES 1024
/* FMN Level Interrupt Type */
#define XLP_CMS_LVL_INTR_DISABLE 0
#define XLP_CMS_LVL_LOW_WATERMARK 1
#define XLP_CMS_LVL_HI_WATERMARK 2
#define CMS_LVL_INTR_DISABLE 0
#define CMS_LVL_LOW_WATERMARK 1
#define CMS_LVL_HI_WATERMARK 2
/* FMN Level interrupt trigger values */
#define XLP_CMS_QUEUE_NON_EMPTY 0
#define XLP_CMS_QUEUE_QUARTER_FULL 1
#define XLP_CMS_QUEUE_HALF_FULL 2
#define XLP_CMS_QUEUE_THREE_QUARTER_FULL 3
#define XLP_CMS_QUEUE_FULL 4
#define CMS_QUEUE_NON_EMPTY 0
#define CMS_QUEUE_QUARTER_FULL 1
#define CMS_QUEUE_HALF_FULL 2
#define CMS_QUEUE_THREE_QUARTER_FULL 3
#define CMS_QUEUE_FULL 4
/* FMN Timer Interrupt Type */
#define XLP_CMS_TIMER_INTR_DISABLE 0
#define XLP_CMS_TIMER_CONSUMER 1
#define XLP_CMS_TIMER_PRODUCER 1
#define CMS_TIMER_INTR_DISABLE 0
#define CMS_TIMER_CONSUMER 1
#define CMS_TIMER_PRODUCER 1
/* FMN timer interrupt trigger values */
#define XLP_CMS_TWO_POW_EIGHT_CYCLES 0
#define XLP_CMS_TWO_POW_TEN_CYCLES 1
#define XLP_CMS_TWO_POW_TWELVE_CYCLES 2
#define XLP_CMS_TWO_POW_FOURTEEN_CYCLES 3
#define XLP_CMS_TWO_POW_SIXTEEN_CYCLES 4
#define XLP_CMS_TWO_POW_EIGHTTEEN_CYCLES 5
#define XLP_CMS_TWO_POW_TWENTY_CYCLES 6
#define XLP_CMS_TWO_POW_TWENTYTWO_CYCLES 7
#define CMS_TWO_POW_EIGHT_CYCLES 0
#define CMS_TWO_POW_TEN_CYCLES 1
#define CMS_TWO_POW_TWELVE_CYCLES 2
#define CMS_TWO_POW_FOURTEEN_CYCLES 3
#define CMS_TWO_POW_SIXTEEN_CYCLES 4
#define CMS_TWO_POW_EIGHTTEEN_CYCLES 5
#define CMS_TWO_POW_TWENTY_CYCLES 6
#define CMS_TWO_POW_TWENTYTWO_CYCLES 7
#define XLP_CMS_QUEUE_ENA 1ULL
#define XLP_CMS_QUEUE_DIS 0
#define XLP_CMS_SPILL_ENA 1ULL
#define XLP_CMS_SPILL_DIS 0
#define CMS_QUEUE_ENA 1ULL
#define CMS_QUEUE_DIS 0
#define CMS_SPILL_ENA 1ULL
#define CMS_SPILL_DIS 0
#define XLP_CMS_MAX_VCPU_VC 4
#define CMS_MAX_VCPU_VC 4
/* Each XLP chip can hold up to 32K messages on the chip itself */
#define XLP_CMS_ON_CHIP_MESG_SPACE (32*1024)
#define XLP_CMS_ON_CHIP_PER_QUEUE_SPACE \
((XLP_CMS_ON_CHIP_MESG_SPACE)/(XLP_MAX_CMS_QUEUES))
#define XLP_CMS_MAX_ONCHIP_SEGMENTS 1024
#define XLP_CMS_MAX_SPILL_SEGMENTS_PER_QUEUE 64
#define CMS_ON_CHIP_MESG_SPACE (32*1024)
#define CMS_ON_CHIP_PER_QUEUE_SPACE \
((CMS_ON_CHIP_MESG_SPACE)/(MAX_CMS_QUEUES))
#define CMS_MAX_ONCHIP_SEGMENTS 1024
#define CMS_MAX_SPILL_SEGMENTS_PER_QUEUE 64
/* FMN Network error */
#define XLP_CMS_ILLEGAL_DST_ERROR 0x100
#define XLP_CMS_BIU_TIMEOUT_ERROR 0x080
#define XLP_CMS_BIU_ERROR 0x040
#define XLP_CMS_SPILL_FILL_UNCORRECT_ECC_ERROR 0x020
#define XLP_CMS_SPILL_FILL_CORRECT_ECC_ERROR 0x010
#define XLP_CMS_SPILL_UNCORRECT_ECC_ERROR 0x008
#define XLP_CMS_SPILL_CORRECT_ECC_ERROR 0x004
#define XLP_CMS_OUTPUTQ_UNCORRECT_ECC_ERROR 0x002
#define XLP_CMS_OUTPUTQ_CORRECT_ECC_ERROR 0x001
#define CMS_ILLEGAL_DST_ERROR 0x100
#define CMS_BIU_TIMEOUT_ERROR 0x080
#define CMS_BIU_ERROR 0x040
#define CMS_SPILL_FILL_UNCORRECT_ECC_ERROR 0x020
#define CMS_SPILL_FILL_CORRECT_ECC_ERROR 0x010
#define CMS_SPILL_UNCORRECT_ECC_ERROR 0x008
#define CMS_SPILL_CORRECT_ECC_ERROR 0x004
#define CMS_OUTPUTQ_UNCORRECT_ECC_ERROR 0x002
#define CMS_OUTPUTQ_CORRECT_ECC_ERROR 0x001
/* worst case, a single entry message consists of a 4 byte header
* and an 8-byte entry = 12 bytes in total
*/
#define XLP_CMS_SINGLE_ENTRY_MSG_SIZE 12
#define CMS_SINGLE_ENTRY_MSG_SIZE 12
/* total spill memory needed for one FMN queue */
#define XLP_CMS_PER_QUEUE_SPILL_MEM(spilltotmsgs) \
((spilltotmsgs) * (XLP_CMS_SINGLE_ENTRY_MSG_SIZE))
#define CMS_PER_QUEUE_SPILL_MEM(spilltotmsgs) \
((spilltotmsgs) * (CMS_SINGLE_ENTRY_MSG_SIZE))
/* total spill memory needed */
#define XLP_CMS_TOTAL_SPILL_MEM(spilltotmsgs) \
((XLP_CMS_PER_QUEUE_SPILL_MEM(spilltotmsgs)) * \
(XLP_MAX_CMS_QUEUES))
#define CMS_TOTAL_SPILL_MEM(spilltotmsgs) \
((CMS_PER_QUEUE_SPILL_MEM(spilltotmsgs)) * \
(MAX_CMS_QUEUES))
/* total number of FMN messages possible in a queue */
#define XLP_CMS_TOTAL_QUEUE_SIZE(spilltotmsgs) \
((spilltotmsgs) + (XLP_CMS_ON_CHIP_PER_QUEUE_SPACE))
#define CMS_TOTAL_QUEUE_SIZE(spilltotmsgs) \
((spilltotmsgs) + (CMS_ON_CHIP_PER_QUEUE_SPACE))
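
With the default nlm_cms_spill_total_messages of 1 * 1024 set in fmn.c, these macros work out to roughly:

CMS_PER_QUEUE_SPILL_MEM(1024) = 1024 x 12 = 12288 bytes per queue
CMS_TOTAL_SPILL_MEM(1024) = 12288 x 1024 queues = 12 MB per node
CMS_TOTAL_QUEUE_SIZE(1024) = 1024 + 32 = 1056 messages per queue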
/* FMN Src station id's */
#define XLP_CMS_CPU0_SRC_STID (0 << 4)
#define XLP_CMS_CPU1_SRC_STID (1 << 4)
#define XLP_CMS_CPU2_SRC_STID (2 << 4)
#define XLP_CMS_CPU3_SRC_STID (3 << 4)
#define XLP_CMS_CPU4_SRC_STID (4 << 4)
#define XLP_CMS_CPU5_SRC_STID (5 << 4)
#define XLP_CMS_CPU6_SRC_STID (6 << 4)
#define XLP_CMS_CPU7_SRC_STID (7 << 4)
#define XLP_CMS_PCIE0_SRC_STID 256
#define XLP_CMS_PCIE1_SRC_STID 258
#define XLP_CMS_PCIE2_SRC_STID 260
#define XLP_CMS_PCIE3_SRC_STID 262
#define XLP_CMS_DTE_SRC_STID 264
#define XLP_CMS_RSA_ECC_SRC_STID 272
#define XLP_CMS_CRYPTO_SRC_STID 281
#define XLP_CMS_CMP_SRC_STID 298
#define XLP_CMS_POE_SRC_STID 384
#define XLP_CMS_NAE_SRC_STID 476
#define CMS_CPU0_SRC_STID (0 << 4)
#define CMS_CPU1_SRC_STID (1 << 4)
#define CMS_CPU2_SRC_STID (2 << 4)
#define CMS_CPU3_SRC_STID (3 << 4)
#define CMS_CPU4_SRC_STID (4 << 4)
#define CMS_CPU5_SRC_STID (5 << 4)
#define CMS_CPU6_SRC_STID (6 << 4)
#define CMS_CPU7_SRC_STID (7 << 4)
#define CMS_PCIE0_SRC_STID 256
#define CMS_PCIE1_SRC_STID 258
#define CMS_PCIE2_SRC_STID 260
#define CMS_PCIE3_SRC_STID 262
#define CMS_DTE_SRC_STID 264
#define CMS_RSA_ECC_SRC_STID 272
#define CMS_CRYPTO_SRC_STID 281
#define CMS_CMP_SRC_STID 298
#define CMS_POE_SRC_STID 384
#define CMS_NAE_SRC_STID 476
#if 0
#define XLP_CMS_DEFAULT_CREDIT(cmstotstns,spilltotmsgs) \
((XLP_CMS_TOTAL_QUEUE_SIZE(spilltotmsgs)) / \
#define CMS_DEFAULT_CREDIT(cmstotstns,spilltotmsgs) \
((CMS_TOTAL_QUEUE_SIZE(spilltotmsgs)) / \
(cmstotstns))
#endif
#define XLP_CMS_DEFAULT_CREDIT(cmstotstns,spilltotmsgs) 8
#define CMS_DEFAULT_CREDIT(cmstotstns,spilltotmsgs) 8
/* POPQ related defines */
#define XLP_CMS_POPQID_START 128
#define XLP_CMS_POPQID_END 255
#define CMS_POPQID_START 128
#define CMS_POPQID_END 255
#define XLP_CMS_INT_RCVD 0x800000000000000ULL
#define CMS_INT_RCVD 0x800000000000000ULL
#define nlm_rdreg_cms(b, r) nlm_read_reg64_xkseg(b,r)
#define nlm_wreg_cms(b, r, v) nlm_write_reg64_xkseg(b,r,v)
#define nlm_pcibase_cms(node) nlm_pcicfg_base(XLP_IO_CMS_OFFSET(node))
#define nlm_regbase_cms(node) nlm_pcibar0_base_xkphys(nlm_pcibase_cms(node))
#define nlm_read_cms_reg(b, r) nlm_read_reg64_xkphys(b,r)
#define nlm_write_cms_reg(b, r, v) nlm_write_reg64_xkphys(b,r,v)
#define nlm_get_cms_pcibase(node) nlm_pcicfg_base(XLP_IO_CMS_OFFSET(node))
#define nlm_get_cms_regbase(node) nlm_xkphys_map_pcibar0(nlm_get_cms_pcibase(node))
enum fmn_swcode {
FMN_SWCODE_CPU0=1,
@ -237,7 +238,7 @@ enum fmn_swcode {
extern uint64_t nlm_cms_spill_total_messages;
extern uint32_t nlm_cms_total_stations;
extern uint32_t cms_onchip_seg_availability[XLP_CMS_ON_CHIP_PER_QUEUE_SPACE];
extern uint32_t cms_onchip_seg_availability[CMS_ON_CHIP_PER_QUEUE_SPACE];
extern uint64_t cms_base_addr(int node);
extern int nlm_cms_verify_credit_config (int spill_en, int tot_credit);

sys/mips/nlm/hal/haldefs.h (new file, 437 lines)
View File

@ -0,0 +1,437 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
*/
#ifndef __NLM_HAL_MMIO_H__
#define __NLM_HAL_MMIO_H__
/*
* This file contains the platform-specific memory-mapped IO implementation
* and provides a way to read 32/64 bit memory-mapped registers in
* all ABIs.
*/
/*
* For o32 compilation, we have to disable interrupts and enable KX bit to
* access 64 bit addresses or data.
*
* We need to disable interrupts because we save just the lower 32 bits of
* registers in interrupt handling. So if we get hit by an interrupt while
* using the upper 32 bits of a register, we lose.
*/
static inline uint32_t nlm_save_flags_kx(void)
{
uint32_t sr = mips_rd_status();
mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_KX);
return (sr);
}
static inline uint32_t nlm_save_flags_cop2(void)
{
uint32_t sr = mips_rd_status();
mips_wr_status((sr & ~MIPS_SR_INT_IE) | MIPS_SR_COP_2_BIT);
return (sr);
}
static inline void nlm_restore_flags(uint32_t sr)
{
mips_wr_status(sr);
}
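
The save/restore helpers are used in the pattern below; this mirrors the FMN changes earlier in the commit, with a placeholder body:

static void
xlp_cop2_access_example(void)
{
        uint32_t sr;

        sr = nlm_save_flags_cop2();     /* COP2 usable, interrupts masked */
        /* ... access CP2 message-ring registers here ... */
        nlm_restore_flags(sr);
}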
static inline uint32_t
nlm_load_word(uint64_t addr)
{
volatile uint32_t *p = (volatile uint32_t *)(long)addr;
return *p;
}
static inline void
nlm_store_word(uint64_t addr, uint32_t val)
{
volatile uint32_t *p = (volatile uint32_t *)(long)addr;
*p = val;
}
#if defined(__mips_n64) || defined(__mips_n32)
static inline uint64_t
nlm_load_dword(volatile uint64_t addr)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
return *p;
}
static inline void
nlm_store_dword(volatile uint64_t addr, uint64_t val)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
*p = val;
}
#else /* o32 */
static inline uint64_t
nlm_load_dword(uint64_t addr)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
uint32_t valhi, vallo, sr;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"ld $8, 0(%2)\n\t"
"dsra32 %0, $8, 0\n\t"
"sll %1, $8, 0\n\t"
".set pop\n"
: "=r"(valhi), "=r"(vallo)
: "r"(p)
: "$8");
nlm_restore_flags(sr);
return ((uint64_t)valhi << 32) | vallo;
}
static inline void
nlm_store_dword(uint64_t addr, uint64_t val)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
uint32_t valhi, vallo, sr;
valhi = val >> 32;
vallo = val & 0xffffffff;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"dsll32 $8, %1, 0\n\t"
"dsll32 $9, %2, 0\n\t" /* get rid of the */
"dsrl32 $9, $9, 0\n\t" /* sign extend */
"or $9, $9, $8\n\t"
"sd $9, 0(%0)\n\t"
".set pop\n"
: : "r"(p), "r"(valhi), "r"(vallo)
: "$8", "$9", "memory");
nlm_restore_flags(sr);
}
#endif
#if defined(__mips_n64)
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
volatile uint32_t *p = (volatile uint32_t *)(long)addr;
return *p;
}
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
volatile uint32_t *p = (volatile uint32_t *)(long)addr;
*p = val;
}
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
return *p;
}
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
volatile uint64_t *p = (volatile uint64_t *)(long)addr;
*p = val;
}
#elif defined(__mips_n32)
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
uint32_t val;
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"lw %0, 0(%1)\n\t"
".set pop\n"
: "=r"(val)
: "r"(addr));
return val;
}
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"sw %0, 0(%1)\n\t"
".set pop\n"
: : "r"(val), "r"(addr)
: "memory");
}
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
uint64_t val;
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"ld %0, 0(%1)\n\t"
".set pop\n"
: "=r"(val)
: "r"(addr));
return val;
}
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"sd %0, 0(%1)\n\t"
".set pop\n"
: : "r"(val), "r"(addr)
: "memory");
}
#else /* o32 */
static inline uint64_t
nlm_load_word_daddr(uint64_t addr)
{
uint32_t val, addrhi, addrlo, sr;
addrhi = addr >> 32;
addrlo = addr & 0xffffffff;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"dsll32 $8, %1, 0\n\t"
"dsll32 $9, %2, 0\n\t"
"dsrl32 $9, $9, 0\n\t"
"or $9, $9, $8\n\t"
"lw %0, 0($9)\n\t"
".set pop\n"
: "=r"(val)
: "r"(addrhi), "r"(addrlo)
: "$8", "$9");
nlm_restore_flags(sr);
return val;
}
static inline void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
uint32_t addrhi, addrlo, sr;
addrhi = addr >> 32;
addrlo = addr & 0xffffffff;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"dsll32 $8, %1, 0\n\t"
"dsll32 $9, %2, 0\n\t"
"dsrl32 $9, $9, 0\n\t"
"or $9, $9, $8\n\t"
"sw %0, 0($9)\n\t"
".set pop\n"
: : "r"(val), "r"(addrhi), "r"(addrlo)
: "$8", "$9", "memory");
nlm_restore_flags(sr);
}
static inline uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
uint32_t addrh, addrl, sr;
uint32_t valh, vall;
addrh = addr >> 32;
addrl = addr & 0xffffffff;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"dsll32 $8, %2, 0\n\t"
"dsll32 $9, %3, 0\n\t"
"dsrl32 $9, $9, 0\n\t"
"or $9, $9, $8\n\t"
"ld $8, 0($9)\n\t"
"dsra32 %0, $8, 0\n\t"
"sll %1, $8, 0\n\t"
".set pop\n"
: "=r"(valh), "=r"(vall)
: "r"(addrh), "r"(addrl)
: "$8", "$9");
nlm_restore_flags(sr);
return ((uint64_t)valh << 32) | vall;
}
static inline void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
uint32_t addrh, addrl, sr;
uint32_t valh, vall;
addrh = addr >> 32;
addrl = addr & 0xffffffff;
valh = val >> 32;
vall = val & 0xffffffff;
sr = nlm_save_flags_kx();
__asm__ __volatile__(
".set push\n\t"
".set mips64\n\t"
"dsll32 $8, %2, 0\n\t"
"dsll32 $9, %3, 0\n\t"
"dsrl32 $9, $9, 0\n\t"
"or $9, $9, $8\n\t"
"dsll32 $8, %0, 0\n\t"
"dsll32 $10, %1, 0\n\t"
"dsrl32 $10, $10, 0\n\t"
"or $8, $8, $10\n\t"
"sd $8, 0($9)\n\t"
".set pop\n"
: : "r"(valh), "r"(vall), "r"(addrh), "r"(addrl)
: "$8", "$9", "memory");
nlm_restore_flags(sr);
}
#endif /* __mips_n64 */
static inline uint32_t
nlm_read_reg(uint64_t base, uint32_t reg)
{
volatile uint32_t *addr = (volatile uint32_t *)(long)base + reg;
return *addr;
}
static inline void
nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val)
{
volatile uint32_t *addr = (volatile uint32_t *)(long)base + reg;
*addr = val;
}
static inline uint64_t
nlm_read_reg64(uint64_t base, uint32_t reg)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
return nlm_load_dword(addr);
}
static inline void
nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
return nlm_store_dword(addr, val);
}
/*
* Routines to store 32/64 bit values to 64 bit addresses,
* used when going thru XKPHYS to access registers
*/
static inline uint32_t
nlm_read_reg_xkphys(uint64_t base, uint32_t reg)
{
uint64_t addr = base + reg * sizeof(uint32_t);
return nlm_load_word_daddr(addr);
}
static inline void
nlm_write_reg_xkphys(uint64_t base, uint32_t reg, uint32_t val)
{
uint64_t addr = base + reg * sizeof(uint32_t);
return nlm_store_word_daddr(addr, val);
}
static inline uint64_t
nlm_read_reg64_xkphys(uint64_t base, uint32_t reg)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
return nlm_load_dword_daddr(addr);
}
static inline void
nlm_write_reg64_xkphys(uint64_t base, uint32_t reg, uint64_t val)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
return nlm_store_dword_daddr(addr, val);
}
/* Location where IO base is mapped */
extern uint64_t xlp_io_base;
static inline uint64_t
nlm_pcicfg_base(uint32_t devoffset)
{
return xlp_io_base + devoffset;
}
static inline uint64_t
nlm_xkphys_map_pcibar0(uint64_t pcibase)
{
uint64_t paddr;
paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu;
return (uint64_t)0x9000000000000000 | paddr;
}
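
A sketch of how the helpers above combine, assuming the device offsets from iomap.h later in this commit; the PIC device and register index 0 are placeholders:

static uint64_t
xlp_read_pic_reg0_example(void)
{
        uint64_t pcibase, regbase;

        pcibase = nlm_pcicfg_base(XLP_IO_PIC_OFFSET(0));        /* ECFG header */
        regbase = nlm_xkphys_map_pcibar0(pcibase);              /* BAR0 via XKPHYS */
        return nlm_read_reg64_xkphys(regbase, 0);               /* 64-bit register 0 */
}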
#endif

View File

@ -25,82 +25,75 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
#ifndef __NLM_IOMAP_H__
#define __NLM_IOMAP_H__
/**
* @file_name xlpiomap.h
* @author Netlogic Microsystems
* @brief Basic definitions Netlogic XLP IO BASEs
*/
/* ----------------------------------
* XLP RESET Physical Address Map
* ----------------------------------
* PCI ECFG : 0x18000000 - 0x1bffffff
* PCI CFG : 0x1c000000 - 0x1cffffff
* FLASH : 0x1fc00000 - 0x1fffffff
* ----------------------------------
*/
#ifndef __NLM_HAL_IOMAP_H__
#define __NLM_HAL_IOMAP_H__
#define XLP_DEFAULT_IO_BASE 0x18000000
#define XLP_DEFAULT_IO_BASE_KSEG1 0xb8000000
#define XLP_IO_SIZE (64 << 20) /* Size of the ECFG Space */
#define NMI_BASE 0xbfc00000
#define XLP_IO_CLK 133333333
#define XLP_PCIE_CFG_SIZE 0x1000 /* 4K */
#define XLP_PCIE_DEV_BLK_SIZE (8 * XLP_PCIE_CFG_SIZE)
#define XLP_PCIE_BUS_BLK_SIZE (256 * XLP_PCIE_DEV_BLK_SIZE)
#define XLP_IO_SIZE (64 << 20) /* ECFG space size */
#define XLP_IO_PCI_HDRSZ 0x100
#define XLP_IO_DEV(node, dev) ((dev) + (node) * 8)
#define XLP_HDR_OFFSET(node, bus, dev, fn) (((bus) << 20) | \
((XLP_IO_DEV(node, dev)) << 15) | ((fn) << 12))
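Working one case through the macro (the PIC, defined just below, sits at bus 0, device 0, function 4 on node 0):

XLP_IO_PIC_OFFSET(0) = XLP_HDR_OFFSET(0, 0, 0, 4)
 = (0 << 20) | ((0 + 0 * 8) << 15) | (4 << 12)
 = 0x4000, i.e. physical 0x18004000 once added to XLP_DEFAULT_IO_BASE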
#define XLP_IO_BRIDGE_OFFSET(node) XLP_HDR_OFFSET(node,0,0,0)
#define XLP_IO_BRIDGE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 0)
/* coherent inter chip */
#define XLP_IO_CIC0_OFFSET(node) XLP_HDR_OFFSET(node,0,0,1)
#define XLP_IO_CIC1_OFFSET(node) XLP_HDR_OFFSET(node,0,0,2)
#define XLP_IO_CIC2_OFFSET(node) XLP_HDR_OFFSET(node,0,0,3)
#define XLP_IO_PIC_OFFSET(node) XLP_HDR_OFFSET(node,0,0,4)
#define XLP_IO_CIC0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 1)
#define XLP_IO_CIC1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 2)
#define XLP_IO_CIC2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 3)
#define XLP_IO_PIC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 0, 4)
#define XLP_IO_PCIE_OFFSET(node,i) XLP_HDR_OFFSET(node,0,1,i)
#define XLP_IO_PCIE0_OFFSET(node) XLP_HDR_OFFSET(node,0,1,0)
#define XLP_IO_PCIE1_OFFSET(node) XLP_HDR_OFFSET(node,0,1,1)
#define XLP_IO_PCIE2_OFFSET(node) XLP_HDR_OFFSET(node,0,1,2)
#define XLP_IO_PCIE3_OFFSET(node) XLP_HDR_OFFSET(node,0,1,3)
#define XLP_IO_PCIE_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 1, i)
#define XLP_IO_PCIE0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 0)
#define XLP_IO_PCIE1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 1)
#define XLP_IO_PCIE2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 2)
#define XLP_IO_PCIE3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 1, 3)
#define XLP_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node,0,2,i)
#define XLP_IO_USB_EHCI0_OFFSET(node) XLP_HDR_OFFSET(node,0,2,0)
#define XLP_IO_USB_OHCI0_OFFSET(node) XLP_HDR_OFFSET(node,0,2,1)
#define XLP_IO_USB_OHCI1_OFFSET(node) XLP_HDR_OFFSET(node,0,2,2)
#define XLP_IO_USB_EHCI1_OFFSET(node) XLP_HDR_OFFSET(node,0,2,3)
#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node,0,2,4)
#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node,0,2,5)
#define XLP_IO_USB_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 2, i)
#define XLP_IO_USB_EHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 0)
#define XLP_IO_USB_OHCI0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 1)
#define XLP_IO_USB_OHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 2)
#define XLP_IO_USB_EHCI1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 3)
#define XLP_IO_USB_OHCI2_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 4)
#define XLP_IO_USB_OHCI3_OFFSET(node) XLP_HDR_OFFSET(node, 0, 2, 5)
#define XLP_IO_NAE_OFFSET(node) XLP_HDR_OFFSET(node,0,3,0)
#define XLP_IO_POE_OFFSET(node) XLP_HDR_OFFSET(node,0,3,1)
#define XLP_IO_NAE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 0)
#define XLP_IO_POE_OFFSET(node) XLP_HDR_OFFSET(node, 0, 3, 1)
#define XLP_IO_CMS_OFFSET(node) XLP_HDR_OFFSET(node,0,4,0)
#define XLP_IO_CMS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 4, 0)
#define XLP_IO_DMA_OFFSET(node) XLP_HDR_OFFSET(node,0,5,1)
#define XLP_IO_SEC_OFFSET(node) XLP_HDR_OFFSET(node,0,5,2)
#define XLP_IO_CMP_OFFSET(node) XLP_HDR_OFFSET(node,0,5,3)
#define XLP_IO_DMA_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 1)
#define XLP_IO_SEC_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 2)
#define XLP_IO_CMP_OFFSET(node) XLP_HDR_OFFSET(node, 0, 5, 3)
#define XLP_IO_UART_OFFSET(node, i) XLP_HDR_OFFSET(node,0,6,i)
#define XLP_IO_UART0_OFFSET(node) XLP_HDR_OFFSET(node,0,6,0)
#define XLP_IO_UART1_OFFSET(node) XLP_HDR_OFFSET(node,0,6,1)
#define XLP_IO_I2C_OFFSET(node, i) XLP_HDR_OFFSET(node,0,6,2+i)
#define XLP_IO_I2C0_OFFSET(node) XLP_HDR_OFFSET(node,0,6,2)
#define XLP_IO_I2C1_OFFSET(node) XLP_HDR_OFFSET(node,0,6,3)
#define XLP_IO_GPIO_OFFSET(node) XLP_HDR_OFFSET(node,0,6,4)
#define XLP_IO_UART_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, i)
#define XLP_IO_UART0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 0)
#define XLP_IO_UART1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 1)
#define XLP_IO_I2C_OFFSET(node, i) XLP_HDR_OFFSET(node, 0, 6, 2 + i)
#define XLP_IO_I2C0_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 2)
#define XLP_IO_I2C1_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 3)
#define XLP_IO_GPIO_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 4)
/* system management */
#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node,0,6,5)
#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node,0,6,6)
#define XLP_IO_SYS_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 5)
#define XLP_IO_JTAG_OFFSET(node) XLP_HDR_OFFSET(node, 0, 6, 6)
#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node,0,7,0)
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node,0,7,1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node,0,7,2)
#define XLP_IO_NOR_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 0)
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
/* SD flash */
#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node,0,7,3)
#define XLP_IO_MMC_OFFSET(node, slot) ((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
#define XLP_IO_MMC_OFFSET(node, slot) \
((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
/* PCI config header register id's */
#define XLP_PCI_CFGREG0 0x00
#define XLP_PCI_CFGREG1 0x01
@ -125,113 +118,35 @@
#define XLP_PCI_UCODEINFO_REG 0x3e
#define XLP_PCI_SBB_WT_REG 0x3f
/* PCI IDs for SoC device */
#define PCI_VENDOR_NETLOGIC 0x184e
#define PCI_DEVICE_ID_NLM_ROOT 0x1001
#define PCI_DEVICE_ID_NLM_ICI 0x1002
#define PCI_DEVICE_ID_NLM_PIC 0x1003
#define PCI_DEVICE_ID_NLM_PCIE 0x1004
#define PCI_DEVICE_ID_NLM_EHCI 0x1007
#define PCI_DEVICE_ID_NLM_ILK 0x1008
#define PCI_DEVICE_ID_NLM_NAE 0x1009
#define PCI_DEVICE_ID_NLM_POE 0x100A
#define PCI_DEVICE_ID_NLM_FMN 0x100B
#define PCI_DEVICE_ID_NLM_RAID 0x100D
#define PCI_DEVICE_ID_NLM_SAE 0x100D
#define PCI_DEVICE_ID_NLM_RSA 0x100E
#define PCI_DEVICE_ID_NLM_CMP 0x100F
#define PCI_DEVICE_ID_NLM_UART 0x1010
#define PCI_DEVICE_ID_NLM_I2C 0x1011
#define PCI_DEVICE_ID_NLM_NOR 0x1015
#define PCI_DEVICE_ID_NLM_NAND 0x1016
#define PCI_DEVICE_ID_NLM_MMC 0x1018
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#ifndef __NLM_NLMIO_H__
#error iomap.h needs mmio.h to be included
#endif
#define nlm_read_pci_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_pci_reg(b, r, v) nlm_write_reg(b, r, v)
static __inline__ uint32_t
nlm_read_reg_kseg(uint64_t base, uint32_t reg)
{
volatile uint32_t *addr = (volatile uint32_t *)(intptr_t)base + reg;
extern uint64_t xlp_sys_base;
extern uint64_t xlp_pic_base;
#endif /* !LOCORE or !__ASSEMBLY */
return (*addr);
}
static __inline__ void
nlm_write_reg_kseg(uint64_t base, uint32_t reg, uint32_t val)
{
volatile uint32_t *addr = (volatile uint32_t *)(intptr_t)base + reg;
*addr = val;
}
static __inline__ uint64_t
nlm_read_reg64_kseg(uint64_t base, uint32_t reg)
{
volatile uint64_t *addr = (volatile uint64_t *)(intptr_t)base + (reg >> 1);
return (nlm_load_dword(addr));
}
static __inline__ void
nlm_write_reg64_kseg(uint64_t base, uint32_t reg, uint64_t val)
{
volatile uint64_t *addr = (volatile uint64_t *)(intptr_t)base + (reg >> 1);
return (nlm_store_dword(addr, val));
}
/*
* Routines to store 32/64 bit values to 64 bit addresses,
* used when going thru XKPHYS to access registers
*/
static __inline__ uint32_t
nlm_read_reg_xkseg(uint64_t base, uint32_t reg)
{
uint64_t addr = base + reg * sizeof(uint32_t);
return (nlm_load_word_daddr(addr));
}
static __inline__ void
nlm_write_reg_xkseg(uint64_t base, uint32_t reg, uint32_t val)
{
uint64_t addr = base + reg * sizeof(uint32_t);
nlm_store_word_daddr(addr, val);
}
static __inline__ uint64_t
nlm_read_reg64_xkseg(uint64_t base, uint32_t reg)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
return (nlm_load_dword_daddr(addr));
}
static __inline__ void
nlm_write_reg64_xkseg(uint64_t base, uint32_t reg, uint64_t val)
{
uint64_t addr = base + (reg >> 1) * sizeof(uint64_t);
nlm_store_dword_daddr(addr, val);
}
/* Location where IO base is mapped */
extern uint64_t nlm_pcicfg_baseaddr;
static __inline__ uint64_t
nlm_pcicfg_base(uint32_t devoffset)
{
return (nlm_pcicfg_baseaddr + devoffset);
}
static __inline__ uint64_t
nlm_pcibar0_base_xkphys(uint64_t pcibase)
{
uint64_t paddr;
paddr = nlm_read_reg_kseg(pcibase, XLP_PCI_CFGREG4) & ~0xfu;
return (0x9000000000000000 | paddr);
}
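/*
 * Note (editor's sketch, not part of the commit): 0x9000000000000000 is the
 * base of the MIPS64 XKPHYS window with cache coherency attribute 2
 * (uncached), so the value returned above is an uncached XKPHYS address of
 * the device's BAR0.
 */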
#define nlm_pci_rdreg(b, r) nlm_read_reg_kseg(b, r)
#define nlm_pci_wreg(b, r, v) nlm_write_reg_kseg(b, r, v)
#endif /* !LOCORE && !__ASSEMBLY__*/
/* COMPAT stuff - TODO remove */
#define bit_set(p, m) ((p) |= (m))
#define bit_clear(p, m) ((p) &= ~(m))
#define bit_get(p,m) ((p) & (m))
#define BIT(x) (0x01 << (x))
#define XLP_MAX_NODES 4
#define XLP_MAX_CORES 8
#define XLP_MAX_THREADS 4
#define XLP_CACHELINE_SIZE 64
#define XLP_NUM_NODES 1 /* we support only one now */
#endif
#endif /* __NLM_HAL_IOMAP_H__ */


@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_MIPS_EXTNS_H__
#define __NLM_MIPS_EXTNS_H__
@ -91,91 +92,6 @@ static __inline__ uint64_t nlm_swapd(int32_t *loc, uint64_t val)
}
#endif
#if defined(__mips_n64) || defined(__mips_n32)
static __inline uint64_t
nlm_mfcr(uint32_t reg)
{
uint64_t res;
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
"move $9, %1\n\t"
".word 0x71280018\n\t" /* mfcr $8, $9 */
"move %0, $8\n\t"
".set pop\n"
: "=r" (res) : "r"(reg)
: "$8", "$9"
);
return (res);
}
static __inline void
nlm_mtcr(uint32_t reg, uint64_t value)
{
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
"move $8, %0\n"
"move $9, %1\n"
".word 0x71280019\n" /* mtcr $8, $9 */
".set pop\n"
:
: "r" (value), "r" (reg)
: "$8", "$9"
);
}
#else /* !(defined(__mips_n64) || defined(__mips_n32)) */
static __inline__ uint64_t
nlm_mfcr(uint32_t reg)
{
uint64_t hi;
uint64_t lo;
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"move $8, %2\n"
".word 0x71090018\n"
"nop \n"
"dsra32 %0, $9, 0\n"
"sll %1, $9, 0\n"
".set pop\n"
: "=r"(hi), "=r"(lo)
: "r"(reg) : "$8", "$9");
return (((uint64_t)hi) << 32) | lo;
}
static __inline__ void
nlm_mtcr(uint32_t reg, uint64_t val)
{
uint32_t hi, lo;
hi = val >> 32;
lo = val & 0xffffffff;
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"move $9, %0\n"
"dsll32 $9, %1, 0\n"
"dsll32 $8, %0, 0\n"
"dsrl32 $9, $9, 0\n"
"or $9, $9, $8\n"
"move $8, %2\n"
".word 0x71090019\n"
"nop \n"
".set pop\n"
::"r"(hi), "r"(lo), "r"(reg)
: "$8", "$9");
}
#endif /* (defined(__mips_n64) || defined(__mips_n32)) */
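/*
 * Usage sketch (illustrative only, not part of this commit): nlm_mfcr() and
 * nlm_mtcr() above access XLP-specific control registers by index, for
 * example the MMU setup register defined in mmu.h:
 *
 *	uint32_t v = nlm_mfcr(XLP_MMU_SETUP_REG);
 *	nlm_mtcr(XLP_MMU_SETUP_REG, v);
 */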
/* dcrc2 */
/* XLP additional instructions */
/*
* Atomically increment an unsigned int
*/
@ -196,5 +112,163 @@ nlm_ldaddwu(unsigned int value, unsigned int *addr)
return (value);
}
/*
* 32 bit read write for c0
*/
#define read_c0_register32(reg, sel) \
({ \
uint32_t __rv; \
__asm__ __volatile__( \
".set push\n\t" \
".set mips32\n\t" \
"mfc0 %0, $%1, %2\n\t" \
".set pop\n" \
: "=r" (__rv) : "i" (reg), "i" (sel) ); \
__rv; \
})
#define write_c0_register32(reg, sel, value) \
__asm__ __volatile__( \
".set push\n\t" \
".set mips32\n\t" \
"mtc0 %0, $%1, %2\n\t" \
".set pop\n" \
: : "r" (value), "i" (reg), "i" (sel) );
#if defined(__mips_n64) || defined(__mips_n32)
/*
* On 64 bit compilation, the operations are simple
*/
#define read_c0_register64(reg, sel) \
({ \
uint64_t __rv; \
__asm__ __volatile__( \
".set push\n\t" \
".set mips64\n\t" \
"dmfc0 %0, $%1, %2\n\t" \
".set pop\n" \
: "=r" (__rv) : "i" (reg), "i" (sel) ); \
__rv; \
})
#define write_c0_register64(reg, sel, value) \
__asm__ __volatile__( \
".set push\n\t" \
".set mips64\n\t" \
"dmtc0 %0, $%1, %2\n\t" \
".set pop\n" \
: : "r" (value), "i" (reg), "i" (sel) );
#else /* ! (defined(__mips_n64) || defined(__mips_n32)) */
/*
* For 32 bit compilation, 64 bit values have to be split
*/
#define read_c0_register64(reg, sel) \
({ \
uint32_t __high, __low; \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
".set mips64\n\t" \
"dmfc0 $8, $%2, %3\n\t" \
"dsra32 %0, $8, 0\n\t" \
"sll %1, $8, 0\n\t" \
".set pop\n" \
: "=r"(__high), "=r"(__low): "i"(reg), "i"(sel) \
: "$8"); \
((uint64_t)__high << 32) | __low; \
})
#define write_c0_register64(reg, sel, value) \
do { \
uint32_t __high = value >> 32; \
uint32_t __low = value & 0xffffffff; \
__asm__ __volatile__( \
".set push\n\t" \
".set noreorder\n\t" \
".set mips64\n\t" \
"dsll32 $8, %1, 0\n\t" \
"dsll32 $9, %0, 0\n\t" \
"dsrl32 $8, $8, 0\n\t" \
"or $8, $8, $9\n\t" \
"dmtc0 $8, $%2, %3\n\t" \
".set pop" \
:: "r"(__high), "r"(__low), "i"(reg), "i"(sel) \
:"$8", "$9"); \
} while(0)
#endif
/*
 * Functions to read and write the extended cp0 registers.
 * EIRR : Extended Interrupt Request Register
 *        cp0 register 9, select 6
 *        bits 0..7 are the same as cause register bits 8..15
 * EIMR : Extended Interrupt Mask Register
 *        cp0 register 9, select 7
 *        bits 0..7 are the same as status register bits 8..15
 */
static __inline uint64_t
nlm_read_c0_eirr(void)
{
return (read_c0_register64(9, 6));
}
static __inline void
nlm_write_c0_eirr(uint64_t val)
{
write_c0_register64(9, 6, val);
}
static __inline uint64_t
nlm_read_c0_eimr(void)
{
return (read_c0_register64(9, 7));
}
static __inline void
nlm_write_c0_eimr(uint64_t val)
{
write_c0_register64(9, 7, val);
}
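/*
 * Minimal sketch (illustrative, not from this commit) of how the EIRR/EIMR
 * accessors above are typically used together: clear a pending vector in
 * EIRR and mask it in EIMR.  The function name and 'vec' are hypothetical.
 */
static __inline void
nlm_eirr_ack_and_mask(int vec)
{
	uint64_t bit = 1ULL << vec;

	nlm_write_c0_eirr(bit);				/* clear pending bit */
	nlm_write_c0_eimr(nlm_read_c0_eimr() & ~bit);	/* mask the vector */
}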
static __inline__ uint32_t
nlm_read_c0_ebase(void)
{
return (read_c0_register32(15, 1));
}
static __inline__ int
nlm_nodeid(void)
{
return (nlm_read_c0_ebase() >> 5) & 0x3;
}
static __inline__ int
nlm_cpuid(void)
{
return nlm_read_c0_ebase() & 0x1f;
}
static __inline__ int
nlm_threadid(void)
{
return nlm_read_c0_ebase() & 0x3;
}
static __inline__ int
nlm_coreid(void)
{
return (nlm_read_c0_ebase() >> 2) & 0x7;
}
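/*
 * Worked example (editor's note): the decoding above treats the low EBASE
 * bits as {node[6:5], core[4:2], thread[1:0]}.  For EBASE CPUNUM 0x16
 * (0b10110) on node 0, nlm_cpuid() is 22, nlm_coreid() is 5 and
 * nlm_threadid() is 2.
 */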
#endif
#define XLP_MAX_NODES 4
#define XLP_MAX_CORES 8
#define XLP_MAX_THREADS 4
#endif


@ -1,338 +0,0 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
* NETLOGIC_BSD */
#ifndef __NLM_NLMIO_H__
#define __NLM_NLMIO_H__
#if !defined(__mips_n32) && !defined(__mips_n64)
/*
* For o32 compilation, we have to disable interrupts and enable KX bit to
* access 64 bit addresses or data.
*
* We need to disable interrupts because we save just the lower 32 bits of
* registers in interrupt handling. So if we get hit by an interrupt while
* using the upper 32 bits of a register, we lose.
*/
static __inline__ uint32_t nlm_enable_kx(void)
{
uint32_t sr;
__asm__ __volatile__(
"mfc0 %0, $12 \n\t" /* read status reg */
"move $8, %0 \n\t"
"ori $8, $8, 0x81 \n\t" /* set KX, and IE */
"xori $8, $8, 0x1 \n\t" /* flip IE */
"mtc0 $8, $12 \n\t" /* update status reg */
: "=r"(sr)
: : "$8");
return (sr);
}
static __inline__ void nlm_restore_kx(uint32_t sr)
{
__asm__ __volatile__("mtc0 %0, $12" : : "r"(sr));
}
#endif
static __inline__ uint32_t
nlm_load_word(volatile uint32_t *addr)
{
return (*addr);
}
static __inline__ void
nlm_store_word(volatile uint32_t *addr, uint32_t val)
{
*addr = val;
}
#if defined(__mips_n64) || defined(__mips_n32)
static __inline__ uint64_t
nlm_load_dword(volatile uint64_t *addr)
{
return (*addr);
}
static __inline__ void
nlm_store_dword(volatile uint64_t *addr, uint64_t val)
{
*addr = val;
}
#else /* o32 */
static __inline__ uint64_t
nlm_load_dword(volatile uint64_t *addr)
{
uint32_t valhi, vallo, sr;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"ld $8, 0(%2) \n\t"
"dsra32 %0, $8, 0 \n\t"
"sll %1, $8, 0 \n\t"
".set pop \n"
: "=r"(valhi), "=r"(vallo)
: "r"(addr)
: "$8" );
nlm_restore_kx(sr);
return (((uint64_t)valhi << 32) | vallo);
}
static __inline__ void
nlm_store_dword(volatile uint64_t *addr, uint64_t val)
{
uint32_t valhi, vallo, sr;
valhi = val >> 32;
vallo = val & 0xffffffff;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"dsll32 $8, %1, 0 \n\t"
"dsll32 $9, %2, 0 \n\t" /* get rid of the */
"dsrl32 $9, $9, 0 \n\t" /* sign extend */
"or $9, $9, $8 \n\t"
"sd $9, 0(%0) \n\t"
".set pop \n"
: : "r"(addr), "r"(valhi), "r"(vallo)
: "$8", "$9", "memory");
nlm_restore_kx(sr);
}
#endif
#if defined(__mips_n64)
static __inline__ uint64_t
nlm_load_word_daddr(uint64_t addr)
{
volatile uint32_t *p = (volatile uint32_t *)(intptr_t)addr;
return (*p);
}
static __inline__ void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
volatile uint32_t *p = (volatile uint32_t *)(intptr_t)addr;
*p = val;
}
static __inline__ uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
volatile uint64_t *p = (volatile uint64_t *)(intptr_t)addr;
return (*p);
}
static __inline__ void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
volatile uint64_t *p = (volatile uint64_t *)(intptr_t)addr;
*p = val;
}
#elif defined(__mips_n32)
static __inline__ uint64_t
nlm_load_word_daddr(uint64_t addr)
{
uint32_t val;
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"lw %0, 0(%1) \n\t"
".set pop \n"
: "=r"(val)
: "r"(addr));
return (val);
}
static __inline__ void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"sw %0, 0(%1) \n\t"
".set pop \n"
: : "r"(val), "r"(addr)
: "memory");
}
static __inline__ uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
uint64_t val;
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"ld %0, 0(%1) \n\t"
".set pop \n"
: "=r"(val)
: "r"(addr));
return (val);
}
static __inline__ void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"sd %0, 0(%1) \n\t"
".set pop \n"
: : "r"(val), "r"(addr)
: "memory");
}
#else /* o32 */
static __inline__ uint64_t
nlm_load_word_daddr(uint64_t addr)
{
uint32_t val, addrhi, addrlo, sr;
addrhi = addr >> 32;
addrlo = addr & 0xffffffff;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"dsll32 $8, %1, 0 \n\t"
"dsll32 $9, %2, 0 \n\t" /* get rid of the */
"dsrl32 $9, $9, 0 \n\t" /* sign extend */
"or $9, $9, $8 \n\t"
"lw %0, 0($9) \n\t"
".set pop \n"
: "=r"(val)
: "r"(addrhi), "r"(addrlo)
: "$8", "$9");
nlm_restore_kx(sr);
return (val);
}
static __inline__ void
nlm_store_word_daddr(uint64_t addr, uint32_t val)
{
uint32_t addrhi, addrlo, sr;
addrhi = addr >> 32;
addrlo = addr & 0xffffffff;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"dsll32 $8, %1, 0 \n\t"
"dsll32 $9, %2, 0 \n\t" /* get rid of the */
"dsrl32 $9, $9, 0 \n\t" /* sign extend */
"or $9, $9, $8 \n\t"
"sw %0, 0($9) \n\t"
".set pop \n"
:: "r"(val), "r"(addrhi), "r"(addrlo)
: "$8", "$9", "memory");
nlm_restore_kx(sr);
}
static __inline__ uint64_t
nlm_load_dword_daddr(uint64_t addr)
{
uint32_t addrh, addrl, sr;
uint32_t valh, vall;
addrh = addr >> 32;
addrl = addr & 0xffffffff;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"dsll32 $8, %2, 0 \n\t"
"dsll32 $9, %3, 0 \n\t" /* get rid of the */
"dsrl32 $9, $9, 0 \n\t" /* sign extend */
"or $9, $9, $8 \n\t"
"ld $8, 0($9) \n\t"
"dsra32 %0, $8, 0 \n\t"
"sll %1, $8, 0 \n\t"
".set pop \n"
: "=r"(valh), "=r"(vall)
: "r"(addrh), "r"(addrl)
: "$8", "$9");
nlm_restore_kx(sr);
return (((uint64_t)valh << 32) | vall);
}
static __inline__ void
nlm_store_dword_daddr(uint64_t addr, uint64_t val)
{
uint32_t addrh, addrl, sr;
uint32_t valh, vall;
addrh = addr >> 32;
addrl = addr & 0xffffffff;
valh = val >> 32;
vall = val & 0xffffffff;
sr = nlm_enable_kx();
__asm__ __volatile__(
".set push \n\t"
".set mips64 \n\t"
"dsll32 $8, %2, 0 \n\t"
"dsll32 $9, %3, 0 \n\t" /* get rid of the */
"dsrl32 $9, $9, 0 \n\t" /* sign extend */
"or $9, $9, $8 \n\t"
"dsll32 $8, %0, 0 \n\t"
"dsll32 $10, %1, 0 \n\t" /* get rid of the */
"dsrl32 $10, $10, 0 \n\t" /* sign extend */
"or $8, $8, $10 \n\t"
"sd $8, 0($9) \n\t"
".set pop \n"
: : "r"(valh), "r"(vall), "r"(addrh), "r"(addrl)
: "$8", "$9", "memory");
nlm_restore_kx(sr);
}
#endif /* __mips_n64 */
#endif


@ -25,44 +25,66 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __XLP_MMU_H__
#define __XLP_MMU_H__
#include <mips/nlm/hal/cop0.h>
#include <mips/nlm/hal/mips-extns.h>
#define XLP_MMU_SETUP_REG 0x400
#define XLP_MMU_LFSRSEED_REG 0x401
#define XLP_MMU_HPW_NUM_PAGE_LVL_REG 0x410
#define XLP_MMU_PGWKR_PGDBASE_REG 0x411
#define XLP_MMU_PGWKR_PGDSHFT_REG 0x412
#define XLP_MMU_PGWKR_PGDMASK_REG 0x413
#define XLP_MMU_PGWKR_PUDSHFT_REG 0x414
#define XLP_MMU_PGWKR_PUDMASK_REG 0x415
#define XLP_MMU_PGWKR_PMDSHFT_REG 0x416
#define XLP_MMU_PGWKR_PMDMASK_REG 0x417
#define XLP_MMU_PGWKR_PTESHFT_REG 0x418
#define XLP_MMU_PGWKR_PTEMASK_REG 0x419
typedef struct hw_pagewalker {
	int pgd_present;
	int pud_present;
	int pmd_present;
	int pte_present;
	uint64_t pgd_baseaddr;
	uint32_t pgd_shift;
	uint32_t pgd_mask;
	uint32_t pud_shift;
	uint32_t pud_mask;
	uint32_t pmd_shift;
	uint32_t pmd_mask;
	uint32_t pte_shift;
	uint32_t pte_mask;
} nlm_pagewalker;

static __inline__ uint32_t
nlm_read_c0_config6(void)
{
	uint32_t rv;

	__asm__ __volatile__ (
	    ".set push\n"
	    ".set mips64\n"
	    "mfc0 %0, $16, 6\n"
	    ".set pop\n"
	    : "=r" (rv));
	return rv;
}
static __inline__ void
nlm_write_c0_config6(uint32_t value)
{
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"mtc0 %0, $16, 6\n"
".set pop\n"
: : "r" (value));
}
static __inline__ uint32_t
nlm_read_c0_config7(void)
{
uint32_t rv;
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"mfc0 %0, $16, 7\n"
".set pop\n"
: "=r" (rv));
return rv;
}
static __inline__ void
nlm_write_c0_config7(uint32_t value)
{
__asm__ __volatile__ (
".set push\n"
".set mips64\n"
"mtc0 %0, $16, 7\n"
".set pop\n"
: : "r" (value));
}
/**
* On power on reset, XLP comes up with 64 TLBs.
* Large-variable-tlb's (ELVT) and extended TLB is disabled.
@ -101,7 +123,7 @@ static __inline__ void nlm_large_variable_tlb_en (int en)
/* en = 1 to enable
* en = 0 to disable
*/
static __inline__ void nlm_pagewalker_en (int en)
static __inline__ void nlm_pagewalker_en(int en)
{
unsigned int val;
@ -114,7 +136,7 @@ static __inline__ void nlm_pagewalker_en (int en)
/* en = 1 to enable
* en = 0 to disable
*/
static __inline__ void nlm_extended_tlb_en (int en)
static __inline__ void nlm_extended_tlb_en(int en)
{
unsigned int val;
@ -135,70 +157,9 @@ static __inline__ int nlm_get_num_vtlbs(void)
return (((nlm_read_c0_config6() >> 6) & 0x3ff) + 1);
}
static __inline__ void nlm_setup_extended_pagemask (int mask)
static __inline__ void nlm_setup_extended_pagemask(int mask)
{
nlm_write_c0_config7(mask);
}
/* hashindex_en = 1 to enable hash mode, hashindex_en=0 to disable
* global_mode = 1 to enable global mode, global_mode=0 to disable
* clk_gating = 0 to enable clock gating, clk_gating=1 to disable
*/
static __inline__ void nlm_mmu_setup(int hashindex_en, int global_mode,
int clk_gating)
{
/*uint32_t mmusetup = nlm_mfcr(XLP_MMU_SETUP_REG);*/
uint32_t mmusetup = 0;
mmusetup |= (hashindex_en << 13);
mmusetup |= (clk_gating << 3);
mmusetup |= (global_mode << 0);
nlm_mtcr(XLP_MMU_SETUP_REG, mmusetup);
}
static __inline__ void nlm_mmu_lfsr_seed (int thr0_seed, int thr1_seed,
int thr2_seed, int thr3_seed)
{
uint32_t seed = nlm_mfcr(XLP_MMU_LFSRSEED_REG);
seed |= ((thr3_seed & 0x7f) << 23);
seed |= ((thr2_seed & 0x7f) << 16);
seed |= ((thr1_seed & 0x7f) << 7);
seed |= ((thr0_seed & 0x7f) << 0);
nlm_mtcr(XLP_MMU_LFSRSEED_REG, seed);
}
static __inline__ void nlm_pagewalker_setup (nlm_pagewalker *walker)
{
uint64_t val;
if (!walker->pgd_present)
return;
val = nlm_mfcr(XLP_MMU_HPW_NUM_PAGE_LVL_REG);
if (walker->pgd_present)
val |= (1 << 3);
if (walker->pud_present)
val |= (1 << 2);
if (walker->pmd_present)
val |= (1 << 1);
if (walker->pte_present)
val |= (1 << 0);
nlm_mtcr(XLP_MMU_HPW_NUM_PAGE_LVL_REG, val);
nlm_mtcr(XLP_MMU_PGWKR_PGDBASE_REG, walker->pgd_baseaddr);
nlm_mtcr(XLP_MMU_PGWKR_PGDSHFT_REG, walker->pgd_shift);
nlm_mtcr(XLP_MMU_PGWKR_PGDMASK_REG, walker->pgd_mask);
nlm_mtcr(XLP_MMU_PGWKR_PUDSHFT_REG, walker->pud_shift);
nlm_mtcr(XLP_MMU_PGWKR_PUDMASK_REG, walker->pud_mask);
nlm_mtcr(XLP_MMU_PGWKR_PMDSHFT_REG, walker->pmd_shift);
nlm_mtcr(XLP_MMU_PGWKR_PMDMASK_REG, walker->pmd_mask);
nlm_mtcr(XLP_MMU_PGWKR_PTESHFT_REG, walker->pte_shift);
nlm_mtcr(XLP_MMU_PGWKR_PTEMASK_REG, walker->pte_mask);
}
#endif

sys/mips/nlm/hal/pcibus.h (new file, 89 lines)

@ -0,0 +1,89 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
*/
#ifndef __XLP_PCIBUS_H__
#define __XLP_PCIBUS_H__
#define MSI_MIPS_ADDR_BASE 0xfee00000
/* MSI support */
#define MSI_MIPS_ADDR_DEST 0x000ff000
#define MSI_MIPS_ADDR_RH 0x00000008
#define MSI_MIPS_ADDR_RH_OFF 0x00000000
#define MSI_MIPS_ADDR_RH_ON 0x00000008
#define MSI_MIPS_ADDR_DM 0x00000004
#define MSI_MIPS_ADDR_DM_PHYSICAL 0x00000000
#define MSI_MIPS_ADDR_DM_LOGICAL 0x00000004
/* Fields in data for Intel MSI messages. */
#define MSI_MIPS_DATA_TRGRMOD 0x00008000 /* Trigger mode */
#define MSI_MIPS_DATA_TRGREDG 0x00000000 /* edge */
#define MSI_MIPS_DATA_TRGRLVL 0x00008000 /* level */
#define MSI_MIPS_DATA_LEVEL 0x00004000 /* Polarity. */
#define MSI_MIPS_DATA_DEASSERT 0x00000000
#define MSI_MIPS_DATA_ASSERT 0x00004000
#define MSI_MIPS_DATA_DELMOD 0x00000700 /* Delivery Mode */
#define MSI_MIPS_DATA_DELFIXED 0x00000000 /* fixed */
#define MSI_MIPS_DATA_DELLOPRI 0x00000100 /* lowest priority */
#define MSI_MIPS_DATA_INTVEC 0x000000ff
/*
* Build Intel MSI message and data values from a source. AMD64 systems
* seem to be compatible, so we use the same function for both.
*/
#define MIPS_MSI_ADDR(cpu) \
(MSI_MIPS_ADDR_BASE | (cpu) << 12 | \
MSI_MIPS_ADDR_RH_OFF | MSI_MIPS_ADDR_DM_PHYSICAL)
#define MIPS_MSI_DATA(irq) \
(MSI_MIPS_DATA_TRGRLVL | MSI_MIPS_DATA_DELFIXED | \
MSI_MIPS_DATA_ASSERT | (irq))
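/*
 * Worked example (editor's note): with the definitions above,
 * MIPS_MSI_ADDR(2) evaluates to 0xfee02000 (base | cpu 2 << 12, RH off,
 * physical destination mode) and MIPS_MSI_DATA(0x24) evaluates to 0xc024
 * (level trigger | assert | vector 0x24).
 */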
#define PCIE_BRIDGE_CMD 0x1
#define PCIE_BRIDGE_MSI_CAP 0x14
#define PCIE_BRIDGE_MSI_ADDRL 0x15
#define PCIE_BRIDGE_MSI_ADDRH 0x16
#define PCIE_BRIDGE_MSI_DATA 0x17
/* XLP Global PCIE configuration space registers */
#define PCIE_MSI_STATUS 0x25A
#define PCIE_MSI_EN 0x25B
#define PCIE_INT_EN0 0x261
/* PCIE_MSI_EN */
#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
/* PCIE_INT_EN0 */
#define PCIE_MSI_INT_EN (1 << 9)
#endif /* __XLP_PCIBUS_H__ */


@ -25,403 +25,356 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __XLP_PIC_H__
#define __XLP_PIC_H__
#ifndef _NLM_HAL_PIC_H
#define _NLM_HAL_PIC_H
/* PIC Specific registers */
#define XLP_PIC_CTRL_REG 0x40
#define XLP_PIC_BYTESWAP_REG 0x42
#define XLP_PIC_STATUS_REG 0x44
#define XLP_PIC_INTR_TIMEOUT 0x46
#define XLP_PIC_ICI0_INTR_TIMEOUT 0x48
#define XLP_PIC_ICI1_INTR_TIMEOUT 0x4a
#define XLP_PIC_ICI2_INTR_TIMEOUT 0x4c
#define XLP_PIC_IPI_CTRL_REG 0x4e
#define XLP_PIC_INT_ACK_REG 0x50
#define XLP_PIC_INT_PENDING0_REG 0x52
#define XLP_PIC_INT_PENDING1_REG 0x54
#define XLP_PIC_INT_PENDING2_REG 0x56
#define XLP_PIC_WDOG0_MAXVAL_REG 0x58
#define XLP_PIC_WDOG0_COUNT_REG 0x5a
#define XLP_PIC_WDOG0_ENABLE0_REG 0x5c
#define XLP_PIC_WDOG0_ENABLE1_REG 0x5e
#define XLP_PIC_WDOG0_BEATCMD_REG 0x60
#define XLP_PIC_WDOG0_BEAT0_REG 0x62
#define XLP_PIC_WDOG0_BEAT1_REG 0x64
#define XLP_PIC_WDOG1_MAXVAL_REG 0x66
#define XLP_PIC_WDOG1_COUNT_REG 0x68
#define XLP_PIC_WDOG1_ENABLE0_REG 0x6a
#define XLP_PIC_WDOG1_ENABLE1_REG 0x6c
#define XLP_PIC_WDOG1_BEATCMD_REG 0x6e
#define XLP_PIC_WDOG1_BEAT0_REG 0x70
#define XLP_PIC_WDOG1_BEAT1_REG 0x72
#define XLP_PIC_WDOG_MAXVAL_REG(i) (XLP_PIC_WDOG0_MAXVAL_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_COUNT_REG(i) (XLP_PIC_WDOG0_COUNT_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_ENABLE0_REG(i) (XLP_PIC_WDOG0_ENABLE0_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_ENABLE1_REG(i) (XLP_PIC_WDOG0_ENABLE1_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_BEATCMD_REG(i) (XLP_PIC_WDOG0_BEATCMD_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_BEAT0_REG(i) (XLP_PIC_WDOG0_BEAT0_REG + ((i) ? 7 : 0))
#define XLP_PIC_WDOG_BEAT1_REG(i) (XLP_PIC_WDOG0_BEAT1_REG + ((i) ? 7 : 0))
#define XLP_PIC_SYSTIMER0_MAXVAL_REG 0x74
#define XLP_PIC_SYSTIMER1_MAXVAL_REG 0x76
#define XLP_PIC_SYSTIMER2_MAXVAL_REG 0x78
#define XLP_PIC_SYSTIMER3_MAXVAL_REG 0x7a
#define XLP_PIC_SYSTIMER4_MAXVAL_REG 0x7c
#define XLP_PIC_SYSTIMER5_MAXVAL_REG 0x7e
#define XLP_PIC_SYSTIMER6_MAXVAL_REG 0x80
#define XLP_PIC_SYSTIMER7_MAXVAL_REG 0x82
#define XLP_PIC_SYSTIMER_MAXVAL_REG(i) (XLP_PIC_SYSTIMER0_MAXVAL_REG + ((i)*2))
#define XLP_PIC_SYSTIMER0_COUNT_REG 0x84
#define XLP_PIC_SYSTIMER1_COUNT_REG 0x86
#define XLP_PIC_SYSTIMER2_COUNT_REG 0x88
#define XLP_PIC_SYSTIMER3_COUNT_REG 0x8a
#define XLP_PIC_SYSTIMER4_COUNT_REG 0x8c
#define XLP_PIC_SYSTIMER5_COUNT_REG 0x8e
#define XLP_PIC_SYSTIMER6_COUNT_REG 0x90
#define XLP_PIC_SYSTIMER7_COUNT_REG 0x92
#define XLP_PIC_SYSTIMER_COUNT_REG(i) (XLP_PIC_SYSTIMER0_COUNT_REG + ((i)*2))
#define XLP_PIC_ITE0_N0_N1_REG 0x94
#define XLP_PIC_ITE1_N0_N1_REG 0x98
#define XLP_PIC_ITE2_N0_N1_REG 0x9c
#define XLP_PIC_ITE3_N0_N1_REG 0xa0
#define XLP_PIC_ITE4_N0_N1_REG 0xa4
#define XLP_PIC_ITE5_N0_N1_REG 0xa8
#define XLP_PIC_ITE6_N0_N1_REG 0xac
#define XLP_PIC_ITE7_N0_N1_REG 0xb0
#define XLP_PIC_ITE_N0_N1_REG(i) (XLP_PIC_ITE0_N0_N1_REG + ((i)*4))
#define XLP_PIC_ITE0_N2_N3_REG 0x96
#define XLP_PIC_ITE1_N2_N3_REG 0x9a
#define XLP_PIC_ITE2_N2_N3_REG 0x9e
#define XLP_PIC_ITE3_N2_N3_REG 0xa2
#define XLP_PIC_ITE4_N2_N3_REG 0xa6
#define XLP_PIC_ITE5_N2_N3_REG 0xaa
#define XLP_PIC_ITE6_N2_N3_REG 0xae
#define XLP_PIC_ITE7_N2_N3_REG 0xb2
#define XLP_PIC_ITE_N2_N3_REG(i) (XLP_PIC_ITE0_N2_N3_REG + ((i)*4))
#define XLP_PIC_IRT0_REG 0xb4
#define XLP_PIC_IRT_REG(i) (XLP_PIC_IRT0_REG + ((i)*2))
/* PIC IRT indices */
#define XLP_PIC_IRT_WD0_INDEX 0
#define XLP_PIC_IRT_WD1_INDEX 1
#define XLP_PIC_IRT_WD_NMI0_INDEX 2
#define XLP_PIC_IRT_WD_NMI1_INDEX 3
#define XLP_PIC_IRT_TIMER0_INDEX 4
#define XLP_PIC_IRT_TIMER1_INDEX 5
#define XLP_PIC_IRT_TIMER2_INDEX 6
#define XLP_PIC_IRT_TIMER3_INDEX 7
#define XLP_PIC_IRT_TIMER4_INDEX 8
#define XLP_PIC_IRT_TIMER5_INDEX 9
#define XLP_PIC_IRT_TIMER6_INDEX 10
#define XLP_PIC_IRT_TIMER7_INDEX 11
#define XLP_PIC_IRT_TIMER_INDEX(i) (XLP_PIC_IRT_TIMER0_INDEX + (i))
#define XLP_PIC_IRT_MSGQ0_INDEX 12
#define XLP_PIC_IRT_MSGQ_INDEX(i) (XLP_PIC_IRT_MSGQ0_INDEX + (i))
/* 12 to 43 */
#define XLP_PIC_IRT_MSG0_INDEX 44
#define XLP_PIC_IRT_MSG1_INDEX 45
#define XLP_PIC_IRT_PCIE_MSIX0_INDEX 46
#define XLP_PIC_IRT_PCIE_MSIX_INDEX(i) (XLP_PIC_IRT_PCIE_MSIX0_INDEX + (i))
/* 46 to 77 */
#define XLP_PIC_IRT_PCIE_LINK0_INDEX 78
#define XLP_PIC_IRT_PCIE_LINK1_INDEX 79
#define XLP_PIC_IRT_PCIE_LINK2_INDEX 80
#define XLP_PIC_IRT_PCIE_LINK3_INDEX 81
#define XLP_PIC_IRT_PCIE_LINK_INDEX(i) (XLP_PIC_IRT_PCIE_LINK0_INDEX + (i))
/* 78 to 81 */
#define XLP_PIC_IRT_NA0_INDEX 82
#define XLP_PIC_IRT_NA_INDEX(i) (XLP_PIC_IRT_NA0_INDEX + (i))
/* 82 to 113 */
#define XLP_PIC_IRT_POE_INDEX 114
#define XLP_PIC_IRT_USB0_INDEX 115
#define XLP_PIC_IRT_EHCI0_INDEX 115
#define XLP_PIC_IRT_EHCI1_INDEX 118
#define XLP_PIC_IRT_USB_INDEX(i) (XLP_PIC_IRT_USB0_INDEX + (i))
/* 115 to 120 */
#define XLP_PIC_IRT_GDX_INDEX 121
#define XLP_PIC_IRT_SEC_INDEX 122
#define XLP_PIC_IRT_RSA_INDEX 123
#define XLP_PIC_IRT_COMP0_INDEX 124
#define XLP_PIC_IRT_COMP_INDEX(i) (XLP_PIC_IRT_COMP0_INDEX + (i))
/* 124 to 127 */
#define XLP_PIC_IRT_GBU_INDEX 128
/* coherent inter chip */
#define XLP_PIC_IRT_CIC0_INDEX 129
#define XLP_PIC_IRT_CIC1_INDEX 130
#define XLP_PIC_IRT_CIC2_INDEX 131
#define XLP_PIC_IRT_CAM_INDEX 132
#define XLP_PIC_IRT_UART0_INDEX 133
#define XLP_PIC_IRT_UART1_INDEX 134
#define XLP_PIC_IRT_I2C0_INDEX 135
#define XLP_PIC_IRT_I2C1_INDEX 136
#define XLP_PIC_IRT_SYS0_INDEX 137
#define XLP_PIC_IRT_SYS1_INDEX 138
#define XLP_PIC_IRT_JTAG_INDEX 139
#define XLP_PIC_IRT_PIC_INDEX 140
#define XLP_PIC_IRT_NBU_INDEX 141
#define XLP_PIC_IRT_TCU_INDEX 142
/* global coherency */
#define XLP_PIC_IRT_GCU_INDEX 143
#define XLP_PIC_IRT_DMC0_INDEX 144
#define XLP_PIC_IRT_DMC1_INDEX 145
#define XLP_PIC_IRT_GPIO0_INDEX 146
#define XLP_PIC_IRT_GPIO_INDEX(i) (XLP_PIC_IRT_GPIO0_INDEX + (i))
/* 146 to 149 */
#define XLP_PIC_IRT_NOR_INDEX 150
#define XLP_PIC_IRT_NAND_INDEX 151
#define XLP_PIC_IRT_SPI_INDEX 152
#define XLP_PIC_IRT_MMC_INDEX 153
#define PIC_CTRL 0x00
/* PIC control register defines */
#define XLP_PIC_ITV_OFFSET 32 /* interrupt timeout value */
#define XLP_PIC_ICI_OFFSET 19 /* ICI interrupt timeout enable */
#define XLP_PIC_ITE_OFFSET 18 /* interrupt timeout enable */
#define XLP_PIC_STE_OFFSET 10 /* system timer interrupt enable */
#define XLP_PIC_WWR1_OFFSET 8 /* watchdog timer 1 wraparound count for reset */
#define XLP_PIC_WWR0_OFFSET 6 /* watchdog timer 0 wraparound count for reset */
#define XLP_PIC_WWN1_OFFSET 4 /* watchdog timer 1 wraparound count for NMI */
#define XLP_PIC_WWN0_OFFSET 2 /* watchdog timer 0 wraparound count for NMI */
#define XLP_PIC_WTE_OFFSET 0 /* watchdog timer enable */
#define PIC_CTRL_ITV 32 /* interrupt timeout value */
#define PIC_CTRL_ICI 19 /* ICI interrupt timeout enable */
#define PIC_CTRL_ITE 18 /* interrupt timeout enable */
#define PIC_CTRL_STE 10 /* system timer interrupt enable */
#define PIC_CTRL_WWR1 8 /* watchdog 1 wraparound count for reset */
#define PIC_CTRL_WWR0 6 /* watchdog 0 wraparound count for reset */
#define PIC_CTRL_WWN1 4 /* watchdog 1 wraparound count for NMI */
#define PIC_CTRL_WWN0 2 /* watchdog 0 wraparound count for NMI */
#define PIC_CTRL_WTE 0 /* watchdog timer enable */
/* PIC Status register defines */
#define XLP_PIC_ICI_STATUS_OFFSET 33 /* ICI interrupt timeout interrupt status */
#define XLP_PIC_ITE_STATUS_OFFSET 32 /* interrupt timeout interrupt status */
#define XLP_PIC_STS_STATUS_OFFSET 4 /* System timer interrupt status */
#define XLP_PIC_WNS_STATUS_OFFSET 2 /* NMI interrupt status for watchdog timers */
#define XLP_PIC_WIS_STATUS_OFFSET 0 /* Interrupt status for watchdog timers */
#define PIC_ICI_STATUS 33 /* ICI interrupt timeout status */
#define PIC_ITE_STATUS 32 /* interrupt timeout status */
#define PIC_STS_STATUS 4 /* System timer interrupt status */
#define PIC_WNS_STATUS 2 /* NMI status for watchdog timers */
#define PIC_WIS_STATUS 0 /* Interrupt status for watchdog timers */
/* PIC IPI control register offsets */
#define XLP_PIC_IPICTRL_NMI_OFFSET 32
#define XLP_PIC_IPICTRL_RIV_OFFSET 20 /* received interrupt vector */
#define XLP_PIC_IPICTRL_IDB_OFFSET 16 /* interrupt destination base */
#define XLP_PIC_IPICTRL_DTE_OFFSET 16 /* interrupt destination thread enables */
#define PIC_IPICTRL_NMI 32
#define PIC_IPICTRL_RIV 20 /* received interrupt vector */
#define PIC_IPICTRL_IDB 16 /* interrupt destination base */
#define PIC_IPICTRL_DTE 0 /* interrupt destination thread enables */
/* PIC IRT register offsets */
#define XLP_PIC_IRT_ENABLE_OFFSET 31
#define XLP_PIC_IRT_NMI_OFFSET 29
#define XLP_PIC_IRT_SCH_OFFSET 28 /* Scheduling scheme */
#define XLP_PIC_IRT_RVEC_OFFSET 20 /* Interrupt receive vectors */
#define XLP_PIC_IRT_DT_OFFSET 19 /* Destination type */
#define XLP_PIC_IRT_DB_OFFSET 16 /* Destination base */
#define XLP_PIC_IRT_DTE_OFFSET 0 /* Destination thread enables */
#define PIC_IRT_ENABLE 31
#define PIC_IRT_NMI 29
#define PIC_IRT_SCH 28 /* Scheduling scheme */
#define PIC_IRT_RVEC 20 /* Interrupt receive vectors */
#define PIC_IRT_DT 19 /* Destination type */
#define PIC_IRT_DB 16 /* Destination base */
#define PIC_IRT_DTE 0 /* Destination thread enables */
#define XLP_PIC_MAX_IRQ 64
#define XLP_PIC_MAX_IRT 160
#define XLP_PIC_TIMER_FREQ 133000000
#define PIC_BYTESWAP 0x02
#define PIC_STATUS 0x04
#define PIC_INTR_TIMEOUT 0x06
#define PIC_ICI0_INTR_TIMEOUT 0x08
#define PIC_ICI1_INTR_TIMEOUT 0x0a
#define PIC_ICI2_INTR_TIMEOUT 0x0c
#define PIC_IPI_CTL 0x0e
#define PIC_INT_ACK 0x10
#define PIC_INT_PENDING0 0x12
#define PIC_INT_PENDING1 0x14
#define PIC_INT_PENDING2 0x16
#define PIC_WDOG0_MAXVAL 0x18
#define PIC_WDOG0_COUNT 0x1a
#define PIC_WDOG0_ENABLE0 0x1c
#define PIC_WDOG0_ENABLE1 0x1e
#define PIC_WDOG0_BEATCMD 0x20
#define PIC_WDOG0_BEAT0 0x22
#define PIC_WDOG0_BEAT1 0x24
#define PIC_WDOG1_MAXVAL 0x26
#define PIC_WDOG1_COUNT 0x28
#define PIC_WDOG1_ENABLE0 0x2a
#define PIC_WDOG1_ENABLE1 0x2c
#define PIC_WDOG1_BEATCMD 0x2e
#define PIC_WDOG1_BEAT0 0x30
#define PIC_WDOG1_BEAT1 0x32
#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
#define PIC_TIMER0_MAXVAL 0x34
#define PIC_TIMER1_MAXVAL 0x36
#define PIC_TIMER2_MAXVAL 0x38
#define PIC_TIMER3_MAXVAL 0x3a
#define PIC_TIMER4_MAXVAL 0x3c
#define PIC_TIMER5_MAXVAL 0x3e
#define PIC_TIMER6_MAXVAL 0x40
#define PIC_TIMER7_MAXVAL 0x42
#define PIC_TIMER_MAXVAL(i) (PIC_TIMER0_MAXVAL + ((i) * 2))
#define PIC_TIMER0_COUNT 0x44
#define PIC_TIMER1_COUNT 0x46
#define PIC_TIMER2_COUNT 0x48
#define PIC_TIMER3_COUNT 0x4a
#define PIC_TIMER4_COUNT 0x4c
#define PIC_TIMER5_COUNT 0x4e
#define PIC_TIMER6_COUNT 0x50
#define PIC_TIMER7_COUNT 0x52
#define PIC_TIMER_COUNT(i) (PIC_TIMER0_COUNT + ((i) * 2))
#define PIC_ITE0_N0_N1 0x54
#define PIC_ITE1_N0_N1 0x58
#define PIC_ITE2_N0_N1 0x5c
#define PIC_ITE3_N0_N1 0x60
#define PIC_ITE4_N0_N1 0x64
#define PIC_ITE5_N0_N1 0x68
#define PIC_ITE6_N0_N1 0x6c
#define PIC_ITE7_N0_N1 0x70
#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
#define PIC_ITE0_N2_N3 0x56
#define PIC_ITE1_N2_N3 0x5a
#define PIC_ITE2_N2_N3 0x5e
#define PIC_ITE3_N2_N3 0x62
#define PIC_ITE4_N2_N3 0x66
#define PIC_ITE5_N2_N3 0x6a
#define PIC_ITE6_N2_N3 0x6e
#define PIC_ITE7_N2_N3 0x72
#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
#define PIC_IRT0 0x74
#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
#define TIMER_CYCLES_MAXVAL 0xffffffffffffffffULL
/*
* IRT Map
*/
#define PIC_NUM_IRTS 160
#define PIC_IRT_WD_0_INDEX 0
#define PIC_IRT_WD_1_INDEX 1
#define PIC_IRT_WD_NMI_0_INDEX 2
#define PIC_IRT_WD_NMI_1_INDEX 3
#define PIC_IRT_TIMER_0_INDEX 4
#define PIC_IRT_TIMER_1_INDEX 5
#define PIC_IRT_TIMER_2_INDEX 6
#define PIC_IRT_TIMER_3_INDEX 7
#define PIC_IRT_TIMER_4_INDEX 8
#define PIC_IRT_TIMER_5_INDEX 9
#define PIC_IRT_TIMER_6_INDEX 10
#define PIC_IRT_TIMER_7_INDEX 11
#define PIC_IRT_CLOCK_INDEX PIC_IRT_TIMER_7_INDEX
#define PIC_IRT_TIMER_INDEX(num) ((num) + PIC_IRT_TIMER_0_INDEX)
/* 11 and 12 */
#define PIC_NUM_MSG_Q_IRTS 32
#define PIC_IRT_MSG_Q0_INDEX 12
#define PIC_IRT_MSG_Q_INDEX(qid) ((qid) + PIC_IRT_MSG_Q0_INDEX)
/* 12 to 43 */
#define PIC_IRT_MSG_0_INDEX 44
#define PIC_IRT_MSG_1_INDEX 45
/* 44 and 45 */
#define PIC_NUM_PCIE_MSIX_IRTS 32
#define PIC_IRT_PCIE_MSIX_0_INDEX 46
#define PIC_IRT_PCIE_MSIX_INDEX(num) ((num) + PIC_IRT_PCIE_MSIX_0_INDEX)
/* 46 to 77 */
#define PIC_NUM_PCIE_LINK_IRTS 4
#define PIC_IRT_PCIE_LINK_0_INDEX 78
#define PIC_IRT_PCIE_LINK_1_INDEX 79
#define PIC_IRT_PCIE_LINK_2_INDEX 80
#define PIC_IRT_PCIE_LINK_3_INDEX 81
#define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX)
/* 78 to 81 */
#define PIC_NUM_NA_IRTS 32
/* 82 to 113 */
#define PIC_IRT_NA_0_INDEX 82
#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX)
#define PIC_IRT_POE_INDEX 114
#define PIC_NUM_USB_IRTS 6
#define PIC_IRT_USB_0_INDEX 115
#define PIC_IRT_EHCI_0_INDEX 115
#define PIC_IRT_EHCI_1_INDEX 118
#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX)
/* 115 to 120 */
#define PIC_IRT_GDX_INDEX 121
#define PIC_IRT_SEC_INDEX 122
#define PIC_IRT_RSA_INDEX 123
#define PIC_NUM_COMP_IRTS 4
#define PIC_IRT_COMP_0_INDEX 124
#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX)
/* 124 to 127 */
#define PIC_IRT_GBU_INDEX 128
#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */
#define PIC_IRT_ICC_1_INDEX 130
#define PIC_IRT_ICC_2_INDEX 131
#define PIC_IRT_CAM_INDEX 132
#define PIC_IRT_UART_0_INDEX 133
#define PIC_IRT_UART_1_INDEX 134
#define PIC_IRT_I2C_0_INDEX 135
#define PIC_IRT_I2C_1_INDEX 136
#define PIC_IRT_SYS_0_INDEX 137
#define PIC_IRT_SYS_1_INDEX 138
#define PIC_IRT_JTAG_INDEX 139
#define PIC_IRT_PIC_INDEX 140
#define PIC_IRT_NBU_INDEX 141
#define PIC_IRT_TCU_INDEX 142
#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */
#define PIC_IRT_DMC_0_INDEX 144
#define PIC_IRT_DMC_1_INDEX 145
#define PIC_NUM_GPIO_IRTS 4
#define PIC_IRT_GPIO_0_INDEX 146
#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX)
/* 146 to 149 */
#define PIC_IRT_NOR_INDEX 150
#define PIC_IRT_NAND_INDEX 151
#define PIC_IRT_SPI_INDEX 152
#define PIC_IRT_MMC_INDEX 153
#define PIC_CLOCK_TIMER 7
#define PIC_IRQ_BASE 8
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#define nlm_rdreg_pic(b, r) nlm_read_reg64_kseg(b,r)
#define nlm_wreg_pic(b, r, v) nlm_write_reg64_kseg(b,r,v)
#define nlm_pcibase_pic(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
#define nlm_regbase_pic(node) nlm_pcibase_pic(node)
#define PIC_IRT_FIRST_IRQ (PIC_IRQ_BASE)
#define PIC_IRT_LAST_IRQ 63
#define PIC_IRQ_IS_IRT(irq) ((irq) >= PIC_IRT_FIRST_IRQ)
/*
* Misc
*/
#define PIC_IRT_VALID 1
#define PIC_LOCAL_SCHEDULING 1
#define PIC_GLOBAL_SCHEDULING 0
#define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r)
#define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
#define nlm_get_pic_regbase(node) (nlm_get_pic_pcibase(node) + XLP_IO_PCI_HDRSZ)
/* IRT and h/w interrupt routines */
static __inline__ int
nlm_pic_get_numirts(uint64_t pcibase)
{
return (nlm_pci_rdreg(pcibase, XLP_PCI_IRTINFO_REG) >> 16);
}
static __inline__ int
nlm_pic_get_startirt(uint64_t base)
{
return (nlm_pci_rdreg(base, XLP_PCI_IRTINFO_REG) & 0xff);
}
static __inline__ int
static inline int
nlm_pic_read_irt(uint64_t base, int irt_index)
{
return nlm_rdreg_pic(base, XLP_PIC_IRT_REG(irt_index));
return nlm_read_pic_reg(base, PIC_IRT(irt_index));
}
/* IRTs can be written in two modes:
* ITE mode - Here the destination of the interrupt is one of the
* eight interrupt-thread-enable groups, allowing the interrupt
* to be distributed to any thread on any node
* ID mode - In ID mode, the IRT has the DB and DTE fields.
* DB[18:17] hold the node select and DB[16], if set to 0 selects
* cpu-cores 0-3, and if set to 1 selects cpu-cores 4-7.
* The DTE[15:0] field is a thread mask, allowing the PIC to broadcast
* the interrupt to 1-16 threads selectable from that mask
*/
static __inline__ void
nlm_pic_write_irt_raw(uint64_t base, int irt_index, int en, int nmi, int sch,
int vec, int dt, int db, int dte)
static inline void
nlm_pic_send_ipi(uint64_t base, int cpu, int vec, int nmi)
{
uint64_t val =
(((en & 0x1) << XLP_PIC_IRT_ENABLE_OFFSET) |
((nmi & 0x1) << XLP_PIC_IRT_NMI_OFFSET) |
((sch & 0x1) << XLP_PIC_IRT_SCH_OFFSET) |
((vec & 0x3f) << XLP_PIC_IRT_RVEC_OFFSET) |
((dt & 0x1 ) << XLP_PIC_IRT_DT_OFFSET) |
((db & 0x7) << XLP_PIC_IRT_DB_OFFSET) |
(dte & 0xffff));
nlm_wreg_pic(base, XLP_PIC_IRT_REG(irt_index), val);
uint64_t ipi;
int node, ncpu;
node = cpu / 32;
ncpu = cpu & 0x1f;
ipi = ((uint64_t)nmi << 31) | (vec << 20) | (node << 17) |
(1 << (cpu & 0xf));
if (ncpu > 15)
ipi |= 0x10000; /* Setting bit 16 to select cpus 16-31 */
nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
}
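/*
 * Worked example (editor's note): nlm_pic_send_ipi(base, 20, 41, 0) targets
 * hardware thread 20, i.e. node 20/32 = 0 and ncpu = 20; since ncpu > 15,
 * bit 16 selects cpus 16-31 and the low mask is 1 << (20 & 0xf) = 1 << 4,
 * with vector 41 placed in bits 20 and up.
 */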
/* write IRT in ID mode */
static __inline__ void
nlm_pic_write_irt_id(uint64_t base, int irt_index, int en, int nmi, int vec,
int node, int cpugroup, uint32_t cpu_mask)
static inline uint64_t
nlm_pic_read_control(uint64_t base)
{
nlm_pic_write_irt_raw(base, irt_index, en, nmi, 1, vec, 1,
(node << 1) | cpugroup , cpu_mask);
return nlm_read_pic_reg(base, PIC_CTRL);
}
/* write IRT in ITE mode */
static __inline__ void
nlm_pic_write_ite(uint64_t base, int ite, uint32_t node0_thrmask,
uint32_t node1_thrmask, uint32_t node2_thrmask, uint32_t node3_thrmask)
static inline void
nlm_pic_write_control(uint64_t base, uint64_t control)
{
uint64_t tm10 = ((uint64_t)node1_thrmask << 32) | node0_thrmask;
uint64_t tm32 = ((uint64_t)node3_thrmask << 32) | node2_thrmask;
/* Enable the ITE register for all nodes */
nlm_wreg_pic(base, XLP_PIC_ITE_N0_N1_REG(ite), tm10);
nlm_wreg_pic(base, XLP_PIC_ITE_N2_N3_REG(ite), tm32);
nlm_write_pic_reg(base, PIC_CTRL, control);
}
static __inline__ void
nlm_pic_write_irt_ite(uint64_t base, int irt_index, int ite, int en, int nmi,
int sch, int vec)
static inline void
nlm_pic_update_control(uint64_t base, uint64_t control)
{
nlm_pic_write_irt_raw(base, irt_index, en, nmi, sch, vec, 0, ite, 0);
uint64_t val;
val = nlm_read_pic_reg(base, PIC_CTRL);
nlm_write_pic_reg(base, PIC_CTRL, control | val);
}
/* Go to the PIC on that node and ack the interrupt */
static __inline__ void nlm_pic_ack(uint64_t src_base, int irt)
static inline void
nlm_pic_ack(uint64_t base, int irt_num)
{
nlm_wreg_pic(src_base, XLP_PIC_INT_ACK_REG, irt);
/* ack in the status registers for watchdog and system timers */
if (irt < 12)
nlm_wreg_pic(src_base, XLP_PIC_STATUS_REG, (1 << irt));
nlm_write_pic_reg(base, PIC_INT_ACK, irt_num);
/* Ack the Status register for Watchdog & System timers */
if (irt_num < 12)
nlm_write_pic_reg(base, PIC_STATUS, (1 << irt_num));
}
/* IPI routines */
static __inline__ void
nlm_pic_send_ipi(uint64_t local_base, int target_node, int vcpu, int vec, int nmi)
static inline void
nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu)
{
uint64_t ipi =
(((uint64_t)nmi << XLP_PIC_IPICTRL_NMI_OFFSET) |
(vec << XLP_PIC_IPICTRL_RIV_OFFSET) |
(target_node << 17) |
(1 << (vcpu & 0xf)));
if (vcpu > 15)
ipi |= 0x10000; /* set bit 16 to select cpus 16-31 */
uint64_t val;
nlm_wreg_pic(local_base, XLP_PIC_IPI_CTRL_REG, ipi);
val = nlm_read_pic_reg(base, PIC_IRT(irt));
val |= cpu & 0xf;
if (cpu > 15)
val |= 1 << 16;
nlm_write_pic_reg(base, PIC_IRT(irt), val);
}
/* System timer routines -- broadcast the system timer interrupt to the 16 vcpus defined in cpu_mask */
static __inline__ void
nlm_pic_set_systimer(uint64_t base, int timer, uint64_t value, int irq, int node,
int cpugroup, uint32_t cpumask)
static inline void
nlm_pic_write_irt(uint64_t base, int irt_num, int en, int nmi,
int sch, int vec, int dt, int db, int dte)
{
uint64_t pic_ctrl = nlm_rdreg_pic(base, XLP_PIC_CTRL_REG);
uint64_t val;
val = (((uint64_t)en & 0x1) << 31) | ((nmi & 0x1) << 29) |
((sch & 0x1) << 28) | ((vec & 0x3f) << 20) |
((dt & 0x1) << 19) | ((db & 0x7) << 16) |
(dte & 0xffff);
nlm_write_pic_reg(base, PIC_IRT(irt_num), val);
}
static inline void
nlm_pic_write_irt_direct(uint64_t base, int irt_num, int en, int nmi,
int sch, int vec, int cpu)
{
nlm_pic_write_irt(base, irt_num, en, nmi, sch, vec, 1,
(cpu >> 4), /* thread group */
1 << (cpu & 0xf)); /* thread mask */
}
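/*
 * Worked example (editor's note): nlm_pic_write_irt_direct() splits the cpu
 * number into a thread group (cpu >> 4) and a 16-bit thread mask
 * (1 << (cpu & 0xf)).  Routing an IRT entry to cpu 20 therefore uses
 * db = 1 (threads 16-31 of the node) and dte = 1 << 4.
 */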
static inline uint64_t
nlm_pic_read_timer(uint64_t base, int timer)
{
return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
}
static inline void
nlm_pic_write_timer(uint64_t base, int timer, uint64_t value)
{
nlm_write_pic_reg(base, PIC_TIMER_COUNT(timer), value);
}
static inline void
nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
{
uint64_t pic_ctrl = nlm_read_pic_reg(base, PIC_CTRL);
int en;
en = (cpumask != 0);
nlm_wreg_pic(base, XLP_PIC_SYSTIMER_MAXVAL_REG(timer), value);
nlm_pic_write_irt_id(base, XLP_PIC_IRT_TIMER_INDEX(timer),
en, 0, irq, node, cpugroup, cpumask);
en = (irq > 0);
nlm_write_pic_reg(base, PIC_TIMER_MAXVAL(timer), value);
nlm_pic_write_irt_direct(base, PIC_IRT_TIMER_INDEX(timer),
en, 0, 0, irq, cpu);
/* enable the timer */
pic_ctrl |= (1 << (XLP_PIC_STE_OFFSET+timer));
nlm_wreg_pic(base, XLP_PIC_CTRL_REG, pic_ctrl);
pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
nlm_write_pic_reg(base, PIC_CTRL, pic_ctrl);
}
static __inline__ uint64_t
nlm_pic_read_systimer(uint64_t base, int timer)
{
return nlm_rdreg_pic(base, XLP_PIC_SYSTIMER_COUNT_REG(timer));
}
/* Watchdog timer routines */
/* node - XLP node
* timer - watchdog timer. valid values are 0 and 1
* wrap_around_count - defines the number of times the watchdog timer can wrap-around
* after which the reset / NMI gets generated to the threads defined in thread-enable-masks.
* value - the watchdog timer max value, from which the timer will count down
*/
static __inline__ void
nlm_pic_set_wdogtimer(uint64_t base, int timer, int wrap_around_count, int nmi,
uint32_t node0_thrmask, uint32_t node1_thrmask,
uint32_t node2_thrmask, uint32_t node3_thrmask, uint64_t value)
{
uint64_t pic_ctrl = nlm_rdreg_pic(base, XLP_PIC_CTRL_REG);
uint64_t mask0, mask1;
if (timer > 1 || wrap_around_count > 3)
return;
/* enable watchdog timer interrupt */
pic_ctrl |= (((1 << timer) & 0xf));
if (timer) {
if (nmi)
pic_ctrl |= (wrap_around_count << XLP_PIC_WWN1_OFFSET);
else
pic_ctrl |= (wrap_around_count << XLP_PIC_WWN0_OFFSET);
} else {
if (nmi)
pic_ctrl |= (wrap_around_count << XLP_PIC_WWR1_OFFSET);
else
pic_ctrl |= (wrap_around_count << XLP_PIC_WWR0_OFFSET);
}
mask0 = ((unsigned long long)node1_thrmask << 32) | node0_thrmask;
mask1 = ((unsigned long long)node3_thrmask << 32) | node2_thrmask;
nlm_wreg_pic(base, XLP_PIC_WDOG_MAXVAL_REG(timer), value);
nlm_wreg_pic(base, XLP_PIC_WDOG_ENABLE0_REG(timer), mask0);
nlm_wreg_pic(base, XLP_PIC_WDOG_ENABLE1_REG(timer), mask1);
nlm_wreg_pic(base, XLP_PIC_CTRL_REG, pic_ctrl);
}
/* Watchdogs need to be "stroked" by heartbeats from vcpus.
* On XLP, the heartbeat bit for a specific cpu thread on a specific
* node is set according to the following formula:
* 32N + 4C + T
* where N = node, C=cpu-core number, T=thread number
*
* src_node = source node of watchdog timer interrupts. These interrupts
* get generated from the PIC on src_node.
* timer = watchdog timer 0 or 1
* node = node for which the heartbeat is being done
* cpu = cpu-core for which the heartbeat is being done
* thread = h/w thread for which the heartbeat is being done
*/
static __inline__ void
nlm_pic_set_wdog_heartbeat(uint64_t base, int timer, int node, int cpu,
int thread)
{
int val = 32 * node + 4 * cpu + thread;
nlm_wreg_pic(base, XLP_PIC_WDOG_BEATCMD_REG(timer), val);
}
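/*
 * Worked example (editor's note): stroking watchdog 0 from node 1, core 2,
 * thread 3 writes 32*1 + 4*2 + 3 = 43 to the BEATCMD register.
 */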
#endif /* !LOCORE && !__ASSEMBLY__ */
#endif
#endif /* __ASSEMBLY__ */
#endif /* _NLM_HAL_PIC_H */


@ -25,101 +25,101 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_SYS_H__
#define __NLM_SYS_H__
#ifndef __NLM_HAL_SYS_H__
#define __NLM_HAL_SYS_H__
/**
* @file_name sys.h
* @author Netlogic Microsystems
* @brief HAL for System configuration registers
*/
#define XLP_SYS_CHIP_RESET_REG 0x40
#define XLP_SYS_POWER_ON_RESET_REG 0x41
#define XLP_SYS_EFUSE_DEVICE_CFG_STATUS0_REG 0x42
#define XLP_SYS_EFUSE_DEVICE_CFG_STATUS1_REG 0x43
#define XLP_SYS_EFUSE_DEVICE_CFG_STATUS2_REG 0x44
#define XLP_SYS_EFUSE_DEVICE_CFG3_REG 0x45
#define XLP_SYS_EFUSE_DEVICE_CFG4_REG 0x46
#define XLP_SYS_EFUSE_DEVICE_CFG5_REG 0x47
#define XLP_SYS_EFUSE_DEVICE_CFG6_REG 0x48
#define XLP_SYS_EFUSE_DEVICE_CFG7_REG 0x49
#define XLP_SYS_PLL_CTRL_REG 0x4a
#define XLP_SYS_CPU_RESET_REG 0x4b
#define XLP_SYS_CPU_NONCOHERENT_MODE_REG 0x4d
#define XLP_SYS_CORE_DFS_DIS_CTRL_REG 0x4e
#define XLP_SYS_CORE_DFS_RST_CTRL_REG 0x4f
#define XLP_SYS_CORE_DFS_BYP_CTRL_REG 0x50
#define XLP_SYS_CORE_DFS_PHA_CTRL_REG 0x51
#define XLP_SYS_CORE_DFS_DIV_INC_CTRL_REG 0x52
#define XLP_SYS_CORE_DFS_DIV_DEC_CTRL_REG 0x53
#define XLP_SYS_CORE_DFS_DIV_VALUE_REG 0x54
#define XLP_SYS_RESET_REG 0x55
#define XLP_SYS_DFS_DIS_CTRL_REG 0x56
#define XLP_SYS_DFS_RST_CTRL_REG 0x57
#define XLP_SYS_DFS_BYP_CTRL_REG 0x58
#define XLP_SYS_DFS_DIV_INC_CTRL_REG 0x59
#define XLP_SYS_DFS_DIV_DEC_CTRL_REG 0x5a
#define XLP_SYS_DFS_DIV_VALUE0_REG 0x5b
#define XLP_SYS_DFS_DIV_VALUE1_REG 0x5c
#define XLP_SYS_SENSE_AMP_DLY_REG 0x5d
#define XLP_SYS_SOC_SENSE_AMP_DLY_REG 0x5e
#define XLP_SYS_CTRL0_REG 0x5f
#define XLP_SYS_CTRL1_REG 0x60
#define XLP_SYS_TIMEOUT_BS1_REG 0x61
#define XLP_SYS_BYTE_SWAP_REG 0x62
#define XLP_SYS_VRM_VID_REG 0x63
#define XLP_SYS_PWR_RAM_CMD_REG 0x64
#define XLP_SYS_PWR_RAM_ADDR_REG 0x65
#define XLP_SYS_PWR_RAM_DATA0_REG 0x66
#define XLP_SYS_PWR_RAM_DATA1_REG 0x67
#define XLP_SYS_PWR_RAM_DATA2_REG 0x68
#define XLP_SYS_PWR_UCODE_REG 0x69
#define XLP_SYS_CPU0_PWR_STATUS_REG 0x6a
#define XLP_SYS_CPU1_PWR_STATUS_REG 0x6b
#define XLP_SYS_CPU2_PWR_STATUS_REG 0x6c
#define XLP_SYS_CPU3_PWR_STATUS_REG 0x6d
#define XLP_SYS_CPU4_PWR_STATUS_REG 0x6e
#define XLP_SYS_CPU5_PWR_STATUS_REG 0x6f
#define XLP_SYS_CPU6_PWR_STATUS_REG 0x70
#define XLP_SYS_CPU7_PWR_STATUS_REG 0x71
#define XLP_SYS_STATUS_REG 0x72
#define XLP_SYS_INT_POL_REG 0x73
#define XLP_SYS_INT_TYPE_REG 0x74
#define XLP_SYS_INT_STATUS_REG 0x75
#define XLP_SYS_INT_MASK0_REG 0x76
#define XLP_SYS_INT_MASK1_REG 0x77
#define XLP_SYS_UCO_S_ECC_REG 0x78
#define XLP_SYS_UCO_M_ECC_REG 0x79
#define XLP_SYS_UCO_ADDR_REG 0x7a
#define XLP_SYS_UCO_INSTR_REG 0x7b
#define XLP_SYS_MEM_BIST0_REG 0x7c
#define XLP_SYS_MEM_BIST1_REG 0x7d
#define XLP_SYS_MEM_BIST2_REG 0x7e
#define XLP_SYS_MEM_BIST3_REG 0x7f
#define XLP_SYS_MEM_BIST4_REG 0x80
#define XLP_SYS_MEM_BIST5_REG 0x81
#define XLP_SYS_MEM_BIST6_REG 0x82
#define XLP_SYS_MEM_BIST7_REG 0x83
#define XLP_SYS_MEM_BIST8_REG 0x84
#define XLP_SYS_MEM_BIST9_REG 0x85
#define XLP_SYS_MEM_BIST10_REG 0x86
#define XLP_SYS_MEM_BIST11_REG 0x87
#define XLP_SYS_MEM_BIST12_REG 0x88
#define XLP_SYS_SCRTCH0_REG 0x89
#define XLP_SYS_SCRTCH1_REG 0x8a
#define XLP_SYS_SCRTCH2_REG 0x8b
#define XLP_SYS_SCRTCH3_REG 0x8c
#define SYS_CHIP_RESET 0x00
#define SYS_POWER_ON_RESET_CFG 0x01
#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
#define SYS_EFUSE_DEVICE_CFG3 0x05
#define SYS_EFUSE_DEVICE_CFG4 0x06
#define SYS_EFUSE_DEVICE_CFG5 0x07
#define SYS_EFUSE_DEVICE_CFG6 0x08
#define SYS_EFUSE_DEVICE_CFG7 0x09
#define SYS_PLL_CTRL 0x0a
#define SYS_CPU_RESET 0x0b
#define SYS_CPU_NONCOHERENT_MODE 0x0d
#define SYS_CORE_DFS_DIS_CTRL 0x0e
#define SYS_CORE_DFS_RST_CTRL 0x0f
#define SYS_CORE_DFS_BYP_CTRL 0x10
#define SYS_CORE_DFS_PHA_CTRL 0x11
#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
#define SYS_CORE_DFS_DIV_VALUE 0x14
#define SYS_RESET 0x15
#define SYS_DFS_DIS_CTRL 0x16
#define SYS_DFS_RST_CTRL 0x17
#define SYS_DFS_BYP_CTRL 0x18
#define SYS_DFS_DIV_INC_CTRL 0x19
#define SYS_DFS_DIV_DEC_CTRL 0x1a
#define SYS_DFS_DIV_VALUE0 0x1b
#define SYS_DFS_DIV_VALUE1 0x1c
#define SYS_SENSE_AMP_DLY 0x1d
#define SYS_SOC_SENSE_AMP_DLY 0x1e
#define SYS_CTRL0 0x1f
#define SYS_CTRL1 0x20
#define SYS_TIMEOUT_BS1 0x21
#define SYS_BYTE_SWAP 0x22
#define SYS_VRM_VID 0x23
#define SYS_PWR_RAM_CMD 0x24
#define SYS_PWR_RAM_ADDR 0x25
#define SYS_PWR_RAM_DATA0 0x26
#define SYS_PWR_RAM_DATA1 0x27
#define SYS_PWR_RAM_DATA2 0x28
#define SYS_PWR_UCODE 0x29
#define SYS_CPU0_PWR_STATUS 0x2a
#define SYS_CPU1_PWR_STATUS 0x2b
#define SYS_CPU2_PWR_STATUS 0x2c
#define SYS_CPU3_PWR_STATUS 0x2d
#define SYS_CPU4_PWR_STATUS 0x2e
#define SYS_CPU5_PWR_STATUS 0x2f
#define SYS_CPU6_PWR_STATUS 0x30
#define SYS_CPU7_PWR_STATUS 0x31
#define SYS_STATUS 0x32
#define SYS_INT_POL 0x33
#define SYS_INT_TYPE 0x34
#define SYS_INT_STATUS 0x35
#define SYS_INT_MASK0 0x36
#define SYS_INT_MASK1 0x37
#define SYS_UCO_S_ECC 0x38
#define SYS_UCO_M_ECC 0x39
#define SYS_UCO_ADDR 0x3a
#define SYS_UCO_INSTR 0x3b
#define SYS_MEM_BIST0 0x3c
#define SYS_MEM_BIST1 0x3d
#define SYS_MEM_BIST2 0x3e
#define SYS_MEM_BIST3 0x3f
#define SYS_MEM_BIST4 0x40
#define SYS_MEM_BIST5 0x41
#define SYS_MEM_BIST6 0x42
#define SYS_MEM_BIST7 0x43
#define SYS_MEM_BIST8 0x44
#define SYS_MEM_BIST9 0x45
#define SYS_MEM_BIST10 0x46
#define SYS_MEM_BIST11 0x47
#define SYS_MEM_BIST12 0x48
#define SYS_SCRTCH0 0x49
#define SYS_SCRTCH1 0x4a
#define SYS_SCRTCH2 0x4b
#define SYS_SCRTCH3 0x4c
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#define nlm_rdreg_sys(b, r) nlm_read_reg_kseg(b,r)
#define nlm_wreg_sys(b, r, v) nlm_write_reg_kseg(b,r,v)
#define nlm_pcibase_sys(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
#define nlm_regbase_sys(node) nlm_pcibase_sys(node)
#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif
#endif


@ -25,127 +25,121 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __XLP_UART_H__
#define __XLP_UART_H__
#ifndef __XLP_HAL_UART_H__
#define __XLP_HAL_UART_H__
/* UART Specific registers */
#define XLP_UART_RX_DATA_REG 0x40
#define XLP_UART_TX_DATA_REG 0x40
#define UART_RX_DATA 0x00
#define UART_TX_DATA 0x00
#define XLP_UART_INT_EN_REG 0x41
#define XLP_UART_INT_ID_REG 0x42
#define XLP_UART_FIFO_CTL_REG 0x42
#define XLP_UART_LINE_CTL_REG 0x43
#define XLP_UART_MODEM_CTL_REG 0x44
#define XLP_UART_LINE_STS_REG 0x45
#define XLP_UART_MODEM_STS_REG 0x46
#define UART_INT_EN 0x01
#define UART_INT_ID 0x02
#define UART_FIFO_CTL 0x02
#define UART_LINE_CTL 0x03
#define UART_MODEM_CTL 0x04
#define UART_LINE_STS 0x05
#define UART_MODEM_STS 0x06
#define XLP_UART_DIVISOR0_REG 0x40
#define XLP_UART_DIVISOR1_REG 0x41
#define UART_DIVISOR0 0x00
#define UART_DIVISOR1 0x01
#define XLP_UART_BASE_BAUD (133000000/16)
#define XLP_UART_BAUD_DIVISOR(baud) (XLP_UART_BASE_BAUD / baud)
#define BASE_BAUD (XLP_IO_CLK/16)
#define BAUD_DIVISOR(baud) (BASE_BAUD / baud)
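/*
 * Worked example (editor's note, assuming XLP_IO_CLK is the 133 MHz
 * reference clock used elsewhere in this commit): BASE_BAUD is
 * 133000000/16 = 8312500, so BAUD_DIVISOR(115200) evaluates to 72.
 */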
/* LCR mask values */
#define LCR_5BITS 0x00
#define LCR_6BITS 0x01
#define LCR_7BITS 0x02
#define LCR_8BITS 0x03
#define LCR_STOPB 0x04
#define LCR_PENAB 0x08
#define LCR_PODD 0x00
#define LCR_PEVEN 0x10
#define LCR_PONE 0x20
#define LCR_PZERO 0x30
#define LCR_SBREAK 0x40
#define LCR_EFR_ENABLE 0xbf
#define LCR_DLAB 0x80
#define LCR_5BITS 0x00
#define LCR_6BITS 0x01
#define LCR_7BITS 0x02
#define LCR_8BITS 0x03
#define LCR_STOPB 0x04
#define LCR_PENAB 0x08
#define LCR_PODD 0x00
#define LCR_PEVEN 0x10
#define LCR_PONE 0x20
#define LCR_PZERO 0x30
#define LCR_SBREAK 0x40
#define LCR_EFR_ENABLE 0xbf
#define LCR_DLAB 0x80
/* MCR mask values */
#define MCR_DTR 0x01
#define MCR_RTS 0x02
#define MCR_DRS 0x04
#define MCR_IE 0x08
#define MCR_LOOPBACK 0x10
#define MCR_DTR 0x01
#define MCR_RTS 0x02
#define MCR_DRS 0x04
#define MCR_IE 0x08
#define MCR_LOOPBACK 0x10
/* FCR mask values */
#define FCR_RCV_RST 0x02
#define FCR_XMT_RST 0x04
#define FCR_RX_LOW 0x00
#define FCR_RX_MEDL 0x40
#define FCR_RX_MEDH 0x80
#define FCR_RX_HIGH 0xc0
#define FCR_RCV_RST 0x02
#define FCR_XMT_RST 0x04
#define FCR_RX_LOW 0x00
#define FCR_RX_MEDL 0x40
#define FCR_RX_MEDH 0x80
#define FCR_RX_HIGH 0xc0
/* IER mask values */
#define IER_ERXRDY 0x1
#define IER_ETXRDY 0x2
#define IER_ERLS 0x4
#define IER_EMSC 0x8
/* uart IRQ info */
#define XLP_NODE0_UART0_IRQ 17
#define XLP_NODE1_UART0_IRQ 18
#define XLP_NODE2_UART0_IRQ 19
#define XLP_NODE3_UART0_IRQ 20
#define XLP_NODE0_UART1_IRQ 21
#define XLP_NODE1_UART1_IRQ 22
#define XLP_NODE2_UART1_IRQ 23
#define XLP_NODE3_UART1_IRQ 24
#define IER_ERXRDY 0x1
#define IER_ETXRDY 0x2
#define IER_ERLS 0x4
#define IER_EMSC 0x8
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#define nlm_rdreg_uart(b, r) nlm_read_reg_kseg(b,r)
#define nlm_wreg_uart(b, r, v) nlm_write_reg_kseg(b,r,v)
#define nlm_pcibase_uart(node, inst) nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
#define nlm_regbase_uart(node, inst) nlm_pcibase_uart(node, inst)
#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_uart_pcibase(node, inst) \
nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
#define nlm_get_uart_regbase(node, inst) \
(nlm_get_uart_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
static __inline__ void
static inline void
nlm_uart_set_baudrate(uint64_t base, int baud)
{
uint32_t lcr;
lcr = nlm_rdreg_uart(base, XLP_UART_LINE_CTL_REG);
lcr = nlm_read_uart_reg(base, UART_LINE_CTL);
/* enable divisor register, and write baud values */
nlm_wreg_uart(base, XLP_UART_LINE_CTL_REG, lcr | (1 << 7));
nlm_wreg_uart(base, XLP_UART_DIVISOR0_REG,
(XLP_UART_BAUD_DIVISOR(baud) & 0xff));
nlm_wreg_uart(base, XLP_UART_DIVISOR1_REG,
((XLP_UART_BAUD_DIVISOR(baud) >> 8) & 0xff));
nlm_write_uart_reg(base, UART_LINE_CTL, lcr | (1 << 7));
nlm_write_uart_reg(base, UART_DIVISOR0,
(BAUD_DIVISOR(baud) & 0xff));
nlm_write_uart_reg(base, UART_DIVISOR1,
((BAUD_DIVISOR(baud) >> 8) & 0xff));
/* restore default lcr */
nlm_wreg_uart(base, XLP_UART_LINE_CTL_REG, lcr);
nlm_write_uart_reg(base, UART_LINE_CTL, lcr);
}
static __inline__ void
nlm_outbyte (uint64_t base, char c)
static inline void
nlm_uart_outbyte(uint64_t base, char c)
{
uint32_t lsr;
for (;;) {
lsr = nlm_rdreg_uart(base, XLP_UART_LINE_STS_REG);
if (lsr & 0x20) break;
lsr = nlm_read_uart_reg(base, UART_LINE_STS);
if (lsr & 0x20)
break;
}
nlm_wreg_uart(base, XLP_UART_TX_DATA_REG, (int)c);
nlm_write_uart_reg(base, UART_TX_DATA, (int)c);
}
static __inline__ char
nlm_inbyte (uint64_t base)
static inline char
nlm_uart_inbyte(uint64_t base)
{
int data, lsr;
for(;;) {
lsr = nlm_rdreg_uart(base, XLP_UART_LINE_STS_REG);
if (lsr & 0x80) { /* parity/frame/break-error - push a zero */
for (;;) {
lsr = nlm_read_uart_reg(base, UART_LINE_STS);
if (lsr & 0x80) { /* parity/frame/break-error - push a zero */
data = 0;
break;
}
if (lsr & 0x01) { /* Rx data */
data = nlm_rdreg_uart(base, XLP_UART_RX_DATA_REG);
if (lsr & 0x01) { /* Rx data */
data = nlm_read_uart_reg(base, UART_RX_DATA);
break;
}
}
@ -153,9 +147,9 @@ nlm_inbyte (uint64_t base)
return (char)data;
}
static __inline__ int
static inline int
nlm_uart_init(uint64_t base, int baud, int databits, int stopbits,
int parity, int int_en, int loopback)
int parity, int int_en, int loopback)
{
uint32_t lcr;
@ -175,22 +169,20 @@ nlm_uart_init(uint64_t base, int baud, int databits, int stopbits,
lcr |= parity << 3;
/* setup default lcr */
nlm_wreg_uart(base, XLP_UART_LINE_CTL_REG, lcr);
nlm_write_uart_reg(base, UART_LINE_CTL, lcr);
/* Reset the FIFOs */
nlm_wreg_uart(base, XLP_UART_LINE_CTL_REG, FCR_RCV_RST | FCR_XMT_RST);
nlm_write_uart_reg(base, UART_LINE_CTL, FCR_RCV_RST | FCR_XMT_RST);
nlm_uart_set_baudrate(base, baud);
if (loopback)
nlm_wreg_uart(base, XLP_UART_MODEM_CTL_REG, 0x1f);
nlm_write_uart_reg(base, UART_MODEM_CTL, 0x1f);
if (int_en)
nlm_wreg_uart(base, XLP_UART_INT_EN_REG, IER_ERXRDY | IER_ETXRDY);
nlm_write_uart_reg(base, UART_INT_EN, IER_ERXRDY | IER_ETXRDY);
return 0;
}
#endif /* !LOCORE && !__ASSEMBLY__ */
#endif /* __XLP_UART_H__ */
#endif /* __XLP_HAL_UART_H__ */
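For illustration, a minimal sketch of how the helpers above compose for an early console. The 115200/8N1 settings and the exact encoding of the databits/stopbits/parity arguments are assumptions, and xlp_uart_example is a hypothetical function, not part of this header.
static void
xlp_uart_example(void)
{
	uint64_t ubase;
	const char *msg = "XLP early console up\n";

	/* node 0, UART instance 0; registers start past the PCIe config header */
	ubase = nlm_get_uart_regbase(0, 0);

	/*
	 * assumed arguments: 115200 baud, 8 data bits, 1 stop bit, no parity,
	 * interrupts and loopback disabled
	 */
	nlm_uart_init(ubase, 115200, 8, 1, 0, 0, 0);

	while (*msg != '\0')
		nlm_uart_outbyte(ubase, *msg++);
}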

sys/mips/nlm/hal/usb.h Normal file
View File

@ -0,0 +1,59 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
*/
#ifndef __NLM_USB_H__
#define __NLM_USB_H__
#define USB_CTL_0 0x01
#define USB_PHY_0 0x0A
#define USB_PHY_RESET 0x01
#define USB_PHY_PORT_RESET_0 0x10
#define USB_PHY_PORT_RESET_1 0x20
#define USB_CONTROLLER_RESET 0x01
#define USB_INT_STATUS 0x0E
#define USB_INT_EN 0x0F
#define USB_PHY_INTERRUPT_EN 0x01
#define USB_OHCI_INTERRUPT_EN 0x02
#define USB_OHCI_INTERRUPT1_EN 0x04
#define USB_OHCI_INTERRUPT2_EN 0x08
#define USB_CTRL_INTERRUPT_EN 0x10
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
#define nlm_read_usb_reg(b, r) nlm_read_reg(b,r)
#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b,r,v)
#define nlm_get_usb_pcibase(node, inst) nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
#define nlm_get_usb_hcd_base(node, inst) nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst))
#define nlm_get_usb_regbase(node, inst) (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
#endif
#endif

sys/mips/nlm/intern_dev.c Normal file
View File

@ -0,0 +1,86 @@
/*-
* Copyright (c) 2011 Netlogic Microsystems Inc.
*
* (based on pci/ignore_pci.c)
* Copyright (c) 2000 Michael Smith <msmith@freebsd.org>
* Copyright (c) 2000 BSDi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* 'Ignore' driver - eats devices that show up erroneously on PCI
* but shouldn't ever be listed or handled by a driver.
*/
#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
static int nlm_soc_pci_probe(device_t dev);
static device_method_t nlm_soc_pci_methods[] = {
DEVMETHOD(device_probe, nlm_soc_pci_probe),
DEVMETHOD(device_attach, bus_generic_attach),
{ 0, 0 }
};
static driver_t nlm_soc_pci_driver = {
"nlm_soc_pci",
nlm_soc_pci_methods,
0,
};
static devclass_t nlm_soc_pci_devclass;
DRIVER_MODULE(nlm_soc_pci, pci, nlm_soc_pci_driver, nlm_soc_pci_devclass, 0, 0);
static int
nlm_soc_pci_probe(device_t dev)
{
if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC)
return(ENXIO);
/* Ignore SoC internal devices */
switch (pci_get_device(dev)) {
case PCI_DEVICE_ID_NLM_ICI:
case PCI_DEVICE_ID_NLM_PIC:
case PCI_DEVICE_ID_NLM_FMN:
device_set_desc(dev, "Netlogic Internal");
device_quiet(dev);
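/*
 * Claim the device with a deliberately low probe priority so it is
 * attached quietly, while a more specific driver (if any) can still
 * win the probe.
 */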
return(-10000);
default:
return(ENXIO);
}
}

View File

@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef _RMI_INTERRUPT_H_
#define _RMI_INTERRUPT_H_

View File

@ -46,9 +46,9 @@ __FBSDID("$FreeBSD$");
#include <machine/trap.h>
#include <machine/hwfunc.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/cop0.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/interrupt.h>
#include <mips/nlm/hal/pic.h>
#include <mips/nlm/xlp.h>

View File

@ -1,233 +0,0 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#define __RMAN_RESOURCE_VISIBLE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/reboot.h>
#include <sys/rman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/cpu.h>
#include <machine/bus.h>
#include <machine/intr_machdep.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/pic.h>
#include <mips/nlm/hal/uart.h>
#include <mips/nlm/hal/cop2.h>
#include <mips/nlm/hal/fmn.h>
#include <mips/nlm/msgring.h>
#include <mips/nlm/xlp.h>
#include <mips/nlm/board.h>
extern void iodi_activateirqs(void);
extern bus_space_tag_t uart_bus_space_mem;
static struct resource *iodi_alloc_resource(device_t, device_t, int, int *,
u_long, u_long, u_long, u_int);
static int iodi_activate_resource(device_t, device_t, int, int,
struct resource *);
struct iodi_softc *iodi_softc; /* There can be only one. */
static int
iodi_setup_intr(device_t dev, device_t child,
struct resource *ires, int flags, driver_filter_t *filt,
driver_intr_t *intr, void *arg, void **cookiep)
{
const char *name = device_get_name(child);
int unit = device_get_unit(child);
if (strcmp(name, "uart") == 0) {
/* Note: in xlp, all pic interrupts are level triggered */
nlm_pic_write_irt_id(xlp_pic_base, XLP_PIC_IRT_UART0_INDEX, 1, 0,
xlp_irt_to_irq(XLP_PIC_IRT_UART0_INDEX), 0, 0, 0x1);
cpu_establish_hardintr("uart", filt, intr, arg,
xlp_irt_to_irq(XLP_PIC_IRT_UART0_INDEX), flags, cookiep);
} else if (strcmp(name, "ehci") == 0) {
if (unit == 0) {
nlm_pic_write_irt_id(xlp_pic_base, XLP_PIC_IRT_EHCI0_INDEX, 1, 0,
xlp_irt_to_irq(XLP_PIC_IRT_EHCI0_INDEX), 0, 0, 0x1);
cpu_establish_hardintr("ehci0", filt, intr, arg,
xlp_irt_to_irq(XLP_PIC_IRT_EHCI0_INDEX), flags, cookiep);
} else if (unit == 1) {
nlm_pic_write_irt_id(xlp_pic_base, XLP_PIC_IRT_EHCI1_INDEX, 1, 0,
xlp_irt_to_irq(XLP_PIC_IRT_EHCI1_INDEX), 0, 0, 0x1);
cpu_establish_hardintr("ehci1", filt, intr, arg,
xlp_irt_to_irq(XLP_PIC_IRT_EHCI1_INDEX), flags, cookiep);
}
} else if (strcmp(name, "xlp_sdhci") == 0) {
nlm_pic_write_irt_id(xlp_pic_base, XLP_PIC_IRT_MMC_INDEX, 1, 0,
xlp_irt_to_irq(XLP_PIC_IRT_MMC_INDEX), 0, 0, 0x1);
cpu_establish_hardintr("xlp_sdhci", filt, intr, arg,
xlp_irt_to_irq(XLP_PIC_IRT_MMC_INDEX), flags, cookiep);
}
return (0);
}
static struct resource *
iodi_alloc_resource(device_t bus, device_t child, int type, int *rid,
u_long start, u_long end, u_long count, u_int flags)
{
struct resource *res = malloc(sizeof(*res), M_DEVBUF, M_WAITOK);
const char *name = device_get_name(child);
int unit;
switch (type) {
case SYS_RES_IRQ:
device_printf(bus, "IRQ resource - for %s %lx-%lx\n",
device_get_nameunit(child), start, end);
break;
case SYS_RES_IOPORT:
device_printf(bus, "IOPORT resource - for %s %lx-%lx\n",
device_get_nameunit(child), start, end);
break;
case SYS_RES_MEMORY:
device_printf(bus, "MEMORY resource - for %s %lx-%lx\n",
device_get_nameunit(child), start, end);
break;
}
unit = device_get_unit(child);
if (strcmp(name, "uart") == 0) {
if (unit == 0) {
res->r_bushandle = nlm_regbase_uart(0, 0) + XLP_IO_PCI_HDRSZ;
} else if ( unit == 1) {
res->r_bushandle = nlm_regbase_uart(0, 1) + XLP_IO_PCI_HDRSZ;
} else
printf("%s: Unknown uart unit\n", __FUNCTION__);
res->r_bustag = uart_bus_space_mem;
}
return (res);
}
static int
iodi_activate_resource(device_t bus, device_t child, int type, int rid,
struct resource *r)
{
return (0);
}
/* prototypes */
static int iodi_probe(device_t);
static int iodi_attach(device_t);
static void iodi_identify(driver_t *, device_t);
int
iodi_probe(device_t dev)
{
return 0;
}
void
iodi_identify(driver_t *driver, device_t parent)
{
BUS_ADD_CHILD(parent, 0, "iodi", 0);
}
int
iodi_attach(device_t dev)
{
device_t tmpd;
char desc[32];
int i;
device_printf(dev, "IODI - Initialize message ring.\n");
xlp_msgring_iodi_config();
/*
* Attach each devices
*/
device_add_child(dev, "uart", 0);
device_add_child(dev, "xlp_i2c", 0);
device_add_child(dev, "xlp_i2c", 1);
device_add_child(dev, "ehci", 0);
device_add_child(dev, "ehci", 1);
device_add_child(dev, "xlp_sdhci", 0);
for (i=0; i < XLP_NUM_NODES; i++) {
tmpd = device_add_child(dev, "xlpnae", i);
device_set_ivars(tmpd, &xlp_board_info.nodes[i].nae_ivars);
snprintf(desc, sizeof(desc), "XLP NAE %d", i);
device_set_desc_copy(tmpd, desc);
}
bus_generic_probe(dev);
bus_generic_attach(dev);
return 0;
}
static device_method_t iodi_methods[] = {
DEVMETHOD(device_probe, iodi_probe),
DEVMETHOD(device_attach, iodi_attach),
DEVMETHOD(device_identify, iodi_identify),
DEVMETHOD(bus_alloc_resource, iodi_alloc_resource),
DEVMETHOD(bus_activate_resource, iodi_activate_resource),
DEVMETHOD(bus_add_child, bus_generic_add_child),
DEVMETHOD(bus_setup_intr, iodi_setup_intr),
{0, 0},
};
static driver_t iodi_driver = {
"iodi",
iodi_methods,
1 /* no softc */
};
static devclass_t iodi_devclass;
DRIVER_MODULE(iodi, nexus, iodi_driver, iodi_devclass, 0, 0);

View File

@ -25,8 +25,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#include <machine/asm.h>
#include <machine/cpu.h>
@ -35,6 +36,8 @@
#include <mips/nlm/hal/sys.h>
#include <mips/nlm/hal/cpucontrol.h>
#define SYS_REG_KSEG1(node, reg) (0xa0000000 + XLP_DEFAULT_IO_BASE + \
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + (reg) * 4)
#include "assym.s"
.text
@ -59,7 +62,7 @@ VECTOR(XLPResetEntry, unknown)
nor t0, t0, zero /* mask with core id bit clear */
/* clear CPU non-coherent bit */
li t2, XLP_DEFAULT_IO_BASE_KSEG1 + XLP_IO_SYS_OFFSET(0) + XLP_SYS_CPU_NONCOHERENT_MODE_REG * 4
li t2, SYS_REG_KSEG1(0, SYS_CPU_NONCOHERENT_MODE)
lw t1, 0(t2)
and t1, t1, t0
sw t1, 0(t2)
@ -109,7 +112,7 @@ LEAF(xlp_enable_threads)
sd gp, 80(sp)
sd ra, 88(sp)
/* Use register number to work in o32 and n32 */
li $9, ((XLP_CPU_BLOCKID_MAP << 8) | XLP_BLKID_MAP_THREADMODE)
li $9, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
move $8, a0
sync
.word 0x71280019 /* mtcr t0, t1*/

View File

@ -25,10 +25,10 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
/** FIXME **/
extern uint32_t xlp_msg_thread_mask;
typedef void (*msgring_handler)(int, int, int, int, struct nlm_fmn_msg *, void *);
int register_msgring_handler(int startb, int endb, msgring_handler action,
@ -41,4 +41,3 @@ void xlp_cpu_msgring_handler(int bucket, int size, int code, int stid,
void nlm_cms_credit_setup(int credit);
void xlp_msgring_iodi_config(void);

View File

@ -42,7 +42,7 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#include <machine/resource.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/uart.h>
@ -78,9 +78,9 @@ uart_iodi_probe(device_t dev)
sc->sc_class = &uart_ns8250_class;
bcopy(&sc->sc_sysdev->bas, &sc->sc_bas, sizeof(sc->sc_bas));
sc->sc_sysdev->bas.bst = rmi_bus_space;
sc->sc_sysdev->bas.bsh = nlm_regbase_uart(0, 0) + XLP_IO_PCI_HDRSZ;
sc->sc_sysdev->bas.bsh = nlm_get_uart_regbase(0, 0);
sc->sc_bas.bst = rmi_bus_space;
sc->sc_bas.bsh = nlm_regbase_uart(0, 0) + XLP_IO_PCI_HDRSZ;
sc->sc_bas.bsh = nlm_get_uart_regbase(0, 0);
/* regshft = 2, rclk = 133000000, rid = 0, chan = 0 */
return (uart_bus_probe(dev, 2, 133000000, 0, 0));
}

View File

@ -53,27 +53,51 @@ __FBSDID("$FreeBSD$");
#include <dev/uart/uart.h>
#include <dev/uart/uart_cpu.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/uart.h>
bus_space_tag_t uart_bus_space_io;
bus_space_tag_t uart_bus_space_mem;
/*
* need a special bus space for this, because the Netlogic SoC
* UART allows only 32 bit access to its registers
*/
static struct bus_space nlm_uart_bussp;
static u_int8_t
nlm_uart_bussp_read_1(void *tag, bus_space_handle_t handle,
bus_size_t offset)
{
return (u_int8_t)(*(volatile u_int32_t *)(handle + offset));
}
static void
nlm_uart_bussp_write_1(void *tag, bus_space_handle_t handle,
bus_size_t offset, u_int8_t value)
{
*(volatile u_int32_t *)(handle + offset) = value;
}
int
uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2)
{
return ((b1->bsh == b2->bsh && b1->bst == b2->bst) ? 1 : 0);
}
int
uart_cpu_getdev(int devtype, struct uart_devinfo *di)
{
/* Create custom bus space */
memcpy(&nlm_uart_bussp, rmi_bus_space, sizeof(nlm_uart_bussp));
nlm_uart_bussp.bs_r_1 = nlm_uart_bussp_read_1;
nlm_uart_bussp.bs_w_1 = nlm_uart_bussp_write_1;
di->ops = uart_getops(&uart_ns8250_class);
di->bas.chan = 0;
di->bas.bst = rmi_bus_space;
di->bas.bsh = nlm_regbase_uart(0, 0) + XLP_IO_PCI_HDRSZ;
di->bas.bst = &nlm_uart_bussp;
di->bas.bsh = nlm_get_uart_regbase(0, 0);
di->bas.regshft = 2;
/* divisor = rclk / (baudrate * 16); */
@ -84,6 +108,6 @@ uart_cpu_getdev(int devtype, struct uart_devinfo *di)
di->parity = UART_PARITY_NONE;
uart_bus_space_io = NULL;
uart_bus_space_mem = rmi_bus_space;
uart_bus_space_mem = &nlm_uart_bussp;
return (0);
}

View File

@ -0,0 +1,85 @@
/*-
* Copyright (c) 2011 Netlogic Microsystems Inc.
*
* (based on dev/uart/uart_bus_pci.c)
* Copyright (c) 2006 Marcel Moolenaar
* Copyright (c) 2001 M. Warner Losh
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/uart.h>
#include <dev/uart/uart.h>
#include <dev/uart/uart_bus.h>
static int uart_soc_probe(device_t dev);
static device_method_t uart_soc_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, uart_soc_probe),
DEVMETHOD(device_attach, uart_bus_attach),
DEVMETHOD(device_detach, uart_bus_detach),
{ 0, 0 }
};
static driver_t uart_soc_driver = {
uart_driver_name,
uart_soc_methods,
sizeof(struct uart_softc),
};
static int
uart_soc_probe(device_t dev)
{
struct uart_softc *sc;
uint64_t ubase;
if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC ||
pci_get_device(dev) != PCI_DEVICE_ID_NLM_UART)
return (ENXIO);
ubase = nlm_get_uart_regbase(0, 0);
sc = device_get_softc(dev);
sc->sc_class = &uart_ns8250_class;
device_set_desc(dev, "Netlogic SoC UART");
return (uart_bus_probe(dev, 2, 133000000, 0, 0));
}
DRIVER_MODULE(uart_soc, pci, uart_soc_driver, uart_devclass, 0, 0);

sys/mips/nlm/usb_init.c Normal file
View File

@ -0,0 +1,92 @@
/*-
* Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
* reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/cpuinfo.h>
#include <mips/nlm/hal/usb.h>
#include <mips/nlm/xlp.h>
static void
nlm_usb_intr_en(int node, int port)
{
uint32_t val;
uint64_t port_addr;
port_addr = nlm_get_usb_regbase(node, port);
/* enable the controller and OHCI interrupt sources */
val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
    USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN;
nlm_write_usb_reg(port_addr, USB_INT_EN, val);
}
static void
nlm_usb_hw_reset(int node, int port)
{
uint64_t port_addr;
uint32_t val;
/* reset USB phy */
port_addr = nlm_get_usb_regbase(node, port);
val = nlm_read_usb_reg(port_addr, USB_PHY_0);
val &= ~(USB_PHY_RESET | USB_PHY_PORT_RESET_0 | USB_PHY_PORT_RESET_1);
nlm_write_usb_reg(port_addr, USB_PHY_0, val);
DELAY(100);
val = nlm_read_usb_reg(port_addr, USB_CTL_0);
val &= ~(USB_CONTROLLER_RESET);
val |= 0x4;
nlm_write_usb_reg(port_addr, USB_CTL_0, val);
}
static void
nlm_usb_init(void)
{
/* XXX: should be checking if these are in Device mode here */
printf("Initialize USB Interface\n");
nlm_usb_hw_reset(0, 0);
nlm_usb_hw_reset(0, 3);
/* Enable USB interrupts */
nlm_usb_intr_en(0, 0);
nlm_usb_intr_en(0, 3);
}
SYSINIT(nlm_usb_init, SI_SUB_CPU, SI_ORDER_MIDDLE,
nlm_usb_init, NULL);

View File

@ -25,25 +25,25 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* NETLOGIC_BSD
* $FreeBSD$
* NETLOGIC_BSD */
*/
#ifndef __NLM_XLP_H__
#define __NLM_XLP_H__
#include <mips/nlm/hal/pic.h>
#define XLP_PIC_IRT_UART0_IRQ 9
#define XLP_PIC_IRT_UART1_IRQ 10
#define PIC_UART_0_IRQ 9
#define PIC_UART_1_IRQ 10
#define XLP_PIC_IRT_PCIE0_IRQ 11
#define XLP_PIC_IRT_PCIE1_IRQ 12
#define XLP_PIC_IRT_PCIE2_IRQ 13
#define XLP_PIC_IRT_PCIE3_IRQ 14
#define XLP_PIC_IRT_EHCI0_IRQ 39
#define XLP_PIC_IRT_EHCI1_IRQ 42
#define XLP_PIC_IRT_MMC_IRQ 43
#define PIC_PCIE_0_IRQ 11
#define PIC_PCIE_1_IRQ 12
#define PIC_PCIE_2_IRQ 13
#define PIC_PCIE_3_IRQ 14
#define PIC_EHCI_0_IRQ 39
#define PIC_EHCI_1_IRQ 42
#define PIC_MMC_IRQ 43
#ifndef LOCORE
/*
@ -59,30 +59,28 @@ extern int xlp_hwtid_to_cpuid[];
extern void xlp_enable_threads(int code);
#endif
extern uint64_t xlp_pic_base; /* TODO just for node 0 now */
static __inline__ int
xlp_irt_to_irq(int irt)
{
switch (irt) {
case XLP_PIC_IRT_MMC_INDEX :
return XLP_PIC_IRT_MMC_IRQ;
case XLP_PIC_IRT_EHCI0_INDEX :
return XLP_PIC_IRT_EHCI0_IRQ;
case XLP_PIC_IRT_EHCI1_INDEX :
return XLP_PIC_IRT_EHCI1_IRQ;
case XLP_PIC_IRT_UART0_INDEX :
return XLP_PIC_IRT_UART0_IRQ;
case XLP_PIC_IRT_UART1_INDEX :
return XLP_PIC_IRT_UART1_IRQ;
case XLP_PIC_IRT_PCIE_LINK0_INDEX :
return XLP_PIC_IRT_PCIE0_IRQ;
case XLP_PIC_IRT_PCIE_LINK1_INDEX :
return XLP_PIC_IRT_PCIE1_IRQ;
case XLP_PIC_IRT_PCIE_LINK2_INDEX :
return XLP_PIC_IRT_PCIE2_IRQ;
case XLP_PIC_IRT_PCIE_LINK3_INDEX :
return XLP_PIC_IRT_PCIE3_IRQ;
case PIC_IRT_MMC_INDEX :
return PIC_MMC_IRQ;
case PIC_IRT_EHCI_0_INDEX :
return PIC_EHCI_0_IRQ;
case PIC_IRT_EHCI_1_INDEX :
return PIC_EHCI_1_IRQ;
case PIC_IRT_UART_0_INDEX :
return PIC_UART_0_IRQ;
case PIC_IRT_UART_1_INDEX :
return PIC_UART_1_IRQ;
case PIC_IRT_PCIE_LINK_0_INDEX :
return PIC_PCIE_0_IRQ;
case PIC_IRT_PCIE_LINK_1_INDEX :
return PIC_PCIE_1_IRQ;
case PIC_IRT_PCIE_LINK_2_INDEX :
return PIC_PCIE_2_IRQ;
case PIC_IRT_PCIE_LINK_3_INDEX :
return PIC_PCIE_3_IRQ;
default: panic("Bad IRT %d\n", irt);
}
}
@ -91,24 +89,24 @@ static __inline__ int
xlp_irq_to_irt(int irq)
{
switch (irq) {
case XLP_PIC_IRT_MMC_IRQ :
return XLP_PIC_IRT_MMC_INDEX;
case XLP_PIC_IRT_EHCI0_IRQ :
return XLP_PIC_IRT_EHCI0_INDEX;
case XLP_PIC_IRT_EHCI1_IRQ :
return XLP_PIC_IRT_EHCI1_INDEX;
case XLP_PIC_IRT_UART0_IRQ :
return XLP_PIC_IRT_UART0_INDEX;
case XLP_PIC_IRT_UART1_IRQ :
return XLP_PIC_IRT_UART1_INDEX;
case XLP_PIC_IRT_PCIE0_IRQ :
return XLP_PIC_IRT_PCIE_LINK0_INDEX;
case XLP_PIC_IRT_PCIE1_IRQ :
return XLP_PIC_IRT_PCIE_LINK1_INDEX;
case XLP_PIC_IRT_PCIE2_IRQ :
return XLP_PIC_IRT_PCIE_LINK2_INDEX;
case XLP_PIC_IRT_PCIE3_IRQ :
return XLP_PIC_IRT_PCIE_LINK3_INDEX;
case PIC_MMC_IRQ :
return PIC_IRT_MMC_INDEX;
case PIC_EHCI_0_IRQ :
return PIC_IRT_EHCI_0_INDEX;
case PIC_EHCI_1_IRQ :
return PIC_IRT_EHCI_1_INDEX;
case PIC_UART_0_IRQ :
return PIC_IRT_UART_0_INDEX;
case PIC_UART_1_IRQ :
return PIC_IRT_UART_1_INDEX;
case PIC_PCIE_0_IRQ :
return PIC_IRT_PCIE_LINK_0_INDEX;
case PIC_PCIE_1_IRQ :
return PIC_IRT_PCIE_LINK_1_INDEX;
case PIC_PCIE_2_IRQ :
return PIC_IRT_PCIE_LINK_2_INDEX;
case PIC_PCIE_3_IRQ :
return PIC_IRT_PCIE_LINK_3_INDEX;
default: panic("Bad IRQ %d\n", irq);
}
}
@ -117,15 +115,15 @@ static __inline__ int
xlp_irq_is_picintr(int irq)
{
switch (irq) {
case XLP_PIC_IRT_MMC_IRQ : return 1;
case XLP_PIC_IRT_EHCI0_IRQ : return 1;
case XLP_PIC_IRT_EHCI1_IRQ : return 1;
case XLP_PIC_IRT_UART0_IRQ : return 1;
case XLP_PIC_IRT_UART1_IRQ : return 1;
case XLP_PIC_IRT_PCIE0_IRQ : return 1;
case XLP_PIC_IRT_PCIE1_IRQ : return 1;
case XLP_PIC_IRT_PCIE2_IRQ : return 1;
case XLP_PIC_IRT_PCIE3_IRQ : return 1;
case PIC_MMC_IRQ : return 1;
case PIC_EHCI_0_IRQ : return 1;
case PIC_EHCI_1_IRQ : return 1;
case PIC_UART_0_IRQ : return 1;
case PIC_UART_1_IRQ : return 1;
case PIC_PCIE_0_IRQ : return 1;
case PIC_PCIE_1_IRQ : return 1;
case PIC_PCIE_2_IRQ : return 1;
case PIC_PCIE_3_IRQ : return 1;
default: return 0;
}
}

View File

@ -71,9 +71,8 @@ __FBSDID("$FreeBSD$");
#include <machine/smp.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/mmio.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/cop0.h>
#include <mips/nlm/hal/sys.h>
#include <mips/nlm/hal/pic.h>
#include <mips/nlm/hal/uart.h>
@ -93,7 +92,7 @@ int xlp_argc;
char **xlp_argv, **xlp_envp;
uint64_t xlp_cpu_frequency;
uint64_t nlm_pcicfg_baseaddr = MIPS_PHYS_TO_KSEG1(XLP_DEFAULT_IO_BASE);
uint64_t xlp_io_base = MIPS_PHYS_TO_KSEG1(XLP_DEFAULT_IO_BASE);
int xlp_ncores;
int xlp_threads_per_core;
@ -112,7 +111,7 @@ xlp_setup_core(void)
{
uint64_t reg;
reg = nlm_mfcr(XLP_LSU_DEFEATURE);
reg = nlm_mfcr(LSU_DEFEATURE);
/* Enable Unaligned and L2HPE */
reg |= (1 << 30) | (1 << 23);
/*
@ -123,12 +122,12 @@ xlp_setup_core(void)
reg |= (1ull << 31);
/* Clear S1RCM - A0 errata */
reg &= ~0xeull;
nlm_mtcr(XLP_LSU_DEFEATURE, reg);
nlm_mtcr(LSU_DEFEATURE, reg);
reg = nlm_mfcr(XLP_SCHED_DEFEATURE);
reg = nlm_mfcr(SCHED_DEFEATURE);
/* Experimental: Disable BRU accepting ALU ops - A0 errata */
reg |= (1 << 24);
nlm_mtcr(XLP_SCHED_DEFEATURE, reg);
nlm_mtcr(SCHED_DEFEATURE, reg);
}
static void
@ -281,8 +280,9 @@ mips_init(void)
unsigned int
platform_get_timecount(struct timecounter *tc __unused)
{
uint64_t count = nlm_pic_read_timer(xlp_pic_base, PIC_CLOCK_TIMER);
return ((unsigned int)~nlm_pic_read_systimer(xlp_pic_base, 7));
return (unsigned int)~count;
}
static void
@ -292,21 +292,21 @@ xlp_pic_init(void)
platform_get_timecount, /* get_timecount */
0, /* no poll_pps */
~0U, /* counter_mask */
XLP_PIC_TIMER_FREQ, /* frequency */
XLP_IO_CLK, /* frequency */
"XLRPIC", /* name */
2000, /* quality (adjusted in code) */
};
int i;
xlp_pic_base = nlm_regbase_pic(0); /* TOOD: Add other nodes */
xlp_pic_base = nlm_get_pic_regbase(0); /* TODO: Add other nodes */
printf("Initializing PIC...@%jx\n", (uintmax_t)xlp_pic_base);
/* Bind all PIC irqs to cpu 0 */
for(i = 0; i < XLP_PIC_MAX_IRT; i++) {
nlm_pic_write_irt_raw(xlp_pic_base, i, 0, 0, 1, 0,
for(i = 0; i < PIC_NUM_IRTS; i++) {
nlm_pic_write_irt(xlp_pic_base, i, 0, 0, 1, 0,
1, 0, 0x1);
}
nlm_pic_set_systimer(xlp_pic_base, 7, ~0ULL, 0, 0, 0, 0);
nlm_pic_set_timer(xlp_pic_base, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
platform_timecounter = &pic_timecounter;
}
@ -322,15 +322,15 @@ xlp_pic_init(void)
static void
xlp_mem_init(void)
{
uint64_t bridgebase = nlm_regbase_bridge(0); /* TOOD: Add other nodes */
uint64_t bridgebase = nlm_get_bridge_regbase(0); /* TODO: Add other nodes */
vm_size_t physsz = 0;
uint64_t base, lim, val;
int i, j;
for (i = 0, j = 0; i < 8; i++) {
val = nlm_rdreg_bridge(bridgebase, XLP_BRIDGE_DRAM_BAR_REG(i));
val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_BAR(i));
base = ((val >> 12) & 0xfffff) << 20;
val = nlm_rdreg_bridge(bridgebase, XLP_BRIDGE_DRAM_LIMIT_REG(i));
val = nlm_read_bridge_reg(bridgebase, BRIDGE_DRAM_LIMIT(i));
lim = ((val >> 12) & 0xfffff) << 20;
/* BAR not enabled */
@ -396,17 +396,22 @@ xlp_mem_init(void)
/* setup final entry with 0 */
phys_avail[j] = phys_avail[j + 1] = 0;
/* copy phys_avail to dump_avail */
for(i = 0; i <= j + 1; i++)
dump_avail[i] = phys_avail[i];
realmem = physmem = btoc(physsz);
}
static uint32_t
xlp_get_cpu_frequency(void)
{
uint64_t sysbase = nlm_regbase_sys(0);
uint64_t sysbase = nlm_get_sys_regbase(0);
unsigned int pll_divf, pll_divr, dfs_div, num, denom;
uint32_t val;
val = nlm_rdreg_sys(sysbase, XLP_SYS_POWER_ON_RESET_REG);
val = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
pll_divf = (val >> 10) & 0x7f;
pll_divr = (val >> 8) & 0x3;
dfs_div = (val >> 17) & 0x3;
@ -520,9 +525,9 @@ platform_trap_enter(void)
void
platform_reset(void)
{
uint64_t sysbase = nlm_regbase_sys(0);
uint64_t sysbase = nlm_get_sys_regbase(0);
nlm_wreg_sys(sysbase, XLP_SYS_CHIP_RESET_REG, 1);
nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
for(;;)
__asm __volatile("wait");
}
@ -544,7 +549,7 @@ int
platform_start_ap(int cpuid)
{
uint32_t coremask, val;
uint64_t sysbase = nlm_regbase_sys(0);
uint64_t sysbase = nlm_get_sys_regbase(0);
int hwtid = xlp_cpuid_to_hwtid[cpuid];
int core, thr;
@ -555,21 +560,21 @@ platform_start_ap(int cpuid)
coremask = 1u << core;
/* Enable core clock */
val = nlm_rdreg_sys(sysbase, XLP_SYS_CORE_DFS_DIS_CTRL_REG);
val = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
val &= ~coremask;
nlm_wreg_sys(sysbase, XLP_SYS_CORE_DFS_DIS_CTRL_REG, val);
nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, val);
/* Remove CPU Reset */
val = nlm_rdreg_sys(sysbase, XLP_SYS_CPU_RESET_REG);
val = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
val &= ~coremask & 0xff;
nlm_wreg_sys(sysbase, XLP_SYS_CPU_RESET_REG, val);
nlm_write_sys_reg(sysbase, SYS_CPU_RESET, val);
if (bootverbose)
printf("Waking up core %d ...", core);
/* Poll for CPU to mark itself coherent */
do {
val = nlm_rdreg_sys(sysbase, XLP_SYS_CPU_NONCOHERENT_MODE_REG);
val = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
} while ((val & coremask) != 0);
if (bootverbose)
printf("Done\n");
@ -628,7 +633,7 @@ void
platform_ipi_send(int cpuid)
{
nlm_pic_send_ipi(xlp_pic_base, 0, xlp_cpuid_to_hwtid[cpuid],
nlm_pic_send_ipi(xlp_pic_base, xlp_cpuid_to_hwtid[cpuid],
platform_ipi_intrnum(), 0);
}

sys/mips/nlm/xlp_pci.c Normal file
View File

@ -0,0 +1,666 @@
/*-
* Copyright (c) 2003-2009 RMI Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of RMI Corporation, nor the names of its contributors,
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* NETLOGIC_BSD */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/rman.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <sys/pciio.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/uart/uart.h>
#include <dev/uart/uart_bus.h>
#include <dev/uart/uart_cpu.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/intr_machdep.h>
#include <machine/cpuregs.h>
#include <mips/nlm/hal/haldefs.h>
#include <mips/nlm/interrupt.h>
#include <mips/nlm/hal/iomap.h>
#include <mips/nlm/hal/mips-extns.h>
#include <mips/nlm/hal/pic.h>
#include <mips/nlm/hal/pcibus.h>
#include <mips/nlm/hal/uart.h>
#include <mips/nlm/xlp.h>
#include "pcib_if.h"
struct xlp_pcib_softc {
bus_dma_tag_t sc_pci_dmat; /* PCI DMA tag pointer */
};
static devclass_t pcib_devclass;
static struct rman irq_rman, port_rman, mem_rman, emul_rman;
static void
xlp_pci_init_resources(void)
{
irq_rman.rm_start = 0;
irq_rman.rm_end = 255;
irq_rman.rm_type = RMAN_ARRAY;
irq_rman.rm_descr = "PCI Mapped Interrupts";
if (rman_init(&irq_rman)
|| rman_manage_region(&irq_rman, 0, 255))
panic("pci_init_resources irq_rman");
port_rman.rm_start = 0;
port_rman.rm_end = ~0ul;
port_rman.rm_type = RMAN_ARRAY;
port_rman.rm_descr = "I/O ports";
if (rman_init(&port_rman)
|| rman_manage_region(&port_rman, 0x14000000UL, 0x15ffffffUL))
panic("pci_init_resources port_rman");
mem_rman.rm_start = 0;
mem_rman.rm_end = ~0ul;
mem_rman.rm_type = RMAN_ARRAY;
mem_rman.rm_descr = "I/O memory";
if (rman_init(&mem_rman)
|| rman_manage_region(&mem_rman, 0xd0000000ULL, 0xdfffffffULL))
panic("pci_init_resources mem_rman");
emul_rman.rm_start = 0;
emul_rman.rm_end = ~0ul;
emul_rman.rm_type = RMAN_ARRAY;
emul_rman.rm_descr = "Emulated MEMIO";
if (rman_init(&emul_rman)
|| rman_manage_region(&emul_rman, 0x18000000ULL, 0x18ffffffULL))
panic("pci_init_resources emul_rman");
}
static int
xlp_pcib_probe(device_t dev)
{
device_set_desc(dev, "XLP PCI bus");
xlp_pci_init_resources();
return (0);
}
static int
xlp_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
switch (which) {
case PCIB_IVAR_DOMAIN:
*result = 0;
return (0);
case PCIB_IVAR_BUS:
*result = 0;
return (0);
}
return (ENOENT);
}
static int
xlp_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t result)
{
switch (which) {
case PCIB_IVAR_DOMAIN:
return (EINVAL);
case PCIB_IVAR_BUS:
return (EINVAL);
}
return (ENOENT);
}
static int
xlp_pcib_maxslots(device_t dev)
{
return (PCI_SLOTMAX);
}
static u_int32_t
xlp_pcib_read_config(device_t dev, u_int b, u_int s, u_int f,
u_int reg, int width)
{
uint32_t data = 0;
uint64_t cfgaddr;
int regindex = reg/sizeof(uint32_t);
cfgaddr = nlm_pcicfg_base(XLP_HDR_OFFSET(0, b, s, f));
if ((width == 2) && (reg & 1))
return 0xFFFFFFFF;
else if ((width == 4) && (reg & 3))
return 0xFFFFFFFF;
data = nlm_read_pci_reg(cfgaddr, regindex);
/*
* Fix up read data in some SoC devices
* to emulate complete PCIe header
*/
if (b == 0) {
int dev = s % 8;
/* Fake intpin on config read for UART/I2C, USB, SD/Flash */
if (regindex == 0xf &&
(dev == 6 || dev == 2 || dev == 7))
data |= 0x1 << 8; /* Fake int pin */
}
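/*
 * Sub-dword reads extract the addressed bytes from the 32-bit config
 * word below, e.g. a 1-byte read of offset 0x3d (interrupt pin) uses
 * dword 0xf and returns (data >> 8) & 0xff.
 */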
if (width == 1)
return ((data >> ((reg & 3) << 3)) & 0xff);
else if (width == 2)
return ((data >> ((reg & 3) << 3)) & 0xffff);
else
return (data);
}
static void
xlp_pcib_write_config(device_t dev, u_int b, u_int s, u_int f,
u_int reg, u_int32_t val, int width)
{
uint64_t cfgaddr;
uint32_t data = 0;
int regindex = reg / sizeof(uint32_t);
cfgaddr = nlm_pcicfg_base(XLP_HDR_OFFSET(0, b, s, f));
if ((width == 2) && (reg & 1))
return;
else if ((width == 4) && (reg & 3))
return;
if (width == 1) {
data = nlm_read_pci_reg(cfgaddr, regindex);
data = (data & ~(0xff << ((reg & 3) << 3))) |
(val << ((reg & 3) << 3));
} else if (width == 2) {
data = nlm_read_pci_reg(cfgaddr, regindex);
data = (data & ~(0xffff << ((reg & 3) << 3))) |
(val << ((reg & 3) << 3));
} else {
data = val;
}
nlm_write_pci_reg(cfgaddr, regindex, data);
return;
}
static int
xlp_pcib_attach(device_t dev)
{
struct xlp_pcib_softc *sc;
sc = device_get_softc(dev);
device_add_child(dev, "pci", 0);
bus_generic_attach(dev);
return (0);
}
static void
xlp_pcib_identify(driver_t * driver, device_t parent)
{
BUS_ADD_CHILD(parent, 0, "pcib", 0);
}
/*
* XLP PCIe can have up to 4 links, and each link has its own IRQ.
* Find the link on which the device sits.
*/
static int
xlp_pcie_link(device_t pcib, device_t dev)
{
device_t parent, tmp;
/* find the link to which the slot is connected */
printf("xlp_pcie_link : bus %s dev %s\n", device_get_nameunit(pcib),
device_get_nameunit(dev));
tmp = dev;
while (1) {
parent = device_get_parent(tmp);
if (parent == NULL || parent == pcib) {
device_printf(dev, "Cannot find parent bus\n");
return (-1);
}
if (strcmp(device_get_nameunit(parent), "pci0") == 0)
break;
tmp = parent;
}
return (pci_get_function(tmp));
}
/*
* Find the IRQ for the link, each link has a different interrupt
* at the XLP pic
*/
static int
xlp_pcie_link_irt(int link)
{
if( (link < 0) || (link > 3))
return (-1);
return PIC_IRT_PCIE_LINK_INDEX(link);
}
static int
xlp_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs)
{
int i, link;
/*
* Each link has 32 MSIs that can be allocated, but for now
* we only support one device per link.
* msi_alloc() equivalent is needed when we start supporting
* bridges on the PCIe link.
*/
link = xlp_pcie_link(pcib, dev);
if (link == -1)
return (ENXIO);
/*
* encode the irq so that we know it is a MSI interrupt when we
* setup interrupts
*/
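/* e.g. the first MSI on link 2 is handed out as IRQ 64 + 2 * 32 = 128 */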
for (i = 0; i < count; i++)
irqs[i] = 64 + link * 32 + i;
return (0);
}
static int
xlp_release_msi(device_t pcib, device_t dev, int count, int *irqs)
{
device_printf(dev, "%s: msi release %d\n", device_get_nameunit(pcib),
count);
return (0);
}
static int
xlp_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr,
uint32_t *data)
{
int msi, irt;
if (irq >= 64) {
msi = irq - 64;
*addr = MIPS_MSI_ADDR(0);
irt = xlp_pcie_link_irt(msi/32);
if (irt != -1)
*data = MIPS_MSI_DATA(xlp_irt_to_irq(irt));
return (0);
} else {
device_printf(dev, "%s: map_msi for irq %d - ignored",
device_get_nameunit(pcib), irq);
return (ENXIO);
}
}
static void
bridge_pcie_ack(int irq)
{
uint32_t node,reg;
uint64_t base;
node = nlm_nodeid();
reg = PCIE_MSI_STATUS;
switch(irq) {
case PIC_PCIE_0_IRQ:
base = nlm_pcicfg_base(XLP_IO_PCIE0_OFFSET(node));
break;
case PIC_PCIE_1_IRQ:
base = nlm_pcicfg_base(XLP_IO_PCIE1_OFFSET(node));
break;
case PIC_PCIE_2_IRQ:
base = nlm_pcicfg_base(XLP_IO_PCIE2_OFFSET(node));
break;
case PIC_PCIE_3_IRQ:
base = nlm_pcicfg_base(XLP_IO_PCIE3_OFFSET(node));
break;
default:
return;
}
nlm_write_pci_reg(base, reg, 0xFFFFFFFF);
return;
}
static int
mips_platform_pci_setup_intr(device_t dev, device_t child,
struct resource *irq, int flags, driver_filter_t *filt,
driver_intr_t *intr, void *arg, void **cookiep)
{
int error = 0;
int xlpirq;
int node,base,val,link;
void *extra_ack;
error = rman_activate_resource(irq);
if (error)
return error;
if (rman_get_start(irq) != rman_get_end(irq)) {
device_printf(dev, "Interrupt allocation %lu != %lu\n",
rman_get_start(irq), rman_get_end(irq));
return (EINVAL);
}
xlpirq = rman_get_start(irq);
device_printf(dev, "setup intr %d\n", xlpirq);
if (strcmp(device_get_name(dev), "pcib") != 0) {
device_printf(dev, "ret 0 on dev\n");
return (0);
}
/*
* temporary hack for MSI, we support just one device per
* link, and assign the link interrupt to the device interrupt
*/
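/*
 * IRQs >= 64 carry the encoding produced by xlp_alloc_msi() above
 * (64 + link * 32 + vector); only the first vector of each link is
 * wired to the PIC here.
 */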
if (xlpirq >= 64) {
xlpirq -= 64;
if (xlpirq % 32 != 0)
return (0);
node = nlm_nodeid();
link = (xlpirq / 32);
base = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node,link));
/* MSI Interrupt Vector enable at bridge's configuration */
nlm_write_pci_reg(base, PCIE_MSI_EN, PCIE_MSI_VECTOR_INT_EN);
val = nlm_read_pci_reg(base, PCIE_INT_EN0);
/* MSI Interrupt enable at bridge's configuration */
nlm_write_pci_reg(base, PCIE_INT_EN0,
(val | PCIE_MSI_INT_EN));
/* legacy interrupt disable at bridge */
val = nlm_read_pci_reg(base, PCIE_BRIDGE_CMD);
nlm_write_pci_reg(base, PCIE_BRIDGE_CMD,
(val | PCIM_CMD_INTxDIS));
/* MSI address update at bridge */
val = nlm_read_pci_reg(base, PCIE_BRIDGE_MSI_ADDRL);
nlm_write_pci_reg(base, PCIE_BRIDGE_MSI_ADDRL,
(val | MSI_MIPS_ADDR_BASE));
val = nlm_read_pci_reg(base, PCIE_BRIDGE_MSI_CAP);
/* MSI capability enable at bridge */
nlm_write_pci_reg(base, PCIE_BRIDGE_MSI_CAP,
(val |
(PCIM_MSICTRL_MSI_ENABLE << 16) |
(PCIM_MSICTRL_MMC_32 << 16)));
xlpirq = xlp_pcie_link_irt(xlpirq / 32);
if (xlpirq == -1)
return (EINVAL);
xlpirq = xlp_irt_to_irq(xlpirq);
}
/* Set all irqs to CPU 0 for now */
nlm_pic_write_irt_direct(xlp_pic_base, xlp_irq_to_irt(xlpirq), 1, 0,
PIC_LOCAL_SCHEDULING, xlpirq, 0);
extra_ack = NULL;
if (xlpirq >= PIC_PCIE_0_IRQ &&
xlpirq <= PIC_PCIE_3_IRQ)
extra_ack = bridge_pcie_ack;
xlp_establish_intr(device_get_name(child), filt,
intr, arg, xlpirq, flags, cookiep, extra_ack);
return (0);
}
static int
mips_platform_pci_teardown_intr(device_t dev, device_t child,
struct resource *irq, void *cookie)
{
if (strcmp(device_get_name(child), "pci") == 0) {
/* if needed reprogram the pic to clear pcix related entry */
device_printf(dev, "teardown intr\n");
}
return (bus_generic_teardown_intr(dev, child, irq, cookie));
}
static void
assign_soc_resource(device_t child, int type, u_long *startp, u_long *endp,
u_long *countp, struct rman **rm, bus_space_tag_t *bst, vm_offset_t *va)
{
int devid = pci_get_device(child);
int inst = pci_get_function(child);
int node = pci_get_slot(child) / 8;
*rm = NULL;
*va = 0;
*bst = 0;
switch (devid) {
case PCI_DEVICE_ID_NLM_UART:
switch (type) {
case SYS_RES_IRQ:
*startp = *endp = PIC_UART_0_IRQ + inst;
*countp = 1;
break;
case SYS_RES_MEMORY:
*va = nlm_get_uart_regbase(node, inst);
*startp = MIPS_KSEG1_TO_PHYS(*va);
*countp = 0x100;
*rm = &emul_rman;
*bst = uart_bus_space_mem;
break;
};
break;
case PCI_DEVICE_ID_NLM_EHCI:
if (type == SYS_RES_IRQ) {
if (inst == 0)
*startp = *endp = PIC_EHCI_0_IRQ;
else if (inst == 3)
*startp = *endp = PIC_EHCI_1_IRQ;
else
device_printf(child, "bad instance %d\n", inst);
*countp = 1;
}
break;
}
/* default to rmi_bus_space for SoC resources */
if (type == SYS_RES_MEMORY && *bst == 0)
*bst = rmi_bus_space;
}
static struct resource *
xlp_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
u_long start, u_long end, u_long count, u_int flags)
{
struct rman *rm = NULL;
struct resource *rv;
vm_offset_t va = 0;
int needactivate = flags & RF_ACTIVE;
bus_space_tag_t bst = 0;
/*
* For SoC PCI devices, we have to assign resources correctly
* since the IRQ and MEM resources depend on the block.
* If the address is not from BAR0, then we use emul_rman
*/
if (pci_get_bus(child) == 0 &&
pci_get_vendor(child) == PCI_VENDOR_NETLOGIC)
assign_soc_resource(child, type, &start, &end,
&count, &rm, &bst, &va);
if (rm == NULL) {
switch (type) {
case SYS_RES_IRQ:
rm = &irq_rman;
break;
case SYS_RES_IOPORT:
rm = &port_rman;
break;
case SYS_RES_MEMORY:
rm = &mem_rman;
break;
default:
return (0);
}
}
rv = rman_reserve_resource(rm, start, end, count, flags, child);
if (rv == 0)
return (0);
rman_set_rid(rv, *rid);
if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
if (va == 0)
va = (vm_offset_t)pmap_mapdev(start, count);
if (bst == 0)
bst = rmi_pci_bus_space;
rman_set_bushandle(rv, va);
rman_set_virtual(rv, (void *)va);
rman_set_bustag(rv, bst);
}
if (needactivate) {
if (bus_activate_resource(child, type, *rid, rv)) {
rman_release_resource(rv);
return (NULL);
}
}
return (rv);
}
static int
xlp_pci_release_resource(device_t bus, device_t child, int type, int rid,
struct resource *r)
{
return (rman_release_resource(r));
}
static bus_dma_tag_t
xlp_pci_get_dma_tag(device_t bus, device_t child)
{
struct xlp_pcib_softc *sc;
sc = device_get_softc(bus);
return (sc->sc_pci_dmat);
}
static int
xlp_pci_activate_resource(device_t bus, device_t child, int type, int rid,
struct resource *r)
{
return (rman_activate_resource(r));
}
static int
xlp_pci_deactivate_resource(device_t bus, device_t child, int type, int rid,
struct resource *r)
{
return (rman_deactivate_resource(r));
}
static int
mips_pci_route_interrupt(device_t bus, device_t dev, int pin)
{
int irt, link;
/*
* Validate requested pin number.
*/
device_printf(bus, "route %s %d", device_get_nameunit(dev), pin);
if ((pin < 1) || (pin > 4))
return (255);
link = xlp_pcie_link(bus, dev);
irt = xlp_pcie_link_irt(link);
if (irt != -1)
return (xlp_irt_to_irq(irt));
return (255);
}
static device_method_t xlp_pcib_methods[] = {
/* Device interface */
DEVMETHOD(device_identify, xlp_pcib_identify),
DEVMETHOD(device_probe, xlp_pcib_probe),
DEVMETHOD(device_attach, xlp_pcib_attach),
/* Bus interface */
DEVMETHOD(bus_print_child, bus_generic_print_child),
DEVMETHOD(bus_read_ivar, xlp_pcib_read_ivar),
DEVMETHOD(bus_write_ivar, xlp_pcib_write_ivar),
DEVMETHOD(bus_alloc_resource, xlp_pci_alloc_resource),
DEVMETHOD(bus_release_resource, xlp_pci_release_resource),
DEVMETHOD(bus_get_dma_tag, xlp_pci_get_dma_tag),
DEVMETHOD(bus_activate_resource, xlp_pci_activate_resource),
DEVMETHOD(bus_deactivate_resource, xlp_pci_deactivate_resource),
DEVMETHOD(bus_setup_intr, mips_platform_pci_setup_intr),
DEVMETHOD(bus_teardown_intr, mips_platform_pci_teardown_intr),
/* pcib interface */
DEVMETHOD(pcib_maxslots, xlp_pcib_maxslots),
DEVMETHOD(pcib_read_config, xlp_pcib_read_config),
DEVMETHOD(pcib_write_config, xlp_pcib_write_config),
DEVMETHOD(pcib_route_interrupt, mips_pci_route_interrupt),
DEVMETHOD(pcib_alloc_msi, xlp_alloc_msi),
DEVMETHOD(pcib_release_msi, xlp_release_msi),
DEVMETHOD(pcib_map_msi, xlp_map_msi),
{0, 0}
};
static driver_t xlp_pcib_driver = {
"pcib",
xlp_pcib_methods,
sizeof(struct xlp_pcib_softc),
};
DRIVER_MODULE(pcib, nexus, xlp_pcib_driver, pcib_devclass, 0, 0);