From 5cb5104246dbcf696aa8647fded889f97728780f Mon Sep 17 00:00:00 2001 From: Marius Strobl Date: Sun, 27 Dec 2009 16:55:44 +0000 Subject: [PATCH] Add a driver for the `Fire' JBus to PCIe bridges found in at least the Sun Fire V215/V245 and Sun Ultra 25/45 machines. This driver also already includes all the code to support the `Oberon' Uranus to PCIe bridges found in the Fujitsu-Siemens based Mx000 machines but due to lack of access to such a system for testing, probing of these bridges is currently disabled. Unfortunately, the event queue mechanism of these bridges for MSIs/ MSI-Xs matches our current MD and MI interrupt frameworks like square pegs fit into round holes so for now we are generous and use one event queue per MSI, which limits us to 35 MSIs/MSI-Xs per Host-PCIe-bridge (we use one event queue for the PCIe error messages). This seems tolerable as long as most devices just use one MSI/MSI-X anyway. Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us to decouple the 1:1 mapping at the cost of no longer being able to bind MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to quiesce a device during the transition of its MSIs/ MSI-Xs to another event queue. This would still require the problem of interrupt storms generated by devices which have no one-shot behavior or can't/don't mask interrupts while the filter/handler is executed (like the older PCIe NICs supported by bge(4)) to be solved though. Committed from: 26C3 --- sys/conf/options.sparc64 | 2 + sys/sparc64/pci/fire.c | 2122 +++++++++++++++++++++++++++++++++++++ sys/sparc64/pci/firereg.h | 1004 ++++++++++++++++++ sys/sparc64/pci/firevar.h | 98 ++ 4 files changed, 3226 insertions(+) create mode 100644 sys/sparc64/pci/fire.c create mode 100644 sys/sparc64/pci/firereg.h create mode 100644 sys/sparc64/pci/firevar.h diff --git a/sys/conf/options.sparc64 b/sys/conf/options.sparc64 index ba5ab9fc85bc..bc6af5a18a5e 100644 --- a/sys/conf/options.sparc64 +++ b/sys/conf/options.sparc64 @@ -8,6 +8,8 @@ SUN4U opt_global.h ATKBD_DFLT_KEYMAP opt_atkbd.h +FIRE_DEBUG opt_fire.h + # Debug IOMMU inserts/removes using diagnostic accesses. This is very loud. IOMMU_DIAG opt_iommu.h diff --git a/sys/sparc64/pci/fire.c b/sys/sparc64/pci/fire.c new file mode 100644 index 000000000000..39fac20623f0 --- /dev/null +++ b/sys/sparc64/pci/fire.c @@ -0,0 +1,2122 @@ +/*- + * Copyright (c) 1999, 2000 Matthew R. Green + * Copyright (c) 2001 - 2003 by Thomas Moestl + * Copyright (c) 2009 by Marius Strobl + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp + * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express + * bridges + */ + +#include "opt_fire.h" +#include "opt_ofw_pci.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include "pcib_if.h" + +static bus_space_tag_t fire_alloc_bus_tag(struct fire_softc *sc, int type); +static const struct fire_desc *fire_get_desc(device_t dev); +static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map, + bus_dmasync_op_t op); +static int fire_get_intrmap(struct fire_softc *sc, u_int ino, + bus_addr_t *intrmapptr, bus_addr_t *intrclrptr); +static void fire_intr_assign(void *arg); +static void fire_intr_clear(void *arg); +static void fire_intr_disable(void *arg); +static void fire_intr_enable(void *arg); +static int fire_intr_register(struct fire_softc *sc, u_int ino); +static void fire_msiq_handler(void *cookie); +static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino, + driver_filter_t handler, void *arg); +static timecounter_get_t fire_get_timecount; + +/* Interrupt handlers */ +static driver_filter_t fire_dmc_pec; +static driver_filter_t fire_pcie; +static driver_filter_t fire_xcb; + +/* + * Methods + */ +static bus_activate_resource_t fire_activate_resource; +static pcib_alloc_msi_t fire_alloc_msi; +static pcib_alloc_msix_t fire_alloc_msix; +static bus_alloc_resource_t fire_alloc_resource; +static device_attach_t fire_attach; +static bus_deactivate_resource_t fire_deactivate_resource; +static bus_get_dma_tag_t fire_get_dma_tag; +static ofw_bus_get_node_t fire_get_node; +static pcib_map_msi_t fire_map_msi; +static pcib_maxslots_t fire_maxslots; +static device_probe_t fire_probe; +static pcib_read_config_t fire_read_config; +static bus_read_ivar_t fire_read_ivar; +static pcib_release_msi_t fire_release_msi; +static pcib_release_msix_t fire_release_msix; +static bus_release_resource_t fire_release_resource; +static pcib_route_interrupt_t fire_route_interrupt; +static bus_setup_intr_t fire_setup_intr; +static bus_teardown_intr_t fire_teardown_intr; +static pcib_write_config_t fire_write_config; + +static device_method_t fire_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, fire_probe), + DEVMETHOD(device_attach, fire_attach), + DEVMETHOD(device_shutdown, bus_generic_shutdown), + DEVMETHOD(device_suspend, bus_generic_suspend), + DEVMETHOD(device_resume, bus_generic_resume), + + /* Bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_read_ivar, fire_read_ivar), + DEVMETHOD(bus_setup_intr, fire_setup_intr), + 
DEVMETHOD(bus_teardown_intr, fire_teardown_intr), + DEVMETHOD(bus_alloc_resource, fire_alloc_resource), + DEVMETHOD(bus_activate_resource, fire_activate_resource), + DEVMETHOD(bus_deactivate_resource, fire_deactivate_resource), + DEVMETHOD(bus_release_resource, fire_release_resource), + DEVMETHOD(bus_get_dma_tag, fire_get_dma_tag), + + /* pcib interface */ + DEVMETHOD(pcib_maxslots, fire_maxslots), + DEVMETHOD(pcib_read_config, fire_read_config), + DEVMETHOD(pcib_write_config, fire_write_config), + DEVMETHOD(pcib_route_interrupt, fire_route_interrupt), + DEVMETHOD(pcib_alloc_msi, fire_alloc_msi), + DEVMETHOD(pcib_release_msi, fire_release_msi), + DEVMETHOD(pcib_alloc_msix, fire_alloc_msix), + DEVMETHOD(pcib_release_msix, fire_release_msix), + DEVMETHOD(pcib_map_msi, fire_map_msi), + + /* ofw_bus interface */ + DEVMETHOD(ofw_bus_get_node, fire_get_node), + + KOBJMETHOD_END +}; + +static devclass_t fire_devclass; + +DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc)); +EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0, + BUS_PASS_BUS); +MODULE_DEPEND(fire, nexus, 1, 1, 1); + +static const struct intr_controller fire_ic = { + fire_intr_enable, + fire_intr_disable, + fire_intr_assign, + fire_intr_clear +}; + +struct fire_icarg { + struct fire_softc *fica_sc; + bus_addr_t fica_map; + bus_addr_t fica_clr; +}; + +struct fire_msiqarg { + struct fire_icarg fmqa_fica; + struct mtx fmqa_mtx; + struct fo_msiq_record *fmqa_base; + uint64_t fmqa_head; + uint64_t fmqa_tail; + uint32_t fmqa_msiq; + uint32_t fmqa_msi; +}; + +#define FIRE_PERF_CNT_QLTY 100 + +#define FIRE_SPC_BARRIER(spc, sc, offs, len, flags) \ + bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags)) +#define FIRE_SPC_READ_8(spc, sc, offs) \ + bus_read_8((sc)->sc_mem_res[(spc)], (offs)) +#define FIRE_SPC_WRITE_8(spc, sc, offs, v) \ + bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v)) + +#ifndef FIRE_DEBUG +#define FIRE_SPC_SET(spc, sc, offs, reg, v) \ + FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)) +#else +#define FIRE_SPC_SET(spc, sc, offs, reg, v) do { \ + device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n", \ + (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)), \ + (unsigned long long)(v)); \ + FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)); \ + } while (0) +#endif + +#define FIRE_PCI_BARRIER(sc, offs, len, flags) \ + FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags) +#define FIRE_PCI_READ_8(sc, offs) \ + FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs)) +#define FIRE_PCI_WRITE_8(sc, offs, v) \ + FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v)) +#define FIRE_CTRL_BARRIER(sc, offs, len, flags) \ + FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags) +#define FIRE_CTRL_READ_8(sc, offs) \ + FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs)) +#define FIRE_CTRL_WRITE_8(sc, offs, v) \ + FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v)) + +#define FIRE_PCI_SET(sc, offs, v) \ + FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v)) +#define FIRE_CTRL_SET(sc, offs, v) \ + FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v)) + +struct fire_desc { + const char *fd_string; + int fd_mode; + const char *fd_name; +}; + +static const struct fire_desc const fire_compats[] = { + { "pciex108e,80f0", FIRE_MODE_FIRE, "Fire" }, +#if 0 + { "pciex108e,80f8", FIRE_MODE_OBERON, "Oberon" }, +#endif + { NULL, 0, NULL } +}; + +static const struct fire_desc * +fire_get_desc(device_t dev) +{ + const struct fire_desc *desc; + const char *compat; + + compat = ofw_bus_get_compat(dev); + if (compat == NULL) + return (NULL); + for (desc = 
fire_compats; desc->fd_string != NULL; desc++) + if (strcmp(desc->fd_string, compat) == 0) + return (desc); + return (NULL); +} + +static int +fire_probe(device_t dev) +{ + const char *dtype; + + dtype = ofw_bus_get_type(dev); + if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 && + fire_get_desc(dev) != NULL) { + device_set_desc(dev, "Sun Host-PCIe bridge"); + return (BUS_PROBE_GENERIC); + } + return (ENXIO); +} + +static int +fire_attach(device_t dev) +{ + struct fire_softc *sc; + const struct fire_desc *desc; + struct ofw_pci_msi_ranges msi_ranges; + struct ofw_pci_msi_addr_ranges msi_addr_ranges; + struct ofw_pci_msi_eq_to_devino msi_eq_to_devino; + struct fire_msiqarg *fmqa; + struct timecounter *tc; + struct ofw_pci_ranges *range; + uint64_t ino_bitmap, val; + phandle_t node; + uint32_t prop, prop_array[2]; + int i, j, mode, nrange; + u_int lw; + uint16_t mps; + + sc = device_get_softc(dev); + node = ofw_bus_get_node(dev); + desc = fire_get_desc(dev); + mode = desc->fd_mode; + + sc->sc_dev = dev; + sc->sc_node = node; + sc->sc_mode = mode; + sc->sc_flags = 0; + + mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); + mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN); + + /* + * Fire and Oberon have two register banks: + * (0) per-PBM PCI Express configuration and status registers + * (1) (shared) Fire/Oberon controller configuration and status + * registers + */ + for (i = 0; i < FIRE_NREG; i++) { + j = i; + sc->sc_mem_res[i] = bus_alloc_resource_any(dev, + SYS_RES_MEMORY, &j, RF_ACTIVE); + if (sc->sc_mem_res[i] == NULL) + panic("%s: could not allocate register bank %d", + __func__, i); + } + + if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1) + panic("%s: could not determine IGN", __func__); + if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1) + panic("%s: could not determine revision", __func__); + + device_printf(dev, "%s, module-revision %d, IGN %#x\n", + desc->fd_name, prop, sc->sc_ign); + + /* + * Hunt through all the interrupt mapping regs and register + * the interrupt controller for our interrupt vectors. We do + * this early in order to be able to catch stray interrupts. 
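The "ino-bitmap" property is delivered as two 32-bit cells which the code below merges into one 64-bit mask before walking the set bits. A minimal userland sketch of that decode, separate from the driver code and using a made-up property value in place of the OF_getprop() result:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Two 32-bit cells as OF_getprop() would return them (made-up value). */
	uint32_t prop_array[2] = { 0x0000ffff, 0x00000007 };
	uint64_t ino_bitmap;
	int ino;

	/* The low cell covers INOs 0-31, the high cell INOs 32-63. */
	ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0];

	/* Register an interrupt controller for every INO the firmware lists. */
	for (ino = 0; ino <= 63; ino++) {
		if ((ino_bitmap & (1ULL << ino)) == 0)
			continue;
		printf("would register INO %d\n", ino);
	}
	return (0);
}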
+ */ + i = OF_getprop(node, "ino-bitmap", (void *)prop_array, + sizeof(prop_array)); + if (i == -1) + panic("%s: could not get ino-bitmap", __func__); + ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0]; + for (i = 0; i <= FO_MAX_INO; i++) { + if ((ino_bitmap & (1ULL << i)) == 0) + continue; + j = fire_intr_register(sc, i); + if (j != 0) + device_printf(dev, "could not register interrupt " + "controller for INO %d (%d)\n", i, j); + } + + /* JBC/UBC module initialization */ + FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL); + FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); + /* not enabled by OpenSolaris */ + FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL); + if (sc->sc_mode == FIRE_MODE_FIRE) { + FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL, + FIRE_JBUS_PAR_CTRL_P_EN); + FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN, + ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) & + FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) | + FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT | + FIRE_JBC_FATAL_RST_EN_CPE_P_INT | + FIRE_JBC_FATAL_RST_EN_APE_P_INT | + FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT | + FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT | + FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT | + FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT); + FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL); + } + + /* TLU initialization */ + FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR, + FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK); + /* not enabled by OpenSolaris */ + FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN, + FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK); + FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR, + FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK); + /* not enabled by OpenSolaris */ + FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN, + FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK); + FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR, + FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK); + /* not enabled by OpenSolaris */ + FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN, + FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK); + val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) | + ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) & + FO_PCI_TLU_CTRL_L0S_TIM_MASK) | + ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) & + FO_PCI_TLU_CTRL_CFG_MASK); + if (sc->sc_mode == FIRE_MODE_OBERON) + val &= ~FO_PCI_TLU_CTRL_NWPR_EN; + val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET; + FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val); + FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0); + FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK); + + /* DLU/LPU initialization */ + if (sc->sc_mode == FIRE_MODE_OBERON) + FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0); + else + FIRE_PCI_SET(sc, FO_PCI_LPU_RST, 0); + FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG, + FO_PCI_LPU_LNK_LYR_CFG_VC0_EN); + FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL, + FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN | + FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN); + if (sc->sc_mode == FIRE_MODE_OBERON) + FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS, + (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT << + FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) & + FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK); + else { + switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) & + FO_PCI_TLU_LNK_STAT_WDTH_MASK) >> + FO_PCI_TLU_LNK_STAT_WDTH_SHFT) { + case 1: + lw = 0; + break; + case 4: + lw = 1; + break; + case 8: + lw = 2; + break; + case 16: + lw = 3; + break; + default: + lw = 0; + } + mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) & + FO_PCI_TLU_CTRL_CFG_MASK) >> FO_PCI_TLU_CTRL_CFG_SHFT; + i = sizeof(fire_freq_nak_tmr_thrs) / + sizeof(*fire_freq_nak_tmr_thrs); + 
if (mps >= i) + mps = i - 1; + FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS, + (fire_freq_nak_tmr_thrs[mps][lw] << + FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) & + FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK); + FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS, + (fire_rply_tmr_thrs[mps][lw] << + FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) & + FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK); + FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR, + ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT << + FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) & + FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) | + ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT << + FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) & + FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK)); + FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2, + (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT << + FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) & + FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK); + FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3, + (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT << + FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) & + FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK); + FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4, + ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT << + FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) & + FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) | + ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT << + FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) & + FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK)); + FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0); + } + + /* ILU initialization */ + FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL); + /* not enabled by OpenSolaris */ + FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL); + + /* IMU initialization */ + FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL); + FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN, + FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) & + ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S | + FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S | + FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S | + FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P)); + + /* MMU initialization */ + FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR, + FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK); + /* not enabled by OpenSolaris */ + FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN, + FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK); + + /* DMC initialization */ + FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL); + FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0); + FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0); + + /* PEC initialization */ + FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL); + + /* Establish handlers for interesting interrupts. 
*/ + if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0) + fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc); + if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0) + fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc); + + /* MSI/MSI-X support */ + if (OF_getprop(node, "#msi", &sc->sc_msi_count, + sizeof(sc->sc_msi_count)) == -1) + panic("%s: could not determine MSI count", __func__); + if (OF_getprop(node, "msi-ranges", &msi_ranges, + sizeof(msi_ranges)) == -1) + sc->sc_msi_first = 0; + else + sc->sc_msi_first = msi_ranges.first; + if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask, + sizeof(sc->sc_msi_data_mask)) == -1) + panic("%s: could not determine MSI data mask", __func__); + if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width, + sizeof(sc->sc_msix_data_width)) > 0) + sc->sc_flags |= FIRE_MSIX; + if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges, + sizeof(msi_addr_ranges)) == -1) + panic("%s: could not determine MSI address ranges", __func__); + sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges); + sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges); + if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count, + sizeof(sc->sc_msiq_count)) == -1) + panic("%s: could not determine MSI event queue count", + __func__); + if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size, + sizeof(sc->sc_msiq_size)) == -1) + panic("%s: could not determine MSI event queue size", + __func__); + if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino, + sizeof(msi_eq_to_devino)) == -1 && + OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino, + sizeof(msi_eq_to_devino)) == -1) { + sc->sc_msiq_first = 0; + sc->sc_msiq_ino_first = FO_EQ_FIRST_INO; + } else { + sc->sc_msiq_first = msi_eq_to_devino.eq_first; + sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first; + } + if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO || + sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO) + panic("%s: event queues exceed INO range", __func__); + sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY, + M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->sc_msi_bitmap == NULL) + panic("%s: could not malloc MSI bitmap", __func__); + sc->sc_msi_msiq_table = malloc(sc->sc_msi_count * + sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->sc_msi_msiq_table == NULL) + panic("%s: could not malloc MSI-MSI event queue table", + __func__); + sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY, + M_DEVBUF, M_NOWAIT | M_ZERO); + if (sc->sc_msiq_bitmap == NULL) + panic("%s: could not malloc MSI event queue bitmap", __func__); + j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count; + sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL, + FO_EQ_ALIGNMENT, 0); + if (sc->sc_msiq == NULL) + panic("%s: could not contigmalloc MSI event queue", __func__); + memset(sc->sc_msiq, 0, j); + FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS | + (pmap_kextract((vm_offset_t)sc->sc_msiq) & + FO_PCI_EQ_BASE_ADDR_MASK)); + for (i = 0; i < sc->sc_msi_count; i++) { + j = (i + sc->sc_msi_first) << 3; + FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j, + FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) & + ~FO_PCI_MSI_MAP_V); + } + for (i = 0; i < sc->sc_msiq_count; i++) { + j = i + sc->sc_msiq_ino_first; + if ((ino_bitmap & (1ULL << j)) == 0) { + mtx_lock(&sc->sc_msi_mtx); + setbit(sc->sc_msiq_bitmap, i); + mtx_unlock(&sc->sc_msi_mtx); + } + fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg; + mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN); 
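The sc_msi_bitmap and sc_msiq_bitmap arrays allocated above are plain byte arrays sized as roundup2(count, NBBY) / NBBY and driven with the setbit()/clrbit()/isset()/isclr() macros from <sys/param.h>. A minimal userland sketch of that bookkeeping, separate from the driver code; the count of 64 is a stand-in for the firmware-reported "#msi" value:

#include <sys/param.h>	/* NBBY, roundup2(), setbit(), clrbit(), isset(), isclr() */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int msi_count = 64;	/* stand-in for the "#msi" property */
	uint8_t *bitmap;
	int i;

	/* One bit per MSI, rounded up to whole bytes. */
	bitmap = calloc(roundup2(msi_count, NBBY) / NBBY, 1);
	if (bitmap == NULL)
		return (1);

	setbit(bitmap, 3);	/* mark MSI 3 as allocated */
	printf("MSI 3 is %s\n", isset(bitmap, 3) ? "in use" : "free");

	/* Scan for the first free MSI, the way the allocation paths do. */
	for (i = 0; i < msi_count; i++)
		if (isclr(bitmap, i)) {
			printf("first free MSI is %d\n", i);
			break;
		}

	clrbit(bitmap, 3);	/* release it again */
	free(bitmap);
	return (0);
}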
+ fmqa->fmqa_base = + (struct fo_msiq_record *)((caddr_t)sc->sc_msiq + + (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i)); + j = i + sc->sc_msiq_first; + fmqa->fmqa_msiq = j; + j <<= 3; + fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j; + fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j; + FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j, + FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I | + FO_PCI_EQ_CTRL_CLR_DIS); + FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail, + (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK); + FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, + (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK); + } + FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 & + FO_PCI_MSI_32_BIT_ADDR_MASK); + FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 & + FO_PCI_MSI_64_BIT_ADDR_MASK); + + /* + * Establish a handler for interesting PCIe messages and disable + * unintersting ones. + */ + mtx_lock(&sc->sc_msi_mtx); + for (i = 0; i < sc->sc_msiq_count; i++) { + if (isclr(sc->sc_msiq_bitmap, i) != 0) { + j = i; + break; + } + } + if (i == sc->sc_msiq_count) { + mtx_unlock(&sc->sc_msi_mtx); + panic("%s: no spare event queue for PCIe messages", __func__); + } + setbit(sc->sc_msiq_bitmap, j); + mtx_unlock(&sc->sc_msi_mtx); + i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first); + if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0) + panic("%s: failed to add interrupt for PCIe messages", + __func__); + fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg); + j += sc->sc_msiq_first; + /* + * "Please note that setting the EQNUM field to a value larger than + * 35 will yield unpredictable results." + */ + if (j > 35) + panic("%s: invalid queue for PCIe messages (%d)", + __func__, j); + FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V | + ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); + FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V | + ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); + FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V | + ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); + FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0); + FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0); + FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3), + FO_PCI_EQ_CTRL_SET_EN); + +#define TC_COUNTER_MAX_MASK 0xffffffff + + /* + * Setup JBC/UBC performance counter 0 in bus cycle counting + * mode as timecounter. Unfortunately, at least with Fire all + * JBus-driven performance counters just don't advance in bus + * cycle counting mode. 
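The timecounter registered below only has to return the raw counter value; the MI timecounter code derives elapsed time from differences masked to tc_counter_mask, so a 32-bit counter may wrap freely. A minimal userland sketch of that modular delta arithmetic, separate from the driver code:

#include <stdint.h>
#include <stdio.h>

#define TC_COUNTER_MAX_MASK 0xffffffffu	/* 32-bit counter, as in the driver */

/* Difference between two counter reads, correct across one wrap. */
static uint32_t
tc_delta(uint32_t then, uint32_t now)
{
	return ((now - then) & TC_COUNTER_MAX_MASK);
}

int
main(void)
{
	/* A read shortly before and one shortly after the counter wraps. */
	uint32_t then = 0xfffffff0u, now = 0x00000010u;

	printf("elapsed ticks: %u\n", tc_delta(then, now));	/* prints 32 */
	return (0);
}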
+ */ + if (device_get_unit(dev) == 0) { + FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0); + FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0); + FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL, + (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) | + (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT)); +#ifdef FIRE_DEBUG + device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n", + (long long unsigned)FIRE_CTRL_READ_8(sc, + FO_XBC_PRF_CNT0)); + device_printf(dev, "FO_XBC_PRF_CNT0 0x%016llx\n", + (long long unsigned)FIRE_CTRL_READ_8(sc, + FO_XBC_PRF_CNT0)); +#endif + tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO); + if (tc == NULL) + panic("%s: could not malloc timecounter", __func__); + tc->tc_get_timecount = fire_get_timecount; + tc->tc_poll_pps = NULL; + tc->tc_counter_mask = TC_COUNTER_MAX_MASK; + if (OF_getprop(OF_peer(0), "clock-frequency", &prop, + sizeof(prop)) == -1) + panic("%s: could not determine clock frequency", + __func__); + tc->tc_frequency = prop; + tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF); + tc->tc_quality = -FIRE_PERF_CNT_QLTY; + tc->tc_priv = sc; + tc_init(tc); + } + + /* + * Set up the IOMMU. Both Fire and Oberon have one per PBM, but + * neither has a streaming buffer. + */ + memcpy(&sc->sc_dma_methods, &iommu_dma_methods, + sizeof(sc->sc_dma_methods)); + sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM; + if (sc->sc_mode == FIRE_MODE_OBERON) { + sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE; + sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS); + } else { + sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync; + sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS); + } + sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0; + /* Punch in our copies. */ + sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]); + sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]); + sc->sc_is.is_iommu = FO_PCI_MMU; + val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL); + iommu_init(device_get_nameunit(sc->sc_dev), &sc->sc_is, 7, -1, 0); +#ifdef FIRE_DEBUG + device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n", + (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr); +#endif + + /* Initialize memory and I/O rmans. */ + sc->sc_pci_io_rman.rm_type = RMAN_ARRAY; + sc->sc_pci_io_rman.rm_descr = "Fire PCI I/O Ports"; + if (rman_init(&sc->sc_pci_io_rman) != 0 || + rman_manage_region(&sc->sc_pci_io_rman, 0, FO_IO_SIZE) != 0) + panic("%s: failed to set up I/O rman", __func__); + sc->sc_pci_mem_rman.rm_type = RMAN_ARRAY; + sc->sc_pci_mem_rman.rm_descr = "Fire PCI Memory"; + if (rman_init(&sc->sc_pci_mem_rman) != 0 || + rman_manage_region(&sc->sc_pci_mem_rman, 0, FO_MEM_SIZE) != 0) + panic("%s: failed to set up memory rman", __func__); + + nrange = OF_getprop_alloc(node, "ranges", sizeof(*range), + (void **)&range); + /* + * Make sure that the expected ranges are present. The + * OFW_PCI_CS_MEM64 one is not currently used though. + */ + if (nrange != FIRE_NRANGE) + panic("%s: unsupported number of ranges", __func__); + /* + * Find the addresses of the various bus spaces. + * There should not be multiple ones of one kind. + * The physical start addresses of the ranges are the configuration, + * memory and I/O handles. + */ + for (i = 0; i < FIRE_NRANGE; i++) { + j = OFW_PCI_RANGE_CS(&range[i]); + if (sc->sc_pci_bh[j] != 0) + panic("%s: duplicate range for space %d", + __func__, j); + sc->sc_pci_bh[j] = OFW_PCI_RANGE_PHYS(&range[i]); + } + free(range, M_OFWPROP); + + /* Allocate our tags. 
*/ + sc->sc_pci_memt = fire_alloc_bus_tag(sc, PCI_MEMORY_BUS_SPACE); + sc->sc_pci_iot = fire_alloc_bus_tag(sc, PCI_IO_BUS_SPACE); + sc->sc_pci_cfgt = fire_alloc_bus_tag(sc, PCI_CONFIG_BUS_SPACE); + if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, + sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr, + 0xff, 0xffffffff, 0, NULL, NULL, &sc->sc_pci_dmat) != 0) + panic("%s: bus_dma_tag_create failed", __func__); + /* Customize the tag. */ + sc->sc_pci_dmat->dt_cookie = &sc->sc_is; + sc->sc_pci_dmat->dt_mt = &sc->sc_dma_methods; + + /* + * Get the bus range from the firmware. + * NB: Neither Fire nor Oberon support PCI bus reenumeration. + */ + i = OF_getprop(node, "bus-range", (void *)prop_array, + sizeof(prop_array)); + if (i == -1) + panic("%s: could not get bus-range", __func__); + if (i != sizeof(prop_array)) + panic("%s: broken bus-range (%d)", __func__, i); + sc->sc_pci_secbus = prop_array[0]; + sc->sc_pci_subbus = prop_array[1]; + if (bootverbose != 0) + device_printf(dev, "bus range %u to %u; PCI bus %d\n", + sc->sc_pci_secbus, sc->sc_pci_subbus, sc->sc_pci_secbus); + + ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(ofw_pci_intr_t)); + +#define FIRE_SYSCTL_ADD_UINT(name, arg, desc) \ + SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), \ + SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, \ + (name), CTLFLAG_RD, (arg), 0, (desc)) + + FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err, + "ILU unknown errors"); + FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async, + "JBC correctable errors"); + FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int, + "JBC unsolicited interrupt ACK/NACK errors"); + FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd, + "JBC unsolicited read response errors"); + FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors"); + FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce, + "DLU/TLU correctable errors"); + FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal", + &sc->sc_stats_tlu_oe_non_fatal, + "DLU/TLU other event non-fatal errors summary"), + FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err, + "DLU/TLU receive other event errors"), + FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err, + "DLU/TLU transmit other event errors"), + FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue, + "UBC DMARDUE erros"); + +#undef FIRE_SYSCTL_ADD_UINT + + device_add_child(dev, "pci", -1); + return (bus_generic_attach(dev)); +} + +static void +fire_set_intr(struct fire_softc *sc, u_int index, u_int ino, + driver_filter_t handler, void *arg) +{ + u_long vec; + int rid; + + rid = index; + sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev, + SYS_RES_IRQ, &rid, RF_ACTIVE); + if (sc->sc_irq_res[index] == NULL || + INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino || + INTIGN(vec) != sc->sc_ign || + intr_vectors[vec].iv_ic != &fire_ic || + bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index], + INTR_TYPE_MISC | INTR_FAST, handler, NULL, arg, + &sc->sc_ihand[index]) != 0) + panic("%s: failed to set up interrupt %d", __func__, index); +} + +static int +fire_intr_register(struct fire_softc *sc, u_int ino) +{ + struct fire_icarg *fica; + bus_addr_t intrclr, intrmap; + int error; + + if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0) + return (ENXIO); + fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ? 
+ sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF, + M_NOWAIT); + if (fica == NULL) + return (ENOMEM); + fica->fica_sc = sc; + fica->fica_map = intrmap; + fica->fica_clr = intrclr; + error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino), + &fire_ic, fica)); + if (error != 0) + free(fica, M_DEVBUF); + return (error); +} + +static int +fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr, + bus_addr_t *intrclrptr) +{ + + if (ino > FO_MAX_INO) { + device_printf(sc->sc_dev, "out of range INO %d requested\n", + ino); + return (0); + } + + ino <<= 3; + if (intrmapptr != NULL) + *intrmapptr = FO_PCI_INT_MAP_BASE + ino; + if (intrclrptr != NULL) + *intrclrptr = FO_PCI_INT_CLR_BASE + ino; + return (1); +} + +/* + * Interrupt handlers + */ +static int +fire_dmc_pec(void *arg) +{ + struct fire_softc *sc; + device_t dev; + uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar; + uint64_t mmutfsr, oestat, pecstat, uestat, val; + u_int fatal, oenfatal; + + fatal = 0; + sc = arg; + dev = sc->sc_dev; + mtx_lock_spin(&sc->sc_pcib_mtx); + mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT); + if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) { + dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT); + if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) { + imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT); + device_printf(dev, "IMU error %#llx\n", + (unsigned long long)imustat); + if ((imustat & + FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) { + fatal = 1; + val = FIRE_PCI_READ_8(sc, + FO_PCI_IMU_SCS_ERR_LOG); + device_printf(dev, "SCS error log %#llx\n", + (unsigned long long)val); + } + if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) { + fatal = 1; + val = FIRE_PCI_READ_8(sc, + FO_PCI_IMU_EQS_ERR_LOG); + device_printf(dev, "EQS error log %#llx\n", + (unsigned long long)val); + } + if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P | + FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P | + FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P | + FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) { + fatal = 1; + val = FIRE_PCI_READ_8(sc, + FO_PCI_IMU_RDS_ERR_LOG); + device_printf(dev, "RDS error log %#llx\n", + (unsigned long long)val); + } + } + if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) { + fatal = 1; + mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT); + mmutfar = FIRE_PCI_READ_8(sc, + FO_PCI_MMU_TRANS_FAULT_ADDR); + mmutfsr = FIRE_PCI_READ_8(sc, + FO_PCI_MMU_TRANS_FAULT_STAT); + if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P | + FO_PCI_MMU_ERR_INT_TBW_ERR_P | + FO_PCI_MMU_ERR_INT_TBW_UDE_P | + FO_PCI_MMU_ERR_INT_TBW_DME_P | + FO_PCI_MMU_ERR_INT_TTC_CAE_P | + FIRE_PCI_MMU_ERR_INT_TTC_DPE_P | + OBERON_PCI_MMU_ERR_INT_TTC_DUE_P | + FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0) + fatal = 1; + else { + sc->sc_stats_mmu_err++; + FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR, + mmustat); + } + device_printf(dev, + "MMU error %#llx: TFAR %#llx TFSR %#llx\n", + (unsigned long long)mmustat, + (unsigned long long)mmutfar, + (unsigned long long)mmutfsr); + } + } + if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) { + pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT); + if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) { + fatal = 1; + uestat = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_UERR_INT_STAT); + device_printf(dev, + "DLU/TLU uncorrectable error %#llx\n", + (unsigned long long)uestat); + if ((uestat 
& (FO_PCI_TLU_UERR_INT_UR_P | + OBERON_PCI_TLU_UERR_INT_POIS_P | + FO_PCI_TLU_UERR_INT_MFP_P | + FO_PCI_TLU_UERR_INT_ROF_P | + FO_PCI_TLU_UERR_INT_UC_P | + FIRE_PCI_TLU_UERR_INT_PP_P | + OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_RX_UERR_HDR1_LOG); + device_printf(dev, + "receive header log %#llx\n", + (unsigned long long)val); + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_RX_UERR_HDR2_LOG); + device_printf(dev, + "receive header log 2 %#llx\n", + (unsigned long long)val); + } + if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_TX_UERR_HDR1_LOG); + device_printf(dev, + "transmit header log %#llx\n", + (unsigned long long)val); + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_TX_UERR_HDR2_LOG); + device_printf(dev, + "transmit header log 2 %#llx\n", + (unsigned long long)val); + } + if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT); + device_printf(dev, + "link layer interrupt and status %#llx\n", + (unsigned long long)val); + } + if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_LPU_PHY_LYR_INT_STAT); + device_printf(dev, + "phy layer interrupt and status %#llx\n", + (unsigned long long)val); + } + } + if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) { + sc->sc_stats_tlu_ce++; + cestat = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_CERR_INT_STAT); + device_printf(dev, + "DLU/TLU correctable error %#llx\n", + (unsigned long long)cestat); + val = FIRE_PCI_READ_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT); + device_printf(dev, + "link layer interrupt and status %#llx\n", + (unsigned long long)val); + if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) { + FIRE_PCI_WRITE_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT, val); + val = FIRE_PCI_READ_8(sc, + FO_PCI_LPU_PHY_LYR_INT_STAT); + device_printf(dev, + "phy layer interrupt and status %#llx\n", + (unsigned long long)val); + } + FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR, + cestat); + } + if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) { + oenfatal = 0; + oestat = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_OEVENT_INT_STAT); + device_printf(dev, "DLU/TLU other event %#llx\n", + (unsigned long long)oestat); + if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | + FO_PCI_TLU_OEVENT_MRC_P | + FO_PCI_TLU_OEVENT_WUC_P | + FO_PCI_TLU_OEVENT_RUC_P | + FO_PCI_TLU_OEVENT_CRS_P)) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_RX_OEVENT_HDR1_LOG); + device_printf(dev, + "receive header log %#llx\n", + (unsigned long long)val); + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_RX_OEVENT_HDR2_LOG); + device_printf(dev, + "receive header log 2 %#llx\n", + (unsigned long long)val); + if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | + FO_PCI_TLU_OEVENT_MRC_P | + FO_PCI_TLU_OEVENT_WUC_P | + FO_PCI_TLU_OEVENT_RUC_P)) != 0) + fatal = 1; + else { + sc->sc_stats_tlu_oe_rx_err++; + oenfatal = 1; + } + } + if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | + FO_PCI_TLU_OEVENT_CTO_P | + FO_PCI_TLU_OEVENT_WUC_P | + FO_PCI_TLU_OEVENT_RUC_P)) != 0) { + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_TX_OEVENT_HDR1_LOG); + device_printf(dev, + "transmit header log %#llx\n", + (unsigned long long)val); + val = FIRE_PCI_READ_8(sc, + FO_PCI_TLU_TX_OEVENT_HDR2_LOG); + device_printf(dev, + "transmit header log 2 %#llx\n", + (unsigned long long)val); + if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | + FO_PCI_TLU_OEVENT_CTO_P | + FO_PCI_TLU_OEVENT_WUC_P | + FO_PCI_TLU_OEVENT_RUC_P)) != 0) + fatal = 1; + else { + sc->sc_stats_tlu_oe_tx_err++; + oenfatal = 1; + } + } + if ((oestat & 
(FO_PCI_TLU_OEVENT_ERO_P | + FO_PCI_TLU_OEVENT_EMP_P | + FO_PCI_TLU_OEVENT_EPE_P | + FIRE_PCI_TLU_OEVENT_ERP_P | + OBERON_PCI_TLU_OEVENT_ERBU_P | + FIRE_PCI_TLU_OEVENT_EIP_P | + OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) { + fatal = 1; + val = FIRE_PCI_READ_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT); + device_printf(dev, + "link layer interrupt and status %#llx\n", + (unsigned long long)val); + } + if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P | + FO_PCI_TLU_OEVENT_EDP_P | + FIRE_PCI_TLU_OEVENT_EHP_P | + OBERON_PCI_TLU_OEVENT_TLUEITMO_S | + FO_PCI_TLU_OEVENT_ERU_P)) != 0) + fatal = 1; + if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P | + FO_PCI_TLU_OEVENT_LWC_P | + FO_PCI_TLU_OEVENT_LIN_P | + FO_PCI_TLU_OEVENT_LRS_P | + FO_PCI_TLU_OEVENT_LDN_P | + FO_PCI_TLU_OEVENT_LUP_P)) != 0) + oenfatal = 1; + if (oenfatal != 0) { + sc->sc_stats_tlu_oe_non_fatal++; + FIRE_PCI_WRITE_8(sc, + FO_PCI_TLU_OEVENT_STAT_CLR, oestat); + if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0) + FIRE_PCI_WRITE_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT, + FIRE_PCI_READ_8(sc, + FO_PCI_LPU_LNK_LYR_INT_STAT)); + } + } + if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) { + ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT); + device_printf(dev, "ILU error %#llx\n", + (unsigned long long)ilustat); + if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P | + FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0) + fatal = 1; + else { + sc->sc_stats_ilu_err++; + FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT, + ilustat); + } + } + } + mtx_unlock_spin(&sc->sc_pcib_mtx); + if (fatal != 0) + panic("%s: fatal DMC/PEC error", + device_get_nameunit(sc->sc_dev)); + return (FILTER_HANDLED); +} + +static int +fire_xcb(void *arg) +{ + struct fire_softc *sc; + device_t dev; + uint64_t errstat, intstat, val; + u_int fatal; + + fatal = 0; + sc = arg; + dev = sc->sc_dev; + mtx_lock_spin(&sc->sc_pcib_mtx); + if (sc->sc_mode == FIRE_MODE_OBERON) { + intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT); + device_printf(dev, "UBC error: interrupt status %#llx\n", + (unsigned long long)intstat); + if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P | + OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0) + fatal = 1; + else + sc->sc_stats_ubc_dmardue++; + if (fatal != 0) { + mtx_unlock_spin(&sc->sc_pcib_mtx); + panic("%s: fatal UBC core block error", + device_get_nameunit(sc->sc_dev)); + } else { + FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); + mtx_unlock_spin(&sc->sc_pcib_mtx); + } + } else { + errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT); + if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE | + FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT | + FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) { + intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT); + device_printf(dev, "JBC interrupt status %#llx\n", + (unsigned long long)intstat); + if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) { + val = FIRE_CTRL_READ_8(sc, + FIRE_JBC_CSR_ERR_LOG); + device_printf(dev, "CSR error log %#llx\n", + (unsigned long long)val); + } + if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P | + FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) { + if ((intstat & + FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0) + sc->sc_stats_jbc_unsol_rd++; + if ((intstat & + FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0) + sc->sc_stats_jbc_unsol_int++; + val = FIRE_CTRL_READ_8(sc, + FIRE_DMCINT_IDC_ERR_LOG); + device_printf(dev, + "DMCINT IDC error log %#llx\n", + (unsigned long long)val); + } + if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P | + FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) { + fatal = 1; + val = FIRE_CTRL_READ_8(sc, + FIRE_MERGE_TRANS_ERR_LOG); + device_printf(dev, + "merge transaction error log %#llx\n", + 
(unsigned long long)val); + } + if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) { + fatal = 1; + val = FIRE_CTRL_READ_8(sc, + FIRE_JBCINT_OTRANS_ERR_LOG); + device_printf(dev, + "JBCINT out transaction error log " + "%#llx\n", (unsigned long long)val); + val = FIRE_CTRL_READ_8(sc, + FIRE_JBCINT_OTRANS_ERR_LOG2); + device_printf(dev, + "JBCINT out transaction error log 2 " + "%#llx\n", (unsigned long long)val); + } + if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P | + FIRE_JBC_ERR_INT_CE_ASYN_P | + FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P | + FIRE_JBC_ERR_INT_JUE_P | + FIRE_JBC_ERR_INT_ICISE_P | + FIRE_JBC_ERR_INT_WR_DPE_P | + FIRE_JBC_ERR_INT_RD_DPE_P | + FIRE_JBC_ERR_INT_ILL_BMW_P | + FIRE_JBC_ERR_INT_ILL_BMR_P | + FIRE_JBC_ERR_INT_BJC_P)) != 0) { + if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P | + FIRE_JBC_ERR_INT_JTE_P | + FIRE_JBC_ERR_INT_JBE_P | + FIRE_JBC_ERR_INT_JUE_P | + FIRE_JBC_ERR_INT_ICISE_P | + FIRE_JBC_ERR_INT_WR_DPE_P | + FIRE_JBC_ERR_INT_RD_DPE_P | + FIRE_JBC_ERR_INT_ILL_BMW_P | + FIRE_JBC_ERR_INT_ILL_BMR_P | + FIRE_JBC_ERR_INT_BJC_P)) != 0) + fatal = 1; + else + sc->sc_stats_jbc_ce_async++; + val = FIRE_CTRL_READ_8(sc, + FIRE_JBCINT_ITRANS_ERR_LOG); + device_printf(dev, + "JBCINT in transaction error log %#llx\n", + (unsigned long long)val); + val = FIRE_CTRL_READ_8(sc, + FIRE_JBCINT_ITRANS_ERR_LOG2); + device_printf(dev, + "JBCINT in transaction error log 2 " + "%#llx\n", (unsigned long long)val); + } + if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P | + FIRE_JBC_ERR_INT_ILL_ACC_RD_P | + FIRE_JBC_ERR_INT_PIO_UNMAP_P | + FIRE_JBC_ERR_INT_PIO_DPE_P | + FIRE_JBC_ERR_INT_PIO_CPE_P | + FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) { + fatal = 1; + val = FIRE_CTRL_READ_8(sc, + FIRE_JBC_CSR_ERR_LOG); + device_printf(dev, + "DMCINT ODCD error log %#llx\n", + (unsigned long long)val); + } + if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P | + FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P | + FIRE_JBC_ERR_INT_PIO_CPE_P | + FIRE_JBC_ERR_INT_JTCEEW_P | + FIRE_JBC_ERR_INT_JTCEEI_P | + FIRE_JBC_ERR_INT_JTCEER_P)) != 0) { + fatal = 1; + val = FIRE_CTRL_READ_8(sc, + FIRE_FATAL_ERR_LOG); + device_printf(dev, "fatal error log %#llx\n", + (unsigned long long)val); + val = FIRE_CTRL_READ_8(sc, + FIRE_FATAL_ERR_LOG2); + device_printf(dev, "fatal error log 2 " + "%#llx\n", (unsigned long long)val); + } + if (fatal != 0) { + mtx_unlock_spin(&sc->sc_pcib_mtx); + panic("%s: fatal JBC core block error", + device_get_nameunit(sc->sc_dev)); + } else { + FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); + mtx_unlock_spin(&sc->sc_pcib_mtx); + } + } else { + mtx_unlock_spin(&sc->sc_pcib_mtx); + panic("%s: unknown JCB core block error status %#llx", + device_get_nameunit(sc->sc_dev), + (unsigned long long)errstat); + } + } + return (FILTER_HANDLED); +} + +static int +fire_pcie(void *arg) +{ + struct fire_msiqarg *fmqa; + struct fire_softc *sc; + struct fo_msiq_record *qrec; + device_t dev; + uint64_t word0; + u_int head, msg, msiq; + + fmqa = arg; + sc = fmqa->fmqa_fica.fica_sc; + dev = sc->sc_dev; + msiq = fmqa->fmqa_msiq; + mtx_lock_spin(&fmqa->fmqa_mtx); + head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >> + FO_PCI_EQ_HD_SHFT; + qrec = &fmqa->fmqa_base[head]; + word0 = qrec->fomqr_word0; + for (;;) { + KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0, + ("%s: received non-PCIe message in event queue %d " + "(word0 %#llx)", device_get_nameunit(dev), msiq, + (unsigned long long)word0)); + msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >> + FO_MQR_WORD0_DATA0_SHFT; + +#define PCIE_MSG_CODE_ERR_COR 0x30 +#define 
PCIE_MSG_CODE_ERR_NONFATAL 0x31 +#define PCIE_MSG_CODE_ERR_FATAL 0x33 + + if (msg == PCIE_MSG_CODE_ERR_COR) + device_printf(dev, "correctable PCIe error\n"); + else if (msg == PCIE_MSG_CODE_ERR_NONFATAL || + msg == PCIE_MSG_CODE_ERR_FATAL) + panic("%s: %sfatal PCIe error", + device_get_nameunit(dev), + msg == PCIE_MSG_CODE_ERR_NONFATAL ? "non-" : ""); + else + panic("%s: received unknown PCIe message %#x", + device_get_nameunit(dev), msg); + qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK; + head = (head + 1) % sc->sc_msiq_size; + qrec = &fmqa->fmqa_base[head]; + word0 = qrec->fomqr_word0; + if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0)) + break; + } + FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) << + FO_PCI_EQ_HD_SHFT); + if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) & + FO_PCI_EQ_TL_OVERR) != 0) { + device_printf(dev, "event queue %d overflow\n", msiq); + msiq <<= 3; + FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq, + FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) | + FO_PCI_EQ_CTRL_CLR_COVERR); + } + mtx_unlock_spin(&fmqa->fmqa_mtx); + return (FILTER_HANDLED); +} + +static int +fire_maxslots(device_t dev) +{ + + return (1); +} + +static uint32_t +fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, + int width) +{ + struct fire_softc *sc; + bus_space_handle_t bh; + u_long offset = 0; + uint32_t r, wrd; + int i; + uint16_t shrt; + uint8_t byte; + + sc = device_get_softc(dev); + if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus || + slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) + return (-1); + + offset = FO_CONF_OFF(bus, slot, func, reg); + bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG]; + switch (width) { + case 1: + i = bus_space_peek_1(sc->sc_pci_cfgt, bh, offset, &byte); + r = byte; + break; + case 2: + i = bus_space_peek_2(sc->sc_pci_cfgt, bh, offset, &shrt); + r = shrt; + break; + case 4: + i = bus_space_peek_4(sc->sc_pci_cfgt, bh, offset, &wrd); + r = wrd; + break; + default: + panic("%s: bad width", __func__); + /* NOTREACHED */ + } + + if (i) { +#ifdef FIRE_DEBUG + printf("%s: read data error reading: %d.%d.%d: 0x%x\n", + __func__, bus, slot, func, reg); +#endif + r = -1; + } + return (r); +} + +static void +fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, + uint32_t val, int width) +{ + struct fire_softc *sc; + bus_space_handle_t bh; + u_long offset = 0; + + sc = device_get_softc(dev); + if (bus < sc->sc_pci_secbus || bus > sc->sc_pci_subbus || + slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) + return; + + offset = FO_CONF_OFF(bus, slot, func, reg); + bh = sc->sc_pci_bh[OFW_PCI_CS_CONFIG]; + switch (width) { + case 1: + bus_space_write_1(sc->sc_pci_cfgt, bh, offset, val); + break; + case 2: + bus_space_write_2(sc->sc_pci_cfgt, bh, offset, val); + break; + case 4: + bus_space_write_4(sc->sc_pci_cfgt, bh, offset, val); + break; + default: + panic("%s: bad width", __func__); + /* NOTREACHED */ + } +} + +static int +fire_route_interrupt(device_t bridge, device_t dev, int pin) +{ + struct fire_softc *sc; + struct ofw_pci_register reg; + ofw_pci_intr_t pintr, mintr; + uint8_t maskbuf[sizeof(reg) + sizeof(pintr)]; + + sc = device_get_softc(bridge); + pintr = pin; + if (ofw_bus_lookup_imap(ofw_bus_get_node(dev), &sc->sc_pci_iinfo, + ®, sizeof(reg), &pintr, sizeof(pintr), &mintr, sizeof(mintr), + maskbuf) != 0) + return (mintr); + + device_printf(bridge, "could not route pin %d for device %d.%d\n", + pin, pci_get_slot(dev), pci_get_function(dev)); + return 
(PCI_INVALID_IRQ); +} + +static int +fire_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) +{ + struct fire_softc *sc; + + sc = device_get_softc(dev); + switch (which) { + case PCIB_IVAR_DOMAIN: + *result = device_get_unit(dev); + return (0); + case PCIB_IVAR_BUS: + *result = sc->sc_pci_secbus; + return (0); + } + return (ENOENT); +} + +#define VIS_BLOCKSIZE 64 + +static void +fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map, + bus_dmasync_op_t op) +{ + static u_char buf[VIS_BLOCKSIZE] __aligned(VIS_BLOCKSIZE); + register_t reg, s; + + if ((map->dm_flags & DMF_LOADED) == 0 || + (op & ~BUS_DMASYNC_POSTWRITE) == 0) + return; + + s = intr_disable(); + reg = rd(fprs); + wr(fprs, reg | FPRS_FEF, 0); + __asm __volatile("stda %%f0, [%0] %1" + : : "r" (buf), "n" (ASI_BLK_COMMIT_S)); + membar(Sync); + wr(fprs, reg, 0); + intr_restore(s); +} + +static void +fire_intr_enable(void *arg) +{ + struct intr_vector *iv; + struct fire_icarg *fica; + struct fire_softc *sc; + struct pcpu *pc; + uint64_t mr; + u_int ctrl, i; + + iv = arg; + fica = iv->iv_icarg; + sc = fica->fica_sc; + mr = FO_PCI_IMAP_V; + if (sc->sc_mode == FIRE_MODE_OBERON) + mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) & + OBERON_PCI_IMAP_T_DESTID_MASK; + else + mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) & + FIRE_PCI_IMAP_T_JPID_MASK; + /* + * Given that all mondos for the same target are required to use the + * same interrupt controller we just use the CPU ID for indexing the + * latter. + */ + ctrl = 0; + for (i = 0; i < mp_ncpus; ++i) { + pc = pcpu_find(i); + if (pc == NULL || iv->iv_mid != pc->pc_mid) + continue; + ctrl = pc->pc_cpuid % 4; + break; + } + mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT & + FO_PCI_IMAP_INT_CTRL_NUM_MASK; + FIRE_PCI_WRITE_8(sc, fica->fica_map, mr); +} + +static void +fire_intr_disable(void *arg) +{ + struct intr_vector *iv; + struct fire_icarg *fica; + struct fire_softc *sc; + + iv = arg; + fica = iv->iv_icarg; + sc = fica->fica_sc; + FIRE_PCI_WRITE_8(sc, fica->fica_map, + FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V); +} + +static void +fire_intr_assign(void *arg) +{ + struct intr_vector *iv; + struct fire_icarg *fica; + struct fire_softc *sc; + uint64_t mr; + + iv = arg; + fica = iv->iv_icarg; + sc = fica->fica_sc; + mr = FIRE_PCI_READ_8(sc, fica->fica_map); + if ((mr & FO_PCI_IMAP_V) != 0) { + FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V); + FIRE_PCI_BARRIER(sc, fica->fica_map, 8, + BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); + } + while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE) + ; + if ((mr & FO_PCI_IMAP_V) != 0) + fire_intr_enable(arg); +} + +static void +fire_intr_clear(void *arg) +{ + struct intr_vector *iv; + struct fire_icarg *fica; + + iv = arg; + fica = iv->iv_icarg; + FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE); +} + +/* + * Given that the event queue implementation matches our current MD and MI + * interrupt frameworks like square pegs fit into round holes we are generous + * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs + * per Host-PCIe-bridge (we use one event queue for the PCIe error messages). + * This seems tolerable as long as most devices just use one MSI/MSI-X anyway. 
+ * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us + * to decouple the 1:1 mapping at the cost of no longer being able to bind + * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to + * quiesce a device while we move its MSIs/MSI-Xs to another event queue. + */ + +static int +fire_alloc_msi(device_t dev, device_t child, int count, int maxcount, + int *irqs) +{ + struct fire_softc *sc; + u_int i, j, msiqrun; + + if (powerof2(count) == 0 || count > 32) + return (EINVAL); + + sc = device_get_softc(dev); + mtx_lock(&sc->sc_msi_mtx); + msiqrun = 0; + for (i = 0; i < sc->sc_msiq_count; i++) { + for (j = i; j < i + count; j++) { + if (isclr(sc->sc_msiq_bitmap, j) == 0) + break; + } + if (j == i + count) { + msiqrun = i; + break; + } + } + if (i == sc->sc_msiq_count) { + mtx_unlock(&sc->sc_msi_mtx); + return (ENXIO); + } + /* + * It's unclear whether we need to actually align the MSIs in the + * mapping table based on the maxcount or just the count. We use + * maxcount to be on the safe side. + */ + for (i = 0; i + maxcount < sc->sc_msi_count; i += maxcount) { + for (j = i; j < i + maxcount; j++) + if (isclr(sc->sc_msi_bitmap, j) == 0) + break; + if (j == i + maxcount) { + for (j = 0; j < count; j++) { + setbit(sc->sc_msiq_bitmap, msiqrun + j); + setbit(sc->sc_msi_bitmap, i + j); + sc->sc_msi_msiq_table[i + j] = msiqrun + j; + irqs[j] = sc->sc_msi_first + i + j; + } + mtx_unlock(&sc->sc_msi_mtx); + return (0); + } + } + mtx_unlock(&sc->sc_msi_mtx); + return (ENXIO); +} + +static int +fire_release_msi(device_t dev, device_t child, int count, int *irqs) +{ + struct fire_softc *sc; + u_int i; + + sc = device_get_softc(dev); + mtx_lock(&sc->sc_msi_mtx); + for (i = 0; i < count; i++) { + clrbit(sc->sc_msiq_bitmap, + sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]); + clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first); + } + mtx_unlock(&sc->sc_msi_mtx); + return (0); +} + +static int +fire_alloc_msix(device_t dev, device_t child, int *irq) +{ + struct fire_softc *sc; + u_int i, msiq; + + sc = device_get_softc(dev); + if ((sc->sc_flags & FIRE_MSIX) == 0) + return (ENXIO); + mtx_lock(&sc->sc_msi_mtx); + msiq = 0; + for (i = 0; i < sc->sc_msiq_count; i++) { + if (isclr(sc->sc_msiq_bitmap, i) != 0) { + msiq = i; + break; + } + } + if (i == sc->sc_msiq_count) { + mtx_unlock(&sc->sc_msi_mtx); + return (ENXIO); + } + for (i = sc->sc_msi_count - 1; i >= 0; i--) { + if (isclr(sc->sc_msi_bitmap, i) != 0) { + setbit(sc->sc_msiq_bitmap, msiq); + setbit(sc->sc_msi_bitmap, i); + sc->sc_msi_msiq_table[i] = msiq; + *irq = sc->sc_msi_first + i; + mtx_unlock(&sc->sc_msi_mtx); + return (0); + } + } + mtx_unlock(&sc->sc_msi_mtx); + return (ENXIO); +} + +static int +fire_release_msix(device_t dev, device_t child, int irq) +{ + struct fire_softc *sc; + + sc = device_get_softc(dev); + if ((sc->sc_flags & FIRE_MSIX) == 0) + return (ENXIO); + mtx_lock(&sc->sc_msi_mtx); + clrbit(sc->sc_msiq_bitmap, + sc->sc_msi_msiq_table[irq - sc->sc_msi_first]); + clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first); + mtx_unlock(&sc->sc_msi_mtx); + return (0); +} + +static int +fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, + uint32_t *data) +{ + struct fire_softc *sc; + struct pci_devinfo *dinfo; + + sc = device_get_softc(dev); + dinfo = device_get_ivars(child); + if (dinfo->cfg.msi.msi_alloc > 0) { + if ((irq & ~sc->sc_msi_data_mask) != 0) { + device_printf(dev, "invalid MSI 0x%x\n", irq); + return (EINVAL); + } + } else { + if ((sc->sc_flags & FIRE_MSIX) == 0) + return 
(ENXIO); + if (fls(irq) > sc->sc_msix_data_width) { + device_printf(dev, "invalid MSI-X 0x%x\n", irq); + return (EINVAL); + } + } + if (dinfo->cfg.msi.msi_alloc > 0 && + (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0) + *addr = sc->sc_msi_addr32; + else + *addr = sc->sc_msi_addr64; + *data = irq; + return (0); +} + +static void +fire_msiq_handler(void *cookie) +{ + struct intr_vector *iv; + struct fire_msiqarg *fmqa; + struct fire_softc *sc; + struct fo_msiq_record *qrec; + device_t dev; + uint64_t word0; + u_int head, msi, msiq; + + iv = cookie; + fmqa = iv->iv_icarg; + sc = fmqa->fmqa_fica.fica_sc; + dev = sc->sc_dev; + msiq = fmqa->fmqa_msiq; + /* + * Note that since fire_intr_clear() will clear the event queue + * interrupt after the filter/handler associated with the MSI [sic] + * has been executed we have to protect the access to the event queue + * as otherwise nested event queue interrupts cause corruption of the + * event queue on MP machines. Obviously especially when abandoning + * the 1:1 mapping it would be better to not clear the event queue + * interrupt after each filter/handler invocation but only once when + * the outstanding MSIs have been processed but unfortunately that + * doesn't work well and leads to interrupt storms with controllers/ + * drivers which don't mask interrupts while the filter/handler is + * executed. Maybe delaying clearing the MSI until after the filter/ + * handler has been executed could be used to work around this but + * that's not the intended usage and might in turn cause lost MSIs. + */ + mtx_lock_spin(&fmqa->fmqa_mtx); + head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >> + FO_PCI_EQ_HD_SHFT; + qrec = &fmqa->fmqa_base[head]; + word0 = qrec->fomqr_word0; + for (;;) { + KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 || + (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0, + ("%s: received non-MSI/MSI-X message in event queue %d " + "(word0 %#llx)", device_get_nameunit(dev), msiq, + (unsigned long long)word0)); + if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0)) + break; + msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >> + FO_MQR_WORD0_DATA0_SHFT; + /* + * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping. 
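fire_msiq_handler() and fire_pcie() drain their event queue the same way: start at the head index read from the bridge, process records until one with a clear format/type field is reached, clear each record's type so the slot can be reused, and finally write the new head back. A minimal userland sketch of that consumption pattern, separate from the driver code; the record layout and queue depth here are stand-ins, not the hardware's:

#include <stdint.h>
#include <stdio.h>

#define EQ_NRECORDS	128	/* stand-in for the real queue depth */
#define REC_TYPE_MASK	0x7f	/* stand-in for FO_MQR_WORD0_FMT_TYPE_MASK */

struct eq_record {
	uint64_t word0;		/* type bits plus payload, as in fo_msiq_record */
};

static struct eq_record queue[EQ_NRECORDS];
static unsigned int head;	/* the driver reads this from the EQ head register */

/* Drain all valid records starting at the current head. */
static void
drain_queue(void)
{
	while ((queue[head].word0 & REC_TYPE_MASK) != 0) {
		printf("record %u: payload %#llx\n", head,
		    (unsigned long long)(queue[head].word0 >> 8));
		/* Clear the type bits so the slot is seen as empty next time. */
		queue[head].word0 &= ~(uint64_t)REC_TYPE_MASK;
		head = (head + 1) % EQ_NRECORDS;
	}
	/* The driver would now write the new head back to the bridge. */
}

int
main(void)
{
	/* Pretend the hardware produced two records. */
	queue[0].word0 = (0x123ULL << 8) | 0x01;
	queue[1].word0 = (0x456ULL << 8) | 0x01;
	drain_queue();
	return (0);
}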
+ */
+ KASSERT(msi == fmqa->fmqa_msi,
+ ("%s: received non-matching MSI/MSI-X in event queue %d "
+ "(%d versus %d)", device_get_nameunit(dev), msiq, msi,
+ fmqa->fmqa_msi));
+ FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3),
+ FO_PCI_MSI_CLR_EQWR_N);
+ if (__predict_false(intr_event_handle(iv->iv_event,
+ NULL) != 0))
+ printf("stray MSI/MSI-X in event queue %d\n", msiq);
+ qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK;
+ head = (head + 1) % sc->sc_msiq_size;
+ qrec = &fmqa->fmqa_base[head];
+ word0 = qrec->fomqr_word0;
+ }
+ FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) <<
+ FO_PCI_EQ_HD_SHFT);
+ if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) &
+ FO_PCI_EQ_TL_OVERR) != 0)) {
+ device_printf(dev, "event queue %d overflow\n", msiq);
+ msiq <<= 3;
+ FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
+ FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) |
+ FO_PCI_EQ_CTRL_CLR_COVERR);
+ }
+ mtx_unlock_spin(&fmqa->fmqa_mtx);
+}
+
+static int
+fire_setup_intr(device_t dev, device_t child, struct resource *ires,
+ int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
+ void **cookiep)
+{
+ struct fire_softc *sc;
+ u_long vec;
+ int error;
+ u_int msi, msiq;
+
+ sc = device_get_softc(dev);
+ /*
+ * XXX this assumes that a device only has one INTx, while in fact
+ * Cassini+ and Saturn can use all four the firmware has assigned
+ * to them, but so does pci(4).
+ */
+ if (rman_get_rid(ires) != 0) {
+ msi = rman_get_start(ires);
+ msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
+ vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq);
+ msiq += sc->sc_msiq_first;
+ if (intr_vectors[vec].iv_ic != &fire_ic) {
+ device_printf(dev,
+ "invalid interrupt controller for vector 0x%lx\n",
+ vec);
+ return (EINVAL);
+ }
+ /*
+ * The MD interrupt code needs the vector rather than the MSI.
+ */
+ rman_set_start(ires, vec);
+ rman_set_end(ires, vec);
+ error = bus_generic_setup_intr(dev, child, ires, flags, filt,
+ intr, arg, cookiep);
+ rman_set_start(ires, msi);
+ rman_set_end(ires, msi);
+ if (error == 0) {
+ /*
+ * XXX inject our event queue handler.
+ */
+ intr_vectors[vec].iv_func = fire_msiq_handler;
+ /*
+ * Record the MSI/MSI-X as long as we use a 1:1
+ * mapping.
+ */
+ ((struct fire_msiqarg *)intr_vectors[vec].iv_icarg)->
+ fmqa_msi = msi;
+ FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE +
+ (msiq << 3), FO_PCI_EQ_CTRL_SET_EN);
+ msi <<= 3;
+ FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
+ (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
+ ~FO_PCI_MSI_MAP_EQNUM_MASK) |
+ ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) &
+ FO_PCI_MSI_MAP_EQNUM_MASK));
+ FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi,
+ FO_PCI_MSI_CLR_EQWR_N);
+ FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
+ FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) |
+ FO_PCI_MSI_MAP_V);
+ }
+ return (error);
+ }
+
+ /*
+ * Make sure the vector is fully specified and we registered
+ * our interrupt controller for it.
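+ * That is, the vector must carry this bridge's IGN and fire_ic must be
+ * registered as the interrupt controller for it.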
+ */
+ vec = rman_get_start(ires);
+ if (INTIGN(vec) != sc->sc_ign) {
+ device_printf(dev, "invalid interrupt vector 0x%lx\n", vec);
+ return (EINVAL);
+ }
+ if (intr_vectors[vec].iv_ic != &fire_ic) {
+ device_printf(dev,
+ "invalid interrupt controller for vector 0x%lx\n", vec);
+ return (EINVAL);
+ }
+ return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr,
+ arg, cookiep));
+}
+
+static int
+fire_teardown_intr(device_t dev, device_t child, struct resource *ires,
+ void *cookie)
+{
+ struct fire_softc *sc;
+ u_long vec;
+ int error;
+ u_int msi, msiq;
+
+ sc = device_get_softc(dev);
+ if (rman_get_rid(ires) != 0) {
+ msi = rman_get_start(ires);
+ msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first];
+ vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first);
+ msiq += sc->sc_msiq_first;
+ msi <<= 3;
+ FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi,
+ FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) &
+ ~FO_PCI_MSI_MAP_V);
+ msiq <<= 3;
+ FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq,
+ FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I |
+ FO_PCI_EQ_CTRL_CLR_DIS);
+ FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq,
+ (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK);
+ FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq,
+ (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK);
+ /*
+ * The MD interrupt code needs the vector rather than the MSI.
+ */
+ rman_set_start(ires, vec);
+ rman_set_end(ires, vec);
+ error = bus_generic_teardown_intr(dev, child, ires, cookie);
+ rman_set_start(ires, msi >> 3);
+ rman_set_end(ires, msi >> 3);
+ return (error);
+ }
+ return (bus_generic_teardown_intr(dev, child, ires, cookie));
+}
+
+static struct resource *
+fire_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct fire_softc *sc;
+ struct resource *rv;
+ struct rman *rm;
+ bus_space_tag_t bt;
+ bus_space_handle_t bh;
+ int needactivate = flags & RF_ACTIVE;
+
+ flags &= ~RF_ACTIVE;
+
+ sc = device_get_softc(bus);
+ if (type == SYS_RES_IRQ) {
+ /*
+ * XXX: Don't accept blank ranges for now, only single
+ * interrupts. The other case should not happen with
+ * the MI PCI code...
+ * XXX: This may return a resource that is out of the
+ * range that was specified. Is this correct...?
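+ * (For RID 0 the INO passed in is remapped to the corresponding
+ * interrupt vector below before the request is handed to the parent
+ * bus, so the returned range describes the vector rather than the
+ * requested INO.)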
+ */ + if (start != end) + panic("%s: XXX: interrupt range", __func__); + if (*rid == 0) + start = end = INTMAP_VEC(sc->sc_ign, end); + return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, + type, rid, start, end, count, flags)); + } + switch (type) { + case SYS_RES_MEMORY: + rm = &sc->sc_pci_mem_rman; + bt = sc->sc_pci_memt; + bh = sc->sc_pci_bh[OFW_PCI_CS_MEM32]; + break; + case SYS_RES_IOPORT: + rm = &sc->sc_pci_io_rman; + bt = sc->sc_pci_iot; + bh = sc->sc_pci_bh[OFW_PCI_CS_IO]; + break; + default: + return (NULL); + /* NOTREACHED */ + } + + rv = rman_reserve_resource(rm, start, end, count, flags, child); + if (rv == NULL) + return (NULL); + rman_set_rid(rv, *rid); + bh += rman_get_start(rv); + rman_set_bustag(rv, bt); + rman_set_bushandle(rv, bh); + + if (needactivate) { + if (bus_activate_resource(child, type, *rid, rv)) { + rman_release_resource(rv); + return (NULL); + } + } + return (rv); +} + +static int +fire_activate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + void *p; + int error; + + if (type == SYS_RES_IRQ) + return (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, + type, rid, r)); + if (type == SYS_RES_MEMORY) { + /* + * Need to memory-map the device space, as some drivers + * depend on the virtual address being set and usable. + */ + error = sparc64_bus_mem_map(rman_get_bustag(r), + rman_get_bushandle(r), rman_get_size(r), 0, 0, &p); + if (error != 0) + return (error); + rman_set_virtual(r, p); + } + return (rman_activate_resource(r)); +} + +static int +fire_deactivate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + + if (type == SYS_RES_IRQ) + return (BUS_DEACTIVATE_RESOURCE(device_get_parent(bus), child, + type, rid, r)); + if (type == SYS_RES_MEMORY) { + sparc64_bus_mem_unmap(rman_get_virtual(r), rman_get_size(r)); + rman_set_virtual(r, NULL); + } + return (rman_deactivate_resource(r)); +} + +static int +fire_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + int error; + + if (type == SYS_RES_IRQ) + return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, + type, rid, r)); + if (rman_get_flags(r) & RF_ACTIVE) { + error = bus_deactivate_resource(child, type, rid, r); + if (error) + return (error); + } + return (rman_release_resource(r)); +} + +static bus_dma_tag_t +fire_get_dma_tag(device_t bus, device_t child) +{ + struct fire_softc *sc; + + sc = device_get_softc(bus); + return (sc->sc_pci_dmat); +} + +static phandle_t +fire_get_node(device_t bus, device_t dev) +{ + struct fire_softc *sc; + + sc = device_get_softc(bus); + /* We only have one child, the PCI bus, which needs our own node. */ + return (sc->sc_node); +} + +static bus_space_tag_t +fire_alloc_bus_tag(struct fire_softc *sc, int type) +{ + bus_space_tag_t bt; + + bt = (bus_space_tag_t)malloc(sizeof(struct bus_space_tag), M_DEVBUF, + M_NOWAIT | M_ZERO); + if (bt == NULL) + panic("%s: out of memory", __func__); + + bt->bst_cookie = sc; + bt->bst_parent = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]); + bt->bst_type = type; + return (bt); +} + +static u_int +fire_get_timecount(struct timecounter *tc) +{ + struct fire_softc *sc; + + sc = tc->tc_priv; + return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK); +} diff --git a/sys/sparc64/pci/firereg.h b/sys/sparc64/pci/firereg.h new file mode 100644 index 000000000000..1471b09de99d --- /dev/null +++ b/sys/sparc64/pci/firereg.h @@ -0,0 +1,1004 @@ +/*- + * Copyright (c) 2009 Marius Strobl + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SPARC64_PCI_FIREREG_H_ +#define _SPARC64_PCI_FIREREG_H_ + +#define FIRE_NINTR 3 /* 2 OFW + 1 MSIq */ +#define FIRE_NRANGE 4 +#define FIRE_NREG 2 + +#define FIRE_PCI 0 +#define FIRE_CTRL 1 + +/* PCI configuration and status registers */ +#define FO_PCI_INT_MAP_BASE 0x01000 +#define FO_PCI_INT_CLR_BASE 0x01400 +#define FO_PCI_EQ_BASE_ADDR 0x10000 +#define FO_PCI_EQ_CTRL_SET_BASE 0x11000 +#define FO_PCI_EQ_CTRL_CLR_BASE 0x11200 +#define FO_PCI_EQ_TL_BASE 0x11600 +#define FO_PCI_EQ_HD_BASE 0x11800 +#define FO_PCI_MSI_MAP_BASE 0x20000 +#define FO_PCI_MSI_CLR_BASE 0x28000 +#define FO_PCI_ERR_COR 0x30000 +#define FO_PCI_ERR_NONFATAL 0x30008 +#define FO_PCI_ERR_FATAL 0x30010 +#define FO_PCI_PM_PME 0x30018 +#define FO_PCI_PME_TO_ACK 0x30020 +#define FO_PCI_IMU_INT_EN 0x31008 +#define FO_PCI_IMU_INT_STAT 0x31010 +#define FO_PCI_IMU_ERR_STAT_CLR 0x31018 +#define FO_PCI_IMU_RDS_ERR_LOG 0x31028 +#define FO_PCI_IMU_SCS_ERR_LOG 0x31030 +#define FO_PCI_IMU_EQS_ERR_LOG 0x31038 +#define FO_PCI_DMC_CORE_BLOCK_INT_EN 0x31800 +#define FO_PCI_DMC_CORE_BLOCK_ERR_STAT 0x31808 +#define FO_PCI_MULTI_CORE_ERR_STAT 0x31810 +#define FO_PCI_MSI_32_BIT_ADDR 0x34000 +#define FO_PCI_MSI_64_BIT_ADDR 0x34008 +#define FO_PCI_MMU 0x40000 +#define FO_PCI_MMU_INT_EN 0x41008 +#define FO_PCI_MMU_INT_STAT 0x41010 +#define FO_PCI_MMU_ERR_STAT_CLR 0x41018 +#define FO_PCI_MMU_TRANS_FAULT_ADDR 0x41028 +#define FO_PCI_MMU_TRANS_FAULT_STAT 0x41030 +#define FO_PCI_ILU_INT_EN 0x51008 +#define FO_PCI_ILU_INT_STAT 0x51010 +#define FO_PCI_ILU_ERR_STAT_CLR 0x51018 +#define FO_PCI_DMC_DBG_SEL_PORTA 0x53000 +#define FO_PCI_DMC_DBG_SEL_PORTB 0x53008 +#define FO_PCI_PEC_CORE_BLOCK_INT_EN 0x51800 +#define FO_PCI_PEC_CORE_BLOCK_INT_STAT 0x51808 +#define FO_PCI_TLU_CTRL 0x80000 +#define FO_PCI_TLU_OEVENT_INT_EN 0x81008 +#define FO_PCI_TLU_OEVENT_INT_STAT 0x81010 +#define FO_PCI_TLU_OEVENT_STAT_CLR 0x81018 +#define FO_PCI_TLU_RX_OEVENT_HDR1_LOG 0x81028 +#define FO_PCI_TLU_RX_OEVENT_HDR2_LOG 0x81030 +#define FO_PCI_TLU_TX_OEVENT_HDR1_LOG 0x81038 +#define FO_PCI_TLU_TX_OEVENT_HDR2_LOG 0x81040 +#define FO_PCI_TLU_DEV_CTRL 0x90008 +#define FO_PCI_TLU_LNK_CTRL 0x90020 +#define FO_PCI_TLU_LNK_STAT 0x90028 +#define FO_PCI_TLU_UERR_INT_EN 0x91008 
+#define FO_PCI_TLU_UERR_INT_STAT 0x91010 +#define FO_PCI_TLU_UERR_STAT_CLR 0x91018 +#define FO_PCI_TLU_RX_UERR_HDR1_LOG 0x91028 +#define FO_PCI_TLU_RX_UERR_HDR2_LOG 0x91030 +#define FO_PCI_TLU_TX_UERR_HDR1_LOG 0x91038 +#define FO_PCI_TLU_TX_UERR_HDR2_LOG 0x91040 +#define FO_PCI_TLU_CERR_INT_EN 0xa1008 +#define FO_PCI_TLU_CERR_INT_STAT 0xa1010 +#define FO_PCI_TLU_CERR_STAT_CLR 0xa1018 +#define FO_PCI_LPU_RST 0xe2008 +#define FO_PCI_LPU_INT_STAT 0xe2040 +#define FO_PCI_LPU_INT_MASK 0xe0248 +#define FO_PCI_LPU_LNK_LYR_CFG 0xe2200 +#define FO_PCI_LPU_LNK_LYR_INT_STAT 0xe2210 +#define FO_PCI_LPU_FLW_CTRL_UPDT_CTRL 0xe2240 +#define FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS 0xe2400 +#define FO_PCI_LPU_TXLNK_RPLY_TMR_THRS 0xe2410 +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR 0xe2430 +#define FO_PCI_LPU_PHY_LYR_INT_STAT 0xe2610 +#define FO_PCI_LPU_LTSSM_CFG2 0xe2788 +#define FO_PCI_LPU_LTSSM_CFG3 0xe2790 +#define FO_PCI_LPU_LTSSM_CFG4 0xe2798 +#define FO_PCI_LPU_LTSSM_CFG5 0xe27a0 + +/* PCI interrupt mapping registers */ +#define FO_PCI_IMAP_MDO_MODE 0x8000000000000000ULL +#define FO_PCI_IMAP_V 0x0000000080000000ULL +#define FIRE_PCI_IMAP_T_JPID_MASK 0x000000007c000000ULL +#define FIRE_PCI_IMAP_T_JPID_SHFT 26 +#define OBERON_PCI_IMAP_T_DESTID_MASK 0x000000007fe00000ULL +#define OBERON_PCI_IMAP_T_DESTID_SHFT 21 +#define FO_PCI_IMAP_INT_CTRL_NUM_MASK 0x00000000000003c0ULL +#define FO_PCI_IMAP_INT_CTRL_NUM_SHFT 6 + +/* PCI interrupt clear registers - use INTCLR_* from */ + +/* PCI event queue base address register */ +#define FO_PCI_EQ_BASE_ADDR_BYPASS 0xfffc000000000000ULL +#define FO_PCI_EQ_BASE_ADDR_MASK 0xfffffffffff80000ULL +#define FO_PCI_EQ_BASE_ADDR_SHFT 19 + +/* PCI event queue control set registers */ +#define FO_PCI_EQ_CTRL_SET_ENOVERR 0x0200000000000000ULL +#define FO_PCI_EQ_CTRL_SET_EN 0x0000100000000000ULL + +/* PCI event queue control clear registers */ +#define FO_PCI_EQ_CTRL_CLR_COVERR 0x0200000000000000ULL +#define FO_PCI_EQ_CTRL_CLR_E2I 0x0000800000000000ULL +#define FO_PCI_EQ_CTRL_CLR_DIS 0x0000100000000000ULL + +/* PCI event queue tail registers */ +#define FO_PCI_EQ_TL_OVERR 0x0200000000000000ULL +#define FO_PCI_EQ_TL_MASK 0x000000000000007fULL +#define FO_PCI_EQ_TL_SHFT 0 + +/* PCI event queue head registers */ +#define FO_PCI_EQ_HD_MASK 0x000000000000007fULL +#define FO_PCI_EQ_HD_SHFT 0 + +/* PCI MSI mapping registers */ +#define FO_PCI_MSI_MAP_V 0x8000000000000000ULL +#define FO_PCI_MSI_MAP_EQWR_N 0x4000000000000000ULL +#define FO_PCI_MSI_MAP_EQNUM_MASK 0x000000000000003fULL +#define FO_PCI_MSI_MAP_EQNUM_SHFT 0 + +/* PCI MSI clear registers */ +#define FO_PCI_MSI_CLR_EQWR_N 0x4000000000000000ULL + +/* + * PCI IMU interrupt enable, interrupt status and error status clear + * registers + */ +#define FO_PCI_IMU_ERR_INT_SPARE_S_MASK 0x00007c0000000000ULL +#define FO_PCI_IMU_ERR_INT_SPARE_S_SHFT 42 +#define FO_PCI_IMU_ERR_INT_EQ_OVER_S 0x0000020000000000ULL +#define FO_PCI_IMU_ERR_INT_EQ_NOT_EN_S 0x0000010000000000ULL +#define FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_S 0x0000008000000000ULL +#define FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_S 0x0000004000000000ULL +#define FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_S 0x0000002000000000ULL +#define FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_S 0x0000001000000000ULL +#define FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S 0x0000000800000000ULL +#define FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S 0x0000000400000000ULL +#define FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S 0x0000000200000000ULL +#define FO_PCI_IMU_ERR_INT_MSI_NOT_EN_S 0x0000000100000000ULL +#define FO_PCI_IMU_ERR_INT_SPARE_P_MASK 
0x0000000000007c00ULL +#define FO_PCI_IMU_ERR_INT_SPARE_P_SHFT 10 +#define FO_PCI_IMU_ERR_INT_EQ_OVER_P 0x0000000000000200ULL +#define FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P 0x0000000000000100ULL +#define FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P 0x0000000000000080ULL +#define FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P 0x0000000000000040ULL +#define FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P 0x0000000000000020ULL +#define FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P 0x0000000000000010ULL +#define FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P 0x0000000000000008ULL +#define FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P 0x0000000000000004ULL +#define FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P 0x0000000000000002ULL +#define FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P 0x0000000000000001ULL + +/* PCI IMU RDS error log register */ +#define FO_PCI_IMU_RDS_ERR_LOG_TYPE_MASK 0xfc00000000000000ULL +#define FO_PCI_IMU_RDS_ERR_LOG_TYPE_SHFT 58 +#define FO_PCI_IMU_RDS_ERR_LOG_LENGTH_MASK 0x03ff000000000000ULL +#define FO_PCI_IMU_RDS_ERR_LOG_LENGTH_SHFT 48 +#define FO_PCI_IMU_RDS_ERR_LOG_REQ_ID_MASK 0x0000ffff00000000ULL +#define FO_PCI_IMU_RDS_ERR_LOG_REQ_ID_SHFT 32 +#define FO_PCI_IMU_RDS_ERR_LOG_TLP_TAG_MASK 0x00000000ff000000ULL +#define FO_PCI_IMU_RDS_ERR_LOG_TLP_TAG_SHFT 24 +#define FO_PCI_IMU_RDS_ERR_LOG_BE_MCODE_MASK 0x0000000000ff0000ULL +#define FO_PCI_IMU_RDS_ERR_LOG_BE_MCODE_SHFT 16 +#define FO_PCI_IMU_RDS_ERR_LOG_MSI_DATA_MASK 0x000000000000ffffULL +#define FO_PCI_IMU_RDS_ERR_LOG_MSI_DATA_SHFT 0 + +/* PCI IMU SCS error log register */ +#define FO_PCI_IMU_SCS_ERR_LOG_TYPE_MASK 0xfc00000000000000ULL +#define FO_PCI_IMU_SCS_ERR_LOG_TYPE_SHFT 58 +#define FO_PCI_IMU_SCS_ERR_LOG_LENGTH_MASK 0x03ff000000000000ULL +#define FO_PCI_IMU_SCS_ERR_LOG_LENGTH_SHFT 48 +#define FO_PCI_IMU_SCS_ERR_LOG_REQ_ID_MASK 0x0000ffff00000000ULL +#define FO_PCI_IMU_SCS_ERR_LOG_REQ_ID_SHFT 32 +#define FO_PCI_IMU_SCS_ERR_LOG_TLP_TAG_MASK 0x00000000ff000000ULL +#define FO_PCI_IMU_SCS_ERR_LOG_TLP_TAG_SHFT 24 +#define FO_PCI_IMU_SCS_ERR_LOG_BE_MODE_MASK 0x0000000000ff0000ULL +#define FO_PCI_IMU_SCS_ERR_LOG_BE_MCODE_SHFT 16 +#define FO_PCI_IMU_SCS_ERR_LOG_EQ_NUM_MASK 0x000000000000003fULL +#define FO_PCI_IMU_SCS_ERR_LOG_EQ_NUM_SHFT 0 + +/* PCI IMU EQS error log register */ +#define FO_PCI_IMU_EQS_ERR_LOG_EQ_NUM_MASK 0x000000000000003fULL +#define FO_PCI_IMU_EQS_ERROR_LOG_EQ_NUM_SHFT 0 + +/* + * PCI ERR COR, ERR NONFATAL, ERR FATAL, PM PME and PME To ACK mapping + * registers + */ +#define FO_PCI_ERR_PME_V 0x8000000000000000ULL +#define FO_PCI_ERR_PME_EQNUM_MASK 0x000000000000003fULL +#define FO_PCI_ERR_PME_EQNUM_SHFT 0 + +/* PCI DMC core and block interrupt enable register */ +#define FO_PCI_DMC_CORE_BLOCK_INT_EN_DMC 0x8000000000000000ULL +#define FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU 0x0000000000000002ULL +#define FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU 0x0000000000000001ULL + +/* PCI DMC core and block error status register */ +#define FO_PCI_DMC_CORE_BLOCK_ERR_STAT_MMU 0x0000000000000002ULL +#define FO_PCI_DMC_CORE_BLOCK_ERR_STAT_IMU 0x0000000000000001ULL + +/* PCI multi core error status register */ +#define FO_PCI_MULTI_CORE_ERR_STAT_PEC 0x0000000000000002ULL +#define FO_PCI_MULTI_CORE_ERR_STAT_DMC 0x0000000000000001ULL + +/* PCI MSI 32-bit address register */ +#define FO_PCI_MSI_32_BIT_ADDR_MASK 0x00000000ffff0000ULL +#define FO_PCI_MSI_32_BIT_ADDR_SHFT 16 + +/* PCI MSI 64-bit address register */ +#define FO_PCI_MSI_64_BIT_ADDR_MASK 0x0000ffffffff0000ULL +#define FO_PCI_MSI_64_BIT_ADDR_SHFT 16 + +/* + * PCI MMU interrupt enable, interrupt status and error status clear + * registers + */ +#define 
FO_PCI_MMU_ERR_INT_S_MASK 0x0000ffff00000000ULL +#define FO_PCI_MMU_ERR_INT_S_SHFT 32 +#define FO_PCI_MMU_ERR_INT_TBW_DPE_S 0x0000800000000000ULL +#define FO_PCI_MMU_ERR_INT_TBW_ERR_S 0x0000400000000000ULL +#define FO_PCI_MMU_ERR_INT_TBW_UDE_S 0x0000200000000000ULL +#define FO_PCI_MMU_ERR_INT_TBW_DME_S 0x0000100000000000ULL +#define FO_PCI_MMU_ERR_INT_SPARE3_S 0x0000080000000000ULL +#define FO_PCI_MMU_ERR_INT_SPARE2_S 0x0000040000000000ULL +#define FO_PCI_MMU_ERR_INT_TTC_CAE_S 0x0000020000000000ULL +#define FIRE_PCI_MMU_ERR_INT_TTC_DPE_S 0x0000010000000000ULL +#define OBERON_PCI_MMU_ERR_INT_TTC_DUE_S 0x0000010000000000ULL +#define FO_PCI_MMU_ERR_INT_TTE_PRT_S 0x0000008000000000ULL +#define FO_PCI_MMU_ERR_INT_TTE_INV_S 0x0000004000000000ULL +#define FO_PCI_MMU_ERR_INT_TRN_OOR_S 0x0000002000000000ULL +#define FO_PCI_MMU_ERR_INT_TRN_ERR_S 0x0000001000000000ULL +#define FO_PCI_MMU_ERR_INT_SPARE1_S 0x0000000800000000ULL +#define FO_PCI_MMU_ERR_INT_SPARE0_S 0x0000000400000000ULL +#define FO_PCI_MMU_ERR_INT_BYP_OOR_S 0x0000000200000000ULL +#define FO_PCI_MMU_ERR_INT_BYP_ERR_S 0x0000000100000000ULL +#define FO_PCI_MMU_ERR_INT_P_MASK 0x000000000000ffffULL +#define FO_PCI_MMU_ERR_INT_P_SHFT 0 +#define FO_PCI_MMU_ERR_INT_TBW_DPE_P 0x0000000000008000ULL +#define FO_PCI_MMU_ERR_INT_TBW_ERR_P 0x0000000000004000ULL +#define FO_PCI_MMU_ERR_INT_TBW_UDE_P 0x0000000000002000ULL +#define FO_PCI_MMU_ERR_INT_TBW_DME_P 0x0000000000001000ULL +#define FO_PCI_MMU_ERR_INT_SPARE3_P 0x0000000000000800ULL +#define FO_PCI_MMU_ERR_INT_SPARE2_P 0x0000000000000400ULL +#define FO_PCI_MMU_ERR_INT_TTC_CAE_P 0x0000000000000200ULL +#define FIRE_PCI_MMU_ERR_INT_TTC_DPE_P 0x0000000000000100ULL +#define OBERON_PCI_MMU_ERR_INT_TTC_DUE_P 0x0000000000000100ULL +#define FO_PCI_MMU_ERR_INT_TTE_PRT_P 0x0000000000000080ULL +#define FO_PCI_MMU_ERR_INT_TTE_INV_P 0x0000000000000040ULL +#define FO_PCI_MMU_ERR_INT_TRN_OOR_P 0x0000000000000020ULL +#define FO_PCI_MMU_ERR_INT_TRN_ERR_P 0x0000000000000010ULL +#define FO_PCI_MMU_ERR_INT_SPARE1_P 0x0000000000000008ULL +#define FO_PCI_MMU_ERR_INT_SPARE0_P 0x0000000000000004ULL +#define FO_PCI_MMU_ERR_INT_BYP_OOR_P 0x0000000000000002ULL +#define FO_PCI_MMU_ERR_INT_BYP_ERR_P 0x0000000000000001ULL + +/* PCI MMU translation fault address register */ +#define FO_PCI_MMU_TRANS_FAULT_ADDR_VA_MASK 0xfffffffffffffffcULL +#define FO_PCI_MMU_TRANS_FAULT_ADDR_VA_SHFT 2 + +/* PCI MMU translation fault status register */ +#define FO_PCI_MMU_TRANS_FAULT_STAT_ENTRY_MASK 0x000001ff00000000ULL +#define FO_PCI_MMU_TRANS_FAULT_STAT_ENTRY_SHFT 32 +#define FO_PCI_MMU_TRANS_FAULT_STAT_TYPE_MASK 0x00000000007f0000ULL +#define FO_PCI_MMU_TRANS_FAULT_STAT_TYPE_SHFT 16 +#define FO_PCI_MMU_TRANS_FAULT_STAT_ID_MASK 0x000000000000ffffULL +#define FO_PCI_MMU_TRANS_FAULT_STAT_ID_SHFT 0 + +/* + * PCI ILU interrupt enable, interrupt status and error status clear + * registers + */ +#define FO_PCI_ILU_ERR_INT_SPARE3_S 0x0000008000000000ULL +#define FO_PCI_ILU_ERR_INT_SPARE2_S 0x0000004000000000ULL +#define FO_PCI_ILU_ERR_INT_SPARE1_S 0x0000002000000000ULL +#define FIRE_PCI_ILU_ERR_INT_IHB_PE_S 0x0000001000000000ULL +#define OBERON_PCI_ILU_ERR_INT_IHB_UE_S 0x0000001000000000ULL +#define FO_PCI_ILU_ERR_INT_SPARE3_P 0x0000000000000080ULL +#define FO_PCI_ILU_ERR_INT_SPARE2_P 0x0000000000000040ULL +#define FO_PCI_ILU_ERR_INT_SPARE1_P 0x0000000000000020ULL +#define FIRE_PCI_ILU_ERR_INT_IHB_PE_P 0x0000000000000010ULL +#define OBERON_PCI_ILU_ERR_INT_IHB_UE_P 0x0000000000000010ULL + +/* PCI DMC debug select registers for port a/b */ +#define 
FO_PCI_DMC_DBG_SEL_PORT_BLCK_MASK 0x00000000000003c0ULL +#define FO_PCI_DMC_DBG_SEL_PORT_BLCK_SHFT 6 +#define FO_PCI_DMC_DBG_SEL_PORT_SUB_MASK 0x0000000000000038ULL +#define FO_PCI_DMC_DBG_SEL_PORT_SUB_SHFT 3 +#define FO_PCI_DMC_DBG_SEL_PORT_SUB_SGNL_MASK 0x0000000000000007ULL +#define FO_PCI_DMC_DBG_SEL_PORT_SUB_SGNL_SHFT 0 + +/* PCI PEC core and block interrupt enable register */ +#define FO_PCI_PEC_CORE_BLOCK_INT_EN_PEC 0x8000000000000000ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_EN_ILU 0x0000000000000008ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_EN_UERR 0x0000000000000004ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_EN_CERR 0x0000000000000002ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_EN_OEVENT 0x0000000000000001ULL + +/* PCI PEC core and block interrupt status register */ +#define FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU 0x0000000000000008ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR 0x0000000000000004ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR 0x0000000000000002ULL +#define FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT 0x0000000000000001ULL + +/* PCI TLU control register */ +#define FO_PCI_TLU_CTRL_L0S_TIM_MASK 0x00000000ff000000ULL +#define FO_PCI_TLU_CTRL_L0S_TIM_SHFT 24 +#define FO_PCI_TLU_CTRL_NWPR_EN 0x0000000000100000ULL +#define FO_PCI_TLU_CTRL_CTO_SEL_MASK 0x0000000000070000ULL +#define FO_PCI_TLU_CTRL_CTO_SEL_SHFT 16 +#define FO_PCI_TLU_CTRL_CFG_MASK 0x000000000000ffffULL +#define FO_PCI_TLU_CTRL_CFG_SHFT 0 +#define FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET 0x0000000000000100ULL + +/* + * PCI TLU other event interrupt enable, interrupt status and status clear + * registers + */ +#define FO_PCI_TLU_OEVENT_S_MASK 0x00ffffff00000000ULL +#define FO_PCI_TLU_OEVENT_S_SHFT 32 +#define FO_PCI_TLU_OEVENT_SPARE_S 0x0080000000000000ULL +#define FO_PCI_TLU_OEVENT_MFC_S 0x0040000000000000ULL +#define FO_PCI_TLU_OEVENT_CTO_S 0x0020000000000000ULL +#define FO_PCI_TLU_OEVENT_NFP_S 0x0010000000000000ULL +#define FO_PCI_TLU_OEVENT_LWC_S 0x0008000000000000ULL +#define FO_PCI_TLU_OEVENT_MRC_S 0x0004000000000000ULL +#define FO_PCI_TLU_OEVENT_WUC_S 0x0002000000000000ULL +#define FO_PCI_TLU_OEVENT_RUC_S 0x0001000000000000ULL +#define FO_PCI_TLU_OEVENT_CRS_S 0x0000800000000000ULL +#define FO_PCI_TLU_OEVENT_IIP_S 0x0000400000000000ULL +#define FO_PCI_TLU_OEVENT_EDP_S 0x0000200000000000ULL +#define FIRE_PCI_TLU_OEVENT_EHP_S 0x0000100000000000ULL +#define OBERON_PCI_TLU_OEVENT_EHBUE_S 0x0000100000000000ULL +#define OBERON_PCI_TLU_OEVENT_EDBUE_S 0x0000100000000000ULL +#define FO_PCI_TLU_OEVENT_LIN_S 0x0000080000000000ULL +#define FO_PCI_TLU_OEVENT_LRS_S 0x0000040000000000ULL +#define FO_PCI_TLU_OEVENT_LDN_S 0x0000020000000000ULL +#define FO_PCI_TLU_OEVENT_LUP_S 0x0000010000000000ULL +#define FO_PCI_TLU_OEVENT_LPU_S_MASK 0x000000c000000000ULL +#define FO_PCI_TLU_OEVENT_LPU_S_SHFT 38 +#define OBERON_PCI_TLU_OEVENT_TLUEITMO_S 0x0000008000000000ULL +#define FO_PCI_TLU_OEVENT_ERU_S 0x0000002000000000ULL +#define FO_PCI_TLU_OEVENT_ERO_S 0x0000001000000000ULL +#define FO_PCI_TLU_OEVENT_EMP_S 0x0000000800000000ULL +#define FO_PCI_TLU_OEVENT_EPE_S 0x0000000400000000ULL +#define FIRE_PCI_TLU_OEVENT_ERP_S 0x0000000200000000ULL +#define OBERON_PCI_TLU_OEVENT_ERBU_S 0x0000000200000000ULL +#define FIRE_PCI_TLU_OEVENT_EIP_S 0x0000000100000000ULL +#define OBERON_PCI_TLU_OEVENT_EIUE_S 0x0000000100000000ULL +#define FO_PCI_TLU_OEVENT_P_MASK 0x0000000000ffffffULL +#define FO_PCI_TLU_OEVENT_P_SHFT 0 +#define FO_PCI_TLU_OEVENT_SPARE_P 0x0000000000800000ULL +#define FO_PCI_TLU_OEVENT_MFC_P 0x0000000000400000ULL +#define FO_PCI_TLU_OEVENT_CTO_P 
0x0000000000200000ULL +#define FO_PCI_TLU_OEVENT_NFP_P 0x0000000000100000ULL +#define FO_PCI_TLU_OEVENT_LWC_P 0x0000000000080000ULL +#define FO_PCI_TLU_OEVENT_MRC_P 0x0000000000040000ULL +#define FO_PCI_TLU_OEVENT_WUC_P 0x0000000000020000ULL +#define FO_PCI_TLU_OEVENT_RUC_P 0x0000000000010000ULL +#define FO_PCI_TLU_OEVENT_CRS_P 0x0000000000008000ULL +#define FO_PCI_TLU_OEVENT_IIP_P 0x0000000000004000ULL +#define FO_PCI_TLU_OEVENT_EDP_P 0x0000000000002000ULL +#define FIRE_PCI_TLU_OEVENT_EHP_P 0x0000000000001000ULL +#define OBERON_PCI_TLU_OEVENT_EHBUE_P 0x0000000000001000ULL +#define OBERON_PCI_TLU_OEVENT_EDBUE_P 0x0000000000001000ULL +#define FO_PCI_TLU_OEVENT_LIN_P 0x0000000000000800ULL +#define FO_PCI_TLU_OEVENT_LRS_P 0x0000000000000400ULL +#define FO_PCI_TLU_OEVENT_LDN_P 0x0000000000000200ULL +#define FO_PCI_TLU_OEVENT_LUP_P 0x0000000000000100ULL +#define FO_PCI_TLU_OEVENT_LPU_P_MASK 0x00000000000000c0ULL +#define FO_PCI_TLU_OEVENT_LPU_P_SHFT 6 +#define OBERON_PCI_TLU_OEVENT_TLUEITMO_P 0x0000000000000080ULL +#define FO_PCI_TLU_OEVENT_ERU_P 0x0000000000000020ULL +#define FO_PCI_TLU_OEVENT_ERO_P 0x0000000000000010ULL +#define FO_PCI_TLU_OEVENT_EMP_P 0x0000000000000008ULL +#define FO_PCI_TLU_OEVENT_EPE_P 0x0000000000000004ULL +#define FIRE_PCI_TLU_OEVENT_ERP_P 0x0000000000000002ULL +#define OBERON_PCI_TLU_OEVENT_ERBU_P 0x0000000000000002ULL +#define FIRE_PCI_TLU_OEVENT_EIP_P 0x0000000000000001ULL +#define OBERON_PCI_TLU_OEVENT_EIUE_P 0x0000000000000001ULL + +/* PCI receive/transmit DLU/TLU other event header 1/2 log registers */ +#define FO_PCI_TLU_OEVENT_HDR_LOG_MASK 0xffffffffffffffffULL +#define FO_PCI_TLU_OEVENT_HDR_LOG_SHFT 0 + +/* PCI TLU device control register */ +#define FO_PCI_TLU_DEV_CTRL_MRRS_MASK 0x0000000000007000ULL +#define FO_PCI_TLU_DEV_CTRL_MRRS_SHFT 12 +#define FO_PCI_TLU_DEV_CTRL_MPS_MASK 0x00000000000000e0ULL +#define FO_PCI_TLU_DEV_CTRL_MPS_SHFT 5 + +/* + * PCI TLU uncorrectable error interrupt enable, interrupt status and + * status clear registers + */ +#define FO_PCI_TLU_UERR_INT_S_MASK 0x001fffff00000000ULL +#define FO_PCI_TLU_UERR_INT_S_SHFT 32 +#define FO_PCI_TLU_UERR_INT_UR_S 0x0010000000000000ULL +#define OBERON_PCI_TLU_UERR_INT_ECRC_S 0x0008000000000000ULL +#define FO_PCI_TLU_UERR_INT_MFP_S 0x0004000000000000ULL +#define FO_PCI_TLU_UERR_INT_ROF_S 0x0002000000000000ULL +#define FO_PCI_TLU_UERR_INT_UC_S 0x0001000000000000ULL +#define FO_PCI_TLU_UERR_INT_CA_S 0x0000800000000000ULL +#define FO_PCI_TLU_UERR_INT_CTO_S 0x0000400000000000ULL +#define FO_PCI_TLU_UERR_INT_FCP_S 0x0000200000000000ULL +#define FIRE_PCI_TLU_UERR_INT_PP_S 0x0000100000000000ULL +#define OBERON_PCI_TLU_UERR_INT_POIS_S 0x0000100000000000ULL +#define FO_PCI_TLU_UERR_INT_DLP_S 0x0000001000000000ULL +#define FO_PCI_TLU_UERR_INT_TE_S 0x0000000100000000ULL +#define FO_PCI_TLU_UERR_INT_P_MASK 0x00000000001fffffULL +#define FO_PCI_TLU_UERR_INT_P_SHFT 0 +#define FO_PCI_TLU_UERR_INT_UR_P 0x0000000000100000ULL +#define OBERON_PCI_TLU_UERR_INT_ECRC_P 0x0000000000080000ULL +#define FO_PCI_TLU_UERR_INT_MFP_P 0x0000000000040000ULL +#define FO_PCI_TLU_UERR_INT_ROF_P 0x0000000000020000ULL +#define FO_PCI_TLU_UERR_INT_UC_P 0x0000000000010000ULL +#define FO_PCI_TLU_UERR_INT_CA_P 0x0000000000008000ULL +#define FO_PCI_TLU_UERR_INT_CTO_P 0x0000000000004000ULL +#define FO_PCI_TLU_UERR_INT_FCP_P 0x0000000000002000ULL +#define FIRE_PCI_TLU_UERR_INT_PP_P 0x0000000000001000ULL +#define OBERON_PCI_TLU_UERR_INT_POIS_P 0x0000000000001000ULL +#define FO_PCI_TLU_UERR_INT_DLP_P 0x0000000000000010ULL +#define 
FO_PCI_TLU_UERR_INT_TE_P 0x0000000000000001ULL + +/* + * PCI TLU correctable error interrupt enable, interrupt status and + * status clear registers + */ +#define FO_PCI_TLU_CERR_INT_S_MASK 0x001fffff00000000ULL +#define FO_PCI_TLU_CERR_INT_S_SHFT 32 +#define FO_PCI_TLU_CERR_INT_RTO_S 0x0000100000000000ULL +#define FO_PCI_TLU_CERR_INT_RNR_S 0x0000010000000000ULL +#define FO_PCI_TLU_CERR_INT_BDP_S 0x0000008000000000ULL +#define FO_PCI_TLU_CERR_INT_BTP_S 0x0000004000000000ULL +#define FO_PCI_TLU_CERR_INT_RE_S 0x0000000100000000ULL +#define FO_PCI_TLU_CERR_INT_P_MASK 0x00000000001fffffULL +#define FO_PCI_TLU_CERR_INT_P_SHFT 0 +#define FO_PCI_TLU_CERR_INT_RTO_P 0x0000000000001000ULL +#define FO_PCI_TLU_CERR_INT_RNR_P 0x0000000000000100ULL +#define FO_PCI_TLU_CERR_INT_BDP_P 0x0000000000000080ULL +#define FO_PCI_TLU_CERR_INT_BTP_P 0x0000000000000040ULL +#define FO_PCI_TLU_CERR_INT_RE_P 0x0000000000000001ULL + +/* PCI TLU reset register */ +#define FO_PCI_LPU_RST_WE 0x0000000080000000ULL +#define FO_PCI_LPU_RST_UNUSED_MASK 0x0000000000000e00ULL +#define FO_PCI_LPU_RST_UNUSED_SHFT 9 +#define FO_PCI_LPU_RST_ERR 0x0000000000000100ULL +#define FO_PCI_LPU_RST_TXLINK 0x0000000000000080ULL +#define FO_PCI_LPU_RST_RXLINK 0x0000000000000040ULL +#define FO_PCI_LPU_RST_SMLINK 0x0000000000000020ULL +#define FO_PCI_LPU_RST_LTSSM 0x0000000000000010ULL +#define FO_PCI_LPU_RST_TXPHY 0x0000000000000008ULL +#define FO_PCI_LPU_RST_RXPHY 0x0000000000000004ULL +#define FO_PCI_LPU_RST_TXPCS 0x0000000000000002ULL +#define FO_PCI_LPU_RST_RXPCS 0x0000000000000001ULL + +/* PCI TLU link control register */ +#define FO_PCI_TLU_LNK_CTRL_EXTSYNC 0x0000000000000080ULL +#define FO_PCI_TLU_LNK_CTRL_CLK 0x0000000000000040ULL +#define FO_PCI_TLU_LNK_CTRL_RETRAIN 0x0000000000000020ULL +#define FO_PCI_TLU_LNK_CTRL_DIS 0x0000000000000010ULL +#define FO_PCI_TLU_LNK_CTRL_RCB 0x0000000000000008ULL +#define FO_PCI_TLU_LNK_CTRL_ASPM_L0S_L1S 0x0000000000000003ULL +#define FO_PCI_TLU_LNK_CTRL_ASPM_L1S 0x0000000000000002ULL +#define FO_PCI_TLU_LNK_CTRL_ASPM_L0S 0x0000000000000001ULL +#define FO_PCI_TLU_LNK_CTRL_ASPM_DIS 0x0000000000000000ULL + +/* PCI TLU link status register */ +#define FO_PCI_TLU_LNK_STAT_CLK 0x0000000000001000ULL +#define FO_PCI_TLU_LNK_STAT_TRAIN 0x0000000000000800ULL +#define FO_PCI_TLU_LNK_STAT_ERR 0x0000000000000400ULL +#define FO_PCI_TLU_LNK_STAT_WDTH_MASK 0x00000000000003f0ULL +#define FO_PCI_TLU_LNK_STAT_WDTH_SHFT 4 +#define FO_PCI_TLU_LNK_STAT_SPEED_MASK 0x000000000000000fULL +#define FO_PCI_TLU_LNK_STAT_SPEED_SHFT 0 + +/* + * PCI receive/transmit DLU/TLU uncorrectable error header 1/2 log + * registers + */ +#define FO_PCI_TLU_UERR_HDR_LOG_MASK 0xffffffffffffffffULL +#define FO_PCI_TLU_UERR_HDR_LOG_SHFT 0 + +/* PCI DLU/LPU interrupt status and mask registers */ +#define FO_PCI_LPU_INT_INT 0x0000000080000000ULL +#define FIRE_PCI_LPU_INT_PRF_CNT2_OFLW 0x0000000000000080ULL +#define FIRE_PCI_LPU_INT_PRF_CNT1_OFLW 0x0000000000000040ULL +#define FO_PCI_LPU_INT_LNK_LYR 0x0000000000000020ULL +#define FO_PCI_LPU_INT_PHY_ERR 0x0000000000000010ULL +#define FIRE_PCI_LPU_INT_LTSSM 0x0000000000000008ULL +#define FIRE_PCI_LPU_INT_PHY_TX 0x0000000000000004ULL +#define FIRE_PCI_LPU_INT_PHY_RX 0x0000000000000002ULL +#define FIRE_PCI_LPU_INT_PHY_GB 0x0000000000000001ULL + +/* PCI DLU/LPU link layer config register */ +#define FIRE_PCI_LPU_LNK_LYR_CFG_AUTO_UPDT_DIS 0x0000000000080000ULL +#define FIRE_PCI_LPU_LNK_LYR_CFG_FREQ_NAK_EN 0x0000000000040000ULL +#define FIRE_PCI_LPU_LNK_LYR_CFG_RPLY_AFTER_REQ 0x0000000000020000ULL +#define 
FIRE_PCI_LPU_LNK_LYR_CFG_LAT_THRS_WR_EN 0x0000000000010000ULL +#define FO_PCI_LPU_LNK_LYR_CFG_VC0_EN 0x0000000000000100ULL +#define FIRE_PCI_LPU_LNK_LYR_CFG_L0S_ADJ_FAC_EN 0x0000000000000010ULL +#define FIER_PCI_LPU_LNK_LYR_CFG_TLP_XMIT_FC_EN 0x0000000000000008ULL +#define FO_PCI_LPU_LNK_LYR_CFG_FREQ_ACK_EN 0x0000000000000004ULL +#define FO_PCI_LPU_LNK_LYR_CFG_RETRY_DIS 0x0000000000000002ULL + +/* PCI DLU/LPU link layer interrupt and status register */ +#define FO_PCI_LPU_LNK_LYR_INT_STAT_LNK_ERR_ACT 0x0000000080000000ULL +#define OBERON_PCI_LPU_LNK_LYR_INT_STAT_PBUS_PE 0x0000000000800000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_USPRTD_DLLP 0x0000000000400000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_DLLP_RX_ERR 0x0000000000200000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_BAD_DLLP 0x0000000000100000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_TLP_RX_ERR 0x0000000000040000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_SRC_ERR_TLP 0x0000000000020000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_BAD_TLP 0x0000000000010000ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_RBF_UDF_ERR 0x0000000000000200ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_RBF_OVF_ERR 0x0000000000000100ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_EG_TLPM_ERR 0x0000000000000080ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_EG_TFRM_ERR 0x0000000000000040ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_RBF_PE 0x0000000000000020ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_EGRESS_PE 0x0000000000000010ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_RPLY_TMR_TO 0x0000000000000004ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_RPLY_NUM_RO 0x0000000000000002ULL +#define FO_PCI_LPU_LNK_LYR_INT_STAT_DLNK_PES 0x0000000000000001ULL + +/* PCI DLU/LPU flow control update control register */ +#define FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_C_EN 0x0000000000000004ULL +#define FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN 0x0000000000000002ULL +#define FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN 0x0000000000000001ULL + +/* PCI DLU/LPU txlink ACKNAK latency timer threshold register */ +#define FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK 0x000000000000ffffULL +#define FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT 0 + +/* PCI DLU/LPU txlink replay timer threshold register */ +#define FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK 0x00000000000fffffULL +#define FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT 0 + +/* PCI DLU/LPU txlink FIFO pointer register */ +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK 0x00000000ffff0000ULL +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT 16 +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK 0x000000000000ffffULL +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT 0 + +/* PCI DLU/LPU phy layer interrupt and status register */ +#define FO_PCI_LPU_PHY_LYR_INT_STAT_PHY_LYR_ERR 0x0000000080000000ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_KC_DLLP_ERR 0x0000000000000800ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_END_POS_ERR 0x0000000000000400ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_LNK_ERR 0x0000000000000200ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_TRN_ERR 0x0000000000000100ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_EDB_DET 0x0000000000000080ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_SDP_END 0x0000000000000040ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_STP_END_EDB 0x0000000000000020ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_INVC_ERR 0x0000000000000010ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_MULTI_SDP 0x0000000000000008ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_MULTI_STP 0x0000000000000004ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_ILL_SDP_POS 0x0000000000000002ULL +#define FO_PCI_LPU_PHY_LYR_INT_STAT_ILL_STP_POS 0x0000000000000001ULL + +/* PCI DLU/LPU LTSSM 
config2 register */ +#define FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK 0x00000000ffffffffULL +#define FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT 0 + +/* PCI DLU/LPU LTSSM config3 register */ +#define FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK 0x00000000ffffffffULL +#define FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT 0 + +/* PCI DLU/LPU LTSSM config4 register */ +#define FO_PCI_LPU_LTSSM_CFG4_TRN_CTRL_MASK 0x00000000ff000000ULL +#define FO_PCI_LPU_LTSSM_CFG4_TRN_CTRL_SHFT 24 +#define FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK 0x0000000000ff0000ULL +#define FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT 16 +#define FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK 0x000000000000ff00ULL +#define FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT 8 +#define FO_PCI_LPU_LTSSM_CFG4_LNK_NUM_MASK 0x00000000000000ffULL +#define FO_PCI_LPU_LTSSM_CFG4_LNK_NUM_SHFT 0 + +/* PCI DLU/LPU LTSSM config5 register */ +#define FO_PCI_LPU_LTSSM_CFG5_UNUSED0_MASK 0x00000000ffffe000ULL +#define FO_PCI_LPU_LTSSM_CFG5_UNUSED0_SHFT 13 +#define FO_PCI_LPU_LTSSM_CFG5_RCV_DET_TST_MODE 0x0000000000001000ULL +#define FO_PCI_LPU_LTSSM_CFG5_POLL_CMPLNC_DIS 0x0000000000000800ULL +#define FO_PCI_LPU_LTSSM_CFG5_TX_IDLE_TX_FTS 0x0000000000000400ULL +#define FO_PCI_LPU_LTSSM_CFG5_RX_FTS_RVR_LK 0x0000000000000200ULL +#define FO_PCI_LPU_LTSSM_CFG5_UNUSED1_MASK 0x0000000000000180ULL +#define FO_PCI_LPU_LTSSM_CFG5_UNUSED1_SHFT 7 +#define FO_PCI_LPU_LTSSM_CFG5_LPBK_NTRY_ACTIVE 0x0000000000000040ULL +#define FO_PCI_LPU_LTSSM_CFG5_LPBK_NTRY_EXIT 0x0000000000000020ULL +#define FO_PCI_LPU_LTSSM_CFG5_LPBK_ACTIVE_EXIT 0x0000000000000010ULL +#define FO_PCI_LPU_LTSSM_CFG5_L1_IDLE_RCVRY_LK 0x0000000000000008ULL +#define FO_PCI_LPU_LTSSM_CFG5_L0_TRN_CNTRL_RST 0x0000000000000004ULL +#define FO_PCI_LPU_LTSSM_CFG5_L0_LPBK 0x0000000000000002ULL +#define FO_PCI_LPU_LTSSM_CFG5_UNUSED2 0x0000000000000001ULL + +/* Controller configuration and status registers */ +#define FIRE_JBUS_PAR_CTRL 0x60010 +#define FO_XBC_ERR_LOG_EN 0x61000 +#define FO_XBC_INT_EN 0x61008 +#define FO_XBC_INT_STAT 0x61010 +#define FO_XBC_ERR_STAT_CLR 0x61018 +#define FIRE_JBC_FATAL_RST_EN 0x61028 +#define FIRE_JBCINT_ITRANS_ERR_LOG 0x61040 +#define FIRE_JBCINT_ITRANS_ERR_LOG2 0x61048 +#define FIRE_JBCINT_OTRANS_ERR_LOG 0x61040 +#define FIRE_JBCINT_OTRANS_ERR_LOG2 0x61048 +#define FIRE_FATAL_ERR_LOG 0x61050 +#define FIRE_FATAL_ERR_LOG2 0x61058 +#define FIRE_MERGE_TRANS_ERR_LOG 0x61060 +#define FIRE_DMCINT_ODCD_ERR_LOG 0x61068 +#define FIRE_DMCINT_IDC_ERR_LOG 0x61070 +#define FIRE_JBC_CSR_ERR_LOG 0x61078 +#define FIRE_JBC_CORE_BLOCK_INT_EN 0x61800 +#define FIRE_JBC_CORE_BLOCK_ERR_STAT 0x61808 +#define FO_XBC_PRF_CNT_SEL 0x62000 +#define FO_XBC_PRF_CNT0 0x62008 +#define FO_XBC_PRF_CNT1 0x62010 + +/* JBus parity control register */ +#define FIRE_JBUS_PAR_CTRL_P_EN 0x8000000000000000ULL +#define FIRE_JBUS_PAR_CTRL_INVRTD_PAR_MASK 0x000000000000003cULL +#define FIRE_JBUS_PAR_CTRL_INVRTD_PAR_SHFT 2 +#define FIRE_JBUS_PAR_CTRL_NEXT_DATA 0x0000000000000002ULL +#define FIRE_JBUS_PAR_CTRL_NEXT_ADDR 0x0000000000000001ULL + +/* JBC error log enable register - may also apply to UBC */ +#define FIRE_JBC_ERR_LOG_EN_SPARE_MASK 0x00000000e0000000ULL +#define FIRE_JBC_ERR_LOG_EN_SPARE_SHFT 29 +#define FIRE_JBC_ERR_LOG_EN_PIO_UNMAP_RD 0x0000000010000000ULL +#define FIRE_JBC_ERR_LOG_EN_ILL_ACC_RD 0x0000000008000000ULL +#define FIRE_JBC_ERR_LOG_EN_EBUS_TO 0x0000000004000000ULL +#define FIRE_JBC_ERR_LOG_EN_MB_PEA 0x0000000002000000ULL +#define FIRE_JBC_ERR_LOG_EN_MB_PER 0x0000000001000000ULL +#define FIRE_JBC_ERR_LOG_EN_MB_PEW 0x0000000000800000ULL +#define FIRE_JBC_ERR_LOG_EN_UE_ASYN 
0x0000000000400000ULL +#define FIRE_JBC_ERR_LOG_EN_CE_ASYN 0x0000000000200000ULL +#define FIRE_JBC_ERR_LOG_EN_JTE 0x0000000000100000ULL +#define FIRE_JBC_ERR_LOG_EN_JBE 0x0000000000080000ULL +#define FIRE_JBC_ERR_LOG_EN_JUE 0x0000000000040000ULL +#define FIRE_JBC_ERR_LOG_EN_IJP 0x0000000000020000ULL +#define FIRE_JBC_ERR_LOG_EN_ICISE 0x0000000000010000ULL +#define FIRE_JBC_ERR_LOG_EN_CPE 0x0000000000008000ULL +#define FIRE_JBC_ERR_LOG_EN_APE 0x0000000000004000ULL +#define FIRE_JBC_ERR_LOG_EN_WR_DPE 0x0000000000002000ULL +#define FIRE_JBC_ERR_LOG_EN_RD_DPE 0x0000000000001000ULL +#define FIRE_JBC_ERR_LOG_EN_ILL_BMW 0x0000000000000800ULL +#define FIRE_JBC_ERR_LOG_EN_ILL_BMR 0x0000000000000400ULL +#define FIRE_JBC_ERR_LOG_EN_BJC 0x0000000000000200ULL +#define FIRE_JBC_ERR_LOG_EN_PIO_UNMAP 0x0000000000000100ULL +#define FIRE_JBC_ERR_LOG_EN_PIO_DPE 0x0000000000000080ULL +#define FIRE_JBC_ERR_LOG_EN_PIO_CPE 0x0000000000000040ULL +#define FIRE_JBC_ERR_LOG_EN_ILL_ACC 0x0000000000000020ULL +#define FIRE_JBC_ERR_LOG_EN_UNSOL_RD 0x0000000000000010ULL +#define FIRE_JBC_ERR_LOG_EN_UNSOL_INT 0x0000000000000008ULL +#define FIRE_JBC_ERR_LOG_EN_JTCEEW 0x0000000000000004ULL +#define FIRE_JBC_ERR_LOG_EN_JTCEEI 0x0000000000000002ULL +#define FIRE_JBC_ERR_LOG_EN_JTCEER 0x0000000000000001ULL + +/* JBC interrupt enable, interrupt status and error status clear registers */ +#define FIRE_JBC_ERR_INT_SPARE_S_MASK 0xe000000000000000ULL +#define FIRE_JBC_ERR_INT_SPARE_S_SHFT 61 +#define FIRE_JBC_ERR_INT_PIO_UNMAP_RD_S 0x1000000000000000ULL +#define FIRE_JBC_ERR_INT_ILL_ACC_RD_S 0x0800000000000000ULL +#define FIRE_JBC_ERR_INT_EBUS_TO_S 0x0400000000000000ULL +#define FIRE_JBC_ERR_INT_MB_PEA_S 0x0200000000000000ULL +#define FIRE_JBC_ERR_INT_MB_PER_S 0x0100000000000000ULL +#define FIRE_JBC_ERR_INT_MB_PEW_S 0x0080000000000000ULL +#define FIRE_JBC_ERR_INT_UE_ASYN_S 0x0040000000000000ULL +#define FIRE_JBC_ERR_INT_CE_ASYN_S 0x0020000000000000ULL +#define FIRE_JBC_ERR_INT_JTE_S 0x0010000000000000ULL +#define FIRE_JBC_ERR_INT_JBE_S 0x0008000000000000ULL +#define FIRE_JBC_ERR_INT_JUE_S 0x0004000000000000ULL +#define FIRE_JBC_ERR_INT_IJP_S 0x0002000000000000ULL +#define FIRE_JBC_ERR_INT_ICISE_S 0x0001000000000000ULL +#define FIRE_JBC_ERR_INT_CPE_S 0x0000800000000000ULL +#define FIRE_JBC_ERR_INT_APE_S 0x0000400000000000ULL +#define FIRE_JBC_ERR_INT_WR_DPE_S 0x0000200000000000ULL +#define FIRE_JBC_ERR_INT_RD_DPE_S 0x0000100000000000ULL +#define FIRE_JBC_ERR_INT_ILL_BMW_S 0x0000080000000000ULL +#define FIRE_JBC_ERR_INT_ILL_BMR_S 0x0000040000000000ULL +#define FIRE_JBC_ERR_INT_BJC_S 0x0000020000000000ULL +#define FIRE_JBC_ERR_INT_PIO_UNMAP_S 0x0000010000000000ULL +#define FIRE_JBC_ERR_INT_PIO_DPE_S 0x0000008000000000ULL +#define FIRE_JBC_ERR_INT_PIO_CPE_S 0x0000004000000000ULL +#define FIRE_JBC_ERR_INT_ILL_ACC_S 0x0000002000000000ULL +#define FIRE_JBC_ERR_INT_UNSOL_RD_S 0x0000001000000000ULL +#define FIRE_JBC_ERR_INT_UNSOL_INT_S 0x0000000800000000ULL +#define FIRE_JBC_ERR_INT_JTCEEW_S 0x0000000400000000ULL +#define FIRE_JBC_ERR_INT_JTCEEI_S 0x0000000200000000ULL +#define FIRE_JBC_ERR_INT_JTCEER_S 0x0000000100000000ULL +#define FIRE_JBC_ERR_INT_SPARE_P_MASK 0x00000000e0000000ULL +#define FIRE_JBC_ERR_INT_SPARE_P_SHFT 29 +#define FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P 0x0000000010000000ULL +#define FIRE_JBC_ERR_INT_ILL_ACC_RD_P 0x0000000008000000ULL +#define FIRE_JBC_ERR_INT_EBUS_TO_P 0x0000000004000000ULL +#define FIRE_JBC_ERR_INT_MB_PEA_P 0x0000000002000000ULL +#define FIRE_JBC_ERR_INT_MB_PER_P 0x0000000001000000ULL +#define 
FIRE_JBC_ERR_INT_MB_PEW_P 0x0000000000800000ULL +#define FIRE_JBC_ERR_INT_UE_ASYN_P 0x0000000000400000ULL +#define FIRE_JBC_ERR_INT_CE_ASYN_P 0x0000000000200000ULL +#define FIRE_JBC_ERR_INT_JTE_P 0x0000000000100000ULL +#define FIRE_JBC_ERR_INT_JBE_P 0x0000000000080000ULL +#define FIRE_JBC_ERR_INT_JUE_P 0x0000000000040000ULL +#define FIRE_JBC_ERR_INT_IJP_P 0x0000000000020000ULL +#define FIRE_JBC_ERR_INT_ICISE_P 0x0000000000010000ULL +#define FIRE_JBC_ERR_INT_CPE_P 0x0000000000008000ULL +#define FIRE_JBC_ERR_INT_APE_P 0x0000000000004000ULL +#define FIRE_JBC_ERR_INT_WR_DPE_P 0x0000000000002000ULL +#define FIRE_JBC_ERR_INT_RD_DPE_P 0x0000000000001000ULL +#define FIRE_JBC_ERR_INT_ILL_BMW_P 0x0000000000000800ULL +#define FIRE_JBC_ERR_INT_ILL_BMR_P 0x0000000000000400ULL +#define FIRE_JBC_ERR_INT_BJC_P 0x0000000000000200ULL +#define FIRE_JBC_ERR_INT_PIO_UNMAP_P 0x0000000000000100ULL +#define FIRE_JBC_ERR_INT_PIO_DPE_P 0x0000000000000080ULL +#define FIRE_JBC_ERR_INT_PIO_CPE_P 0x0000000000000040ULL +#define FIRE_JBC_ERR_INT_ILL_ACC_P 0x0000000000000020ULL +#define FIRE_JBC_ERR_INT_UNSOL_RD_P 0x0000000000000010ULL +#define FIRE_JBC_ERR_INT_UNSOL_INT_P 0x0000000000000008ULL +#define FIRE_JBC_ERR_INT_JTCEEW_P 0x0000000000000004ULL +#define FIRE_JBC_ERR_INT_JTCEEI_P 0x0000000000000002ULL +#define FIRE_JBC_ERR_INT_JTCEER_P 0x0000000000000001ULL + +/* UBC interrupt enable, error status and error status clear registers */ +#define OBERON_UBC_ERR_INT_PIORBEUE_S 0x0004000000000000ULL +#define OBERON_UBC_ERR_INT_PIOWBEUE_S 0x0002000000000000ULL +#define OBERON_UBC_ERR_INT_PIOWTUE_S 0x0001000000000000ULL +#define OBERON_UBC_ERR_INT_MEMWTAXB_S 0x0000080000000000ULL +#define OBERON_UBC_ERR_INT_MEMRDAXB_S 0x0000040000000000ULL +#define OBERON_UBC_ERR_INT_DMAWTUEB_S 0x0000020000000000ULL +#define OBERON_UBC_ERR_INT_DMARDUEB_S 0x0000010000000000ULL +#define OBERON_UBC_ERR_INT_MEMWTAXA_S 0x0000000800000000ULL +#define OBERON_UBC_ERR_INT_MEMRDAXA_S 0x0000000400000000ULL +#define OBERON_UBC_ERR_INT_DMAWTUEA_S 0x0000000200000000ULL +#define OBERON_UBC_ERR_INT_DMARDUEA_S 0x0000000100000000ULL +#define OBERON_UBC_ERR_INT_PIORBEUE_P 0x0000000000040000ULL +#define OBERON_UBC_ERR_INT_PIOWBEUE_P 0x0000000000020000ULL +#define OBERON_UBC_ERR_INT_PIOWTUE_P 0x0000000000010000ULL +#define OBERON_UBC_ERR_INT_MEMWTAXB_P 0x0000000000000800ULL +#define OBERON_UBC_ERR_INT_MEMRDAXB_P 0x0000000000000400ULL +#define OBERON_UBC_ERR_INT_DMARDUEB_P 0x0000000000000200ULL +#define OBERON_UBC_ERR_INT_DMAWTUEB_P 0x0000000000000100ULL +#define OBERON_UBC_ERR_INT_MEMWTAXA_P 0x0000000000000008ULL +#define OBERON_UBC_ERR_INT_MEMRDAXA_P 0x0000000000000004ULL +#define OBERON_UBC_ERR_INT_DMAWTUEA_P 0x0000000000000002ULL +#define OBERON_UBC_ERR_INT_DMARDUEA_P 0x0000000000000001ULL + +/* JBC fatal reset enable register */ +#define FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK 0x000000000c000000ULL +#define FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT 26 +#define FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT 0x0000000002000000ULL +#define FIRE_JBC_FATAL_RST_EN_CPE_P_INT 0x0000000000008000ULL +#define FIRE_JBC_FATAL_RST_EN_APE_P_INT 0x0000000000004000ULL +#define FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT 0x0000000000000040ULL +#define FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT 0x0000000000000004ULL +#define FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT 0x0000000000000002ULL +#define FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT 0x0000000000000001ULL + +/* JBC JBCINT in transaction error log register */ +#define FIRE_JBCINT_ITRANS_ERR_LOG_Q_WORD_MASK 0x00c0000000000000ULL +#define 
FIRE_JBCINT_ITRANS_ERR_LOG_Q_WORD_SHFT 54 +#define FIRE_JBCINT_ITRANS_ERR_LOG_TRANSID_MASK 0x0003000000000000ULL +#define FIRE_JBCINT_ITRANS_ERR_LOG_TRANSID_SHFT 48 +#define FIRE_JBCINT_ITRANS_ERR_LOG_ADDR_MASK 0x000007ffffffffffULL +#define FIRE_JBCINT_ITRANS_ERR_LOG_ADDR_SHFT 0 + +/* JBC JBCINT in transaction error log register 2 */ +#define FIRE_JBCINT_ITRANS_ERR_LOG2_ARB_WN_MASK 0x000ffffff0000000ULL +#define FIRE_JBCINT_ITRANS_ERR_LOG2_ARB_WN_SHFT 28 +#define FIRE_JBCINT_ITRANS_ERR_LOG2_J_REQ_MASK 0x000000000fe00000ULL +#define FIRE_JBCINT_ITRANS_ERR_LOG2_J_REQ_SHFT 21 +#define FIRE_JBCINT_ITRANS_ERR_LOG2_J_PACK_MASK 0x00000000001fffffULL +#define FIRE_JBCINT_ITRANS_ERR_LOG2_J_PACK_SHFT 0 + +/* JBC JBCINT out transaction error log register */ +#define FIRE_JBCINT_OTRANS_ERR_LOG_TRANSID_MASK 0x003f000000000000ULL +#define FIRE_JBCINT_OTRANS_ERR_LOG_TRANSID_SHFT 48 +#define FIRE_JBCINT_OTRANS_ERR_LOG_ADDR_MASK 0x000007ffffffffffULL +#define FIRE_JBCINT_OTRANS_ERR_LOG_ADDR_SHFT 0 + +/* JBC JBCINT out transaction error log register 2 */ +#define FIRE_JBCINT_OTRANS_ERR_LOG2_ARB_WN_MASK 0x000ffffff0000000ULL +#define FIRE_JBCINT_OTRANS_ERR_LOG2_ARB_WN_SHFT 28 +#define FIRE_JBCINT_OTRANS_ERR_LOG2_J_REQ_MASK 0x000000000fe00000ULL +#define FIRE_JBCINT_OTRANS_ERR_LOG2_J_REQ_SHFT 21 +#define FIRE_JBCINT_OTRANS_ERR_LOG2_J_PACK_MASK 0x00000000001fffffULL +#define FIRE_JBCINT_OTRANS_ERR_LOG2_J_PACK_SHFT 0 + +/* JBC merge transaction error log register */ +#define FIRE_FATAL_ERR_LOG_DATA_MASK 0xffffffffffffffffULL +#define FIRE_FATAL_ERR_LOG_DATA_SHFT 0 + +/* JBC merge transaction error log register 2 */ +#define FIRE_FATAL_ERR_LOG2_ARB_WN_MASK 0x000ffffff0000000ULL +#define FIRE_FATAL_ERR_LOG2_ARB_WN_SHFT 28 +#define FIRE_FATAL_ERR_LOG2_J_REQ_MASK 0x000000000fe00000ULL +#define FIRE_FATAL_ERR_LOG2_J_REQ_SHFT 21 +#define FIRE_FATAL_ERR_LOG2_J_PACK_MASK 0x00000000001fffffULL +#define FIRE_FATAL_ERR_LOG2_J_PACK_SHFT 0 + +/* JBC merge transaction error log register */ +#define FIRE_MERGE_TRANS_ERR_LOG_Q_WORD_MASK 0x00c0000000000000ULL +#define FIRE_MERGE_TRANS_ERR_LOG_Q_WORD_SHFT 54 +#define FIRE_MERGE_TRANS_ERR_LOG_TRANSID_MASK 0x0003000000000000ULL +#define FIRE_MERGE_TRANS_ERR_LOG_TRANSID_SHFT 48 +#define FIRE_MERGE_TRANS_ERR_LOG_JBC_TAG_MASK 0x0000f80000000000ULL +#define FIRE_MERGE_TRANS_ERR_LOG_JBC_TAG_SHFT 43 +#define FIRE_MERGE_TRANS_ERR_LOG_ADDR_MASK 0x000007ffffffffffULL +#define FIRE_MERGE_TRANS_ERR_LOG_ADDR_SHFT 0 + +/* JBC DMCINT ODCD error log register */ +#define FIRE_DMCINT_ODCD_ERR_LOG_TRANS_ID_MASK 0x0030000000000000ULL +#define FIRE_DMCINT_ODCD_ERR_LOG_TRANS_ID_SHFT 52 +#define FIRE_DMCINT_ODCD_ERR_LOG_AID_MASK 0x000f000000000000ULL +#define FIRE_DMCINT_ODCD_ERR_LOG_AID_SHFT 48 +#define FIRE_DMCINT_ODCD_ERR_LOG_TTYPE_MASK 0x0000f80000000000ULL +#define FIRE_DMCINT_ODCD_ERR_LOG_TTYPE_SHFT 43 +#define FIRE_DMCINT_ODCD_ERR_LOG_ADDR_MASK 0x000007ffffffffffULL +#define FIRE_DMCINT_ODCD_ERR_LOG_ADDR_SHFT 0 + +/* JBC DMCINT IDC error log register */ +#define FIRE_DMCINT_IDC_ERR_DMC_CTAG_MASK 0x000000000fff0000ULL +#define FIRE_DMCINT_IDC_ERR_DMC_CTAG_SHFT 16 +#define FIRE_DMCINT_IDC_ERR_TRANSID_MASK 0x000000000000c000ULL +#define FIRE_DMCINT_IDC_ERR_AGNTID_MASK 0x0000000000003c00ULL +#define FIRE_DMCINT_IDC_ERR_AGNTID_SHFT 10 +#define FIRE_DMCINT_IDC_ERR_SRCID_MASK 0x00000000000003e0ULL +#define FIRE_DMCINT_IDC_ERR_SRCID_SHFT 5 +#define FIRE_DMCINT_IDC_ERR_TARGID_MASK 0x000000000000001fULL +#define FIRE_DMCINT_IDC_ERRO_TARGID_SHFT 0 + +/* JBC CSR error log register */ +#define 
FIRE_JBC_CSR_ERR_LOG_WR 0x0000040000000000ULL +#define FIRE_JBC_CSR_ERR_LOG_BMASK_MASK 0x000003fffc000000ULL +#define FIRE_JBC_CSR_ERR_LOG_BMASK_SHFT 26 +#define FIRE_JBC_CSR_ERR_LOG_ADDR_MASK 0x0000000003ffffffULL +#define FIRE_JBC_CSR_ERR_LOG_ADDR_SHFT 0 + +/* JBC core and block interrupt enable register */ +#define FIRE_JBC_CORE_BLOCK_INT_EN_JBC 0x8000000000000000ULL +#define FIRE_JBC_CORE_BLOCK_INT_EN_CSR 0x0000000000000008ULL +#define FIRE_JBC_CORE_BLOCK_INT_EN_MERGE 0x0000000000000004ULL +#define FIRE_JBC_CORE_BLOCK_INT_EN_JBCINT 0x0000000000000002ULL +#define FIRE_JBC_CORE_BLOCK_INT_EN_DMCINT 0x0000000000000001ULL + +/* JBC core and block error status register */ +#define FIRE_JBC_CORE_BLOCK_ERR_STAT_CSR 0x0000000000000008ULL +#define FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE 0x0000000000000004ULL +#define FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT 0x0000000000000002ULL +#define FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT 0x0000000000000001ULL + +/* JBC performance counter select register - may also apply to UBC */ +#define FO_XBC_PRF_CNT_PIO_RD_PCIEB 0x0000000000000018ULL +#define FO_XBC_PRF_CNT_PIO_WR_PCIEB 0x0000000000000017ULL +#define FO_XBC_PRF_CNT_PIO_RD_PCIEA 0x0000000000000016ULL +#define FO_XBC_PRF_CNT_PIO_WR_PCIEA 0x0000000000000015ULL +#define FO_XBC_PRF_CNT_WB 0x0000000000000014ULL +#define FO_XBC_PRF_CNT_PIO_FRGN 0x0000000000000013ULL +#define FO_XBC_PRF_CNT_XB_NCHRNT 0x0000000000000012ULL +#define FO_XBC_PRF_CNT_FO_CHRNT 0x0000000000000011ULL +#define FO_XBC_PRF_CNT_XB_CHRNT 0x0000000000000010ULL +#define FO_XBC_PRF_CNT_AOKOFF_DOKOFF 0x000000000000000fULL +#define FO_XBC_PRF_CNT_DOKOFF 0x000000000000000eULL +#define FO_XBC_PRF_CNT_AOKOFF 0x000000000000000dULL +#define FO_XBC_PRF_CNT_RD_TOTAL 0x000000000000000cULL +#define FO_XBC_PRF_CNT_WR_TOTAL 0x000000000000000bULL +#define FO_XBC_PRF_CNT_WR_PARTIAL 0x000000000000000aULL +#define FO_XBC_PRF_CNT_PIOS_CSR_RINGB 0x0000000000000009ULL +#define FO_XBC_PRF_CNT_PIOS_CSR_RINGA 0x0000000000000008ULL +#define FO_XBC_PRF_CNT_PIOS_EBUS 0x0000000000000007ULL +#define FO_XBC_PRF_CNT_PIOS_I2C 0x0000000000000006ULL +#define FO_XBC_PRF_CNT_RD_LAT_SMPLS 0x0000000000000005ULL +#define FO_XBC_PRF_CNT_RD_LAT 0x0000000000000004ULL +#define FO_XBC_PRF_CNT_ON_XB 0x0000000000000003ULL +#define FO_XBC_PRF_CNT_XB_IDL 0x0000000000000002ULL +#define FO_XBC_PRF_CNT_XB_CLK 0x0000000000000001ULL +#define FO_XBC_PRF_CNT_NONE 0x0000000000000000ULL +#define FO_XBC_PRF_CNT_CNT1_SHFT 8 +#define FO_XBC_PRF_CNT_CNT0_SHFT 0 + +/* JBC performance counter 0/1 registers - may also apply to UBC */ +#define FO_XBC_PRF_CNT_MASK 0xffffffffffffffffULL +#define FO_XBC_PRF_CNT_SHFT 0 + +/* Lookup tables */ +const uint16_t const fire_freq_nak_tmr_thrs[6][4] = { + { 0x00ed, 0x049, 0x043, 0x030 }, + { 0x01a0, 0x076, 0x06b, 0x048 }, + { 0x022f, 0x09a, 0x056, 0x056 }, + { 0x042f, 0x11a, 0x096, 0x096 }, + { 0x082f, 0x21a, 0x116, 0x116 }, + { 0x102f, 0x41a, 0x216, 0x216 } +}; + +const uint16_t const fire_rply_tmr_thrs[6][4] = { + { 0x0379, 0x112, 0x0fc, 0x0b4 }, + { 0x0618, 0x1BA, 0x192, 0x10e }, + { 0x0831, 0x242, 0x143, 0x143 }, + { 0x0fb1, 0x422, 0x233, 0x233 }, + { 0x1eb0, 0x7e1, 0x412, 0x412 }, + { 0x3cb0, 0xf61, 0x7d2, 0x7d2 } +}; + +/* Register default values */ +#define FO_PCI_TLU_CTRL_L0S_TIM_DFLT 0xda +#define FO_PCI_TLU_CTRL_CFG_DFLT 0x1 +#define FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT 0x2dc6c0 +#define FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT 0x7a120 +#define FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT 0x2 +#define FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT 0x8c +#define OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT 
0xc9 +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT 0x0 +#define FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT 0xffff + +/* INO macros */ +#define FO_EQ_FIRST_INO 0x18 +#define FO_EQ_LAST_INO 0x3b +#define FO_DMC_PEC_INO 0x3e +#define FO_XCB_INO 0x3f +#define FO_MAX_INO FO_XCB_INO + +/* Device space macros */ +#define FO_CONF_BUS_SHFT 20 +#define FO_CONF_DEV_SHFT 15 +#define FO_CONF_FUNC_SHFT 12 +#define FO_CONF_REG_SHFT 0 +#define FO_IO_SIZE 0x10000000 +#define FO_MEM_SIZE 0x1ffff0000 + +#define FO_CONF_OFF(bus, slot, func, reg) \ + (((bus) << FO_CONF_BUS_SHFT) | \ + ((slot) << FO_CONF_DEV_SHFT) | \ + ((func) << FO_CONF_FUNC_SHFT) | \ + ((reg) << FO_CONF_REG_SHFT)) + +/* Width of the physical addresses the IOMMU translates to */ +#define FIRE_IOMMU_BITS 43 +#define OBERON_IOMMU_BITS 47 + +/* Event queue macros */ +#define FO_EQ_ALIGNMENT (512 * 1024) +#define FO_EQ_NRECORDS 128 +#define FO_EQ_RECORD_SIZE 64 + +/* Event queue record format */ +struct fo_msiq_record { + uint64_t fomqr_word0; + uint64_t fomqr_word1; + uint64_t fomqr_reserved[6]; +}; + +#define FO_MQR_WORD0_FMT_TYPE_MASK 0x7f00000000000000ULL +#define FO_MQR_WORD0_FMT_TYPE_SHFT 56 +#define FO_MQR_WORD0_FMT_TYPE_MSI64 0x7800000000000000ULL +#define FO_MQR_WORD0_FMT_TYPE_MSI32 0x5800000000000000ULL +#define FO_MQR_WORD0_FMT_TYPE_MSG 0x3000000000000000ULL +#define FO_MQR_WORD0_FMT_TYPE_MSG_ROUTE_MASK 0x0700000000000000ULL +#define FO_MQR_WORD0_FMT_TYPE_MSG_ROUTE_SHFT 56 +#define FO_MQR_WORD0_LENGTH_MASK 0x00ffc00000000000ULL +#define FO_MQR_WORD0_LENGTH_SHFT 46 +#define FO_MQR_WORD0_ADDR0_MASK 0x00003fff00000000ULL +#define FO_MQR_WORD0_ADDR0_SHFT 32 +#define FO_MQR_WORD0_RID_MASK 0x00000000ffff0000ULL +#define FO_MQR_WORD0_RID_SHFT 16 +#define FO_MQR_WORD0_DATA0_MASK 0x000000000000ffffULL +#define FO_MQR_WORD0_DATA0_SHFT 0 +#define FO_MQR_WORD1_ADDR1_MASK 0xffffffffffff0000ULL +#define FO_MQR_WORD1_ADDR1_SHFT 16 +#define FO_MQR_WORD1_DATA1_MASK 0x000000000000ffffULL +#define FO_MQR_WORD1_DATA1_SHFT 0 + +#endif /* !_SPARC64_PCI_FIREREG_H_ */ diff --git a/sys/sparc64/pci/firevar.h b/sys/sparc64/pci/firevar.h new file mode 100644 index 000000000000..58ba419daeea --- /dev/null +++ b/sys/sparc64/pci/firevar.h @@ -0,0 +1,98 @@ +/*- + * Copyright (c) 2009 by Marius Strobl . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _SPARC64_PCI_FIREVAR_H_ +#define _SPARC64_PCI_FIREVAR_H_ + +struct fire_softc { + struct iommu_state sc_is; + struct bus_dma_methods sc_dma_methods; + + struct mtx sc_msi_mtx; + struct mtx sc_pcib_mtx; + + struct resource *sc_mem_res[FIRE_NREG]; + struct resource *sc_irq_res[FIRE_NINTR]; + void *sc_ihand[FIRE_NINTR]; + + struct rman sc_pci_mem_rman; + struct rman sc_pci_io_rman; + bus_space_handle_t sc_pci_bh[FIRE_NRANGE]; + bus_space_tag_t sc_pci_cfgt; + bus_space_tag_t sc_pci_iot; + bus_space_tag_t sc_pci_memt; + bus_dma_tag_t sc_pci_dmat; + + device_t sc_dev; + + uint64_t *sc_msiq; + u_char *sc_msi_bitmap; + uint32_t *sc_msi_msiq_table; + u_char *sc_msiq_bitmap; + uint64_t sc_msi_addr32; + uint64_t sc_msi_addr64; + uint32_t sc_msi_count; + uint32_t sc_msi_first; + uint32_t sc_msi_data_mask; + uint32_t sc_msix_data_width; + uint32_t sc_msiq_count; + uint32_t sc_msiq_size; + uint32_t sc_msiq_first; + uint32_t sc_msiq_ino_first; + + phandle_t sc_node; + + u_int sc_mode; +#define FIRE_MODE_FIRE 0 +#define FIRE_MODE_OBERON 1 + + u_int sc_flags; +#define FIRE_MSIX (1 << 0) + + uint32_t sc_ign; + + uint32_t sc_stats_ilu_err; + uint32_t sc_stats_jbc_ce_async; + uint32_t sc_stats_jbc_unsol_int; + uint32_t sc_stats_jbc_unsol_rd; + uint32_t sc_stats_mmu_err; + uint32_t sc_stats_tlu_ce; + uint32_t sc_stats_tlu_oe_non_fatal; + uint32_t sc_stats_tlu_oe_rx_err; + uint32_t sc_stats_tlu_oe_tx_err; + uint32_t sc_stats_ubc_dmardue; + + uint8_t sc_pci_secbus; + uint8_t sc_pci_subbus; + + struct ofw_bus_iinfo sc_pci_iinfo; +}; + +#endif /* !_SPARC64_PCI_FIREVAR_H_ */
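
Note (not part of the patch): the fo_msiq_record layout and the FO_MQR_WORD0_*
constants defined in firereg.h above describe the event queue records which
fire_msiq_handler() walks when an MSI/MSI-X arrives. The following minimal,
self-contained sketch decodes word0 of such a record the same way the handler
does; the mask/shift values are copied from the patch and the sample record
value is hypothetical.

/*
 * Illustrative sketch only: extract the MSI/MSI-X number from word0 of a
 * `Fire' event queue record.  Constants are duplicated from firereg.h so
 * the example compiles on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define FO_MQR_WORD0_FMT_TYPE_MASK	0x7f00000000000000ULL
#define FO_MQR_WORD0_FMT_TYPE_MSI32	0x5800000000000000ULL
#define FO_MQR_WORD0_DATA0_MASK		0x000000000000ffffULL
#define FO_MQR_WORD0_DATA0_SHFT		0

/* Return the MSI/MSI-X number carried by a record, or -1 if it is unused. */
static int
decode_msi_record(uint64_t word0)
{

	if ((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0)
		return (-1);
	return ((int)((word0 & FO_MQR_WORD0_DATA0_MASK) >>
	    FO_MQR_WORD0_DATA0_SHFT));
}

int
main(void)
{
	/* A hypothetical 32-bit MSI record carrying MSI number 0x2a. */
	uint64_t word0 = FO_MQR_WORD0_FMT_TYPE_MSI32 | 0x2a;

	printf("MSI %d\n", decode_msi_record(word0));
	return (0);
}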