diff --git a/sys/conf/NOTES b/sys/conf/NOTES index af1556fd0147..faa8d78ce01d 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -1673,12 +1673,16 @@ options MSIZE=512 # mbuf size in bytes # The `en' device provides support for Efficient Networks (ENI) # ENI-155 PCI midway cards, and the Adaptec 155Mbps PCI ATM cards (ANA-59x0). # +# The `hatm' device provides support for Fore/Marconi HE155 and HE622 +# ATM PCI cards. +# # atm device provides generic atm functions and is required for # atm devices. # NATM enables the netnatm protocol family that can be used to # bypass TCP/IP. # # utopia provides the access to the ATM PHY chips and is required for en +# and hatm # # the current driver supports only PVC operations (no atm-arp, no multicast). # for more details, please read the original documents at @@ -1686,6 +1690,7 @@ options MSIZE=512 # mbuf size in bytes # device atm device en +device hatm #Fore/Marconi HE155/622 device utopia #ATM PHY driver options NATM #native ATM diff --git a/sys/conf/files b/sys/conf/files index ad16fbc6ef22..671d0acb89ec 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -408,6 +408,11 @@ dev/fxp/if_fxp.c optional fxp dev/gem/if_gem.c optional gem dev/gem/if_gem_pci.c optional gem pci dev/gx/if_gx.c optional gx +dev/hatm/if_hatm.c optional hatm pci +dev/hatm/if_hatm_intr.c optional hatm pci +dev/hatm/if_hatm_ioctl.c optional hatm pci +dev/hatm/if_hatm_rx.c optional hatm pci +dev/hatm/if_hatm_tx.c optional hatm pci dev/hea/eni.c optional hea nowerror dev/hea/eni_buffer.c optional hea nowerror dev/hea/eni_globals.c optional hea diff --git a/sys/dev/hatm/if_hatm.c b/sys/dev/hatm/if_hatm.c new file mode 100644 index 000000000000..da3b40ea6ec7 --- /dev/null +++ b/sys/dev/hatm/if_hatm.c @@ -0,0 +1,2382 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * ForeHE driver. + * + * This file contains the module and driver infrastructure stuff as well + * as a couple of utility functions and the entire initialisation. 
+ */ +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#ifdef ENABLE_BPF +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static const struct { + uint16_t vid; + uint16_t did; + const char *name; +} hatm_devs[] = { + { 0x1127, 0x400, + "FORE HE" }, + { 0, 0, NULL } +}; + +SYSCTL_DECL(_hw_atm); + +MODULE_DEPEND(hatm, utopia, 1, 1, 1); +MODULE_DEPEND(hatm, pci, 1, 1, 1); +MODULE_DEPEND(hatm, atm, 1, 1, 1); + +#define EEPROM_DELAY 400 /* microseconds */ + +/* Read from EEPROM 0000 0011b */ +static const uint32_t readtab[] = { + HE_REGM_HOST_PROM_SEL | HE_REGM_HOST_PROM_CLOCK, + 0, + HE_REGM_HOST_PROM_CLOCK, + 0, /* 0 */ + HE_REGM_HOST_PROM_CLOCK, + 0, /* 0 */ + HE_REGM_HOST_PROM_CLOCK, + 0, /* 0 */ + HE_REGM_HOST_PROM_CLOCK, + 0, /* 0 */ + HE_REGM_HOST_PROM_CLOCK, + 0, /* 0 */ + HE_REGM_HOST_PROM_CLOCK, + HE_REGM_HOST_PROM_DATA_IN, /* 0 */ + HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, + HE_REGM_HOST_PROM_DATA_IN, /* 1 */ + HE_REGM_HOST_PROM_CLOCK | HE_REGM_HOST_PROM_DATA_IN, + HE_REGM_HOST_PROM_DATA_IN, /* 1 */ +}; +static const uint32_t clocktab[] = { + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0, HE_REGM_HOST_PROM_CLOCK, + 0 +}; + +/* + * Convert cell rate to ATM Forum format + */ +u_int +hatm_cps2atmf(uint32_t pcr) +{ + u_int e; + + if (pcr == 0) + return (0); + pcr <<= 9; + e = 0; + while (pcr > (1024 - 1)) { + e++; + pcr >>= 1; + } + return ((1 << 14) | (e << 9) | (pcr & 0x1ff)); +} +u_int +hatm_atmf2cps(uint32_t fcr) +{ + fcr &= 0x7fff; + + return ((1 << ((fcr >> 9) 
& 0x1f)) * (512 + (fcr & 0x1ff)) / 512 + * (fcr >> 14)); +} + +/************************************************************ + * + * Initialisation + */ +/* + * Probe for a HE controller + */ +static int +hatm_probe(device_t dev) +{ + int i; + + for (i = 0; hatm_devs[i].name; i++) + if (pci_get_vendor(dev) == hatm_devs[i].vid && + pci_get_device(dev) == hatm_devs[i].did) { + device_set_desc(dev, hatm_devs[i].name); + return (0); + } + return (ENXIO); +} + +/* + * Allocate and map DMA-able memory. We support only contiguous mappings. + */ +static void +dmaload_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + if (error) + return; + KASSERT(nsegs == 1, ("too many segments for DMA: %d", nsegs)); + KASSERT(segs[0].ds_addr <= 0xffffffffUL, + ("phys addr too large %lx", (u_long)segs[0].ds_addr)); + + *(bus_addr_t *)arg = segs[0].ds_addr; +} +static int +hatm_alloc_dmamem(struct hatm_softc *sc, const char *what, struct dmamem *mem) +{ + int error; + + mem->base = NULL; + + /* + * Alignement does not work in the bus_dmamem_alloc function below + * on FreeBSD. malloc seems to align objects at least to the object + * size so increase the size to the alignment if the size is lesser + * than the alignemnt. + * XXX on sparc64 this is (probably) not needed. 
+ */ + if (mem->size < mem->align) + mem->size = mem->align; + + error = bus_dma_tag_create(sc->parent_tag, mem->align, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, + NULL, NULL, mem->size, 1, + BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, &mem->tag); + if (error) { + if_printf(&sc->ifatm.ifnet, "DMA tag create (%s)\n", what); + return (error); + } + + error = bus_dmamem_alloc(mem->tag, &mem->base, 0, &mem->map); + if (error) { + if_printf(&sc->ifatm.ifnet, "DMA mem alloc (%s): %d\n", + what, error); + bus_dma_tag_destroy(mem->tag); + mem->base = NULL; + return (error); + } + + error = bus_dmamap_load(mem->tag, mem->map, mem->base, mem->size, + dmaload_helper, &mem->paddr, 0); + if (error) { + if_printf(&sc->ifatm.ifnet, "DMA map load (%s): %d\n", + what, error); + bus_dmamem_free(mem->tag, mem->base, mem->map); + bus_dma_tag_destroy(mem->tag); + mem->base = NULL; + return (error); + } + + DBG(sc, DMA, ("%s S/A/V/P 0x%x 0x%x %p 0x%lx", what, mem->size, + mem->align, mem->base, (u_long)mem->paddr)); + + return (0); +} + +/* + * Destroy all the resources of an DMA-able memory region. 
+ */ +static void +hatm_destroy_dmamem(struct dmamem *mem) +{ + if (mem->base != NULL) { + bus_dmamap_unload(mem->tag, mem->map); + bus_dmamem_free(mem->tag, mem->base, mem->map); + (void)bus_dma_tag_destroy(mem->tag); + mem->base = NULL; + } +} + +/* + * Initialize/destroy DMA maps for the large pool 0 + */ +static void +hatm_destroy_rmaps(struct hatm_softc *sc) +{ + u_int b; + + DBG(sc, ATTACH, ("destroying rmaps and lbuf pointers...")); + if (sc->rmaps != NULL) { + for (b = 0; b < sc->lbufs_size; b++) + bus_dmamap_destroy(sc->mbuf_tag, sc->rmaps[b]); + free(sc->rmaps, M_DEVBUF); + } + if (sc->lbufs != NULL) + free(sc->lbufs, M_DEVBUF); +} + +static void +hatm_init_rmaps(struct hatm_softc *sc) +{ + u_int b; + int err; + + DBG(sc, ATTACH, ("allocating rmaps and lbuf pointers...")); + sc->lbufs = malloc(sizeof(sc->lbufs[0]) * sc->lbufs_size, + M_DEVBUF, M_ZERO | M_WAITOK); + + /* allocate and create the DMA maps for the large pool */ + sc->rmaps = malloc(sizeof(sc->rmaps[0]) * sc->lbufs_size, + M_DEVBUF, M_WAITOK); + for (b = 0; b < sc->lbufs_size; b++) { + err = bus_dmamap_create(sc->mbuf_tag, 0, &sc->rmaps[b]); + if (err != 0) + panic("bus_dmamap_create: %d\n", err); + } +} + +/* + * Initialize and destroy small mbuf page pointers and pages + */ +static void +hatm_destroy_smbufs(struct hatm_softc *sc) +{ + u_int i, b; + struct mbuf_page *pg; + + if (sc->mbuf_pages != NULL) { + for (i = 0; i < sc->mbuf_npages; i++) { + pg = sc->mbuf_pages[i]; + for (b = 0; b < pg->hdr.nchunks; b++) { + if (MBUF_TST_BIT(pg->hdr.card, b)) + if_printf(&sc->ifatm.ifnet, + "%s -- mbuf page=%u card buf %u\n", + __func__, i, b); + if (MBUF_TST_BIT(pg->hdr.used, b)) + if_printf(&sc->ifatm.ifnet, + "%s -- mbuf page=%u used buf %u\n", + __func__, i, b); + } + bus_dmamap_unload(sc->mbuf_tag, pg->hdr.map); + bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); + free(pg, M_DEVBUF); + } + free(sc->mbuf_pages, M_DEVBUF); + } +} + +static void +hatm_init_smbufs(struct hatm_softc *sc) +{ + 
sc->mbuf_pages = malloc(sizeof(sc->mbuf_pages[0]) * + HE_CONFIG_MAX_MBUF_PAGES, M_DEVBUF, M_WAITOK); + sc->mbuf_npages = 0; +} + +/* + * Initialize/destroy TPDs. This is called from attach/detach. + */ +static void +hatm_destroy_tpds(struct hatm_softc *sc) +{ + struct tpd *t; + + if (sc->tpds.base == NULL) + return; + + DBG(sc, ATTACH, ("releasing TPDs ...")); + if (sc->tpd_nfree != sc->tpd_total) + if_printf(&sc->ifatm.ifnet, "%u tpds still in use from %u\n", + sc->tpd_total - sc->tpd_nfree, sc->tpd_total); + while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { + SLIST_REMOVE_HEAD(&sc->tpd_free, link); + bus_dmamap_destroy(sc->tx_tag, t->map); + } + hatm_destroy_dmamem(&sc->tpds); + free(sc->tpd_used, M_DEVBUF); + DBG(sc, ATTACH, ("... done")); +} +static int +hatm_init_tpds(struct hatm_softc *sc) +{ + int error; + u_int i; + struct tpd *t; + + DBG(sc, ATTACH, ("allocating %u TPDs and maps ...", sc->tpd_total)); + error = hatm_alloc_dmamem(sc, "TPD memory", &sc->tpds); + if (error != 0) { + DBG(sc, ATTACH, ("... dmamem error=%d", error)); + return (error); + } + + /* put all the TPDs on the free list and allocate DMA maps */ + for (i = 0; i < sc->tpd_total; i++) { + t = TPD_ADDR(sc, i); + t->no = i; + t->mbuf = NULL; + error = bus_dmamap_create(sc->tx_tag, 0, &t->map); + if (error != 0) { + DBG(sc, ATTACH, ("... dmamap error=%d", error)); + while ((t = SLIST_FIRST(&sc->tpd_free)) != NULL) { + SLIST_REMOVE_HEAD(&sc->tpd_free, link); + bus_dmamap_destroy(sc->tx_tag, t->map); + } + hatm_destroy_dmamem(&sc->tpds); + return (error); + } + + SLIST_INSERT_HEAD(&sc->tpd_free, t, link); + } + + /* allocate and zero bitmap */ + sc->tpd_used = malloc(sizeof(uint8_t) * (sc->tpd_total + 7) / 8, + M_DEVBUF, M_ZERO | M_WAITOK); + sc->tpd_nfree = sc->tpd_total; + + DBG(sc, ATTACH, ("... done")); + + return (0); +} + +/* + * Free all the TPDs that where given to the card. + * An mbuf chain may be attached to a TPD - free it also and + * unload its associated DMA map. 
+ */ +static void +hatm_stop_tpds(struct hatm_softc *sc) +{ + u_int i; + struct tpd *t; + + DBG(sc, ATTACH, ("free TPDs ...")); + for (i = 0; i < sc->tpd_total; i++) { + if (TPD_TST_USED(sc, i)) { + t = TPD_ADDR(sc, i); + if (t->mbuf) { + m_freem(t->mbuf); + t->mbuf = NULL; + bus_dmamap_unload(sc->tx_tag, t->map); + } + TPD_CLR_USED(sc, i); + SLIST_INSERT_HEAD(&sc->tpd_free, t, link); + sc->tpd_nfree++; + } + } +} + +/* + * This frees ALL resources of this interface and leaves the structure + * in an indeterminate state. This is called just before detaching or + * on a failed attach. No lock should be held. + */ +static void +hatm_destroy(struct hatm_softc *sc) +{ + bus_teardown_intr(sc->dev, sc->irqres, sc->ih); + + hatm_destroy_rmaps(sc); + hatm_destroy_smbufs(sc); + hatm_destroy_tpds(sc); + + if (sc->vcc_zone != NULL) + uma_zdestroy(sc->vcc_zone); + + /* + * Release all memory allocated to the various queues and + * Status pages. These have there own flag which shows whether + * they are really allocated. 
+ */ + hatm_destroy_dmamem(&sc->irq_0.mem); + hatm_destroy_dmamem(&sc->rbp_s0.mem); + hatm_destroy_dmamem(&sc->rbp_l0.mem); + hatm_destroy_dmamem(&sc->rbp_s1.mem); + hatm_destroy_dmamem(&sc->rbrq_0.mem); + hatm_destroy_dmamem(&sc->rbrq_1.mem); + hatm_destroy_dmamem(&sc->tbrq.mem); + hatm_destroy_dmamem(&sc->tpdrq.mem); + hatm_destroy_dmamem(&sc->hsp_mem); + + if (sc->irqres != NULL) + bus_release_resource(sc->dev, SYS_RES_IRQ, + sc->irqid, sc->irqres); + + if (sc->tx_tag != NULL) + if (bus_dma_tag_destroy(sc->tx_tag)) + if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); + + if (sc->mbuf_tag != NULL) + if (bus_dma_tag_destroy(sc->mbuf_tag)) + if_printf(&sc->ifatm.ifnet, "mbuf DMA tag busy\n"); + + if (sc->parent_tag != NULL) + if (bus_dma_tag_destroy(sc->parent_tag)) + if_printf(&sc->ifatm.ifnet, "parent DMA tag busy\n"); + + if (sc->memres != NULL) + bus_release_resource(sc->dev, SYS_RES_MEMORY, + sc->memid, sc->memres); + + sysctl_ctx_free(&sc->sysctl_ctx); + + cv_destroy(&sc->cv_rcclose); + cv_destroy(&sc->vcc_cv); + mtx_destroy(&sc->mbuf0_mtx); + mtx_destroy(&sc->mbuf1_mtx); + mtx_destroy(&sc->mtx); +} + +/* + * 4.4 Card reset + */ +static int +hatm_reset(struct hatm_softc *sc) +{ + u_int v, count; + + WRITE4(sc, HE_REGO_RESET_CNTL, 0x00); + BARRIER_W(sc); + WRITE4(sc, HE_REGO_RESET_CNTL, 0xff); + BARRIER_RW(sc); + count = 0; + while (((v = READ4(sc, HE_REGO_RESET_CNTL)) & HE_REGM_RESET_STATE) == 0) { + BARRIER_R(sc); + if (++count == 100) { + if_printf(&sc->ifatm.ifnet, "reset failed\n"); + return (ENXIO); + } + DELAY(1000); + } + return (0); +} + +/* + * 4.5 Set Bus Width + */ +static void +hatm_init_bus_width(struct hatm_softc *sc) +{ + uint32_t v, v1; + + v = READ4(sc, HE_REGO_HOST_CNTL); + BARRIER_R(sc); + if (v & HE_REGM_HOST_BUS64) { + sc->pci64 = 1; + v1 = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); + v1 |= HE_PCIM_CTL0_64BIT; + pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v1, 4); + + v |= HE_REGM_HOST_DESC_RD64 + | HE_REGM_HOST_DATA_RD64 + | 
HE_REGM_HOST_DATA_WR64; + WRITE4(sc, HE_REGO_HOST_CNTL, v); + BARRIER_W(sc); + } else { + sc->pci64 = 0; + v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); + v &= ~HE_PCIM_CTL0_64BIT; + pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); + } +} + +/* + * 4.6 Set Host Endianess + */ +static void +hatm_init_endianess(struct hatm_softc *sc) +{ + uint32_t v; + + v = READ4(sc, HE_REGO_LB_SWAP); + BARRIER_R(sc); +#if BYTE_ORDER == BIG_ENDIAN + v |= HE_REGM_LBSWAP_INTR_SWAP | + HE_REGM_LBSWAP_DESC_WR_SWAP | + HE_REGM_LBSWAP_BIG_ENDIAN; + v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | + HE_REGM_LBSWAP_DESC_RD_SWAP | + HE_REGM_LBSWAP_DATA_RD_SWAP); +#else + v &= ~(HE_REGM_LBSWAP_DATA_WR_SWAP | + HE_REGM_LBSWAP_DESC_RD_SWAP | + HE_REGM_LBSWAP_DATA_RD_SWAP | + HE_REGM_LBSWAP_INTR_SWAP | + HE_REGM_LBSWAP_DESC_WR_SWAP | + HE_REGM_LBSWAP_BIG_ENDIAN); +#endif + + if (sc->he622) + v |= HE_REGM_LBSWAP_XFER_SIZE; + + WRITE4(sc, HE_REGO_LB_SWAP, v); + BARRIER_W(sc); +} + +/* + * 4.7 Read EEPROM + */ +static uint8_t +hatm_read_prom_byte(struct hatm_softc *sc, u_int addr) +{ + uint32_t val, tmp_read, byte_read; + u_int i, j; + int n; + + val = READ4(sc, HE_REGO_HOST_CNTL); + val &= HE_REGM_HOST_PROM_BITS; + BARRIER_R(sc); + + val |= HE_REGM_HOST_PROM_WREN; + WRITE4(sc, HE_REGO_HOST_CNTL, val); + BARRIER_W(sc); + + /* send READ */ + for (i = 0; i < sizeof(readtab) / sizeof(readtab[0]); i++) { + WRITE4(sc, HE_REGO_HOST_CNTL, val | readtab[i]); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + } + + /* send ADDRESS */ + for (n = 7, j = 0; n >= 0; n--) { + WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | + (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++] | + (((addr >> n) & 1 ) << HE_REGS_HOST_PROM_DATA_IN)); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + } + + val &= ~HE_REGM_HOST_PROM_WREN; + WRITE4(sc, HE_REGO_HOST_CNTL, val); + BARRIER_W(sc); + + /* read DATA */ + byte_read = 0; + for (n = 7, j = 0; n >= 0; 
n--) { + WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + tmp_read = READ4(sc, HE_REGO_HOST_CNTL); + byte_read |= (uint8_t)(((tmp_read & HE_REGM_HOST_PROM_DATA_OUT) + >> HE_REGS_HOST_PROM_DATA_OUT) << n); + WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + } + WRITE4(sc, HE_REGO_HOST_CNTL, val | clocktab[j++]); + BARRIER_W(sc); + DELAY(EEPROM_DELAY); + + return (byte_read); +} + +static void +hatm_init_read_eeprom(struct hatm_softc *sc) +{ + u_int n, count; + u_char byte; + uint32_t v; + + for (n = count = 0; count < HE_EEPROM_PROD_ID_LEN; count++) { + byte = hatm_read_prom_byte(sc, HE_EEPROM_PROD_ID + count); + if (n > 0 || byte != ' ') + sc->prod_id[n++] = byte; + } + while (n > 0 && sc->prod_id[n-1] == ' ') + n--; + sc->prod_id[n] = '\0'; + + for (n = count = 0; count < HE_EEPROM_REV_LEN; count++) { + byte = hatm_read_prom_byte(sc, HE_EEPROM_REV + count); + if (n > 0 || byte != ' ') + sc->rev[n++] = byte; + } + while (n > 0 && sc->rev[n-1] == ' ') + n--; + sc->rev[n] = '\0'; + sc->ifatm.mib.hw_version = sc->rev[0]; + + sc->ifatm.mib.serial = hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 0) << 0; + sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 1) << 8; + sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 2) << 16; + sc->ifatm.mib.serial |= hatm_read_prom_byte(sc, HE_EEPROM_M_SN + 3) << 24; + + v = hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 0) << 0; + v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 1) << 8; + v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 2) << 16; + v |= hatm_read_prom_byte(sc, HE_EEPROM_MEDIA + 3) << 24; + + switch (v) { + case HE_MEDIA_UTP155: + sc->ifatm.mib.media = IFM_ATM_UTP_155; + sc->ifatm.mib.pcr = ATM_RATE_155M; + break; + + case HE_MEDIA_MMF155: + sc->ifatm.mib.media = IFM_ATM_MM_155; + sc->ifatm.mib.pcr = ATM_RATE_155M; + break; + + case HE_MEDIA_MMF622: + sc->ifatm.mib.media = IFM_ATM_MM_622; + sc->ifatm.mib.device = 
ATM_DEVICE_HE622; + sc->ifatm.mib.pcr = ATM_RATE_622M; + sc->he622 = 1; + break; + + case HE_MEDIA_SMF155: + sc->ifatm.mib.media = IFM_ATM_SM_155; + sc->ifatm.mib.pcr = ATM_RATE_155M; + break; + + case HE_MEDIA_SMF622: + sc->ifatm.mib.media = IFM_ATM_SM_622; + sc->ifatm.mib.device = ATM_DEVICE_HE622; + sc->ifatm.mib.pcr = ATM_RATE_622M; + sc->he622 = 1; + break; + } + + sc->ifatm.mib.esi[0] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 0); + sc->ifatm.mib.esi[1] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 1); + sc->ifatm.mib.esi[2] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 2); + sc->ifatm.mib.esi[3] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 3); + sc->ifatm.mib.esi[4] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 4); + sc->ifatm.mib.esi[5] = hatm_read_prom_byte(sc, HE_EEPROM_MAC + 5); +} + +/* + * Clear unused interrupt queue + */ +static void +hatm_clear_irq(struct hatm_softc *sc, u_int group) +{ + WRITE4(sc, HE_REGO_IRQ_BASE(group), 0); + WRITE4(sc, HE_REGO_IRQ_HEAD(group), 0); + WRITE4(sc, HE_REGO_IRQ_CNTL(group), 0); + WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); +} + +/* + * 4.10 Initialize interrupt queues + */ +static void +hatm_init_irq(struct hatm_softc *sc, struct heirq *q, u_int group) +{ + u_int i; + + if (q->size == 0) { + hatm_clear_irq(sc, group); + return; + } + + q->group = group; + q->sc = sc; + q->irq = q->mem.base; + q->head = 0; + q->tailp = q->irq + (q->size - 1); + *q->tailp = 0; + + for (i = 0; i < q->size; i++) + q->irq[i] = HE_REGM_ITYPE_INVALID; + + WRITE4(sc, HE_REGO_IRQ_BASE(group), q->mem.paddr); + WRITE4(sc, HE_REGO_IRQ_HEAD(group), + ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | + (q->thresh << HE_REGS_IRQ_HEAD_THRESH)); + WRITE4(sc, HE_REGO_IRQ_CNTL(group), q->line); + WRITE4(sc, HE_REGO_IRQ_DATA(group), 0); +} + +/* + * 5.1.3 Initialize connection memory + */ +static void +hatm_init_cm(struct hatm_softc *sc) +{ + u_int rsra, mlbm, rabr, numbuffs; + u_int tsra, tabr, mtpd; + u_int n; + + for (n = 0; n < HE_CONFIG_TXMEM; n++) + WRITE_TCM4(sc, n, 0); + 
for (n = 0; n < HE_CONFIG_RXMEM; n++) + WRITE_RCM4(sc, n, 0); + + numbuffs = sc->r0_numbuffs + sc->r1_numbuffs + sc->tx_numbuffs; + + rsra = 0; + mlbm = ((rsra + sc->ifatm.mib.max_vccs * 8) + 0x7ff) & ~0x7ff; + rabr = ((mlbm + numbuffs * 2) + 0x7ff) & ~0x7ff; + sc->rsrb = ((rabr + 2048) + (2 * sc->ifatm.mib.max_vccs - 1)) & + ~(2 * sc->ifatm.mib.max_vccs - 1); + + tsra = 0; + sc->tsrb = tsra + sc->ifatm.mib.max_vccs * 8; + sc->tsrc = sc->tsrb + sc->ifatm.mib.max_vccs * 4; + sc->tsrd = sc->tsrc + sc->ifatm.mib.max_vccs * 2; + tabr = sc->tsrd + sc->ifatm.mib.max_vccs * 1; + mtpd = ((tabr + 1024) + (16 * sc->ifatm.mib.max_vccs - 1)) & + ~(16 * sc->ifatm.mib.max_vccs - 1); + + DBG(sc, ATTACH, ("rsra=%x mlbm=%x rabr=%x rsrb=%x", + rsra, mlbm, rabr, sc->rsrb)); + DBG(sc, ATTACH, ("tsra=%x tsrb=%x tsrc=%x tsrd=%x tabr=%x mtpd=%x", + tsra, sc->tsrb, sc->tsrc, sc->tsrd, tabr, mtpd)); + + WRITE4(sc, HE_REGO_TSRB_BA, sc->tsrb); + WRITE4(sc, HE_REGO_TSRC_BA, sc->tsrc); + WRITE4(sc, HE_REGO_TSRD_BA, sc->tsrd); + WRITE4(sc, HE_REGO_TMABR_BA, tabr); + WRITE4(sc, HE_REGO_TPD_BA, mtpd); + + WRITE4(sc, HE_REGO_RCMRSRB_BA, sc->rsrb); + WRITE4(sc, HE_REGO_RCMLBM_BA, mlbm); + WRITE4(sc, HE_REGO_RCMABR_BA, rabr); + + BARRIER_W(sc); +} + +/* + * 5.1.4 Initialize Local buffer Pools + */ +static void +hatm_init_rx_buffer_pool(struct hatm_softc *sc, + u_int num, /* bank */ + u_int start, /* start row */ + u_int numbuffs /* number of entries */ +) +{ + u_int row_size; /* bytes per row */ + uint32_t row_addr; /* start address of this row */ + u_int lbuf_size; /* bytes per lbuf */ + u_int lbufs_per_row; /* number of lbufs per memory row */ + uint32_t lbufd_index; /* index of lbuf descriptor */ + uint32_t lbufd_addr; /* address of lbuf descriptor */ + u_int lbuf_row_cnt; /* current lbuf in current row */ + uint32_t lbuf_addr; /* address of current buffer */ + u_int i; + + row_size = sc->bytes_per_row;; + row_addr = start * row_size; + lbuf_size = sc->cells_per_lbuf * 48; + lbufs_per_row = 
sc->cells_per_row / sc->cells_per_lbuf; + + /* descriptor index */ + lbufd_index = num; + + /* 2 words per entry */ + lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; + + /* write head of queue */ + WRITE4(sc, HE_REGO_RLBF_H(num), lbufd_index); + + lbuf_row_cnt = 0; + for (i = 0; i < numbuffs; i++) { + lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; + + WRITE_RCM4(sc, lbufd_addr, lbuf_addr); + + lbufd_index += 2; + WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); + + if (++lbuf_row_cnt == lbufs_per_row) { + lbuf_row_cnt = 0; + row_addr += row_size; + } + + lbufd_addr += 2 * 2; + } + + WRITE4(sc, HE_REGO_RLBF_T(num), lbufd_index - 2); + WRITE4(sc, HE_REGO_RLBF_C(num), numbuffs); + + BARRIER_W(sc); +} + +static void +hatm_init_tx_buffer_pool(struct hatm_softc *sc, + u_int start, /* start row */ + u_int numbuffs /* number of entries */ +) +{ + u_int row_size; /* bytes per row */ + uint32_t row_addr; /* start address of this row */ + u_int lbuf_size; /* bytes per lbuf */ + u_int lbufs_per_row; /* number of lbufs per memory row */ + uint32_t lbufd_index; /* index of lbuf descriptor */ + uint32_t lbufd_addr; /* address of lbuf descriptor */ + u_int lbuf_row_cnt; /* current lbuf in current row */ + uint32_t lbuf_addr; /* address of current buffer */ + u_int i; + + row_size = sc->bytes_per_row;; + row_addr = start * row_size; + lbuf_size = sc->cells_per_lbuf * 48; + lbufs_per_row = sc->cells_per_row / sc->cells_per_lbuf; + + /* descriptor index */ + lbufd_index = sc->r0_numbuffs + sc->r1_numbuffs; + + /* 2 words per entry */ + lbufd_addr = READ4(sc, HE_REGO_RCMLBM_BA) + lbufd_index * 2; + + /* write head of queue */ + WRITE4(sc, HE_REGO_TLBF_H, lbufd_index); + + lbuf_row_cnt = 0; + for (i = 0; i < numbuffs; i++) { + lbuf_addr = (row_addr + lbuf_row_cnt * lbuf_size) / 32; + + WRITE_RCM4(sc, lbufd_addr, lbuf_addr); + lbufd_index++; + WRITE_RCM4(sc, lbufd_addr + 1, lbufd_index); + + if (++lbuf_row_cnt == lbufs_per_row) { + lbuf_row_cnt = 0; + row_addr += 
row_size; + } + + lbufd_addr += 2; + } + + WRITE4(sc, HE_REGO_TLBF_T, lbufd_index - 1); + BARRIER_W(sc); +} + +/* + * 5.1.5 Initialize Intermediate Receive Queues + */ +static void +hatm_init_imed_queues(struct hatm_softc *sc) +{ + u_int n; + + if (sc->he622) { + for (n = 0; n < 8; n++) { + WRITE4(sc, HE_REGO_INMQ_S(n), 0x10*n+0x000f); + WRITE4(sc, HE_REGO_INMQ_L(n), 0x10*n+0x200f); + } + } else { + for (n = 0; n < 8; n++) { + WRITE4(sc, HE_REGO_INMQ_S(n), n); + WRITE4(sc, HE_REGO_INMQ_L(n), n+0x8); + } + } +} + +/* + * 5.1.7 Init CS block + */ +static void +hatm_init_cs_block(struct hatm_softc *sc) +{ + u_int n, i; + u_int clkfreg, cellrate, decr, tmp; + static const uint32_t erthr[2][5][3] = HE_REGT_CS_ERTHR; + static const uint32_t erctl[2][3] = HE_REGT_CS_ERCTL; + static const uint32_t erstat[2][2] = HE_REGT_CS_ERSTAT; + static const uint32_t rtfwr[2] = HE_REGT_CS_RTFWR; + static const uint32_t rtatr[2] = HE_REGT_CS_RTATR; + static const uint32_t bwalloc[2][6] = HE_REGT_CS_BWALLOC; + static const uint32_t orcf[2][2] = HE_REGT_CS_ORCF; + + /* Clear Rate Controller Start Times and Occupied Flags */ + for (n = 0; n < 32; n++) + WRITE_MBOX4(sc, HE_REGO_CS_STTIM(n), 0); + + clkfreg = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; + cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; + decr = cellrate / 32; + + for (n = 0; n < 16; n++) { + tmp = clkfreg / cellrate; + WRITE_MBOX4(sc, HE_REGO_CS_TGRLD(n), tmp - 1); + cellrate -= decr; + } + + i = (sc->cells_per_lbuf == 2) ? 0 + :(sc->cells_per_lbuf == 4) ? 
1 + : 2; + + /* table 5.2 */ + WRITE_MBOX4(sc, HE_REGO_CS_ERTHR0, erthr[sc->he622][0][i]); + WRITE_MBOX4(sc, HE_REGO_CS_ERTHR1, erthr[sc->he622][1][i]); + WRITE_MBOX4(sc, HE_REGO_CS_ERTHR2, erthr[sc->he622][2][i]); + WRITE_MBOX4(sc, HE_REGO_CS_ERTHR3, erthr[sc->he622][3][i]); + WRITE_MBOX4(sc, HE_REGO_CS_ERTHR4, erthr[sc->he622][4][i]); + + WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, erctl[sc->he622][0]); + WRITE_MBOX4(sc, HE_REGO_CS_ERCTL1, erctl[sc->he622][1]); + WRITE_MBOX4(sc, HE_REGO_CS_ERCTL2, erctl[sc->he622][2]); + + WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT0, erstat[sc->he622][0]); + WRITE_MBOX4(sc, HE_REGO_CS_ERSTAT1, erstat[sc->he622][1]); + + WRITE_MBOX4(sc, HE_REGO_CS_RTFWR, rtfwr[sc->he622]); + WRITE_MBOX4(sc, HE_REGO_CS_RTATR, rtatr[sc->he622]); + + WRITE_MBOX4(sc, HE_REGO_CS_TFBSET, bwalloc[sc->he622][0]); + WRITE_MBOX4(sc, HE_REGO_CS_WCRMAX, bwalloc[sc->he622][1]); + WRITE_MBOX4(sc, HE_REGO_CS_WCRMIN, bwalloc[sc->he622][2]); + WRITE_MBOX4(sc, HE_REGO_CS_WCRINC, bwalloc[sc->he622][3]); + WRITE_MBOX4(sc, HE_REGO_CS_WCRDEC, bwalloc[sc->he622][4]); + WRITE_MBOX4(sc, HE_REGO_CS_WCRCEIL, bwalloc[sc->he622][5]); + + WRITE_MBOX4(sc, HE_REGO_CS_OTPPER, orcf[sc->he622][0]); + WRITE_MBOX4(sc, HE_REGO_CS_OTWPER, orcf[sc->he622][1]); + + WRITE_MBOX4(sc, HE_REGO_CS_OTTLIM, 8); + + for (n = 0; n < 8; n++) + WRITE_MBOX4(sc, HE_REGO_CS_HGRRT(n), 0); +} + +/* + * 5.1.8 CS Block Connection Memory Initialisation + */ +static void +hatm_init_cs_block_cm(struct hatm_softc *sc) +{ + u_int n, i; + u_int expt, mant, etrm, wcr, ttnrm, tnrm; + uint32_t rate; + uint32_t clkfreq, cellrate, decr; + uint32_t *rg, rtg, val = 0; + uint64_t drate; + u_int buf, buf_limit; + uint32_t base = READ4(sc, HE_REGO_RCMABR_BA); + + for (n = 0; n < HE_REGL_CM_GQTBL; n++) + WRITE_RCM4(sc, base + HE_REGO_CM_GQTBL + n, 0); + for (n = 0; n < HE_REGL_CM_RGTBL; n++) + WRITE_RCM4(sc, base + HE_REGO_CM_RGTBL + n, 0); + + tnrm = 0; + for (n = 0; n < HE_REGL_CM_TNRMTBL * 4; n++) { + expt = (n >> 5) & 0x1f; + mant = ((n 
& 0x18) << 4) | 0x7f; + wcr = (1 << expt) * (mant + 512) / 512; + etrm = n & 0x7; + ttnrm = wcr / 10 / (1 << etrm); + if (ttnrm > 255) + ttnrm = 255; + else if(ttnrm < 2) + ttnrm = 2; + tnrm = (tnrm << 8) | (ttnrm & 0xff); + if (n % 4 == 0) + WRITE_RCM4(sc, base + HE_REGO_CM_TNRMTBL + (n/4), tnrm); + } + + clkfreq = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; + buf_limit = 4; + + cellrate = sc->he622 ? ATM_RATE_622M : ATM_RATE_155M; + decr = cellrate / 32; + + /* compute GRID top row in 1000 * cps */ + for (n = 0; n < 16; n++) { + u_int interval = clkfreq / cellrate; + sc->rate_grid[0][n] = (u_int64_t)clkfreq * 1000 / interval; + cellrate -= decr; + } + + /* compute the other rows according to 2.4 */ + for (i = 1; i < 16; i++) + for (n = 0; n < 16; n++) + sc->rate_grid[i][n] = sc->rate_grid[i-1][n] / + ((i < 14) ? 2 : 4); + + /* first entry is line rate */ + n = hatm_cps2atmf(sc->he622 ? ATM_RATE_622M : ATM_RATE_155M); + expt = (n >> 9) & 0x1f; + mant = n & 0x1f0; + sc->rate_grid[0][0] = (u_int64_t)(1<he622 ? 
ATM_RATE_622M : ATM_RATE_155M; + rg = &sc->rate_grid[15][15]; + + for (rate = 0; rate < 2 * HE_REGL_CM_RTGTBL; rate++) { + /* unpack the ATMF rate */ + expt = rate >> 5; + mant = (rate & 0x1f) << 4; + + /* get the cell rate - minimum is 10 per second */ + drate = (uint64_t)(1 << expt) * 1000 * (mant + 512) / 512; + if (drate < 10 * 1000) + drate = 10 * 1000; + + /* now look up the grid index */ + while (drate >= *rg && rg-- > &sc->rate_grid[0][0]) + ; + rg++; + rtg = rg - &sc->rate_grid[0][0]; + + /* now compute the buffer limit */ + buf = drate * sc->tx_numbuffs / (cellrate * 2) / 1000; + if (buf == 0) + buf = 1; + else if (buf > buf_limit) + buf = buf_limit; + + /* make value */ + val = (val << 16) | (rtg << 8) | buf; + + /* write */ + if (rate % 2 == 1) + WRITE_RCM4(sc, base + HE_REGO_CM_RTGTBL + rate/2, val); + } +} + +/* + * Clear an unused receive group buffer pool + */ +static void +hatm_clear_rpool(struct hatm_softc *sc, u_int group, u_int large) +{ + WRITE4(sc, HE_REGO_RBP_S(large, group), 0); + WRITE4(sc, HE_REGO_RBP_T(large, group), 0); + WRITE4(sc, HE_REGO_RBP_QI(large, group), 1); + WRITE4(sc, HE_REGO_RBP_BL(large, group), 0); +} + +/* + * Initialize a receive group buffer pool + */ +static void +hatm_init_rpool(struct hatm_softc *sc, struct herbp *q, u_int group, + u_int large) +{ + if (q->size == 0) { + hatm_clear_rpool(sc, group, large); + return; + } + + bzero(q->mem.base, q->mem.size); + q->rbp = q->mem.base; + q->head = q->tail = 0; + + DBG(sc, ATTACH, ("RBP%u%c=0x%lx", group, "SL"[large], + (u_long)q->mem.paddr)); + + WRITE4(sc, HE_REGO_RBP_S(large, group), q->mem.paddr); + WRITE4(sc, HE_REGO_RBP_T(large, group), 0); + WRITE4(sc, HE_REGO_RBP_QI(large, group), + ((q->size - 1) << HE_REGS_RBP_SIZE) | + HE_REGM_RBP_INTR_ENB | + (q->thresh << HE_REGS_RBP_THRESH)); + WRITE4(sc, HE_REGO_RBP_BL(large, group), (q->bsize >> 2) & ~1); +} + +/* + * Clear an unused receive buffer return queue + */ +static void +hatm_clear_rbrq(struct hatm_softc *sc, u_int 
group) +{ + WRITE4(sc, HE_REGO_RBRQ_ST(group), 0); + WRITE4(sc, HE_REGO_RBRQ_H(group), 0); + WRITE4(sc, HE_REGO_RBRQ_Q(group), (1 << HE_REGS_RBRQ_THRESH)); + WRITE4(sc, HE_REGO_RBRQ_I(group), 0); +} + +/* + * Initialize receive buffer return queue + */ +static void +hatm_init_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) +{ + if (rq->size == 0) { + hatm_clear_rbrq(sc, group); + return; + } + + rq->rbrq = rq->mem.base; + rq->head = 0; + + DBG(sc, ATTACH, ("RBRQ%u=0x%lx", group, (u_long)rq->mem.paddr)); + + WRITE4(sc, HE_REGO_RBRQ_ST(group), rq->mem.paddr); + WRITE4(sc, HE_REGO_RBRQ_H(group), 0); + WRITE4(sc, HE_REGO_RBRQ_Q(group), + (rq->thresh << HE_REGS_RBRQ_THRESH) | + ((rq->size - 1) << HE_REGS_RBRQ_SIZE)); + WRITE4(sc, HE_REGO_RBRQ_I(group), + (rq->tout << HE_REGS_RBRQ_TIME) | + (rq->pcnt << HE_REGS_RBRQ_COUNT)); +} + +/* + * Clear an unused transmit buffer return queue N + */ +static void +hatm_clear_tbrq(struct hatm_softc *sc, u_int group) +{ + WRITE4(sc, HE_REGO_TBRQ_B_T(group), 0); + WRITE4(sc, HE_REGO_TBRQ_H(group), 0); + WRITE4(sc, HE_REGO_TBRQ_S(group), 0); + WRITE4(sc, HE_REGO_TBRQ_THRESH(group), 1); +} + +/* + * Initialize transmit buffer return queue N + */ +static void +hatm_init_tbrq(struct hatm_softc *sc, struct hetbrq *tq, u_int group) +{ + if (tq->size == 0) { + hatm_clear_tbrq(sc, group); + return; + } + + tq->tbrq = tq->mem.base; + tq->head = 0; + + DBG(sc, ATTACH, ("TBRQ%u=0x%lx", group, (u_long)tq->mem.paddr)); + + WRITE4(sc, HE_REGO_TBRQ_B_T(group), tq->mem.paddr); + WRITE4(sc, HE_REGO_TBRQ_H(group), 0); + WRITE4(sc, HE_REGO_TBRQ_S(group), tq->size - 1); + WRITE4(sc, HE_REGO_TBRQ_THRESH(group), tq->thresh); +} + +/* + * Initialize TPDRQ + */ +static void +hatm_init_tpdrq(struct hatm_softc *sc) +{ + struct hetpdrq *tq; + + tq = &sc->tpdrq; + tq->tpdrq = tq->mem.base; + tq->tail = tq->head = 0; + + DBG(sc, ATTACH, ("TPDRQ=0x%lx", (u_long)tq->mem.paddr)); + + WRITE4(sc, HE_REGO_TPDRQ_H, tq->mem.paddr); + WRITE4(sc, 
HE_REGO_TPDRQ_T, 0); + WRITE4(sc, HE_REGO_TPDRQ_S, tq->size - 1); +} + +/* + * Function can be called by the infrastructure to start the card. + */ +static void +hatm_init(void *p) +{ + struct hatm_softc *sc = p; + + mtx_lock(&sc->mtx); + hatm_stop(sc); + hatm_initialize(sc); + mtx_unlock(&sc->mtx); +} + +enum { + CTL_STATS, + CTL_ISTATS, +}; + +/* + * Sysctl handler + */ +static int +hatm_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hatm_softc *sc = arg1; + uint32_t *ret; + int error; + size_t len; + + switch (arg2) { + + case CTL_STATS: + len = sizeof(uint32_t) * 4; + break; + + case CTL_ISTATS: + len = sizeof(sc->istats); + break; + + default: + panic("bad control code"); + } + + ret = malloc(len, M_TEMP, M_WAITOK); + mtx_lock(&sc->mtx); + + switch (arg2) { + + case CTL_STATS: + ret[0] = READ4(sc, HE_REGO_MCC); + ret[1] = READ4(sc, HE_REGO_OEC); + ret[2] = READ4(sc, HE_REGO_DCC); + ret[3] = READ4(sc, HE_REGO_CEC); + break; + + case CTL_ISTATS: + bcopy(&sc->istats, ret, sizeof(sc->istats)); + break; + } + mtx_unlock(&sc->mtx); + + error = SYSCTL_OUT(req, ret, len); + free(ret, M_TEMP); + + return (error); +} + +static int +kenv_getuint(struct hatm_softc *sc, const char *var, + u_int *ptr, u_int def, int rw) +{ + char full[IFNAMSIZ + 3 + 20]; + char *val, *end; + u_int u; + + *ptr = def; + + if (SYSCTL_ADD_UINT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, var, rw ? CTLFLAG_RW : CTLFLAG_RD, ptr, 0, "") == NULL) + return (ENOMEM); + + snprintf(full, sizeof(full), "hw.%s.%s", + device_get_nameunit(sc->dev), var); + + if ((val = getenv(full)) == NULL) + return (0); + u = strtoul(val, &end, 0); + if (end == val || *end != '\0') { + freeenv(val); + return (EINVAL); + } + if (bootverbose) + if_printf(&sc->ifatm.ifnet, "%s=%u\n", full, u); + *ptr = u; + return (0); +} + +/* + * Set configurable parameters. Many of these are configurable via + * kenv. 
 */
static int
hatm_configure(struct hatm_softc *sc)
{
	/* Receive buffer pool 0 small */
	kenv_getuint(sc, "rbps0.size", &sc->rbp_s0.size,
	    HE_CONFIG_RBPS0_SIZE, 0);
	kenv_getuint(sc, "rbps0.thresh", &sc->rbp_s0.thresh,
	    HE_CONFIG_RBPS0_THRESH, 0);
	sc->rbp_s0.bsize = MBUF0_SIZE;

	/* Receive buffer pool 0 large */
	kenv_getuint(sc, "rbpl0.size", &sc->rbp_l0.size,
	    HE_CONFIG_RBPL0_SIZE, 0);
	kenv_getuint(sc, "rbpl0.thresh", &sc->rbp_l0.thresh,
	    HE_CONFIG_RBPL0_THRESH, 0);
	sc->rbp_l0.bsize = MCLBYTES - MBUFL_OFFSET;

	/* Receive buffer return queue 0 */
	kenv_getuint(sc, "rbrq0.size", &sc->rbrq_0.size,
	    HE_CONFIG_RBRQ0_SIZE, 0);
	kenv_getuint(sc, "rbrq0.thresh", &sc->rbrq_0.thresh,
	    HE_CONFIG_RBRQ0_THRESH, 0);
	kenv_getuint(sc, "rbrq0.tout", &sc->rbrq_0.tout,
	    HE_CONFIG_RBRQ0_TOUT, 0);
	kenv_getuint(sc, "rbrq0.pcnt", &sc->rbrq_0.pcnt,
	    HE_CONFIG_RBRQ0_PCNT, 0);

	/* Receive buffer pool 1 small */
	kenv_getuint(sc, "rbps1.size", &sc->rbp_s1.size,
	    HE_CONFIG_RBPS1_SIZE, 0);
	kenv_getuint(sc, "rbps1.thresh", &sc->rbp_s1.thresh,
	    HE_CONFIG_RBPS1_THRESH, 0);
	sc->rbp_s1.bsize = MBUF1_SIZE;

	/* Receive buffer return queue 1 */
	kenv_getuint(sc, "rbrq1.size", &sc->rbrq_1.size,
	    HE_CONFIG_RBRQ1_SIZE, 0);
	kenv_getuint(sc, "rbrq1.thresh", &sc->rbrq_1.thresh,
	    HE_CONFIG_RBRQ1_THRESH, 0);
	kenv_getuint(sc, "rbrq1.tout", &sc->rbrq_1.tout,
	    HE_CONFIG_RBRQ1_TOUT, 0);
	kenv_getuint(sc, "rbrq1.pcnt", &sc->rbrq_1.pcnt,
	    HE_CONFIG_RBRQ1_PCNT, 0);

	/* Interrupt queue 0 */
	kenv_getuint(sc, "irq0.size", &sc->irq_0.size,
	    HE_CONFIG_IRQ0_SIZE, 0);
	kenv_getuint(sc, "irq0.thresh", &sc->irq_0.thresh,
	    HE_CONFIG_IRQ0_THRESH, 0);
	sc->irq_0.line = HE_CONFIG_IRQ0_LINE;

	/* Transmit buffer return queue 0 */
	kenv_getuint(sc, "tbrq0.size", &sc->tbrq.size,
	    HE_CONFIG_TBRQ_SIZE, 0);
	kenv_getuint(sc, "tbrq0.thresh", &sc->tbrq.thresh,
	    HE_CONFIG_TBRQ_THRESH, 0);

	/* Transmit buffer ready queue */
	kenv_getuint(sc, "tpdrq.size", &sc->tpdrq.size,
	    HE_CONFIG_TPDRQ_SIZE, 0);
	/* Max TPDs per VCC */
	kenv_getuint(sc, "tpdmax", &sc->max_tpd,
	    HE_CONFIG_TPD_MAXCC, 0);

	return (0);
}

#ifdef HATM_DEBUG

/*
 * Get TSRs from connection memory
 *
 * Debugging-only read-only sysctl: dumps transmission status registers
 * 0..14 for every VCC. Returns EPERM on any attempt to write.
 */
static int
hatm_sysctl_tsr(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 15, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j <= 14; j++)
			val[15 * i + j] = READ_TSR(sc, i, j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 15);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get TPDs from connection memory
 *
 * Debugging-only read-only sysctl: dumps 16 TCM words per VCC.
 */
static int
hatm_sysctl_tpd(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i, j;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_MAX_VCCS * 16, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_MAX_VCCS; i++)
		for (j = 0; j < 16; j++)
			val[16 * i + j] = READ_TCM4(sc, 16 * i + j);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_MAX_VCCS * 16);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get mbox registers
 *
 * Debugging-only read-only sysctl: dumps the CS mailbox register range.
 */
static int
hatm_sysctl_mbox(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;

	val = malloc(sizeof(uint32_t) * HE_REGO_CS_END, M_TEMP, M_WAITOK);

	mtx_lock(&sc->mtx);
	for (i = 0; i < HE_REGO_CS_END; i++)
		val[i] = READ_MBOX4(sc, i);
	mtx_unlock(&sc->mtx);

	error = SYSCTL_OUT(req, val, sizeof(uint32_t) * HE_REGO_CS_END);
	free(val, M_TEMP);
	if (error != 0 || req->newptr == NULL)
		return (error);

	return (EPERM);
}

/*
 * Get connection memory
 *
 * Debugging-only read-only sysctl: dumps RCMABR base plus the RCM area.
 */
static int
hatm_sysctl_cm(SYSCTL_HANDLER_ARGS)
{
	struct hatm_softc *sc = arg1;
	int error, i;
	uint32_t *val;

	val =
malloc(sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1), M_TEMP, M_WAITOK); + + mtx_lock(&sc->mtx); + val[0] = READ4(sc, HE_REGO_RCMABR_BA); + for (i = 0; i < HE_CONFIG_RXMEM; i++) + val[i + 1] = READ_RCM4(sc, i); + mtx_unlock(&sc->mtx); + + error = SYSCTL_OUT(req, val, sizeof(uint32_t) * (HE_CONFIG_RXMEM + 1)); + free(val, M_TEMP); + if (error != 0 || req->newptr == NULL) + return (error); + + return (EPERM); +} + +/* + * Get local buffer memory + */ +static int +hatm_sysctl_lbmem(SYSCTL_HANDLER_ARGS) +{ + struct hatm_softc *sc = arg1; + int error, i; + uint32_t *val; + u_int bytes = (1 << 21); + + val = malloc(bytes, M_TEMP, M_WAITOK); + + mtx_lock(&sc->mtx); + for (i = 0; i < bytes / 4; i++) + val[i] = READ_LB4(sc, i); + mtx_unlock(&sc->mtx); + + error = SYSCTL_OUT(req, val, bytes); + free(val, M_TEMP); + if (error != 0 || req->newptr == NULL) + return (error); + + return (EPERM); +} + +/* + * Get all card registers + */ +static int +hatm_sysctl_heregs(SYSCTL_HANDLER_ARGS) +{ + struct hatm_softc *sc = arg1; + int error, i; + uint32_t *val; + + val = malloc(HE_REGO_END, M_TEMP, M_WAITOK); + + mtx_lock(&sc->mtx); + for (i = 0; i < HE_REGO_END; i += 4) + val[i / 4] = READ4(sc, i); + mtx_unlock(&sc->mtx); + + error = SYSCTL_OUT(req, val, HE_REGO_END); + free(val, M_TEMP); + if (error != 0 || req->newptr == NULL) + return (error); + + return (EPERM); +} +#endif + +/* + * Suni register access + */ +/* + * read at most n SUNI registers starting at reg into val + */ +static int +hatm_utopia_readregs(struct ifatm *ifatm, u_int reg, uint8_t *val, u_int *n) +{ + u_int i; + struct hatm_softc *sc = (struct hatm_softc *)ifatm; + + if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) + return (EINVAL); + if (reg + *n > (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4) + *n = reg - (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4; + + mtx_assert(&sc->mtx, MA_OWNED); + for (i = 0; i < *n; i++) + val[i] = READ4(sc, HE_REGO_SUNI + 4 * (reg + i)); + + return (0); +} + +/* + * change the bits given by mask to them 
 in val in register reg
 *
 * Performs a read-modify-write on a single SUNI register: bits set in
 * mask are replaced by the corresponding bits of val, all other bits
 * are preserved. Caller must hold the softc lock.
 */
static int
hatm_utopia_writereg(struct ifatm *ifatm, u_int reg, u_int mask, u_int val)
{
	uint32_t regval;
	struct hatm_softc *sc = (struct hatm_softc *)ifatm;

	if (reg >= (HE_REGO_SUNI_END - HE_REGO_SUNI) / 4)
		return (EINVAL);

	mtx_assert(&sc->mtx, MA_OWNED);
	regval = READ4(sc, HE_REGO_SUNI + 4 * reg);
	regval = (regval & ~mask) | (val & mask);
	WRITE4(sc, HE_REGO_SUNI + 4 * reg, regval);

	return (0);
}

/* PHY access methods handed to the utopia(4) framework */
static struct utopia_methods hatm_utopia_methods = {
	hatm_utopia_readregs,
	hatm_utopia_writereg,
};

/*
 * Detach - if it is running, stop. Destroy.
 *
 * Stops the card and the PHY state machine under the lock, detaches
 * the ATM interface and finally releases all resources via
 * hatm_destroy().
 */
static int
hatm_detach(device_t dev)
{
	struct hatm_softc *sc = (struct hatm_softc *)device_get_softc(dev);

	mtx_lock(&sc->mtx);
	hatm_stop(sc);
	if (sc->utopia.state & UTP_ST_ATTACHED) {
		utopia_stop(&sc->utopia);
		utopia_detach(&sc->utopia);
	}
	mtx_unlock(&sc->mtx);

	atm_ifdetach(&sc->ifatm.ifnet);

	hatm_destroy(sc);

	return (0);
}

/*
 * Attach to the device. Assume that no locking is needed here.
 * All resource we allocate here are freed by calling hatm_destroy.
+ */ +static int +hatm_attach(device_t dev) +{ + struct hatm_softc *sc; + int unit; + int error; + uint32_t v; + struct ifnet *ifp; + + sc = device_get_softc(dev); + unit = device_get_unit(dev); + + sc->dev = dev; + sc->ifatm.mib.device = ATM_DEVICE_HE155; + sc->ifatm.mib.serial = 0; + sc->ifatm.mib.hw_version = 0; + sc->ifatm.mib.sw_version = 0; + sc->ifatm.mib.vpi_bits = HE_CONFIG_VPI_BITS; + sc->ifatm.mib.vci_bits = HE_CONFIG_VCI_BITS; + sc->ifatm.mib.max_vpcs = 0; + sc->ifatm.mib.max_vccs = HE_MAX_VCCS; + sc->ifatm.mib.media = IFM_ATM_UNKNOWN; + sc->he622 = 0; + sc->ifatm.phy = &sc->utopia; + + SLIST_INIT(&sc->mbuf0_list); + SLIST_INIT(&sc->mbuf1_list); + SLIST_INIT(&sc->tpd_free); + + mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); + mtx_init(&sc->mbuf0_mtx, device_get_nameunit(dev), "HEb0", MTX_DEF); + mtx_init(&sc->mbuf1_mtx, device_get_nameunit(dev), "HEb1", MTX_DEF); + cv_init(&sc->vcc_cv, "HEVCCcv"); + cv_init(&sc->cv_rcclose, "RCClose"); + + sysctl_ctx_init(&sc->sysctl_ctx); + + /* + * 4.2 BIOS Configuration + */ + v = pci_read_config(dev, PCIR_COMMAND, 2); + v |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN; + pci_write_config(dev, PCIR_COMMAND, v, 2); + + /* + * 4.3 PCI Bus Controller-Specific Initialisation + */ + v = pci_read_config(dev, HE_PCIR_GEN_CNTL_0, 4); + v |= HE_PCIM_CTL0_MRL | HE_PCIM_CTL0_MRM | HE_PCIM_CTL0_IGNORE_TIMEOUT; +#if BYTE_ORDER == BIG_ENDIAN && 0 + v |= HE_PCIM_CTL0_BIGENDIAN; +#endif + pci_write_config(dev, HE_PCIR_GEN_CNTL_0, v, 4); + + /* + * Map memory + */ + v = pci_read_config(dev, PCIR_COMMAND, 2); + if (!(v & PCIM_CMD_MEMEN)) { + device_printf(dev, "failed to enable memory\n"); + error = ENXIO; + goto failed; + } + sc->memid = PCIR_MAPS; + sc->memres = bus_alloc_resource(dev, SYS_RES_MEMORY, &sc->memid, + 0, ~0, 1, RF_ACTIVE); + if (sc->memres == NULL) { + device_printf(dev, "could not map memory\n"); + error = ENXIO; + goto failed; + } + sc->memh = rman_get_bushandle(sc->memres); + 
sc->memt = rman_get_bustag(sc->memres); + + /* + * ALlocate a DMA tag for subsequent allocations + */ + if (bus_dma_tag_create(NULL, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, + NULL, NULL, + BUS_SPACE_MAXSIZE_32BIT, 1, + BUS_SPACE_MAXSIZE_32BIT, 0, &sc->parent_tag)) { + device_printf(dev, "could not allocate DMA tag\n"); + error = ENOMEM; + goto failed; + } + + if (bus_dma_tag_create(sc->parent_tag, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, + NULL, NULL, + MBUF_ALLOC_SIZE, 1, + MBUF_ALLOC_SIZE, 0, &sc->mbuf_tag)) { + device_printf(dev, "could not allocate mbuf DMA tag\n"); + error = ENOMEM; + goto failed; + } + + /* + * Allocate a DMA tag for packets to send. Here we have a problem with + * the specification of the maximum number of segments. Theoretically + * this would be the size of the transmit ring - 1 multiplied by 3, + * but this would not work. So make the maximum number of TPDs + * occupied by one packet a configuration parameter. + */ + if (bus_dma_tag_create(NULL, 1, 0, + BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, + HE_MAX_PDU, 3 * HE_CONFIG_MAX_TPD_PER_PACKET, HE_MAX_PDU, 0, + &sc->tx_tag)) { + device_printf(dev, "could not allocate TX tag\n"); + error = ENOMEM; + goto failed; + } + + /* + * Setup the interrupt + */ + sc->irqid = 0; + sc->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, + 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); + if (sc->irqres == 0) { + device_printf(dev, "could not allocate irq\n"); + error = ENXIO; + goto failed; + } + + ifp = &sc->ifatm.ifnet; + ifp->if_softc = sc; + ifp->if_unit = unit; + ifp->if_name = "hatm"; + + /* + * Make the sysctl tree + */ + error = ENOMEM; + if ((sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, + SYSCTL_STATIC_CHILDREN(_hw_atm), OID_AUTO, + device_get_nameunit(dev), CTLFLAG_RD, 0, "")) == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "istats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_ISTATS, + hatm_sysctl, 
"LU", "internal statistics") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "stats", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, CTL_STATS, + hatm_sysctl, "LU", "card statistics") == NULL) + goto failed; + +#ifdef HATM_DEBUG + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "tsr", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_tsr, "S", "transmission status registers") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "tpd", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_tpd, "S", "transmission packet descriptors") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "mbox", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_mbox, "S", "mbox registers") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "cm", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_cm, "S", "connection memory") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "heregs", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_heregs, "S", "card registers") == NULL) + goto failed; + + if (SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + OID_AUTO, "lbmem", CTLFLAG_RD | CTLTYPE_OPAQUE, sc, 0, + hatm_sysctl_lbmem, "S", "local memory") == NULL) + goto failed; + + kenv_getuint(sc, "debug", &sc->debug, 0, 1); +#endif + + /* + * Configure + */ + if ((error = hatm_configure(sc)) != 0) + goto failed; + + /* + * Compute memory parameters + */ + if (sc->rbp_s0.size != 0) { + sc->rbp_s0.mask = (sc->rbp_s0.size - 1) << 3; + sc->rbp_s0.mem.size = sc->rbp_s0.size * 8; + sc->rbp_s0.mem.align = sc->rbp_s0.mem.size; + } + if (sc->rbp_l0.size != 0) { + sc->rbp_l0.mask = (sc->rbp_l0.size - 1) << 3; + sc->rbp_l0.mem.size = sc->rbp_l0.size * 8; + sc->rbp_l0.mem.align = 
sc->rbp_l0.mem.size; + } + if (sc->rbp_s1.size != 0) { + sc->rbp_s1.mask = (sc->rbp_s1.size - 1) << 3; + sc->rbp_s1.mem.size = sc->rbp_s1.size * 8; + sc->rbp_s1.mem.align = sc->rbp_s1.mem.size; + } + if (sc->rbrq_0.size != 0) { + sc->rbrq_0.mem.size = sc->rbrq_0.size * 8; + sc->rbrq_0.mem.align = sc->rbrq_0.mem.size; + } + if (sc->rbrq_1.size != 0) { + sc->rbrq_1.mem.size = sc->rbrq_1.size * 8; + sc->rbrq_1.mem.align = sc->rbrq_1.mem.size; + } + + sc->irq_0.mem.size = sc->irq_0.size * sizeof(uint32_t); + sc->irq_0.mem.align = 4 * 1024; + + sc->tbrq.mem.size = sc->tbrq.size * 4; + sc->tbrq.mem.align = 2 * sc->tbrq.mem.size; /* ZZZ */ + + sc->tpdrq.mem.size = sc->tpdrq.size * 8; + sc->tpdrq.mem.align = sc->tpdrq.mem.size; + + sc->hsp_mem.size = sizeof(struct he_hsp); + sc->hsp_mem.align = 1024; + + sc->lbufs_size = sc->rbp_l0.size + sc->rbrq_0.size; + sc->tpd_total = sc->tbrq.size + sc->tpdrq.size; + sc->tpds.align = 64; + sc->tpds.size = sc->tpd_total * HE_TPD_SIZE; + + hatm_init_rmaps(sc); + hatm_init_smbufs(sc); + if ((error = hatm_init_tpds(sc)) != 0) + goto failed; + + /* + * Allocate memory + */ + if ((error = hatm_alloc_dmamem(sc, "IRQ", &sc->irq_0.mem)) != 0 || + (error = hatm_alloc_dmamem(sc, "TBRQ0", &sc->tbrq.mem)) != 0 || + (error = hatm_alloc_dmamem(sc, "TPDRQ", &sc->tpdrq.mem)) != 0 || + (error = hatm_alloc_dmamem(sc, "HSP", &sc->hsp_mem)) != 0) + goto failed; + + if (sc->rbp_s0.mem.size != 0 && + (error = hatm_alloc_dmamem(sc, "RBPS0", &sc->rbp_s0.mem))) + goto failed; + if (sc->rbp_l0.mem.size != 0 && + (error = hatm_alloc_dmamem(sc, "RBPL0", &sc->rbp_l0.mem))) + goto failed; + if (sc->rbp_s1.mem.size != 0 && + (error = hatm_alloc_dmamem(sc, "RBPS1", &sc->rbp_s1.mem))) + goto failed; + + if (sc->rbrq_0.mem.size != 0 && + (error = hatm_alloc_dmamem(sc, "RBRQ0", &sc->rbrq_0.mem))) + goto failed; + if (sc->rbrq_1.mem.size != 0 && + (error = hatm_alloc_dmamem(sc, "RBRQ1", &sc->rbrq_1.mem))) + goto failed; + + if ((sc->vcc_zone = uma_zcreate("HE vccs", 
sizeof(struct hevcc), + NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0)) == NULL) { + device_printf(dev, "cannot allocate zone for vccs\n"); + goto failed; + } + + /* + * 4.4 Reset the card. + */ + if ((error = hatm_reset(sc)) != 0) + goto failed; + + /* + * Read the prom. + */ + hatm_init_bus_width(sc); + hatm_init_read_eeprom(sc); + hatm_init_endianess(sc); + + /* + * Initialize interface + */ + ifp->if_flags = IFF_SIMPLEX; + ifp->if_ioctl = hatm_ioctl; + ifp->if_start = hatm_start; + ifp->if_watchdog = NULL; + ifp->if_init = hatm_init; + + utopia_attach(&sc->utopia, &sc->ifatm, &sc->media, &sc->mtx, + &sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree), + &hatm_utopia_methods); + utopia_init_media(&sc->utopia); + + /* these two SUNI routines need the lock */ + mtx_lock(&sc->mtx); + /* poll while we are not running */ + sc->utopia.flags |= UTP_FL_POLL_CARRIER; + utopia_start(&sc->utopia); + utopia_reset(&sc->utopia); + mtx_unlock(&sc->mtx); + + atm_ifattach(ifp); + +#ifdef ENABLE_BPF + bpfattach(ifp, DLT_ATM_RFC1483, sizeof(struct atmllc)); +#endif + + error = bus_setup_intr(dev, sc->irqres, INTR_TYPE_NET, hatm_intr, + &sc->irq_0, &sc->ih); + if (error != 0) { + device_printf(dev, "could not setup interrupt\n"); + hatm_detach(dev); + return (error); + } + + return (0); + + failed: + hatm_destroy(sc); + return (error); +} + +/* + * Start the interface. Assume a state as from attach(). + */ +void +hatm_initialize(struct hatm_softc *sc) +{ + uint32_t v; + static const u_int layout[2][7] = HE_CONFIG_MEM_LAYOUT; + + if (sc->ifatm.ifnet.if_flags & IFF_RUNNING) + return; + + hatm_init_bus_width(sc); + hatm_init_endianess(sc); + + if_printf(&sc->ifatm.ifnet, "%s, Rev. %s, S/N %u, " + "MAC=%02x:%02x:%02x:%02x:%02x:%02x (%ubit PCI)\n", + sc->prod_id, sc->rev, sc->ifatm.mib.serial, + sc->ifatm.mib.esi[0], sc->ifatm.mib.esi[1], sc->ifatm.mib.esi[2], + sc->ifatm.mib.esi[3], sc->ifatm.mib.esi[4], sc->ifatm.mib.esi[5], + sc->pci64 ? 
64 : 32); + + /* + * 4.8 SDRAM Controller Initialisation + * 4.9 Initialize RNUM value + */ + if (sc->he622) + WRITE4(sc, HE_REGO_SDRAM_CNTL, HE_REGM_SDRAM_64BIT); + else + WRITE4(sc, HE_REGO_SDRAM_CNTL, 0); + BARRIER_W(sc); + + v = READ4(sc, HE_REGO_LB_SWAP); + BARRIER_R(sc); + v |= 0xf << HE_REGS_LBSWAP_RNUM; + WRITE4(sc, HE_REGO_LB_SWAP, v); + BARRIER_W(sc); + + hatm_init_irq(sc, &sc->irq_0, 0); + hatm_clear_irq(sc, 1); + hatm_clear_irq(sc, 2); + hatm_clear_irq(sc, 3); + + WRITE4(sc, HE_REGO_GRP_1_0_MAP, 0); + WRITE4(sc, HE_REGO_GRP_3_2_MAP, 0); + WRITE4(sc, HE_REGO_GRP_5_4_MAP, 0); + WRITE4(sc, HE_REGO_GRP_7_6_MAP, 0); + BARRIER_W(sc); + + /* + * 4.11 Enable PCI Bus Controller State Machine + */ + v = READ4(sc, HE_REGO_HOST_CNTL); + BARRIER_R(sc); + v |= HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB | + HE_REGM_HOST_QUICK_RD | HE_REGM_HOST_QUICK_WR; + WRITE4(sc, HE_REGO_HOST_CNTL, v); + BARRIER_W(sc); + + /* + * 5.1.1 Generic configuration state + */ + sc->cells_per_row = layout[sc->he622][0]; + sc->bytes_per_row = layout[sc->he622][1]; + sc->r0_numrows = layout[sc->he622][2]; + sc->tx_numrows = layout[sc->he622][3]; + sc->r1_numrows = layout[sc->he622][4]; + sc->r0_startrow = layout[sc->he622][5]; + sc->tx_startrow = sc->r0_startrow + sc->r0_numrows; + sc->r1_startrow = sc->tx_startrow + sc->tx_numrows; + sc->cells_per_lbuf = layout[sc->he622][6]; + + sc->r0_numbuffs = sc->r0_numrows * (sc->cells_per_row / + sc->cells_per_lbuf); + sc->r1_numbuffs = sc->r1_numrows * (sc->cells_per_row / + sc->cells_per_lbuf); + sc->tx_numbuffs = sc->tx_numrows * (sc->cells_per_row / + sc->cells_per_lbuf); + + if (sc->r0_numbuffs > 2560) + sc->r0_numbuffs = 2560; + if (sc->r1_numbuffs > 2560) + sc->r1_numbuffs = 2560; + if (sc->tx_numbuffs > 5120) + sc->tx_numbuffs = 5120; + + DBG(sc, ATTACH, ("cells_per_row=%u bytes_per_row=%u r0_numrows=%u " + "tx_numrows=%u r1_numrows=%u r0_startrow=%u tx_startrow=%u " + "r1_startrow=%u cells_per_lbuf=%u\nr0_numbuffs=%u r1_numbuffs=%u " + 
"tx_numbuffs=%u\n", sc->cells_per_row, sc->bytes_per_row, + sc->r0_numrows, sc->tx_numrows, sc->r1_numrows, sc->r0_startrow, + sc->tx_startrow, sc->r1_startrow, sc->cells_per_lbuf, + sc->r0_numbuffs, sc->r1_numbuffs, sc->tx_numbuffs)); + + /* + * 5.1.2 Configure Hardware dependend registers + */ + if (sc->he622) { + WRITE4(sc, HE_REGO_LBARB, + (0x2 << HE_REGS_LBARB_SLICE) | + (0xf << HE_REGS_LBARB_RNUM) | + (0x3 << HE_REGS_LBARB_THPRI) | + (0x3 << HE_REGS_LBARB_RHPRI) | + (0x2 << HE_REGS_LBARB_TLPRI) | + (0x1 << HE_REGS_LBARB_RLPRI) | + (0x28 << HE_REGS_LBARB_BUS_MULT) | + (0x50 << HE_REGS_LBARB_NET_PREF)); + BARRIER_W(sc); + WRITE4(sc, HE_REGO_SDRAMCON, + /* HW bug: don't use banking */ + /* HE_REGM_SDRAMCON_BANK | */ + HE_REGM_SDRAMCON_WIDE | + (0x384 << HE_REGS_SDRAMCON_REF)); + BARRIER_W(sc); + WRITE4(sc, HE_REGO_RCMCONFIG, + (0x1 << HE_REGS_RCMCONFIG_BANK_WAIT) | + (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | + (0x0 << HE_REGS_RCMCONFIG_TYPE)); + WRITE4(sc, HE_REGO_TCMCONFIG, + (0x2 << HE_REGS_TCMCONFIG_BANK_WAIT) | + (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | + (0x0 << HE_REGS_TCMCONFIG_TYPE)); + } else { + WRITE4(sc, HE_REGO_LBARB, + (0x2 << HE_REGS_LBARB_SLICE) | + (0xf << HE_REGS_LBARB_RNUM) | + (0x3 << HE_REGS_LBARB_THPRI) | + (0x3 << HE_REGS_LBARB_RHPRI) | + (0x2 << HE_REGS_LBARB_TLPRI) | + (0x1 << HE_REGS_LBARB_RLPRI) | + (0x46 << HE_REGS_LBARB_BUS_MULT) | + (0x8C << HE_REGS_LBARB_NET_PREF)); + BARRIER_W(sc); + WRITE4(sc, HE_REGO_SDRAMCON, + /* HW bug: don't use banking */ + /* HE_REGM_SDRAMCON_BANK | */ + (0x150 << HE_REGS_SDRAMCON_REF)); + BARRIER_W(sc); + WRITE4(sc, HE_REGO_RCMCONFIG, + (0x0 << HE_REGS_RCMCONFIG_BANK_WAIT) | + (0x1 << HE_REGS_RCMCONFIG_RW_WAIT) | + (0x0 << HE_REGS_RCMCONFIG_TYPE)); + WRITE4(sc, HE_REGO_TCMCONFIG, + (0x1 << HE_REGS_TCMCONFIG_BANK_WAIT) | + (0x1 << HE_REGS_TCMCONFIG_RW_WAIT) | + (0x0 << HE_REGS_TCMCONFIG_TYPE)); + } + WRITE4(sc, HE_REGO_LBCONFIG, (sc->cells_per_lbuf * 48)); + + WRITE4(sc, HE_REGO_RLBC_H, 0); + WRITE4(sc, 
HE_REGO_RLBC_T, 0); + WRITE4(sc, HE_REGO_RLBC_H2, 0); + + WRITE4(sc, HE_REGO_RXTHRSH, 512); + WRITE4(sc, HE_REGO_LITHRSH, 256); + + WRITE4(sc, HE_REGO_RLBF0_C, sc->r0_numbuffs); + WRITE4(sc, HE_REGO_RLBF1_C, sc->r1_numbuffs); + + if (sc->he622) { + WRITE4(sc, HE_REGO_RCCONFIG, + (8 << HE_REGS_RCCONFIG_UTDELAY) | + (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | + (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); + WRITE4(sc, HE_REGO_TXCONFIG, + (32 << HE_REGS_TXCONFIG_THRESH) | + (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | + (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); + } else { + WRITE4(sc, HE_REGO_RCCONFIG, + (0 << HE_REGS_RCCONFIG_UTDELAY) | + HE_REGM_RCCONFIG_UT_MODE | + (sc->ifatm.mib.vpi_bits << HE_REGS_RCCONFIG_VP) | + (sc->ifatm.mib.vci_bits << HE_REGS_RCCONFIG_VC)); + WRITE4(sc, HE_REGO_TXCONFIG, + (32 << HE_REGS_TXCONFIG_THRESH) | + HE_REGM_TXCONFIG_UTMODE | + (sc->ifatm.mib.vci_bits << HE_REGS_TXCONFIG_VCI_MASK) | + (sc->tx_numbuffs << HE_REGS_TXCONFIG_LBFREE)); + } + + WRITE4(sc, HE_REGO_TXAAL5_PROTO, 0); + + WRITE4(sc, HE_REGO_RHCONFIG, + HE_REGM_RHCONFIG_PHYENB | + ((sc->he622 ? 
0x41 : 0x31) << HE_REGS_RHCONFIG_PTMR_PRE)); + BARRIER_W(sc); + + hatm_init_cm(sc); + + hatm_init_rx_buffer_pool(sc, 0, sc->r0_startrow, sc->r0_numbuffs); + hatm_init_rx_buffer_pool(sc, 1, sc->r1_startrow, sc->r1_numbuffs); + hatm_init_tx_buffer_pool(sc, sc->tx_startrow, sc->tx_numbuffs); + + hatm_init_imed_queues(sc); + + /* + * 5.1.6 Application tunable Parameters + */ + WRITE4(sc, HE_REGO_MCC, 0); + WRITE4(sc, HE_REGO_OEC, 0); + WRITE4(sc, HE_REGO_DCC, 0); + WRITE4(sc, HE_REGO_CEC, 0); + + hatm_init_cs_block(sc); + hatm_init_cs_block_cm(sc); + + hatm_init_rpool(sc, &sc->rbp_s0, 0, 0); + hatm_init_rpool(sc, &sc->rbp_l0, 0, 1); + hatm_init_rpool(sc, &sc->rbp_s1, 1, 0); + hatm_clear_rpool(sc, 1, 1); + hatm_clear_rpool(sc, 2, 0); + hatm_clear_rpool(sc, 2, 1); + hatm_clear_rpool(sc, 3, 0); + hatm_clear_rpool(sc, 3, 1); + hatm_clear_rpool(sc, 4, 0); + hatm_clear_rpool(sc, 4, 1); + hatm_clear_rpool(sc, 5, 0); + hatm_clear_rpool(sc, 5, 1); + hatm_clear_rpool(sc, 6, 0); + hatm_clear_rpool(sc, 6, 1); + hatm_clear_rpool(sc, 7, 0); + hatm_clear_rpool(sc, 7, 1); + hatm_init_rbrq(sc, &sc->rbrq_0, 0); + hatm_init_rbrq(sc, &sc->rbrq_1, 1); + hatm_clear_rbrq(sc, 2); + hatm_clear_rbrq(sc, 3); + hatm_clear_rbrq(sc, 4); + hatm_clear_rbrq(sc, 5); + hatm_clear_rbrq(sc, 6); + hatm_clear_rbrq(sc, 7); + + sc->lbufs_next = 0; + bzero(sc->lbufs, sizeof(sc->lbufs[0]) * sc->lbufs_size); + + hatm_init_tbrq(sc, &sc->tbrq, 0); + hatm_clear_tbrq(sc, 1); + hatm_clear_tbrq(sc, 2); + hatm_clear_tbrq(sc, 3); + hatm_clear_tbrq(sc, 4); + hatm_clear_tbrq(sc, 5); + hatm_clear_tbrq(sc, 6); + hatm_clear_tbrq(sc, 7); + + hatm_init_tpdrq(sc); + + WRITE4(sc, HE_REGO_UBUFF_BA, (sc->he622 ? 
0x104780 : 0x800)); + + /* + * Initialize HSP + */ + bzero(sc->hsp_mem.base, sc->hsp_mem.size); + sc->hsp = sc->hsp_mem.base; + WRITE4(sc, HE_REGO_HSP_BA, sc->hsp_mem.paddr); + + /* + * 5.1.12 Enable transmit and receive + * Enable bus master and interrupts + */ + v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0); + v |= 0x18000000; + WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v); + + v = READ4(sc, HE_REGO_RCCONFIG); + v |= HE_REGM_RCCONFIG_RXENB; + WRITE4(sc, HE_REGO_RCCONFIG, v); + + v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); + v |= HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB; + pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); + + sc->ifatm.ifnet.if_flags |= IFF_RUNNING; + sc->ifatm.ifnet.if_baudrate = 53 * 8 * sc->ifatm.mib.pcr; + + sc->utopia.flags &= ~UTP_FL_POLL_CARRIER; +} + +/* + * This functions stops the card and frees all resources allocated after + * the attach. Must have the global lock. + */ +void +hatm_stop(struct hatm_softc *sc) +{ + uint32_t v; + u_int i, p, cid; + struct mbuf_chunk_hdr *ch; + struct mbuf_page *pg; + + mtx_assert(&sc->mtx, MA_OWNED); + + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) + return; + sc->ifatm.ifnet.if_flags &= ~IFF_RUNNING; + + sc->utopia.flags |= UTP_FL_POLL_CARRIER; + + /* + * Stop and reset the hardware so that everything remains + * stable. 
+ */ + v = READ_MBOX4(sc, HE_REGO_CS_ERCTL0); + v &= ~0x18000000; + WRITE_MBOX4(sc, HE_REGO_CS_ERCTL0, v); + + v = READ4(sc, HE_REGO_RCCONFIG); + v &= ~HE_REGM_RCCONFIG_RXENB; + WRITE4(sc, HE_REGO_RCCONFIG, v); + + WRITE4(sc, HE_REGO_RHCONFIG, (0x2 << HE_REGS_RHCONFIG_PTMR_PRE)); + BARRIER_W(sc); + + v = READ4(sc, HE_REGO_HOST_CNTL); + BARRIER_R(sc); + v &= ~(HE_REGM_HOST_OUTFF_ENB | HE_REGM_HOST_CMDFF_ENB); + WRITE4(sc, HE_REGO_HOST_CNTL, v); + BARRIER_W(sc); + + /* + * Disable bust master and interrupts + */ + v = pci_read_config(sc->dev, HE_PCIR_GEN_CNTL_0, 4); + v &= ~(HE_PCIM_CTL0_INIT_ENB | HE_PCIM_CTL0_INT_PROC_ENB); + pci_write_config(sc->dev, HE_PCIR_GEN_CNTL_0, v, 4); + + (void)hatm_reset(sc); + + /* + * Give any waiters on closing a VCC a chance. They will stop + * to wait if they see that IFF_RUNNING disappeared. + */ + while (!(cv_waitq_empty(&sc->vcc_cv))) { + cv_broadcast(&sc->vcc_cv); + DELAY(100); + } + while (!(cv_waitq_empty(&sc->cv_rcclose))) { + cv_broadcast(&sc->cv_rcclose); + } + + /* + * Now free all resources. + */ + + /* + * Free the large mbufs that are given to the card. + */ + for (i = 0 ; i < sc->lbufs_size; i++) { + if (sc->lbufs[i] != NULL) { + bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[i]); + m_freem(sc->lbufs[i]); + sc->lbufs[i] = NULL; + } + } + + /* + * Free small buffers + */ + for (p = 0; p < sc->mbuf_npages; p++) { + pg = sc->mbuf_pages[p]; + for (i = 0; i < pg->hdr.nchunks; i++) { + if (MBUF_TST_BIT(pg->hdr.card, i)) { + MBUF_CLR_BIT(pg->hdr.card, i); + MBUF_CLR_BIT(pg->hdr.used, i); + ch = (struct mbuf_chunk_hdr *) ((char *)pg + + i * pg->hdr.chunksize + pg->hdr.hdroff); + m_freem(ch->mbuf); + } + } + } + + hatm_stop_tpds(sc); + + /* + * Free all partial reassembled PDUs on any VCC. 
+ */ + for (cid = 0; cid < HE_MAX_VCCS; cid++) { + if (sc->vccs[cid] != NULL) { + if (sc->vccs[cid]->chain != NULL) + m_freem(sc->vccs[cid]->chain); + uma_zfree(sc->vcc_zone, sc->vccs[cid]); + } + } + bzero(sc->vccs, sizeof(sc->vccs)); + sc->cbr_bw = 0; + sc->open_vccs = 0; + + /* + * Reset CBR rate groups + */ + bzero(sc->rate_ctrl, sizeof(sc->rate_ctrl)); + + if (sc->rbp_s0.size != 0) + bzero(sc->rbp_s0.mem.base, sc->rbp_s0.mem.size); + if (sc->rbp_l0.size != 0) + bzero(sc->rbp_l0.mem.base, sc->rbp_l0.mem.size); + if (sc->rbp_s1.size != 0) + bzero(sc->rbp_s1.mem.base, sc->rbp_s1.mem.size); + if (sc->rbrq_0.size != 0) + bzero(sc->rbrq_0.mem.base, sc->rbrq_0.mem.size); + if (sc->rbrq_1.size != 0) + bzero(sc->rbrq_1.mem.base, sc->rbrq_1.mem.size); + + bzero(sc->tbrq.mem.base, sc->tbrq.mem.size); + bzero(sc->tpdrq.mem.base, sc->tpdrq.mem.size); + bzero(sc->hsp_mem.base, sc->hsp_mem.size); +} + +/************************************************************ + * + * Driver infrastructure + */ +devclass_t hatm_devclass; + +static device_method_t hatm_methods[] = { + DEVMETHOD(device_probe, hatm_probe), + DEVMETHOD(device_attach, hatm_attach), + DEVMETHOD(device_detach, hatm_detach), + {0,0} +}; +static driver_t hatm_driver = { + "hatm", + hatm_methods, + sizeof(struct hatm_softc), +}; +DRIVER_MODULE(hatm, pci, hatm_driver, hatm_devclass, NULL, 0); diff --git a/sys/dev/hatm/if_hatm_intr.c b/sys/dev/hatm/if_hatm_intr.c new file mode 100644 index 000000000000..b251aded1f9a --- /dev/null +++ b/sys/dev/hatm/if_hatm_intr.c @@ -0,0 +1,681 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * ForeHE driver. + * + * Interrupt handler. + */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE); +CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK); +CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK); +CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE); +CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE); +CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE); + +/* + * Either the queue treshold was crossed or a TPD with the INTR bit set + * was transmitted. 
+ */ +static void +he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group) +{ + uint32_t *tailp = &sc->hsp->group[group].tbrq_tail; + u_int no; + + while (q->head != (*tailp >> 2)) { + no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >> + HE_REGS_TPD_ADDR; + hatm_tx_complete(sc, TPD_ADDR(sc, no), + (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS)); + + if (++q->head == q->size) + q->head = 0; + } + WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2); +} + +/* + * DMA loader function for external mbuf page. + */ +static void +hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, + int error) +{ + if (error) { + printf("%s: mapping error %d\n", __func__, error); + return; + } + KASSERT(nsegs == 1, + ("too many segments for DMA: %d", nsegs)); + KASSERT(segs[0].ds_addr <= 0xffffffffLU, + ("phys addr too large %lx", (u_long)segs[0].ds_addr)); + + *(uint32_t *)arg = segs[0].ds_addr; +} + +/* + * Allocate a page of external mbuf storage for the small pools. + * Create a DMA map and load it. Put all the chunks onto the right + * free list. 
+ */ +static void +hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group) +{ + struct mbuf_page *pg; + int err; + u_int i; + + if (sc->mbuf_npages == HE_CONFIG_MAX_MBUF_PAGES) + return; + if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL) + return; + bzero(pg->hdr.card, sizeof(pg->hdr.card)); + bzero(pg->hdr.used, sizeof(pg->hdr.used)); + + err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map); + if (err != 0) { + if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n", + __func__, err); + free(pg, M_DEVBUF); + return; + } + err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE, + hatm_extbuf_helper, &pg->hdr.phys, 0); + if (err != 0) { + if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n", + __func__, err); + bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map); + free(pg, M_DEVBUF); + return; + } + + sc->mbuf_pages[sc->mbuf_npages] = pg; + + if (group == 0) { + struct mbuf0_chunk *c; + + pg->hdr.nchunks = MBUF0_PER_PAGE; + pg->hdr.chunksize = MBUF0_CHUNK; + pg->hdr.hdroff = sizeof(c->storage); + c = (struct mbuf0_chunk *)pg; + for (i = 0; i < MBUF0_PER_PAGE; i++, c++) { + c->hdr.pageno = sc->mbuf_npages; + c->hdr.chunkno = i; + SLIST_INSERT_HEAD(&sc->mbuf0_list, + (struct mbufx_free *)c, link); + } + } else { + struct mbuf1_chunk *c; + + pg->hdr.nchunks = MBUF1_PER_PAGE; + pg->hdr.chunksize = MBUF1_CHUNK; + pg->hdr.hdroff = sizeof(c->storage); + c = (struct mbuf1_chunk *)pg; + for (i = 0; i < MBUF1_PER_PAGE; i++, c++) { + c->hdr.pageno = sc->mbuf_npages; + c->hdr.chunkno = i; + SLIST_INSERT_HEAD(&sc->mbuf1_list, + (struct mbufx_free *)c, link); + } + } + sc->mbuf_npages++; +} + +/* + * Free an mbuf and put it onto the free list. 
+ */ +static void +hatm_mbuf0_free(void *buf, void *args) +{ + struct hatm_softc *sc = args; + struct mbuf0_chunk *c = buf; + + mtx_lock(&sc->mbuf0_mtx); + SLIST_INSERT_HEAD(&sc->mbuf0_list, (struct mbufx_free *)c, link); + MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); + mtx_unlock(&sc->mbuf0_mtx); +} +static void +hatm_mbuf1_free(void *buf, void *args) +{ + struct hatm_softc *sc = args; + struct mbuf1_chunk *c = buf; + + mtx_lock(&sc->mbuf1_mtx); + SLIST_INSERT_HEAD(&sc->mbuf1_list, (struct mbufx_free *)c, link); + MBUF_CLR_BIT(sc->mbuf_pages[c->hdr.pageno]->hdr.used, c->hdr.chunkno); + mtx_unlock(&sc->mbuf1_mtx); +} + +/* + * Allocate an external mbuf storage + */ +static int +hatm_mbuf_alloc(struct hatm_softc *sc, u_int group, struct mbuf *m, + uint32_t *phys, uint32_t *handle) +{ + struct mbufx_free *cf; + struct mbuf_page *pg; + + if (group == 0) { + struct mbuf0_chunk *buf0; + + mtx_lock(&sc->mbuf0_mtx); + if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { + hatm_mbuf_page_alloc(sc, group); + if ((cf = SLIST_FIRST(&sc->mbuf0_list)) == NULL) { + mtx_unlock(&sc->mbuf0_mtx); + return (0); + } + } + SLIST_REMOVE_HEAD(&sc->mbuf0_list, link); + buf0 = (struct mbuf0_chunk *)cf; + pg = sc->mbuf_pages[buf0->hdr.pageno]; + MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno); + mtx_unlock(&sc->mbuf0_mtx); + + m_extadd(m, (caddr_t)buf0, MBUF0_SIZE, hatm_mbuf0_free, sc, + M_PKTHDR, EXT_NET_DRV); + m->m_data += MBUF0_OFFSET; + buf0->hdr.mbuf = m; + + *handle = MBUF_MAKE_HANDLE(buf0->hdr.pageno, buf0->hdr.chunkno); + + } else if (group == 1) { + struct mbuf1_chunk *buf1; + + mtx_lock(&sc->mbuf1_mtx); + if ((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { + hatm_mbuf_page_alloc(sc, group); + if ((cf = SLIST_FIRST(&sc->mbuf1_list)) == NULL) { + mtx_unlock(&sc->mbuf1_mtx); + return (0); + } + } + SLIST_REMOVE_HEAD(&sc->mbuf1_list, link); + buf1 = (struct mbuf1_chunk *)cf; + pg = sc->mbuf_pages[buf1->hdr.pageno]; + MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno); 
+ mtx_unlock(&sc->mbuf1_mtx); + + m_extadd(m, (caddr_t)buf1, MBUF1_SIZE, hatm_mbuf1_free, sc, + M_PKTHDR, EXT_NET_DRV); + m->m_data += MBUF1_OFFSET; + buf1->hdr.mbuf = m; + + *handle = MBUF_MAKE_HANDLE(buf1->hdr.pageno, buf1->hdr.chunkno); + + } else + return (-1); + + *phys = pg->hdr.phys + (mtod(m, char *) - (char *)pg); + bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, BUS_DMASYNC_PREREAD); + + return (0); +} + +static void +hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error) +{ + uint32_t *ptr = (uint32_t *)arg; + + if (nsegs == 0) { + printf("%s: error=%d\n", __func__, error); + return; + } + KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs)); + KASSERT(segs[0].ds_addr <= 0xffffffffLU, + ("phys addr too large %lx", (u_long)segs[0].ds_addr)); + + *ptr = segs[0].ds_addr; +} + +/* + * Receive buffer pool interrupt. This means the number of entries in the + * queue has dropped below the threshold. Try to supply new buffers. + */ +static void +he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large, + u_int group) +{ + u_int ntail, upd; + struct mbuf *m; + int error; + + DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u", + large ? 
"large" : "small", group)); + + rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD) + & (rbp->size - 1); + + upd = 0; + for (;;) { + if ((ntail = rbp->tail + 1) == rbp->size) + ntail = 0; + if (ntail == rbp->head) + break; + + /* allocate the MBUF */ + if (large) { + if ((m = m_getcl(M_DONTWAIT, MT_DATA, + M_PKTHDR)) == NULL) { + if_printf(&sc->ifatm.ifnet, + "no mbuf clusters\n"); + break; + } + m->m_data += MBUFL_OFFSET; + + if (sc->lbufs[sc->lbufs_next] != NULL) + panic("hatm: lbufs full %u", sc->lbufs_next); + sc->lbufs[sc->lbufs_next] = m; + + if ((error = bus_dmamap_load(sc->mbuf_tag, + sc->rmaps[sc->lbufs_next], + m->m_data, rbp->bsize, hatm_mbuf_helper, + &rbp->rbp[rbp->tail].phys, 0)) != NULL) + panic("hatm: mbuf mapping failed %d", error); + + bus_dmamap_sync(sc->mbuf_tag, + sc->rmaps[sc->lbufs_next], + BUS_DMASYNC_PREREAD); + + rbp->rbp[rbp->tail].handle = sc->lbufs_next | + MBUF_LARGE_FLAG; + + if (++sc->lbufs_next == sc->lbufs_size) + sc->lbufs_next = 0; + + } else { + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) { + if_printf(&sc->ifatm.ifnet, "no mbufs\n"); + break; + } + if (hatm_mbuf_alloc(sc, group, m, + &rbp->rbp[rbp->tail].phys, + &rbp->rbp[rbp->tail].handle)) { + m_freem(m); + break; + } + } + DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x", + rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys)); + rbp->rbp[rbp->tail].handle <<= HE_REGS_RBRQ_ADDR; + + rbp->tail = ntail; + upd++; + } + if (upd) { + WRITE4(sc, HE_REGO_RBP_T(large, group), + (rbp->tail << HE_REGS_RBP_TAIL)); + } +} + +/* + * Extract the buffer and hand it to the receive routine + */ +static struct mbuf * +hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle) +{ + u_int pageno; + u_int chunkno; + struct mbuf *m; + + if (handle & MBUF_LARGE_FLAG) { + /* large buffer - sync and unload */ + handle &= ~MBUF_LARGE_FLAG; + DBG(sc, RX, ("RX large handle=%x", handle)); + + bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle], + 
BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]); + + m = sc->lbufs[handle]; + sc->lbufs[handle] = NULL; + + return (m); + } + + MBUF_PARSE_HANDLE(handle, pageno, chunkno); + + DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle, + pageno, chunkno)); + + if (group == 0) { + struct mbuf0_chunk *c0; + + c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno; + KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u", + c0->hdr.pageno, pageno)); + KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u", + c0->hdr.chunkno, chunkno)); + + m = c0->hdr.mbuf; + + } else { + struct mbuf1_chunk *c1; + + c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno; + KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u", + c1->hdr.pageno, pageno)); + KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u", + c1->hdr.chunkno, chunkno)); + + m = c1->hdr.mbuf; + } + MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno); + MBUF_SET_BIT(sc->mbuf_pages[pageno]->hdr.used, chunkno); + + bus_dmamap_sync(sc->mbuf_tag, sc->mbuf_pages[pageno]->hdr.map, + BUS_DMASYNC_POSTREAD); + + return (m); +} + +/* + * Interrupt because of receive buffer returned. 
+ */ +static void +he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group) +{ + struct he_rbrqen *e; + uint32_t flags, tail; + u_int cid, len; + struct mbuf *m; + + for (;;) { + tail = sc->hsp->group[group].rbrq_tail >> 3; + + if (rq->head == tail) + break; + + e = &rq->rbrq[rq->head]; + + flags = e->addr & HE_REGM_RBRQ_FLAGS; + if (!(flags & HE_REGM_RBRQ_HBUF_ERROR)) + m = hatm_rx_buffer(sc, group, + (e->addr & HE_REGM_RBRQ_ADDR) >> HE_REGS_RBRQ_ADDR); + else + m = NULL; + + cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID; + len = 4 * (e->len & HE_REGM_RBRQ_LEN); + + hatm_rx(sc, cid, flags, m, len); + + if (++rq->head == rq->size) + rq->head = 0; + } + WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3); +} + +void +hatm_intr(void *p) +{ + struct heirq *q = p; + struct hatm_softc *sc = q->sc; + u_int status; + u_int tail; + + /* if we have a stray interrupt with a non-initialized card, + * we cannot even lock before looking at the flag */ + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) + return; + + mtx_lock(&sc->mtx); + (void)READ4(sc, HE_REGO_INT_FIFO); + + tail = *q->tailp; + if (q->head == tail) { + /* workaround for tail pointer not updated bug (8.1.1) */ + DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered")); + + /* read the tail pointer from the card */ + tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) & + HE_REGM_IRQ_BASE_TAIL; + BARRIER_R(sc); + + sc->istats.bug_no_irq_upd++; + } + + /* clear the interrupt */ + WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA); + BARRIER_W(sc); + + while (q->head != tail) { + status = q->irq[q->head]; + q->irq[q->head] = HE_REGM_ITYPE_INVALID; + if (++q->head == (q->size - 1)) + q->head = 0; + + switch (status & HE_REGM_ITYPE) { + + case HE_REGM_ITYPE_TBRQ: + DBG(sc, INTR, ("TBRQ treshold %u", status & HE_REGM_IGROUP)); + sc->istats.itype_tbrq++; + he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); + break; + + case HE_REGM_ITYPE_TPD: + DBG(sc, INTR, ("TPD ready %u", status & HE_REGM_IGROUP)); + 
sc->istats.itype_tpd++; + he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP); + break; + + case HE_REGM_ITYPE_RBPS: + sc->istats.itype_rbps++; + switch (status & HE_REGM_IGROUP) { + + case 0: + he_intr_rbp(sc, &sc->rbp_s0, 0, 0); + break; + + case 1: + he_intr_rbp(sc, &sc->rbp_s1, 0, 1); + break; + + default: + if_printf(&sc->ifatm.ifnet, "bad INTR RBPS%u\n", + status & HE_REGM_IGROUP); + break; + } + break; + + case HE_REGM_ITYPE_RBPL: + sc->istats.itype_rbpl++; + switch (status & HE_REGM_IGROUP) { + + case 0: + he_intr_rbp(sc, &sc->rbp_l0, 1, 0); + break; + + default: + if_printf(&sc->ifatm.ifnet, "bad INTR RBPL%u\n", + status & HE_REGM_IGROUP); + break; + } + break; + + case HE_REGM_ITYPE_RBRQ: + DBG(sc, INTR, ("INTERRUPT RBRQ %u", status & HE_REGM_IGROUP)); + sc->istats.itype_rbrq++; + switch (status & HE_REGM_IGROUP) { + + case 0: + he_intr_rbrq(sc, &sc->rbrq_0, 0); + break; + + case 1: + if (sc->rbrq_1.size > 0) { + he_intr_rbrq(sc, &sc->rbrq_1, 1); + break; + } + /* FALLTHRU */ + + default: + if_printf(&sc->ifatm.ifnet, "bad INTR RBRQ%u\n", + status & HE_REGM_IGROUP); + break; + } + break; + + case HE_REGM_ITYPE_RBRQT: + DBG(sc, INTR, ("INTERRUPT RBRQT %u", status & HE_REGM_IGROUP)); + sc->istats.itype_rbrqt++; + switch (status & HE_REGM_IGROUP) { + + case 0: + he_intr_rbrq(sc, &sc->rbrq_0, 0); + break; + + case 1: + if (sc->rbrq_1.size > 0) { + he_intr_rbrq(sc, &sc->rbrq_1, 1); + break; + } + /* FALLTHRU */ + + default: + if_printf(&sc->ifatm.ifnet, "bad INTR RBRQT%u\n", + status & HE_REGM_IGROUP); + break; + } + break; + + case HE_REGM_ITYPE_PHYS: + sc->istats.itype_phys++; + utopia_intr(&sc->utopia); + break; + +#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID + case HE_REGM_ITYPE_UNKNOWN: + sc->istats.itype_unknown++; + if_printf(&sc->ifatm.ifnet, "bad interrupt\n"); + break; +#endif + + case HE_REGM_ITYPE_ERR: + sc->istats.itype_err++; + switch (status) { + + case HE_REGM_ITYPE_PERR: + if_printf(&sc->ifatm.ifnet, "parity error\n"); + break; + + case 
HE_REGM_ITYPE_ABORT: + if_printf(&sc->ifatm.ifnet, "abort interrupt " + "addr=0x%08x\n", + READ4(sc, HE_REGO_ABORT_ADDR)); + break; + + default: + if_printf(&sc->ifatm.ifnet, + "bad interrupt type %08x\n", status); + break; + } + break; + + case HE_REGM_ITYPE_INVALID: + /* this is the documented fix for the ISW bug 8.1.1 + * Note, that the documented fix is partly wrong: + * the ISWs should be intialized to 0xf8 not 0xff */ + sc->istats.bug_bad_isw++; + DBG(sc, INTR, ("hatm: invalid ISW bug triggered")); + he_intr_tbrq(sc, &sc->tbrq, 0); + he_intr_rbp(sc, &sc->rbp_s0, 0, 0); + he_intr_rbp(sc, &sc->rbp_l0, 1, 0); + he_intr_rbp(sc, &sc->rbp_s1, 0, 1); + he_intr_rbrq(sc, &sc->rbrq_0, 0); + he_intr_rbrq(sc, &sc->rbrq_1, 1); + utopia_intr(&sc->utopia); + break; + + default: + if_printf(&sc->ifatm.ifnet, "bad interrupt type %08x\n", + status); + break; + } + } + + /* write back head to clear queue */ + WRITE4(sc, HE_REGO_IRQ_HEAD(0), + ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) | + (q->thresh << HE_REGS_IRQ_HEAD_THRESH) | + (q->head << HE_REGS_IRQ_HEAD_HEAD)); + BARRIER_W(sc); + + /* workaround the back-to-back irq access problem (8.1.2) */ + (void)READ4(sc, HE_REGO_INT_FIFO); + BARRIER_R(sc); + + mtx_unlock(&sc->mtx); +} diff --git a/sys/dev/hatm/if_hatm_ioctl.c b/sys/dev/hatm/if_hatm_ioctl.c new file mode 100644 index 000000000000..704b1410763a --- /dev/null +++ b/sys/dev/hatm/if_hatm_ioctl.c @@ -0,0 +1,486 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * ForeHE driver. + * + * Ioctl handler. 
+ */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static u_int hatm_natm_traffic = ATMIO_TRAFFIC_UBR; +static u_int hatm_natm_pcr = 0; + +static int hatm_sysctl_natm_traffic(SYSCTL_HANDLER_ARGS); + +SYSCTL_DECL(_hw_atm); + +SYSCTL_PROC(_hw_atm, OID_AUTO, natm_traffic, CTLTYPE_UINT | CTLFLAG_RW, + &hatm_natm_traffic, sizeof(hatm_natm_traffic), hatm_sysctl_natm_traffic, + "IU", "traffic type for NATM connections"); +SYSCTL_UINT(_hw_atm, OID_AUTO, natm_pcr, CTLFLAG_RW, + &hatm_natm_pcr, 0, "PCR for NATM connections"); + +/* + * Return a table of VCCs in a freshly allocated memory area. + * Here we have a problem: we first count, how many vccs we need + * to return. The we allocate the memory and finally fill it in. + * Because we cannot lock while calling malloc, the number of active + * vccs may change while we're in malloc. So we allocate a couple of + * vccs more and if space anyway is not enough re-iterate. 
+ */ +static struct atmio_vcctable * +hatm_getvccs(struct hatm_softc *sc) +{ + u_int cid, alloc; + size_t len; + struct atmio_vcctable *vccs; + struct atmio_vcc *v; + + alloc = sc->open_vccs + 10; + vccs = NULL; + + again: + len = sizeof(*vccs) + alloc * sizeof(vccs->vccs[0]); + vccs = reallocf(vccs, len, M_DEVBUF, M_WAITOK); + bzero(vccs, len); + + /* + * Fill in + */ + vccs->count = 0; + v = vccs->vccs; + + mtx_lock(&sc->mtx); + for (cid = 0; cid < HE_MAX_VCCS; cid++) + if (sc->vccs[cid] != NULL && + (sc->vccs[cid]->vflags & (HE_VCC_RX_OPEN | + HE_VCC_TX_OPEN))) { + if (++vccs->count == alloc) { + /* + * too many - try again + */ + break; + } + *v++ = sc->vccs[cid]->param; + } + mtx_unlock(&sc->mtx); + + if (cid == HE_MAX_VCCS) + return (vccs); + + alloc *= 2; + goto again; +} + +/* + * Try to open the given VCC. + */ +static int +hatm_open_vcc(struct hatm_softc *sc, struct atmio_openvcc *arg) +{ + u_int cid; + struct hevcc *vcc; + int error = 0; + + DBG(sc, VCC, ("Open VCC: %u.%u flags=%#x", arg->param.vpi, + arg->param.vci, arg->param.flags)); + + if ((arg->param.vpi & ~HE_VPI_MASK) || + (arg->param.vci & ~HE_VCI_MASK) || + (arg->param.vci == 0)) + return (EINVAL); + cid = HE_CID(arg->param.vpi, arg->param.vci); + + if ((arg->param.flags & ATMIO_FLAG_NOTX) && + (arg->param.flags & ATMIO_FLAG_NORX)) + return (EINVAL); + + vcc = uma_zalloc(sc->vcc_zone, M_NOWAIT | M_ZERO); + if (vcc == NULL) + return (ENOMEM); + + mtx_lock(&sc->mtx); + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) { + error = EIO; + goto done; + } + if (sc->vccs[cid] != NULL) { + error = EBUSY; + goto done; + } + vcc->param = arg->param; + vcc->rxhand = arg->rxhand; + switch (vcc->param.aal) { + + case ATMIO_AAL_0: + case ATMIO_AAL_5: + case ATMIO_AAL_RAW: + break; + + default: + error = EINVAL; + goto done; + } + switch (vcc->param.traffic) { + + case ATMIO_TRAFFIC_UBR: + case ATMIO_TRAFFIC_CBR: + case ATMIO_TRAFFIC_ABR: + break; + + default: + error = EINVAL; + goto done; + } + vcc->ntpds = 0; 
+ vcc->chain = vcc->last = NULL; + vcc->ibytes = vcc->ipackets = 0; + vcc->obytes = vcc->opackets = 0; + + if (!(vcc->param.flags & ATMIO_FLAG_NOTX) && + (error = hatm_tx_vcc_can_open(sc, cid, vcc)) != 0) + goto done; + + /* ok - go ahead */ + sc->vccs[cid] = vcc; + + if (!(vcc->param.flags & ATMIO_FLAG_NOTX)) + hatm_tx_vcc_open(sc, cid); + if (!(vcc->param.flags & ATMIO_FLAG_NORX)) + hatm_rx_vcc_open(sc, cid); + +#ifdef notyet + /* inform management about non-NG and NG-PVCs */ + if (!(vcc->param.flags & ATMIO_FLAG_NG) || + (vcc->param.flags & ATMIO_FLAG_PVC)) + atm_message(&sc->ifatm.ifnet, ATM_MSG_VCC_CHANGED, + (1 << 24) | (arg->vpi << 16) | arg->vci); +#endif + + /* don't free below */ + vcc = NULL; + + sc->open_vccs++; + + done: + mtx_unlock(&sc->mtx); + if (vcc != NULL) + uma_zfree(sc->vcc_zone, vcc); + return (error); +} + +/* + * Enable ioctl for NATM. Map to an open ioctl. + */ +static int +hatm_open_vcc1(struct hatm_softc *sc, struct atm_pseudoioctl *ph) +{ + struct atmio_openvcc *v; + int error; + + if ((v = malloc(sizeof(*v), M_TEMP, M_NOWAIT | M_ZERO)) == NULL) + return (ENOMEM); + + v->param.flags = ATM_PH_FLAGS(&ph->aph) & + (ATM_PH_AAL5 | ATM_PH_LLCSNAP); + v->param.vpi = ATM_PH_VPI(&ph->aph); + v->param.vci = ATM_PH_VCI(&ph->aph); + v->param.aal = (ATM_PH_FLAGS(&ph->aph) & ATM_PH_AAL5) + ? ATMIO_AAL_5 : ATMIO_AAL_0; + v->param.traffic = hatm_natm_traffic; + v->rxhand = ph->rxhand; + if ((v->param.tparam.pcr = hatm_natm_pcr) == 0 || + hatm_natm_pcr > sc->ifatm.mib.pcr) + v->param.tparam.pcr = sc->ifatm.mib.pcr; + v->param.tparam.mcr = 0; + + error = hatm_open_vcc(sc, v); + if (error == 0) + sc->vccs[HE_CID(v->param.vpi, v->param.vci)]->vflags |= + HE_VCC_ASYNC; + + free(v, M_TEMP); + + return (error); +} + +/* + * VCC has been finally closed. 
+ */ +void +hatm_vcc_closed(struct hatm_softc *sc, u_int cid) +{ + struct hevcc *vcc = sc->vccs[cid]; + +#ifdef notyet + /* inform management about non-NG and NG-PVCs */ + if (!(vcc->param.flags & ATMIO_FLAG_NG) || + (vcc->param.flags & ATMIO_FLAG_PVC)) + atm_message(&sc->ifatm.ifnet, ATM_MSG_VCC_CHANGED, + (0 << 24) | (HE_VPI(cid) << 16) | HE_VCI(cid)); +#endif + + sc->open_vccs--; + uma_zfree(sc->vcc_zone, vcc); + sc->vccs[cid] = NULL; +} + +/* + * Try to close the given VCC + */ +static int +hatm_close_vcc(struct hatm_softc *sc, struct atmio_closevcc *arg) +{ + u_int cid; + struct hevcc *vcc; + int error = 0; + + DBG(sc, VCC, ("Close VCC: %u.%u", arg->vpi, arg->vci)); + + if((arg->vpi & ~HE_VPI_MASK) || + (arg->vci & ~HE_VCI_MASK) || + (arg->vci == 0)) + return (EINVAL); + cid = HE_CID(arg->vpi, arg->vci); + + mtx_lock(&sc->mtx); + vcc = sc->vccs[cid]; + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) { + error = EIO; + goto done; + } + + if (vcc == NULL || !(vcc->vflags & HE_VCC_OPEN)) { + error = ENOENT; + goto done; + } + + if (vcc->vflags & HE_VCC_TX_OPEN) + hatm_tx_vcc_close(sc, cid); + if (vcc->vflags & HE_VCC_RX_OPEN) + hatm_rx_vcc_close(sc, cid); + + if (vcc->vflags & HE_VCC_ASYNC) + goto done; + + while ((sc->ifatm.ifnet.if_flags & IFF_RUNNING) && + (vcc->vflags & (HE_VCC_TX_CLOSING | HE_VCC_RX_CLOSING))) + cv_wait(&sc->vcc_cv, &sc->mtx); + + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) { + error = EIO; + goto done; + } + + if (!(vcc->vflags & ATMIO_FLAG_NOTX)) + hatm_tx_vcc_closed(sc, cid); + + hatm_vcc_closed(sc, cid); + + done: + mtx_unlock(&sc->mtx); + return (error); +} + +static int +hatm_close_vcc1(struct hatm_softc *sc, struct atm_pseudoioctl *ph) +{ + struct atmio_closevcc v; + + v.vpi = ATM_PH_VPI(&ph->aph); + v.vci = ATM_PH_VCI(&ph->aph); + + return (hatm_close_vcc(sc, &v)); +} + +/* + * IOCTL handler + */ +int +hatm_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) +{ + struct ifreq *ifr = (struct ifreq *)data; + struct ifaddr *ifa = 
(struct ifaddr *)data; + struct hatm_softc *sc = (struct hatm_softc *)ifp->if_softc; + struct atmio_vcctable *vtab; + int error = 0; + + switch (cmd) { + + case SIOCSIFADDR: + mtx_lock(&sc->mtx); + ifp->if_flags |= IFF_UP; + if (!(ifp->if_flags & IFF_RUNNING)) + hatm_initialize(sc); + switch (ifa->ifa_addr->sa_family) { + +#ifdef INET + case AF_INET: + case AF_INET6: + ifa->ifa_rtrequest = atm_rtrequest; + break; +#endif + default: + break; + } + mtx_unlock(&sc->mtx); + break; + + case SIOCSIFFLAGS: + mtx_lock(&sc->mtx); + if (ifp->if_flags & IFF_UP) { + if (!(ifp->if_flags & IFF_RUNNING)) { + hatm_initialize(sc); + } + } else { + if (ifp->if_flags & IFF_RUNNING) { + hatm_stop(sc); + } + } + mtx_unlock(&sc->mtx); + break; + + case SIOCGIFMEDIA: + case SIOCSIFMEDIA: + error = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); + break; + + case SIOCSIFMTU: + /* + * Set the interface MTU. + */ + if (ifr->ifr_mtu > ATMMTU) + error = EINVAL; + else + ifp->if_mtu = ifr->ifr_mtu; + break; + + case SIOCATMGVCCS: + /* return vcc table */ + vtab = hatm_getvccs(sc); + if (vtab == NULL) { + error = ENOMEM; + break; + } + error = copyout(vtab, ifr->ifr_data, sizeof(*vtab) + + vtab->count * sizeof(vtab->vccs[0])); + free(vtab, M_DEVBUF); + break; + + case SIOCATMENA: /* NATM internal use */ + error = hatm_open_vcc1(sc, (struct atm_pseudoioctl *)data); + break; + + case SIOCATMDIS: /* NATM internal use */ + error = hatm_close_vcc1(sc, (struct atm_pseudoioctl *)data); + break; + + case SIOCATMGETVCCS: /* netgraph internal use */ + if ((vtab = hatm_getvccs(sc)) == NULL) { + error = ENOMEM; + break; + } + *(void **)data = vtab; + break; + + case SIOCATMOPENVCC: /* netgraph/harp internal use */ + error = hatm_open_vcc(sc, (struct atmio_openvcc *)data); + break; + + case SIOCATMCLOSEVCC: /* netgraph and HARP internal use */ + error = hatm_close_vcc(sc, (struct atmio_closevcc *)data); + break; + + default: + DBG(sc, IOCTL, ("cmd=%08lx arg=%p", cmd, data)); + error = EINVAL; + break; + } + + 
return (error); +} + +static int +hatm_sysctl_natm_traffic(SYSCTL_HANDLER_ARGS) +{ + int error; + int tmp; + + tmp = hatm_natm_traffic; + error = sysctl_handle_int(oidp, &tmp, 0, req); + if (error != 0 || req->newptr == NULL) + return (error); + + if (tmp != ATMIO_TRAFFIC_UBR && tmp != ATMIO_TRAFFIC_CBR) + return (EINVAL); + + hatm_natm_traffic = tmp; + return (0); +} diff --git a/sys/dev/hatm/if_hatm_rx.c b/sys/dev/hatm/if_hatm_rx.c new file mode 100644 index 000000000000..e276dd1af161 --- /dev/null +++ b/sys/dev/hatm/if_hatm_rx.c @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * ForeHE driver. + * + * Receive. + */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#ifdef ENABLE_BPF +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +void +hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m0, + u_int len) +{ + struct hevcc *vcc; + struct atm_pseudohdr aph; + struct mbuf *m, *m1; + u_int vpi, vci; + u_char *ptr; + + DBG(sc, RX, ("cid=%#x flags=%#x len=%u mbuf=%p", cid, flags, len, m0)); + + vcc = sc->vccs[cid]; + if (vcc == NULL) + goto drop; + + if (flags & HE_REGM_RBRQ_CON_CLOSED) { + if (vcc->vflags & HE_VCC_RX_CLOSING) { + vcc->vflags &= ~HE_VCC_RX_CLOSING; + if (vcc->vflags & HE_VCC_ASYNC) { + if (!(vcc->vflags & HE_VCC_OPEN)) + hatm_vcc_closed(sc, cid); + } else + cv_signal(&sc->vcc_cv); + } + goto drop; + } + + if (!(vcc->vflags & HE_VCC_RX_OPEN)) + goto drop; + + if (flags & HE_REGM_RBRQ_HBUF_ERROR) { + sc->istats.hbuf_error++; + if (vcc->chain != NULL) { + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + } + goto drop; + } + + if ((m0->m_len = len) == 0) { + sc->istats.empty_hbuf++; + m_free(m0); + + } else if (vcc->chain == NULL) { + sc->istats.rx_seg++; + vcc->chain = vcc->last = m0; + vcc->last->m_next = NULL; + vcc->chain->m_pkthdr.len = m0->m_len; + vcc->chain->m_pkthdr.rcvif = &sc->ifatm.ifnet; + + } else { + sc->istats.rx_seg++; + vcc->last->m_next = m0; + vcc->last = m0; + vcc->last->m_next = NULL; + vcc->chain->m_pkthdr.len += m0->m_len; + } + + if (!(flags & HE_REGM_RBRQ_END_PDU)) + return; + + if (flags & HE_REGM_RBRQ_CRC_ERROR) { + if (vcc->chain) + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + sc->istats.crc_error++; + 
sc->ifatm.ifnet.if_ierrors++; + return; + } + if (flags & HE_REGM_RBRQ_LEN_ERROR) { + if (vcc->chain) + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + sc->istats.len_error++; + sc->ifatm.ifnet.if_ierrors++; + return; + } + +#if 0 + { + struct mbuf *tmp; + + for (tmp = vcc->chain; tmp != NULL; tmp = tmp->m_next) { + printf("mbuf %p: len=%u\n", tmp, tmp->m_len); + for (ptr = mtod(tmp, u_char *); + ptr < mtod(tmp, u_char *) + tmp->m_len; ptr++) + printf("%02x ", *ptr); + printf("\n"); + } + } +#endif + + if (vcc->param.aal == ATMIO_AAL_5) { + /* + * Need to remove padding and the trailer. The trailer + * may be split accross buffers according to 2.10.1.2 + * Assume that mbufs sizes are even (buffer sizes and cell + * payload sizes are) and that there are no empty mbufs. + */ + m = vcc->last; + if (m->m_len == 2) { + /* Ah, oh, only part of CRC */ + if (m == vcc->chain) { + /* ups */ + sc->istats.short_aal5++; + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + return; + } + for (m1 = vcc->chain; m1->m_next != m; m1 = m1->m_next) + ; + ptr = (u_char *)m1->m_data + m1->m_len - 4; + + } else if (m->m_len == 4) { + /* Ah, oh, only CRC */ + if (m == vcc->chain) { + /* ups */ + sc->istats.short_aal5++; + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + return; + } + for (m1 = vcc->chain; m1->m_next != m; m1 = m1->m_next) + ; + ptr = (u_char *)m1->m_data + m1->m_len - 2; + + } else if (m->m_len >= 6) { + ptr = (u_char *)m->m_data + m->m_len - 6; + } else + panic("hatm_rx: bad mbuf len %d", m->m_len); + + len = (ptr[0] << 8) + ptr[1]; + if (len > (u_int)vcc->chain->m_pkthdr.len - 4) { + sc->istats.badlen_aal5++; + m_freem(vcc->chain); + vcc->chain = vcc->last = NULL; + return; + } + m_adj(vcc->chain, -(vcc->chain->m_pkthdr.len - len)); + } + m = vcc->chain; + vcc->chain = vcc->last = NULL; + +#ifdef ENABLE_BPF + if (!(vcc->param.flags & ATMIO_FLAG_NG) && + (vcc->param.flags & ATM_PH_AAL5) && + (vcc->param.flags & ATM_PH_LLCSNAP)) + 
BPF_MTAP(&sc->ifatm.ifnet, m); +#endif + + vpi = HE_VPI(cid); + vci = HE_VCI(cid); + + ATM_PH_FLAGS(&aph) = vcc->param.flags & 0xff; + ATM_PH_VPI(&aph) = vpi; + ATM_PH_SETVCI(&aph, vci); + + sc->ifatm.ifnet.if_ipackets++; + /* this is in if_atmsubr.c */ + /* sc->ifatm.ifnet.if_ibytes += len; */ + + vcc->ibytes += len; + vcc->ipackets++; + +#if 0 + { + struct mbuf *tmp; + + for (tmp = m; tmp != NULL; tmp = tmp->m_next) { + printf("mbuf %p: len=%u\n", tmp, tmp->m_len); + for (ptr = mtod(tmp, u_char *); + ptr < mtod(tmp, u_char *) + tmp->m_len; ptr++) + printf("%02x ", *ptr); + printf("\n"); + } + } +#endif + + atm_input(&sc->ifatm.ifnet, &aph, m, vcc->rxhand); + + return; + + drop: + if (m0 != NULL) + m_free(m0); +} + +void +hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid) +{ + struct hevcc *vcc = sc->vccs[cid]; + uint32_t rsr0, rsr1, rsr4; + + rsr0 = rsr1 = rsr4 = 0; + + if (vcc->param.traffic == ATMIO_TRAFFIC_ABR) { + rsr1 |= HE_REGM_RSR1_AQI; + rsr4 |= HE_REGM_RSR4_AQI; + } + + if (vcc->param.aal == ATMIO_AAL_5) { + rsr0 |= HE_REGM_RSR0_STARTPDU | HE_REGM_RSR0_AAL_5; + } else if (vcc->param.aal == ATMIO_AAL_0) { + rsr0 |= HE_REGM_RSR0_AAL_0; + } else { + if (sc->rbp_s1.size != 0) { + rsr1 |= (1 << HE_REGS_RSR1_GROUP); + rsr4 |= (1 << HE_REGS_RSR4_GROUP); + } + rsr0 |= HE_REGM_RSR0_AAL_RAW; + } + rsr0 |= HE_REGM_RSR0_OPEN; + + WRITE_RSR(sc, cid, 0, 0xf, rsr0); + WRITE_RSR(sc, cid, 1, 0xf, rsr1); + WRITE_RSR(sc, cid, 4, 0xf, rsr4); + + vcc->vflags |= HE_VCC_RX_OPEN; +} + +/* + * Close the RX side of a VCC. 
+ */ +void +hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid) +{ + struct hevcc *vcc = sc->vccs[cid]; + uint32_t v; + + vcc->vflags |= HE_VCC_RX_CLOSING; + WRITE_RSR(sc, cid, 0, 0xf, 0); + + v = READ4(sc, HE_REGO_RCCSTAT); + while ((sc->ifatm.ifnet.if_flags & IFF_RUNNING) && + (READ4(sc, HE_REGO_RCCSTAT) & HE_REGM_RCCSTAT_PROG)) + cv_timedwait(&sc->cv_rcclose, &sc->mtx, 1); + + if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING)) + return; + + WRITE_MBOX4(sc, HE_REGO_RCON_CLOSE, cid); + + vcc->vflags |= HE_VCC_RX_CLOSING; + vcc->vflags &= ~HE_VCC_RX_OPEN; +} diff --git a/sys/dev/hatm/if_hatm_tx.c b/sys/dev/hatm/if_hatm_tx.c new file mode 100644 index 000000000000..355300b8a697 --- /dev/null +++ b/sys/dev/hatm/if_hatm_tx.c @@ -0,0 +1,780 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * ForeHE driver. + * + * Transmission. + */ + +#include "opt_inet.h" +#include "opt_natm.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#ifdef ENABLE_BPF +#include +#endif +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Allocate a new TPD, zero the TPD part. Cannot return NULL if + * flag is 0. The TPD is removed from the free list and its used + * bit is set. + */ +static struct tpd * +hatm_alloc_tpd(struct hatm_softc *sc, u_int flags) +{ + struct tpd *t; + + /* if we allocate a transmit TPD check for the reserve */ + if (flags & M_NOWAIT) { + if (sc->tpd_nfree <= HE_CONFIG_TPD_RESERVE) + return (NULL); + } else { + if (sc->tpd_nfree == 0) + return (NULL); + } + + /* mark it as being used */ + t = SLIST_FIRST(&sc->tpd_free); + KASSERT(t != NULL, ("tpd botch")); + SLIST_REMOVE_HEAD(&sc->tpd_free, link); + TPD_SET_USED(sc, t->no); + sc->tpd_nfree--; + + /* initialize */ + t->mbuf = NULL; + t->cid = 0; + bzero(&t->tpd, sizeof(t->tpd)); + t->tpd.addr = t->no << HE_REGS_TPD_ADDR; + + return (t); +} + +/* + * Free a TPD. If the mbuf pointer in that TPD is not zero, it is assumed that + * the DMA map of this TPD was used to load this mbuf. 
The map is unloaded + * and the mbuf is freed. The TPD is put back onto the free list and + * its used bit is cleared. + */ +static void +hatm_free_tpd(struct hatm_softc *sc, struct tpd *tpd) +{ + if (tpd->mbuf != NULL) { + bus_dmamap_unload(sc->tx_tag, tpd->map); + m_freem(tpd->mbuf); + tpd->mbuf = NULL; + } + + /* insert TPD into free list */ + SLIST_INSERT_HEAD(&sc->tpd_free, tpd, link); + TPD_CLR_USED(sc, tpd->no); + sc->tpd_nfree++; +} + +/* + * Queue a number of TPD. If there is not enough space none of the TPDs + * is queued and an error code is returned. + */ +static int +hatm_queue_tpds(struct hatm_softc *sc, u_int count, struct tpd **list, + u_int cid) +{ + u_int space; + u_int i; + + if (count >= sc->tpdrq.size) { + sc->istats.tdprq_full++; + return (EBUSY); + } + + if (sc->tpdrq.tail < sc->tpdrq.head) + space = sc->tpdrq.head - sc->tpdrq.tail; + else + space = sc->tpdrq.head - sc->tpdrq.tail + sc->tpdrq.size; + + if (space <= count) { + sc->tpdrq.head = + (READ4(sc, HE_REGO_TPDRQ_H) >> HE_REGS_TPDRQ_H_H) & + (sc->tpdrq.size - 1); + + if (sc->tpdrq.tail < sc->tpdrq.head) + space = sc->tpdrq.head - sc->tpdrq.tail; + else + space = sc->tpdrq.head - sc->tpdrq.tail + + sc->tpdrq.size; + + if (space <= count) { + if_printf(&sc->ifatm.ifnet, "TPDRQ full\n"); + sc->istats.tdprq_full++; + return (EBUSY); + } + } + + /* we are going to write to the TPD queue space */ + bus_dmamap_sync(sc->tpdrq.mem.tag, sc->tpdrq.mem.map, + BUS_DMASYNC_PREWRITE); + + /* put the entries into the TPD space */ + for (i = 0; i < count; i++) { + /* we are going to 'write' the TPD to the device */ + bus_dmamap_sync(sc->tpds.tag, sc->tpds.map, + BUS_DMASYNC_PREWRITE); + + sc->tpdrq.tpdrq[sc->tpdrq.tail].tpd = + sc->tpds.paddr + HE_TPD_SIZE * list[i]->no; + sc->tpdrq.tpdrq[sc->tpdrq.tail].cid = cid; + + if (++sc->tpdrq.tail == sc->tpdrq.size) + sc->tpdrq.tail = 0; + } + + /* update tail pointer */ + WRITE4(sc, HE_REGO_TPDRQ_T, (sc->tpdrq.tail << HE_REGS_TPDRQ_T_T)); + + return (0); +} + 
+/* + * Helper struct for communication with the DMA load helper. + */ +struct load_txbuf_arg { + struct hatm_softc *sc; + struct tpd *first; + struct mbuf *mbuf; + struct hevcc *vcc; + int error; + u_int pti; + u_int vpi, vci; +}; + +/* + * Loader callback for the mbuf. This function allocates the TPDs and + * fills them. It puts the dmamap and the mbuf pointer into the last + * TPD and then tries to queue all the TPDs. If anything fails, all TPDs + * allocated by this function are freed and the error flag is set in the + * argument structure. The first TPD must then be freed by the caller. + */ +static void +hatm_load_txbuf(void *uarg, bus_dma_segment_t *segs, int nseg, + bus_size_t mapsize, int error) +{ + struct load_txbuf_arg *arg = uarg; + u_int tpds_needed, i, n, tpd_cnt; + int need_intr; + struct tpd *tpd; + struct tpd *tpd_list[HE_CONFIG_MAX_TPD_PER_PACKET]; + + if (error != 0) { + DBG(arg->sc, DMA, ("%s -- error=%d plen=%d\n", + __func__, error, arg->mbuf->m_pkthdr.len)); + return; + } + + /* ensure we have enough TPDs (remember, we already have one) */ + tpds_needed = (nseg + 2) / 3; + if (HE_CONFIG_TPD_RESERVE + tpds_needed - 1 > arg->sc->tpd_nfree) { + if_printf(&arg->sc->ifatm.ifnet, "%s -- out of TPDs (need %d, " + "have %u)\n", __func__, tpds_needed - 1, + arg->sc->tpd_nfree + 1); + arg->error = 1; + return; + } + + /* + * Check for the maximum number of TPDs on the connection. 
+ */ + need_intr = 0; + if (arg->sc->max_tpd > 0) { + if (arg->vcc->ntpds + tpds_needed > arg->sc->max_tpd) { + arg->sc->istats.flow_closed++; + arg->vcc->vflags |= HE_VCC_FLOW_CTRL; +#ifdef notyet + atm_message(&arg->sc->ifatm.ifnet, ATM_MSG_FLOW_CONTROL, + (1 << 24) | (arg->vpi << 16) | arg->vci); +#endif + arg->error = 1; + return; + } + if (arg->vcc->ntpds + tpds_needed > + (9 * arg->sc->max_tpd) / 10) + need_intr = 1; + } + + tpd = arg->first; + tpd_cnt = 0; + tpd_list[tpd_cnt++] = tpd; + for (i = n = 0; i < nseg; i++, n++) { + if (n == 3) { + if ((tpd = hatm_alloc_tpd(arg->sc, M_NOWAIT)) == NULL) + /* may not fail (see check above) */ + panic("%s: out of TPDs", __func__); + tpd->cid = arg->first->cid; + tpd->tpd.addr |= arg->pti; + tpd_list[tpd_cnt++] = tpd; + n = 0; + } + KASSERT(segs[i].ds_addr <= 0xffffffffLU, + ("phys addr too large %lx", (u_long)segs[i].ds_addr)); + + DBG(arg->sc, DMA, ("DMA loaded: %lx/%lu", + (u_long)segs[i].ds_addr, (u_long)segs[i].ds_len)); + + tpd->tpd.bufs[n].addr = segs[i].ds_addr; + tpd->tpd.bufs[n].len = segs[i].ds_len; + + DBG(arg->sc, TX, ("seg[%u]=tpd[%u,%u]=%x/%u", i, + tpd_cnt, n, tpd->tpd.bufs[n].addr, tpd->tpd.bufs[n].len)); + + if (i == nseg - 1) + tpd->tpd.bufs[n].len |= HE_REGM_TPD_LST; + } + + /* + * Swap the MAP in the first and the last TPD and set the mbuf + * pointer into the last TPD. We use the map in the last TPD, because + * the map must stay valid until the last TPD is processed by the card. 
+ */ + if (tpd_cnt > 1) { + bus_dmamap_t tmp; + + tmp = arg->first->map; + arg->first->map = tpd_list[tpd_cnt - 1]->map; + tpd_list[tpd_cnt - 1]->map = tmp; + } + tpd_list[tpd_cnt - 1]->mbuf = arg->mbuf; + + if (need_intr) + tpd_list[tpd_cnt - 1]->tpd.addr |= HE_REGM_TPD_INTR; + + /* queue the TPDs */ + if (hatm_queue_tpds(arg->sc, tpd_cnt, tpd_list, arg->first->cid)) { + /* free all, except the first TPD */ + for (i = 1; i < tpd_cnt; i++) + hatm_free_tpd(arg->sc, tpd_list[i]); + arg->error = 1; + return; + } + arg->vcc->ntpds += tpd_cnt; +} + + +/* + * Start output on the interface + * + * For raw aal we process only the first cell in the mbuf chain! XXX + */ +void +hatm_start(struct ifnet *ifp) +{ + struct hatm_softc *sc = (struct hatm_softc *)ifp->if_softc; + struct mbuf *m; + struct atm_pseudohdr *aph; + u_int cid; + struct tpd *tpd; + struct load_txbuf_arg arg; + u_int len; + int error; + + if (!(ifp->if_flags & IFF_RUNNING)) + return; + mtx_lock(&sc->mtx); + arg.sc = sc; + + while (1) { + IF_DEQUEUE(&ifp->if_snd, m); + if (m == NULL) + break; + + if (m->m_len < sizeof(*aph)) + if ((m = m_pullup(m, sizeof(*aph))) == NULL) + continue; + + aph = mtod(m, struct atm_pseudohdr *); + arg.vci = ATM_PH_VCI(aph); + arg.vpi = ATM_PH_VPI(aph); + m_adj(m, sizeof(*aph)); + + if ((len = m->m_pkthdr.len) == 0) { + m_freem(m); + continue; + } + + if ((arg.vpi & ~HE_VPI_MASK) || (arg.vci & ~HE_VCI_MASK) || + (arg.vci == 0)) { + m_freem(m); + continue; + } + cid = HE_CID(arg.vpi, arg.vci); + arg.vcc = sc->vccs[cid]; + + if (arg.vcc == NULL || !(arg.vcc->vflags & HE_VCC_OPEN)) { + m_freem(m); + continue; + } + if (arg.vcc->vflags & HE_VCC_FLOW_CTRL) { + m_freem(m); + sc->istats.flow_drop++; + continue; + } + + arg.pti = 0; + if (arg.vcc->param.aal == ATMIO_AAL_RAW) { + if (len < 52) { + m_freem(m); + continue; + } + if (len > 52) { + m_adj(m, -((int)(len - 52))); + len = 52; + } + if (m->m_len < 4 && (m = m_pullup(m, 4)) == NULL) + continue; + + /* ignore header except payload 
type and CLP */ + arg.pti = mtod(m, u_char *)[3] & 0xf; + arg.pti = ((arg.pti & 0xe) << 2) | ((arg.pti & 1) << 1); + m_adj(m, 4); + len -= 4; + } + +#ifdef ENABLE_BPF + if (!(arg.vcc->param.flags & ATMIO_FLAG_NG) && + (arg.vcc->param.flags & ATM_PH_AAL5) && + (arg.vcc->param.flags & ATM_PH_LLCSNAP)) + BPF_MTAP(ifp, m); +#endif + + /* Now load a DMA map with the packet. Allocate the first + * TPD to get a map. Additional TPDs may be allocated by the + * callback. */ + if ((tpd = hatm_alloc_tpd(sc, M_NOWAIT)) == NULL) { + m_freem(m); + sc->ifatm.ifnet.if_oerrors++; + continue; + } + tpd->cid = cid; + tpd->tpd.addr |= arg.pti; + arg.first = tpd; + arg.error = 0; + arg.mbuf = m; + + error = bus_dmamap_load_mbuf(sc->tx_tag, tpd->map, m, + hatm_load_txbuf, &arg, 0); + + if (error == EFBIG) { + /* try to defragment the packet */ + sc->istats.defrag++; + m = m_defrag(m, M_DONTWAIT); + if (m == NULL) { + sc->ifatm.ifnet.if_oerrors++; + continue; + } + arg.mbuf = m; + error = bus_dmamap_load_mbuf(sc->tx_tag, tpd->map, m, + hatm_load_txbuf, &arg, 0); + } + + if (error != 0) { + if_printf(&sc->ifatm.ifnet, "mbuf loaded error=%d\n", + error); + hatm_free_tpd(sc, tpd); + sc->ifatm.ifnet.if_oerrors++; + continue; + } + if (arg.error) { + hatm_free_tpd(sc, tpd); + sc->ifatm.ifnet.if_oerrors++; + continue; + } + arg.vcc->opackets++; + arg.vcc->obytes += len; + sc->ifatm.ifnet.if_opackets++; + } + mtx_unlock(&sc->mtx); +} + +void +hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t flags) +{ + struct hevcc *vcc = sc->vccs[tpd->cid]; + + DBG(sc, TX, ("tx_complete cid=%#x flags=%#x", tpd->cid, flags)); + + if (vcc == NULL) + return; + if ((flags & HE_REGM_TBRQ_EOS) && (vcc->vflags & HE_VCC_TX_CLOSING)) { + vcc->vflags &= ~HE_VCC_TX_CLOSING; + if (vcc->vflags & HE_VCC_ASYNC) { + hatm_tx_vcc_closed(sc, tpd->cid); + if (!(vcc->vflags & HE_VCC_OPEN)) { + hatm_vcc_closed(sc, tpd->cid); + vcc = NULL; + } + } else + cv_signal(&sc->vcc_cv); + } + hatm_free_tpd(sc, tpd); + + if 
(vcc == NULL) + return; + + vcc->ntpds--; + + if ((vcc->vflags & HE_VCC_FLOW_CTRL) && + vcc->ntpds <= HE_CONFIG_TPD_FLOW_ENB) { + vcc->vflags &= ~HE_VCC_FLOW_CTRL; +#ifdef notyet + atm_message(&sc->ifatm.ifnet, ATM_MSG_FLOW_CONTROL, + (0 << 24) | (HE_VPI(tpd->cid) << 16) | HE_VCI(tpd->cid)); +#endif + } +} + +/* + * Convert CPS to Rate for a rate group + */ +static u_int +cps_to_rate(struct hatm_softc *sc, uint32_t cps) +{ + u_int clk = sc->he622 ? HE_622_CLOCK : HE_155_CLOCK; + u_int period, rate; + + /* how many double ticks between two cells */ + period = (clk + 2 * cps - 1) / (2 * cps); + rate = hatm_cps2atmf(period); + if (hatm_atmf2cps(rate) < period) + rate++; + + return (rate); +} + +/* + * Check whether the VCC is really closed on the hardware and available for + * open. Check that we have enough resources. If this function returns ok, + * a later actual open must succeed. Assume, that we are locked between this + * function and the next one, so that nothing does change. For CBR this + * assigns the rate group and set the rate group's parameter. + */ +int +hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *vcc) +{ + uint32_t v, line_rate; + u_int rc, idx, free_idx; + struct atmio_tparam *t = &vcc->param.tparam; + + /* verify that connection is closed */ +#if 0 + v = READ_TSR(sc, cid, 4); + if(!(v & HE_REGM_TSR4_SESS_END)) { + if_printf(&sc->ifatm.ifnet, "cid=%#x not closed (TSR4)\n", cid); + return (EBUSY); + } +#endif + v = READ_TSR(sc, cid, 0); + if((v & HE_REGM_TSR0_CONN_STATE) != 0) { + if_printf(&sc->ifatm.ifnet, "cid=%#x not closed (TSR0=%#x)\n", + cid, v); + return (EBUSY); + } + + /* check traffic parameters */ + line_rate = sc->he622 ? 
ATM_RATE_622M : ATM_RATE_155M; + switch (vcc->param.traffic) { + + case ATMIO_TRAFFIC_UBR: + if (t->pcr == 0 || t->pcr > line_rate) + t->pcr = line_rate; + if (t->mcr != 0 || t->icr != 0 || t->tbe != 0 || t->nrm != 0 || + t->trm != 0 || t->adtf != 0 || t->rif != 0 || t->rdf != 0 || + t->cdf != 0) + return (EINVAL); + break; + + case ATMIO_TRAFFIC_CBR: + /* + * Compute rate group index + */ + if (t->pcr < 10) + t->pcr = 10; + if (sc->cbr_bw + t->pcr > line_rate) + return (EINVAL); + if (t->mcr != 0 || t->icr != 0 || t->tbe != 0 || t->nrm != 0 || + t->trm != 0 || t->adtf != 0 || t->rif != 0 || t->rdf != 0 || + t->cdf != 0) + return (EINVAL); + + rc = cps_to_rate(sc, t->pcr); + free_idx = HE_REGN_CS_STPER; + for (idx = 0; idx < HE_REGN_CS_STPER; idx++) { + if (sc->rate_ctrl[idx].refcnt == 0) { + if (free_idx == HE_REGN_CS_STPER) + free_idx = idx; + } else { + if (sc->rate_ctrl[idx].rate == rc) + break; + } + } + if (idx == HE_REGN_CS_STPER) { + if ((idx = free_idx) == HE_REGN_CS_STPER) + return (EBUSY); + sc->rate_ctrl[idx].rate = rc; + WRITE_MBOX4(sc, HE_REGO_CS_STPER(idx), rc); + } + vcc->rc = idx; + break; + + case ATMIO_TRAFFIC_ABR: + if (t->pcr > line_rate) + t->pcr = line_rate; + if (t->mcr > line_rate) + t->mcr = line_rate; + if (t->icr > line_rate) + t->icr = line_rate; + if (t->tbe == 0 || t->tbe >= 1 << 24 || t->nrm > 7 || + t->trm > 7 || t->adtf >= 1 << 10 || t->rif > 15 || + t->rdf > 15 || t->cdf > 7) + return (EINVAL); + break; + + default: + return (EINVAL); + } + return (0); +} + +#define NRM_CODE2VAL(CODE) (2 * (1 << (CODE))) + +/* + * Actually open the transmit VCC + */ +void +hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid) +{ + struct hevcc *vcc = sc->vccs[cid]; + uint32_t tsr0, tsr4, atmf, crm; + const struct atmio_tparam *t = &vcc->param.tparam; + + if (vcc->param.aal == ATMIO_AAL_5) { + tsr0 = HE_REGM_TSR0_AAL_5 << HE_REGS_TSR0_AAL; + tsr4 = HE_REGM_TSR4_AAL_5 << HE_REGS_TSR4_AAL; + } else { + tsr0 = HE_REGM_TSR0_AAL_0 << HE_REGS_TSR0_AAL; + 
tsr4 = HE_REGM_TSR4_AAL_0 << HE_REGS_TSR4_AAL; + } + tsr4 |= 1; + + switch (vcc->param.traffic) { + + case ATMIO_TRAFFIC_UBR: + atmf = hatm_cps2atmf(t->pcr); + + tsr0 |= HE_REGM_TSR0_TRAFFIC_UBR << HE_REGS_TSR0_TRAFFIC; + tsr0 |= HE_REGM_TSR0_USE_WMIN | HE_REGM_TSR0_UPDATE_GER; + + WRITE_TSR(sc, cid, 0, 0xf, tsr0); + WRITE_TSR(sc, cid, 4, 0xf, tsr4); + WRITE_TSR(sc, cid, 1, 0xf, (atmf << HE_REGS_TSR1_PCR)); + WRITE_TSR(sc, cid, 2, 0xf, (atmf << HE_REGS_TSR2_ACR)); + WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); + WRITE_TSR(sc, cid, 3, 0xf, 0); + WRITE_TSR(sc, cid, 5, 0xf, 0); + WRITE_TSR(sc, cid, 6, 0xf, 0); + WRITE_TSR(sc, cid, 7, 0xf, 0); + WRITE_TSR(sc, cid, 8, 0xf, 0); + WRITE_TSR(sc, cid, 10, 0xf, 0); + WRITE_TSR(sc, cid, 11, 0xf, 0); + WRITE_TSR(sc, cid, 12, 0xf, 0); + WRITE_TSR(sc, cid, 13, 0xf, 0); + WRITE_TSR(sc, cid, 14, 0xf, 0); + break; + + case ATMIO_TRAFFIC_CBR: + atmf = hatm_cps2atmf(t->pcr); + sc->rate_ctrl[vcc->rc].refcnt++; + + tsr0 |= HE_REGM_TSR0_TRAFFIC_CBR << HE_REGS_TSR0_TRAFFIC; + tsr0 |= vcc->rc; + + WRITE_TSR(sc, cid, 1, 0xf, (atmf << HE_REGS_TSR1_PCR)); + WRITE_TSR(sc, cid, 2, 0xf, (atmf << HE_REGS_TSR2_ACR)); + WRITE_TSR(sc, cid, 3, 0xf, 0); + WRITE_TSR(sc, cid, 5, 0xf, 0); + WRITE_TSR(sc, cid, 6, 0xf, 0); + WRITE_TSR(sc, cid, 7, 0xf, 0); + WRITE_TSR(sc, cid, 8, 0xf, 0); + WRITE_TSR(sc, cid, 10, 0xf, 0); + WRITE_TSR(sc, cid, 11, 0xf, 0); + WRITE_TSR(sc, cid, 12, 0xf, 0); + WRITE_TSR(sc, cid, 13, 0xf, 0); + WRITE_TSR(sc, cid, 14, 0xf, 0); + WRITE_TSR(sc, cid, 4, 0xf, tsr4); + WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); + WRITE_TSR(sc, cid, 0, 0xf, tsr0); + + sc->cbr_bw += t->pcr; + break; + + case ATMIO_TRAFFIC_ABR: + if ((crm = t->tbe / NRM_CODE2VAL(t->nrm)) > 0xffff) + crm = 0xffff; + + tsr0 |= HE_REGM_TSR0_TRAFFIC_ABR << HE_REGS_TSR0_TRAFFIC; + tsr0 |= HE_REGM_TSR0_USE_WMIN | HE_REGM_TSR0_UPDATE_GER; + + WRITE_TSR(sc, cid, 0, 0xf, tsr0); + WRITE_TSR(sc, cid, 4, 0xf, tsr4); + + WRITE_TSR(sc, cid, 1, 0xf, + ((hatm_cps2atmf(t->pcr) << 
HE_REGS_TSR1_PCR) | + (hatm_cps2atmf(t->mcr) << HE_REGS_TSR1_MCR))); + WRITE_TSR(sc, cid, 2, 0xf, + (hatm_cps2atmf(t->icr) << HE_REGS_TSR2_ACR)); + WRITE_TSR(sc, cid, 3, 0xf, + ((NRM_CODE2VAL(t->nrm) - 1) << HE_REGS_TSR3_NRM) | + (crm << HE_REGS_TSR3_CRM)); + + WRITE_TSR(sc, cid, 5, 0xf, 0); + WRITE_TSR(sc, cid, 6, 0xf, 0); + WRITE_TSR(sc, cid, 7, 0xf, 0); + WRITE_TSR(sc, cid, 8, 0xf, 0); + WRITE_TSR(sc, cid, 10, 0xf, 0); + WRITE_TSR(sc, cid, 12, 0xf, 0); + WRITE_TSR(sc, cid, 14, 0xf, 0); + WRITE_TSR(sc, cid, 9, 0xf, HE_REGM_TSR9_INIT); + + WRITE_TSR(sc, cid, 11, 0xf, + (hatm_cps2atmf(t->icr) << HE_REGS_TSR11_ICR) | + (t->trm << HE_REGS_TSR11_TRM) | + (t->nrm << HE_REGS_TSR11_NRM) | + (t->adtf << HE_REGS_TSR11_ADTF)); + + WRITE_TSR(sc, cid, 13, 0xf, + (t->rdf << HE_REGS_TSR13_RDF) | + (t->rif << HE_REGS_TSR13_RIF) | + (t->cdf << HE_REGS_TSR13_CDF) | + (crm << HE_REGS_TSR13_CRM)); + + break; + + default: + return; + } + + vcc->vflags |= HE_VCC_TX_OPEN; +} + +/* + * Close the TX side of a VCC. Set the CLOSING flag. 
+ */ +void +hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid) +{ + struct hevcc *vcc = sc->vccs[cid]; + struct tpd *tpd_list[1]; + u_int i, pcr = 0; + + WRITE_TSR(sc, cid, 4, 0x8, HE_REGM_TSR4_FLUSH); + + switch (vcc->param.traffic) { + + case ATMIO_TRAFFIC_CBR: + WRITE_TSR(sc, cid, 14, 0x8, HE_REGM_TSR14_CBR_DELETE); + break; + + case ATMIO_TRAFFIC_ABR: + WRITE_TSR(sc, cid, 14, 0x4, HE_REGM_TSR14_ABR_CLOSE); + pcr = vcc->param.tparam.pcr; + /* FALL THROUGH */ + + case ATMIO_TRAFFIC_UBR: + WRITE_TSR(sc, cid, 1, 0xf, + hatm_cps2atmf(HE_CONFIG_FLUSH_RATE) << HE_REGS_TSR1_MCR | + hatm_cps2atmf(pcr) << HE_REGS_TSR1_PCR); + break; + } + + tpd_list[0] = hatm_alloc_tpd(sc, 0); + tpd_list[0]->tpd.addr |= HE_REGM_TPD_EOS | HE_REGM_TPD_INTR; + tpd_list[0]->cid = cid; + + vcc->vflags |= HE_VCC_TX_CLOSING; + vcc->vflags &= ~HE_VCC_TX_OPEN; + + i = 0; + while (hatm_queue_tpds(sc, 1, tpd_list, cid) != 0) { + if (++i == 1000) + panic("TPDRQ permanently full"); + DELAY(1000); + } +} + +void +hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid) +{ + if (sc->vccs[cid]->param.traffic == ATMIO_TRAFFIC_CBR) { + sc->cbr_bw -= sc->vccs[cid]->param.tparam.pcr; + sc->rate_ctrl[sc->vccs[cid]->rc].refcnt--; + } +} diff --git a/sys/dev/hatm/if_hatmconf.h b/sys/dev/hatm/if_hatmconf.h new file mode 100644 index 000000000000..9d2d794c1339 --- /dev/null +++ b/sys/dev/hatm/if_hatmconf.h @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * Default configuration + */ + +/* configuration */ +#define HE_CONFIG_VPI_BITS 2 +#define HE_CONFIG_VCI_BITS 10 + +/* interrupt group 0 only */ +/* the size must be 1 <= size <= 1023 */ +#define HE_CONFIG_IRQ0_SIZE 256 +#define HE_CONFIG_IRQ0_THRESH 224 /* retrigger interrupt */ +#define HE_CONFIG_IRQ0_LINE HE_REGM_IRQ_A /* routing */ + +/* don't change these */ +#define HE_CONFIG_TXMEM (128 * 1024) /* words */ +#define HE_CONFIG_RXMEM (64 * 1024) /* words */ +#define HE_CONFIG_LCMEM (512 * 1024) /* words */ + +/* group 0 - all AALs except AAL.raw */ +/* receive group 0 buffer pools (mbufs and mbufs+cluster) */ +/* the size must be a power of 2: 4 <= size <= 8192 */ +#define HE_CONFIG_RBPS0_SIZE 2048 /* entries per queue */ +#define HE_CONFIG_RBPS0_THRESH 256 /* interrupt threshold */ +#define HE_CONFIG_RBPL0_SIZE 512 /* entries per queue */ +#define HE_CONFIG_RBPL0_THRESH 32 /* interrupt threshold */ + +/* receive group 0 buffer 
return queue */ +/* the size must be a power of 2: 1 <= size <= 16384 */ +#define HE_CONFIG_RBRQ0_SIZE 512 /* entries in queue */ +#define HE_CONFIG_RBRQ0_THRESH 256 /* interrupt threshold */ +#define HE_CONFIG_RBRQ0_TOUT 10 /* interrupt timeout */ +#define HE_CONFIG_RBRQ0_PCNT 5 /* packet count threshold */ + +/* group 1 - raw cells */ +/* receive group 1 small buffer pool */ +/* the size must be a power of 2: 4 <= size <= 8192 */ +#define HE_CONFIG_RBPS1_SIZE 1024 /* entries in queue */ +#define HE_CONFIG_RBPS1_THRESH 512 /* interrupt threshold */ + +/* receive group 1 buffer return queue */ +/* the size must be a power of 2: 1 <= size <= 16384 */ +#define HE_CONFIG_RBRQ1_SIZE 512 /* entries in queue */ +#define HE_CONFIG_RBRQ1_THRESH 256 /* interrupt threshold */ +#define HE_CONFIG_RBRQ1_TOUT 100 /* interrupt timeout */ +#define HE_CONFIG_RBRQ1_PCNT 25 /* packet count threshold */ + +/* there is only one TPD queue */ +/* the size must be a power of 2: 1 <= size <= 4096 */ +#define HE_CONFIG_TPDRQ_SIZE 2048 /* entries in queue */ + +/* transmit group 0 */ +/* the size must be a power of 2: 1 <= size <= 16384 */ +#define HE_CONFIG_TBRQ_SIZE 512 /* entries in queue */ +#define HE_CONFIG_TBRQ_THRESH 400 /* interrupt threshold */ + +/* Maximum number of TPDs to allocate to a single VCC. 
This + * number should depend on the cell rate and the maximum allowed cell delay */ +#define HE_CONFIG_TPD_MAXCC 2048 + +/* Maximum number of external mbuf pages */ +#define HE_CONFIG_MAX_MBUF_PAGES 256 + +/* Maximum number of TPDs used for one packet */ +#define HE_CONFIG_MAX_TPD_PER_PACKET \ + ((((HE_MAX_PDU + MCLBYTES - 1) / MCLBYTES + 2) / 3) + 2) + +/* Number of TPDs to reserve for close operations */ +#define HE_CONFIG_TPD_RESERVE 32 + +/* Number of TPDs per VCC when to re-enable flow control */ +#define HE_CONFIG_TPD_FLOW_ENB 80 + +/* MCR for flushing CBR and ABR connections at close */ +#define HE_CONFIG_FLUSH_RATE 200000 diff --git a/sys/dev/hatm/if_hatmreg.h b/sys/dev/hatm/if_hatmreg.h new file mode 100644 index 000000000000..7e88a5c61e5a --- /dev/null +++ b/sys/dev/hatm/if_hatmreg.h @@ -0,0 +1,641 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * Fore HE driver for NATM + */ + +/* check configuration */ +#if HE_CONFIG_VPI_BITS + HE_CONFIG_VCI_BITS > 12 +#error "hatm: too many bits configured for VPI/VCI" +#endif + +#define HE_MAX_VCCS (1 << (HE_CONFIG_VPI_BITS + HE_CONFIG_VCI_BITS)) + +#define HE_VPI_MASK ((1 << (HE_CONFIG_VPI_BITS))-1) +#define HE_VCI_MASK ((1 << (HE_CONFIG_VCI_BITS))-1) + +#define HE_VPI(CID) (((CID) >> HE_CONFIG_VCI_BITS) & HE_VPI_MASK) +#define HE_VCI(CID) ((CID) & HE_VCI_MASK) + +#define HE_CID(VPI,VCI) ((((VPI) & HE_VPI_MASK) << HE_CONFIG_VCI_BITS) | \ + ((VCI) & HE_VCI_MASK)) + + +/* GEN_CNTL_0 register */ +#define HE_PCIR_GEN_CNTL_0 0x40 +#define HE_PCIM_CTL0_64BIT (1 << 0) +#define HE_PCIM_CTL0_IGNORE_TIMEOUT (1 << 1) +#define HE_PCIM_CTL0_INIT_ENB (1 << 2) +#define HE_PCIM_CTL0_MRM (1 << 4) +#define HE_PCIM_CTL0_MRL (1 << 5) +#define HE_PCIM_CTL0_BIGENDIAN (1 << 16) +#define HE_PCIM_CTL0_INT_PROC_ENB (1 << 25) + +/* + * Memory registers + */ +#define HE_REGO_FLASH 0x00000 +#define HE_REGO_RESET_CNTL 0x80000 +#define HE_REGM_RESET_STATE (1 << 6) +#define HE_REGO_HOST_CNTL 0x80004 +#define HE_REGM_HOST_BUS64 (1 << 27) +#define HE_REGM_HOST_DESC_RD64 (1 << 26) +#define HE_REGM_HOST_DATA_RD64 (1 << 25) +#define HE_REGM_HOST_DATA_WR64 (1 << 24) +#define HE_REGM_HOST_PROM_SEL (1 << 12) +#define HE_REGM_HOST_PROM_WREN (1 << 11) +#define HE_REGM_HOST_PROM_DATA_OUT (1 << 10) +#define HE_REGS_HOST_PROM_DATA_OUT 
10 +#define HE_REGM_HOST_PROM_DATA_IN (1 << 9) +#define HE_REGS_HOST_PROM_DATA_IN 9 +#define HE_REGM_HOST_PROM_CLOCK (1 << 8) +#define HE_REGM_HOST_PROM_BITS (0x00001f00) +#define HE_REGM_HOST_QUICK_RD (1 << 7) +#define HE_REGM_HOST_QUICK_WR (1 << 6) +#define HE_REGM_HOST_OUTFF_ENB (1 << 5) +#define HE_REGM_HOST_CMDFF_ENB (1 << 4) +#define HE_REGO_LB_SWAP 0x80008 +#define HE_REGM_LBSWAP_RNUM (0xf << 27) +#define HE_REGS_LBSWAP_RNUM 27 +#define HE_REGM_LBSWAP_DATA_WR_SWAP (1 << 20) +#define HE_REGM_LBSWAP_DESC_RD_SWAP (1 << 19) +#define HE_REGM_LBSWAP_DATA_RD_SWAP (1 << 18) +#define HE_REGM_LBSWAP_INTR_SWAP (1 << 17) +#define HE_REGM_LBSWAP_DESC_WR_SWAP (1 << 16) +#define HE_REGM_LBSWAP_BIG_ENDIAN (1 << 14) +#define HE_REGM_LBSWAP_XFER_SIZE (1 << 7) + +#define HE_REGO_LB_MEM_ADDR 0x8000C +#define HE_REGO_LB_MEM_DATA 0x80010 +#define HE_REGO_LB_MEM_ACCESS 0x80014 +#define HE_REGM_LB_MEM_HNDSHK (1 << 30) +#define HE_REGM_LB_MEM_READ 0x3 +#define HE_REGM_LB_MEM_WRITE 0x7 + +#define HE_REGO_SDRAM_CNTL 0x80018 +#define HE_REGM_SDRAM_64BIT (1 << 3) +#define HE_REGO_INT_FIFO 0x8001C +#define HE_REGM_INT_FIFO_CLRA (1 << 8) +#define HE_REGM_INT_FIFO_CLRB (1 << 9) +#define HE_REGM_INT_FIFO_CLRC (1 << 10) +#define HE_REGM_INT_FIFO_CLRD (1 << 11) +#define HE_REGO_ABORT_ADDR 0x80020 + +#define HE_REGO_IRQ0_BASE 0x80080 +#define HE_REGO_IRQ_BASE(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x00) +#define HE_REGM_IRQ_BASE_TAIL 0x3ff +#define HE_REGO_IRQ_HEAD(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x04) +#define HE_REGS_IRQ_HEAD_SIZE 22 +#define HE_REGS_IRQ_HEAD_THRESH 12 +#define HE_REGS_IRQ_HEAD_HEAD 2 +#define HE_REGO_IRQ_CNTL(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x08) +#define HE_REGM_IRQ_A (0 << 2) +#define HE_REGM_IRQ_B (1 << 2) +#define HE_REGM_IRQ_C (2 << 2) +#define HE_REGM_IRQ_D (3 << 2) +#define HE_REGO_IRQ_DATA(Q) (HE_REGO_IRQ0_BASE + (Q) * 0x10 + 0x0C) + +#define HE_REGO_GRP_1_0_MAP 0x800C0 +#define HE_REGO_GRP_3_2_MAP 0x800C4 +#define HE_REGO_GRP_5_4_MAP 0x800C8 +#define 
HE_REGO_GRP_7_6_MAP 0x800CC + +/* + * Receive buffer pools + */ +#define HE_REGO_G0_RBPS_S 0x80400 +#define HE_REGO_G0_RBPS_T 0x80404 +#define HE_REGO_G0_RBPS_QI 0x80408 +#define HE_REGO_G0_RBPS_BL 0x8040C + +#define HE_REGO_RBP_S(K,G) (HE_REGO_G0_RBPS_S + (K) * 0x10 + (G) * 0x20) +#define HE_REGO_RBP_T(K,G) (HE_REGO_G0_RBPS_T + (K) * 0x10 + (G) * 0x20) +#define HE_REGO_RBP_QI(K,G) (HE_REGO_G0_RBPS_QI + (K) * 0x10 + (G) * 0x20) +#define HE_REGO_RBP_BL(K,G) (HE_REGO_G0_RBPS_BL + (K) * 0x10 + (G) * 0x20) + +#define HE_REGS_RBP_HEAD 3 +#define HE_REGS_RBP_TAIL 3 +#define HE_REGS_RBP_SIZE 14 +#define HE_REGM_RBP_INTR_ENB (1 << 13) +#define HE_REGS_RBP_THRESH 0 + +/* + * Receive buffer return queues + */ +#define HE_REGO_G0_RBRQ_ST 0x80500 +#define HE_REGO_G0_RBRQ_H 0x80504 +#define HE_REGO_G0_RBRQ_Q 0x80508 +#define HE_REGO_G0_RBRQ_I 0x8050C + +#define HE_REGO_RBRQ_ST(G) (HE_REGO_G0_RBRQ_ST + (G) * 0x10) +#define HE_REGO_RBRQ_H(G) (HE_REGO_G0_RBRQ_H + (G) * 0x10) +#define HE_REGO_RBRQ_Q(G) (HE_REGO_G0_RBRQ_Q + (G) * 0x10) +#define HE_REGO_RBRQ_I(G) (HE_REGO_G0_RBRQ_I + (G) * 0x10) + +#define HE_REGS_RBRQ_HEAD 3 +#define HE_REGS_RBRQ_THRESH 13 +#define HE_REGS_RBRQ_SIZE 0 +#define HE_REGS_RBRQ_TIME 8 +#define HE_REGS_RBRQ_COUNT 0 + +/* + * Intermediate queues + */ +#define HE_REGO_G0_INMQ_S 0x80580 +#define HE_REGO_G0_INMQ_L 0x80584 +#define HE_REGO_INMQ_S(G) (HE_REGO_G0_INMQ_S + (G) * 8) +#define HE_REGO_INMQ_L(G) (HE_REGO_G0_INMQ_L + (G) * 8) + +#define HE_REGO_RHCONFIG 0x805C0 +#define HE_REGM_RHCONFIG_PHYENB (1 << 10) +#define HE_REGS_RHCONFIG_OAM_GID 7 +#define HE_REGS_RHCONFIG_PTMR_PRE 0 + +/* + * Transmit buffer return queues + */ +#define HE_REGO_TBRQ0_B_T 0x80600 +#define HE_REGO_TBRQ0_H 0x80604 +#define HE_REGO_TBRQ0_S 0x80608 +#define HE_REGO_TBRQ0_THRESH 0x8060C + +#define HE_REGO_TBRQ_B_T(G) (HE_REGO_TBRQ0_B_T + (G) * 0x10) +#define HE_REGO_TBRQ_H(G) (HE_REGO_TBRQ0_H + (G) * 0x10) +#define HE_REGO_TBRQ_S(G) (HE_REGO_TBRQ0_S + (G) * 0x10) +#define 
HE_REGO_TBRQ_THRESH(G) (HE_REGO_TBRQ0_THRESH + (G) * 0x10) + +#define HE_REGS_TBRQ_HEAD 2 + +/* + * Transmit packet descriptor ready queue + */ +#define HE_REGO_TPDRQ_H 0x80680 +#define HE_REGS_TPDRQ_H_H 3 +/* #define HE_REGM_TPDRQ_H_H ((HE_CONFIG_TPDRQ_SIZE - 1) << 3) */ +#define HE_REGO_TPDRQ_T 0x80684 +#define HE_REGS_TPDRQ_T_T 3 +/* #define HE_REGM_TPDRQ_T_T ((HE_CONFIG_TPDRQ_SIZE - 1) << 3) */ +#define HE_REGO_TPDRQ_S 0x80688 + +#define HE_REGO_UBUFF_BA 0x8068C + +#define HE_REGO_RLBF0_H 0x806C0 +#define HE_REGO_RLBF0_T 0x806C4 +#define HE_REGO_RLBF1_H 0x806C8 +#define HE_REGO_RLBF1_T 0x806CC +#define HE_REGO_RLBF_H(N) (HE_REGO_RLBF0_H + (N) * 8) +#define HE_REGO_RLBF_T(N) (HE_REGO_RLBF0_T + (N) * 8) + +#define HE_REGO_RLBC_H 0x806D0 +#define HE_REGO_RLBC_T 0x806D4 +#define HE_REGO_RLBC_H2 0x806D8 +#define HE_REGO_TLBF_H 0x806E0 +#define HE_REGO_TLBF_T 0x806E4 + +#define HE_REGO_RLBF0_C 0x806E8 +#define HE_REGO_RLBF1_C 0x806EC +#define HE_REGO_RLBF_C(N) (HE_REGO_RLBF0_C + (N) * 4) + +#define HE_REGO_RXTHRSH 0x806F0 +#define HE_REGO_LITHRSH 0x806F4 + +#define HE_REGO_LBARB 0x80700 +#define HE_REGS_LBARB_SLICE 28 +#define HE_REGS_LBARB_RNUM 23 +#define HE_REGS_LBARB_THPRI 21 +#define HE_REGS_LBARB_RHPRI 19 +#define HE_REGS_LBARB_TLPRI 17 +#define HE_REGS_LBARB_RLPRI 15 +#define HE_REGS_LBARB_BUS_MULT 8 +#define HE_REGS_LBARB_NET_PREF 0 + +#define HE_REGO_SDRAMCON 0x80704 +#define HE_REGM_SDRAMCON_BANK (1 << 14) +#define HE_REGM_SDRAMCON_WIDE (1 << 13) +#define HE_REGM_SDRAMCON_TWRWAIT (1 << 12) +#define HE_REGM_SDRAMCON_TRPWAIT (1 << 11) +#define HE_REGM_SDRAMCON_TRASWAIT (1 << 10) +#define HE_REGS_SDRAMCON_REF 0 + +#define HE_REGO_RCCSTAT 0x8070C +#define HE_REGM_RCCSTAT_PROG (1 << 0) + +#define HE_REGO_TCMCONFIG 0x80740 +#define HE_REGS_TCMCONFIG_BANK_WAIT 6 +#define HE_REGS_TCMCONFIG_RW_WAIT 2 +#define HE_REGS_TCMCONFIG_TYPE 0 + +#define HE_REGO_TSRB_BA 0x80744 +#define HE_REGO_TSRC_BA 0x80748 +#define HE_REGO_TMABR_BA 0x8074C +#define HE_REGO_TPD_BA 0x80750 
+#define HE_REGO_TSRD_BA 0x80758 + +#define HE_REGO_TXCONFIG 0x80760 +#define HE_REGS_TXCONFIG_THRESH 22 +#define HE_REGM_TXCONFIG_UTMODE (1 << 21) +#define HE_REGS_TXCONFIG_VCI_MASK 17 +#define HE_REGS_TXCONFIG_LBFREE 0 + +#define HE_REGO_TXAAL5_PROTO 0x80764 + +#define HE_REGO_RCMCONFIG 0x80780 +#define HE_REGS_RCMCONFIG_BANK_WAIT 6 +#define HE_REGS_RCMCONFIG_RW_WAIT 2 +#define HE_REGS_RCMCONFIG_TYPE 0 + +#define HE_REGO_RCMRSRB_BA 0x80784 +#define HE_REGO_RCMLBM_BA 0x80788 +#define HE_REGO_RCMABR_BA 0x8078C + +#define HE_REGO_RCCONFIG 0x807C0 +#define HE_REGS_RCCONFIG_UTDELAY 11 +#define HE_REGM_RCCONFIG_WRAP_MODE (1 << 10) +#define HE_REGM_RCCONFIG_UT_MODE (1 << 9) +#define HE_REGM_RCCONFIG_RXENB (1 << 8) +#define HE_REGS_RCCONFIG_VP 4 +#define HE_REGS_RCCONFIG_VC 0 + +#define HE_REGO_MCC 0x807C4 +#define HE_REGO_OEC 0x807C8 +#define HE_REGO_DCC 0x807CC +#define HE_REGO_CEC 0x807D0 + +#define HE_REGO_HSP_BA 0x807F0 + +#define HE_REGO_LBCONFIG 0x807F4 + +#define HE_REGO_CON_DAT 0x807F8 +#define HE_REGO_CON_CTL 0x807FC +#define HE_REGM_CON_MBOX (2 << 30) +#define HE_REGM_CON_TCM (1 << 30) +#define HE_REGM_CON_RCM (0 << 30) +#define HE_REGM_CON_WE (1 << 29) +#define HE_REGM_CON_STATUS (1 << 28) +#define HE_REGM_CON_DIS3 (1 << 22) +#define HE_REGM_CON_DIS2 (1 << 21) +#define HE_REGM_CON_DIS1 (1 << 20) +#define HE_REGM_CON_DIS0 (1 << 19) +#define HE_REGS_CON_DIS 19 +#define HE_REGS_CON_ADDR 0 + +#define HE_REGO_SUNI 0x80800 +#define HE_REGO_SUNI_END 0x80C00 + +#define HE_REGO_END 0x100000 + +/* + * MBOX registers + */ +#define HE_REGO_CS_STPER0 0x000 +#define HE_REGO_CS_STPER(G) (HE_REGO_CS_STPER0 + (G)) +#define HE_REGN_CS_STPER 32 +#define HE_REGO_CS_STTIM0 0x020 +#define HE_REGO_CS_STTIM(G) (HE_REGO_CS_STTIM0 + (G)) +#define HE_REGO_CS_TGRLD0 0x040 +#define HE_REGO_CS_TGRLD(G) (HE_REGO_CS_TGRLD0 + (G)) +#define HE_REGO_CS_ERTHR0 0x50 +#define HE_REGO_CS_ERTHR1 0x51 +#define HE_REGO_CS_ERTHR2 0x52 +#define HE_REGO_CS_ERTHR3 0x53 +#define HE_REGO_CS_ERTHR4 0x54 
+#define HE_REGO_CS_ERCTL0 0x55 +#define HE_REGO_CS_ERCTL1 0x56 +#define HE_REGO_CS_ERCTL2 0x57 +#define HE_REGO_CS_ERSTAT0 0x58 +#define HE_REGO_CS_ERSTAT1 0x59 +#define HE_REGO_CS_RTCCT 0x60 +#define HE_REGO_CS_RTFWC 0x61 +#define HE_REGO_CS_RTFWR 0x62 +#define HE_REGO_CS_RTFTC 0x63 +#define HE_REGO_CS_RTATR 0x64 +#define HE_REGO_CS_TFBSET 0x70 +#define HE_REGO_CS_TFBADD 0x71 +#define HE_REGO_CS_TFBSUB 0x72 +#define HE_REGO_CS_WCRMAX 0x73 +#define HE_REGO_CS_WCRMIN 0x74 +#define HE_REGO_CS_WCRINC 0x75 +#define HE_REGO_CS_WCRDEC 0x76 +#define HE_REGO_CS_WCRCEIL 0x77 +#define HE_REGO_CS_BWDCNT 0x78 +#define HE_REGO_CS_OTPPER 0x80 +#define HE_REGO_CS_OTWPER 0x81 +#define HE_REGO_CS_OTTLIM 0x82 +#define HE_REGO_CS_OTTCNT 0x83 +#define HE_REGO_CS_HGRRT0 0x90 +#define HE_REGO_CS_HGRRT(G) (HE_REGO_CS_HGRRT0 + (G)) +#define HE_REGO_CS_ORPTRS 0xA0 +#define HE_REGO_RCON_CLOSE 0x100 +#define HE_REGO_CS_END 0x101 + +#define HE_REGT_CS_ERTHR { \ + { /* 155 */ \ + { 0x000800ea, 0x000400ea, 0x000200ea }, /* ERTHR0 */ \ + { 0x000C3388, 0x00063388, 0x00033388 }, /* ERTHR1 */ \ + { 0x00101018, 0x00081018, 0x00041018 }, /* ERTHR2 */ \ + { 0x00181dac, 0x000c1dac, 0x00061dac }, /* ERTHR3 */ \ + { 0x0028051a, 0x0014051a, 0x000a051a }, /* ERTHR4 */ \ + }, { /* 622 */ \ + { 0x000800fa, 0x000400fa, 0x000200fa }, /* ERTHR0 */ \ + { 0x000c33cb, 0x000633cb, 0x000333cb }, /* ERTHR1 */ \ + { 0x0010101b, 0x0008101b, 0x0004101b }, /* ERTHR2 */ \ + { 0x00181dac, 0x000c1dac, 0x00061dac }, /* ERTHR3 */ \ + { 0x00280600, 0x00140600, 0x000a0600 }, /* ERTHR4 */ \ + } \ +} + +#define HE_REGT_CS_ERCTL { \ + { 0x0235e4b1, 0x4701, 0x64b1 }, /* 155 */ \ + { 0x023de8b3, 0x1801, 0x68b3 } /* 622 */ \ +} + +#define HE_REGT_CS_ERSTAT { \ + { 0x1280, 0x64b1 }, /* 155 */ \ + { 0x1280, 0x68b3 }, /* 622 */ \ +} + +#define HE_REGT_CS_RTFWR { \ + 0xf424, /* 155 */ \ + 0x14585 /* 622 */ \ +} + +#define HE_REGT_CS_RTATR { \ + 0x4680, /* 155 */ \ + 0x4680 /* 622 */ \ +} + +#define HE_REGT_CS_BWALLOC { \ + { 0x000563b7, 
0x64b1, 0x5ab1, 0xe4b1, 0xdab1, 0x64b1 }, /* 155 */\ + { 0x00159ece, 0x68b3, 0x5eb3, 0xe8b3, 0xdeb3, 0x68b3 }, /* 622 */\ +} + +#define HE_REGT_CS_ORCF { \ + { 0x6, 0x1e }, /* 155 */ \ + { 0x5, 0x14 } /* 622 */ \ +} + +/* + * TSRs - NR is relative to the starting number of the block + */ +#define HE_REGO_TSRA(BASE,CID,NR) ((BASE) + ((CID) << 3) + (NR)) +#define HE_REGO_TSRB(BASE,CID,NR) ((BASE) + ((CID) << 2) + (NR)) +#define HE_REGO_TSRC(BASE,CID,NR) ((BASE) + ((CID) << 1) + (NR)) +#define HE_REGO_TSRD(BASE,CID) ((BASE) + (CID)) + +#define HE_REGM_TSR0_CONN_STATE (7 << 28) +#define HE_REGS_TSR0_CONN_STATE 28 +#define HE_REGM_TSR0_USE_WMIN (1 << 23) +#define HE_REGM_TSR0_GROUP (7 << 18) +#define HE_REGS_TSR0_GROUP 18 +#define HE_REGM_TSR0_TRAFFIC (3 << 16) +#define HE_REGS_TSR0_TRAFFIC 16 +#define HE_REGM_TSR0_TRAFFIC_CBR 0 +#define HE_REGM_TSR0_TRAFFIC_UBR 1 +#define HE_REGM_TSR0_TRAFFIC_ABR 2 +#define HE_REGM_TSR0_PROT (1 << 15) +#define HE_REGM_TSR0_AAL (3 << 12) +#define HE_REGS_TSR0_AAL 12 +#define HE_REGM_TSR0_AAL_5 0 +#define HE_REGM_TSR0_AAL_0 1 +#define HE_REGM_TSR0_AAL_0T 2 +#define HE_REGM_TSR0_HALT_ER (1 << 11) +#define HE_REGM_TSR0_MARK_CI (1 << 10) +#define HE_REGM_TSR0_MARK_ER (1 << 9) +#define HE_REGM_TSR0_UPDATE_GER (1 << 8) +#define HE_REGM_TSR0_RC 0xff + +#define HE_REGM_TSR1_PCR (0x7fff << 16) +#define HE_REGS_TSR1_PCR 16 +#define HE_REGM_TSR1_MCR (0x7fff << 0) +#define HE_REGS_TSR1_MCR 0 + +#define HE_REGM_TSR2_ACR (0x7fff << 16) +#define HE_REGS_TSR2_ACR 16 + +#define HE_REGM_TSR3_NRM (0xff << 24) +#define HE_REGS_TSR3_NRM 24 +#define HE_REGM_TSR3_CRM (0xff << 0) +#define HE_REGS_TSR3_CRM 0 + +#define HE_REGM_TSR4_FLUSH (1 << 31) +#define HE_REGM_TSR4_SESS_END (1 << 30) +#define HE_REGM_TSR4_OAM_CRC10 (1 << 28) +#define HE_REGM_TSR4_NULL_CRC10 (1 << 27) +#define HE_REGM_TSR4_PROT (1 << 26) +#define HE_REGM_TSR4_AAL (3 << 24) +#define HE_REGS_TSR4_AAL 24 +#define HE_REGM_TSR4_AAL_5 0 +#define HE_REGM_TSR4_AAL_0 1 +#define HE_REGM_TSR4_AAL_0T 2 
+ +#define HE_REGM_TSR9_INIT 0x00100000 + +#define HE_REGM_TSR11_ICR (0x7fff << 16) +#define HE_REGS_TSR11_ICR 16 +#define HE_REGM_TSR11_TRM (0x7 << 13) +#define HE_REGS_TSR11_TRM 13 +#define HE_REGM_TSR11_NRM (0x7 << 10) +#define HE_REGS_TSR11_NRM 10 +#define HE_REGM_TSR11_ADTF 0x3ff +#define HE_REGS_TSR11_ADTF 0 + +#define HE_REGM_TSR13_RDF (0xf << 23) +#define HE_REGS_TSR13_RDF 23 +#define HE_REGM_TSR13_RIF (0xf << 19) +#define HE_REGS_TSR13_RIF 19 +#define HE_REGM_TSR13_CDF (0x7 << 16) +#define HE_REGS_TSR13_CDF 16 +#define HE_REGM_TSR13_CRM 0xffff +#define HE_REGS_TSR13_CRM 0 + +#define HE_REGM_TSR14_CBR_DELETE (1 << 31) +#define HE_REGM_TSR14_ABR_CLOSE (1 << 16) + +/* + * RSRs + */ +#define HE_REGO_RSRA(BASE,CID,NR) ((BASE) + ((CID) << 3) + (NR)) +#define HE_REGO_RSRB(BASE,CID,NR) ((BASE) + ((CID) << 1) + (NR)) + +#define HE_REGM_RSR0_PTI7 (1 << 15) +#define HE_REGM_RSR0_RM (1 << 14) +#define HE_REGM_RSR0_F5OAM (1 << 13) +#define HE_REGM_RSR0_STARTPDU (1 << 10) +#define HE_REGM_RSR0_OPEN (1 << 6) +#define HE_REGM_RSR0_PPD (1 << 5) +#define HE_REGM_RSR0_EPD (1 << 4) +#define HE_REGM_RSR0_TCPCS (1 << 3) +#define HE_REGM_RSR0_AAL 0x7 +#define HE_REGM_RSR0_AAL_5 0x0 +#define HE_REGM_RSR0_AAL_0 0x1 +#define HE_REGM_RSR0_AAL_0T 0x2 +#define HE_REGM_RSR0_AAL_RAW 0x3 +#define HE_REGM_RSR0_AAL_RAWCRC10 0x4 + +#define HE_REGM_RSR1_AQI (1 << 20) +#define HE_REGM_RSR1_RBPL_ONLY (1 << 19) +#define HE_REGM_RSR1_GROUP (7 << 16) +#define HE_REGS_RSR1_GROUP 16 + +#define HE_REGM_RSR4_AQI (1 << 30) +#define HE_REGM_RSR4_GROUP (7 << 27) +#define HE_REGS_RSR4_GROUP 27 +#define HE_REGM_RSR4_RBPL_ONLY (1 << 26) + +/* + * Relative to RCMABR_BA + */ +#define HE_REGO_CM_GQTBL 0x000 +#define HE_REGL_CM_GQTBL 0x100 +#define HE_REGO_CM_RGTBL 0x100 +#define HE_REGL_CM_RGTBL 0x100 +#define HE_REGO_CM_TNRMTBL 0x200 +#define HE_REGL_CM_TNRMTBL 0x100 +#define HE_REGO_CM_ORCF 0x300 +#define HE_REGL_CM_ORCF 0x100 +#define HE_REGO_CM_RTGTBL 0x400 +#define HE_REGL_CM_RTGTBL 0x200 +#define 
HE_REGO_CM_IRCF 0x600 +#define HE_REGL_CM_IRCF 0x200 + +/* + * Interrupt Status + */ +#define HE_REGM_ITYPE 0xf8 +#define HE_REGM_IGROUP 0x07 +#define HE_REGM_ITYPE_TBRQ (0x0 << 3) +#define HE_REGM_ITYPE_TPD (0x1 << 3) +#define HE_REGM_ITYPE_RBPS (0x2 << 3) +#define HE_REGM_ITYPE_RBPL (0x3 << 3) +#define HE_REGM_ITYPE_RBRQ (0x4 << 3) +#define HE_REGM_ITYPE_RBRQT (0x5 << 3) +#define HE_REGM_ITYPE_PHYS (0x6 << 3) +#define HE_REGM_ITYPE_UNKNOWN 0xf8 +#define HE_REGM_ITYPE_ERR 0x80 +#define HE_REGM_ITYPE_PERR 0x81 +#define HE_REGM_ITYPE_ABORT 0x82 +#define HE_REGM_ITYPE_INVALID 0xf8 + +/* + * Serial EEPROM + */ +#define HE_EEPROM_PROD_ID 0x08 +#define HE_EEPROM_PROD_ID_LEN 30 +#define HE_EEPROM_REV 0x26 +#define HE_EEPROM_REV_LEN 4 +#define HE_EEPROM_M_SN 0x3A +#define HE_EEPROM_MEDIA 0x3E +#define HE_EEPROM_MAC 0x42 + +#define HE_MEDIA_UTP155 0x06 +#define HE_MEDIA_MMF155 0x26 +#define HE_MEDIA_MMF622 0x27 +#define HE_MEDIA_SMF155 0x46 +#define HE_MEDIA_SMF622 0x47 + +#define HE_622_CLOCK 66667000 +#define HE_155_CLOCK 50000000 + +/* + * Statistics + */ +struct fatm_statshe { +}; + +/* + * Queue entries + */ +/* Receive Buffer Pool Queue entry */ +struct he_rbpen { + uint32_t phys; /* physical address */ + uint32_t handle; /* handle or virtual address */ +}; +/* Receive Buffer Return Queue entry */ +struct he_rbrqen { + uint32_t addr; /* handle and flags */ + uint32_t len; /* length and CID */ +}; +#define HE_REGM_RBRQ_ADDR 0xFFFFFFC0 +#define HE_REGS_RBRQ_ADDR 6 +#define HE_REGM_RBRQ_FLAGS 0x0000003F +#define HE_REGM_RBRQ_HBUF_ERROR (1 << 0) +#define HE_REGM_RBRQ_CON_CLOSED (1 << 1) +#define HE_REGM_RBRQ_AAL5_PROT (1 << 2) +#define HE_REGM_RBRQ_END_PDU (1 << 3) +#define HE_REGM_RBRQ_LEN_ERROR (1 << 4) +#define HE_REGM_RBRQ_CRC_ERROR (1 << 5) +#define HE_REGM_RBRQ_CID (0x1fff << 16) +#define HE_REGS_RBRQ_CID 16 +#define HE_REGM_RBRQ_LEN 0xffff + +/* Transmit Packet Descriptor Ready Queue entry */ +struct he_tpdrqen { + uint32_t tpd; /* physical address */ + uint32_t 
cid; /* connection id */ +}; +/* Transmit buffer return queue */ +struct he_tbrqen { + uint32_t addr; /* handle and flags */ +}; +#define HE_REGM_TBRQ_ADDR 0xffffffc0 +#define HE_REGM_TBRQ_FLAGS 0x0000000a +#define HE_REGM_TBRQ_EOS 0x00000008 +#define HE_REGM_TBRQ_MULT 0x00000002 + +struct he_tpd { + uint32_t addr; /* handle or virtual address and flags */ + uint32_t res; /* reserved */ + struct { + uint32_t addr; /* buffer address */ + uint32_t len; /* buffer length and flags */ + } bufs[3]; +}; +#define HE_REGM_TPD_ADDR 0xffffffC0 +#define HE_REGS_TPD_ADDR 6 +#define HE_REGM_TPD_INTR 0x0001 +#define HE_REGM_TPD_CLP 0x0002 +#define HE_REGM_TPD_EOS 0x0004 +#define HE_REGM_TPD_PTI 0x0038 +#define HE_REGS_TPD_PTI 3 +#define HE_REGM_TPD_LST 0x80000000 + +/* + * The HOST STATUS PAGE + */ +struct he_hsp { + struct { + uint32_t tbrq_tail; + uint32_t res1[15]; + uint32_t rbrq_tail; + uint32_t res2[15]; + } group[8]; +}; + +#define HE_MAX_PDU (65535) diff --git a/sys/dev/hatm/if_hatmvar.h b/sys/dev/hatm/if_hatmvar.h new file mode 100644 index 000000000000..92722d05a850 --- /dev/null +++ b/sys/dev/hatm/if_hatmvar.h @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2001-2003 + * Fraunhofer Institute for Open Communication Systems (FhG Fokus). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Author: Hartmut Brandt + * + * $FreeBSD$ + * + * Fore HE driver for NATM + */ + +/* + * Debug statistics of the HE driver + */ +struct istats { + uint32_t tdprq_full; + uint32_t hbuf_error; + uint32_t crc_error; + uint32_t len_error; + uint32_t flow_closed; + uint32_t flow_drop; + uint32_t tpd_no_mem; + uint32_t rx_seg; + uint32_t empty_hbuf; + uint32_t short_aal5; + uint32_t badlen_aal5; + uint32_t bug_bad_isw; + uint32_t bug_no_irq_upd; + uint32_t itype_tbrq; + uint32_t itype_tpd; + uint32_t itype_rbps; + uint32_t itype_rbpl; + uint32_t itype_rbrq; + uint32_t itype_rbrqt; + uint32_t itype_unknown; + uint32_t itype_phys; + uint32_t itype_err; + uint32_t defrag; +}; + +/* Card memory layout parameters */ +#define HE_CONFIG_MEM_LAYOUT { \ + { /* 155 */ \ + 20, /* cells_per_row */ \ + 1024, /* bytes_per_row */ \ + 512, /* r0_numrows */ \ + 1018, /* tx_numrows */ \ + 512, /* r1_numrows */ \ + 6, /* r0_startrow */ \ + 2 /* cells_per_lbuf */ \ + }, { /* 622 */ \ + 40, /* cells_per_row */ \ + 2048, /* bytes_per_row */ \ + 256, /* r0_numrows */ \ + 512, /* tx_numrows */ \ + 256, /* r1_numrows */ \ + 0, /* r0_startrow */ \ + 4 /* cells_per_lbuf */ \ + } \ +} + 
+/*********************************************************************/
+struct hatm_softc;
+
+/*
+ * A chunk of DMA-able memory
+ */
+struct dmamem {
+ u_int size; /* in bytes */
+ u_int align; /* alignment */
+ bus_dma_tag_t tag; /* DMA tag */
+ void *base; /* the memory */
+ bus_addr_t paddr; /* physical address */
+ bus_dmamap_t map; /* the MAP */
+};
+
+/*
+ * RBP (Receive Buffer Pool) queue entry and queue.
+ */
+struct herbp {
+ u_int size; /* RBP number of entries (power of two) */
+ u_int thresh; /* interrupt threshold */
+ uint32_t bsize; /* buffer size in bytes */
+ u_int offset; /* free space at start for small bufs */
+ uint32_t mask; /* mask for index */
+ struct dmamem mem; /* the queue area */
+ struct he_rbpen *rbp;
+ uint32_t head, tail; /* head and tail */
+};
+
+/*
+ * RBRQ (Receive Buffer Return Queue) entry and queue.
+ */
+struct herbrq {
+ u_int size; /* number of entries */
+ u_int thresh; /* interrupt threshold */
+ u_int tout; /* timeout value */
+ u_int pcnt; /* packet count threshold */
+ struct dmamem mem; /* memory */
+ struct he_rbrqen *rbrq;
+ uint32_t head; /* driver end */
+};
+
+/*
+ * TPDRQ (Transmit Packet Descriptor Ready Queue) entry and queue
+ */
+struct hetpdrq {
+ u_int size; /* number of entries */
+ struct dmamem mem; /* memory */
+ struct he_tpdrqen *tpdrq;
+ u_int head; /* head (copy of adapter) */
+ u_int tail; /* written back to adapter */
+};
+
+/*
+ * TBRQ (Transmit Buffer Return Queue) entry and queue
+ */
+struct hetbrq {
+ u_int size; /* number of entries */
+ u_int thresh; /* interrupt threshold */
+ struct dmamem mem; /* memory */
+ struct he_tbrqen *tbrq;
+ u_int head; /* adapter end */
+};
+
+/*==================================================================*/
+
+/*
+ * TPDs are 32 bytes and must be aligned on 64 byte boundaries. That means,
+ * that half of the space is free. We use this space to plug in a link for
+ * the list of free TPDs.
Note, that the m_act member of the mbufs contains
+ * a pointer to the dmamap.
+ *
+ * The maximum number of TPDs is the size of the common transmit packet
+ * descriptor ready queue plus the sizes of the transmit buffer return queues
+ * (currently only queue 0). We allocate and map these TPDs when initializing
+ * the card. We also allocate one DMA map for each TPD. Only the map in the
+ * last TPD of a packet is used when a packet is transmitted.
+ * This is signalled by having the mbuf member of this TPD non-zero and
+ * pointing to the mbuf.
+ */
+#define HE_TPD_SIZE 64
+struct tpd {
+ struct he_tpd tpd; /* at beginning */
+ SLIST_ENTRY(tpd) link; /* free cid list link */
+ struct mbuf *mbuf; /* the buf chain */
+ bus_dmamap_t map; /* map */
+ uint32_t cid; /* CID */
+ uint16_t no; /* number of this tpd */
+};
+SLIST_HEAD(tpd_list, tpd);
+
+#define TPD_SET_USED(SC, I) do { \
+ (SC)->tpd_used[(I) / 8] |= (1 << ((I) % 8)); \
+ } while (0)
+
+#define TPD_CLR_USED(SC, I) do { \
+ (SC)->tpd_used[(I) / 8] &= ~(1 << ((I) % 8)); \
+ } while (0)
+
+#define TPD_TST_USED(SC, I) ((SC)->tpd_used[(I) / 8] & (1 << ((I) % 8)))
+
+#define TPD_ADDR(SC, I) ((struct tpd *)((char *)sc->tpds.base + \
+ (I) * HE_TPD_SIZE))
+
+/*==================================================================*/
+
+/*
+ * External MBUFs. The card needs a lot of mbufs in the pools for high
+ * performance. The problem with using mbufs directly is that we would need
+ * a dmamap for each of the mbufs. This can exhaust iommu space on the sparc
+ * and it eats also a lot of processing time. So we use external mbufs
+ * for the small buffers and clusters for the large buffers.
+ * For receive group 0 we use 5 ATM cells, for group 1 one (52 byte) ATM
+ * cell. The mbuf storage is allocated pagewise and one dmamap is used per
+ * page.
+ *
+ * The handle we give to the card for the small buffers is a word combined
+ * of the page number and the number of the chunk in the page.
This restricts
+ * the number of chunks per page to 256 (8 bit) and the number of pages to
+ * 65536 (16 bits).
+ *
+ * A chunk may be in one of three states: free, on the card and floating around
+ * in the system. If it is free, it is on one of the two free lists and
+ * starts with a struct mbufx_free. Each page has a bitmap that tracks where
+ * its chunks are.
+ *
+ * For large buffers we use mbuf clusters. Here we have two problems: we need
+ * to track the buffers on the card (in the case we want to stop it) and
+ * we need to map the 64bit mbuf address to a 26bit handle for 64-bit machines.
+ * The card uses the buffers in the order we give it to the card. Therefore
+ * we can use a private array holding pointers to the mbufs as a circular
+ * queue for both tasks. This is done with the lbufs member of softc. The
+ * handle for these buffers is the lbufs index ORed with a flag.
+ */
+#define MBUF0_SIZE (5 * 48) /* 240 */
+#define MBUF1_SIZE (52)
+
+#define MBUF0_CHUNK 256 /* 16 free bytes */
+#define MBUF1_CHUNK 96 /* 44 free bytes */
+#ifdef XXX
+#define MBUF0_OFFSET (MBUF0_CHUNK - sizeof(struct mbuf_chunk_hdr) \
+ - MBUF0_SIZE)
+#else
+#define MBUF0_OFFSET 0
+#endif
+#define MBUF1_OFFSET (MBUF1_CHUNK - sizeof(struct mbuf_chunk_hdr) \
+ - MBUF1_SIZE)
+#define MBUFL_OFFSET 16 /* two pointers for HARP */
+
+#define MBUF_ALLOC_SIZE (PAGE_SIZE)
+
+/* each allocated page has one of these structures at its very end.
*/ +struct mbuf_page_hdr { + uint8_t card[32]; /* bitmap for on-card */ + uint8_t used[32]; /* bitmap for used but not on-card */ + uint16_t nchunks; /* chunks on this page */ + bus_dmamap_t map; /* the DMA MAP */ + uint32_t phys; /* physical base address */ + uint32_t hdroff; /* chunk header offset */ + uint32_t chunksize; /* chunk size */ +}; +struct mbuf_page { + char storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)]; + struct mbuf_page_hdr hdr; +}; + +/* numbers per page */ +#define MBUF0_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \ + MBUF0_CHUNK) +#define MBUF1_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \ + MBUF1_CHUNK) + +#define MBUF_CLR_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] &= ~(1 << ((BIT) % 8))) +#define MBUF_SET_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] |= (1 << ((BIT) % 8))) +#define MBUF_TST_BIT(ARRAY, BIT) ((ARRAY)[(BIT) / 8] & (1 << ((BIT) % 8))) + +#define MBUF_MAKE_HANDLE(PAGENO, CHUNKNO) \ + (((PAGENO) << 10) | (CHUNKNO)) + +#define MBUF_PARSE_HANDLE(HANDLE, PAGENO, CHUNKNO) do { \ + (CHUNKNO) = (HANDLE) & 0x3ff; \ + (PAGENO) = ((HANDLE) >> 10) & 0x3ff; \ + } while (0) + +#define MBUF_LARGE_FLAG (1 << 20) + +/* chunks have the following structure at the end */ +struct mbuf_chunk_hdr { + struct mbuf *mbuf; + uint16_t pageno; + uint16_t chunkno; +}; + +#define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK \ + - sizeof(struct mbuf_chunk_hdr)) + +struct mbuf0_chunk { + char storage[MBUFX_STORAGE_SIZE(0)]; + struct mbuf_chunk_hdr hdr; +}; + +struct mbuf1_chunk { + char storage[MBUFX_STORAGE_SIZE(1)]; + struct mbuf_chunk_hdr hdr; +}; + +struct mbufx_free { + SLIST_ENTRY(mbufx_free) link; +}; +SLIST_HEAD(mbufx_free_list, mbufx_free); + +/*==================================================================*/ + +/* + * Interrupt queue + */ +struct heirq { + u_int size; /* number of entries */ + u_int thresh; /* re-interrupt threshold */ + u_int line; /* interrupt line to use */ + struct dmamem mem; /* interrupt queues */ + 
uint32_t * irq; /* interrupt queue */
+ uint32_t head; /* head index */
+ uint32_t * tailp; /* pointer to tail */
+ struct hatm_softc *sc; /* back pointer */
+ u_int group; /* interrupt group */
+};
+
+/*
+ * This structure describes all information for a VCC open on the card.
+ * The array of these structures is indexed by the compressed connection ID
+ * (CID).
+ */
+struct hevcc {
+ u_int vflags; /* private flags */
+ void * rxhand; /* NATM protocol block */
+ u_int rc; /* rate control group for CBR */
+ struct mbuf * chain; /* partial received PDU */
+ struct mbuf * last; /* last mbuf in chain */
+
+ /* from the OPEN_VCC ioctl */
+ struct atmio_vcc param; /* traffic parameters */
+
+ uint32_t ibytes;
+ uint32_t ipackets;
+ uint32_t obytes;
+ uint32_t opackets;
+ u_int ntpds; /* number of active TPDs */
+};
+#define HE_VCC_OPEN 0x000f0000
+#define HE_VCC_RX_OPEN 0x00010000
+#define HE_VCC_RX_CLOSING 0x00020000
+#define HE_VCC_TX_OPEN 0x00040000
+#define HE_VCC_TX_CLOSING 0x00080000
+#define HE_VCC_FLOW_CTRL 0x00100000
+#define HE_VCC_ASYNC 0x00200000
+
+/*
+ * CBR rate groups
+ */
+struct herg {
+ u_int refcnt; /* how many connections reference this group */
+ u_int rate; /* the value */
+};
+
+/*
+ * Softc
+ */
+struct hatm_softc {
+ struct ifatm ifatm; /* common ATM stuff */
+ struct mtx mtx; /* lock */
+ struct ifmedia media; /* media */
+ device_t dev; /* device */
+ int memid; /* resource id for memory */
+ struct resource * memres; /* memory resource */
+ bus_space_handle_t memh; /* handle */
+ bus_space_tag_t memt; /* ...
and tag */ + bus_dma_tag_t parent_tag; /* global restriction */ + struct cv vcc_cv; /* condition variable */ + int irqid; /* resource id */ + struct resource * irqres; /* resource */ + void * ih; /* interrupt handle */ + struct utopia utopia; /* utopia state */ + + /* rest has to be reset by stop */ + int he622; /* this is a HE622 */ + int pci64; /* 64bit bus */ + char prod_id[HE_EEPROM_PROD_ID_LEN + 1]; + char rev[HE_EEPROM_REV_LEN + 1]; + struct heirq irq_0; /* interrupt queues 0 */ + + /* generic network controller state */ + u_int cells_per_row; + u_int bytes_per_row; + u_int r0_numrows; + u_int tx_numrows; + u_int r1_numrows; + u_int r0_startrow; + u_int tx_startrow; + u_int r1_startrow; + u_int cells_per_lbuf; + u_int r0_numbuffs; + u_int r1_numbuffs; + u_int tx_numbuffs; + + /* HSP */ + struct he_hsp *hsp; + struct dmamem hsp_mem; + + /*** TX ***/ + struct hetbrq tbrq; /* TBRQ 0 */ + struct hetpdrq tpdrq; /* TPDRQ */ + struct tpd_list tpd_free; /* Free TPDs */ + u_int tpd_nfree; /* number of free TPDs */ + u_int tpd_total; /* total TPDs */ + uint8_t *tpd_used; /* bitmap of used TPDs */ + struct dmamem tpds; /* TPD memory */ + bus_dma_tag_t tx_tag; /* DMA tag for all tx mbufs */ + + /*** RX ***/ + /* receive/transmit groups */ + struct herbp rbp_s0; /* RBPS0 */ + struct herbp rbp_l0; /* RBPL0 */ + struct herbp rbp_s1; /* RBPS1 */ + struct herbrq rbrq_0; /* RBRQ0 */ + struct herbrq rbrq_1; /* RBRQ1 */ + + /* list of external mbuf storage */ + bus_dma_tag_t mbuf_tag; + struct mbuf_page **mbuf_pages; + u_int mbuf_npages; + struct mtx mbuf0_mtx; + struct mbufx_free_list mbuf0_list; + struct mtx mbuf1_mtx; + struct mbufx_free_list mbuf1_list; + + /* mbuf cluster tracking and mapping for group 0 */ + struct mbuf **lbufs; /* mbufs */ + bus_dmamap_t *rmaps; /* DMA maps */ + u_int lbufs_size; + u_int lbufs_next; + + /* VCCs */ + struct hevcc *vccs[HE_MAX_VCCS]; + u_int cbr_bw; /* BW allocated to CBR */ + u_int max_tpd; /* per VCC */ + u_int open_vccs; + uma_zone_t 
vcc_zone; + + /* rate groups */ + struct herg rate_ctrl[HE_REGN_CS_STPER]; + + /* memory offsets */ + u_int tsrb, tsrc, tsrd; + u_int rsrb; + + struct cv cv_rcclose; /* condition variable */ + uint32_t rate_grid[16][16]; /* our copy */ + + /* sysctl support */ + struct sysctl_ctx_list sysctl_ctx; + struct sysctl_oid *sysctl_tree; + + /* internal statistics */ + struct istats istats; + +#ifdef HATM_DEBUG + /* debugging */ + u_int debug; +#endif +}; + +#define READ4(SC,OFF) bus_space_read_4(SC->memt, SC->memh, (OFF)) +#define READ2(SC,OFF) bus_space_read_2(SC->memt, SC->memh, (OFF)) +#define READ1(SC,OFF) bus_space_read_1(SC->memt, SC->memh, (OFF)) + +#define WRITE4(SC,OFF,VAL) bus_space_write_4(SC->memt, SC->memh, (OFF), (VAL)) +#define WRITE2(SC,OFF,VAL) bus_space_write_2(SC->memt, SC->memh, (OFF), (VAL)) +#define WRITE1(SC,OFF,VAL) bus_space_write_1(SC->memt, SC->memh, (OFF), (VAL)) + +#define BARRIER_R(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ + BUS_SPACE_BARRIER_READ) +#define BARRIER_W(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ + BUS_SPACE_BARRIER_WRITE) +#define BARRIER_RW(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \ + BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE) + +#define READ_SUNI(SC,OFF) READ4(SC, HE_REGO_SUNI + 4 * (OFF)) +#define WRITE_SUNI(SC,OFF,VAL) WRITE4(SC, HE_REGO_SUNI + 4 * (OFF), (VAL)) + +#define READ_LB4(SC,OFF) \ + ({ \ + WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF)); \ + WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \ + (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_READ)); \ + while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\ + ; \ + READ4(SC, HE_REGO_LB_MEM_DATA); \ + }) +#define WRITE_LB4(SC,OFF,VAL) \ + do { \ + WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF)); \ + WRITE4(SC, HE_REGO_LB_MEM_DATA, (VAL)); \ + WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \ + (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_WRITE)); \ + while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\ + ; \ + } while(0) + +#define 
WRITE_MEM4(SC,OFF,VAL,SPACE) \ + do { \ + WRITE4(SC, HE_REGO_CON_DAT, (VAL)); \ + WRITE4(SC, HE_REGO_CON_CTL, \ + (SPACE | HE_REGM_CON_WE | HE_REGM_CON_STATUS | (OFF))); \ + while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \ + ; \ + } while(0) + +#define READ_MEM4(SC,OFF,SPACE) \ + ({ \ + WRITE4(SC, HE_REGO_CON_CTL, \ + (SPACE | HE_REGM_CON_STATUS | (OFF))); \ + while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \ + ; \ + READ4(SC, HE_REGO_CON_DAT); \ + }) + +#define WRITE_TCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_TCM) +#define WRITE_RCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_RCM) +#define WRITE_MBOX4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_MBOX) + +#define READ_TCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_TCM) +#define READ_RCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_RCM) +#define READ_MBOX4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_MBOX) + +#define WRITE_TCM(SC,OFF,BYTES,VAL) \ + WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \ + (VAL), HE_REGM_CON_TCM) +#define WRITE_RCM(SC,OFF,BYTES,VAL) \ + WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \ + (VAL), HE_REGM_CON_RCM) + +#define READ_TSR(SC,CID,NR) \ + ({ \ + uint32_t _v; \ + if((NR) <= 7) { \ + _v = READ_TCM4(SC, HE_REGO_TSRA(0,CID,NR)); \ + } else if((NR) <= 11) { \ + _v = READ_TCM4(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)));\ + } else if((NR) <= 13) { \ + _v = READ_TCM4(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)));\ + } else { \ + _v = READ_TCM4(SC, HE_REGO_TSRD((SC)->tsrd,CID)); \ + } \ + _v; \ + }) + +#define WRITE_TSR(SC,CID,NR,BEN,VAL) \ + do { \ + if((NR) <= 7) { \ + WRITE_TCM(SC, HE_REGO_TSRA(0,CID,NR),BEN,VAL); \ + } else if((NR) <= 11) { \ + WRITE_TCM(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)),BEN,VAL);\ + } else if((NR) <= 13) { \ + WRITE_TCM(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)),BEN,VAL);\ + } else { \ + WRITE_TCM(SC, HE_REGO_TSRD((SC)->tsrd,CID),BEN,VAL); \ + } \ + } while(0) + +#define READ_RSR(SC,CID,NR) \ + ({ \ + 
uint32_t _v; \ + if((NR) <= 7) { \ + _v = READ_RCM4(SC, HE_REGO_RSRA(0,CID,NR)); \ + } else { \ + _v = READ_RCM4(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)));\ + } \ + _v; \ + }) + +#define WRITE_RSR(SC,CID,NR,BEN,VAL) \ + do { \ + if((NR) <= 7) { \ + WRITE_RCM(SC, HE_REGO_RSRA(0,CID,NR),BEN,VAL); \ + } else { \ + WRITE_RCM(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)),BEN,VAL);\ + } \ + } while(0) + +#ifdef HATM_DEBUG +#define DBG(SC, FL, PRINT) do { \ + if((SC)->debug & DBG_##FL) { \ + if_printf(&(SC)->ifatm.ifnet, "%s: ", __func__); \ + printf PRINT; \ + printf("\n"); \ + } \ + } while (0) + +enum { + DBG_RX = 0x0001, + DBG_TX = 0x0002, + DBG_VCC = 0x0004, + DBG_IOCTL = 0x0008, + DBG_ATTACH = 0x0010, + DBG_INTR = 0x0020, + DBG_DMA = 0x0040, + DBG_DMAH = 0x0080, + + DBG_ALL = 0x00ff +}; + +#else +#define DBG(SC, FL, PRINT) +#endif + +u_int hatm_cps2atmf(uint32_t); +u_int hatm_atmf2cps(uint32_t); + +void hatm_intr(void *); +int hatm_ioctl(struct ifnet *, u_long, caddr_t); +void hatm_initialize(struct hatm_softc *); +void hatm_stop(struct hatm_softc *sc); +void hatm_start(struct ifnet *); + +void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m, + u_int len); +void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t); + +int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *); +void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid); +void hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid); +void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid); +void hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid); +void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid); +void hatm_vcc_closed(struct hatm_softc *sc, u_int cid); diff --git a/sys/modules/Makefile b/sys/modules/Makefile index 70b35b509579..e0aaf6d1f929 100644 --- a/sys/modules/Makefile +++ b/sys/modules/Makefile @@ -36,6 +36,7 @@ SUBDIR= accf_data \ fxp \ geom \ gx \ + hatm \ hifn \ if_disc \ if_ef \ diff --git a/sys/modules/hatm/Makefile 
b/sys/modules/hatm/Makefile new file mode 100644 index 000000000000..807740e25c0f --- /dev/null +++ b/sys/modules/hatm/Makefile @@ -0,0 +1,21 @@ +# $FreeBSD$ +# +# Author: Harti Brandt +# +.PATH: ${.CURDIR}/../../dev/hatm + +KMOD= if_hatm +SRCS= if_hatm.c if_hatm_intr.c if_hatm_ioctl.c if_hatm_tx.c if_hatm_rx.c \ + device_if.h bus_if.h pci_if.h opt_inet.h opt_natm.h + +CFLAGS+= -DENABLE_BPF +# CFLAGS+= -DHATM_DEBUG -DINVARIANT_SUPPORT -DINVARIANTS -g +# LDFLAGS+= -g + +opt_inet.h: + echo "#define INET 1" > opt_inet.h + +opt_natm.h: + echo "#define NATM 1" > opt_natm.h + +.include