/*
 * Copyright (c) 2001-2003
 *	Fraunhofer Institute for Open Communication Systems (FhG Fokus).
 *	All rights reserved.
 *
 * Author: Hartmut Brandt <harti@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * ForeHE driver.
 *
 * Interrupt handler.
 */

#include "opt_inet.h"
#include "opt_natm.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/errno.h>
#include <sys/conf.h>
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/condvar.h>
#include <sys/sysctl.h>
#include <vm/uma.h>

#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_atm.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/if_atm.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/utopia/utopia.h>
#include <dev/hatm/if_hatmconf.h>
#include <dev/hatm/if_hatmreg.h>
#include <dev/hatm/if_hatmvar.h>
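
/* Compile-time sanity checks on the external mbuf page and chunk layout. */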
CTASSERT(sizeof(struct mbuf_page) == MBUF_ALLOC_SIZE);
CTASSERT(sizeof(struct mbuf0_chunk) == MBUF0_CHUNK);
CTASSERT(sizeof(struct mbuf1_chunk) == MBUF1_CHUNK);
CTASSERT(sizeof(((struct mbuf0_chunk *)NULL)->storage) >= MBUF0_SIZE);
CTASSERT(sizeof(((struct mbuf1_chunk *)NULL)->storage) >= MBUF1_SIZE);
CTASSERT(sizeof(struct tpd) <= HE_TPD_SIZE);

static void hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group);

/*
 * Free an external mbuf to a list. We use atomic functions so that
 * we don't need a mutex for the list.
 */
static __inline void
hatm_ext_free(struct mbufx_free **list, struct mbufx_free *buf)
{
        for (;;) {
                buf->link = *list;
                if (atomic_cmpset_ptr(list, buf->link, buf))
                        break;
        }
}
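
/*
 * Allocate an external mbuf chunk from the lock-free free list of the
 * given group. If the list is empty, try to allocate a new page of
 * chunks and retry once.
 */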
static __inline struct mbufx_free *
hatm_ext_alloc(struct hatm_softc *sc, u_int g)
{
        struct mbufx_free *buf;

        for (;;) {
                if ((buf = sc->mbuf_list[g]) == NULL)
                        break;
                if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf, buf->link))
                        break;
        }
        if (buf == NULL) {
                hatm_mbuf_page_alloc(sc, g);
                for (;;) {
                        if ((buf = sc->mbuf_list[g]) == NULL)
                                break;
                        if (atomic_cmpset_ptr(&sc->mbuf_list[g], buf,
                            buf->link))
                                break;
                }
        }
        return (buf);
}

/*
 * Either the queue threshold was crossed or a TPD with the INTR bit set
 * was transmitted.
 */
static void
he_intr_tbrq(struct hatm_softc *sc, struct hetbrq *q, u_int group)
{
        uint32_t *tailp = &sc->hsp->group[group].tbrq_tail;
        u_int no;

        while (q->head != (*tailp >> 2)) {
                no = (q->tbrq[q->head].addr & HE_REGM_TBRQ_ADDR) >>
                    HE_REGS_TPD_ADDR;
                hatm_tx_complete(sc, TPD_ADDR(sc, no),
                    (q->tbrq[q->head].addr & HE_REGM_TBRQ_FLAGS));

                if (++q->head == q->size)
                        q->head = 0;
        }
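        /* tell the card the new head position of the queue */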
        WRITE4(sc, HE_REGO_TBRQ_H(group), q->head << 2);
}

/*
 * DMA loader function for external mbuf page.
 */
static void
hatm_extbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs,
    int error)
{
        if (error) {
                printf("%s: mapping error %d\n", __func__, error);
                return;
        }
        KASSERT(nsegs == 1,
            ("too many segments for DMA: %d", nsegs));
        KASSERT(segs[0].ds_addr <= 0xffffffffLU,
            ("phys addr too large %lx", (u_long)segs[0].ds_addr));

        *(uint32_t *)arg = segs[0].ds_addr;
}

/*
 * Allocate a page of external mbuf storage for the small pools.
 * Create a DMA map and load it. Put all the chunks onto the right
 * free list.
 */
static void
hatm_mbuf_page_alloc(struct hatm_softc *sc, u_int group)
{
        struct mbuf_page *pg;
        int err;
        u_int i;

        if (sc->mbuf_npages == HE_CONFIG_MAX_MBUF_PAGES)
                return;
        if ((pg = malloc(MBUF_ALLOC_SIZE, M_DEVBUF, M_NOWAIT)) == NULL)
                return;
        bzero(pg->hdr.card, sizeof(pg->hdr.card));

        err = bus_dmamap_create(sc->mbuf_tag, 0, &pg->hdr.map);
        if (err != 0) {
                if_printf(&sc->ifatm.ifnet, "%s -- bus_dmamap_create: %d\n",
                    __func__, err);
                free(pg, M_DEVBUF);
                return;
        }
        err = bus_dmamap_load(sc->mbuf_tag, pg->hdr.map, pg, MBUF_ALLOC_SIZE,
            hatm_extbuf_helper, &pg->hdr.phys, BUS_DMA_NOWAIT);
        if (err != 0) {
                if_printf(&sc->ifatm.ifnet, "%s -- mbuf mapping failed %d\n",
                    __func__, err);
                bus_dmamap_destroy(sc->mbuf_tag, pg->hdr.map);
                free(pg, M_DEVBUF);
                return;
        }

        sc->mbuf_pages[sc->mbuf_npages] = pg;
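
        /* slice the page into chunks and put them onto the group's free list */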
        if (group == 0) {
                struct mbuf0_chunk *c;

                pg->hdr.nchunks = MBUF0_PER_PAGE;
                pg->hdr.chunksize = MBUF0_CHUNK;
                pg->hdr.hdroff = sizeof(c->storage);
                c = (struct mbuf0_chunk *)pg;
                for (i = 0; i < MBUF0_PER_PAGE; i++, c++) {
                        c->hdr.pageno = sc->mbuf_npages;
                        c->hdr.chunkno = i;
                        hatm_ext_free(&sc->mbuf_list[0],
                            (struct mbufx_free *)c);
                }
        } else {
                struct mbuf1_chunk *c;

                pg->hdr.nchunks = MBUF1_PER_PAGE;
                pg->hdr.chunksize = MBUF1_CHUNK;
                pg->hdr.hdroff = sizeof(c->storage);
                c = (struct mbuf1_chunk *)pg;
                for (i = 0; i < MBUF1_PER_PAGE; i++, c++) {
                        c->hdr.pageno = sc->mbuf_npages;
                        c->hdr.chunkno = i;
                        hatm_ext_free(&sc->mbuf_list[1],
                            (struct mbufx_free *)c);
                }
        }
        sc->mbuf_npages++;
}

/*
 * Free an external mbuf chunk and put it back onto the free list.
 */
static void
hatm_mbuf0_free(void *buf, void *args)
{
        struct hatm_softc *sc = args;
        struct mbuf0_chunk *c = buf;

        hatm_ext_free(&sc->mbuf_list[0], (struct mbufx_free *)c);
}
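
/*
 * Same for the group 1 chunks.
 */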
static void
hatm_mbuf1_free(void *buf, void *args)
{
        struct hatm_softc *sc = args;
        struct mbuf1_chunk *c = buf;

        hatm_ext_free(&sc->mbuf_list[1], (struct mbufx_free *)c);
}

/*
 * Allocate external mbuf storage from one of the small pools.
 */
static int
hatm_mbuf_alloc(struct hatm_softc *sc, u_int group, uint32_t *phys,
    uint32_t *handle)
{
        struct mbufx_free *cf;
        struct mbuf_page *pg;

        if (group == 0) {
                struct mbuf0_chunk *buf0;

                if ((cf = hatm_ext_alloc(sc, 0)) == NULL)
                        return (-1);
                buf0 = (struct mbuf0_chunk *)cf;
                pg = sc->mbuf_pages[buf0->hdr.pageno];
                MBUF_SET_BIT(pg->hdr.card, buf0->hdr.chunkno);

                *handle = MBUF_MAKE_HANDLE(buf0->hdr.pageno,
                    buf0->hdr.chunkno);
                *phys = pg->hdr.phys + buf0->hdr.chunkno * MBUF0_CHUNK +
                    MBUF0_OFFSET;

        } else if (group == 1) {
                struct mbuf1_chunk *buf1;

                if ((cf = hatm_ext_alloc(sc, 1)) == NULL)
                        return (-1);
                buf1 = (struct mbuf1_chunk *)cf;
                pg = sc->mbuf_pages[buf1->hdr.pageno];
                MBUF_SET_BIT(pg->hdr.card, buf1->hdr.chunkno);

                *handle = MBUF_MAKE_HANDLE(buf1->hdr.pageno,
                    buf1->hdr.chunkno);
                *phys = pg->hdr.phys + buf1->hdr.chunkno * MBUF1_CHUNK +
                    MBUF1_OFFSET;

        } else
                return (-1);

        bus_dmamap_sync(sc->mbuf_tag, pg->hdr.map, BUS_DMASYNC_PREREAD);

        return (0);
}
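
/*
 * DMA load callback for receive mbufs: store the bus address of the
 * single segment.
 */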
static void
hatm_mbuf_helper(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        uint32_t *ptr = (uint32_t *)arg;

        if (nsegs == 0) {
                printf("%s: error=%d\n", __func__, error);
                return;
        }
        KASSERT(nsegs == 1, ("too many segments for mbuf: %d", nsegs));
        KASSERT(segs[0].ds_addr <= 0xffffffffLU,
            ("phys addr too large %lx", (u_long)segs[0].ds_addr));

        *ptr = segs[0].ds_addr;
}

/*
 * Receive buffer pool interrupt. This means the number of entries in the
 * queue has dropped below the threshold. Try to supply new buffers.
 */
static void
he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
    u_int group)
{
        u_int ntail;
        struct mbuf *m;
        int error;

        DBG(sc, INTR, ("%s buffer supply threshold crossed for group %u",
            large ? "large" : "small", group));
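
        /* fetch the current head index from the card */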
        rbp->head = (READ4(sc, HE_REGO_RBP_S(large, group)) >> HE_REGS_RBP_HEAD)
            & (rbp->size - 1);

        for (;;) {
                if ((ntail = rbp->tail + 1) == rbp->size)
                        ntail = 0;
                if (ntail == rbp->head)
                        break;

                if (large) {
                        /* allocate the MBUF */
                        if ((m = m_getcl(M_DONTWAIT, MT_DATA,
                            M_PKTHDR)) == NULL) {
                                if_printf(&sc->ifatm.ifnet,
                                    "no mbuf clusters\n");
                                break;
                        }
                        m->m_data += MBUFL_OFFSET;

                        if (sc->lbufs[sc->lbufs_next] != NULL)
                                panic("hatm: lbufs full %u", sc->lbufs_next);
                        sc->lbufs[sc->lbufs_next] = m;

                        if ((error = bus_dmamap_load(sc->mbuf_tag,
                            sc->rmaps[sc->lbufs_next],
                            m->m_data, rbp->bsize, hatm_mbuf_helper,
                            &rbp->rbp[rbp->tail].phys, BUS_DMA_NOWAIT)) != 0)
                                panic("hatm: mbuf mapping failed %d", error);

                        bus_dmamap_sync(sc->mbuf_tag,
                            sc->rmaps[sc->lbufs_next],
                            BUS_DMASYNC_PREREAD);

                        rbp->rbp[rbp->tail].handle = sc->lbufs_next |
                            MBUF_LARGE_FLAG;

                        if (++sc->lbufs_next == sc->lbufs_size)
                                sc->lbufs_next = 0;

                } else {
                        m = NULL;
                        if (hatm_mbuf_alloc(sc, group,
                            &rbp->rbp[rbp->tail].phys,
                            &rbp->rbp[rbp->tail].handle)) {
                                m_freem(m);
                                break;
                        }
                }
                DBG(sc, DMA, ("MBUF loaded: handle=%x m=%p phys=%x",
                    rbp->rbp[rbp->tail].handle, m, rbp->rbp[rbp->tail].phys));
                rbp->rbp[rbp->tail].handle <<= HE_REGS_RBRQ_ADDR;

                rbp->tail = ntail;
        }
        WRITE4(sc, HE_REGO_RBP_T(large, group),
            (rbp->tail << HE_REGS_RBP_TAIL));
}

/*
 * Extract the buffer and hand it to the receive routine
 */
static struct mbuf *
hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
{
        u_int pageno;
        u_int chunkno;
        struct mbuf *m;

        if (handle & MBUF_LARGE_FLAG) {
                /* large buffer - sync and unload */
                handle &= ~MBUF_LARGE_FLAG;
                DBG(sc, RX, ("RX large handle=%x", handle));

                bus_dmamap_sync(sc->mbuf_tag, sc->rmaps[handle],
                    BUS_DMASYNC_POSTREAD);
                bus_dmamap_unload(sc->mbuf_tag, sc->rmaps[handle]);

                m = sc->lbufs[handle];
                sc->lbufs[handle] = NULL;

                return (m);
        }

        MBUF_PARSE_HANDLE(handle, pageno, chunkno);
        MBUF_CLR_BIT(sc->mbuf_pages[pageno]->hdr.card, chunkno);

        DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
            pageno, chunkno));

        MGETHDR(m, M_DONTWAIT, MT_DATA);

        if (group == 0) {
                struct mbuf0_chunk *c0;

                c0 = (struct mbuf0_chunk *)sc->mbuf_pages[pageno] + chunkno;
                KASSERT(c0->hdr.pageno == pageno, ("pageno = %u/%u",
                    c0->hdr.pageno, pageno));
                KASSERT(c0->hdr.chunkno == chunkno, ("chunkno = %u/%u",
                    c0->hdr.chunkno, chunkno));

                if (m != NULL) {
                        m_extadd(m, (void *)c0, MBUF0_SIZE,
                            hatm_mbuf0_free, sc, M_PKTHDR, EXT_NET_DRV);
                        m->m_data += MBUF0_OFFSET;
                } else
                        hatm_mbuf0_free(c0, sc);

        } else {
                struct mbuf1_chunk *c1;

                c1 = (struct mbuf1_chunk *)sc->mbuf_pages[pageno] + chunkno;
                KASSERT(c1->hdr.pageno == pageno, ("pageno = %u/%u",
                    c1->hdr.pageno, pageno));
                KASSERT(c1->hdr.chunkno == chunkno, ("chunkno = %u/%u",
                    c1->hdr.chunkno, chunkno));

                if (m != NULL) {
                        m_extadd(m, (void *)c1, MBUF1_SIZE,
                            hatm_mbuf1_free, sc, M_PKTHDR, EXT_NET_DRV);
                        m->m_data += MBUF1_OFFSET;
                } else
                        hatm_mbuf1_free(c1, sc);
        }

        return (m);
}

/*
 * Interrupt because of receive buffer returned.
 */
static void
he_intr_rbrq(struct hatm_softc *sc, struct herbrq *rq, u_int group)
{
        struct he_rbrqen *e;
        uint32_t flags, tail;
        u_int cid, len;
        struct mbuf *m;

        for (;;) {
                tail = sc->hsp->group[group].rbrq_tail >> 3;

                if (rq->head == tail)
                        break;

                e = &rq->rbrq[rq->head];

                flags = e->addr & HE_REGM_RBRQ_FLAGS;
                if (!(flags & HE_REGM_RBRQ_HBUF_ERROR))
                        m = hatm_rx_buffer(sc, group,
                            (e->addr & HE_REGM_RBRQ_ADDR) >> HE_REGS_RBRQ_ADDR);
                else
                        m = NULL;

                cid = (e->len & HE_REGM_RBRQ_CID) >> HE_REGS_RBRQ_CID;
                len = 4 * (e->len & HE_REGM_RBRQ_LEN);

                hatm_rx(sc, cid, flags, m, len);

                if (++rq->head == rq->size)
                        rq->head = 0;
        }
        WRITE4(sc, HE_REGO_RBRQ_H(group), rq->head << 3);
}
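
/*
 * The interrupt handler. Drain this group's interrupt queue and dispatch
 * on the type of each status word.
 */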
void
hatm_intr(void *p)
{
        struct heirq *q = p;
        struct hatm_softc *sc = q->sc;
        u_int status;
        u_int tail;

        /*
         * If we get a stray interrupt while the card is not initialized,
         * we cannot even take the lock before looking at the flag.
         */
        if (!(sc->ifatm.ifnet.if_flags & IFF_RUNNING))
                return;

        mtx_lock(&sc->mtx);
        (void)READ4(sc, HE_REGO_INT_FIFO);

        tail = *q->tailp;
        if (q->head == tail) {
                /* workaround for tail pointer not updated bug (8.1.1) */
                DBG(sc, INTR, ("hatm: intr tailq not updated bug triggered"));

                /* read the tail pointer from the card */
                tail = READ4(sc, HE_REGO_IRQ_BASE(q->group)) &
                    HE_REGM_IRQ_BASE_TAIL;
                BARRIER_R(sc);

                sc->istats.bug_no_irq_upd++;
        }

        /* clear the interrupt */
        WRITE4(sc, HE_REGO_INT_FIFO, HE_REGM_INT_FIFO_CLRA);
        BARRIER_W(sc);
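
        /* process all entries in the interrupt queue */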
        while (q->head != tail) {
                status = q->irq[q->head];
                q->irq[q->head] = HE_REGM_ITYPE_INVALID;
                if (++q->head == (q->size - 1))
                        q->head = 0;

                switch (status & HE_REGM_ITYPE) {

                case HE_REGM_ITYPE_TBRQ:
                        DBG(sc, INTR, ("TBRQ threshold %u",
                            status & HE_REGM_IGROUP));
                        sc->istats.itype_tbrq++;
                        he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
                        break;

                case HE_REGM_ITYPE_TPD:
                        DBG(sc, INTR, ("TPD ready %u",
                            status & HE_REGM_IGROUP));
                        sc->istats.itype_tpd++;
                        he_intr_tbrq(sc, &sc->tbrq, status & HE_REGM_IGROUP);
                        break;

                case HE_REGM_ITYPE_RBPS:
                        sc->istats.itype_rbps++;
                        switch (status & HE_REGM_IGROUP) {

                        case 0:
                                he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
                                break;

                        case 1:
                                he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
                                break;

                        default:
                                if_printf(&sc->ifatm.ifnet,
                                    "bad INTR RBPS%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                case HE_REGM_ITYPE_RBPL:
                        sc->istats.itype_rbpl++;
                        switch (status & HE_REGM_IGROUP) {

                        case 0:
                                he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
                                break;

                        default:
                                if_printf(&sc->ifatm.ifnet,
                                    "bad INTR RBPL%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                case HE_REGM_ITYPE_RBRQ:
                        DBG(sc, INTR, ("INTERRUPT RBRQ %u",
                            status & HE_REGM_IGROUP));
                        sc->istats.itype_rbrq++;
                        switch (status & HE_REGM_IGROUP) {

                        case 0:
                                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                                break;

                        case 1:
                                if (sc->rbrq_1.size > 0) {
                                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                                        break;
                                }
                                /* FALLTHRU */

                        default:
                                if_printf(&sc->ifatm.ifnet,
                                    "bad INTR RBRQ%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                case HE_REGM_ITYPE_RBRQT:
                        DBG(sc, INTR, ("INTERRUPT RBRQT %u",
                            status & HE_REGM_IGROUP));
                        sc->istats.itype_rbrqt++;
                        switch (status & HE_REGM_IGROUP) {

                        case 0:
                                he_intr_rbrq(sc, &sc->rbrq_0, 0);
                                break;

                        case 1:
                                if (sc->rbrq_1.size > 0) {
                                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                                        break;
                                }
                                /* FALLTHRU */

                        default:
                                if_printf(&sc->ifatm.ifnet,
                                    "bad INTR RBRQT%u\n",
                                    status & HE_REGM_IGROUP);
                                break;
                        }
                        break;

                case HE_REGM_ITYPE_PHYS:
                        sc->istats.itype_phys++;
                        utopia_intr(&sc->utopia);
                        break;

#if HE_REGM_ITYPE_UNKNOWN != HE_REGM_ITYPE_INVALID
                case HE_REGM_ITYPE_UNKNOWN:
                        sc->istats.itype_unknown++;
                        if_printf(&sc->ifatm.ifnet, "bad interrupt\n");
                        break;
#endif

                case HE_REGM_ITYPE_ERR:
                        sc->istats.itype_err++;
                        switch (status) {

                        case HE_REGM_ITYPE_PERR:
                                if_printf(&sc->ifatm.ifnet, "parity error\n");
                                break;

                        case HE_REGM_ITYPE_ABORT:
                                if_printf(&sc->ifatm.ifnet, "abort interrupt "
                                    "addr=0x%08x\n",
                                    READ4(sc, HE_REGO_ABORT_ADDR));
                                break;

                        default:
                                if_printf(&sc->ifatm.ifnet,
                                    "bad interrupt type %08x\n", status);
                                break;
                        }
                        break;

                case HE_REGM_ITYPE_INVALID:
                        /*
                         * This is the documented fix for the ISW bug 8.1.1.
                         * Note that the documented fix is partly wrong:
                         * the ISWs should be initialized to 0xf8, not 0xff.
                         */
                        sc->istats.bug_bad_isw++;
                        DBG(sc, INTR, ("hatm: invalid ISW bug triggered"));
                        he_intr_tbrq(sc, &sc->tbrq, 0);
                        he_intr_rbp(sc, &sc->rbp_s0, 0, 0);
                        he_intr_rbp(sc, &sc->rbp_l0, 1, 0);
                        he_intr_rbp(sc, &sc->rbp_s1, 0, 1);
                        he_intr_rbrq(sc, &sc->rbrq_0, 0);
                        he_intr_rbrq(sc, &sc->rbrq_1, 1);
                        utopia_intr(&sc->utopia);
                        break;

                default:
                        if_printf(&sc->ifatm.ifnet,
                            "bad interrupt type %08x\n", status);
                        break;
                }
        }

        /* write back head to clear queue */
        WRITE4(sc, HE_REGO_IRQ_HEAD(0),
            ((q->size - 1) << HE_REGS_IRQ_HEAD_SIZE) |
            (q->thresh << HE_REGS_IRQ_HEAD_THRESH) |
            (q->head << HE_REGS_IRQ_HEAD_HEAD));
        BARRIER_W(sc);

        /* workaround the back-to-back irq access problem (8.1.2) */
        (void)READ4(sc, HE_REGO_INT_FIFO);
        BARRIER_R(sc);

        mtx_unlock(&sc->mtx);
}