Fix four more bugs in bfe:

- Fix bfe_encap so that it passes the address of the mbuf back up to its
  caller if/when it modifies the chain, as it does when calling m_defrag on
  an mbuf chain (a sketch of the pattern follows this list).
- Make sure to unload the dmamap for ALL fragments of a packet, not just
  the first.
- Use BUS_DMA_NOWAIT for all bus_dmamap_load calls so that map loads are
  never deferred; this driver is not set up to handle deferred loads.
- Reduce the number of RX and TX buffers bfe uses so that it does not
  request more bounce buffers than busdma is willing to provide.
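
The first item is the standard pass-by-reference mbuf fix: m_defrag(9) may
free the original chain and return a freshly allocated one, so an encap
routine that takes the head by value can defragment successfully and still
leave its caller holding a freed pointer. A minimal sketch of the corrected
contract (drv_encap and drv_softc are hypothetical stand-ins; the real
change to bfe_encap is in the diff below):

	static int
	drv_encap(struct drv_softc *sc, struct mbuf **m_head)
	{
		struct mbuf *m;

		/*
		 * m_defrag() frees *m_head on success, so the new head
		 * must be published through the caller's pointer.
		 */
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);	/* original chain still valid */
		*m_head = m;
		/* ... load DMA maps and fill descriptors from *m_head ... */
		return (0);
	}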

With these changes, the driver now works properly for a user with a 2GB
system, and it also works on my system when the acceptable DMA address
range is lowered to 128MB. Previously, both setups would start misbehaving
after a few minutes of activity.
Mike Silbersack 2006-05-04 07:41:01 +00:00
parent de916c8b74
commit 5511c4d6dc
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=158285
2 changed files with 22 additions and 17 deletions

sys/dev/bfe/if_bfe.c

@@ -297,7 +297,7 @@ bfe_dma_alloc(device_t dev)
 	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
 	error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map,
 			sc->bfe_rx_list, sizeof(struct bfe_desc),
-			bfe_dma_map, &sc->bfe_rx_dma, 0);
+			bfe_dma_map, &sc->bfe_rx_dma, BUS_DMA_NOWAIT);
 	if(error)
 		return (ENOMEM);
@@ -312,7 +312,7 @@ bfe_dma_alloc(device_t dev)
 	error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map,
 			sc->bfe_tx_list, sizeof(struct bfe_desc),
-			bfe_dma_map, &sc->bfe_tx_dma, 0);
+			bfe_dma_map, &sc->bfe_tx_dma, BUS_DMA_NOWAIT);
 	if(error)
 		return (ENOMEM);
@@ -572,6 +572,7 @@ bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m)
 	struct bfe_desc *d;
 	struct bfe_data *r;
 	u_int32_t ctrl;
+	int error;

 	if ((c < 0) || (c >= BFE_RX_LIST_CNT))
 		return (EINVAL);
@@ -593,8 +594,10 @@ bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m)
 	sc->bfe_rx_cnt = c;
 	d = &sc->bfe_rx_list[c];
 	r = &sc->bfe_rx_ring[c];
-	bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *),
-			MCLBYTES, bfe_dma_map_desc, d, 0);
+	error = bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *),
+			MCLBYTES, bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
+	if (error)
+		printf("Serious error: bfe failed to map RX buffer\n");
 	bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREWRITE);

 	ctrl = ETHER_MAX_LEN + 32;
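
A note on the BUS_DMA_NOWAIT changes: with flags of 0, bus_dmamap_load(9)
may return EINPROGRESS and complete the load later, invoking the callback
once bounce pages become available. bfe's callbacks (bfe_dma_map,
bfe_dma_map_desc) only record the segment address, and nothing in the
driver waits for a deferred completion, so a deferred load would leave a
descriptor pointing at an address that was never filled in. BUS_DMA_NOWAIT
turns the deferral into an immediate error, roughly (generic placeholder
names):

	error = bus_dmamap_load(tag, map, buf, buflen, callback, cb_arg,
	    BUS_DMA_NOWAIT);
	if (error != 0) {
		/* No callback is pending; recover synchronously here. */
	}
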
@@ -1100,8 +1103,8 @@ bfe_txeof(struct bfe_softc *sc)
 			ifp->if_opackets++;
 			m_freem(r->bfe_mbuf);
 			r->bfe_mbuf = NULL;
-			bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
 		}
+		bus_dmamap_unload(sc->bfe_tag, r->bfe_map);
 		sc->bfe_tx_cnt--;
 		BFE_INC(i, BFE_TX_LIST_CNT);
 	}
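
The bfe_txeof hunk above is the all-fragments fix: bfe_encap stores the
mbuf pointer only in the ring slot of a packet's last fragment, but every
slot in the chain has a DMA map loaded. With the unload inside the if,
maps for the leading fragments were never unloaded, leaking any bounce
pages they held. The corrected reclaim step, condensed (the surrounding
index bookkeeping is omitted):

	r = &sc->bfe_tx_ring[i];
	if (r->bfe_mbuf != NULL) {	/* set only on the last fragment */
		m_freem(r->bfe_mbuf);
		r->bfe_mbuf = NULL;
	}
	bus_dmamap_unload(sc->bfe_tag, r->bfe_map);	/* every fragment */
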
@@ -1239,13 +1242,14 @@ bfe_intr(void *xsc)
 }

 static int
-bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
+bfe_encap(struct bfe_softc *sc, struct mbuf **m_head, u_int32_t *txidx)
 {
 	struct bfe_desc *d = NULL;
 	struct bfe_data *r = NULL;
 	struct mbuf *m;
 	u_int32_t frag, cur, cnt = 0;
 	int chainlen = 0;
+	int error;

 	if(BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2)
 		return (ENOBUFS);
@@ -1256,16 +1260,16 @@ bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
 	 * by all packets, we'll m_defrag long chains so that they
 	 * do not use up the entire list, even if they would fit.
 	 */
-	for(m = m_head; m != NULL; m = m->m_next)
+	for(m = *m_head; m != NULL; m = m->m_next)
 		chainlen++;

 	if ((chainlen > BFE_TX_LIST_CNT / 4) ||
 	    ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) {
-		m = m_defrag(m_head, M_DONTWAIT);
+		m = m_defrag(*m_head, M_DONTWAIT);
 		if (m == NULL)
 			return (ENOBUFS);
-		m_head = m;
+		*m_head = m;
 	}

 	/*
@@ -1273,11 +1277,10 @@ bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
 	 * the fragment pointers. Stop when we run out
 	 * of fragments or hit the end of the mbuf chain.
 	 */
-	m = m_head;
 	cur = frag = *txidx;
 	cnt = 0;

-	for(m = m_head; m != NULL; m = m->m_next) {
+	for(m = *m_head; m != NULL; m = m->m_next) {
 		if(m->m_len != 0) {
 			if((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2)
 				return (ENOBUFS);
@@ -1297,9 +1300,11 @@ bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
 				 */
 				d->bfe_ctrl |= BFE_DESC_EOT;

-			bus_dmamap_load(sc->bfe_tag,
+			error = bus_dmamap_load(sc->bfe_tag,
 			    r->bfe_map, mtod(m, void*), m->m_len,
-			    bfe_dma_map_desc, d, 0);
+			    bfe_dma_map_desc, d, BUS_DMA_NOWAIT);
+			if (error)
+				return (ENOBUFS);
 			bus_dmamap_sync(sc->bfe_tag, r->bfe_map,
 			    BUS_DMASYNC_PREWRITE);
@@ -1313,7 +1318,7 @@ bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
 		return (ENOBUFS);

 	sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF;
-	sc->bfe_tx_ring[frag].bfe_mbuf = m_head;
+	sc->bfe_tx_ring[frag].bfe_mbuf = *m_head;
 	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREWRITE);

 	*txidx = cur;
@@ -1366,7 +1371,7 @@ bfe_start_locked(struct ifnet *ifp)
 		 * Pack the data into the tx ring. If we dont have
 		 * enough room, let the chip drain the ring.
 		 */
-		if(bfe_encap(sc, m_head, &idx)) {
+		if(bfe_encap(sc, &m_head, &idx)) {
 			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
 			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
 			break;
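
On the caller side, bfe_start_locked now passes &m_head, so when bfe_encap
fails after an internal m_defrag has already replaced the chain, the chain
prepended back onto the send queue is the one that still exists. The error
path, condensed from the hunk above:

	if (bfe_encap(sc, &m_head, &idx)) {
		/* m_head may have been updated inside bfe_encap. */
		IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		break;
	}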

sys/dev/bfe/if_bfereg.h

@@ -427,8 +427,8 @@
 #define BFE_RX_RING_SIZE	512
 #define BFE_TX_RING_SIZE	512
 #define BFE_LINK_DOWN		5
-#define BFE_TX_LIST_CNT		511
-#define BFE_RX_LIST_CNT		511
+#define BFE_TX_LIST_CNT		128
+#define BFE_RX_LIST_CNT		128
 #define BFE_TX_LIST_SIZE	BFE_TX_LIST_CNT * sizeof(struct bfe_desc)
 #define BFE_RX_LIST_SIZE	BFE_RX_LIST_CNT * sizeof(struct bfe_desc)
 #define BFE_RX_OFFSET		30
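
Why 511 down to 128: bfe's busdma tags restrict DMA to a low address
window (the BCM440x cannot reach all of a 2GB machine's memory), so
buffers above that window need bounce pages. Rough worst-case arithmetic,
assuming one 2KB (MCLBYTES) cluster per list entry:

	/*
	 * Old: (511 RX + 511 TX) * 2KB = ~2MB of data that may need bouncing.
	 * New: (128 RX + 128 TX) * 2KB = 512KB.
	 */

The exact bounce-page budget is platform-dependent, but the old worst case
could exhaust it, at which point the (now BUS_DMA_NOWAIT) map loads simply
fail; 128 entries per list keeps the demand well inside the budget.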