MFC r198923-198924,198927-198928

r198923:
  Use the correct DMA tag for the jumbo buffer.

r198924:
  Convert bge_newbuf_std() to use bus_dmamap_load_mbuf_sg(9). Note
  that bge_newbuf_std() still has a bug in handling DMA map load
  failures under high network load: just reusing the mbuf is not
  enough, because the driver has already unloaded the mbuf's DMA
  map. Graceful recovery needs more work.
  Ideally we could update only the DMA address part of an Rx
  descriptor, because the controller never overwrites Rx
  descriptors. That requires some Rx initialization code changes
  and will be done later, after fixing other incorrect bus_dma(9)
  usage.
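
  For reference, a common way to get the graceful recovery described
  above is to load the replacement mbuf into a spare DMA map first and
  tear down the old mapping only after that load has succeeded, so a
  failure leaves the previously posted buffer in place. The sketch
  below is illustrative only: the bge_rx_std_sparemap member is an
  assumption and does not exist in the driver at this revision, and
  this is not what the commit itself implements.

    /*
     * Hypothetical sketch (assumes a spare Rx map in bge_chain_data).
     */
    static int
    bge_newbuf_std_sketch(struct bge_softc *sc, int i)
    {
            struct mbuf *m;
            struct bge_rx_bd *r;
            bus_dma_segment_t segs[1];
            bus_dmamap_t map;
            int error, nsegs;

            m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
            if (m == NULL)
                    return (ENOBUFS);
            m->m_len = m->m_pkthdr.len = MCLBYTES;
            if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
                    m_adj(m, ETHER_ALIGN);

            /* Load into the spare map; on failure the ring slot is untouched. */
            error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
                sc->bge_cdata.bge_rx_std_sparemap, m, segs, &nsegs, 0);
            if (error != 0) {
                    m_freem(m);
                    return (error);
            }

            /* Only now is it safe to unload the old buffer's mapping. */
            if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
                    bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i],
                        BUS_DMASYNC_POSTREAD);
                    bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
                        sc->bge_cdata.bge_rx_std_dmamap[i]);
            }

            /* Swap maps: the loaded spare becomes the slot's map. */
            map = sc->bge_cdata.bge_rx_std_dmamap[i];
            sc->bge_cdata.bge_rx_std_dmamap[i] =
                sc->bge_cdata.bge_rx_std_sparemap;
            sc->bge_cdata.bge_rx_std_sparemap = map;
            sc->bge_cdata.bge_rx_std_chain[i] = m;

            r = &sc->bge_ldata.bge_rx_std_ring[i];
            r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
            r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
            r->bge_flags = BGE_RXBDFLAG_END;
            r->bge_len = segs[0].ds_len;
            r->bge_idx = i;

            bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
                sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD);

            return (0);
    }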

r198927:
  Remove the common DMA tag used for Tx/Rx mbufs and create
  separate Tx and Rx DMA tags. Previously the driver used a single
  mbuf DMA tag for both the Tx and Rx paths, but an Rx buffer
  (standard ring case) must map to a single DMA segment whose size
  is less than or equal to MCLBYTES. This change also makes it
  possible to add TSO with minor changes.
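
  The corresponding bus_dma_tag_create(9) calls appear in the
  bge_dma_alloc() hunk further down; they are restated here with the
  segment parameters annotated (arguments as in the diff, trimmed for
  readability):

    /* Tx tag: up to BGE_NSEG_NEW segments of at most MCLBYTES each. */
    error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
        1, 0,                               /* alignment, boundary */
        BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        MCLBYTES * BGE_NSEG_NEW,            /* maxsize */
        BGE_NSEG_NEW,                       /* nsegments */
        MCLBYTES,                           /* maxsegsize */
        BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_tx_mtag);

    /*
     * Rx tag: a standard-ring buffer must be a single segment no
     * larger than MCLBYTES, so maxsize == maxsegsize == MCLBYTES
     * and nsegments == 1.
     */
    error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
        1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
        MCLBYTES,                           /* maxsize */
        1,                                  /* nsegments */
        MCLBYTES,                           /* maxsegsize */
        BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);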

r198928:
  Make bge_newbuf_std()/bge_newbuf_jumbo() return the actual error
  code from buffer allocation. If the driver knows we are out of Rx
  buffers, stop the controller. This should fix a panic seen when
  the interface was brought up even though it had no configured Rx
  buffers.
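
  Previously the ring-initialization loops tested the return value
  against ENOBUFS only, so any other failure (for example the ENOMEM
  returned on a DMA map load error) slipped through and the interface
  was started with a partially populated ring. A condensed view of the
  corrected loop, matching the hunks below:

    /*
     * Propagate the real error to bge_init_locked(), which then
     * prints a diagnostic and calls bge_stop().
     */
    for (i = 0; i < BGE_SSLOTS; i++) {
            if ((error = bge_newbuf_std(sc, i, NULL)) != 0)
                    return (error);
    }
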
Pyun YongHyeon 2010-01-06 22:45:49 +00:00
parent d14e59b97c
commit 2c8f00d0c7
2 changed files with 73 additions and 54 deletions

sys/dev/bge/if_bge.c

@@ -916,8 +916,8 @@ bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
struct mbuf *m_new = NULL;
struct bge_rx_bd *r;
struct bge_dmamap_arg ctx;
int error;
bus_dma_segment_t segs[1];
int error, nsegs;
if (m == NULL) {
m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
@@ -932,27 +932,24 @@ bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
m_adj(m_new, ETHER_ALIGN);
sc->bge_cdata.bge_rx_std_chain[i] = m_new;
r = &sc->bge_ldata.bge_rx_std_ring[i];
ctx.bge_maxsegs = 1;
ctx.sc = sc;
error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
if (error || ctx.bge_maxsegs == 0) {
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i], m_new, segs, &nsegs, 0);
if (error != 0) {
if (m == NULL) {
sc->bge_cdata.bge_rx_std_chain[i] = NULL;
m_freem(m_new);
}
return (ENOMEM);
return (error);
}
r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
sc->bge_cdata.bge_rx_std_chain[i] = m_new;
r = &sc->bge_ldata.bge_rx_std_ring[i];
r->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
r->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
r->bge_flags = BGE_RXBDFLAG_END;
r->bge_len = m_new->m_len;
r->bge_len = segs[0].ds_len;
r->bge_idx = i;
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i],
BUS_DMASYNC_PREREAD);
@@ -1031,7 +1028,7 @@ bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
panic("%s: %d segments\n", __func__, nsegs);
}
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
sc->bge_cdata.bge_rx_jumbo_dmamap[i],
BUS_DMASYNC_PREREAD);
@@ -1047,11 +1044,11 @@ bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
static int
bge_init_rx_ring_std(struct bge_softc *sc)
{
int i;
int error, i;
for (i = 0; i < BGE_SSLOTS; i++) {
if (bge_newbuf_std(sc, i, NULL) == ENOBUFS)
return (ENOBUFS);
if ((error = bge_newbuf_std(sc, i, NULL)) != 0)
return (error);
};
bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag,
@@ -1071,10 +1068,10 @@ bge_free_rx_ring_std(struct bge_softc *sc)
for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) {
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->bge_cdata.bge_mtag,
bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i]);
m_freem(sc->bge_cdata.bge_rx_std_chain[i]);
sc->bge_cdata.bge_rx_std_chain[i] = NULL;
@@ -1088,11 +1085,11 @@ static int
bge_init_rx_ring_jumbo(struct bge_softc *sc)
{
struct bge_rcb *rcb;
int i;
int error, i;
for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
return (ENOBUFS);
if ((error = bge_newbuf_jumbo(sc, i, NULL)) != 0)
return (error);
};
bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag,
@@ -1141,10 +1138,10 @@ bge_free_tx_ring(struct bge_softc *sc)
for (i = 0; i < BGE_TX_RING_CNT; i++) {
if (sc->bge_cdata.bge_tx_chain[i] != NULL) {
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[i],
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bge_cdata.bge_mtag,
bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[i]);
m_freem(sc->bge_cdata.bge_tx_chain[i]);
sc->bge_cdata.bge_tx_chain[i] = NULL;
@@ -1979,7 +1976,7 @@ bge_dma_free(struct bge_softc *sc)
/* Destroy DMA maps for RX buffers. */
for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
if (sc->bge_cdata.bge_rx_std_dmamap[i])
bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
bus_dmamap_destroy(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[i]);
}
@@ -1993,12 +1990,14 @@ bge_dma_free(struct bge_softc *sc)
/* Destroy DMA maps for TX buffers. */
for (i = 0; i < BGE_TX_RING_CNT; i++) {
if (sc->bge_cdata.bge_tx_dmamap[i])
bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
bus_dmamap_destroy(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[i]);
}
if (sc->bge_cdata.bge_mtag)
bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);
if (sc->bge_cdata.bge_rx_mtag)
bus_dma_tag_destroy(sc->bge_cdata.bge_rx_mtag);
if (sc->bge_cdata.bge_tx_mtag)
bus_dma_tag_destroy(sc->bge_cdata.bge_tx_mtag);
/* Destroy standard RX ring. */
@@ -2109,21 +2108,33 @@ bge_dma_alloc(device_t dev)
}
/*
* Create tag for mbufs.
* Create tag for Tx mbufs.
*/
error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1,
0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES,
BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag);
BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_tx_mtag);
if (error) {
device_printf(sc->bge_dev, "could not allocate dma tag\n");
device_printf(sc->bge_dev, "could not allocate TX dma tag\n");
return (ENOMEM);
}
/*
* Create tag for Rx mbufs.
*/
error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_rx_mtag);
if (error) {
device_printf(sc->bge_dev, "could not allocate RX dma tag\n");
return (ENOMEM);
}
/* Create DMA maps for RX buffers. */
for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
error = bus_dmamap_create(sc->bge_cdata.bge_rx_mtag, 0,
&sc->bge_cdata.bge_rx_std_dmamap[i]);
if (error) {
device_printf(sc->bge_dev,
@@ -2134,11 +2145,11 @@ bge_dma_alloc(device_t dev)
/* Create DMA maps for TX buffers. */
for (i = 0; i < BGE_TX_RING_CNT; i++) {
error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0,
error = bus_dmamap_create(sc->bge_cdata.bge_tx_mtag, 0,
&sc->bge_cdata.bge_tx_dmamap[i]);
if (error) {
device_printf(sc->bge_dev,
"can't create DMA map for RX\n");
"can't create DMA map for TX\n");
return (ENOMEM);
}
}
@@ -3168,18 +3179,17 @@ bge_rxeof(struct bge_softc *sc)
bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
continue;
}
if (bge_newbuf_jumbo(sc,
sc->bge_jumbo, NULL) == ENOBUFS) {
if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL) != 0) {
ifp->if_ierrors++;
bge_newbuf_jumbo(sc, sc->bge_jumbo, m);
continue;
}
} else {
BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT);
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[rxidx],
BUS_DMASYNC_POSTREAD);
bus_dmamap_unload(sc->bge_cdata.bge_mtag,
bus_dmamap_unload(sc->bge_cdata.bge_rx_mtag,
sc->bge_cdata.bge_rx_std_dmamap[rxidx]);
m = sc->bge_cdata.bge_rx_std_chain[rxidx];
sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL;
@@ -3189,8 +3199,7 @@ bge_rxeof(struct bge_softc *sc)
bge_newbuf_std(sc, sc->bge_std, m);
continue;
}
if (bge_newbuf_std(sc, sc->bge_std,
NULL) == ENOBUFS) {
if (bge_newbuf_std(sc, sc->bge_std, NULL) != 0) {
ifp->if_ierrors++;
bge_newbuf_std(sc, sc->bge_std, m);
continue;
@@ -3309,10 +3318,10 @@ bge_txeof(struct bge_softc *sc)
if (cur_tx->bge_flags & BGE_TXBDFLAG_END)
ifp->if_opackets++;
if (sc->bge_cdata.bge_tx_chain[idx] != NULL) {
bus_dmamap_sync(sc->bge_cdata.bge_mtag,
bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[idx],
BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sc->bge_cdata.bge_mtag,
bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag,
sc->bge_cdata.bge_tx_dmamap[idx]);
m_freem(sc->bge_cdata.bge_tx_chain[idx]);
sc->bge_cdata.bge_tx_chain[idx] = NULL;
@@ -3645,7 +3654,7 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
}
map = sc->bge_cdata.bge_tx_dmamap[idx];
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs,
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
@@ -3655,8 +3664,8 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
return (ENOBUFS);
}
*m_head = m;
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m,
segs, &nsegs, BUS_DMA_NOWAIT);
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map,
m, segs, &nsegs, BUS_DMA_NOWAIT);
if (error) {
m_freem(m);
*m_head = NULL;
@@ -3670,11 +3679,11 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
* of the end of the ring.
*/
if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) {
bus_dmamap_unload(sc->bge_cdata.bge_mtag, map);
bus_dmamap_unload(sc->bge_cdata.bge_tx_mtag, map);
return (ENOBUFS);
}
bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE);
bus_dmamap_sync(sc->bge_cdata.bge_tx_mtag, map, BUS_DMASYNC_PREWRITE);
for (i = 0; ; i++) {
d = &sc->bge_ldata.bge_tx_ring[idx];
@@ -3886,7 +3895,11 @@ bge_init_locked(struct bge_softc *sc)
bge_setvlan(sc);
/* Init RX ring. */
bge_init_rx_ring_std(sc);
if (bge_init_rx_ring_std(sc) != 0) {
device_printf(sc->bge_dev, "no memory for std Rx buffers.\n");
bge_stop(sc);
return;
}
/*
* Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's
@@ -3907,8 +3920,13 @@ bge_init_locked(struct bge_softc *sc)
}
/* Init jumbo RX ring. */
if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
bge_init_rx_ring_jumbo(sc);
if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) {
if (bge_init_rx_ring_jumbo(sc) != 0) {
device_printf(sc->bge_dev, "no memory for jumbo Rx buffers.\n");
bge_stop(sc);
return;
}
}
/* Init our RX return ring index. */
sc->bge_rx_saved_considx = 0;

sys/dev/bge/if_bgereg.h

@@ -2543,8 +2543,9 @@ struct bge_chain_data {
bus_dma_tag_t bge_tx_ring_tag;
bus_dma_tag_t bge_status_tag;
bus_dma_tag_t bge_stats_tag;
bus_dma_tag_t bge_mtag; /* mbuf mapping tag */
bus_dma_tag_t bge_mtag_jumbo; /* mbuf mapping tag */
bus_dma_tag_t bge_rx_mtag; /* Rx mbuf mapping tag */
bus_dma_tag_t bge_tx_mtag; /* Tx mbuf mapping tag */
bus_dma_tag_t bge_mtag_jumbo; /* Jumbo mbuf mapping tag */
bus_dmamap_t bge_tx_dmamap[BGE_TX_RING_CNT];
bus_dmamap_t bge_rx_std_dmamap[BGE_STD_RX_RING_CNT];
bus_dmamap_t bge_rx_jumbo_dmamap[BGE_JUMBO_RX_RING_CNT];