Overhaul the Rx path to recover from mbuf cluster allocation failure.

 o Create one more spare DMA map so that the Rx handler can recover
   from bus_dmamap_load_mbuf_sg(9) failure.
 o Make sure to update the status field in the Rx descriptors even if
   we failed to allocate a new buffer. Previously a failed allocation
   left the ring stuck, and the em_handle_rxtx task took up all
   available CPU cycles.
 o Don't blindly unload the DMA map. Reuse the already-loaded DMA map
   if the received packet has errors. This speeds up Rx processing a
   bit under heavy load, since the map does not have to be reloaded
   in the error case. (bus_dmamap_load_mbuf_sg(9) is the most
   expensive call in the driver context.)
 o Update the if_iqdrops counter when the driver can't allocate an
   mbuf cluster. With this change it's now possible to see queue
   drops with netstat(1).
 o Update the mbuf_alloc_failed counter when the Rx fixup code fails
   to allocate an mbuf header.
 o Return ENOBUFS instead of ENOMEM in case of Rx fixup failure.
 o Set adapter->lmp to NULL in case of Rx fixup failure. Strictly
   speaking this is not necessary for correct operation, but it makes
   the intention clear.
 o Remove the now-unused dropped_pkts member from the softc.

With these changes em(4) should survive mbuf cluster allocation
failure on the Rx path. The spare-map idiom at the heart of the
change is sketched after the commit metadata below.

Reviewed by:	pdeuskar, glebius (with improvements)
Pyun YongHyeon, 2006-08-14 01:50:54 +00:00
commit f1909c6f53 (parent 1ad7bca7e9)
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=161266

2 changed files, 69 insertions(+), 48 deletions(-)
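
Before the diff itself, here is a minimal sketch of the spare-map
recovery idiom the change introduces. It is illustrative only: the
softc layout (struct softc, rx_slot[], rx_refill()) is a hypothetical
stand-in for em(4)'s struct adapter and em_get_buf(), while the
bus_dma(9) and mbuf(9) calls are the real KPIs. The point is the
ordering: nothing belonging to the ring slot is torn down until the
replacement buffer has been allocated and successfully loaded.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/mbuf.h>
    #include <machine/bus.h>

    /* Hypothetical per-slot and per-softc state; rx_sparemap mirrors
     * in shape what the diff adds to em(4)'s struct adapter. */
    struct rx_slot {
            struct mbuf     *m_head;        /* cluster currently on the ring */
            bus_dmamap_t    map;            /* DMA map loaded with that cluster */
    };

    struct softc {
            bus_dma_tag_t   rxtag;          /* tag shared by all Rx maps */
            bus_dmamap_t    rx_sparemap;    /* one extra, always-unloaded map */
            struct rx_slot  rx_slot[256];   /* receive ring slots */
    };

    static int
    rx_refill(struct softc *sc, int idx)
    {
            struct mbuf *m;
            bus_dma_segment_t seg[1];
            bus_dmamap_t tmp;
            int error, nsegs;

            /* Allocate the replacement cluster first. On failure the
             * old mbuf is still loaded in the slot's map, so the caller
             * can recycle it instead of losing a ring entry. */
            m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
            if (m == NULL)
                    return (ENOBUFS);
            m->m_len = m->m_pkthdr.len = MCLBYTES;

            /* Load the new cluster into the spare map, not the slot's map. */
            error = bus_dmamap_load_mbuf_sg(sc->rxtag, sc->rx_sparemap,
                m, seg, &nsegs, BUS_DMA_NOWAIT);
            if (error != 0) {
                    m_free(m);
                    return (error);         /* slot left intact */
            }

            /* Only now is it safe to drop the old mapping... */
            if (sc->rx_slot[idx].m_head != NULL)
                    bus_dmamap_unload(sc->rxtag, sc->rx_slot[idx].map);

            /* ...and swap maps: the old slot map becomes the next spare. */
            tmp = sc->rx_slot[idx].map;
            sc->rx_slot[idx].map = sc->rx_sparemap;
            sc->rx_sparemap = tmp;

            bus_dmamap_sync(sc->rxtag, sc->rx_slot[idx].map,
                BUS_DMASYNC_PREREAD);
            sc->rx_slot[idx].m_head = m;
            return (0);
    }

Because only a map swap remains once the load succeeds, a failure at
any step leaves the old cluster mapped and the ring entry usable,
which is what lets em_rxeof() fall back to recycling the buffer
instead of breaking out of the receive loop.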

--- a/sys/dev/em/if_em.c
+++ b/sys/dev/em/if_em.c

@@ -236,7 +236,7 @@ static void em_disable_promisc(struct adapter *);
 static void em_set_multi(struct adapter *);
 static void em_print_hw_stats(struct adapter *);
 static void em_update_link_status(struct adapter *);
-static int  em_get_buf(int i, struct adapter *, struct mbuf *);
+static int  em_get_buf(struct adapter *, int);
 static void em_enable_vlans(struct adapter *);
 static void em_disable_vlans(struct adapter *);
 static int  em_encap(struct adapter *, struct mbuf **);
@@ -2803,45 +2803,49 @@ em_txeof(struct adapter *adapter)
  *
  **********************************************************************/
 static int
-em_get_buf(int i, struct adapter *adapter, struct mbuf *mp)
+em_get_buf(struct adapter *adapter, int i)
 {
+	struct mbuf *m;
 	bus_dma_segment_t segs[1];
+	bus_dmamap_t map;
 	struct em_buffer *rx_buffer;
 	int error, nsegs;
 
-	if (mp == NULL) {
-		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
-		if (mp == NULL) {
-			adapter->mbuf_cluster_failed++;
-			return (ENOBUFS);
-		}
-		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
-	} else {
-		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
-		mp->m_data = mp->m_ext.ext_buf;
-		mp->m_next = NULL;
+	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
+	if (m == NULL) {
+		adapter->mbuf_cluster_failed++;
+		return (ENOBUFS);
 	}
+	m->m_len = m->m_pkthdr.len = MCLBYTES;
 	if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
-		m_adj(mp, ETHER_ALIGN);
-
-	rx_buffer = &adapter->rx_buffer_area[i];
+		m_adj(m, ETHER_ALIGN);
 
 	/*
 	 * Using memory from the mbuf cluster pool, invoke the
 	 * bus_dma machinery to arrange the memory mapping.
 	 */
-	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, rx_buffer->map,
-	    mp, segs, &nsegs, 0);
+	error = bus_dmamap_load_mbuf_sg(adapter->rxtag, adapter->rx_sparemap,
+	    m, segs, &nsegs, BUS_DMA_NOWAIT);
 	if (error != 0) {
-		m_free(mp);
+		m_free(m);
 		return (error);
 	}
 	/* If nsegs is wrong then the stack is corrupt. */
 	KASSERT(nsegs == 1, ("Too many segments returned!"));
 
-	rx_buffer->m_head = mp;
-	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
+	rx_buffer = &adapter->rx_buffer_area[i];
+	if (rx_buffer->m_head != NULL)
+		bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
 
+	map = rx_buffer->map;
+	rx_buffer->map = adapter->rx_sparemap;
+	adapter->rx_sparemap = map;
 	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);
+	rx_buffer->m_head = m;
 
+	adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr);
+	/* Zero out the receive descriptors status. */
+	adapter->rx_desc_base[i].status = 0;
 
 	return (0);
 }
@@ -2888,6 +2892,13 @@ em_allocate_receive_structures(struct adapter *adapter)
 		goto fail;
 	}
 
+	error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
+	    &adapter->rx_sparemap);
+	if (error) {
+		device_printf(dev, "%s: bus_dmamap_create failed: %d\n",
+		    __func__, error);
+		goto fail;
+	}
 	rx_buffer = adapter->rx_buffer_area;
 	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
 		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
@@ -2900,7 +2911,7 @@ em_allocate_receive_structures(struct adapter *adapter)
 	}
 
 	for (i = 0; i < adapter->num_rx_desc; i++) {
-		error = em_get_buf(i, adapter, NULL);
+		error = em_get_buf(adapter, i);
 		if (error)
 			goto fail;
 	}
@@ -3035,6 +3046,10 @@ em_free_receive_structures(struct adapter *adapter)
 
 	INIT_DEBUGOUT("free_receive_structures: begin");
 
+	if (adapter->rx_sparemap) {
+		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
+		adapter->rx_sparemap = NULL;
+	}
 	if (adapter->rx_buffer_area != NULL) {
 		rx_buffer = adapter->rx_buffer_area;
 		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
@@ -3103,10 +3118,12 @@ em_rxeof(struct adapter *adapter, int count)
 		struct mbuf *m = NULL;
 
 		mp = adapter->rx_buffer_area[i].m_head;
+		/*
+		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
+		 * needs to access the last received byte in the mbuf.
+		 */
 		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
 		    BUS_DMASYNC_POSTREAD);
-		bus_dmamap_unload(adapter->rxtag,
-		    adapter->rx_buffer_area[i].map);
 
 		accept_frame = 1;
 		prev_len_adj = 0;
@@ -3145,14 +3162,9 @@ em_rxeof(struct adapter *adapter, int count)
 		}
 
 		if (accept_frame) {
-			if (em_get_buf(i, adapter, NULL) == ENOBUFS) {
-				adapter->dropped_pkts++;
-				em_get_buf(i, adapter, mp);
-				if (adapter->fmp != NULL)
-					m_freem(adapter->fmp);
-				adapter->fmp = NULL;
-				adapter->lmp = NULL;
-				break;
+			if (em_get_buf(adapter, i) != 0) {
+				ifp->if_iqdrops++;
+				goto discard;
 			}
 
 			/* Assign correct length to the current fragment */
@@ -3203,16 +3215,25 @@ em_rxeof(struct adapter *adapter, int count)
 				adapter->lmp = NULL;
 			}
 		} else {
-			adapter->dropped_pkts++;
-			em_get_buf(i, adapter, mp);
-			if (adapter->fmp != NULL)
+			ifp->if_ierrors++;
+discard:
+			/* Reuse loaded DMA map and just update mbuf chain */
+			mp = adapter->rx_buffer_area[i].m_head;
+			mp->m_len = mp->m_pkthdr.len = MCLBYTES;
+			mp->m_data = mp->m_ext.ext_buf;
+			mp->m_next = NULL;
+			if (adapter->hw.max_frame_size <= (MCLBYTES - ETHER_ALIGN))
+				m_adj(mp, ETHER_ALIGN);
+			if (adapter->fmp != NULL) {
 				m_freem(adapter->fmp);
-			adapter->fmp = NULL;
-			adapter->lmp = NULL;
+				adapter->fmp = NULL;
+				adapter->lmp = NULL;
+			}
+			/* Zero out the receive descriptors status. */
+			adapter->rx_desc_base[i].status = 0;
+			m = NULL;
 		}
 
-		/* Zero out the receive descriptors status. */
-		current_desc->status = 0;
-
 		bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -3281,10 +3302,12 @@ em_fixup_rx(struct adapter *adapter)
 			n->m_next = m;
 			adapter->fmp = n;
 		} else {
-			adapter->dropped_pkts++;
+			adapter->ifp->if_iqdrops++;
+			adapter->mbuf_alloc_failed++;
 			m_freem(adapter->fmp);
 			adapter->fmp = NULL;
-			error = ENOMEM;
+			adapter->lmp = NULL;
+			error = ENOBUFS;
 		}
 	}
@@ -3556,9 +3579,9 @@ em_update_stats_counters(struct adapter *adapter)
 	ifp->if_collisions = adapter->stats.colc;
 
 	/* Rx Errors */
-	ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc +
-	    adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc +
-	    adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr;
+	ifp->if_ierrors = adapter->stats.rxerrc + adapter->stats.crcerrs +
+	    adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc +
+	    adapter->stats.mpc + adapter->stats.cexterr;
 
 	/* Tx Errors */
 	ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol +
@@ -3611,8 +3634,6 @@ em_print_debug_info(struct adapter *adapter)
 	    adapter->mbuf_alloc_failed);
 	device_printf(dev, "Std mbuf cluster failed = %ld\n",
 	    adapter->mbuf_cluster_failed);
-	device_printf(dev, "Driver dropped packets = %ld\n",
-	    adapter->dropped_pkts);
 }
 
 static void

--- a/sys/dev/em/if_em.h
+++ b/sys/dev/em/if_em.h

@@ -324,13 +324,13 @@ struct adapter {
 	int			rx_process_limit;
 	struct em_buffer	*rx_buffer_area;
 	bus_dma_tag_t		rxtag;
+	bus_dmamap_t		rx_sparemap;
 
 	/* First/last mbuf pointers, for collecting multisegment RX packets. */
 	struct mbuf		*fmp;
 	struct mbuf		*lmp;
 
 	/* Misc stats maintained by the driver */
-	unsigned long		dropped_pkts;
 	unsigned long		mbuf_alloc_failed;
 	unsigned long		mbuf_cluster_failed;
 	unsigned long		no_tx_desc_avail1;
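
A closing note on the discard path added to em_rxeof() above: because
the ring slot's cluster is still loaded in its DMA map, recovery only
has to undo whatever Rx processing may have done to the mbuf itself.
A condensed sketch, reusing the hypothetical names from the earlier
example (rx_desc[] is a likewise-hypothetical descriptor ring, not
the driver's exact layout):

    /* The cluster is still DMA-loaded; reset the mbuf fields that Rx
     * processing may have changed, then hand the descriptor back. */
    struct mbuf *mp = sc->rx_slot[idx].m_head;

    mp->m_len = mp->m_pkthdr.len = MCLBYTES;    /* restore full length */
    mp->m_data = mp->m_ext.ext_buf;             /* undo any m_adj() offset */
    mp->m_next = NULL;                          /* detach from partial chain */
    if (max_frame_size <= (MCLBYTES - ETHER_ALIGN))
            m_adj(mp, ETHER_ALIGN);             /* re-apply IP header alignment */
    sc->rx_desc[idx].status = 0;                /* descriptor is hardware's again */

Zeroing the status field is the piece that fixes the livelock
described in the commit message: if the descriptor still looks "done"
to the driver, the em_handle_rxtx task spins on it forever.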