Mechanically substitute flags from historic mbuf allocator with malloc(9)
flags in sys/dev.
glebius 2012-12-04 09:32:43 +00:00
parent 75a08a975a
commit a69aaa7721
133 changed files with 389 additions and 389 deletions
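
Note: the substitution is a pure rename. The historic mbuf wait flags map
onto their malloc(9) counterparts as M_DONTWAIT -> M_NOWAIT (fail rather
than sleep) and M_WAIT -> M_WAITOK (may sleep until memory is available).
A minimal sketch of the post-commit spelling follows; the helper name and
the include list are illustrative only and are not taken from this diff.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

/* Hypothetical RX-refill helper using the malloc(9) flag name. */
static int
example_rx_refill(struct mbuf **mp)
{
	struct mbuf *m;

	/* Formerly m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR). */
	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	*mp = m;
	return (0);
}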

View File

@ -1521,7 +1521,7 @@ age_encap(struct age_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
/* Release original mbufs. */
m_freem(*m_head);
if (m == NULL) {
@ -1599,7 +1599,7 @@ age_encap(struct age_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->age_cdata.age_tx_tag, map,
*m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, AGE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, AGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -3061,7 +3061,7 @@ age_newbuf(struct age_softc *sc, struct age_rxdesc *rxd)
AGE_LOCK_ASSERT(sc);
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
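
Note: the transmit-side hunks above (and many below) share one retry idiom:
when bus_dmamap_load_mbuf_sg(9) returns EFBIG, the chain is linearized with
m_collapse(9), now passed M_NOWAIT. A condensed, hypothetical sketch of that
pattern follows; the function name and segment limit are placeholders, not
taken from any single driver.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <machine/bus.h>

#define	EXAMPLE_MAXTXSEGS	32	/* placeholder segment limit */

static int
example_encap(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m_head,
    bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs, nsegs, 0);
	if (error == EFBIG) {
		/* Too many DMA segments; squeeze the chain without sleeping. */
		m = m_collapse(*m_head, M_NOWAIT, EXAMPLE_MAXTXSEGS);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs,
		    nsegs, 0);
	}
	return (error);
}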

View File

@ -2047,7 +2047,7 @@ alc_encap(struct alc_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
/* Release original mbufs. */
m_freem(*m_head);
if (m == NULL) {
@ -2125,7 +2125,7 @@ alc_encap(struct alc_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->alc_cdata.alc_tx_tag, map,
*m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, ALC_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, ALC_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -2803,7 +2803,7 @@ alc_newbuf(struct alc_softc *sc, struct alc_rxdesc *rxd)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = RX_BUF_SIZE_MAX;
@ -2923,7 +2923,7 @@ alc_fixup_rx(struct ifnet *ifp, struct mbuf *m)
* header from the mbuf chain. This can save lots of CPU
* cycles for jumbo frame.
*/
MGETHDR(n, M_DONTWAIT, MT_DATA);
MGETHDR(n, M_NOWAIT, MT_DATA);
if (n == NULL) {
ifp->if_iqdrops++;
m_freem(m);

View File

@ -1640,7 +1640,7 @@ ale_encap(struct ale_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
/* Release original mbufs. */
m_freem(*m_head);
if (m == NULL) {
@ -1657,7 +1657,7 @@ ale_encap(struct ale_softc *sc, struct mbuf **m_head)
if ((sc->ale_flags & ALE_FLAG_TXCSUM_BUG) != 0 &&
(m->m_pkthdr.csum_flags & ALE_CSUM_FEATURES) != 0 &&
(mtod(m, intptr_t) & 3) != 0) {
m = m_defrag(*m_head, M_DONTWAIT);
m = m_defrag(*m_head, M_NOWAIT);
if (m == NULL) {
*m_head = NULL;
return (ENOBUFS);
@ -1742,7 +1742,7 @@ ale_encap(struct ale_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->ale_cdata.ale_tx_tag, map,
*m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, ALE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, ALE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -946,12 +946,12 @@ an_rxeof(struct an_softc *sc)
/* dump raw 802.11 packet to bpf and skip ip stack */
BPF_TAP(ifp, bpf_buf, len);
} else {
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
ifp->if_ierrors++;
return;
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (!(m->m_flags & M_EXT)) {
m_freem(m);
ifp->if_ierrors++;
@ -1037,12 +1037,12 @@ an_rxeof(struct an_softc *sc)
if (an_rx_desc.an_done && !an_rx_desc.an_valid) {
buf = sc->an_rx_buffer[count].an_dma_vaddr;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
ifp->if_ierrors++;
return;
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (!(m->m_flags & M_EXT)) {
m_freem(m);
ifp->if_ierrors++;
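
Note: this driver still uses the two-step allocation, MGETHDR() followed by
MCLGET() plus an M_EXT check, so only the wait flag changes. A hedged sketch
of that shape with the new spelling (the helper name is invented; newer code
in this commit typically uses m_getcl(9) to do both steps at once):

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

static struct mbuf *
example_get_cluster(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);		/* was M_DONTWAIT */
	if (m == NULL)
		return (NULL);
	MCLGET(m, M_NOWAIT);			/* was M_DONTWAIT */
	if ((m->m_flags & M_EXT) == 0) {
		/* No cluster attached; free the bare mbuf. */
		m_freem(m);
		return (NULL);
	}
	return (m);
}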

View File

@ -232,7 +232,7 @@ ath_legacy_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
* multiple of the cache line size. Not doing this
* causes weird stuff to happen (for the 5210 at least).
*/
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
DPRINTF(sc, ATH_DEBUG_ANY,
"%s: no mbuf/cluster\n", __func__);

View File

@ -518,7 +518,7 @@ ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf)
ATH_RX_LOCK_ASSERT(sc);
m = m_getm(NULL, sc->sc_edma_bufsize, M_DONTWAIT, MT_DATA);
m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA);
if (! m)
return (ENOBUFS); /* XXX ?*/

View File

@ -325,7 +325,7 @@ ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
*/
if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */
sc->sc_stats.ast_tx_linear++;
m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
m = m_collapse(m0, M_NOWAIT, ATH_TXDESC);
if (m == NULL) {
ath_freetx(m0);
sc->sc_stats.ast_tx_nombuf++;

View File

@ -5437,9 +5437,9 @@ bce_get_rx_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
/* This is a new mbuf allocation. */
if (bce_hdr_split == TRUE)
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
else
m_new = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
sc->rx_bd_mbuf_alloc_size);
if (m_new == NULL) {
@ -5559,7 +5559,7 @@ bce_get_pg_buf(struct bce_softc *sc, struct mbuf *m, u16 *prod,
goto bce_get_pg_buf_exit);
/* This is a new mbuf allocation. */
m_new = m_getcl(M_DONTWAIT, MT_DATA, 0);
m_new = m_getcl(M_NOWAIT, MT_DATA, 0);
if (m_new == NULL) {
sc->mbuf_alloc_failed_count++;
rc = ENOBUFS;
@ -7320,7 +7320,7 @@ bce_tso_setup(struct bce_softc *sc, struct mbuf **m_head, u16 *flags)
/* Controller may modify mbuf chains. */
if (M_WRITABLE(*m_head) == 0) {
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
sc->mbuf_alloc_failed_count++;
@ -7486,7 +7486,7 @@ bce_tx_encap(struct bce_softc *sc, struct mbuf **m_head)
sc->mbuf_frag_count++;
/* Try to defrag the mbuf. */
m0 = m_collapse(*m_head, M_DONTWAIT, BCE_MAX_SEGMENTS);
m0 = m_collapse(*m_head, M_NOWAIT, BCE_MAX_SEGMENTS);
if (m0 == NULL) {
/* Defrag was unsuccessful */
m_freem(*m_head);

View File

@ -791,7 +791,7 @@ bfe_list_newbuf(struct bfe_softc *sc, int c)
u_int32_t ctrl;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
m->m_len = m->m_pkthdr.len = MCLBYTES;
if (bus_dmamap_load_mbuf_sg(sc->bfe_rxmbuf_tag, sc->bfe_rx_sparemap,
@ -1519,7 +1519,7 @@ bfe_encap(struct bfe_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->bfe_txmbuf_tag, r->bfe_map, *m_head,
txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, BFE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, BFE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -1312,12 +1312,12 @@ bge_newbuf_std(struct bge_softc *sc, int i)
if (sc->bge_flags & BGE_FLAG_JUMBO_STD &&
(sc->bge_ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN +
ETHER_VLAN_ENCAP_LEN > (MCLBYTES - ETHER_ALIGN))) {
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MJUM9BYTES;
} else {
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1368,11 +1368,11 @@ bge_newbuf_jumbo(struct bge_softc *sc, int i)
struct mbuf *m;
int error, nsegs;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOBUFS);
m_cljget(m, M_DONTWAIT, MJUM9BYTES);
m_cljget(m, M_NOWAIT, MJUM9BYTES);
if (!(m->m_flags & M_EXT)) {
m_freem(m);
return (ENOBUFS);
@ -4946,7 +4946,7 @@ bge_cksum_pad(struct mbuf *m)
/* Allocate new empty mbuf, pad it. Compact later. */
struct mbuf *n;
MGET(n, M_DONTWAIT, MT_DATA);
MGET(n, M_NOWAIT, MT_DATA);
if (n == NULL)
return (ENOBUFS);
n->m_len = 0;
@ -4988,7 +4988,7 @@ bge_check_short_dma(struct mbuf *m)
}
if (found > 1) {
n = m_defrag(m, M_DONTWAIT);
n = m_defrag(m, M_NOWAIT);
if (n == NULL)
m_freem(m);
} else
@ -5008,7 +5008,7 @@ bge_setup_tso(struct bge_softc *sc, struct mbuf *m, uint16_t *mss,
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
n = m_dup(m, M_DONTWAIT);
n = m_dup(m, M_NOWAIT);
m_freem(m);
if (n == NULL)
return (NULL);
@ -5125,9 +5125,9 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
* DMA read operation.
*/
if (sc->bge_forced_collapse == 1)
m = m_defrag(m, M_DONTWAIT);
m = m_defrag(m, M_NOWAIT);
else
m = m_collapse(m, M_DONTWAIT,
m = m_collapse(m, M_NOWAIT,
sc->bge_forced_collapse);
if (m == NULL)
m = *m_head;
@ -5139,7 +5139,7 @@ bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx)
error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_tx_mtag, map, m, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW);
m = m_collapse(m, M_NOWAIT, BGE_NSEG_NEW);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
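
Note: the checksum/TSO paths above first test M_WRITABLE() and duplicate the
chain when the controller may modify it; after the rename the copy is taken
with m_dup(..., M_NOWAIT). A standalone, hypothetical sketch of that idiom
(the function name is invented):

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

static int
example_make_writable(struct mbuf **m_head)
{
	struct mbuf *m;

	if (M_WRITABLE(*m_head) != 0)
		return (0);
	/* Get a writable copy (was m_dup(*m_head, M_DONTWAIT)). */
	m = m_dup(*m_head, M_NOWAIT);
	/* Release the original chain regardless of the outcome. */
	m_freem(*m_head);
	*m_head = m;
	return (m == NULL ? ENOBUFS : 0);
}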

View File

@ -539,7 +539,7 @@ bm_dummypacket(struct bm_softc *sc)
ifp = sc->sc_ifp;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return;
@ -793,7 +793,7 @@ bm_encap(struct bm_softc *sc, struct mbuf **m_head)
*m_head, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, nsegs);
m = m_collapse(*m_head, M_NOWAIT, nsegs);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -1238,7 +1238,7 @@ bm_add_rxbuf(struct bm_softc *sc, int idx)
bus_dma_segment_t segs[1];
int error, nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

View File

@ -2534,7 +2534,7 @@ bwi_newbuf(struct bwi_softc *sc, int buf_idx, int init)
KASSERT(buf_idx < BWI_RX_NDESC, ("buf_idx %d", buf_idx));
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
error = ENOBUFS;
@ -3015,7 +3015,7 @@ bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
/*
* Setup the embedded TX header
*/
M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
if (m == NULL) {
if_printf(ifp, "%s: prepend TX header failed\n", __func__);
return ENOBUFS;
@ -3074,7 +3074,7 @@ bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
if (error) { /* error == EFBIG */
struct mbuf *m_new;
m_new = m_defrag(m, M_DONTWAIT);
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
if_printf(ifp, "%s: can't defrag TX buffer\n",
__func__);
@ -3195,7 +3195,7 @@ bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m,
/*
* Setup the embedded TX header
*/
M_PREPEND(m, sizeof(*hdr), M_DONTWAIT);
M_PREPEND(m, sizeof(*hdr), M_NOWAIT);
if (m == NULL) {
if_printf(ifp, "%s: prepend TX header failed\n", __func__);
return ENOBUFS;
@ -3249,7 +3249,7 @@ bwi_encap_raw(struct bwi_softc *sc, int idx, struct mbuf *m,
__func__, error);
goto back;
}
m_new = m_defrag(m, M_DONTWAIT);
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
if_printf(ifp, "%s: can't defrag TX buffer\n",
__func__);

View File

@ -1428,7 +1428,7 @@ bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
/*
* XXX please removes m_defrag(9)
*/
m_new = m_defrag(m, M_DONTWAIT);
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
device_printf(sc->sc_dev,
"%s: can't defrag TX buffer\n",
@ -1544,7 +1544,7 @@ bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m)
if (error) { /* error == EFBIG */
struct mbuf *m_new;
m_new = m_defrag(m, M_DONTWAIT);
m_new = m_defrag(m, M_NOWAIT);
if (m_new == NULL) {
if_printf(ifp, "%s: can't defrag TX buffer\n",
__func__);
@ -9124,7 +9124,7 @@ bwn_pio_rxeof(struct bwn_pio_rxqueue *prq)
padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0;
totlen = len + padding;
KASSERT(totlen <= MCLBYTES, ("too big..\n"));
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "%s: out of memory", __func__);
goto error;
@ -9183,7 +9183,7 @@ bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc,
struct mbuf *m;
int error;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
error = ENOBUFS;

View File

@ -3757,7 +3757,7 @@ bxe_alloc_buf_rings(struct bxe_softc *sc)
if (fp != NULL) {
fp->br = buf_ring_alloc(BXE_BR_SIZE,
M_DEVBUF, M_DONTWAIT, &fp->mtx);
M_DEVBUF, M_NOWAIT, &fp->mtx);
if (fp->br == NULL) {
rc = ENOMEM;
goto bxe_alloc_buf_rings_exit;
@ -8960,7 +8960,7 @@ bxe_tx_encap(struct bxe_fastpath *fp, struct mbuf **m_head)
} else if (error == EFBIG) {
/* Possibly recoverable with defragmentation. */
fp->mbuf_defrag_attempts++;
m0 = m_defrag(*m_head, M_DONTWAIT);
m0 = m_defrag(*m_head, M_NOWAIT);
if (m0 == NULL) {
fp->mbuf_defrag_failures++;
rc = ENOBUFS;
@ -10467,7 +10467,7 @@ bxe_alloc_tpa_mbuf(struct bxe_fastpath *fp, int queue)
#endif
/* Allocate the new TPA mbuf. */
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
if (__predict_false(m == NULL)) {
fp->mbuf_tpa_alloc_failed++;
rc = ENOBUFS;
@ -10661,7 +10661,7 @@ bxe_alloc_rx_sge_mbuf(struct bxe_fastpath *fp, uint16_t index)
#endif
/* Allocate a new SGE mbuf. */
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, SGE_PAGE_SIZE);
if (__predict_false(m == NULL)) {
fp->mbuf_sge_alloc_failed++;
rc = ENOMEM;
@ -10851,7 +10851,7 @@ bxe_alloc_rx_bd_mbuf(struct bxe_fastpath *fp, uint16_t index)
#endif
/* Allocate the new RX BD mbuf. */
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, sc->mbuf_alloc_size);
if (__predict_false(m == NULL)) {
fp->mbuf_rx_bd_alloc_failed++;
rc = ENOBUFS;

View File

@ -1192,7 +1192,7 @@ cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
cflags = 0;
if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) {
if (M_WRITABLE(*m_head) == 0) {
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
*m_head = m;
if (m == NULL)
@ -1215,7 +1215,7 @@ cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, CAS_NTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -1714,7 +1714,7 @@ cas_rint(struct cas_softc *sc)
__func__, idx, off, len);
#endif
rxds = &sc->sc_rxdsoft[idx];
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m != NULL) {
refcount_acquire(&rxds->rxds_refcount);
bus_dmamap_sync(sc->sc_rdmatag,
@ -1759,7 +1759,7 @@ cas_rint(struct cas_softc *sc)
__func__, idx, off, len);
#endif
rxds = &sc->sc_rxdsoft[idx];
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m != NULL) {
refcount_acquire(&rxds->rxds_refcount);
off += ETHER_ALIGN;
@ -1796,7 +1796,7 @@ cas_rint(struct cas_softc *sc)
#endif
rxds2 = &sc->sc_rxdsoft[idx2];
if (m != NULL) {
MGET(m2, M_DONTWAIT, MT_DATA);
MGET(m2, M_NOWAIT, MT_DATA);
if (m2 != NULL) {
refcount_acquire(
&rxds2->rxds_refcount);

View File

@ -319,10 +319,10 @@ static struct mbuf *makembuf (void *buf, unsigned len)
{
struct mbuf *m;
MGETHDR (m, M_DONTWAIT, MT_DATA);
MGETHDR (m, M_NOWAIT, MT_DATA);
if (! m)
return 0;
MCLGET (m, M_DONTWAIT);
MCLGET (m, M_NOWAIT);
if (! (m->m_flags & M_EXT)) {
m_freem (m);
return 0;

View File

@ -502,7 +502,7 @@ cm_srint_locked(vsc)
buffer = sc->sc_rx_act ^ 1;
/* Allocate header mbuf */
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == 0) {
/*
@ -539,7 +539,7 @@ cm_srint_locked(vsc)
*/
if ((len + 2 + 2) > MHLEN) {
/* attach an mbuf cluster */
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
/* Insist on getting a cluster */
if ((m->m_flags & M_EXT) == 0) {

View File

@ -203,10 +203,10 @@ static struct mbuf *makembuf (void *buf, unsigned len)
{
struct mbuf *m;
MGETHDR (m, M_DONTWAIT, MT_DATA);
MGETHDR (m, M_NOWAIT, MT_DATA);
if (! m)
return 0;
MCLGET (m, M_DONTWAIT);
MCLGET (m, M_NOWAIT);
if (! (m->m_flags & M_EXT)) {
m_freem (m);
return 0;

View File

@ -726,12 +726,12 @@ cs_get_packet(struct cs_softc *sc)
return (-1);
}
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m==NULL)
return (-1);
if (length > MHLEN) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (!(m->m_flags & M_EXT)) {
m_freem(m);
return (-1);

View File

@ -205,10 +205,10 @@ static struct mbuf *makembuf (void *buf, u_int len)
{
struct mbuf *m;
MGETHDR (m, M_DONTWAIT, MT_DATA);
MGETHDR (m, M_NOWAIT, MT_DATA);
if (! m)
return 0;
MCLGET (m, M_DONTWAIT);
MCLGET (m, M_NOWAIT);
if (! (m->m_flags & M_EXT)) {
m_freem (m);
return 0;

View File

@ -252,13 +252,13 @@ static struct mbuf *makembuf (void *buf, u_int len)
{
struct mbuf *m, *o, *p;
MGETHDR (m, M_DONTWAIT, MT_DATA);
MGETHDR (m, M_NOWAIT, MT_DATA);
if (! m)
return 0;
if (len >= MINCLSIZE)
MCLGET (m, M_DONTWAIT);
MCLGET (m, M_NOWAIT);
m->m_pkthdr.len = len;
m->m_len = 0;
@ -271,13 +271,13 @@ static struct mbuf *makembuf (void *buf, u_int len)
if (! n) {
/* Allocate new mbuf. */
o = p;
MGET (p, M_DONTWAIT, MT_DATA);
MGET (p, M_NOWAIT, MT_DATA);
if (! p) {
m_freem (m);
return 0;
}
if (len >= MINCLSIZE)
MCLGET (p, M_DONTWAIT);
MCLGET (p, M_NOWAIT);
p->m_len = 0;
o->m_next = p;

View File

@ -1442,7 +1442,7 @@ send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
struct mbuf *m;
struct mngt_pktsched_wr *req;
m = m_gethdr(M_DONTWAIT, MT_DATA);
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m) {
req = mtod(m, struct mngt_pktsched_wr *);
req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));

View File

@ -2710,7 +2710,7 @@ get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
if (recycle_enable && len <= SGE_RX_COPY_THRES &&
sopeop == RSPQ_SOP_EOP) {
if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
goto skip_recycle;
cl = mtod(m, void *);
memcpy(cl, sd->rxsd_cl, len);
@ -2866,10 +2866,10 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
printf("async notification\n");
if (mh->mh_head == NULL) {
mh->mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
m = mh->mh_head;
} else {
m = m_gethdr(M_DONTWAIT, MT_DATA);
m = m_gethdr(M_NOWAIT, MT_DATA);
}
if (m == NULL)
goto no_mem;
@ -2882,7 +2882,7 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
rspq->async_notif++;
goto skip;
} else if (flags & F_RSPD_IMM_DATA_VALID) {
struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
no_mem:

View File

@ -98,7 +98,7 @@ busdma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
printf("mbuf chain too long: %d max allowed %d\n",
seg_count, TX_MAX_SEGS);
if (!defragged) {
n = m_defrag(*m, M_DONTWAIT);
n = m_defrag(*m, M_NOWAIT);
if (n == NULL) {
err = ENOBUFS;
goto err_out;

View File

@ -2761,7 +2761,7 @@ start: sgl->nsegs = 0;
rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg,
&sgl->nsegs, BUS_DMA_NOWAIT);
if (rc == EFBIG && defragged == 0) {
m = m_defrag(m, M_DONTWAIT);
m = m_defrag(m, M_NOWAIT);
if (m == NULL)
return (EFBIG);

View File

@ -1207,7 +1207,7 @@ t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
KASSERT(sb->sb_mb != NULL,
("%s: len > 0 && sb->sb_mb empty", __func__));
m = m_copym(sb->sb_mb, 0, len, M_DONTWAIT);
m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
if (m == NULL)
len = 0; /* Don't flush data from sockbuf. */
else

View File

@ -1164,7 +1164,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
* or may not be stashed in the original SYN mbuf passed to us.
* Just copy it over instead of dealing with all possibilities.
*/
m = m_dup(synqe->syn, M_DONTWAIT);
m = m_dup(synqe->syn, M_NOWAIT);
if (m)
m->m_pkthdr.rcvif = ifp;

View File

@ -2650,7 +2650,7 @@ dc_newbuf(struct dc_softc *sc, int i)
bus_dma_segment_t segs[1];
int error, nseg;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -3387,7 +3387,7 @@ dc_encap(struct dc_softc *sc, struct mbuf **m_head)
defragged = 0;
if (sc->dc_flags & DC_TX_COALESCE &&
((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) {
m = m_defrag(*m_head, M_DONTWAIT);
m = m_defrag(*m_head, M_NOWAIT);
defragged = 1;
} else {
/*
@ -3402,7 +3402,7 @@ dc_encap(struct dc_softc *sc, struct mbuf **m_head)
if (i > DC_TX_LIST_CNT / 4 ||
DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <=
DC_TX_LIST_RSVD) {
m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS);
m = m_collapse(*m_head, M_NOWAIT, DC_MAXFRAGS);
defragged = 1;
}
}
@ -3419,7 +3419,7 @@ dc_encap(struct dc_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->dc_tx_mtag,
sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0);
if (error == EFBIG) {
if (defragged != 0 || (m = m_collapse(*m_head, M_DONTWAIT,
if (defragged != 0 || (m = m_collapse(*m_head, M_NOWAIT,
DC_MAXFRAGS)) == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -257,7 +257,7 @@ tulip_txprobe(tulip_softc_t * const sc)
* to verify the connectivity.
*/
TULIP_LOCK_ASSERT(sc);
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return 0;
/*
@ -3517,7 +3517,7 @@ tulip_rx_intr(tulip_softc_t * const sc)
ms->m_pkthdr.len = total_len;
ms->m_pkthdr.rcvif = ifp;
m0 = ms;
ms = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
ms = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
#endif
TULIP_UNLOCK(sc);
CTR1(KTR_TULIP, "tulip_rx_intr: passing %p to upper layer", m0);
@ -3528,7 +3528,7 @@ tulip_rx_intr(tulip_softc_t * const sc)
* If we are priming the TULIP with mbufs, then allocate
* a new cluster for the next descriptor.
*/
ms = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
ms = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
#ifndef __NO_STRICT_ALIGNMENT
skip_input:
@ -3970,7 +3970,7 @@ tulip_txput(tulip_softc_t * const sc, struct mbuf *m)
* to recopy it into one mbuf and then try again. If
* we can't recopy it, try again later.
*/
m0 = m_defrag(m, M_DONTWAIT);
m0 = m_defrag(m, M_NOWAIT);
if (m0 == NULL) {
sc->tulip_flags |= TULIP_WANTTXSTART;
#if defined(TULIP_DEBUG)

View File

@ -1831,7 +1831,7 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (do_tso || (m_head->m_next != NULL &&
m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) {
if (M_WRITABLE(*m_headp) == 0) {
m_head = m_dup(*m_headp, M_DONTWAIT);
m_head = m_dup(*m_headp, M_NOWAIT);
m_freem(*m_headp);
if (m_head == NULL) {
*m_headp = NULL;
@ -1948,7 +1948,7 @@ em_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (error == EFBIG && remap) {
struct mbuf *m;
m = m_defrag(*m_headp, M_DONTWAIT);
m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
adapter->mbuf_alloc_failed++;
m_freem(*m_headp);
@ -3930,7 +3930,7 @@ em_refresh_mbufs(struct rx_ring *rxr, int limit)
while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->m_head == NULL) {
m = m_getjcl(M_DONTWAIT, MT_DATA,
m = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
/*
** If we have a temporary resource shortage
@ -4100,7 +4100,7 @@ em_setup_receive_ring(struct rx_ring *rxr)
continue;
}
#endif /* DEV_NETMAP */
rxbuf->m_head = m_getjcl(M_DONTWAIT, MT_DATA,
rxbuf->m_head = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (rxbuf->m_head == NULL) {
error = ENOBUFS;
@ -4579,7 +4579,7 @@ em_fixup_rx(struct rx_ring *rxr)
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
} else {
MGETHDR(n, M_DONTWAIT, MT_DATA);
MGETHDR(n, M_NOWAIT, MT_DATA);
if (n != NULL) {
bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
m->m_data += ETHER_HDR_LEN;

View File

@ -1830,7 +1830,7 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (do_tso || (m_head->m_next != NULL &&
m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD)) {
if (M_WRITABLE(*m_headp) == 0) {
m_head = m_dup(*m_headp, M_DONTWAIT);
m_head = m_dup(*m_headp, M_NOWAIT);
m_freem(*m_headp);
if (m_head == NULL) {
*m_headp = NULL;
@ -1935,7 +1935,7 @@ igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (error == EFBIG && remap) {
struct mbuf *m;
m = m_defrag(*m_headp, M_DONTWAIT);
m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
@ -3983,7 +3983,7 @@ igb_refresh_mbufs(struct rx_ring *rxr, int limit)
if (rxr->hdr_split == FALSE)
goto no_split;
if (rxbuf->m_head == NULL) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
mh = m_gethdr(M_NOWAIT, MT_DATA);
if (mh == NULL)
goto update;
} else
@ -4009,7 +4009,7 @@ igb_refresh_mbufs(struct rx_ring *rxr, int limit)
htole64(hseg[0].ds_addr);
no_split:
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
mp = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;
@ -4225,7 +4225,7 @@ igb_setup_receive_ring(struct rx_ring *rxr)
goto skip_head;
/* First the header */
rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
if (rxbuf->m_head == NULL) {
error = ENOBUFS;
goto fail;
@ -4247,7 +4247,7 @@ igb_setup_receive_ring(struct rx_ring *rxr)
skip_head:
/* Now the payload cluster */
rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (rxbuf->m_pack == NULL) {
error = ENOBUFS;

View File

@ -1566,7 +1566,7 @@ lem_xmit(struct adapter *adapter, struct mbuf **m_headp)
if (error == EFBIG) {
struct mbuf *m;
m = m_defrag(*m_headp, M_DONTWAIT);
m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
adapter->mbuf_alloc_failed++;
m_freem(*m_headp);
@ -3075,7 +3075,7 @@ lem_get_buf(struct adapter *adapter, int i)
struct em_buffer *rx_buffer;
int error, nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
adapter->mbuf_cluster_failed++;
return (ENOBUFS);
@ -3633,7 +3633,7 @@ lem_fixup_rx(struct adapter *adapter)
bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
m->m_data += ETHER_HDR_LEN;
} else {
MGETHDR(n, M_DONTWAIT, MT_DATA);
MGETHDR(n, M_NOWAIT, MT_DATA);
if (n != NULL) {
bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
m->m_data += ETHER_HDR_LEN;

View File

@ -1268,7 +1268,7 @@ ed_get_packet(struct ed_softc *sc, bus_size_t buf, u_short len)
struct mbuf *m;
/* Allocate a header mbuf */
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return;
m->m_pkthdr.rcvif = ifp;
@ -1282,7 +1282,7 @@ ed_get_packet(struct ed_softc *sc, bus_size_t buf, u_short len)
*/
if ((len + 2) > MHLEN) {
/* Attach an mbuf cluster */
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
/* Insist on getting a cluster */
if ((m->m_flags & M_EXT) == 0) {

View File

@ -837,15 +837,15 @@ copy_mbuf(struct mbuf *m)
{
struct mbuf *new;
MGET(new, M_WAIT, MT_DATA);
MGET(new, M_WAITOK, MT_DATA);
if (m->m_flags & M_PKTHDR) {
M_MOVE_PKTHDR(new, m);
if (m->m_len > MHLEN)
MCLGET(new, M_WAIT);
MCLGET(new, M_WAITOK);
} else {
if (m->m_len > MLEN)
MCLGET(new, M_WAIT);
MCLGET(new, M_WAITOK);
}
bcopy(m->m_data, new->m_data, m->m_len);
@ -1925,7 +1925,7 @@ en_mget(struct en_softc *sc, u_int pktlen)
* words at the begin.
*/
/* called from interrupt context */
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return (NULL);
@ -1940,7 +1940,7 @@ en_mget(struct en_softc *sc, u_int pktlen)
totlen -= m->m_len;
/* called from interrupt context */
tmp = m_getm(m, totlen, M_DONTWAIT, MT_DATA);
tmp = m_getm(m, totlen, M_NOWAIT, MT_DATA);
if (tmp == NULL) {
m_free(m);
return (NULL);
@ -2924,7 +2924,7 @@ en_attach(struct en_softc *sc)
&en_utopia_methods);
utopia_init_media(&sc->utopia);
MGET(sc->padbuf, M_WAIT, MT_DATA);
MGET(sc->padbuf, M_WAITOK, MT_DATA);
bzero(sc->padbuf->m_data, MLEN);
if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
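
Note: this driver also carries the sleepable spellings: M_WAIT becomes
M_WAITOK in paths that may sleep (copy_mbuf(), en_attach()), while the
receive path, explicitly marked "called from interrupt context" above, keeps
the non-sleeping M_NOWAIT. A compressed, hypothetical illustration of the
two flavours (helper names are invented):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

/* Attach time: sleeping is fine, and M_WAITOK mbuf allocations cannot fail. */
static struct mbuf *
example_padbuf_alloc(void)
{
	struct mbuf *m;

	MGET(m, M_WAITOK, MT_DATA);	/* was M_WAIT */
	bzero(m->m_data, MLEN);
	m->m_len = MLEN;
	return (m);
}

/* Interrupt time: must not sleep, so the allocation may return NULL. */
static struct mbuf *
example_rxbuf_alloc(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);	/* was M_DONTWAIT */
	return (m);
}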

View File

@ -746,11 +746,11 @@ epread(struct ep_softc *sc)
rx_fifo = rx_fifo2 = status & RX_BYTES_MASK;
if (EP_FTST(sc, F_RX_FIRST)) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (!m)
goto out;
if (rx_fifo >= MINCLSIZE)
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
sc->top = sc->mcur = top = m;
#define EROUND ((sizeof(struct ether_header) + 3) & ~3)
#define EOFF (EROUND - sizeof(struct ether_header))
@ -774,11 +774,11 @@ epread(struct ep_softc *sc)
lenthisone = min(rx_fifo, M_TRAILINGSPACE(m));
if (lenthisone == 0) { /* no room in this one */
mcur = m;
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (!m)
goto out;
if (rx_fifo >= MINCLSIZE)
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
m->m_len = 0;
mcur->m_next = m;
lenthisone = min(rx_fifo, M_TRAILINGSPACE(m));

View File

@ -2169,7 +2169,7 @@ et_encap(struct et_softc *sc, struct mbuf **m0)
error = bus_dmamap_load_mbuf_sg(sc->sc_tx_tag, map, *m0, segs, &nsegs,
0);
if (error == EFBIG) {
m = m_collapse(*m0, M_DONTWAIT, ET_NSEG_MAX);
m = m_collapse(*m0, M_NOWAIT, ET_NSEG_MAX);
if (m == NULL) {
m_freem(*m0);
*m0 = NULL;
@ -2331,7 +2331,7 @@ et_newbuf_cluster(struct et_rxbuf_data *rbd, int buf_idx)
int nsegs;
MPASS(buf_idx < ET_RX_NDESC);
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -2390,7 +2390,7 @@ et_newbuf_hdr(struct et_rxbuf_data *rbd, int buf_idx)
int nsegs;
MPASS(buf_idx < ET_RX_NDESC);
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MHLEN;

View File

@ -733,7 +733,7 @@ ex_rx_intr(struct ex_softc *sc)
QQQ = pkt_len = CSR_READ_2(sc, IO_PORT_REG);
if (rx_status & RCV_OK_bit) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
ipkt = m;
if (ipkt == NULL) {
ifp->if_iqdrops++;
@ -744,7 +744,7 @@ ex_rx_intr(struct ex_softc *sc)
while (pkt_len > 0) {
if (pkt_len >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (m->m_flags & M_EXT) {
m->m_len = MCLBYTES;
} else {
@ -769,7 +769,7 @@ ex_rx_intr(struct ex_softc *sc)
pkt_len -= m->m_len;
if (pkt_len > 0) {
MGET(m->m_next, M_DONTWAIT, MT_DATA);
MGET(m->m_next, M_NOWAIT, MT_DATA);
if (m->m_next == NULL) {
m_freem(ipkt);
ifp->if_iqdrops++;

View File

@ -1099,7 +1099,7 @@ fatm_supply_small_buffers(struct fatm_softc *sc)
if_printf(sc->ifp, "out of rbufs\n");
break;
}
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
break;
@ -1189,7 +1189,7 @@ fatm_supply_large_buffers(struct fatm_softc *sc)
if_printf(sc->ifp, "out of rbufs\n");
break;
}
if ((m = m_getcl(M_DONTWAIT, MT_DATA,
if ((m = m_getcl(M_NOWAIT, MT_DATA,
M_PKTHDR)) == NULL) {
LIST_INSERT_HEAD(&sc->rbuf_free, rb, link);
break;
@ -1768,17 +1768,17 @@ copy_mbuf(struct mbuf *m)
{
struct mbuf *new;
MGET(new, M_DONTWAIT, MT_DATA);
MGET(new, M_NOWAIT, MT_DATA);
if (new == NULL)
return (NULL);
if (m->m_flags & M_PKTHDR) {
M_MOVE_PKTHDR(new, m);
if (m->m_len > MHLEN)
MCLGET(new, M_WAIT);
MCLGET(new, M_WAITOK);
} else {
if (m->m_len > MLEN)
MCLGET(new, M_WAIT);
MCLGET(new, M_WAITOK);
}
bcopy(m->m_data, new->m_data, m->m_len);

View File

@ -1870,13 +1870,13 @@ fe_get_packet (struct fe_softc * sc, u_short len)
*/
/* Allocate an mbuf with packet header info. */
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return -1;
/* Attach a cluster if this packet doesn't fit in a normal mbuf. */
if (len > MHLEN - NFS_MAGIC_OFFSET) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (!(m->m_flags & M_EXT)) {
m_freem(m);
return -1;

View File

@ -977,7 +977,7 @@ fwohci_start(struct fwohci_softc *sc, struct fwohci_dbch *dbch)
if (firewire_debug)
device_printf(sc->fc.dev, "EFBIG.\n");
m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m0 != NULL) {
m_copydata(xfer->mbuf, 0,
xfer->mbuf->m_pkthdr.len,

View File

@ -357,7 +357,7 @@ fwe_init(void *arg)
STAILQ_INIT(&xferq->stdma);
xferq->stproc = NULL;
for (i = 0; i < xferq->bnchunk; i ++) {
m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xferq->bulkxfer[i].mbuf = m;
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
STAILQ_INSERT_TAIL(&xferq->stfree,
@ -606,7 +606,7 @@ fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp)
#endif
/* keep ip packet alignment for alpha */
M_PREPEND(m, ETHER_ALIGN, M_DONTWAIT);
M_PREPEND(m, ETHER_ALIGN, M_NOWAIT);
fp = &xfer->send.hdr;
*(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr;
fp->mode.stream.len = m->m_pkthdr.len;
@ -657,7 +657,7 @@ fwe_as_input(struct fw_xferq *xferq)
m = sxfer->mbuf;
/* insert new rbuf */
sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m0 != NULL) {
m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);

View File

@ -333,7 +333,7 @@ fwip_init(void *arg)
STAILQ_INIT(&xferq->stdma);
xferq->stproc = NULL;
for (i = 0; i < xferq->bnchunk; i ++) {
m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xferq->bulkxfer[i].mbuf = m;
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
STAILQ_INSERT_TAIL(&xferq->stfree,
@ -349,7 +349,7 @@ fwip_init(void *arg)
xfer = fw_xfer_alloc(M_FWIP);
if (xfer == NULL)
break;
m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xfer->recv.payload = mtod(m, uint32_t *);
xfer->recv.pay_len = MCLBYTES;
xfer->hand = fwip_unicast_input;
@ -657,7 +657,7 @@ fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
*/
uint32_t *p;
M_PREPEND(m, 2*sizeof(uint32_t), M_DONTWAIT);
M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
p = mtod(m, uint32_t *);
fp->mode.stream.len = m->m_pkthdr.len;
fp->mode.stream.chtag = broadcast_channel;
@ -778,7 +778,7 @@ fwip_stream_input(struct fw_xferq *xferq)
m = sxfer->mbuf;
/* insert new rbuf */
sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m0 != NULL) {
m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
@ -871,7 +871,7 @@ fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
* We have finished with a unicast xfer. Allocate a new
* cluster and stick it on the back of the input queue.
*/
m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
xfer->mbuf = m;
xfer->recv.payload = mtod(m, uint32_t *);
xfer->recv.pay_len = MCLBYTES;

View File

@ -1447,7 +1447,7 @@ fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(*m_head) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
@ -1563,7 +1563,7 @@ fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head,
segs, &nseg, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg);
m = m_collapse(*m_head, M_NOWAIT, sc->maxtxseg);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -2628,7 +2628,7 @@ fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
bus_dmamap_t tmp_map;
int error;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);

View File

@ -1120,7 +1120,7 @@ gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
cflags = 0;
if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
if (M_WRITABLE(*m_head) == 0) {
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
*m_head = m;
if (m == NULL)
@ -1143,7 +1143,7 @@ gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -1663,7 +1663,7 @@ gem_add_rxbuf(struct gem_softc *sc, int idx)
GEM_LOCK_ASSERT(sc, MA_OWNED);
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;

View File

@ -370,7 +370,7 @@ gx_rx_intr(void *arg)
continue;
}
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL) {
device_printf(sc->sc_dev, "no memory for receive mbuf.\n");
sc->sc_ifp->if_iqdrops++;

View File

@ -330,7 +330,7 @@ he_intr_rbp(struct hatm_softc *sc, struct herbp *rbp, u_int large,
if (large) {
/* allocate the MBUF */
if ((m = m_getcl(M_DONTWAIT, MT_DATA,
if ((m = m_getcl(M_NOWAIT, MT_DATA,
M_PKTHDR)) == NULL) {
if_printf(sc->ifp,
"no mbuf clusters\n");
@ -437,7 +437,7 @@ hatm_rx_buffer(struct hatm_softc *sc, u_int group, u_int handle)
DBG(sc, RX, ("RX group=%u handle=%x page=%u chunk=%u", group, handle,
pageno, chunkno));
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (group == 0) {
struct mbuf0_chunk *c0;

View File

@ -466,7 +466,7 @@ hatm_start(struct ifnet *ifp)
if (error == EFBIG) {
/* try to defragment the packet */
sc->istats.defrag++;
m = m_defrag(m, M_DONTWAIT);
m = m_defrag(m, M_NOWAIT);
if (m == NULL) {
tpd->mbuf = NULL;
hatm_free_txmbuf(sc);

View File

@ -1878,14 +1878,14 @@ hifn_crypto(
totlen = cmd->src_mapsize;
if (cmd->src_m->m_flags & M_PKTHDR) {
len = MHLEN;
MGETHDR(m0, M_DONTWAIT, MT_DATA);
if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) {
m_free(m0);
m0 = NULL;
}
} else {
len = MLEN;
MGET(m0, M_DONTWAIT, MT_DATA);
MGET(m0, M_NOWAIT, MT_DATA);
}
if (m0 == NULL) {
hifnstats.hst_nomem_mbuf++;
@ -1893,7 +1893,7 @@ hifn_crypto(
goto err_srcmap;
}
if (totlen >= MINCLSIZE) {
MCLGET(m0, M_DONTWAIT);
MCLGET(m0, M_NOWAIT);
if ((m0->m_flags & M_EXT) == 0) {
hifnstats.hst_nomem_mcl++;
err = sc->sc_cmdu ? ERESTART : ENOMEM;
@ -1907,7 +1907,7 @@ hifn_crypto(
mlast = m0;
while (totlen > 0) {
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
hifnstats.hst_nomem_mbuf++;
err = sc->sc_cmdu ? ERESTART : ENOMEM;
@ -1916,7 +1916,7 @@ hifn_crypto(
}
len = MLEN;
if (totlen >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
hifnstats.hst_nomem_mcl++;
err = sc->sc_cmdu ? ERESTART : ENOMEM;

View File

@ -564,7 +564,7 @@ hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold)
hme_discard_rxbuf(sc, ri);
return (0);
}
if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL)
if ((m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
b = mtod(m, uintptr_t);
@ -951,7 +951,7 @@ hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
cflags = 0;
if (((*m0)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) {
if (M_WRITABLE(*m0) == 0) {
m = m_dup(*m0, M_DONTWAIT);
m = m_dup(*m0, M_NOWAIT);
m_freem(*m0);
*m0 = m;
if (m == NULL)
@ -974,7 +974,7 @@ hme_load_txmbuf(struct hme_softc *sc, struct mbuf **m0)
error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, htx->htx_dmamap,
*m0, segs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m0, M_DONTWAIT, HME_NTXSEGS);
m = m_collapse(*m0, M_NOWAIT, HME_NTXSEGS);
if (m == NULL) {
m_freem(*m0);
*m0 = NULL;

View File

@ -698,7 +698,7 @@ ieget(struct ie_softc *sc, struct mbuf **mp)
return (-1);
}
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (!m) {
ie_drop_packet_buffer(sc);
/* XXXX if_ierrors++; */
@ -727,7 +727,7 @@ ieget(struct ie_softc *sc, struct mbuf **mp)
* single mbuf which may or may not be big enough. Got that?
*/
if (top) {
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (!m) {
m_freem(top);
ie_drop_packet_buffer(sc);
@ -736,7 +736,7 @@ ieget(struct ie_softc *sc, struct mbuf **mp)
m->m_len = MLEN;
}
if (resid >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if (m->m_flags & M_EXT)
m->m_len = min(resid, MCLBYTES);
} else {

View File

@ -1174,7 +1174,7 @@ ndis_rxeof_eth(adapter, ctx, addr, hdr, hdrlen, lookahead, lookaheadlen, pktlen)
block = adapter;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return;
@ -1418,7 +1418,7 @@ ndis_rxeof(adapter, packets, pktcnt)
} else {
#ifdef notdef
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) {
m = m_dup(m0, M_DONTWAIT);
m = m_dup(m0, M_NOWAIT);
/*
* NOTE: we want to destroy the mbuf here, but
* we don't actually want to return it to the
@ -1436,7 +1436,7 @@ ndis_rxeof(adapter, packets, pktcnt)
} else
p->np_oob.npo_status = NDIS_STATUS_PENDING;
#endif
m = m_dup(m0, M_DONTWAIT);
m = m_dup(m0, M_NOWAIT);
if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES)
p->np_refcnt++;
else

View File

@ -709,7 +709,7 @@ ipw_dma_alloc(struct ipw_softc *sc)
sbuf = &sc->rx_sbuf_list[i];
sbd->bd = &sc->rbd_list[i];
sbuf->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
sbuf->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (sbuf->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
@ -1207,7 +1207,7 @@ ipw_rx_data_intr(struct ipw_softc *sc, struct ipw_status *status,
* drop the received packet and reuse the old mbuf. In the unlikely
* case that the old mbuf can't be reloaded either, explicitly panic.
*/
mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
ifp->if_ierrors++;
return;
@ -1651,7 +1651,7 @@ ipw_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni)
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_DONTWAIT);
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");

View File

@ -788,7 +788,7 @@ iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count)
goto fail;
}
data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
@ -1236,7 +1236,7 @@ iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i,
* drop the received packet and reuse the old mbuf. In the unlikely
* case that the old mbuf can't be reloaded either, explicitly panic.
*/
mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
ifp->if_ierrors++;
return;
@ -1884,7 +1884,7 @@ iwi_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni,
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_DONTWAIT);
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");

View File

@ -1377,7 +1377,7 @@ iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
goto fail;
}
data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
IWN_RBUF_SIZE);
if (data->m == NULL) {
device_printf(sc->sc_dev,
@ -2334,7 +2334,7 @@ iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
return;
}
m1 = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE);
if (m1 == NULL) {
DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n",
__func__);
@ -3539,7 +3539,7 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
return error;
}
/* Too many DMA segments, linearize mbuf. */
m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"%s: could not defrag mbuf\n", __func__);
@ -3743,7 +3743,7 @@ iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m,
return error;
}
/* Too many DMA segments, linearize mbuf. */
m1 = m_collapse(m, M_DONTWAIT, IWN_MAX_SCATTER);
m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"%s: could not defrag mbuf\n", __func__);
@ -3976,7 +3976,7 @@ iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
/* Command is too large to fit in a descriptor. */
if (totlen > MCLBYTES)
return EINVAL;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m == NULL)
return ENOMEM;
cmd = mtod(m, struct iwn_tx_cmd *);

View File

@ -1790,7 +1790,7 @@ ixgb_get_buf(int i, struct adapter * adapter,
if (mp == NULL) {
mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mp == NULL) {
adapter->mbuf_alloc_failed++;

View File

@ -3707,7 +3707,7 @@ ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
if (rxbuf->buf == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
mp = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, rxr->mbuf_sz);
if (mp == NULL)
goto update;

View File

@ -1235,7 +1235,7 @@ ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
if (error == EFBIG) {
struct mbuf *m;
m = m_defrag(*m_headp, M_DONTWAIT);
m = m_defrag(*m_headp, M_NOWAIT);
if (m == NULL) {
adapter->mbuf_defrag_failed++;
m_freem(*m_headp);
@ -2723,7 +2723,7 @@ ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
while (j != limit) {
rxbuf = &rxr->rx_buffers[i];
if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
mh = m_gethdr(M_DONTWAIT, MT_DATA);
mh = m_gethdr(M_NOWAIT, MT_DATA);
if (mh == NULL)
goto update;
mh->m_pkthdr.len = mh->m_len = MHLEN;
@ -2747,7 +2747,7 @@ ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
}
if (rxbuf->m_pack == NULL) {
mp = m_getjcl(M_DONTWAIT, MT_DATA,
mp = m_getjcl(M_NOWAIT, MT_DATA,
M_PKTHDR, adapter->rx_mbuf_sz);
if (mp == NULL)
goto update;

View File

@ -1712,7 +1712,7 @@ jme_encap(struct jme_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(*m_head) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
@ -1774,7 +1774,7 @@ jme_encap(struct jme_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->jme_cdata.jme_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, JME_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, JME_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -3181,7 +3181,7 @@ jme_newbuf(struct jme_softc *sc, struct jme_rxdesc *rxd)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
/*

View File

@ -394,7 +394,7 @@ lance_get(struct lance_softc *sc, int boff, int totlen)
return (NULL);
}
MGETHDR(m0, M_DONTWAIT, MT_DATA);
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 == NULL)
return (NULL);
m0->m_pkthdr.rcvif = ifp;
@ -404,7 +404,7 @@ lance_get(struct lance_softc *sc, int boff, int totlen)
while (totlen > 0) {
if (totlen >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0)
goto bad;
len = MCLBYTES;
@ -423,7 +423,7 @@ lance_get(struct lance_softc *sc, int boff, int totlen)
totlen -= len;
if (totlen > 0) {
MGET(newm, M_DONTWAIT, MT_DATA);
MGET(newm, M_NOWAIT, MT_DATA);
if (newm == 0)
goto bad;
len = MLEN;

View File

@ -691,7 +691,7 @@ lge_newbuf(sc, c, m)
caddr_t *buf = NULL;
if (m == NULL) {
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->lge_dev, "no memory for rx list "
"-- packet dropped!\n");

View File

@ -2916,7 +2916,7 @@ rxintr_cleanup(softc_t *sc)
/* Optimization: copy a small pkt into a small mbuf. */
if (first_mbuf->m_pkthdr.len <= COPY_BREAK)
{
MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA);
MGETHDR(new_mbuf, M_NOWAIT, MT_DATA);
if (new_mbuf != NULL)
{
new_mbuf->m_pkthdr.rcvif = first_mbuf->m_pkthdr.rcvif;
@ -3016,7 +3016,7 @@ rxintr_setup(softc_t *sc)
return 0; /* ring is full; nothing to do */
/* Allocate a small mbuf and attach an mbuf cluster. */
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
{
sc->status.cntrs.rxdma++;
@ -3024,7 +3024,7 @@ rxintr_setup(softc_t *sc)
printf("%s: rxintr_setup: MGETHDR() failed\n", NAME_UNIT);
return 0;
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0)
{
m_freem(m);

View File

@ -854,7 +854,7 @@ malo_tx_dmasetup(struct malo_softc *sc, struct malo_txbuf *bf, struct mbuf *m0)
*/
if (error == EFBIG) { /* too many desc's, linearize */
sc->malo_stats.mst_tx_linear++;
m = m_defrag(m0, M_DONTWAIT);
m = m_defrag(m0, M_NOWAIT);
if (m == NULL) {
m_freem(m0);
sc->malo_stats.mst_tx_nombuf++;
@ -1396,7 +1396,7 @@ malo_getrxmbuf(struct malo_softc *sc, struct malo_rxbuf *bf)
int error;
/* XXX don't need mbuf, just dma buffer */
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
if (m == NULL) {
sc->malo_stats.mst_rx_nombuf++; /* XXX */
return NULL;

View File

@ -389,7 +389,7 @@ mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp,
KASSERT(mbufp != NULL, ("NULL mbuf pointer!"));
new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
new_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (new_mbuf == NULL)
return (ENOBUFS);
new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size;
@ -1549,7 +1549,7 @@ mge_start_locked(struct ifnet *ifp)
if (m0 == NULL)
break;
mtmp = m_defrag(m0, M_DONTWAIT);
mtmp = m_defrag(m0, M_NOWAIT);
if (mtmp)
m0 = mtmp;

View File

@ -693,7 +693,7 @@ ngmn_connect(hook_p hook)
/* Setup a transmit chain with one descriptor */
/* XXX: we actually send a 1 byte packet */
dp = mn_alloc_desc();
MGETHDR(m, M_WAIT, MT_DATA);
MGETHDR(m, M_WAITOK, MT_DATA);
m->m_pkthdr.len = 0;
dp->m = m;
dp->flags = 0xc0000000 + (1 << 16);
@ -708,8 +708,8 @@ ngmn_connect(hook_p hook)
dp = mn_alloc_desc();
m = NULL;
MGETHDR(m, M_WAIT, MT_DATA);
MCLGET(m, M_WAIT);
MGETHDR(m, M_WAITOK, MT_DATA);
MCLGET(m, M_WAITOK);
dp->m = m;
dp->data = vtophys(m->m_data);
dp->flags = 0x40000000;
@ -722,8 +722,8 @@ ngmn_connect(hook_p hook)
dp2 = dp;
dp = mn_alloc_desc();
m = NULL;
MGETHDR(m, M_WAIT, MT_DATA);
MCLGET(m, M_WAIT);
MGETHDR(m, M_WAITOK, MT_DATA);
MCLGET(m, M_WAITOK);
dp->m = m;
dp->data = vtophys(m->m_data);
dp->flags = 0x00000000;
@ -1160,12 +1160,12 @@ mn_rx_intr(struct mn_softc *sc, u_int32_t vector)
/* Replenish desc + mbuf supplies */
if (!m) {
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
mn_free_desc(dp);
return; /* ENOBUFS */
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if((m->m_flags & M_EXT) == 0) {
mn_free_desc(dp);
m_freem(m);

View File

@ -897,7 +897,7 @@ msk_newbuf(struct msk_if_softc *sc_if, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
@ -955,7 +955,7 @@ msk_jumbo_newbuf(struct msk_if_softc *sc_if, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
if ((m->m_flags & M_EXT) == 0) {
@ -2653,7 +2653,7 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
@ -2732,7 +2732,7 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
*m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, MSK_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -2810,7 +2810,7 @@ mwl_rx_proc(void *arg, int npending)
* be a net loss. The tradeoff might be system
* dependent (cache architecture is important).
*/
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
DPRINTF(sc, MWL_DEBUG_ANY,
"%s: no rx mbuf\n", __func__);
@ -3087,9 +3087,9 @@ mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0)
if (error == EFBIG) { /* too many desc's, linearize */
sc->sc_stats.mst_tx_linear++;
#if MWL_TXDESC > 1
m = m_collapse(m0, M_DONTWAIT, MWL_TXDESC);
m = m_collapse(m0, M_NOWAIT, MWL_TXDESC);
#else
m = m_defrag(m0, M_DONTWAIT);
m = m_defrag(m0, M_NOWAIT);
#endif
if (m == NULL) {
m_freem(m0);

View File

@ -2004,7 +2004,7 @@ mxge_vlan_tag_insert(struct mbuf *m)
{
struct ether_vlan_header *evl;
M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_DONTWAIT);
M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
if (__predict_false(m == NULL))
return NULL;
if (m->m_len < sizeof(*evl)) {
@ -2376,7 +2376,7 @@ mxge_get_buf_small(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
mxge_rx_ring_t *rx = &ss->rx_small;
int cnt, err;
m = m_gethdr(M_DONTWAIT, MT_DATA);
m = m_gethdr(M_NOWAIT, MT_DATA);
if (m == NULL) {
rx->alloc_fail++;
err = ENOBUFS;
@ -2409,7 +2409,7 @@ mxge_get_buf_big(struct mxge_slice_state *ss, bus_dmamap_t map, int idx)
mxge_rx_ring_t *rx = &ss->rx_big;
int cnt, err, i;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, rx->cl_size);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size);
if (m == NULL) {
rx->alloc_fail++;
err = ENOBUFS;

View File

@ -1085,13 +1085,13 @@ my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c)
struct mbuf *m_new = NULL;
MY_LOCK_ASSERT(sc);
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->my_dev,
"no memory for rx list -- packet dropped!\n");
return (ENOBUFS);
}
MCLGET(m_new, M_DONTWAIT);
MCLGET(m_new, M_NOWAIT);
if (!(m_new->m_flags & M_EXT)) {
device_printf(sc->my_dev,
"no memory for rx list -- packet dropped!\n");
@ -1352,13 +1352,13 @@ my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head)
* chain.
*/
m = m_head;
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->my_dev, "no memory for tx list");
return (1);
}
if (m_head->m_pkthdr.len > MHLEN) {
MCLGET(m_new, M_DONTWAIT);
MCLGET(m_new, M_NOWAIT);
if (!(m_new->m_flags & M_EXT)) {
m_freem(m_new);
device_printf(sc->my_dev, "no memory for tx list");

View File

@ -1975,7 +1975,7 @@ nfe_newbuf(struct nfe_softc *sc, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
@ -2031,7 +2031,7 @@ nfe_jnewbuf(struct nfe_softc *sc, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
if ((m->m_flags & M_EXT) == 0) {
@ -2400,7 +2400,7 @@ nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
m = m_collapse(*m_head, M_NOWAIT, NFE_MAX_SCATTER);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -1381,7 +1381,7 @@ nge_newbuf(struct nge_softc *sc, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1894,7 +1894,7 @@ nge_encap(struct nge_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, NGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -764,7 +764,7 @@ nve_init_rings(struct nve_softc *sc)
struct nve_rx_desc *desc = sc->rx_desc + i;
struct nve_map_buffer *buf = &desc->buf;
buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (buf->mbuf == NULL) {
device_printf(sc->dev, "couldn't allocate mbuf\n");
nve_free_rings(sc);
@ -918,7 +918,7 @@ nve_ifstart_locked(struct ifnet *ifp)
* cluster
*/
if (error) {
m = m_defrag(m0, M_DONTWAIT);
m = m_defrag(m0, M_NOWAIT);
if (m == NULL) {
m_freem(m0);
sc->tx_errors++;
@ -1480,7 +1480,7 @@ nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id)
buf = &desc->buf;
if (buf->mbuf == NULL) {
buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
buf->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (buf->mbuf == NULL) {
device_printf(sc->dev, "failed to allocate memory\n");
goto fail;

View File

@ -3024,7 +3024,7 @@ xge_send_locked(struct ifnet *ifnetp, int qindex)
}
if(count >= max_fragments) {
m_buf = m_defrag(m_head, M_DONTWAIT);
m_buf = m_defrag(m_head, M_NOWAIT);
if(m_buf != NULL) m_head = m_buf;
XGE_DRV_STATS(tx_defrag);
}
@ -3132,7 +3132,7 @@ xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
if(buffer_size <= MCLBYTES) {
cluster_size = MCLBYTES;
mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
}
else {
cluster_size = MJUMPAGESIZE;
@ -3140,7 +3140,7 @@ xge_get_buf(xge_hal_dtr_h dtrh, xge_rx_priv_t *rxd_priv,
(buffer_size > MJUMPAGESIZE)) {
cluster_size = MJUM9BYTES;
}
mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, cluster_size);
mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, cluster_size);
}
if(!mp) {
xge_trace(XGE_ERR, "Out of memory to allocate mbuf");

View File

@ -902,7 +902,7 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
} else if (rc == EFBIG) {
if (retry_cnt == 0) {
m_temp = m_defrag(m, M_DONTWAIT);
m_temp = m_defrag(m, M_NOWAIT);
if (m_temp == NULL)
goto free_ret;
m = m_temp;
@ -995,7 +995,7 @@ oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp)
m = *mpp;
if (M_WRITABLE(m) == 0) {
m = m_dup(*mpp, M_DONTWAIT);
m = m_dup(*mpp, M_NOWAIT);
if (!m)
return NULL;
m_freem(*mpp);
@ -1481,7 +1481,7 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
break; /* no more room */
pd = &rq->pckts[rq->packets_in];
pd->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (pd->mbuf == NULL)
break;

View File

@ -324,7 +324,7 @@ patm_lmbuf_alloc(struct patm_softc *sc)
struct mbuf *m;
struct lmbuf *b;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (NULL);
m->m_data += LMBUF_OFFSET;

View File

@ -396,9 +396,9 @@ patm_rcv_mbuf(struct patm_softc *sc, void *buf, u_int h, int hdr)
return ((struct mbuf *)buf);
if (hdr)
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
else
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
patm_rcv_free(sc, buf, h);
return (NULL);
@ -458,7 +458,7 @@ patm_rx_raw(struct patm_softc *sc, u_char *cell)
}
}
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
sc->stats.raw_no_buf++;
return;

View File

@ -438,7 +438,7 @@ patm_tx_pad(struct patm_softc *sc, struct mbuf *m0)
return (m0);
}
}
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (m == 0) {
m_freem(m0);
sc->ifp->if_oerrors++;
@ -532,7 +532,7 @@ patm_launch(struct patm_softc *sc, struct patm_scd *scd)
error = bus_dmamap_load_mbuf(sc->tx_tag, map->map, m,
patm_load_txbuf, &a, BUS_DMA_NOWAIT);
if (error == EFBIG) {
if ((m = m_defrag(m, M_DONTWAIT)) == NULL) {
if ((m = m_defrag(m, M_NOWAIT)) == NULL) {
sc->ifp->if_oerrors++;
continue;
}

View File

@ -798,11 +798,11 @@ pcn_newbuf(sc, idx, m)
c = &sc->pcn_ldata->pcn_rx_list[idx];
if (m == NULL) {
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL)
return(ENOBUFS);
MCLGET(m_new, M_DONTWAIT);
MCLGET(m_new, M_NOWAIT);
if (!(m_new->m_flags & M_EXT)) {
m_freem(m_new);
return(ENOBUFS);

View File

@ -187,9 +187,9 @@ typedef struct _pdq_os_ctx_t {
#define PDQ_OS_DATABUF_ALLOC(pdq, b) do { \
PDQ_OS_DATABUF_T *x_m0; \
MGETHDR(x_m0, M_DONTWAIT, MT_DATA); \
MGETHDR(x_m0, M_NOWAIT, MT_DATA); \
if (x_m0 != NULL) { \
MCLGET(x_m0, M_DONTWAIT); \
MCLGET(x_m0, M_NOWAIT); \
if ((x_m0->m_flags & M_EXT) == 0) { \
m_free(x_m0); \
(b) = NULL; \

View File

@ -742,12 +742,12 @@ pdq_os_databuf_alloc(
struct mbuf *m;
bus_dmamap_t map;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
return NULL;
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
m_free(m);

View File

@ -214,9 +214,9 @@ typedef struct mbuf PDQ_OS_DATABUF_T;
#ifndef PDQ_OS_DATABUF_ALLOC
#define PDQ_OS_DATABUF_ALLOC(pdq, b) do { \
PDQ_OS_DATABUF_T *x_m0; \
MGETHDR(x_m0, M_DONTWAIT, MT_DATA); \
MGETHDR(x_m0, M_NOWAIT, MT_DATA); \
if (x_m0 != NULL) { \
MCLGET(x_m0, M_DONTWAIT); \
MCLGET(x_m0, M_NOWAIT); \
if ((x_m0->m_flags & M_EXT) == 0) { \
m_free(x_m0); \
(b) = NULL; \

View File

@ -1063,7 +1063,7 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
QL_DPRINT8((ha->pci_dev, "%s: EFBIG [%d]\n", __func__,
m_head->m_pkthdr.len));
m = m_defrag(m_head, M_DONTWAIT);
m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
ha->err_tx_defrag++;
m_freem(m_head);
@ -1405,7 +1405,7 @@ qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
if (mp == NULL) {
if (!jumbo) {
mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mp = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mp == NULL) {
ha->err_m_getcl++;
@ -1416,7 +1416,7 @@ qla_get_mbuf(qla_host_t *ha, qla_rx_buf_t *rxb, struct mbuf *nmp,
}
mp->m_len = mp->m_pkthdr.len = MCLBYTES;
} else {
mp = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
MJUM9BYTES);
if (mp == NULL) {
ha->err_m_getjcl++;

View File

@ -673,7 +673,7 @@ rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring,
goto fail;
}
data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
@ -1160,7 +1160,7 @@ rt2560_decryption_intr(struct rt2560_softc *sc)
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
ifp->if_ierrors++;
goto skip;
@ -1844,7 +1844,7 @@ rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0,
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_DONTWAIT);
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");

View File

@ -682,7 +682,7 @@ rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring,
goto fail;
}
data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
@ -1030,7 +1030,7 @@ rt2661_rx_intr(struct rt2661_softc *sc)
* mbuf. In the unlikely case that the old mbuf can't be
* reloaded either, explicitly panic.
*/
mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (mnew == NULL) {
ifp->if_ierrors++;
goto skip;
@ -1534,7 +1534,7 @@ rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0,
return error;
}
if (error != 0) {
mnew = m_defrag(m0, M_DONTWAIT);
mnew = m_defrag(m0, M_NOWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");

View File

@ -743,7 +743,7 @@ rt2860_alloc_rx_ring(struct rt2860_softc *sc, struct rt2860_rx_ring *ring)
goto fail;
}
data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (data->m == NULL) {
device_printf(sc->sc_dev,
"could not allocate rx mbuf\n");
@ -1237,7 +1237,7 @@ rt2860_rx_intr(struct rt2860_softc *sc)
}
#endif
m1 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m1 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m1 == NULL)) {
ifp->if_ierrors++;
goto skip;
@ -1625,7 +1625,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
}
}
if (__predict_false(error != 0)) {
m1 = m_defrag(m, M_DONTWAIT);
m1 = m_defrag(m, M_NOWAIT);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");
@ -1877,7 +1877,7 @@ rt2860_tx_raw(struct rt2860_softc *sc, struct mbuf *m,
}
}
if (__predict_false(error != 0)) {
m1 = m_defrag(m, M_DONTWAIT);
m1 = m_defrag(m, M_NOWAIT);
if (m1 == NULL) {
device_printf(sc->sc_dev,
"could not defragment mbuf\n");

View File

@ -758,7 +758,7 @@ re_diag(struct rl_softc *sc)
u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' };
/* Allocate a single mbuf */
MGETHDR(m0, M_DONTWAIT, MT_DATA);
MGETHDR(m0, M_NOWAIT, MT_DATA);
if (m0 == NULL)
return (ENOBUFS);
@ -1886,7 +1886,7 @@ re_newbuf(struct rl_softc *sc, int idx)
uint32_t cmdstat;
int error, nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
@ -1950,7 +1950,7 @@ re_jumbo_newbuf(struct rl_softc *sc, int idx)
uint32_t cmdstat;
int error, nsegs;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MJUM9BYTES;
@ -2700,7 +2700,7 @@ re_encap(struct rl_softc *sc, struct mbuf **m_head)
padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len;
if (M_WRITABLE(*m_head) == 0) {
/* Get a writable copy. */
m_new = m_dup(*m_head, M_DONTWAIT);
m_new = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m_new == NULL) {
*m_head = NULL;
@ -2710,7 +2710,7 @@ re_encap(struct rl_softc *sc, struct mbuf **m_head)
}
if ((*m_head)->m_next != NULL ||
M_TRAILINGSPACE(*m_head) < padlen) {
m_new = m_defrag(*m_head, M_DONTWAIT);
m_new = m_defrag(*m_head, M_NOWAIT);
if (m_new == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -2734,7 +2734,7 @@ re_encap(struct rl_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
*m_head, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
m_new = m_collapse(*m_head, M_NOWAIT, RL_NTXSEGS);
if (m_new == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -892,7 +892,7 @@ rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid)
"mbuf: ndmasegs=%d, len=%d, error=%d\n",
ndmasegs, m->m_pkthdr.len, error);
m_d = m_collapse(m, M_DONTWAIT, 16);
m_d = m_collapse(m, M_NOWAIT, 16);
if (m_d == NULL) {
m_freem(m);
m = NULL;
@ -1637,7 +1637,7 @@ rt_rx_eof(struct rt_softc *sc, int limit)
nframes++;
mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
MJUMPAGESIZE);
if (mnew == NULL) {
sc->rx_mbuf_alloc_errors++;
@ -2009,7 +2009,7 @@ rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring)
goto fail;
}
data->m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
MJUMPAGESIZE);
if (data->m == NULL) {
device_printf(sc->dev, "could not allocate Rx mbuf\n");

View File

@ -1325,15 +1325,15 @@ safe_process(device_t dev, struct cryptop *crp, int hint)
totlen = re->re_src_mapsize;
if (re->re_src_m->m_flags & M_PKTHDR) {
len = MHLEN;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m && !m_dup_pkthdr(m, re->re_src_m,
M_DONTWAIT)) {
M_NOWAIT)) {
m_free(m);
m = NULL;
}
} else {
len = MLEN;
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
}
if (m == NULL) {
safestats.st_nombuf++;
@ -1341,7 +1341,7 @@ safe_process(device_t dev, struct cryptop *crp, int hint)
goto errout;
}
if (totlen >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_free(m);
safestats.st_nomcl++;
@ -1357,7 +1357,7 @@ safe_process(device_t dev, struct cryptop *crp, int hint)
while (totlen > 0) {
if (top) {
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
m_freem(top);
safestats.st_nombuf++;
@ -1368,7 +1368,7 @@ safe_process(device_t dev, struct cryptop *crp, int hint)
len = MLEN;
}
if (top && totlen >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
*mp = m;
m_freem(top);

View File

@ -863,7 +863,7 @@ get_rx_buf(struct sbni_softc *sc)
{
struct mbuf *m;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
if_printf(sc->ifp, "cannot allocate header mbuf\n");
return (0);
@ -877,7 +877,7 @@ get_rx_buf(struct sbni_softc *sc)
*/
if (ETHER_MAX_LEN + 2 > MHLEN) {
/* Attach an mbuf cluster */
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_freem(m);
return (0);

View File

@ -1455,7 +1455,7 @@ sf_newbuf(struct sf_softc *sc, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -2171,7 +2171,7 @@ sf_encap(struct sf_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, SF_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -97,7 +97,7 @@ sfxge_dma_map_sg_collapse(bus_dma_tag_t tag, bus_dmamap_t map,
goto err_out;
} else if (err == EFBIG || seg_count >= maxsegs) {
if (!defragged) {
m = m_defrag(*mp, M_DONTWAIT);
m = m_defrag(*mp, M_NOWAIT);
if (m == NULL) {
err = ENOBUFS;
goto err_out;

View File

@ -262,7 +262,7 @@ static int sfxge_tx_queue_mbuf(struct sfxge_txq *txq, struct mbuf *mbuf)
mbuf, dma_seg, &n_dma_seg, 0);
if (rc == EFBIG) {
/* Try again. */
struct mbuf *new_mbuf = m_collapse(mbuf, M_DONTWAIT,
struct mbuf *new_mbuf = m_collapse(mbuf, M_NOWAIT,
SFXGE_TX_MAPPING_MAX_SEG);
if (new_mbuf == NULL)
goto reject;

View File

@ -1083,7 +1083,7 @@ sge_newbuf(struct sge_softc *sc, int prod)
SGE_LOCK_ASSERT(sc);
cd = &sc->sge_cdata;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1407,7 +1407,7 @@ sge_encap(struct sge_softc *sc, struct mbuf **m_head)
if (M_WRITABLE(*m_head) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
@ -1464,7 +1464,7 @@ sge_encap(struct sge_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sge_cdata.sge_txmbuf_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, SGE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, SGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -1411,7 +1411,7 @@ sis_newbuf(struct sis_softc *sc, struct sis_rxdesc *rxd)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = SIS_RXLEN;
@ -1771,7 +1771,7 @@ sis_encap(struct sis_softc *sc, struct mbuf **m_head)
padlen = SIS_MIN_FRAMELEN - m->m_pkthdr.len;
if (M_WRITABLE(m) == 0) {
/* Get a writable copy. */
m = m_dup(*m_head, M_DONTWAIT);
m = m_dup(*m_head, M_NOWAIT);
m_freem(*m_head);
if (m == NULL) {
*m_head = NULL;
@ -1780,7 +1780,7 @@ sis_encap(struct sis_softc *sc, struct mbuf **m_head)
*m_head = m;
}
if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) {
m = m_defrag(m, M_DONTWAIT);
m = m_defrag(m, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -1799,7 +1799,7 @@ sis_encap(struct sis_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sis_tx_tag, txd->tx_dmamap,
*m_head, segs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, SIS_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, SIS_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -965,7 +965,7 @@ sk_newbuf(sc_if, idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1010,7 +1010,7 @@ sk_jumbo_newbuf(sc_if, idx)
bus_dmamap_t map;
int nsegs;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
if ((m->m_flags & M_EXT) == 0) {
@ -2393,7 +2393,7 @@ sk_encap(sc_if, m_head)
error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nseg, 0);
if (error == EFBIG) {
m = m_defrag(*m_head, M_DONTWAIT);
m = m_defrag(*m_head, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -688,11 +688,11 @@ smc_task_rx(void *context, int pending)
/*
* Grab an mbuf and attach a cluster.
*/
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL) {
break;
}
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
m_freem(m);
break;

View File

@ -1056,7 +1056,7 @@ snread(struct ifnet *ifp)
/*
* Allocate a header mbuf from the kernel.
*/
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
goto out;
@ -1066,7 +1066,7 @@ snread(struct ifnet *ifp)
/*
* Attach an mbuf cluster
*/
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
/*
* Insist on getting a cluster

View File

@ -1108,7 +1108,7 @@ sonic_get(struct snc_softc *sc, u_int32_t pkt, int datalen)
* Our sonic_read() and sonic_get() require it.
*/
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == 0)
return (0);
m->m_pkthdr.rcvif = sc->sc_ifp;
@ -1119,7 +1119,7 @@ sonic_get(struct snc_softc *sc, u_int32_t pkt, int datalen)
while (datalen > 0) {
if (top) {
MGET(m, M_DONTWAIT, MT_DATA);
MGET(m, M_NOWAIT, MT_DATA);
if (m == 0) {
m_freem(top);
return (0);
@ -1127,7 +1127,7 @@ sonic_get(struct snc_softc *sc, u_int32_t pkt, int datalen)
len = MLEN;
}
if (datalen >= MINCLSIZE) {
MCLGET(m, M_DONTWAIT);
MCLGET(m, M_NOWAIT);
if ((m->m_flags & M_EXT) == 0) {
if (top) m_freem(top);
return (0);

View File

@ -1385,7 +1385,7 @@ ste_newbuf(struct ste_softc *sc, struct ste_chain_onefrag *rxc)
bus_dmamap_t map;
int error, nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1824,7 +1824,7 @@ ste_encap(struct ste_softc *sc, struct mbuf **m_head, struct ste_chain *txc)
error = bus_dmamap_load_mbuf_sg(sc->ste_cdata.ste_tx_tag,
txc->ste_map, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, STE_MAXFRAGS);
m = m_collapse(*m_head, M_NOWAIT, STE_MAXFRAGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

View File

@ -1081,7 +1081,7 @@ stge_encap(struct stge_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag,
txd->tx_dmamap, *m_head, txsegs, &nsegs, 0);
if (error == EFBIG) {
m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS);
m = m_collapse(*m_head, M_NOWAIT, STGE_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;
@ -1609,7 +1609,7 @@ stge_fixup_rx(struct stge_softc *sc, struct mbuf *m)
m->m_data += ETHER_HDR_LEN;
n = m;
} else {
MGETHDR(n, M_DONTWAIT, MT_DATA);
MGETHDR(n, M_NOWAIT, MT_DATA);
if (n != NULL) {
bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
m->m_data += ETHER_HDR_LEN;
@ -2439,7 +2439,7 @@ stge_newbuf(struct stge_softc *sc, int idx)
bus_dmamap_t map;
int nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;

View File

@ -1381,7 +1381,7 @@ ti_newbuf_std(struct ti_softc *sc, int i)
struct ti_rx_desc *r;
int error, nsegs;
m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MCLBYTES;
@ -1436,7 +1436,7 @@ ti_newbuf_mini(struct ti_softc *sc, int i)
struct ti_rx_desc *r;
int error, nsegs;
MGETHDR(m, M_DONTWAIT, MT_DATA);
MGETHDR(m, M_NOWAIT, MT_DATA);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MHLEN;
@ -1495,7 +1495,7 @@ ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *dummy)
(void)dummy;
m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES);
if (m == NULL)
return (ENOBUFS);
m->m_len = m->m_pkthdr.len = MJUM9BYTES;
@ -1577,19 +1577,19 @@ ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
}
} else {
/* Allocate the mbufs. */
MGETHDR(m_new, M_DONTWAIT, MT_DATA);
MGETHDR(m_new, M_NOWAIT, MT_DATA);
if (m_new == NULL) {
device_printf(sc->ti_dev, "mbuf allocation failed "
"-- packet dropped!\n");
goto nobufs;
}
MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA);
MGET(m[NPAYLOAD], M_NOWAIT, MT_DATA);
if (m[NPAYLOAD] == NULL) {
device_printf(sc->ti_dev, "cluster mbuf allocation "
"failed -- packet dropped!\n");
goto nobufs;
}
MCLGET(m[NPAYLOAD], M_DONTWAIT);
MCLGET(m[NPAYLOAD], M_NOWAIT);
if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) {
device_printf(sc->ti_dev, "mbuf allocation failed "
"-- packet dropped!\n");
@ -1598,7 +1598,7 @@ ti_newbuf_jumbo(struct ti_softc *sc, int idx, struct mbuf *m_old)
m[NPAYLOAD]->m_len = MCLBYTES;
for (i = 0; i < NPAYLOAD; i++){
MGET(m[i], M_DONTWAIT, MT_DATA);
MGET(m[i], M_NOWAIT, MT_DATA);
if (m[i] == NULL) {
device_printf(sc->ti_dev, "mbuf allocation "
"failed -- packet dropped!\n");
@ -3051,7 +3051,7 @@ ti_encap(struct ti_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->ti_cdata.ti_tx_tag, txd->tx_dmamap,
*m_head, txsegs, &nseg, 0);
if (error == EFBIG) {
m = m_defrag(*m_head, M_DONTWAIT);
m = m_defrag(*m_head, M_NOWAIT);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;

Some files were not shown because too many files have changed in this diff.
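
For reference, the call sites in this diff all reduce to a few recurring mbuf allocation shapes, now spelled with the malloc(9) M_NOWAIT flag. The sketch below is a reading aid only: the foo_* helpers and FOO_MAXTXSEGS are hypothetical names, not code from this commit, and the error handling is trimmed to the minimum.

/*
 * Minimal sketch of the three patterns touched above, assuming a
 * hypothetical "foo" driver.  Not part of this commit.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>

#define	FOO_MAXTXSEGS	32	/* hypothetical per-descriptor segment limit */

/* Rx refill: packet-header mbuf with a 2K cluster, never sleeping. */
static struct mbuf *
foo_newbuf(void)
{
	struct mbuf *m;

	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (NULL);		/* caller typically returns ENOBUFS */
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	return (m);
}

/* Legacy two-step form still used by several of the older drivers above. */
static struct mbuf *
foo_newbuf_legacy(void)
{
	struct mbuf *m;

	MGETHDR(m, M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	MCLGET(m, M_NOWAIT);
	if ((m->m_flags & M_EXT) == 0) {	/* no cluster was attached */
		m_freem(m);
		return (NULL);
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	return (m);
}

/* Tx EFBIG recovery: squeeze the chain into fewer DMA segments. */
static struct mbuf *
foo_collapse_tx(struct mbuf *m)
{
	struct mbuf *n;

	n = m_collapse(m, M_NOWAIT, FOO_MAXTXSEGS);
	if (n == NULL)
		m_freem(m);		/* chain could not be collapsed */
	return (n);
}

By this point M_DONTWAIT was already an alias for M_NOWAIT in mbuf(9), which is why the substitution throughout sys/dev is purely mechanical and does not change sleeping behaviour.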