- Retire npe_defrag(), gem_defrag(), msk_defrag(), nfe_defrag(), and
  re_defrag() and use m_collapse() instead.
- Replace a reference to ath_defrag() in a comment in if_wpi.c with
  m_collapse().
jhb 2008-01-17 23:37:47 +00:00
parent 7e32513a5b
commit 4d2d2276c8
6 changed files with 6 additions and 442 deletions
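
All five drivers carried private copies of ath(4)'s ath_defrag(); m_collapse() is the same chain-compaction routine, since hoisted into the generic mbuf code, so the duplicates can go. The sketch below illustrates the EFBIG retry idiom the drivers share after this change. It is a minimal sketch, not code from the commit: drv_load_tx_mbuf() and its parameters are hypothetical, while m_collapse() and bus_dmamap_load_mbuf_sg() are used with their real signatures.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/bus.h>
#include <machine/bus.h>

static int
drv_load_tx_mbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m_head,
    bus_dma_segment_t *segs, int *nsegs, int maxsegs)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs, nsegs,
	    BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		/*
		 * Too many fragments for the DMA tag: try to squeeze
		 * the chain into at most maxsegs mbufs+clusters.
		 * m_collapse() returns NULL if it cannot, leaving the
		 * (possibly partially collapsed) chain for us to free.
		 */
		m = m_collapse(*m_head, M_DONTWAIT, maxsegs);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOBUFS);
		}
		*m_head = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs,
		    nsegs, BUS_DMA_NOWAIT);
	}
	return (error);
}

Unlike m_defrag(), which always copies the whole packet into a fresh chain, m_collapse() stops as soon as the chain fits in maxsegs segments, so the common case costs little.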


@@ -1139,90 +1139,6 @@ npeinit(void *xsc)
NPE_UNLOCK(sc);
}
/*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*/
static struct mbuf *
npe_defrag(struct mbuf *m0, int how, int maxfrags)
{
struct mbuf *m, *n, *n2, **prev;
u_int curfrags;
/*
* Calculate the current number of frags.
*/
curfrags = 0;
for (m = m0; m != NULL; m = m->m_next)
curfrags++;
/*
* First, try to collapse mbufs. Note that we always collapse
* towards the front so we don't need to deal with moving the
* pkthdr. This may be suboptimal if the first mbuf has much
* less data than the following.
*/
m = m0;
again:
for (;;) {
n = m->m_next;
if (n == NULL)
break;
if ((m->m_flags & M_RDONLY) == 0 &&
n->m_len < M_TRAILINGSPACE(m)) {
bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
n->m_len);
m->m_len += n->m_len;
m->m_next = n->m_next;
m_free(n);
if (--curfrags <= maxfrags)
return m0;
} else
m = n;
}
KASSERT(maxfrags > 1,
("maxfrags %u, but normal collapse failed", maxfrags));
/*
* Collapse consecutive mbufs to a cluster.
*/
prev = &m0->m_next; /* NB: not the first mbuf */
while ((n = *prev) != NULL) {
if ((n2 = n->m_next) != NULL &&
n->m_len + n2->m_len < MCLBYTES) {
m = m_getcl(how, MT_DATA, 0);
if (m == NULL)
goto bad;
bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
n2->m_len);
m->m_len = n->m_len + n2->m_len;
m->m_next = n2->m_next;
*prev = m;
m_free(n);
m_free(n2);
if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
return m0;
/*
* Still not there, try the normal collapse
* again before we allocate another cluster.
*/
goto again;
}
prev = &n->m_next;
}
/*
* No place where we can collapse to a cluster; punt.
* This can occur if, for example, you request 2 frags
* but the packet requires that both be clusters (we
* never reallocate the first mbuf to avoid moving the
* packet header).
*/
bad:
return NULL;
}
/*
* Dequeue packets and place on the h/w transmit queue.
*/
@@ -1254,7 +1170,7 @@ npestart_locked(struct ifnet *ifp)
error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map,
m, segs, &nseg, 0);
if (error == EFBIG) {
-n = npe_defrag(m, M_DONTWAIT, NPE_MAXSEG);
+n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG);
if (n == NULL) {
if_printf(ifp, "%s: too many fragments %u\n",
__func__, nseg);


@@ -100,7 +100,6 @@ static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr,
uint32_t set);
static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs,
int nsegs, int error);
-static struct mbuf *gem_defrag(struct mbuf *m0, int how, int maxfrags);
static int gem_disable_rx(struct gem_softc *sc);
static int gem_disable_tx(struct gem_softc *sc);
static void gem_eint(struct gem_softc *sc, u_int status);
@@ -1057,92 +1056,6 @@ gem_init_locked(struct gem_softc *sc)
callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc);
}
/*
* This is a copy of ath_defrag(ath(4)).
*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*/
static struct mbuf *
gem_defrag(struct mbuf *m0, int how, int maxfrags)
{
struct mbuf *m, *n, *n2, **prev;
u_int curfrags;
/*
* Calculate the current number of frags.
*/
curfrags = 0;
for (m = m0; m != NULL; m = m->m_next)
curfrags++;
/*
* First, try to collapse mbufs. Note that we always collapse
* towards the front so we don't need to deal with moving the
* pkthdr. This may be suboptimal if the first mbuf has much
* less data than the following.
*/
m = m0;
again:
for (;;) {
n = m->m_next;
if (n == NULL)
break;
if ((m->m_flags & M_RDONLY) == 0 &&
n->m_len < M_TRAILINGSPACE(m)) {
bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
n->m_len);
m->m_len += n->m_len;
m->m_next = n->m_next;
m_free(n);
if (--curfrags <= maxfrags)
return (m0);
} else
m = n;
}
KASSERT(maxfrags > 1,
("maxfrags %u, but normal collapse failed", maxfrags));
/*
* Collapse consecutive mbufs to a cluster.
*/
prev = &m0->m_next; /* NB: not the first mbuf. */
while ((n = *prev) != NULL) {
if ((n2 = n->m_next) != NULL &&
n->m_len + n2->m_len < MCLBYTES) {
m = m_getcl(how, MT_DATA, 0);
if (m == NULL)
goto bad;
bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
n2->m_len);
m->m_len = n->m_len + n2->m_len;
m->m_next = n2->m_next;
*prev = m;
m_free(n);
m_free(n2);
if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
return (m0);
/*
* Still not there, try the normal collapse
* again before we allocate another cluster.
*/
goto again;
}
prev = &n->m_next;
}
/*
* No place where we can collapse to a cluster; punt.
* This can occur if, for example, you request 2 frags
* but the packet requires that both be clusters (we
* never reallocate the first mbuf to avoid moving the
* packet header).
*/
bad:
return (NULL);
}
static int
gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
{
@@ -1160,7 +1073,7 @@ gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap,
*m_head, txsegs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
-m = gem_defrag(*m_head, M_DONTWAIT, GEM_NTXSEGS);
+m = m_collapse(*m_head, M_DONTWAIT, GEM_NTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;


@@ -247,7 +247,6 @@ static void msk_intr_hwerr(struct msk_softc *);
static void msk_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_jumbo_rxeof(struct msk_if_softc *, uint32_t, int);
static void msk_txeof(struct msk_if_softc *, int);
-static struct mbuf *msk_defrag(struct mbuf *, int, int);
static int msk_encap(struct msk_if_softc *, struct mbuf **);
static void msk_tx_task(void *, int);
static void msk_start(struct ifnet *);
@@ -2502,92 +2501,6 @@ msk_jfree(void *buf, void *args)
MSK_JLIST_UNLOCK(sc_if);
}
/*
* It's copy of ath_defrag(ath(4)).
*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*/
static struct mbuf *
msk_defrag(struct mbuf *m0, int how, int maxfrags)
{
struct mbuf *m, *n, *n2, **prev;
u_int curfrags;
/*
* Calculate the current number of frags.
*/
curfrags = 0;
for (m = m0; m != NULL; m = m->m_next)
curfrags++;
/*
* First, try to collapse mbufs. Note that we always collapse
* towards the front so we don't need to deal with moving the
* pkthdr. This may be suboptimal if the first mbuf has much
* less data than the following.
*/
m = m0;
again:
for (;;) {
n = m->m_next;
if (n == NULL)
break;
if ((m->m_flags & M_RDONLY) == 0 &&
n->m_len < M_TRAILINGSPACE(m)) {
bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
n->m_len);
m->m_len += n->m_len;
m->m_next = n->m_next;
m_free(n);
if (--curfrags <= maxfrags)
return (m0);
} else
m = n;
}
KASSERT(maxfrags > 1,
("maxfrags %u, but normal collapse failed", maxfrags));
/*
* Collapse consecutive mbufs to a cluster.
*/
prev = &m0->m_next; /* NB: not the first mbuf */
while ((n = *prev) != NULL) {
if ((n2 = n->m_next) != NULL &&
n->m_len + n2->m_len < MCLBYTES) {
m = m_getcl(how, MT_DATA, 0);
if (m == NULL)
goto bad;
bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
n2->m_len);
m->m_len = n->m_len + n2->m_len;
m->m_next = n2->m_next;
*prev = m;
m_free(n);
m_free(n2);
if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
return m0;
/*
* Still not there, try the normal collapse
* again before we allocate another cluster.
*/
goto again;
}
prev = &n->m_next;
}
/*
* No place where we can collapse to a cluster; punt.
* This can occur if, for example, you request 2 frags
* but the packet requires that both be clusters (we
* never reallocate the first mbuf to avoid moving the
* packet header).
*/
bad:
return (NULL);
}
static int
msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
{
@@ -2685,7 +2598,7 @@ msk_encap(struct msk_if_softc *sc_if, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc_if->msk_cdata.msk_tx_tag, map,
*m_head, txsegs, &nseg, BUS_DMA_NOWAIT);
if (error == EFBIG) {
-m = msk_defrag(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
+m = m_collapse(*m_head, M_DONTWAIT, MSK_MAXTXSEGS);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;


@@ -98,7 +98,6 @@ static int nfe_jnewbuf(struct nfe_softc *, int);
static int nfe_rxeof(struct nfe_softc *, int);
static int nfe_jrxeof(struct nfe_softc *, int);
static void nfe_txeof(struct nfe_softc *);
-static struct mbuf *nfe_defrag(struct mbuf *, int, int);
static int nfe_encap(struct nfe_softc *, struct mbuf **);
static void nfe_setmulti(struct nfe_softc *);
static void nfe_tx_task(void *, int);
@@ -2437,93 +2436,6 @@ nfe_txeof(struct nfe_softc *sc)
}
}
/*
* It's copy of ath_defrag(ath(4)).
*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*/
static struct mbuf *
nfe_defrag(struct mbuf *m0, int how, int maxfrags)
{
struct mbuf *m, *n, *n2, **prev;
u_int curfrags;
/*
* Calculate the current number of frags.
*/
curfrags = 0;
for (m = m0; m != NULL; m = m->m_next)
curfrags++;
/*
* First, try to collapse mbufs. Note that we always collapse
* towards the front so we don't need to deal with moving the
* pkthdr. This may be suboptimal if the first mbuf has much
* less data than the following.
*/
m = m0;
again:
for (;;) {
n = m->m_next;
if (n == NULL)
break;
if ((m->m_flags & M_RDONLY) == 0 &&
n->m_len < M_TRAILINGSPACE(m)) {
bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
n->m_len);
m->m_len += n->m_len;
m->m_next = n->m_next;
m_free(n);
if (--curfrags <= maxfrags)
return (m0);
} else
m = n;
}
KASSERT(maxfrags > 1,
("maxfrags %u, but normal collapse failed", maxfrags));
/*
* Collapse consecutive mbufs to a cluster.
*/
prev = &m0->m_next; /* NB: not the first mbuf */
while ((n = *prev) != NULL) {
if ((n2 = n->m_next) != NULL &&
n->m_len + n2->m_len < MCLBYTES) {
m = m_getcl(how, MT_DATA, 0);
if (m == NULL)
goto bad;
bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
n2->m_len);
m->m_len = n->m_len + n2->m_len;
m->m_next = n2->m_next;
*prev = m;
m_free(n);
m_free(n2);
if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
return m0;
/*
* Still not there, try the normal collapse
* again before we allocate another cluster.
*/
goto again;
}
prev = &n->m_next;
}
/*
* No place where we can collapse to a cluster; punt.
* This can occur if, for example, you request 2 frags
* but the packet requires that both be clusters (we
* never reallocate the first mbuf to avoid moving the
* packet header).
*/
bad:
return (NULL);
}
static int
nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
{
@@ -2542,7 +2454,7 @@ nfe_encap(struct nfe_softc *sc, struct mbuf **m_head)
error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs,
&nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
-m = nfe_defrag(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
+m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER);
if (m == NULL) {
m_freem(*m_head);
*m_head = NULL;


@@ -233,7 +233,6 @@ static int re_probe (device_t);
static int re_attach (device_t);
static int re_detach (device_t);
-static struct mbuf *re_defrag (struct mbuf *, int, int);
static int re_encap (struct rl_softc *, struct mbuf **);
static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int);
@@ -2129,95 +2128,6 @@ re_int_task(arg, npending)
return;
}
/*
* It's copy of ath_defrag(ath(4)).
*
* Defragment an mbuf chain, returning at most maxfrags separate
* mbufs+clusters. If this is not possible NULL is returned and
* the original mbuf chain is left in it's present (potentially
* modified) state. We use two techniques: collapsing consecutive
* mbufs and replacing consecutive mbufs by a cluster.
*/
static struct mbuf *
re_defrag(m0, how, maxfrags)
struct mbuf *m0;
int how;
int maxfrags;
{
struct mbuf *m, *n, *n2, **prev;
u_int curfrags;
/*
* Calculate the current number of frags.
*/
curfrags = 0;
for (m = m0; m != NULL; m = m->m_next)
curfrags++;
/*
* First, try to collapse mbufs. Note that we always collapse
* towards the front so we don't need to deal with moving the
* pkthdr. This may be suboptimal if the first mbuf has much
* less data than the following.
*/
m = m0;
again:
for (;;) {
n = m->m_next;
if (n == NULL)
break;
if ((m->m_flags & M_RDONLY) == 0 &&
n->m_len < M_TRAILINGSPACE(m)) {
bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
n->m_len);
m->m_len += n->m_len;
m->m_next = n->m_next;
m_free(n);
if (--curfrags <= maxfrags)
return (m0);
} else
m = n;
}
KASSERT(maxfrags > 1,
("maxfrags %u, but normal collapse failed", maxfrags));
/*
* Collapse consecutive mbufs to a cluster.
*/
prev = &m0->m_next; /* NB: not the first mbuf */
while ((n = *prev) != NULL) {
if ((n2 = n->m_next) != NULL &&
n->m_len + n2->m_len < MCLBYTES) {
m = m_getcl(how, MT_DATA, 0);
if (m == NULL)
goto bad;
bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
n2->m_len);
m->m_len = n->m_len + n2->m_len;
m->m_next = n2->m_next;
*prev = m;
m_free(n);
m_free(n2);
if (--curfrags <= maxfrags) /* +1 cl -2 mbufs */
return m0;
/*
* Still not there, try the normal collapse
* again before we allocate another cluster.
*/
goto again;
}
prev = &n->m_next;
}
/*
* No place where we can collapse to a cluster; punt.
* This can occur if, for example, you request 2 frags
* but the packet requires that both be clusters (we
* never reallocate the first mbuf to avoid moving the
* packet header).
*/
bad:
return (NULL);
}
static int
re_encap(sc, m_head)
struct rl_softc *sc;
@@ -2290,7 +2200,7 @@ re_encap(sc, m_head)
error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap,
*m_head, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == EFBIG) {
-m_new = re_defrag(*m_head, M_DONTWAIT, RL_NTXSEGS);
+m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS);
if (m_new == NULL) {
m_freem(*m_head);
*m_head = NULL;


@@ -2108,7 +2108,7 @@ wpi_tx_data(struct wpi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
return error;
}
if (error != 0) {
-/* XXX use ath_defrag */
+/* XXX use m_collapse */
mnew = m_defrag(m0, M_DONTWAIT);
if (mnew == NULL) {
device_printf(sc->sc_dev,
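
wpi(4) still falls back to m_defrag() here; the updated comment merely points at m_collapse() as the eventual replacement. For reference, the two routines are declared in sys/sys/mbuf.h as shown below; the summary comment paraphrases their behavior and is not part of this diff:

/*
 * m_defrag() rebuilds the chain into the smallest possible number
 * of mbufs+clusters, always copying the data.  m_collapse() only
 * coalesces neighboring mbufs until the chain fits in maxfrags
 * segments; it never reallocates the leading mbuf (so the pkthdr
 * stays put) and returns NULL, rather than copying everything,
 * when it cannot meet the limit.
 */
struct mbuf	*m_defrag(struct mbuf *, int);
struct mbuf	*m_collapse(struct mbuf *, int, int);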