promote ath_defrag to m_collapse (and retire private+unused m_collapse from cxgb)

Reviewed by:	pyun, jhb, kmacy
MFC after:	2 weeks
commit e443f3b38c
parent d51108b354
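At the ath(4) transmit setup path the visible change is a one-line rename; the routine itself moves, essentially unchanged, into the generic mbuf code and gains a public prototype (full hunks below):

	/* before: driver-private helper */
	m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC);

	/* after: generic mbuf routine added by this commit */
	m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);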
@@ -3891,90 +3891,6 @@ ath_tx_cleanup(struct ath_softc *sc)
 	ATH_TXQ_LOCK_DESTROY(&sc->sc_mcastq);
 }
 
-/*
- * Defragment an mbuf chain, returning at most maxfrags separate
- * mbufs+clusters.  If this is not possible NULL is returned and
- * the original mbuf chain is left in it's present (potentially
- * modified) state.  We use two techniques: collapsing consecutive
- * mbufs and replacing consecutive mbufs by a cluster.
- */
-static struct mbuf *
-ath_defrag(struct mbuf *m0, int how, int maxfrags)
-{
-	struct mbuf *m, *n, *n2, **prev;
-	u_int curfrags;
-
-	/*
-	 * Calculate the current number of frags.
-	 */
-	curfrags = 0;
-	for (m = m0; m != NULL; m = m->m_next)
-		curfrags++;
-	/*
-	 * First, try to collapse mbufs.  Note that we always collapse
-	 * towards the front so we don't need to deal with moving the
-	 * pkthdr.  This may be suboptimal if the first mbuf has much
-	 * less data than the following.
-	 */
-	m = m0;
-again:
-	for (;;) {
-		n = m->m_next;
-		if (n == NULL)
-			break;
-		if ((m->m_flags & M_RDONLY) == 0 &&
-		    n->m_len < M_TRAILINGSPACE(m)) {
-			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
-				n->m_len);
-			m->m_len += n->m_len;
-			m->m_next = n->m_next;
-			m_free(n);
-			if (--curfrags <= maxfrags)
-				return m0;
-		} else
-			m = n;
-	}
-	KASSERT(maxfrags > 1,
-		("maxfrags %u, but normal collapse failed", maxfrags));
-	/*
-	 * Collapse consecutive mbufs to a cluster.
-	 */
-	prev = &m0->m_next;		/* NB: not the first mbuf */
-	while ((n = *prev) != NULL) {
-		if ((n2 = n->m_next) != NULL &&
-		    n->m_len + n2->m_len < MCLBYTES) {
-			m = m_getcl(how, MT_DATA, 0);
-			if (m == NULL)
-				goto bad;
-			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
-			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
-				n2->m_len);
-			m->m_len = n->m_len + n2->m_len;
-			m->m_next = n2->m_next;
-			*prev = m;
-			m_free(n);
-			m_free(n2);
-			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
-				return m0;
-			/*
-			 * Still not there, try the normal collapse
-			 * again before we allocate another cluster.
-			 */
-			goto again;
-		}
-		prev = &n->m_next;
-	}
-	/*
-	 * No place where we can collapse to a cluster; punt.
-	 * This can occur if, for example, you request 2 frags
-	 * but the packet requires that both be clusters (we
-	 * never reallocate the first mbuf to avoid moving the
-	 * packet header).
-	 */
-bad:
-	return NULL;
-}
-
 /*
  * Return h/w rate index for an IEEE rate (w/o basic rate bit).
  */
@@ -4033,7 +3949,7 @@ ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0)
 	 */
 	if (bf->bf_nseg > ATH_TXDESC) {		/* too many desc's, linearize */
 		sc->sc_stats.ast_tx_linear++;
-		m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC);
+		m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC);
 		if (m == NULL) {
 			ath_freetx(m0);
 			sc->sc_stats.ast_tx_nombuf++;
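The hunk above is the usual "too many DMA segments, linearize and retry" path. A minimal sketch of that idiom using the new routine is shown below; it is an illustration only, the tx_dmamap_load() name and the tag/map/segment parameters are placeholders, not code from this commit:

/*
 * Sketch only; a real driver adds statistics and error bookkeeping.
 * Needs <sys/param.h>, <sys/systm.h>, <sys/mbuf.h>, <machine/bus.h>.
 */
static int
tx_dmamap_load(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **m0,
    bus_dma_segment_t *segs, int *nsegs, int maxsegs)
{
	struct mbuf *m;
	int error;

	error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nsegs,
	    BUS_DMA_NOWAIT);
	if (error == 0 && *nsegs > maxsegs) {
		/* Mapped, but too fragmented for the descriptor ring. */
		bus_dmamap_unload(tag, map);
		error = EFBIG;
	}
	if (error == EFBIG) {
		/* Squeeze the chain down to at most maxsegs mbufs+clusters. */
		m = m_collapse(*m0, M_DONTWAIT, maxsegs);
		if (m == NULL) {
			m_freem(*m0);
			*m0 = NULL;
			return (ENOMEM);
		}
		*m0 = m;
		error = bus_dmamap_load_mbuf_sg(tag, map, *m0, segs, nsegs,
		    BUS_DMA_NOWAIT);
	}
	return (error);
}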
@@ -114,7 +114,6 @@ void mi_init(void);
 void mi_deinit(void);
 
 int _m_explode(struct mbuf *);
-int _m_collapse(struct mbuf *, int maxbufs, struct mbuf **);
 void mb_free_vec(struct mbuf *m);
 
 static __inline void
@@ -185,19 +184,6 @@ struct mbuf *mi_collapse_mbuf(struct mbuf_iovec *mi, struct mbuf *m);
 struct mbuf *mi_collapse_sge(struct mbuf_iovec *mi, bus_dma_segment_t *seg);
 void *mcl_alloc(int seg_count, int *type);
 
-static __inline int
-m_collapse(struct mbuf *m, int maxbufs, struct mbuf **mnew)
-{
-#if (!defined(__sparc64__) && !defined(__sun4v__))
-	if (m->m_next == NULL)
-#endif
-	{
-		*mnew = m;
-		return (0);
-	}
-	return _m_collapse(m, maxbufs, mnew);
-}
-
 void mb_free_ext_fast(struct mbuf_iovec *mi, int type, int idx);
 
 static __inline void
@@ -1539,6 +1539,92 @@ nospace:
 	return (NULL);
 }
 
+/*
+ * Defragment an mbuf chain, returning at most maxfrags separate
+ * mbufs+clusters.  If this is not possible NULL is returned and
+ * the original mbuf chain is left in it's present (potentially
+ * modified) state.  We use two techniques: collapsing consecutive
+ * mbufs and replacing consecutive mbufs by a cluster.
+ *
+ * NB: this should really be named m_defrag but that name is taken
+ */
+struct mbuf *
+m_collapse(struct mbuf *m0, int how, int maxfrags)
+{
+	struct mbuf *m, *n, *n2, **prev;
+	u_int curfrags;
+
+	/*
+	 * Calculate the current number of frags.
+	 */
+	curfrags = 0;
+	for (m = m0; m != NULL; m = m->m_next)
+		curfrags++;
+	/*
+	 * First, try to collapse mbufs.  Note that we always collapse
+	 * towards the front so we don't need to deal with moving the
+	 * pkthdr.  This may be suboptimal if the first mbuf has much
+	 * less data than the following.
+	 */
+	m = m0;
+again:
+	for (;;) {
+		n = m->m_next;
+		if (n == NULL)
+			break;
+		if ((m->m_flags & M_RDONLY) == 0 &&
+		    n->m_len < M_TRAILINGSPACE(m)) {
+			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
+				n->m_len);
+			m->m_len += n->m_len;
+			m->m_next = n->m_next;
+			m_free(n);
+			if (--curfrags <= maxfrags)
+				return m0;
+		} else
+			m = n;
+	}
+	KASSERT(maxfrags > 1,
+		("maxfrags %u, but normal collapse failed", maxfrags));
+	/*
+	 * Collapse consecutive mbufs to a cluster.
+	 */
+	prev = &m0->m_next;		/* NB: not the first mbuf */
+	while ((n = *prev) != NULL) {
+		if ((n2 = n->m_next) != NULL &&
+		    n->m_len + n2->m_len < MCLBYTES) {
+			m = m_getcl(how, MT_DATA, 0);
+			if (m == NULL)
+				goto bad;
+			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
+			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
+				n2->m_len);
+			m->m_len = n->m_len + n2->m_len;
+			m->m_next = n2->m_next;
+			*prev = m;
+			m_free(n);
+			m_free(n2);
+			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
+				return m0;
+			/*
+			 * Still not there, try the normal collapse
+			 * again before we allocate another cluster.
+			 */
+			goto again;
+		}
+		prev = &n->m_next;
+	}
+	/*
+	 * No place where we can collapse to a cluster; punt.
+	 * This can occur if, for example, you request 2 frags
+	 * but the packet requires that both be clusters (we
+	 * never reallocate the first mbuf to avoid moving the
+	 * packet header).
+	 */
+bad:
+	return NULL;
+}
+
 #ifdef MBUF_STRESS_TEST
 
 /*
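A note on use: unlike m_defrag(9), which copies the whole packet into a fresh chain of clusters, m_collapse() works largely in place and is bounded by maxfrags; on failure the (possibly partially collapsed) original chain still belongs to the caller. A hedged caller-side sketch of the common driver idiom, not part of this commit, with tx_limit_frags() as a made-up name:

/* Illustrative helper only. */
static struct mbuf *
tx_limit_frags(struct mbuf *m0, int maxfrags)
{
	struct mbuf *m;

	/* Cheap path: collapse in place, reusing the existing mbufs. */
	m = m_collapse(m0, M_DONTWAIT, maxfrags);
	if (m != NULL)
		return (m);
	/*
	 * m_collapse() failed but left the original chain intact (though
	 * possibly partially collapsed); fall back to the heavyweight
	 * copy.  m_defrag() frees the original chain on success.
	 */
	m = m_defrag(m0, M_DONTWAIT);
	if (m == NULL)
		m_freem(m0);	/* no luck either way: drop the packet */
	return (m);
}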
@@ -746,6 +746,7 @@ int m_append(struct mbuf *, int, c_caddr_t);
 void m_cat(struct mbuf *, struct mbuf *);
 void m_extadd(struct mbuf *, caddr_t, u_int,
     void (*)(void *, void *), void *, int, int);
+struct mbuf *m_collapse(struct mbuf *, int, int);
 void m_copyback(struct mbuf *, int, int, c_caddr_t);
 void m_copydata(const struct mbuf *, int, int, caddr_t);
 struct mbuf *m_copym(struct mbuf *, int, int, int);