Implement a global (all non-mgmt traffic) TX ath_buf limitation when ath_start() is called.

This (which defaults to 10 frames) leaves a little headroom in the TX ath_buf
allocation, so buffer cloning is still possible.
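
As a rough sketch, the gate added to ath_start() amounts to the following
(names taken from the diff below; sc_txq_data_minfree holds the threshold,
which defaults to 10):

	for (;;) {
		ATH_TXBUF_LOCK(sc);
		/* Stop dequeuing data frames once only the reserved headroom is left. */
		if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
			ATH_TXBUF_UNLOCK(sc);
			IF_LOCK(&ifp->if_snd);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;	/* ask the stack to back off */
			IF_UNLOCK(&ifp->if_snd);
			break;
		}
		ATH_TXBUF_UNLOCK(sc);
		/* ... otherwise grab an ath_buf and transmit the next frame as before ... */
	}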

This requires a lot more experimenting and tuning.

It also doesn't stop a node/TID from consuming all of the available
ath_bufs, especially when the node is going through high packet loss
or is only talking at a low TX rate.  Nor does it stop a paused TID
from taking all of the ath_bufs.  I'll look at fixing that up in subsequent
commits.

PR:	kern/168170
Adrian Chadd 2012-06-14 00:51:53 +00:00
parent 6ad799103d
commit 23ced6c117
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=237038
3 changed files with 52 additions and 7 deletions


@@ -2239,8 +2239,22 @@ _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
if (btype == ATH_BUFTYPE_MGMT)
TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
else
else {
TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt--;
/*
* This shouldn't happen; however just to be
* safe print a warning and fudge the txbuf
* count.
*/
if (sc->sc_txbuf_cnt < 0) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt < 0?\n",
__func__);
sc->sc_txbuf_cnt = 0;
}
}
} else
bf = NULL;
@@ -2367,6 +2381,7 @@ ath_start(struct ifnet *ifp)
"%s: sc_inreset_cnt > 0; bailing\n", __func__);
ATH_PCU_UNLOCK(sc);
IF_LOCK(&ifp->if_snd);
sc->sc_stats.ast_tx_qstop++;
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
IF_UNLOCK(&ifp->if_snd);
return;
@@ -2375,6 +2390,17 @@ ath_start(struct ifnet *ifp)
ATH_PCU_UNLOCK(sc);
for (;;) {
ATH_TXBUF_LOCK(sc);
if (sc->sc_txbuf_cnt <= sc->sc_txq_data_minfree) {
/* XXX increment counter? */
ATH_TXBUF_UNLOCK(sc);
IF_LOCK(&ifp->if_snd);
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
IF_UNLOCK(&ifp->if_snd);
break;
}
ATH_TXBUF_UNLOCK(sc);
/*
* Grab a TX buffer and associated resources.
*/
@@ -2883,6 +2909,7 @@ ath_desc_alloc(struct ath_softc *sc)
ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
return error;
}
sc->sc_txbuf_cnt = ath_txbuf;
error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
"tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC);
@@ -3686,8 +3713,17 @@ ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
if (bf->bf_flags & ATH_BUF_MGMT)
TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
else
else {
TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt++;
if (sc->sc_txbuf_cnt > ath_txbuf) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__,
ath_txbuf);
sc->sc_txbuf_cnt = ath_txbuf;
}
}
}
void
@@ -3698,8 +3734,17 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
if (bf->bf_flags & ATH_BUF_MGMT)
TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
else
else {
TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
sc->sc_txbuf_cnt++;
if (sc->sc_txbuf_cnt > ATH_TXBUF) {
device_printf(sc->sc_dev,
"%s: sc_txbuf_cnt > %d?\n",
__func__,
ATH_TXBUF);
sc->sc_txbuf_cnt = ATH_TXBUF;
}
}
}
/*


@@ -374,8 +374,8 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
t++;
}
ATH_TXBUF_UNLOCK(sc);
printf("Total TX buffers: %d; Total TX buffers busy: %d\n",
t, i);
printf("Total TX buffers: %d; Total TX buffers busy: %d (%d)\n",
t, i, sc->sc_txbuf_cnt);
i = t = 0;
ATH_TXBUF_LOCK(sc);
@@ -620,12 +620,11 @@ ath_sysctlattach(struct ath_softc *sc)
"tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0,
"");
#if 0
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txq_data_minfree", CTLFLAG_RW, &sc->sc_txq_data_minfree,
0, "Minimum free buffers before adding a data frame"
" to the TX queue");
#endif
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txq_mcastq_maxdepth", CTLFLAG_RW,
&sc->sc_txq_mcastq_maxdepth, 0,


@@ -501,6 +501,7 @@ struct ath_softc {
struct ath_descdma sc_txdma; /* TX descriptors */
ath_bufhead sc_txbuf; /* transmit buffer */
int sc_txbuf_cnt; /* how many buffers avail */
struct ath_descdma sc_txdma_mgmt; /* mgmt TX descriptors */
ath_bufhead sc_txbuf_mgmt; /* mgmt transmit buffer */
struct mtx sc_txbuflock; /* txbuf lock */