Implement a separate, smaller pool of ath_buf entries for use by management
traffic.

* Create sc_txbuf_mgmt and sc_txdma_mgmt; initialise/free them appropriately.
* Create an enum to represent buffer types in the API.
* Extend ath_getbuf() and _ath_getbuf_locked() to take the above enum
  (see the sketch after this list).
* Right now anything sent via ic_raw_xmit() allocates via ATH_BUFTYPE_MGMT.
  This is likely too broad, as not everything sent via the raw xmit path
  is actually a management frame.
* Add ATH_BUF_MGMT flag (ath_buf.bf_flags) which indicates the current buffer
  is a mgmt buffer and should go back onto the mgmt free list.
* Extend 'txagg' to include debugging output for both normal and mgmt txbufs.
* When checking/clearing ATH_BUF_BUSY, do it on both TX pools.
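
The pieces above fit together as in the following minimal, self-contained
userspace sketch of the two-pool allocate/return flow.  This is an
illustration only, not the driver code: the ATH_TXBUF_LOCK locking, DMA
setup, the ATH_BUF_BUSY check and the statistics counters are elided, and
the helper names (getbuf_locked/getbuf/returnbuf_tail, txbuf/txbuf_mgmt)
are simplified stand-ins for the real ones.

    #include <stdio.h>
    #include <sys/queue.h>

    typedef enum {
        ATH_BUFTYPE_NORMAL = 0,
        ATH_BUFTYPE_MGMT = 1,
    } ath_buf_type_t;

    #define ATH_BUF_MGMT 0x00000001

    struct ath_buf {
        TAILQ_ENTRY(ath_buf) bf_list;
        int bf_flags;
    };
    TAILQ_HEAD(ath_bufhead_s, ath_buf);

    static struct ath_bufhead_s txbuf = TAILQ_HEAD_INITIALIZER(txbuf);
    static struct ath_bufhead_s txbuf_mgmt = TAILQ_HEAD_INITIALIZER(txbuf_mgmt);

    /* Pop from the requested pool and (re)mark the buffer's type. */
    static struct ath_buf *
    getbuf_locked(ath_buf_type_t btype)
    {
        struct ath_bufhead_s *head =
            (btype == ATH_BUFTYPE_MGMT) ? &txbuf_mgmt : &txbuf;
        struct ath_buf *bf = TAILQ_FIRST(head);

        if (bf == NULL)
            return (NULL);
        TAILQ_REMOVE(head, bf, bf_list);
        if (btype == ATH_BUFTYPE_MGMT)
            bf->bf_flags |= ATH_BUF_MGMT;
        else
            bf->bf_flags &= ~ATH_BUF_MGMT;
        return (bf);
    }

    /* Mgmt requests fall back to the normal pool when the mgmt pool is dry. */
    static struct ath_buf *
    getbuf(ath_buf_type_t btype)
    {
        struct ath_buf *bf = getbuf_locked(btype);

        if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
            bf = getbuf_locked(ATH_BUFTYPE_NORMAL);
        return (bf);
    }

    /* On free, the flag routes the buffer back to the pool it came from. */
    static void
    returnbuf_tail(struct ath_buf *bf)
    {
        if (bf->bf_flags & ATH_BUF_MGMT)
            TAILQ_INSERT_TAIL(&txbuf_mgmt, bf, bf_list);
        else
            TAILQ_INSERT_TAIL(&txbuf, bf, bf_list);
    }

    int
    main(void)
    {
        struct ath_buf normal[2], mgmt[1];
        struct ath_buf *bf;
        int i;

        for (i = 0; i < 2; i++) {
            normal[i].bf_flags = 0;
            TAILQ_INSERT_TAIL(&txbuf, &normal[i], bf_list);
        }
        mgmt[0].bf_flags = ATH_BUF_MGMT;
        TAILQ_INSERT_TAIL(&txbuf_mgmt, &mgmt[0], bf_list);

        bf = getbuf(ATH_BUFTYPE_MGMT);  /* comes from the mgmt pool */
        printf("mgmt alloc: flags 0x%x\n", bf->bf_flags);
        bf = getbuf(ATH_BUFTYPE_MGMT);  /* mgmt pool now empty: falls back */
        printf("fallback alloc: flags 0x%x\n", bf->bf_flags);
        returnbuf_tail(bf);             /* flag routes it to the normal pool */
        return (0);
    }

The real _ath_getbuf_locked() additionally refuses a buffer still marked
ATH_BUF_BUSY and bumps ast_tx_getnobuf/ast_tx_getbusybuf; see the hunks
below.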

Tested:

* STA mode, with heavy UDP injection via iperf (an example invocation
  follows this list).  This filled the TX queue, yet BARs still went out
  successfully.
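
For reference, this kind of load can be generated with a UDP iperf run such
as the following (a hypothetical invocation; the exact command and addresses
used for the test are not recorded here):

    iperf -c 192.168.0.1 -u -b 200M -t 60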

TODO:

* Initialise the mgmt buffers with ATH_BUF_MGMT and then ensure the right
  type is being allocated and freed on the appropriate list.  That would
  save a write operation (to bf->bf_flags) on each buffer alloc/free
  (a sketch follows this list).

* Test AP mode; ensure that BAR TX and probe responses go out correctly
  when the main TX queue is full (e.g. with paused traffic to a TID,
  awaiting a BAR to complete).
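
For the first TODO item, a hypothetical sketch of the init-time marking
(ath_descdma_setup(), sc_txbuf_mgmt and ATH_BUF_MGMT are the real names
from this change; the loop itself is not part of this commit):

    /*
     * Hypothetical: tag each mgmt pool entry once, right after
     * ath_descdma_setup() populates sc_txbuf_mgmt, so the per-allocation
     * flag write in _ath_getbuf_locked() could be dropped.
     */
    struct ath_buf *bf;

    TAILQ_FOREACH(bf, &sc->sc_txbuf_mgmt, bf_list)
        bf->bf_flags |= ATH_BUF_MGMT;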

PR:		kern/168170
commit af33d486ab (parent 4ea9694252)
Adrian Chadd, 2012-06-13 06:57:55 +00:00
5 changed files with 106 additions and 17 deletions

==== file 1 of 5 ====

@@ -246,6 +246,10 @@ static int ath_txbuf = ATH_TXBUF;	/* # tx buffers to allocate */
 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
     0, "tx buffers allocated");
 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
+static int ath_txbuf_mgmt = ATH_MGMT_TXBUF;	/* # mgmt tx buffers to allocate */
+SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
+    0, "tx (mgmt) buffers allocated");
+TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);
 int ath_bstuck_threshold = 4;		/* max missed beacons */
 SYSCTL_INT(_hw_ath, OID_AUTO, bstuck, CTLFLAG_RW, &ath_bstuck_threshold,
@@ -2212,13 +2216,17 @@ ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
 }
 
 struct ath_buf *
-_ath_getbuf_locked(struct ath_softc *sc)
+_ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
 {
         struct ath_buf *bf;
 
         ATH_TXBUF_LOCK_ASSERT(sc);
 
-        bf = TAILQ_FIRST(&sc->sc_txbuf);
+        if (btype == ATH_BUFTYPE_MGMT)
+                bf = TAILQ_FIRST(&sc->sc_txbuf_mgmt);
+        else
+                bf = TAILQ_FIRST(&sc->sc_txbuf);
+
         if (bf == NULL) {
                 sc->sc_stats.ast_tx_getnobuf++;
         } else {
@@ -2228,18 +2236,29 @@ _ath_getbuf_locked(struct ath_softc *sc)
                 }
         }
 
-        if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0)
-                TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
-        else
+        if (bf != NULL && (bf->bf_flags & ATH_BUF_BUSY) == 0) {
+                if (btype == ATH_BUFTYPE_MGMT)
+                        TAILQ_REMOVE(&sc->sc_txbuf_mgmt, bf, bf_list);
+                else
+                        TAILQ_REMOVE(&sc->sc_txbuf, bf, bf_list);
+        } else
                 bf = NULL;
 
         if (bf == NULL) {
+                /* XXX should check which list, mgmt or otherwise */
                 DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %s\n", __func__,
                     TAILQ_FIRST(&sc->sc_txbuf) == NULL ?
                     "out of xmit buffers" : "xmit buffer busy");
                 return NULL;
         }
 
+        /* XXX TODO: should do this at buffer list initialisation */
+        /* XXX (then, ensure the buffer has the right flag set) */
+        if (btype == ATH_BUFTYPE_MGMT)
+                bf->bf_flags |= ATH_BUF_MGMT;
+        else
+                bf->bf_flags &= (~ATH_BUF_MGMT);
+
         /* Valid bf here; clear some basic fields */
         bf->bf_next = NULL;     /* XXX just to be sure */
         bf->bf_last = NULL;     /* XXX again, just to be sure */
@@ -2274,7 +2293,9 @@ ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
 {
         struct ath_buf *tbf;
 
-        tbf = ath_getbuf(sc);
+        tbf = ath_getbuf(sc,
+            (bf->bf_flags & ATH_BUF_MGMT) ?
+            ATH_BUFTYPE_MGMT : ATH_BUFTYPE_NORMAL);
 
         if (tbf == NULL)
                 return NULL;    /* XXX failure? Why? */
@@ -2302,12 +2323,18 @@ ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
 }
 
 struct ath_buf *
-ath_getbuf(struct ath_softc *sc)
+ath_getbuf(struct ath_softc *sc, ath_buf_type_t btype)
 {
         struct ath_buf *bf;
 
         ATH_TXBUF_LOCK(sc);
-        bf = _ath_getbuf_locked(sc);
+        bf = _ath_getbuf_locked(sc, btype);
+        /*
+         * If a mgmt buffer was requested but we're out of those,
+         * try requesting a normal one.
+         */
+        if (bf == NULL && btype == ATH_BUFTYPE_MGMT)
+                bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
         ATH_TXBUF_UNLOCK(sc);
         if (bf == NULL) {
                 struct ifnet *ifp = sc->sc_ifp;
@@ -2351,7 +2378,7 @@ ath_start(struct ifnet *ifp)
                 /*
                  * Grab a TX buffer and associated resources.
                  */
-                bf = ath_getbuf(sc);
+                bf = ath_getbuf(sc, ATH_BUFTYPE_NORMAL);
                 if (bf == NULL)
                         break;
@@ -2857,11 +2884,26 @@ ath_desc_alloc(struct ath_softc *sc)
                 return error;
         }
 
+        error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
+            "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC);
+        if (error != 0) {
+                ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+                ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+                return error;
+        }
+
+        /*
+         * XXX mark txbuf_mgmt frames with ATH_BUF_MGMT, so the
+         * flag doesn't have to be set in ath_getbuf_locked().
+         */
+
         error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
             "beacon", ATH_BCBUF, 1);
         if (error != 0) {
-                ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
                 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+                ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+                ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
+                    &sc->sc_txbuf_mgmt);
                 return error;
         }
         return 0;
@@ -2877,6 +2919,9 @@ ath_desc_free(struct ath_softc *sc)
                 ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
         if (sc->sc_rxdma.dd_desc_len != 0)
                 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+        if (sc->sc_txdma_mgmt.dd_desc_len != 0)
+                ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
+                    &sc->sc_txbuf_mgmt);
 }
 
 static struct ieee80211_node *
@@ -3323,12 +3368,14 @@ ath_tx_update_busy(struct ath_softc *sc)
          * descriptor.
          */
         ATH_TXBUF_LOCK_ASSERT(sc);
+        last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
+        if (last != NULL)
+                last->bf_flags &= ~ATH_BUF_BUSY;
         last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
         if (last != NULL)
                 last->bf_flags &= ~ATH_BUF_BUSY;
 }
 
 /*
  * Process completed xmit descriptors from the specified queue.
  * Kick the packet scheduler if needed. This can occur from this
@@ -3637,6 +3684,9 @@ ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf)
 
         ATH_TXBUF_LOCK_ASSERT(sc);
 
-        TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
+        if (bf->bf_flags & ATH_BUF_MGMT)
+                TAILQ_INSERT_TAIL(&sc->sc_txbuf_mgmt, bf, bf_list);
+        else
+                TAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
 }
@@ -3646,6 +3696,9 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
 
         ATH_TXBUF_LOCK_ASSERT(sc);
 
-        TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
+        if (bf->bf_flags & ATH_BUF_MGMT)
+                TAILQ_INSERT_HEAD(&sc->sc_txbuf_mgmt, bf, bf_list);
+        else
+                TAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
 }
@@ -3725,6 +3778,9 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
          */
         ATH_TXBUF_LOCK(sc);
         bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
         if (bf != NULL)
                 bf->bf_flags &= ~ATH_BUF_BUSY;
+        bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
+        if (bf != NULL)
+                bf->bf_flags &= ~ATH_BUF_BUSY;
         ATH_TXBUF_UNLOCK(sc);

==== file 2 of 5 ====

@@ -50,10 +50,13 @@
 extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate);
 
-extern struct ath_buf * ath_getbuf(struct ath_softc *sc);
-extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc);
+extern struct ath_buf * ath_getbuf(struct ath_softc *sc,
+            ath_buf_type_t btype);
+extern struct ath_buf * _ath_getbuf_locked(struct ath_softc *sc,
+            ath_buf_type_t btype);
 extern struct ath_buf * ath_buf_clone(struct ath_softc *sc,
             const struct ath_buf *bf);
+
 /* XXX change this to NULL the buffer pointer? */
 extern void ath_freebuf(struct ath_softc *sc, struct ath_buf *bf);
 extern void ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf);
 extern void ath_returnbuf_tail(struct ath_softc *sc, struct ath_buf *bf);

==== file 3 of 5 ====

@@ -377,6 +377,19 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
         printf("Total TX buffers: %d; Total TX buffers busy: %d\n",
             t, i);
 
+        i = t = 0;
+        ATH_TXBUF_LOCK(sc);
+        TAILQ_FOREACH(bf, &sc->sc_txbuf_mgmt, bf_list) {
+                if (bf->bf_flags & ATH_BUF_BUSY) {
+                        printf("Busy: %d\n", t);
+                        i++;
+                }
+                t++;
+        }
+        ATH_TXBUF_UNLOCK(sc);
+
+        printf("Total mgmt TX buffers: %d; Total mgmt TX buffers busy: %d\n",
+            t, i);
         return 0;
 }

==== file 4 of 5 ====

@@ -203,7 +203,8 @@ ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags,
         ATH_TXBUF_LOCK(sc);
         for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) {
-                bf = _ath_getbuf_locked(sc);
+                /* XXX non-management? */
+                bf = _ath_getbuf_locked(sc, ATH_BUFTYPE_NORMAL);
                 if (bf == NULL) {       /* out of buffers, cleanup */
                         device_printf(sc->sc_dev, "%s: no buffer?\n",
                             __func__);
@@ -1878,7 +1879,7 @@ ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
         /*
          * Grab a TX buffer and associated resources.
          */
-        bf = ath_getbuf(sc);
+        bf = ath_getbuf(sc, ATH_BUFTYPE_MGMT);
         if (bf == NULL) {
                 sc->sc_stats.ast_tx_nobuf++;
                 m_freem(m);

==== file 5 of 5 ====

@@ -43,6 +43,14 @@
 #define ATH_TIMEOUT             1000
 
+/*
+ * There is a separate TX ath_buf pool for management frames.
+ * This ensures that management frames such as probe responses
+ * and BAR frames can be transmitted during periods of high
+ * TX activity.
+ */
+#define ATH_MGMT_TXBUF          32
+
 /*
  * 802.11n requires more TX and RX buffers to do AMPDU.
  */
@@ -172,6 +180,11 @@ struct ath_node {
         ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
 #define ATH_RSSI(x)             ATH_EP_RND(x, HAL_RSSI_EP_MULTIPLIER)
 
+typedef enum {
+        ATH_BUFTYPE_NORMAL      = 0,
+        ATH_BUFTYPE_MGMT        = 1,
+} ath_buf_type_t;
+
 struct ath_buf {
         TAILQ_ENTRY(ath_buf) bf_list;
         struct ath_buf *        bf_next;        /* next buffer in the aggregate */
@@ -243,6 +256,7 @@ struct ath_buf {
 };
 typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;
 
+#define ATH_BUF_MGMT    0x00000001      /* (tx) desc is a mgmt desc */
 #define ATH_BUF_BUSY    0x00000002      /* (tx) desc owned by h/w */
 
 /*
@@ -487,6 +501,8 @@ struct ath_softc {
         struct ath_descdma      sc_txdma;       /* TX descriptors */
         ath_bufhead             sc_txbuf;       /* transmit buffer */
+        struct ath_descdma      sc_txdma_mgmt;  /* mgmt TX descriptors */
+        ath_bufhead             sc_txbuf_mgmt;  /* mgmt transmit buffer */
         struct mtx              sc_txbuflock;   /* txbuf lock */
         char                    sc_txname[12];  /* e.g. "ath0_buf" */
         u_int                   sc_txqsetup;    /* h/w queues setup */
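
A usage note: TUNABLE_INT() registers hw.ath.txbuf_mgmt as a loader
tunable, and the pool is carved out once at attach time in
ath_desc_alloc(), so in practice the pool size is adjusted from
/boot/loader.conf (the value below is only an example):

    hw.ath.txbuf_mgmt="64"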