Implement "holding buffers" per TX queue rather than globally.

When working on TDMA, Sam Leffler found that the MAC DMA hardware
would re-read the last TX descriptor when getting ready to transmit
the next one.  That is how the whole ATH_BUF_BUSY scheme came into
existence - the descriptor must be left alone (specifically, its link
pointer must be kept intact) until the hardware has moved on to the
next frame.
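
To make that constraint concrete, here is a minimal, self-contained
sketch.  The structure and field names (demo_txdesc, ds_link, etc.) are
hypothetical and only loosely modelled on a linked hardware descriptor;
they are not the real ath(4)/HAL layout.  The point is simply that the
hardware follows a physical link pointer from one descriptor to the
next, and may re-read the previously completed descriptor before doing
so, so that descriptor must stay valid until the hardware has
demonstrably moved on.

#include <stdint.h>

/* Hypothetical hardware TX descriptor; only the link field matters here. */
struct demo_txdesc {
        uint32_t        ds_link;        /* phys addr of next descriptor, 0 = end of list */
        uint32_t        ds_data;        /* phys addr of the frame buffer */
        uint32_t        ds_ctl0;        /* control word: length, flags, ... */
        uint32_t        ds_ctl1;        /* more control bits */
};

/*
 * Append a new frame behind the current tail descriptor.  The old tail
 * must not be freed or recycled yet: the QCU may re-read it (including
 * ds_link) when it restarts, which is exactly why the driver marks the
 * owning buffer ATH_BUF_BUSY instead of returning it straight to the
 * free list.
 */
void
demo_chain_descriptor(struct demo_txdesc *tail, uint32_t next_paddr)
{
        tail->ds_link = next_paddr;
}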

He saw this with TDMA because the MAC would frequently stop during
active transmit (i.e., whenever it wasn't its turn to transmit).

Fast-forward to today.  It turns out that this isn't a problem with
just a single MAC DMA instance, but with each QCU (0 through 9): each
one maintains its own descriptor pointer and will re-read the last
descriptor when starting to transmit the next frame.

So when your AP is busy transmitting from multiple TX queues, you'll
(more) frequently see one QCU stopped, waiting for a higher-priority QCU
to finish transmitting before it goes ahead and continues.  If you mess
with the descriptor in the meantime (e.g., by freeing it) then you're
out of luck.

Thanks to rpaulo for sticking with me whilst I diagnosed this issue
that he was quite reliably triggering in his environment.

This is a reimplementation; it doesn't have anything in common with
the ath9k or Qualcomm Atheros reference driver implementations.

Now - in theory this doesn't apply to the EDMA chips, as long as you
push one complete frame into the FIFO at a time.  But the MAC can DMA
from a list of frames pushed into the hardware queue (i.e., you
concatenate 'n' frames together with link pointers and then push the
head pointer into the TXQ FIFO).  Since that's likely how I'm going to
implement CABQ handling in hostap mode, I'll likely end up teaching the
EDMA TX completion code about busy buffers as well, just to be "sure"
this doesn't creep back in.
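
As a rough illustration of that "list of frames" hand-off, here is a
self-contained sketch.  hal_set_desc_link() and hal_put_txbuf() are
hypothetical stand-ins (they only trace what the real HAL calls would
do), and demo_buf is not the real ath_buf; this sketches the idea, not
the driver's actual EDMA path.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_buf {
        struct demo_buf *next;          /* software list linkage */
        uint32_t         paddr;         /* phys addr of the frame's first descriptor */
        uint32_t         link;          /* link field of the frame's last descriptor */
};

/* Stand-in for writing the link pointer into a frame's last descriptor. */
static void
hal_set_desc_link(struct demo_buf *bf, uint32_t link_paddr)
{
        bf->link = link_paddr;
}

/* Stand-in for pushing a head descriptor address into a TXQ FIFO slot. */
static void
hal_put_txbuf(int qnum, uint32_t head_paddr)
{
        printf("TXQ %d FIFO <- head descriptor 0x%08" PRIx32 "\n",
            qnum, head_paddr);
}

/*
 * Chain 'n' frames together through their descriptor link pointers and
 * push only the head descriptor address into the TXQ FIFO; the hardware
 * then walks the whole list by itself.  If the hardware can re-read the
 * tail descriptor of such a list before moving on, the same holding
 * buffer treatment would be needed here too.
 */
static void
push_frame_list(int qnum, struct demo_buf *head)
{
        struct demo_buf *bf;

        for (bf = head; bf->next != NULL; bf = bf->next)
                hal_set_desc_link(bf, bf->next->paddr);
        hal_put_txbuf(qnum, head->paddr);
}

int
main(void)
{
        struct demo_buf c = { NULL, 0x3000, 0 };
        struct demo_buf b = { &c,   0x2000, 0 };
        struct demo_buf a = { &b,   0x1000, 0 };

        push_frame_list(1, &a);
        return (0);
}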

Tested - iperf ap->sta and sta->ap (with both sides running this code):

* AR5416 STA
* AR9160/AR9220 hostap

To validate that it doesn't break the EDMA (FIFO) chips:

* AR9380, AR9485, AR9462 STA

iperf was used with -S <tos byte decimal value> to set the DSCP bits on
the TCP client side, mapping the traffic to different TIDs and thus to
different TX queues (see the sketch below).
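
For reference, here is a rough, self-contained sketch of how that TOS
byte ends up selecting a TID and a WME access category (and hence a
hardware TX queue).  The precedence-bit shift and the TID-to-AC table
below follow the standard 802.1D mapping that net80211's classification
is based on; treat the details as illustrative, not as the exact
in-kernel logic.

#include <stdio.h>

/* WME access categories, in the order used for the name lookup below. */
enum wme_ac { AC_BE, AC_BK, AC_VI, AC_VO };

/* Standard 802.1D TID -> access category mapping. */
static enum wme_ac
tid_to_ac(int tid)
{
        if (tid == 0 || tid == 3)
                return (AC_BE);
        if (tid < 3)
                return (AC_BK);
        if (tid < 6)
                return (AC_VI);
        return (AC_VO);
}

int
main(void)
{
        /* Example values one might pass via "iperf -S <tos>". */
        static const int tos_values[] = { 32, 96, 160, 224 };
        static const char *ac_names[] = { "BE", "BK", "VI", "VO" };
        size_t i;

        for (i = 0; i < sizeof(tos_values) / sizeof(tos_values[0]); i++) {
                int tid = (tos_values[i] >> 5) & 0x7;   /* IP precedence bits */

                printf("tos=%3d -> tid=%d -> AC_%s\n",
                    tos_values[i], tid, ac_names[tid_to_ac(tid)]);
        }
        return (0);
}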

TODO:

* Make this work on the EDMA chips, if we end up pushing lists of frames
  to the hardware (e.g., how we'll eventually handle the CABQ in
  hostap/ibss mode).
Author: Adrian Chadd
Date:   2013-03-14 06:20:02 +00:00
Parent: cac48304a8
Commit: 629ce2188a
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=248264
3 changed files with 66 additions and 49 deletions


@@ -3749,39 +3749,6 @@ ath_tx_update_ratectrl(struct ath_softc *sc, struct ieee80211_node *ni,
}
}
/*
* Update the busy status of the last frame on the free list.
* When doing TDMA, the busy flag tracks whether the hardware
* currently points to this buffer or not, and thus gated DMA
* may restart by re-reading the last descriptor in this
* buffer.
*
* This should be called in the completion function once one
* of the buffers has been used.
*/
static void
ath_tx_update_busy(struct ath_softc *sc)
{
struct ath_buf *last;
/*
* Since the last frame may still be marked
* as ATH_BUF_BUSY, unmark it here before
* finishing the frame processing.
* Since we've completed a frame (aggregate
* or otherwise), the hardware has moved on
* and is no longer referencing the previous
* descriptor.
*/
ATH_TXBUF_LOCK_ASSERT(sc);
last = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
if (last != NULL)
last->bf_flags &= ~ATH_BUF_BUSY;
last = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
if (last != NULL)
last->bf_flags &= ~ATH_BUF_BUSY;
}
/*
* Process the completion of the given buffer.
*
@@ -3901,7 +3868,6 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
break;
}
ATH_TXQ_REMOVE(txq, bf, bf_list);
#ifdef IEEE80211_SUPPORT_TDMA
if (txq->axq_depth > 0) {
/*
* More frames follow. Mark the buffer busy
@@ -3914,9 +3880,6 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq, int dosched)
*/
bf->bf_last->bf_flags |= ATH_BUF_BUSY;
} else
#else
if (txq->axq_depth == 0)
#endif
txq->axq_link = NULL;
if (bf->bf_state.bfs_aggr)
txq->axq_aggr_depth--;
@@ -4187,6 +4150,51 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
}
}
/*
* Free the holding buffer if it exists
*/
static void
ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
{
if (txq->axq_holdingbf == NULL)
return;
txq->axq_holdingbf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_LOCK(sc);
ath_returnbuf_tail(sc, txq->axq_holdingbf);
ATH_TXBUF_UNLOCK(sc);
txq->axq_holdingbf = NULL;
}
/*
* Add this buffer to the holding queue, freeing the previous
* one if it exists.
*/
static void
ath_txq_addholdingbuf(struct ath_softc *sc, struct ath_buf *bf)
{
struct ath_txq *txq;
/* XXX assert ATH_BUF_BUSY is set */
/* XXX assert the tx queue is under the max number */
if (bf->bf_state.bfs_tx_queue > HAL_NUM_TX_QUEUES) {
device_printf(sc->sc_dev, "%s: bf=%p: invalid tx queue (%d)\n",
__func__,
bf,
bf->bf_state.bfs_tx_queue);
bf->bf_flags &= ~ATH_BUF_BUSY;
ath_returnbuf_tail(sc, bf);
return;
}
txq = &sc->sc_txq[bf->bf_state.bfs_tx_queue];
ath_txq_freeholdingbuf(sc, txq);
txq->axq_holdingbf = bf;
}
/*
* Return a buffer to the pool and update the 'busy' flag on the
* previous 'tail' entry.
@@ -4207,8 +4215,18 @@ ath_freebuf(struct ath_softc *sc, struct ath_buf *bf)
KASSERT((bf->bf_node == NULL), ("%s: bf->bf_node != NULL\n", __func__));
KASSERT((bf->bf_m == NULL), ("%s: bf->bf_m != NULL\n", __func__));
/*
* If this buffer is busy, push it onto the holding queue
*/
if (bf->bf_flags & ATH_BUF_BUSY) {
ath_txq_addholdingbuf(sc, bf);
return;
}
/*
* Not a busy buffer, so free normally
*/
ATH_TXBUF_LOCK(sc);
ath_tx_update_busy(sc);
ath_returnbuf_tail(sc, bf);
ATH_TXBUF_UNLOCK(sc);
}
@@ -4261,15 +4279,6 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
* NB: this assumes output has been stopped and
* we do not need to block ath_tx_proc
*/
ATH_TXBUF_LOCK(sc);
bf = TAILQ_LAST(&sc->sc_txbuf, ath_bufhead_s);
if (bf != NULL)
bf->bf_flags &= ~ATH_BUF_BUSY;
bf = TAILQ_LAST(&sc->sc_txbuf_mgmt, ath_bufhead_s);
if (bf != NULL)
bf->bf_flags &= ~ATH_BUF_BUSY;
ATH_TXBUF_UNLOCK(sc);
for (ix = 0;; ix++) {
ATH_TX_LOCK(sc);
bf = TAILQ_FIRST(&txq->axq_q);
@@ -4330,6 +4339,11 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_default_comp(sc, bf, 1);
}
/*
* Free the holding buffer if it exists
*/
ath_txq_freeholdingbuf(sc, txq);
/*
* Drain software queued frames which are on
* active TIDs.


@@ -361,11 +361,13 @@ ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, axq_fifo_depth=%d\n",
printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d, "
"axq_fifo_depth=%d, holdingbf=%p\n",
i,
sc->sc_txq[i].axq_depth,
sc->sc_txq[i].axq_aggr_depth,
sc->sc_txq[i].axq_fifo_depth);
sc->sc_txq[i].axq_fifo_depth,
sc->sc_txq[i].axq_holdingbf);
}
}


@@ -329,6 +329,7 @@ struct ath_txq {
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
struct ath_buf *axq_holdingbf; /* holding TX buffer */
char axq_name[12]; /* e.g. "ath0_txq4" */
/* Per-TID traffic queue for software -> hardware TX */