Add per-TXQ EDMA FIFO staging queue support.
Each set of frames pushed into a FIFO is represented by a list of
ath_bufs - the first ath_buf in the FIFO list is marked with
ATH_BUF_FIFOPTR; the last ath_buf in the FIFO list is marked with
ATH_BUF_FIFOEND.

Multiple lists of frames are just glued together in the TAILQ as per
normal - except that at the end of a FIFO list, the descriptor link
pointer will be NULL and it'll be tagged with ATH_BUF_FIFOEND.

For non-EDMA chipsets this is a no-op - the ath_txq frame list (axq_q)
stays the same and is treated the same.

For EDMA chipsets the frames are pushed into axq_q and then, when the
FIFO is to be (re)filled, frames will be moved onto the FIFO queue and
then pushed into the FIFO.

So:

* Add a new queue in each hardware TXQ (ath_txq) for staging FIFO frame
  lists.  It's a TAILQ (like the normal hardware frame queue) rather
  than the ath9k list-of-lists used to represent FIFO entries.

* Add new ath_buf flags - ATH_BUF_FIFOPTR and ATH_BUF_FIFOEND.

* When allocating ath_buf entries, clear out the flag value before
  returning it, or it'll end up carrying stale flags.

* When cloning ath_buf entries, only clone ATH_BUF_MGMT; don't clone
  the FIFO-related flags.

* Extend ath_tx_draintxq() to first drain the FIFO staging queue,
  _then_ drain the normal hardware queue.

Tested:

* AR9280, hostap
* AR9280, STA
* AR9380/AR9580 - hostap

TODO:

* Test on other chipsets, just to be thorough.
parent 8d7ad01f94
commit 3feffbd796
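Before the diff itself, here is a rough, self-contained user-space model
of the tagging scheme described above.  It uses only the portable
<sys/queue.h> TAILQ macros; the struct, field, and function names (buf,
bufhead, BUF_FIFOPTR/BUF_FIFOEND, push_fifo_list) are illustrative
stand-ins, not the driver's real if_athvar.h definitions.

/*
 * Minimal user-space model of the FIFO staging scheme: a staged list
 * of buffers is stamped FIFOPTR (first) / FIFOEND (last) and then
 * glued onto the FIFO TAILQ, so slot boundaries stay recoverable from
 * the single concatenated list.  Stand-in names for illustration only.
 */
#include <sys/queue.h>
#include <stdio.h>

#define BUF_FIFOEND 0x00000004
#define BUF_FIFOPTR 0x00000008

struct buf {
    TAILQ_ENTRY(buf) link;
    int flags;
    int id;
};
TAILQ_HEAD(bufhead, buf);

static void
push_fifo_list(struct bufhead *fifo, struct bufhead *staged)
{
    struct buf *bf, *last = NULL;

    bf = TAILQ_FIRST(staged);
    if (bf == NULL)
        return;
    bf->flags |= BUF_FIFOPTR;   /* first buffer of this FIFO slot */
    while ((bf = TAILQ_FIRST(staged)) != NULL) {
        TAILQ_REMOVE(staged, bf, link);
        TAILQ_INSERT_TAIL(fifo, bf, link);
        last = bf;
    }
    last->flags |= BUF_FIFOEND; /* last buffer of this FIFO slot */
}

int
main(void)
{
    static struct buf bufs[3];
    struct bufhead fifo, staged;
    struct buf *bf;
    int i;

    TAILQ_INIT(&fifo);
    TAILQ_INIT(&staged);
    for (i = 0; i < 3; i++) {
        bufs[i].id = i;
        TAILQ_INSERT_TAIL(&staged, &bufs[i], link);
    }
    push_fifo_list(&fifo, &staged);

    /* Prints: buf 0 tagged FIFOPTR, buf 2 tagged FIFOEND. */
    TAILQ_FOREACH(bf, &fifo, link)
        printf("buf %d:%s%s\n", bf->id,
            (bf->flags & BUF_FIFOPTR) ? " FIFOPTR" : "",
            (bf->flags & BUF_FIFOEND) ? " FIFOEND" : "");
    return (0);
}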
sys/dev/ath/if_ath.c:

@@ -2474,6 +2474,7 @@ _ath_getbuf_locked(struct ath_softc *sc, ath_buf_type_t btype)
 
     /* XXX TODO: should do this at buffer list initialisation */
     /* XXX (then, ensure the buffer has the right flag set) */
+    bf->bf_flags = 0;
     if (btype == ATH_BUFTYPE_MGMT)
         bf->bf_flags |= ATH_BUF_MGMT;
     else
@@ -2530,7 +2531,7 @@ ath_buf_clone(struct ath_softc *sc, const struct ath_buf *bf)
     /* Copy basics */
     tbf->bf_next = NULL;
     tbf->bf_nseg = bf->bf_nseg;
-    tbf->bf_flags = bf->bf_flags & ~ATH_BUF_BUSY;
+    tbf->bf_flags = bf->bf_flags & ATH_BUF_FLAGS_CLONE;
     tbf->bf_status = bf->bf_status;
     tbf->bf_m = bf->bf_m;
     /*
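A one-liner worth pausing on: by whitelisting with ATH_BUF_FLAGS_CLONE
instead of merely stripping ATH_BUF_BUSY, a cloned buffer can no longer
inherit FIFOPTR/FIFOEND and corrupt FIFO slot accounting.  A tiny
stand-alone check of that mask semantics (flag names are stand-ins):

/* Stand-alone check of the clone-mask semantics. */
#include <assert.h>

#define BUF_MGMT        0x00000001
#define BUF_BUSY        0x00000002
#define BUF_FIFOEND     0x00000004
#define BUF_FIFOPTR     0x00000008
#define BUF_FLAGS_CLONE (BUF_MGMT)

int
main(void)
{
    int flags = BUF_MGMT | BUF_BUSY | BUF_FIFOPTR | BUF_FIFOEND;

    /* Old behaviour: only BUSY stripped - FIFO flags leak through. */
    assert((flags & ~BUF_BUSY) & (BUF_FIFOPTR | BUF_FIFOEND));

    /* New behaviour: whitelist - only MGMT survives the clone. */
    assert((flags & BUF_FLAGS_CLONE) == BUF_MGMT);
    return (0);
}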
@@ -3410,6 +3411,7 @@ ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
     txq->axq_softc = sc;
     TAILQ_INIT(&txq->axq_q);
     TAILQ_INIT(&txq->axq_tidq);
+    TAILQ_INIT(&txq->fifo.axq_q);
     ATH_TXQ_LOCK_INIT(sc, txq);
 }
 
@@ -4169,7 +4171,7 @@ ath_returnbuf_head(struct ath_softc *sc, struct ath_buf *bf)
 /*
  * Free the holding buffer if it exists
  */
-static void
+void
 ath_txq_freeholdingbuf(struct ath_softc *sc, struct ath_txq *txq)
 {
     ATH_TXBUF_LOCK_ASSERT(sc);
@@ -4283,6 +4285,61 @@ ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf, int status)
      */
 }
 
+static struct ath_buf *
+ath_tx_draintxq_get_one(struct ath_softc *sc, struct ath_txq *txq)
+{
+    struct ath_buf *bf;
+
+    ATH_TXQ_LOCK_ASSERT(txq);
+
+    /*
+     * Drain the FIFO queue first, then if it's
+     * empty, move to the normal frame queue.
+     */
+    bf = TAILQ_FIRST(&txq->fifo.axq_q);
+    if (bf != NULL) {
+        /*
+         * Is it the last buffer in this set?
+         * Decrement the FIFO counter.
+         */
+        if (bf->bf_flags & ATH_BUF_FIFOEND) {
+            if (txq->axq_fifo_depth == 0) {
+                device_printf(sc->sc_dev,
+                    "%s: Q%d: fifo_depth=0, fifo.axq_depth=%d?\n",
+                    __func__,
+                    txq->axq_qnum,
+                    txq->fifo.axq_depth);
+            } else
+                txq->axq_fifo_depth--;
+        }
+        ATH_TXQ_REMOVE(&txq->fifo, bf, bf_list);
+        return (bf);
+    }
+
+    /*
+     * Debugging!
+     */
+    if (txq->axq_fifo_depth != 0 || txq->fifo.axq_depth != 0) {
+        device_printf(sc->sc_dev,
+            "%s: Q%d: fifo_depth=%d, fifo.axq_depth=%d\n",
+            __func__,
+            txq->axq_qnum,
+            txq->axq_fifo_depth,
+            txq->fifo.axq_depth);
+    }
+
+    /*
+     * Now drain the pending queue.
+     */
+    bf = TAILQ_FIRST(&txq->axq_q);
+    if (bf == NULL) {
+        txq->axq_link = NULL;
+        return (NULL);
+    }
+    ATH_TXQ_REMOVE(txq, bf, bf_list);
+    return (bf);
+}
+
 void
 ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
 {
@@ -4298,24 +4355,11 @@ ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
      */
     for (ix = 0;; ix++) {
         ATH_TXQ_LOCK(txq);
-        bf = TAILQ_FIRST(&txq->axq_q);
+        bf = ath_tx_draintxq_get_one(sc, txq);
         if (bf == NULL) {
-            txq->axq_link = NULL;
-            /*
-             * There's currently no flag that indicates
-             * a buffer is on the FIFO.  So until that
-             * occurs, just clear the FIFO counter here.
-             *
-             * Yes, this means that if something in parallel
-             * is pushing things onto this TXQ and pushing
-             * _that_ into the hardware, things will get
-             * very fruity very quickly.
-             */
-            txq->axq_fifo_depth = 0;
             ATH_TXQ_UNLOCK(txq);
             break;
         }
-        ATH_TXQ_REMOVE(txq, bf, bf_list);
         if (bf->bf_state.bfs_aggr)
             txq->axq_aggr_depth--;
 #ifdef ATH_DEBUG
sys/dev/ath/if_ath_misc.h:

@@ -77,6 +77,8 @@ extern int ath_hal_gethangstate(struct ath_hal *ah, uint32_t mask,
 
 extern void ath_tx_freebuf(struct ath_softc *sc, struct ath_buf *bf,
     int status);
+extern void ath_txq_freeholdingbuf(struct ath_softc *sc,
+    struct ath_txq *txq);
 
 extern void ath_txqmove(struct ath_txq *dst, struct ath_txq *src);
 
sys/dev/ath/if_athvar.h:

@@ -291,6 +291,10 @@ typedef TAILQ_HEAD(ath_bufhead_s, ath_buf) ath_bufhead;
 
 #define ATH_BUF_MGMT    0x00000001  /* (tx) desc is a mgmt desc */
 #define ATH_BUF_BUSY    0x00000002  /* (tx) desc owned by h/w */
+#define ATH_BUF_FIFOEND 0x00000004
+#define ATH_BUF_FIFOPTR 0x00000008
+
+#define ATH_BUF_FLAGS_CLONE (ATH_BUF_MGMT)
 
 /*
  * DMA state for tx/rx descriptors.
@@ -325,12 +329,29 @@ struct ath_txq {
 #define ATH_TXQ_PUTPENDING 0x0001   /* ath_hal_puttxbuf pending */
     u_int axq_depth;        /* queue depth (stat only) */
     u_int axq_aggr_depth;   /* how many aggregates are queued */
-    u_int axq_fifo_depth;   /* depth of FIFO frames */
     u_int axq_intrcnt;      /* interrupt count */
     u_int32_t *axq_link;    /* link ptr in last TX desc */
     TAILQ_HEAD(axq_q_s, ath_buf) axq_q; /* transmit queue */
     struct mtx axq_lock;    /* lock on q and link */
 
+    /*
+     * This is the FIFO staging buffer when doing EDMA.
+     *
+     * For legacy chips, we just push the head pointer to
+     * the hardware and we ignore this list.
+     *
+     * For EDMA, the staging buffer is treated as normal;
+     * when it's time to push a list of frames to the hardware
+     * we move that list here and we stamp buffers with
+     * flags to identify the beginning/end of that particular
+     * FIFO entry.
+     */
+    struct {
+        TAILQ_HEAD(axq_q_f_s, ath_buf) axq_q;
+        u_int axq_depth;
+    } fifo;
+    u_int axq_fifo_depth;   /* depth of FIFO frames */
+
     /*
      * XXX the holdingbf field is protected by the TXBUF lock
      * for now, NOT the TXQ lock.
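As a companion sketch to ath_tx_draintxq_get_one() above, a small
user-space model of how the two counters in this layout appear to
relate: fifo.axq_depth tracks individual staged frames, while
axq_fifo_depth tracks occupied FIFO entries and only drops when a
FIFOEND-tagged buffer is dequeued.  This reading is inferred from the
diff; the names below (frame_depth, slot_depth) are stand-ins, not
driver code.

/*
 * frame_depth mirrors fifo.axq_depth (frames); slot_depth mirrors
 * axq_fifo_depth (FIFO entries).  A slot is only released when its
 * FIFOEND buffer passes by on the drain side.
 */
#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>

#define BUF_FIFOEND 0x00000004
#define BUF_FIFOPTR 0x00000008

struct buf {
    TAILQ_ENTRY(buf) link;
    int flags;
};
TAILQ_HEAD(bufhead, buf);

int
main(void)
{
    static struct buf bufs[4];
    struct bufhead fifo;
    struct buf *bf;
    int frame_depth = 0, slot_depth = 0, i;

    TAILQ_INIT(&fifo);

    /* Push two 2-frame slots: {0,1} and {2,3}. */
    for (i = 0; i < 4; i += 2) {
        bufs[i].flags |= BUF_FIFOPTR;
        bufs[i + 1].flags |= BUF_FIFOEND;
        TAILQ_INSERT_TAIL(&fifo, &bufs[i], link);
        TAILQ_INSERT_TAIL(&fifo, &bufs[i + 1], link);
        frame_depth += 2;
        slot_depth++;       /* one hardware FIFO entry */
    }

    /* Drain; slot accounting driven purely by the FIFOEND flag. */
    while ((bf = TAILQ_FIRST(&fifo)) != NULL) {
        TAILQ_REMOVE(&fifo, bf, link);
        frame_depth--;
        if (bf->flags & BUF_FIFOEND)
            slot_depth--;
    }
    assert(frame_depth == 0 && slot_depth == 0);
    printf("frames and slots both drained to zero\n");
    return (0);
}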