Implement my first cut at "correct" node power-save and PS-POLL support.

This implements PS-POLL awareness in the following areas:

* Implement frame "leaking", which allows for a software queue to be
  scheduled even though it's asleep
* Track whether a frame has been leaked or not
* Leak out a single non-AMPDU frame when transmitting aggregates
* Queue BAR frames if the node is asleep
* Direct-dispatch the rest of control and management frames.  This
  allows for things like re-association to occur (which involves sending
  probe req/resp as well as assoc request/response) when the node is
  asleep and then tries reassociating.
* Limit how many frames can sit in the software node queue whilst the
  node is asleep.  net80211 is already buffering frames for us, so this
  is mostly just paranoia.
* Add a PS-POLL method which leaks out a frame if there's something in
  the software queue, else it calls net80211's ps-poll routine.  Since
  the ath PS-POLL routine marks the node as having a single frame to
  leak, either a software-queued frame would leak, OR the next queued
  frame would leak.  The next queued frame could be something from the
  net80211 power save queue, OR it could be a NULL frame from net80211.

TODO:

* Don't transmit further BAR frames (eg via a timeout) if the node is
  currently asleep.  Otherwise we may end up exhausting management
  frames due to lots of queued BAR frames.  I may just undo this bit
  later on and direct-dispatch BAR frames even if the node is asleep.
* It would be nice to burst out a single A-MPDU frame if both ends
  support this.  I may end up adding a FreeBSD IE soon to negotiate
  this power save behaviour.
* I should make STAs time out of power save mode if they've been in
  power save for more than a handful of seconds.  This way cards that
  get "stuck" in power save mode don't stay there for the "inactivity"
  timeout in net80211.
* Move the queue depth check into the driver layer (ath_start /
  ath_transmit) rather than doing it in the TX path.
* There could be some naughty corner cases with ps-poll leaking.
  Specifically, if net80211 generates a NULL data frame whilst another
  transmitter sends a normal data frame out via net80211 output /
  transmit, we need to ensure that the NULL data frame goes out first.
  This is one of those things that should occur inside the VAP/ic TX
  lock.  Grr, more investigations to do..

Tested:

* STA: AR5416, AR9280
* AP: AR5416, AR9280, AR9160
commit c059ecd485
parent d8aa05d972
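Before diving into the diff: the heart of the change is the PS-POLL handler and the per-node leak counter. The following is a condensed, illustrative sketch of that flow, boiled down from ath_node_recv_pspoll() and ath_tx_leak_count_update() in the diff below. The helper name is made up for illustration, and locking, the debug output and the "TIDs claimed traffic but were empty" corner case are omitted, so treat it as a reading aid rather than the committed code.

static void
pspoll_leak_one_frame(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
{
	struct ieee80211_node *ni = &an->an_node;
	struct ath_vap *avp = ATH_VAP(ni->ni_vap);
	int tid;

	/* Allow exactly one frame past the "node is asleep" pause. */
	an->an_leak_count = 1;

	if (an->an_swq_depth == 0) {
		/*
		 * Nothing in the driver software queue; let net80211
		 * answer the poll (psq traffic or a NULL data frame),
		 * which then becomes the single leaked frame.
		 */
		avp->av_recv_pspoll(ni, m);
		return;
	}

	/*
	 * Schedule the highest TID with traffic; the TX taskqueue will
	 * push out one frame, and ath_tx_leak_count_update() sets
	 * MORE_DATA if anything is still queued.
	 */
	for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
		if (an->an_tid[tid].axq_depth != 0) {
			ath_tx_tid_sched(sc, &an->an_tid[tid]);
			taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
			return;
		}
	}
}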
@@ -125,7 +125,7 @@ __FBSDID("$FreeBSD$");
/*
* Only enable this if you're working on PS-POLL support.
*/
#undef ATH_SW_PSQ
#define ATH_SW_PSQ

/*
* ATH_BCBUF determines the number of vap's that can transmit
@@ -212,6 +212,7 @@ static void ath_announce(struct ath_softc *);
static void ath_dfs_tasklet(void *, int);
static void ath_node_powersave(struct ieee80211_node *, int);
static int ath_node_set_tim(struct ieee80211_node *, int);
static void ath_node_recv_pspoll(struct ieee80211_node *, struct mbuf *);

#ifdef IEEE80211_SUPPORT_TDMA
#include <dev/ath/if_ath_tdma.h>
@@ -694,6 +695,11 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
*/
sc->sc_txq_mcastq_maxdepth = ath_txbuf;

/*
* How deep can the node software TX queue get whilst it's asleep.
*/
sc->sc_txq_node_psq_maxdepth = 16;

/*
* Default the maximum queue depth for a given node
* to 1/4'th the TX buffers, or 64, whichever
@@ -1248,6 +1254,9 @@ ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
avp->av_set_tim = vap->iv_set_tim;
vap->iv_set_tim = ath_node_set_tim;

avp->av_recv_pspoll = vap->iv_recv_pspoll;
vap->iv_recv_pspoll = ath_node_recv_pspoll;

/* Set default parameters */

/*
@@ -6169,9 +6178,11 @@ ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
an->an_tim_set == 1 &&
an->an_swq_depth == 0) {
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: an=%p, swq_depth=0, tim_set=1, psq_set=0,"
"%s: %6D: swq_depth=0, tim_set=1, psq_set=0,"
" clear!\n",
__func__, an);
__func__,
ni->ni_macaddr,
":");
an->an_tim_set = 0;
(void) avp->av_set_tim(ni, 0);
}
@@ -6181,6 +6192,151 @@ ath_tx_update_tim(struct ath_softc *sc, struct ieee80211_node *ni,
#endif /* ATH_SW_PSQ */
}

/*
* Received a ps-poll frame from net80211.
*
* Here we get a chance to serve out a software-queued frame ourselves
* before we punt it to net80211 to transmit us one itself - either
* because there's traffic in the net80211 psq, or a NULL frame to
* indicate there's nothing else.
*/
static void
ath_node_recv_pspoll(struct ieee80211_node *ni, struct mbuf *m)
{
#ifdef ATH_SW_PSQ
struct ath_node *an;
struct ath_vap *avp;
struct ieee80211com *ic = ni->ni_ic;
struct ath_softc *sc = ic->ic_ifp->if_softc;
int tid;

/* Just paranoia */
if (ni == NULL)
return;

/*
* Unassociated (temporary node) station.
*/
if (ni->ni_associd == 0)
return;

/*
* We do have an active node, so let's begin looking into it.
*/
an = ATH_NODE(ni);
avp = ATH_VAP(ni->ni_vap);

/*
* For now, we just call the original ps-poll method.
* Once we're ready to flip this on:
*
* + Set leak to 1, as no matter what we're going to have
* to send a frame;
* + Check the software queue and if there's something in it,
* schedule the highest TID thas has traffic from this node.
* Then make sure we schedule the software scheduler to
* run so it picks up said frame.
*
* That way whatever happens, we'll at least send _a_ frame
* to the given node.
*
* Again, yes, it's crappy QoS if the node has multiple
* TIDs worth of traffic - but let's get it working first
* before we optimise it.
*
* Also yes, there's definitely latency here - we're not
* direct dispatching to the hardware in this path (and
* we're likely being called from the packet receive path,
* so going back into TX may be a little hairy!) but again
* I'd like to get this working first before optimising
* turn-around time.
*/

ATH_TX_LOCK(sc);

/*
* Legacy - we're called and the node isn't asleep.
* Immediately punt.
*/
if (! an->an_is_powersave) {
device_printf(sc->sc_dev,
"%s: %6D: not in powersave?\n",
__func__,
ni->ni_macaddr,
":");
ATH_TX_UNLOCK(sc);
avp->av_recv_pspoll(ni, m);
return;
}

/*
* We're in powersave.
*
* Leak a frame.
*/
an->an_leak_count = 1;

/*
* Now, if there's no frames in the node, just punt to
* recv_pspoll.
*
* Don't bother checking if the TIM bit is set, we really
* only care if there are any frames here!
*/
if (an->an_swq_depth == 0) {
ATH_TX_UNLOCK(sc);
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: SWQ empty; punting to net80211\n",
__func__,
ni->ni_macaddr,
":");
avp->av_recv_pspoll(ni, m);
return;
}

/*
* Ok, let's schedule the highest TID that has traffic
* and then schedule something.
*/
for (tid = IEEE80211_TID_SIZE - 1; tid >= 0; tid--) {
struct ath_tid *atid = &an->an_tid[tid];
/*
* No frames? Skip.
*/
if (atid->axq_depth == 0)
continue;
ath_tx_tid_sched(sc, atid);
/*
* XXX we could do a direct call to the TXQ
* scheduler code here to optimise latency
* at the expense of a REALLY deep callstack.
*/
ATH_TX_UNLOCK(sc);
taskqueue_enqueue(sc->sc_tq, &sc->sc_txqtask);
DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: leaking frame to TID %d\n",
__func__,
ni->ni_macaddr,
":",
tid);
return;
}

ATH_TX_UNLOCK(sc);

/*
* XXX nothing in the TIDs at this point? Eek.
*/
device_printf(sc->sc_dev, "%s: %6D: TIDs empty, but ath_node showed traffic?!\n",
__func__,
ni->ni_macaddr,
":");
avp->av_recv_pspoll(ni, m);
#else
avp->av_recv_pspoll(ni, m);
#endif /* ATH_SW_PSQ */
}

MODULE_VERSION(if_ath, 1);
MODULE_DEPEND(if_ath, wlan, 1, 1, 1); /* 802.11 media layer */
#if defined(IEEE80211_ALQ) || defined(AH_DEBUG_ALQ)

@@ -1400,6 +1400,69 @@ ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
}
}

/*
* Return whether this frame should be software queued or
* direct dispatched.
*
* When doing powersave, BAR frames should be queued but other management
* frames should be directly sent.
*
* When not doing powersave, stick BAR frames into the hardware queue
* so it goes out even though the queue is paused.
*
* For now, management frames are also software queued by default.
*/
static int
ath_tx_should_swq_frame(struct ath_softc *sc, struct ath_node *an,
struct mbuf *m0, int *queue_to_head)
{
struct ieee80211_node *ni = &an->an_node;
struct ieee80211_frame *wh;
uint8_t type, subtype;

wh = mtod(m0, struct ieee80211_frame *);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

(*queue_to_head) = 0;

/* If it's not in powersave - direct-dispatch BAR */
if ((ATH_NODE(ni)->an_is_powersave == 0)
&& type == IEEE80211_FC0_TYPE_CTL &&
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: BAR: TX'ing direct\n", __func__);
return (0);
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
&& type == IEEE80211_FC0_TYPE_CTL &&
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
/* BAR TX whilst asleep; queue */
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: swq: TX'ing\n", __func__);
(*queue_to_head) = 1;
return (1);
} else if ((ATH_NODE(ni)->an_is_powersave == 1)
&& (type == IEEE80211_FC0_TYPE_MGT ||
type == IEEE80211_FC0_TYPE_CTL)) {
/*
* Other control/mgmt frame; bypass software queuing
* for now!
*/
device_printf(sc->sc_dev,
"%s: %6D: Node is asleep; sending mgmt "
"(type=%d, subtype=%d)\n",
__func__,
ni->ni_macaddr,
":",
type,
subtype);
return (0);
} else {
return (1);
}
}

/*
* Transmit the given frame to the hardware.
*
@@ -1410,6 +1473,10 @@ ath_tx_update_clrdmask(struct ath_softc *sc, struct ath_tid *tid,
* it for this long when not doing software aggregation), later on
* break this function into "setup_normal" and "xmit_normal". The
* lock only needs to be held for the ath_tx_handoff call.
*
* XXX we don't update the leak count here - if we're doing
* direct frame dispatch, we need to be able to do it without
* decrementing the leak count (eg multicast queue frames.)
*/
static void
ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1786,6 +1853,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
int is_ampdu, is_ampdu_tx, is_ampdu_pending;
ieee80211_seq seqno;
uint8_t type, subtype;
int queue_to_head;

ATH_TX_LOCK_ASSERT(sc);

@@ -1826,6 +1894,32 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
}
}

/*
* Enforce how deep the unicast queue can grow.
*
* If the node is in power save then we don't want
* the software queue to grow too deep, or a node may
* end up consuming all of the ath_buf entries.
*
* For now, only do this for DATA frames.
*
* We will want to cap how many management/control
* frames get punted to the software queue so it doesn't
* fill up. But the correct solution isn't yet obvious.
* In any case, this check should at least let frames pass
* that we are direct-dispatching.
*
* XXX TODO: duplicate this to the raw xmit path!
*/
if (type == IEEE80211_FC0_TYPE_DATA &&
ATH_NODE(ni)->an_is_powersave &&
ATH_NODE(ni)->an_swq_depth >
sc->sc_txq_node_psq_maxdepth) {
sc->sc_stats.ast_tx_node_psq_overflow++;
m_freem(m0);
return (ENOBUFS);
}

/* A-MPDU TX */
is_ampdu_tx = ath_tx_ampdu_running(sc, ATH_NODE(ni), tid);
is_ampdu_pending = ath_tx_ampdu_pending(sc, ATH_NODE(ni), tid);
@@ -1924,22 +2018,26 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
* either been TXed successfully or max retries has been
* reached.)
*/
/*
* Until things are better debugged - if this node is asleep
* and we're sending it a non-BAR frame, direct dispatch it.
* Why? Because we need to figure out what's actually being
* sent - eg, during reassociation/reauthentication after
* the node (last) disappeared whilst asleep, the driver should
* have unpaused/unsleep'ed the node. So until that is
* sorted out, use this workaround.
*/
if (txq == &avp->av_mcastq) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: bf=%p: mcastq: TX'ing\n", __func__, bf);
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, txq, bf);
} else if (type == IEEE80211_FC0_TYPE_CTL &&
subtype == IEEE80211_FC0_SUBTYPE_BAR) {
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: BAR: TX'ing direct\n", __func__);
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
&queue_to_head)) {
ath_tx_swq(sc, ni, txq, queue_to_head, bf);
} else {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, txq, bf);
} else {
/* add to software queue */
DPRINTF(sc, ATH_DEBUG_SW_TX,
"%s: bf=%p: swq: TX'ing\n", __func__, bf);
ath_tx_swq(sc, ni, txq, bf);
}
#else
/*
@@ -1947,6 +2045,12 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
* direct-dispatch to the hardware.
*/
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
done:
@@ -1973,6 +2077,8 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
u_int pri;
int o_tid = -1;
int do_override;
uint8_t type, subtype;
int queue_to_head;

ATH_TX_LOCK_ASSERT(sc);

@@ -1986,6 +2092,9 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* XXX honor IEEE80211_BPF_DATAPAD */
pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN;

type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

ATH_KTR(sc, ATH_KTR_TX, 2,
"ath_tx_raw_start: ni=%p, bf=%p, raw", ni, bf);

@@ -2162,16 +2271,35 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
__func__, do_override);

#if 1
/*
* Put addba frames in the right place in the right TID/HWQ.
*/
if (do_override) {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* XXX if it's addba frames, should we be leaking
* them out via the frame leak method?
* XXX for now let's not risk it; but we may wish
* to investigate this later.
*/
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
} else {
} else if (ath_tx_should_swq_frame(sc, ATH_NODE(ni), m0,
&queue_to_head)) {
/* Queue to software queue */
ath_tx_swq(sc, ni, sc->sc_ac2q[pri], bf);
ath_tx_swq(sc, ni, sc->sc_ac2q[pri], queue_to_head, bf);
} else {
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
}
#else
/* Direct-dispatch to the hardware */
bf->bf_state.bfs_txflags |= HAL_TXDESC_CLRDMASK;
/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, sc->sc_ac2q[pri], bf);
#endif
return 0;
@@ -2603,6 +2731,60 @@ ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
__func__, tap->txa_start, tap->txa_wnd, tid->baw_head);
}

static void
ath_tx_leak_count_update(struct ath_softc *sc, struct ath_tid *tid,
struct ath_buf *bf)
{
struct ieee80211_frame *wh;

ATH_TX_LOCK_ASSERT(sc);

if (tid->an->an_leak_count > 0) {
wh = mtod(bf->bf_m, struct ieee80211_frame *);

/*
* Update MORE based on the software/net80211 queue states.
*/
if ((tid->an->an_stack_psq > 0)
|| (tid->an->an_swq_depth > 0))
wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
else
wh->i_fc[1] &= ~IEEE80211_FC1_MORE_DATA;

DPRINTF(sc, ATH_DEBUG_NODE_PWRSAVE,
"%s: %6D: leak count = %d, psq=%d, swq=%d, MORE=%d\n",
__func__,
tid->an->an_node.ni_macaddr,
":",
tid->an->an_leak_count,
tid->an->an_stack_psq,
tid->an->an_swq_depth,
!! (wh->i_fc[1] & IEEE80211_FC1_MORE_DATA));

/*
* Re-sync the underlying buffer.
*/
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
BUS_DMASYNC_PREWRITE);

tid->an->an_leak_count --;
}
}

static int
ath_tx_tid_can_tx_or_sched(struct ath_softc *sc, struct ath_tid *tid)
{

ATH_TX_LOCK_ASSERT(sc);

if (tid->an->an_leak_count > 0) {
return (1);
}
if (tid->paused)
return (0);
return (1);
}

/*
* Mark the current node/TID as ready to TX.
*
@@ -2611,14 +2793,19 @@ ath_tx_update_baw(struct ath_softc *sc, struct ath_node *an,
*
* The TXQ lock must be held.
*/
static void
void
ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)
{
struct ath_txq *txq = sc->sc_ac2q[tid->ac];

ATH_TX_LOCK_ASSERT(sc);

if (tid->paused)
/*
* If we are leaking out a frame to this destination
* for PS-POLL, ensure that we allow scheduling to
* occur.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
return; /* paused, can't schedule yet */

if (tid->sched)
@@ -2626,6 +2813,30 @@ ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid)

tid->sched = 1;

#if 0
/*
* If this is a sleeping node we're leaking to, given
* it a higher priority. This is so bad for QoS it hurts.
*/
if (tid->an->an_leak_count) {
TAILQ_INSERT_HEAD(&txq->axq_tidq, tid, axq_qelem);
} else {
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}
#endif

/*
* We can't do the above - it'll confuse the TXQ software
* scheduler which will keep checking the _head_ TID
* in the list to see if it has traffic. If we queue
* a TID to the head of the list and it doesn't transmit,
* we'll check it again.
*
* So, get the rest of this leaking frames support working
* and reliable first and _then_ optimise it so they're
* pushed out in front of any other pending software
* queued nodes.
*/
TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}

@@ -2722,7 +2933,7 @@ ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
tap = ath_tx_get_tx_tid(an, tid->tid);

/* paused? queue */
if (tid->paused) {
if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
ATH_TID_INSERT_HEAD(tid, bf, bf_list);
/* XXX don't sched - we're paused! */
return;
@@ -2782,6 +2993,13 @@ ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
/* Set completion handler, multi-frame aggregate or not */
bf->bf_comp = ath_tx_aggr_comp;

/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);

/* Hand off to hardware */
ath_tx_handoff(sc, txq, bf);
}
@@ -2793,8 +3011,8 @@ ath_tx_xmit_aggr(struct ath_softc *sc, struct ath_node *an,
* relevant software queue.
*/
void
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq,
struct ath_buf *bf)
ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_txq *txq, int queue_to_head, struct ath_buf *bf)
{
struct ath_node *an = ATH_NODE(ni);
struct ieee80211_frame *wh;
@@ -2824,11 +3042,21 @@ ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq,
* If the hardware queue is busy, queue it.
* If the TID is paused or the traffic it outside BAW, software
* queue it.
*
* If the node is in power-save and we're leaking a frame,
* leak a single frame.
*/
if (atid->paused) {
if (! ath_tx_tid_can_tx_or_sched(sc, atid)) {
/* TID is paused, queue */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: paused\n", __func__);
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
/*
* If the caller requested that it be sent at a high
* priority, queue it at the head of the list.
*/
if (queue_to_head)
ATH_TID_INSERT_HEAD(atid, bf, bf_list);
else
ATH_TID_INSERT_TAIL(atid, bf, bf_list);
} else if (ath_tx_ampdu_pending(sc, an, tid)) {
/* AMPDU pending; queue */
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: pending\n", __func__);
@@ -2878,6 +3106,17 @@ ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_txq *txq,
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: xmit_normal\n", __func__);
/* See if clrdmask needs to be set */
ath_tx_update_clrdmask(sc, atid, bf);

/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, atid, bf);

/*
* Dispatch the frame.
*/
ath_tx_xmit_normal(sc, txq, bf);
} else {
/* Busy; queue */
@@ -3646,6 +3885,7 @@ ath_tx_tid_reset(struct ath_softc *sc, struct ath_tid *tid)
* do a complete hard reset of state here - no pause, no
* complete counter, etc.
*/

}

/*
@@ -3670,7 +3910,7 @@ ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
ATH_TX_LOCK(sc);
DPRINTF(sc, ATH_DEBUG_NODE,
"%s: %6D: flush; is_powersave=%d, stack_psq=%d, tim=%d, "
"swq_depth=%d, clrdmask=%d\n",
"swq_depth=%d, clrdmask=%d, leak_count=%d\n",
__func__,
an->an_node.ni_macaddr,
":",
@@ -3678,18 +3918,26 @@ ath_tx_node_flush(struct ath_softc *sc, struct ath_node *an)
an->an_stack_psq,
an->an_tim_set,
an->an_swq_depth,
an->clrdmask);
an->clrdmask,
an->an_leak_count);

for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
struct ath_tid *atid = &an->an_tid[tid];

/* Free packets */
ath_tx_tid_drain(sc, an, atid, &bf_cq);

/* Remove this tid from the list of active tids */
ath_tx_tid_unsched(sc, atid);

/* Reset the per-TID pause, BAR, etc state */
ath_tx_tid_reset(sc, atid);
}

/*
* Clear global leak count
*/
an->an_leak_count = 0;
ATH_TX_UNLOCK(sc);

/* Handle completed frames */
@@ -4860,6 +5108,11 @@ ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d\n", __func__, tid->tid);
ATH_TX_LOCK_ASSERT(sc);

/*
* XXX TODO: If we're called for a queue that we're leaking frames to,
* ensure we only leak one.
*/

tap = ath_tx_get_tx_tid(an, tid->tid);

if (tid->tid == IEEE80211_NONQOS_TID)
@@ -4877,7 +5130,7 @@ ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
* of packet loss; but as its serialised with this code,
* it won't "appear" half way through queuing packets.
*/
if (tid->paused)
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
break;

bf = ATH_TID_FIRST(tid);
@@ -5029,6 +5282,14 @@ ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
if (bf->bf_state.bfs_tid == IEEE80211_NONQOS_TID)
device_printf(sc->sc_dev, "%s: TID=16?\n", __func__);

/*
* Update leak count and frame config if were leaking frames.
*
* XXX TODO: it should update all frames in an aggregate
* correctly!
*/
ath_tx_leak_count_update(sc, tid, bf);

/* Punt to txq */
ath_tx_handoff(sc, txq, bf);

@@ -5044,7 +5305,8 @@ ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
* XXX locking on txq here?
*/
if (txq->axq_aggr_depth >= sc->sc_hwq_limit ||
status == ATH_AGGR_BAW_CLOSED)
(status == ATH_AGGR_BAW_CLOSED ||
status == ATH_AGGR_LEAK_CLOSED))
break;
}
}
@@ -5077,8 +5339,11 @@ ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
/*
* If the upper layers have paused the TID, don't
* queue any further packets.
*
* XXX if we are leaking frames, make sure we decrement
* that counter _and_ we continue here.
*/
if (tid->paused)
if (! ath_tx_tid_can_tx_or_sched(sc, tid))
break;

bf = ATH_TID_FIRST(tid);
@@ -5114,6 +5379,13 @@ ath_tx_tid_hw_queue_norm(struct ath_softc *sc, struct ath_node *an,
ath_tx_rate_fill_rcflags(sc, bf);
ath_tx_setds(sc, bf);

/*
* Update the current leak count if
* we're leaking frames; and set the
* MORE flag as appropriate.
*/
ath_tx_leak_count_update(sc, tid, bf);

/* Track outstanding buffer count to hardware */
/* aggregates are "one" buffer */
tid->hwq_depth++;
@@ -5161,7 +5433,11 @@ ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
DPRINTF(sc, ATH_DEBUG_SW_TX, "%s: tid=%d, paused=%d\n",
__func__, tid->tid, tid->paused);
ath_tx_tid_unsched(sc, tid);
if (tid->paused) {
/*
* This node may be in power-save and we're leaking
* a frame; be careful.
*/
if (! ath_tx_tid_can_tx_or_sched(sc, tid)) {
continue;
}
if (ath_tx_ampdu_running(sc, tid->an, tid->tid))
@@ -5182,6 +5458,11 @@ ath_txq_sched(struct ath_softc *sc, struct ath_txq *txq)
* If this was the last entry on the original list, stop.
* Otherwise nodes that have been rescheduled onto the end
* of the TID FIFO list will just keep being rescheduled.
*
* XXX What should we do about nodes that were paused
* but are pending a leaking frame in response to a ps-poll?
* They'll be put at the front of the list; so they'll
* prematurely trigger this condition! Ew.
*/
if (tid == last)
break;
@@ -5433,6 +5714,7 @@ ath_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
TAILQ_REMOVE(&bf_cq, bf, bf_list);
ath_tx_default_comp(sc, bf, 1);
}

}

/*
@@ -5604,33 +5886,19 @@ ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)

ATH_TX_UNLOCK_ASSERT(sc);

/*
* It's possible that a parallel call to ath_tx_node_wakeup()
* will unpause these queues.
*
* The node lock can't just be grabbed here, as there's places
* in the driver where the node lock is grabbed _within_ a
* TXQ lock.
* So, we do this delicately and unwind state if needed.
*
* + Pause all the queues
* + Grab the node lock
* + If the queue is already asleep, unpause and quit
* + else just mark as asleep.
*
* A parallel sleep() call will just pause and then
* find they're already paused, so undo it.
*
* A parallel wakeup() call will check if asleep is 1
* and if it's not (ie, it's 0), it'll treat it as already
* being awake. If it's 1, it'll mark it as 0 and then
* unpause everything.
*
* (Talk about a delicate hack.)
*/

/* Suspend all traffic on the node */
ATH_TX_LOCK(sc);

if (an->an_is_powersave) {
device_printf(sc->sc_dev,
"%s: %6D: node was already asleep!\n",
__func__,
an->an_node.ni_macaddr,
":");
ATH_TX_UNLOCK(sc);
return;
}

for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
atid = &an->an_tid[tid];
txq = sc->sc_ac2q[atid->ac];
@@ -5638,21 +5906,6 @@ ath_tx_node_sleep(struct ath_softc *sc, struct ath_node *an)
ath_tx_tid_pause(sc, atid);
}

/* In case of concurrency races from net80211.. */
if (an->an_is_powersave == 1) {
device_printf(sc->sc_dev,
"%s: an=%p: node was already asleep\n",
__func__, an);
for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
atid = &an->an_tid[tid];
txq = sc->sc_ac2q[atid->ac];

ath_tx_tid_resume(sc, atid);
}
ATH_TX_UNLOCK(sc);
return;
}

/* Mark node as in powersaving */
an->an_is_powersave = 1;

@@ -5674,7 +5927,7 @@ ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)

ATH_TX_LOCK(sc);

/* In case of concurrency races from net80211.. */
/* !? */
if (an->an_is_powersave == 0) {
ATH_TX_UNLOCK(sc);
device_printf(sc->sc_dev,
@@ -5685,6 +5938,10 @@ ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an)

/* Mark node as awake */
an->an_is_powersave = 0;
/*
* Clear any pending leaked frame requests
*/
an->an_leak_count = 0;

for (tid = 0; tid < IEEE80211_TID_SIZE; tid++) {
atid = &an->an_tid[tid];

@@ -98,7 +98,7 @@ extern int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,

/* software queue stuff */
extern void ath_tx_swq(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_txq *txq, struct ath_buf *bf);
struct ath_txq *txq, int queue_to_head, struct ath_buf *bf);
extern void ath_tx_tid_init(struct ath_softc *sc, struct ath_node *an);
extern void ath_tx_tid_hw_queue_aggr(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid);
@@ -113,6 +113,7 @@ extern void ath_tx_addto_baw(struct ath_softc *sc, struct ath_node *an,
struct ath_tid *tid, struct ath_buf *bf);
extern struct ieee80211_tx_ampdu * ath_tx_get_tx_tid(struct ath_node *an,
int tid);
extern void ath_tx_tid_sched(struct ath_softc *sc, struct ath_tid *tid);

/* TX addba handling */
extern int ath_addba_request(struct ieee80211_node *ni,
@@ -136,6 +137,11 @@ extern void ath_tx_node_wakeup(struct ath_softc *sc, struct ath_node *an);
extern int ath_tx_node_is_asleep(struct ath_softc *sc, struct ath_node *an);
extern void ath_tx_node_reassoc(struct ath_softc *sc, struct ath_node *an);

/*
* Hardware queue stuff
*/
extern void ath_tx_push_pending(struct ath_softc *sc, struct ath_txq *txq);

/*
* Misc debugging stuff
*/

@@ -862,9 +862,14 @@ ath_tx_form_aggr(struct ath_softc *sc, struct ath_node *an,
bf_prev = bf;

/*
* XXX TODO: if any sub-frames have RTS/CTS enabled;
* enable it for the entire aggregate.
* If we're leaking frames, just return at this point;
* we've queued a single frame and we don't want to add
* any more.
*/
if (tid->an->an_leak_count) {
status = ATH_AGGR_LEAK_CLOSED;
break;
}

#if 0
/*

@@ -46,6 +46,7 @@ typedef enum {
ATH_AGGR_8K_LIMITED,
ATH_AGGR_ERROR,
ATH_AGGR_NONAGGR,
ATH_AGGR_LEAK_CLOSED,
} ATH_AGGR_STATUS;

extern int ath_max_4ms_framelen[4][32];

@@ -163,9 +163,10 @@ struct ath_stats {
u_int32_t ast_tx_mcastq_overflow; /* multicast queue overflow */
u_int32_t ast_rx_keymiss;
u_int32_t ast_tx_swfiltered;
u_int32_t ast_tx_node_psq_overflow;
u_int32_t ast_rx_stbc; /* RX STBC frame */
u_int32_t ast_tx_nodeq_overflow; /* node sw queue overflow */
u_int32_t ast_pad[13];
u_int32_t ast_pad[12];
};

#define SIOCGATHSTATS _IOWR('i', 137, struct ifreq)

@@ -182,6 +182,7 @@ struct ath_node {
uint32_t an_swq_depth; /* how many SWQ packets for this
node */
int clrdmask; /* has clrdmask been set */
uint32_t an_leak_count; /* How many frames to leak during pause */
/* variable-length rate control state follows */
};
#define ATH_NODE(ni) ((struct ath_node *)(ni))
@@ -465,6 +466,8 @@ struct ath_vap {
void (*av_bmiss)(struct ieee80211vap *);
void (*av_node_ps)(struct ieee80211_node *, int);
int (*av_set_tim)(struct ieee80211_node *, int);
void (*av_recv_pspoll)(struct ieee80211_node *,
struct mbuf *);
};
#define ATH_VAP(vap) ((struct ath_vap *)(vap))

@@ -794,6 +797,8 @@ struct ath_softc {
* management/multicast frames;
* + multicast frames overwhelming everything (when the
* air is sufficiently busy that cabq can't drain.)
* + A node in powersave shouldn't be allowed to exhaust
* all available mbufs;
*
* These implement:
* + data_minfree is the maximum number of free buffers
@@ -804,6 +809,7 @@ struct ath_softc {
int sc_txq_node_maxdepth;
int sc_txq_data_minfree;
int sc_txq_mcastq_maxdepth;
int sc_txq_node_psq_maxdepth;

/*
* Aggregation twiddles
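For quick reference, the queue-versus-dispatch policy applied by ath_tx_should_swq_frame() (earlier in this diff) can be restated as below. The helper name and the pre-extracted type/subtype arguments are only for illustration; the committed function takes the mbuf and derives them itself.

/*
 * Restated policy:
 *   node awake  + BAR             -> direct dispatch to the hardware queue
 *   node asleep + BAR             -> software queue, at the head of the TID
 *   node asleep + other mgmt/ctrl -> direct dispatch (so e.g. reassociation
 *                                    frames still go out)
 *   everything else               -> software queue
 */
static int
swq_policy(const struct ath_node *an, uint8_t type, uint8_t subtype,
    int *queue_to_head)
{
	int is_bar = (type == IEEE80211_FC0_TYPE_CTL &&
	    subtype == IEEE80211_FC0_SUBTYPE_BAR);

	*queue_to_head = 0;

	if (an->an_is_powersave == 0 && is_bar)
		return (0);		/* direct dispatch */
	if (an->an_is_powersave && is_bar) {
		*queue_to_head = 1;	/* software queue, front of the TID */
		return (1);
	}
	if (an->an_is_powersave &&
	    (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL))
		return (0);		/* direct dispatch */
	return (1);			/* software queue */
}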