Hoist 802.11 encapsulation up into net80211:

o call ieee80211_encap in ieee80211_start so frames passed down to drivers
  are already encapsulated
o remove ieee80211_encap calls in drivers
o fixup wi so it recreates the 802.3 header it requires from the 802.11
  header contents
o move fast-frame aggregation from ath to net80211 (conditional on
  IEEE80211_SUPPORT_SUPERG):
  - aggregation is now done in ieee80211_start; it is enabled when the
    packets/sec exceeds ieee80211_ffppsmin (net.wlan.ffppsmin) and frames
    are held on a staging queue according to ieee80211_ffagemax
    (net.wlan.ffagemax) to wait for a frame to combine with
  - drivers must call back to age/flush the staging queue (ath does this
    on tx done, at swba, and on rx according to the state of the tx queues
    and/or the contents of the staging queue)
  - remove fast-frame-related data structures from ath
  - add ieee80211_ff_node_init and ieee80211_ff_node_cleanup to handle
    per-node fast-frames state (we reuse 11n tx ampdu state)
o change ieee80211_encap calling convention to include an explicit vap
  so frames coming through a WDS vap are recognized w/o setting M_WDS

With these changes any device able to tx/rx 3Kbyte+ frames can use fast-frames.

Reviewed by:	thompsa, rpaulo, avatar, imp, sephe
This commit is contained in:
Sam Leffler 2009-03-30 21:53:27 +00:00
parent 4f8cb6ff40
commit 339ccfb391
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=190579
24 changed files with 462 additions and 499 deletions

View File

@ -51,6 +51,7 @@ options FFS #Berkeley Fast Filesystem
options SOFTUPDATES #Enable FFS soft updates support
options NFSCLIENT #Network Filesystem Client
options NFS_ROOT #NFS usable as /, requires NFSCLIENT
options NFS_LEGACYRPC
options BOOTP
options BOOTP_NFSROOT
options BOOTP_NFSV3

View File

@ -71,6 +71,9 @@ __FBSDID("$FreeBSD$");
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
#ifdef IEEE80211_SUPPORT_SUPERG
#include <net80211/ieee80211_superg.h>
#endif
#ifdef IEEE80211_SUPPORT_TDMA
#include <net80211/ieee80211_tdma.h>
#endif
@ -1293,7 +1296,17 @@ ath_intr(void *arg)
sc->sc_tdmaswba--;
} else
#endif
{
ath_beacon_proc(sc, 0);
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Schedule the rx taskq in case there's no
* traffic so any frames held on the staging
* queue are aged and potentially flushed.
*/
taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask);
#endif
}
}
if (status & HAL_INT_RXEOL) {
/*
@ -1662,320 +1675,6 @@ ath_reset_vap(struct ieee80211vap *vap, u_long cmd)
return ath_reset(ifp);
}
/*
 * Stage-queue flush predicate that never reports "done"; passing it
 * to ath_ff_stageq_flush() forces every staged frame to be drained
 * regardless of its age.
 */
static int
ath_ff_always(struct ath_txq *txq, struct ath_buf *bf)
{
return 0;
}
#if 0
/*
 * Alternate flush predicate (currently disabled): report "done" once
 * the buffer's age is within ATH_FF_STAGEMAX of the queue's current
 * age, i.e. stop flushing when remaining frames are still young.
 */
static int
ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf)
{
return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX;
}
#endif
/*
 * Flush the fast-frame staging queue.
 *
 * Repeatedly remove the oldest buffer from txq->axq_stageq until the
 * queue is empty or the supplied predicate reports the flush is done.
 * Each removed frame is encapsulated and handed to ath_tx_start; on
 * any failure the node reference, mbuf, and ath_buf are all reclaimed.
 * The txq lock is held only while manipulating the staging queue and
 * is dropped before encapsulation/transmit.
 */
static void
ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq,
int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf))
{
struct ath_buf *bf;
struct ieee80211_node *ni;
int pktlen, pri;
for (;;) {
ATH_TXQ_LOCK(txq);
/*
* Go from the back (oldest) to front so we can
* stop early based on the age of the entry.
*/
bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype);
if (bf == NULL || ath_ff_flushdonetest(txq, bf)) {
/* nothing (more) to flush */
ATH_TXQ_UNLOCK(txq);
break;
}
ni = bf->bf_node;
pri = M_WME_GETAC(bf->bf_m);
KASSERT(ATH_NODE(ni)->an_ff_buf[pri],
("no bf on staging queue %p", bf));
/* clear the per-node slot before dropping the txq lock */
ATH_NODE(ni)->an_ff_buf[pri] = NULL;
TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist);
ATH_TXQ_UNLOCK(txq);
DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n",
__func__, bf->bf_age);
sc->sc_stats.ast_ff_flush++;
/* encap and xmit */
bf->bf_m = ieee80211_encap(ni, bf->bf_m);
if (bf->bf_m == NULL) {
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
"%s: discard, encapsulation failure\n",
__func__);
sc->sc_stats.ast_tx_encap++;
goto bad;
}
pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */
if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) {
#if 0 /*XXX*/
ifp->if_opackets++;
#endif
continue;
}
bad:
/* reclaim node ref, mbuf, and buffer on any failure path */
if (ni != NULL)
ieee80211_free_node(ni);
bf->bf_node = NULL;
if (bf->bf_m != NULL) {
m_freem(bf->bf_m);
bf->bf_m = NULL;
}
ATH_TXBUF_LOCK(sc);
STAILQ_INSERT_HEAD(&sc->sc_txbuf, bf, bf_list);
ATH_TXBUF_UNLOCK(sc);
}
}
/*
 * Estimate the on-air time for frame m, including any frame already
 * staged for the same access class, at the last data rate used.
 * Used to check a prospective fast-frame aggregate against the TxOp
 * limit.  NB: assumes an_ff_buf is stable (txq lock held by caller).
 */
static __inline u_int32_t
ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m)
{
struct ieee80211com *ic = sc->sc_ifp->if_l2com;
u_int32_t framelen;
struct ath_buf *bf;
/*
* Approximate the frame length to be transmitted. A swag to add
* the following maximal values to the skb payload:
* - 32: 802.11 encap + CRC
* - 24: encryption overhead (if wep bit)
* - 4 + 6: fast-frame header and padding
* - 16: 2 LLC FF tunnel headers
* - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd)
*/
framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14;
if (ic->ic_flags & IEEE80211_F_PRIVACY)
framelen += 24;
bf = an->an_ff_buf[M_WME_GETAC(m)];
if (bf != NULL)
framelen += bf->bf_m->m_pkthdr.len;	/* include staged partner */
return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen,
sc->sc_lastdatarix, AH_FALSE);
}
/*
 * Determine if a data frame may be aggregated via ff tunnelling.
 * Returns non-zero if aggregation should be attempted; *flushq is
 * set when a staged frame exists but must be flushed first (TxOp
 * limit would be violated by combining).
 * Note the caller is responsible for checking if the destination
 * supports fast frames.
 *
 * NB: allowing EAPOL frames to be aggregated with other unicast traffic.
 * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
 * be aggregated with other types of frames when encryption is on?
 *
 * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism.
 */
static __inline int
ath_ff_can_aggregate(struct ath_softc *sc,
struct ath_node *an, struct mbuf *m, int *flushq)
{
struct ieee80211com *ic = sc->sc_ifp->if_l2com;
struct ath_txq *txq;
u_int32_t txoplimit;
u_int pri;
*flushq = 0;	/* set only when a staged frame must be flushed */
/*
* If there is no frame to combine with and the txq has
* fewer frames than the minimum required; then do not
* attempt to aggregate this frame.
*/
pri = M_WME_GETAC(m);
txq = sc->sc_ac2q[pri];
if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin)
return 0;
/*
* When not in station mode never aggregate a multicast
* frame; this insures, for example, that a combined frame
* does not require multiple encryption keys when using
* 802.1x/WPA.
*/
if (ic->ic_opmode != IEEE80211_M_STA &&
ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost))
return 0;
/*
* Consult the max bursting interval to insure a combined
* frame fits within the TxOp window.
*/
txoplimit = IEEE80211_TXOP_TO_US(
ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) {
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
"%s: FF TxOp violation\n", __func__);
if (an->an_ff_buf[pri] != NULL)
*flushq = 1;
return 0;
}
return 1; /* try to aggregate */
}
/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame. Return a reference to any frame that should be
 * sent on return; otherwise return NULL (frame was consumed and is
 * being held on the staging queue along with the node reference
 * and the ath_buf bf).
 */
static struct mbuf *
ath_ff_check(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni)
{
struct ath_node *an = ATH_NODE(ni);
struct ath_buf *bfstaged;
int ff_flush, pri;
/*
* Check if the supplied frame can be aggregated.
*
* NB: we use the txq lock to protect references to
* an->an_ff_txbuf in ath_ff_can_aggregate().
*/
ATH_TXQ_LOCK(txq);
pri = M_WME_GETAC(m);
if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) {
/* NOTE(review): this declaration shadows the outer bfstaged */
struct ath_buf *bfstaged = an->an_ff_buf[pri];
if (bfstaged != NULL) {
/*
* A frame is available for partnering; remove
* it, chain it to this one, and encapsulate.
*/
an->an_ff_buf[pri] = NULL;
TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
ATH_TXQ_UNLOCK(txq);
/*
* Chain mbufs and add FF magic.
*/
DPRINTF(sc, ATH_DEBUG_FF,
"[%s] aggregate fast-frame, age %u\n",
ether_sprintf(ni->ni_macaddr), txq->axq_curage);
m->m_nextpkt = NULL;
bfstaged->bf_m->m_nextpkt = m;
m = bfstaged->bf_m;
bfstaged->bf_m = NULL;
m->m_flags |= M_FF;	/* mark chain for ff encapsulation */
/*
* Release the node reference held while
* the packet sat on an_ff_buf[]
*/
bfstaged->bf_node = NULL;
ieee80211_free_node(ni);
/*
* Return bfstaged to the free list.
*/
ATH_TXBUF_LOCK(sc);
STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
ATH_TXBUF_UNLOCK(sc);
return m; /* ready to go */
} else {
/*
* No frame available, queue this frame to wait
* for a partner. Note that we hold the buffer
* and a reference to the node; we need the
* buffer in particular so we're certain we
* can flush the frame at a later time.
*/
DPRINTF(sc, ATH_DEBUG_FF,
"[%s] stage fast-frame, age %u\n",
ether_sprintf(ni->ni_macaddr), txq->axq_curage);
bf->bf_m = m;
bf->bf_node = ni; /* NB: held reference */
bf->bf_age = txq->axq_curage;
an->an_ff_buf[pri] = bf;
TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist);
ATH_TXQ_UNLOCK(txq);
return NULL; /* consumed */
}
}
/*
* Frame could not be aggregated, it needs to be returned
* to the caller for immediate transmission. In addition
* we check if we should first flush a frame from the
* staging queue before sending this one.
*
* NB: ath_ff_can_aggregate only marks ff_flush if a frame
* is present to flush.
*/
if (ff_flush) {
int pktlen;
bfstaged = an->an_ff_buf[pri];
an->an_ff_buf[pri] = NULL;
TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist);
ATH_TXQ_UNLOCK(txq);
DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n",
ether_sprintf(an->an_node.ni_macaddr));
/* encap and xmit */
bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m);
if (bfstaged->bf_m == NULL) {
DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF,
"%s: discard, encap failure\n", __func__);
sc->sc_stats.ast_tx_encap++;
goto ff_flushbad;
}
pktlen = bfstaged->bf_m->m_pkthdr.len;
if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: discard, xmit failure\n", __func__);
ff_flushbad:
/*
* Unable to transmit frame that was on the staging
* queue. Reclaim the node reference and other
* resources.
*/
if (ni != NULL)
ieee80211_free_node(ni);
bfstaged->bf_node = NULL;
if (bfstaged->bf_m != NULL) {
m_freem(bfstaged->bf_m);
bfstaged->bf_m = NULL;
}
ATH_TXBUF_LOCK(sc);
STAILQ_INSERT_HEAD(&sc->sc_txbuf, bfstaged, bf_list);
ATH_TXBUF_UNLOCK(sc);
} else {
#if 0
ifp->if_opackets++;
#endif
}
} else {
if (an->an_ff_buf[pri] != NULL) {
/*
* XXX: out-of-order condition only occurs for AP
* mode and multicast. There may be no valid way
* to get this condition.
*/
DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n",
ether_sprintf(an->an_node.ni_macaddr));
/* XXX stat */
}
ATH_TXQ_UNLOCK(txq);
}
return m;
}
static struct ath_buf *
_ath_getbuf_locked(struct ath_softc *sc)
{
@ -2070,9 +1769,7 @@ ath_start(struct ifnet *ifp)
struct ieee80211_node *ni;
struct ath_buf *bf;
struct mbuf *m, *next;
struct ath_txq *txq;
ath_bufhead frags;
int pri;
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid)
return;
@ -2091,47 +1788,14 @@ ath_start(struct ifnet *ifp)
ATH_TXBUF_UNLOCK(sc);
break;
}
STAILQ_INIT(&frags);
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
pri = M_WME_GETAC(m);
txq = sc->sc_ac2q[pri];
if (IEEE80211_ATH_CAP(ni->ni_vap, ni, IEEE80211_NODE_FF)) {
/*
* Check queue length; if too deep drop this
* frame (tail drop considered good).
*/
if (txq->axq_depth >= sc->sc_fftxqmax) {
DPRINTF(sc, ATH_DEBUG_FF,
"[%s] tail drop on q %u depth %u\n",
ether_sprintf(ni->ni_macaddr),
txq->axq_qnum, txq->axq_depth);
sc->sc_stats.ast_tx_qfull++;
m_freem(m);
goto reclaim;
}
m = ath_ff_check(sc, txq, bf, m, ni);
if (m == NULL) {
/* NB: ni ref & bf held on stageq */
continue;
}
}
ifp->if_opackets++;
/*
* Encapsulate the packet in prep for transmission.
*/
m = ieee80211_encap(ni, m);
if (m == NULL) {
DPRINTF(sc, ATH_DEBUG_XMIT,
"%s: encapsulation failure\n", __func__);
sc->sc_stats.ast_tx_encap++;
goto bad;
}
/*
* Check for fragmentation. If this frame
* has been broken up verify we have enough
* buffers to send all the fragments so all
* go out or none...
*/
STAILQ_INIT(&frags);
if ((m->m_flags & M_FRAG) &&
!ath_txfrag_setup(sc, &frags, m, ni)) {
DPRINTF(sc, ATH_DEBUG_XMIT,
@ -2140,6 +1804,7 @@ ath_start(struct ifnet *ifp)
ath_freetx(m);
goto bad;
}
ifp->if_opackets++;
nextfrag:
/*
* Pass the frame to the h/w for transmission.
@ -2189,13 +1854,6 @@ ath_start(struct ifnet *ifp)
}
sc->sc_wd_timer = 5;
#if 0
/*
* Flush stale frames from the fast-frame staging queue.
*/
if (ic->ic_opmode != IEEE80211_M_STA)
ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone);
#endif
}
}
@ -4335,10 +3993,18 @@ ath_rx_proc(void *arg, int npending)
if (ngood)
sc->sc_lastrx = tsf;
if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 &&
!IFQ_IS_EMPTY(&ifp->if_snd))
ath_start(ifp);
if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) {
#ifdef IEEE80211_SUPPORT_SUPERG
if (ic->ic_stageqdepth) {
ieee80211_age_stageq(ic, WME_AC_VO, 100);
ieee80211_age_stageq(ic, WME_AC_VI, 100);
ieee80211_age_stageq(ic, WME_AC_BE, 100);
ieee80211_age_stageq(ic, WME_AC_BK, 100);
}
#endif
if (!IFQ_IS_EMPTY(&ifp->if_snd))
ath_start(ifp);
}
#undef PA2DESC
}
@ -4346,13 +4012,12 @@ static void
ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum)
{
txq->axq_qnum = qnum;
txq->axq_ac = 0;
txq->axq_depth = 0;
txq->axq_intrcnt = 0;
txq->axq_link = NULL;
STAILQ_INIT(&txq->axq_q);
ATH_TXQ_LOCK_INIT(sc, txq);
TAILQ_INIT(&txq->axq_stageq);
txq->axq_curage = 0;
}
/*
@ -4429,6 +4094,7 @@ ath_tx_setup(struct ath_softc *sc, int ac, int haltype)
}
txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype);
if (txq != NULL) {
txq->axq_ac = ac;
sc->sc_ac2q[ac] = txq;
return 1;
} else
@ -5292,13 +4958,6 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ieee80211_process_callback(ni, bf->bf_m,
(bf->bf_txflags & HAL_TXDESC_NOACK) == 0 ?
ts->ts_status : HAL_TXERR_XRETRY);
/*
* Reclaim reference to node.
*
* NB: the node may be reclaimed here if, for example
* this is a DEAUTH message that was sent and the
* node was timed out due to inactivity.
*/
ieee80211_free_node(ni);
}
bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
@ -5316,11 +4975,13 @@ ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
ATH_TXBUF_UNLOCK(sc);
}
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Flush fast-frame staging queue when traffic slows.
*/
if (txq->axq_depth <= 1)
ath_ff_stageq_flush(sc, txq, ath_ff_always);
ieee80211_flush_stageq(ic, txq->axq_ac);
#endif
return nacked;
}
@ -6920,16 +6581,6 @@ ath_sysctlattach(struct ath_softc *sc)
"tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_tpcts, "I", "tx power for cts frames");
}
if (ath_hal_hasfastframes(sc->sc_ah)) {
sc->sc_fftxqmin = ATH_FF_TXQMIN;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0,
"min frames before fast-frame staging");
sc->sc_fftxqmax = ATH_FF_TXQMAX;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0,
"max queued frames before tail drop");
}
if (ath_hal_hasrfsilent(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0,

View File

@ -71,10 +71,6 @@
#define ATH_KEYMAX 128 /* max key cache size we handle */
#define ATH_KEYBYTES (ATH_KEYMAX/NBBY) /* storage space in bytes */
#define ATH_FF_TXQMIN 2 /* min txq depth for staging */
#define ATH_FF_TXQMAX 50 /* maximum # of queued frames allowed */
#define ATH_FF_STAGEMAX 5 /* max waiting period for staged frame*/
struct taskqueue;
struct kthread;
struct ath_buf;
@ -106,8 +102,6 @@ struct ath_node {
struct ath_buf {
STAILQ_ENTRY(ath_buf) bf_list;
TAILQ_ENTRY(ath_buf) bf_stagelist; /* stage queue list */
u_int32_t bf_age; /* age when placed on stageq */
int bf_nseg;
uint16_t bf_txflags; /* tx descriptor flags */
uint16_t bf_flags; /* status flags (below) */
@ -151,6 +145,7 @@ struct ath_descdma {
struct ath_txq {
u_int axq_qnum; /* hardware q number */
#define ATH_TXQ_SWQ (HAL_NUM_TX_QUEUES+1) /* qnum for s/w only queue */
u_int axq_ac; /* WME AC */
u_int axq_flags;
#define ATH_TXQ_PUTPENDING 0x0001 /* ath_hal_puttxbuf pending */
u_int axq_depth; /* queue depth (stat only) */
@ -159,13 +154,6 @@ struct ath_txq {
STAILQ_HEAD(, ath_buf) axq_q; /* transmit queue */
struct mtx axq_lock; /* lock on q and link */
char axq_name[12]; /* e.g. "ath0_txq4" */
/*
* Fast-frame state. The staging queue holds awaiting
* a fast-frame pairing. Buffers on this queue are
* assigned an ``age'' and flushed when they wait too long.
*/
TAILQ_HEAD(axq_headtype, ath_buf) axq_stageq;
u_int32_t axq_curage; /* queue age */
};
#define ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
@ -181,7 +169,6 @@ struct ath_txq {
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
STAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
(_tq)->axq_depth++; \
(_tq)->axq_curage++; \
} while (0)
#define ATH_TXQ_REMOVE_HEAD(_tq, _field) do { \
STAILQ_REMOVE_HEAD(&(_tq)->axq_q, _field); \

View File

@ -1795,11 +1795,6 @@ ipw_start_locked(struct ifnet *ifp)
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
continue;
}
if (ipw_tx_start(ifp, m, ni) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -1982,13 +1982,6 @@ iwi_start_locked(struct ifnet *ifp)
BPF_MTAP(ifp, m);
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (iwi_tx_start(ifp, m, ni, ac) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -2122,12 +2122,6 @@ iwn_start_locked(struct ifnet *ifp)
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
pri = M_WME_GETAC(m);
txq = &sc->txq[pri];
m = ieee80211_encap(ni, m);
if (m == NULL) {
ifp->if_oerrors++;
ieee80211_free_node(ni);
continue;
}
if (txq->queued >= IWN_TX_RING_COUNT - 8) {
/* XXX not right */
/* ring is nearly full, stop flow */

View File

@ -1289,21 +1289,10 @@ malo_start(struct ifnet *ifp)
sc->malo_stats.mst_tx_qstop++;
break;
}
/*
* Encapsulate the packet in prep for transmission.
*/
m = ieee80211_encap(ni, m);
if (m == NULL) {
DPRINTF(sc, MALO_DEBUG_XMIT,
"%s: encapsulation failure\n", __func__);
sc->malo_stats.mst_tx_encap++;
goto bad;
}
/*
* Pass the frame to the h/w for transmission.
*/
if (malo_tx_start(sc, ni, bf, m)) {
bad:
ifp->if_oerrors++;
if (bf != NULL) {
bf->bf_m = NULL;

View File

@ -1963,15 +1963,7 @@ rt2560_start_locked(struct ifnet *ifp)
sc->sc_flags |= RT2560_F_DATA_OACTIVE;
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (rt2560_tx_data(sc, m, ni) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -1660,15 +1660,7 @@ rt2661_start_locked(struct ifnet *ifp)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (rt2661_tx_data(sc, m, ni, ac) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -1354,12 +1354,6 @@ rum_start(struct ifnet *ifp)
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (rum_tx_data(sc, m, ni) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -1436,12 +1436,6 @@ ural_start(struct ifnet *ifp)
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (ural_tx_data(sc, m, ni) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -2697,12 +2697,6 @@ zyd_start(struct ifnet *ifp)
break;
}
ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (zyd_tx_data(sc, m, ni) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -89,6 +89,7 @@ __FBSDID("$FreeBSD$");
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_llc.h>
#include <net/if_media.h>
#include <net/if_types.h>
@ -978,6 +979,7 @@ wi_start_locked(struct ifnet *ifp)
struct mbuf *m0;
struct ieee80211_key *k;
struct wi_frame frmhdr;
const struct llc *llc;
int cur;
WI_LOCK_ASSERT(sc);
@ -996,19 +998,33 @@ wi_start_locked(struct ifnet *ifp)
ifp->if_drv_flags |= IFF_DRV_OACTIVE;
break;
}
/* NB: copy before 802.11 header is prepended */
m_copydata(m0, 0, ETHER_HDR_LEN,
(caddr_t)&frmhdr.wi_ehdr);
ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif;
m0 = ieee80211_encap(ni, m0);
if (m0 == NULL) {
ifp->if_oerrors++;
ieee80211_free_node(ni);
continue;
}
/* reconstruct 802.3 header */
wh = mtod(m0, struct ieee80211_frame *);
switch (wh->i_fc[1]) {
case IEEE80211_FC1_DIR_TODS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr2);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr3);
break;
case IEEE80211_FC1_DIR_NODS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr2);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr1);
break;
case IEEE80211_FC1_DIR_FROMDS:
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_shost,
wh->i_addr3);
IEEE80211_ADDR_COPY(frmhdr.wi_ehdr.ether_dhost,
wh->i_addr1);
break;
}
llc = (const struct llc *)(
mtod(m0, const uint8_t *) + ieee80211_hdrsize(wh));
frmhdr.wi_ehdr.ether_type = llc->llc_snap.ether_type;
frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX);
if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
k = ieee80211_crypto_encap(ni, m0);

View File

@ -2048,7 +2048,6 @@ wpi_start_locked(struct ifnet *ifp)
IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
if (m == NULL)
break;
/* no QoS encapsulation for EAPOL frames */
ac = M_WME_GETAC(m);
if (sc->txq[ac].queued > sc->txq[ac].count - 8) {
/* there is no place left in this ring */
@ -2057,12 +2056,6 @@ wpi_start_locked(struct ifnet *ifp)
break;
}
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
m = ieee80211_encap(ni, m);
if (m == NULL) {
ieee80211_free_node(ni);
ifp->if_oerrors++;
continue;
}
if (wpi_tx_data(sc, m, ni, ac) != 0) {
ieee80211_free_node(ni);
ifp->if_oerrors++;

View File

@ -189,6 +189,15 @@ SYSCTL_PROC(_net_wlan, OID_AUTO, addba_backoff, CTLFLAG_RW,
extern int ieee80211_addba_maxtries;
SYSCTL_INT(_net_wlan, OID_AUTO, addba_maxtries, CTLFLAG_RW,
&ieee80211_addba_maxtries, 0, "max ADDBA requests sent before backoff");
#ifdef IEEE80211_SUPPORT_SUPERG
extern int ieee80211_ffppsmin;
SYSCTL_INT(_net_wlan, OID_AUTO, ffppsmin, CTLFLAG_RW,
&ieee80211_ffppsmin, 0, "min packet rate before fast-frame staging");
extern int ieee80211_ffagemax;
SYSCTL_PROC(_net_wlan, OID_AUTO, ffagemax, CTLFLAG_RW,
&ieee80211_ffagemax, 0, ieee80211_sysctl_msecs_ticks, "I",
"max hold time for fast-frame staging (ms)");
#endif /* IEEE80211_SUPPORT_SUPERG */
static int
ieee80211_sysctl_inact(SYSCTL_HANDLER_ARGS)

View File

@ -2038,6 +2038,10 @@ hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
ieee80211_ht_updatehtcap(ni, htcap);
} else if (ni->ni_flags & IEEE80211_NODE_HT)
ieee80211_ht_node_cleanup(ni);
#ifdef IEEE80211_SUPPORT_SUPERG
else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
ieee80211_ff_node_cleanup(ni);
#endif
/*
* Allow AMPDU operation only with unencrypted traffic
* or AES-CCM; the 11n spec only specifies these ciphers

View File

@ -217,7 +217,8 @@ struct ieee80211_stats {
uint8_t is_rx_authfail_code; /* last rx'd auth fail reason */
uint32_t is_beacon_miss; /* beacon miss notification */
uint32_t is_rx_badstate; /* rx discard state != RUN */
uint32_t is_spare[12];
uint32_t is_ff_flush; /* ff's flush'd from stageq */
uint32_t is_spare[11];
};
/*

View File

@ -929,6 +929,10 @@ node_cleanup(struct ieee80211_node *ni)
*/
if (ni->ni_flags & IEEE80211_NODE_HT)
ieee80211_ht_node_cleanup(ni);
#ifdef IEEE80211_SUPPORT_SUPERG
else if (ni->ni_ath_flags & IEEE80211_NODE_ATH)
ieee80211_ff_node_cleanup(ni);
#endif
/*
* Clear AREF flag that marks the authorization refcnt bump
* has happened. This is probably not needed as the node

View File

@ -217,6 +217,7 @@ ieee80211_start(struct ifnet *ifp)
ieee80211_free_node(ni);
continue;
}
if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) &&
(m->m_flags & M_PWR_SAV) == 0) {
/*
@ -241,23 +242,26 @@ ieee80211_start(struct ifnet *ifp)
continue;
}
BPF_MTAP(ifp, m); /* 802.11 tx path */
BPF_MTAP(ifp, m); /* 802.3 tx */
#ifdef IEEE80211_SUPPORT_SUPERG
if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF)) {
m = ieee80211_ff_check(ni, m);
if (m == NULL) {
/* NB: any ni ref held on stageq */
continue;
}
}
#endif /* IEEE80211_SUPPORT_SUPERG */
/*
* XXX When ni is associated with a WDS link then
* the vap will be the WDS vap but ni_vap will point
* to the ap vap the station associated to. Once
* we handoff the packet to the driver the callback
* to ieee80211_encap won't be able to tell if the
* packet should be encapsulated for WDS or not (e.g.
* multicast frames will not be handled correctly).
* We hack this by marking the mbuf so ieee80211_encap
* can do the right thing.
* Encapsulate the packet in prep for transmission.
*/
if (vap->iv_opmode == IEEE80211_M_WDS)
m->m_flags |= M_WDS;
else
m->m_flags &= ~M_WDS;
m = ieee80211_encap(vap, ni, m);
if (m == NULL) {
/* NB: stat+msg handled in ieee80211_encap */
ieee80211_free_node(ni);
continue;
}
/*
* Stash the node pointer and hand the frame off to
@ -267,7 +271,6 @@ ieee80211_start(struct ifnet *ifp)
*/
m->m_pkthdr.rcvif = (void *)ni;
/* XXX defer if_start calls? */
error = parent->if_transmit(parent, m);
if (error != 0) {
/* NB: IFQ_HANDOFF reclaims mbuf */
@ -852,10 +855,10 @@ ieee80211_crypto_getmcastkey(struct ieee80211vap *vap,
* marked EAPOL frames w/ M_EAPOL.
*/
struct mbuf *
ieee80211_encap(struct ieee80211_node *ni, struct mbuf *m)
ieee80211_encap(struct ieee80211vap *vap, struct ieee80211_node *ni,
struct mbuf *m)
{
#define WH4(wh) ((struct ieee80211_frame_addr4 *)(wh))
struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
struct ether_header eh;
struct ieee80211_frame *wh;
@ -955,6 +958,9 @@ ieee80211_encap(struct ieee80211_node *ni, struct mbuf *m)
llc->llc_snap.ether_type = eh.ether_type;
} else {
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* Aggregated frame.
*/
m = ieee80211_ff_encap(vap, m, hdrspace, key);
if (m == NULL)
#endif

View File

@ -75,8 +75,9 @@ void ieee80211_start(struct ifnet *);
int ieee80211_send_nulldata(struct ieee80211_node *);
int ieee80211_classify(struct ieee80211_node *, struct mbuf *m);
struct mbuf *ieee80211_mbuf_adjust(struct ieee80211vap *, int,
struct ieee80211_key *, struct mbuf *);
struct mbuf *ieee80211_encap(struct ieee80211_node *, struct mbuf *);
struct ieee80211_key *, struct mbuf *);
struct mbuf *ieee80211_encap(struct ieee80211vap *, struct ieee80211_node *,
struct mbuf *);
int ieee80211_send_mgmt(struct ieee80211_node *, int, int);
struct ieee80211_appie;
int ieee80211_send_probereq(struct ieee80211_node *ni,

View File

@ -1459,6 +1459,11 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0,
ieee80211_setup_htrates(ni, htcap,
IEEE80211_F_JOIN | IEEE80211_F_DOBRS);
ieee80211_setup_basic_htrates(ni, htinfo);
} else {
#ifdef IEEE80211_SUPPORT_SUPERG
if (IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_ATH))
ieee80211_ff_node_init(ni);
#endif
}
/*
* Configure state now that we are associated.

View File

@ -76,12 +76,21 @@ __FBSDID("$FreeBSD$");
#define ATH_FF_SNAP_ORGCODE_1 0x03
#define ATH_FF_SNAP_ORGCODE_2 0x7f
#define ATH_FF_TXQMIN 2 /* min txq depth for staging */
#define ATH_FF_TXQMAX 50 /* maximum # of queued frames allowed */
#define ATH_FF_STAGEMAX 5 /* max waiting period for staged frame*/
#define ETHER_HEADER_COPY(dst, src) \
memcpy(dst, src, sizeof(struct ether_header))
/* XXX public for sysctl hookup */
int ieee80211_ffppsmin = 2; /* pps threshold for ff aggregation */
int ieee80211_ffagemax = -1; /* max time frames held on stage q */
/*
 * One-time superg (fast-frame) setup; converts the default staging
 * hold time (150 ms) to ticks now that the clock rate is known.
 */
void
ieee80211_superg_attach(struct ieee80211com *ic)
{
ieee80211_ffagemax = msecs_to_ticks(150);
}
void
@ -354,10 +363,10 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
}
m1->m_nextpkt = NULL;
/*
* Include fast frame headers in adjusting header
* layout; this allocates space according to what
* ff_encap will do.
* Include fast frame headers in adjusting header layout.
*/
KASSERT(m1->m_len >= sizeof(eh1), ("no ethernet header!"));
ETHER_HEADER_COPY(&eh1, mtod(m1, caddr_t));
m1 = ieee80211_mbuf_adjust(vap,
hdrspace + sizeof(struct llc) + sizeof(uint32_t) + 2 +
sizeof(struct ether_header),
@ -461,6 +470,314 @@ ieee80211_ff_encap(struct ieee80211vap *vap, struct mbuf *m1, int hdrspace,
return NULL;
}
/*
 * Encapsulate a staged frame and hand it to the parent device for
 * transmission.  The node reference held for the staged frame is
 * reclaimed on any failure (encap or transmit).
 */
static void
ff_transmit(struct ieee80211_node *ni, struct mbuf *m)
{
struct ieee80211vap *vap = ni->ni_vap;
int error;
/* encap and xmit */
m = ieee80211_encap(vap, ni, m);
if (m != NULL) {
struct ifnet *ifp = vap->iv_ifp;
struct ifnet *parent = ni->ni_ic->ic_ifp;
error = parent->if_transmit(parent, m);
if (error != 0) {
/* NB: IFQ_HANDOFF reclaims mbuf */
ieee80211_free_node(ni);
} else {
ifp->if_opackets++;
}
} else
ieee80211_free_node(ni);	/* encap failed; drop node ref */
}
/*
 * Flush frames to device; note we re-use the linked list
 * the frames were stored on and use the sentinel (unchanged)
 * which may be non-NULL.  Frames in [head, last) are sent;
 * last itself is not transmitted.
 */
static void
ff_flush(struct mbuf *head, struct mbuf *last)
{
struct mbuf *m, *next;
struct ieee80211_node *ni;
struct ieee80211vap *vap;
for (m = head; m != last; m = next) {
next = m->m_nextpkt;
m->m_nextpkt = NULL;	/* detach from staging chain */
/* node ref was stashed in rcvif when the frame was staged */
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
vap = ni->ni_vap;
IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
"%s: flush frame, age %u", __func__, M_AGE_GET(m));
vap->iv_stats.is_ff_flush++;
ff_transmit(ni, m);
}
}
/*
 * Age frames on the staging queue.
 *
 * Dequeue every frame whose residual age is below quanta (ticks),
 * clearing the per-node ampdu-state reference to it, then transmit
 * the removed frames.  The surviving head frame (if any) has its
 * age reduced by quanta.  Caller guarantees the queue is non-empty.
 */
void
ieee80211_ff_age(struct ieee80211com *ic, struct ieee80211_stageq *sq, int quanta)
{
struct mbuf *m, *head;
struct ieee80211_node *ni;
struct ieee80211_tx_ampdu *tap;
KASSERT(sq->head != NULL, ("stageq empty"));
IEEE80211_LOCK(ic);
head = sq->head;
while ((m = sq->head) != NULL && M_AGE_GET(m) < quanta) {
/* clear tap ref to frame */
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
tap = &ni->ni_tx_ampdu[M_WME_GETAC(m)];
KASSERT(tap->txa_private == m, ("staging queue empty"));
tap->txa_private = NULL;
sq->head = m->m_nextpkt;
sq->depth--;
ic->ic_stageqdepth--;
}
if (m == NULL)
sq->tail = NULL;	/* queue drained completely */
else
M_AGE_SUB(m, quanta);	/* age the new head in place */
IEEE80211_UNLOCK(ic);
/* transmit everything removed above; m is the sentinel */
ff_flush(head, m);
}
/*
 * Append m to the staging queue with an age stamp.
 *
 * The head frame carries its residual hold time directly; later
 * frames appear to be stamped relative to the head's age so the
 * aging pass need only decrement the head (NOTE(review): subtracting
 * only M_AGE_GET(sq->head) assumes that encoding — confirm against
 * ieee80211_ff_age).
 */
static void
stageq_add(struct ieee80211_stageq *sq, struct mbuf *m)
{
int age = ieee80211_ffagemax;
if (sq->tail != NULL) {
sq->tail->m_nextpkt = m;
age -= M_AGE_GET(sq->head);
} else
sq->head = m;
KASSERT(age >= 0, ("age %d", age));
M_AGE_SET(m, age);
m->m_nextpkt = NULL;
sq->tail = m;
sq->depth++;
}
/*
 * Unlink mstaged from the staging queue's singly-linked list,
 * fixing up head/tail and the depth count.  Logs a diagnostic
 * if the mbuf is not found on the queue.
 */
static void
stageq_remove(struct ieee80211_stageq *sq, struct mbuf *mstaged)
{
struct mbuf *cur, *prev;

prev = NULL;
cur = sq->head;
while (cur != NULL) {
	if (cur == mstaged) {
		if (prev == NULL)
			sq->head = cur->m_nextpkt;
		else
			prev->m_nextpkt = cur->m_nextpkt;
		if (sq->tail == cur)
			sq->tail = prev;
		sq->depth--;
		return;
	}
	prev = cur;
	cur = cur->m_nextpkt;
}
printf("%s: packet not found\n", __func__);
}
/*
 * Estimate the on-air time for m1, optionally combined with a staged
 * frame m2, at the node's current tx rate.  Used to check a
 * prospective fast-frame aggregate against the TxOp limit.
 */
static uint32_t
ff_approx_txtime(struct ieee80211_node *ni,
const struct mbuf *m1, const struct mbuf *m2)
{
struct ieee80211com *ic = ni->ni_ic;
struct ieee80211vap *vap = ni->ni_vap;
uint32_t framelen;
/*
* Approximate the frame length to be transmitted. A swag to add
* the following maximal values to the skb payload:
* - 32: 802.11 encap + CRC
* - 24: encryption overhead (if wep bit)
* - 4 + 6: fast-frame header and padding
* - 16: 2 LLC FF tunnel headers
* - 14: 1 802.3 FF tunnel header (mbuf already accounts for 2nd)
*/
framelen = m1->m_pkthdr.len + 32 +
ATH_FF_MAX_HDR_PAD + ATH_FF_MAX_SEP_PAD + ATH_FF_MAX_HDR;
if (vap->iv_flags & IEEE80211_F_PRIVACY)
framelen += 24;
if (m2 != NULL)
framelen += m2->m_pkthdr.len;	/* include staged partner */
return ieee80211_compute_duration(ic->ic_rt, framelen, ni->ni_txrate, 0);
}
/*
 * Check if the supplied frame can be partnered with an existing
 * or pending frame.  Return a reference to any frame that should be
 * sent on return; otherwise return NULL.
 *
 * Called from the transmit path with a classified frame (M_WME_GETAC
 * valid) whose node reference is held in m->m_pkthdr.rcvif.  On
 * return the caller receives either:
 *   - the original frame (aggregation impossible or undesirable),
 *   - a 2-frame chain linked via m_nextpkt and marked M_FF for
 *     fast-frame encapsulation, or
 *   - NULL: the frame was placed on the staging queue to wait for
 *     a partner (or to be flushed by ieee80211_ff_age).
 */
struct mbuf *
ieee80211_ff_check(struct ieee80211_node *ni, struct mbuf *m)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	const int pri = M_WME_GETAC(m);
	struct ieee80211_stageq *sq;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *mstaged;
	uint32_t txtime, limit;
	/*
	 * Check if the supplied frame can be aggregated.
	 *
	 * NB: we allow EAPOL frames to be aggregated with other ucast traffic.
	 * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't
	 * be aggregated with other types of frames when encryption is on?
	 */
	IEEE80211_LOCK(ic);
	/* per-AC fast-frames state is piggybacked on the A-MPDU tx state */
	tap = &ni->ni_tx_ampdu[pri];
	mstaged = tap->txa_private; /* NB: we reuse AMPDU state */
	/* update the packets/sec estimate used for the ffppsmin gate below */
	ieee80211_txampdu_count_packet(tap);
	/*
	 * When not in station mode never aggregate a multicast
	 * frame; this insures, for example, that a combined frame
	 * does not require multiple encryption keys.
	 */
	if (vap->iv_opmode != IEEE80211_M_STA &&
	    ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) {
		/* XXX flush staged frame? */
		IEEE80211_UNLOCK(ic);
		return m;
	}
	/*
	 * If there is no frame to combine with and the pps is
	 * too low; then do not attempt to aggregate this frame.
	 */
	if (mstaged == NULL &&
	    ieee80211_txampdu_getpps(tap) < ieee80211_ffppsmin) {
		IEEE80211_UNLOCK(ic);
		return m;
	}
	sq = &ic->ic_ff_stageq[pri];
	/*
	 * Check the txop limit to insure the aggregate fits.
	 */
	limit = IEEE80211_TXOP_TO_US(
		ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit);
	if (limit != 0 &&
	    (txtime = ff_approx_txtime(ni, m, mstaged)) > limit) {
		/*
		 * Aggregate too long, return to the caller for direct
		 * transmission.  In addition, flush any pending frame
		 * before sending this one.
		 */
		IEEE80211_DPRINTF(vap, IEEE80211_MSG_SUPERG,
		    "%s: txtime %u exceeds txop limit %u\n",
		    __func__, txtime, limit);
		tap->txa_private = NULL;
		if (mstaged != NULL)
			stageq_remove(sq, mstaged);
		IEEE80211_UNLOCK(ic);
		if (mstaged != NULL) {
			IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
			    "%s: flush staged frame", __func__);
			/* encap and xmit */
			ff_transmit(ni, mstaged);
		}
		return m; /* NB: original frame */
	}
	/*
	 * An aggregation candidate.  If there's a frame to partner
	 * with then combine and return for processing.  Otherwise
	 * save this frame and wait for a partner to show up (or
	 * the frame to be flushed).  Note that staged frames also
	 * hold their node reference.
	 */
	if (mstaged != NULL) {
		tap->txa_private = NULL;
		stageq_remove(sq, mstaged);
		IEEE80211_UNLOCK(ic);
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: aggregate fast-frame", __func__);
		/*
		 * Release the node reference; we only need
		 * the one already in mstaged.
		 */
		KASSERT(mstaged->m_pkthdr.rcvif == (void *)ni,
		    ("rcvif %p ni %p", mstaged->m_pkthdr.rcvif, ni));
		ieee80211_free_node(ni);
		/* chain the new frame behind the staged one and flag it */
		m->m_nextpkt = NULL;
		mstaged->m_nextpkt = m;
		mstaged->m_flags |= M_FF; /* NB: mark for encap work */
	} else {
		m->m_pkthdr.rcvif = (void *)ni; /* NB: hold node reference */
		KASSERT(tap->txa_private == NULL,
		    ("txa_private %p", tap->txa_private));
		tap->txa_private = m;
		stageq_add(sq, m);
		ic->ic_stageqdepth++;
		IEEE80211_UNLOCK(ic);
		IEEE80211_NOTE(vap, IEEE80211_MSG_SUPERG, ni,
		    "%s: stage frame, %u queued", __func__, sq->depth);
		/* NB: mstaged is NULL */
	}
	return mstaged;
}
/*
 * Per-node fast-frames setup; invoked when a node (re)associates.
 */
void
ieee80211_ff_node_init(struct ieee80211_node *ni)
{
	/*
	 * Clean FF state on re-associate.  This handles the case
	 * where a station leaves w/o notifying us and then returns
	 * before node is reaped for inactivity.
	 */
	ieee80211_ff_node_cleanup(ni);
}
/*
 * Reclaim any per-node fast-frames state: pull staged frames off
 * the per-AC staging queues, then free the mbufs and drop the node
 * references they hold.  Called on node teardown and re-associate.
 */
void
ieee80211_ff_node_cleanup(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_tx_ampdu *tap;
	struct mbuf *m, *next, *head;
	int ac;

	/*
	 * Collect staged frames under the com lock, then release
	 * them (and the node refs they hold) with the lock dropped.
	 */
	IEEE80211_LOCK(ic);
	head = NULL;
	for (ac = 0; ac < WME_NUM_AC; ac++) {
		tap = &ni->ni_tx_ampdu[ac];
		m = tap->txa_private;
		if (m != NULL) {
			tap->txa_private = NULL;
			stageq_remove(&ic->ic_ff_stageq[ac], m);
			m->m_nextpkt = head;
			head = m;
		}
	}
	IEEE80211_UNLOCK(ic);

	for (m = head; m != NULL; m = next) {
		/* NB: fetch m_nextpkt before m_freem reclaims the mbuf */
		next = m->m_nextpkt;
		m_freem(m);
		ieee80211_free_node(ni);
	}
}
/*
* Switch between turbo and non-turbo operating modes.
* Use the specified channel flags to locate the new

View File

@ -68,6 +68,26 @@ void ieee80211_parse_ath(struct ieee80211_node *, uint8_t *);
int ieee80211_parse_athparams(struct ieee80211_node *, uint8_t *,
const struct ieee80211_frame *);
void ieee80211_ff_node_init(struct ieee80211_node *);
void ieee80211_ff_node_cleanup(struct ieee80211_node *);
struct mbuf *ieee80211_ff_check(struct ieee80211_node *, struct mbuf *);
void ieee80211_ff_age(struct ieee80211com *, struct ieee80211_stageq *, int);
static __inline void
ieee80211_flush_stageq(struct ieee80211com *ic, int ac)
{
if (ic->ic_ff_stageq[ac].depth)
ieee80211_ff_age(ic, &ic->ic_ff_stageq[ac], 0x7fffffff);
}
static __inline void
ieee80211_age_stageq(struct ieee80211com *ic, int ac, int quanta)
{
if (ic->ic_ff_stageq[ac].depth)
ieee80211_ff_age(ic, &ic->ic_ff_stageq[ac], quanta);
}
struct mbuf *ieee80211_ff_encap(struct ieee80211vap *, struct mbuf *,
int, struct ieee80211_key *);

View File

@ -107,6 +107,13 @@ struct ieee80211_appie {
};
struct ieee80211_tdma_param;
struct ieee80211_rate_table;
/*
 * Fast-frames staging queue: a FIFO of frames (linked through
 * m_nextpkt) awaiting an aggregation partner; one per WME AC.
 */
struct ieee80211_stageq {
	struct mbuf *head; /* frames linked w/ m_nextpkt */
	struct mbuf *tail; /* last frame in queue */
	int depth; /* # items on head */
};
struct ieee80211com {
struct ifnet *ic_ifp; /* associated device */
@ -197,6 +204,10 @@ struct ieee80211com {
int ic_lastnonerp; /* last time non-ERP sta noted*/
int ic_lastnonht; /* last time non-HT sta noted */
/* fast-frames staging q */
struct ieee80211_stageq ic_ff_stageq[WME_NUM_AC];
int ic_stageqdepth; /* cumulative depth */
/* virtual ap create/delete */
struct ieee80211vap* (*ic_vap_create)(struct ieee80211com *,
const char name[IFNAMSIZ], int unit,