Begin merging in some of my 802.11n TX aggregation driver changes.

* Add a PCU lock, which isn't currently used but will eventually be
  used to serialise some of the driver access.

* Add in all the software TX aggregation state that's kept per-node
  and per-TID.

* Add in the software retry and aggregation state to ath_buf.

* Add in hooks to ath_softc for aggregation state and the (upcoming)
  aggregation TX state calls.

* Add / fix the HAL access macros.

Obtained from:	Linux, ath9k
Sponsored by:	Hobnob, Inc.
Adrian Chadd 2011-11-08 02:12:11 +00:00
parent 712a80b873
commit 3dd85b265f
5 changed files with 327 additions and 7 deletions


@ -3162,6 +3162,11 @@ ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
}
ath_rate_node_init(sc, an);
/* Set up the mutex; there's no associd yet, so name the lock after the device and node pointer */
snprintf(an->an_name, sizeof(an->an_name), "%s: node %p",
device_get_nameunit(sc->sc_dev), an);
mtx_init(&an->an_mtx, an->an_name, NULL, MTX_DEF);
DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
return &an->an_node;
}
@ -3173,7 +3178,7 @@ ath_node_free(struct ieee80211_node *ni)
struct ath_softc *sc = ic->ic_ifp->if_softc;
DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);
mtx_destroy(&ATH_NODE(ni)->an_mtx);
ath_rate_node_cleanup(sc, ATH_NODE(ni));
sc->sc_node_free(ni);
}


@ -190,11 +190,13 @@ ath_ahb_attach(device_t dev)
}
ATH_LOCK_INIT(sc);
ATH_PCU_LOCK_INIT(sc);
error = ath_attach(AR9130_DEVID, sc);
if (error == 0) /* success */
return 0;
ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
bus_dma_tag_destroy(sc->sc_dmat);
bad3:
@ -234,6 +236,7 @@ ath_ahb_detach(device_t dev)
if (sc->sc_eepromdata)
free(sc->sc_eepromdata, M_TEMP);
ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
return (0);


@ -190,11 +190,13 @@ ath_pci_attach(device_t dev)
}
ATH_LOCK_INIT(sc);
ATH_PCU_LOCK_INIT(sc);
error = ath_attach(pci_get_device(dev), sc);
if (error == 0) /* success */
return 0;
ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
bus_dma_tag_destroy(sc->sc_dmat);
bad3:
@ -230,6 +232,7 @@ ath_pci_detach(device_t dev)
bus_dma_tag_destroy(sc->sc_dmat);
bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, psc->sc_sr);
ATH_PCU_LOCK_DESTROY(sc);
ATH_LOCK_DESTROY(sc);
return (0);


@ -298,6 +298,68 @@ ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
}
static int
ath_sysctl_txagg(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int i, t, param = 0;
int error;
struct ath_buf *bf;
error = sysctl_handle_int(oidp, &param, 0, req);
if (error || !req->newptr)
return error;
if (param != 1)
return 0;
printf("no tx bufs (empty list): %d\n", sc->sc_stats.ast_tx_getnobuf);
printf("no tx bufs (was busy): %d\n", sc->sc_stats.ast_tx_getbusybuf);
printf("aggr single packet: %d\n",
sc->sc_aggr_stats.aggr_single_pkt);
printf("aggr single packet w/ BAW closed: %d\n",
sc->sc_aggr_stats.aggr_baw_closed_single_pkt);
printf("aggr non-baw packet: %d\n",
sc->sc_aggr_stats.aggr_nonbaw_pkt);
printf("aggr aggregate packet: %d\n",
sc->sc_aggr_stats.aggr_aggr_pkt);
printf("aggr single packet low hwq: %d\n",
sc->sc_aggr_stats.aggr_low_hwq_single_pkt);
printf("aggr sched, no work: %d\n",
sc->sc_aggr_stats.aggr_sched_nopkt);
for (i = 0; i < 64; i++) {
printf("%2d: %10d ", i, sc->sc_aggr_stats.aggr_pkts[i]);
if (i % 4 == 3)
printf("\n");
}
printf("\n");
for (i = 0; i < HAL_NUM_TX_QUEUES; i++) {
if (ATH_TXQ_SETUP(sc, i)) {
printf("HW TXQ %d: axq_depth=%d, axq_aggr_depth=%d\n",
i,
sc->sc_txq[i].axq_depth,
sc->sc_txq[i].axq_aggr_depth);
}
}
i = t = 0;
ATH_TXBUF_LOCK(sc);
STAILQ_FOREACH(bf, &sc->sc_txbuf, bf_list) {
if (bf->bf_flags & ATH_BUF_BUSY) {
printf("Busy: %d\n", t);
i++;
}
t++;
}
ATH_TXBUF_UNLOCK(sc);
printf("Total TX buffers: %d; Total TX buffers busy: %d\n",
t, i);
return 0;
}
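
The ath_tx_aggr_stats structure backing these counters is not part of this diff, so the following is only a sketch: the field names are the ones the handler above touches, the counter type (u_int32_t) is assumed, and the comments simply restate the printf labels.

struct ath_tx_aggr_stats {
    u_int32_t   aggr_single_pkt;            /* single frames sent */
    u_int32_t   aggr_baw_closed_single_pkt; /* single frames sent with the BAW closed */
    u_int32_t   aggr_nonbaw_pkt;            /* non-BAW frames sent */
    u_int32_t   aggr_aggr_pkt;              /* aggregate frames sent */
    u_int32_t   aggr_low_hwq_single_pkt;    /* single frames sent because the hw queue was low */
    u_int32_t   aggr_sched_nopkt;           /* scheduler ran with no work to do */
    u_int32_t   aggr_pkts[64];              /* per-aggregate-size histogram; 64 matches the loop above */
};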
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
@ -387,6 +449,24 @@ ath_sysctl_setcca(SYSCTL_HANDLER_ARGS)
}
#endif /* IEEE80211_SUPPORT_TDMA */
static int
ath_sysctl_forcebstuck(SYSCTL_HANDLER_ARGS)
{
struct ath_softc *sc = arg1;
int val = 0;
int error;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return error;
if (val == 0)
return 0;
taskqueue_enqueue_fast(sc->sc_tq, &sc->sc_bstucktask);
val = 0;
return 0;
}
void
ath_sysctlattach(struct ath_softc *sc)
{
@ -465,6 +545,15 @@ ath_sysctlattach(struct ath_softc *sc)
"rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_rfkill, "I", "enable/disable RF kill switch");
}
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"txagg", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_txagg, "I", "");
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"forcebstuck", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ath_sysctl_forcebstuck, "I", "");
if (ath_hal_hasintmit(ah)) {
SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"intmit", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
@ -474,6 +563,17 @@ ath_sysctlattach(struct ath_softc *sc)
SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"monpass", CTLFLAG_RW, &sc->sc_monpass, 0,
"mask of error frames to pass when monitoring");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"hwq_limit", CTLFLAG_RW, &sc->sc_hwq_limit, 0,
"");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tid_hwq_lo", CTLFLAG_RW, &sc->sc_tid_hwq_lo, 0,
"");
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"tid_hwq_hi", CTLFLAG_RW, &sc->sc_tid_hwq_hi, 0,
"");
#ifdef IEEE80211_SUPPORT_TDMA
if (ath_hal_macversion(ah) > 0x78) {
sc->sc_tdmadbaprep = 2;
@ -510,6 +610,8 @@ ath_sysctl_clearstats(SYSCTL_HANDLER_ARGS)
if (val == 0)
return 0; /* Not clearing the stats is still valid */
memset(&sc->sc_stats, 0, sizeof(sc->sc_stats));
memset(&sc->sc_aggr_stats, 0, sizeof(sc->sc_aggr_stats));
val = 0;
return 0;
}


@ -83,12 +83,73 @@ struct taskqueue;
struct kthread;
struct ath_buf;
#define ATH_TID_MAX_BUFS (2 * IEEE80211_AGGR_BAWMAX)
/*
* Per-TID state
*
* Note that the extra slot (TID 16, i.e. WME_NUM_TID) is used for
* handling non-QoS frames.
*/
struct ath_tid {
TAILQ_HEAD(,ath_buf) axq_q; /* pending buffers */
u_int axq_depth; /* SW queue depth */
char axq_name[48]; /* lock name */
struct ath_node *an; /* pointer to parent */
int tid; /* tid */
int ac; /* which AC gets this traffic */
int hwq_depth; /* how many buffers are on HW */
/*
* Entry on the ath_txq when there's traffic
* to send
*/
TAILQ_ENTRY(ath_tid) axq_qelem;
int sched;
int paused; /* >0 if the TID has been paused */
/*
* Is the TID being cleaned up after a transition
* from aggregation to non-aggregation?
* When this is set to 1, this TID will be paused
* and no further traffic will be queued until all
* the hardware packets pending for this TID have been
* TXed/completed; at which point (non-aggregation)
* traffic will resume being TXed.
*/
int cleanup_inprogress;
/*
* How many hardware-queued packets are
* waiting to be cleaned up.
* This is only valid if cleanup_inprogress is 1.
*/
int incomp;
/*
* The following implements a ring representing
* the frames in the current BAW.
* To avoid copying the array content each time
* the BAW is moved, the baw_head/baw_tail point
* to the current BAW begin/end; when the BAW is
* shifted the head/tail of the array are also
* appropriately shifted.
*/
/* active tx buffers, beginning at current BAW */
struct ath_buf *tx_buf[ATH_TID_MAX_BUFS];
/* where the baw head is in the array */
int baw_head;
/* where the BAW tail is in the array */
int baw_tail;
};
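
As an illustration of the head/tail ring described in the comment above, a slot lookup might look like the sketch below. This is not code from the commit: the helper name is hypothetical and "baw_seqstart" is assumed to be the 802.11 sequence number at the head of the window.

/*
 * Hypothetical helper: find the tx_buf[] slot for a frame inside the
 * current BAW, walking forward from baw_head and wrapping around the
 * ring rather than copying the array when the window slides.
 */
static __inline int
ath_tx_baw_slot(struct ath_tid *tid, int baw_seqstart, int seqno)
{
    /* How far into the window this sequence number sits */
    int offset = IEEE80211_SEQ_SUB(seqno, baw_seqstart);

    /* Walk forward from baw_head, wrapping around the ring */
    return ((tid->baw_head + offset) % ATH_TID_MAX_BUFS);
}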
/* driver-specific node state */
struct ath_node {
struct ieee80211_node an_node; /* base class */
u_int8_t an_mgmtrix; /* min h/w rate index */
u_int8_t an_mcastrix; /* mcast h/w rate index */
struct ath_buf *an_ff_buf[WME_NUM_AC]; /* ff staging area */
struct ath_tid an_tid[IEEE80211_TID_SIZE]; /* per-TID state */
char an_name[32]; /* eg "wlan0_a1" */
struct mtx an_mtx; /* protecting the ath_node state */
/* variable-length rate control state follows */
};
#define ATH_NODE(ni) ((struct ath_node *)(ni))
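
To illustrate the note above about the extra non-QoS slot in an_tid[], a lookup helper might, hypothetically, be written as below; the helper name is not from this commit and is purely illustrative.

/*
 * Hypothetical helper: map a frame to its per-TID state.  QoS frames
 * use their own TID (0..15); everything else shares the extra slot
 * at index WME_NUM_TID (16).
 */
static __inline struct ath_tid *
ath_tx_gettid(struct ath_node *an, const struct ieee80211_frame *wh)
{
    int tid;

    if (IEEE80211_QOS_HAS_SEQ(wh))
        tid = ((const struct ieee80211_qosframe *)wh)->i_qos[0] &
            IEEE80211_QOS_TID;
    else
        tid = WME_NUM_TID;

    return (&an->an_tid[tid]);
}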
@ -110,6 +171,7 @@ struct ath_node {
struct ath_buf {
STAILQ_ENTRY(ath_buf) bf_list;
struct ath_buf * bf_next; /* next buffer in the aggregate */
int bf_nseg;
uint16_t bf_txflags; /* tx descriptor flags */
uint16_t bf_flags; /* status flags (below) */
@ -119,9 +181,63 @@ struct ath_buf {
bus_dmamap_t bf_dmamap; /* DMA map for mbuf chain */
struct mbuf *bf_m; /* mbuf for buf */
struct ieee80211_node *bf_node; /* pointer to the node */
struct ath_desc *bf_lastds; /* last descriptor for comp status */
struct ath_buf *bf_last; /* last buffer in aggregate, or self for non-aggregate */
bus_size_t bf_mapsize;
#define ATH_MAX_SCATTER ATH_TXDESC /* max(tx,rx,beacon) desc's */
bus_dma_segment_t bf_segs[ATH_MAX_SCATTER];
/*
* Completion function to call on TX complete (fail or not).
* "fail" is set to 1 if the queue entries were removed through a
* call to ath_tx_draintxq() rather than completing normally.
* (A usage sketch follows this struct.)
*/
void (*bf_comp)(struct ath_softc *sc, struct ath_buf *bf, int fail);
/* This state is kept to support software retries and aggregation */
struct {
int bfs_seqno; /* sequence number of this packet */
int bfs_retries; /* retry count */
uint16_t bfs_tid; /* packet TID (or TID_MAX for no QoS) */
uint16_t bfs_pri; /* packet AC priority */
struct ath_txq *bfs_txq; /* eventual dest hardware TXQ */
uint16_t bfs_pktdur; /* packet duration (at current rate?) */
uint16_t bfs_nframes; /* number of frames in aggregate */
uint16_t bfs_ndelim; /* number of delims for padding */
int bfs_aggr:1; /* part of aggregate? */
int bfs_aggrburst:1; /* part of aggregate burst? */
int bfs_isretried:1; /* retried frame? */
int bfs_dobaw:1; /* actually check against BAW? */
int bfs_addedbaw:1; /* has been added to the BAW */
int bfs_shpream:1; /* use short preamble */
int bfs_istxfrag:1; /* is fragmented */
int bfs_ismrr:1; /* do multi-rate TX retry */
int bfs_doprot:1; /* do RTS/CTS based protection */
int bfs_doratelookup:1; /* do rate lookup before each TX */
int bfs_nfl; /* next fragment length */
/*
* These fields are passed into the
* descriptor setup functions.
*/
HAL_PKT_TYPE bfs_atype; /* packet type */
int bfs_pktlen; /* length of this packet */
int bfs_hdrlen; /* length of this packet header */
uint16_t bfs_al; /* length of aggregate */
int bfs_flags; /* HAL descriptor flags */
int bfs_txrate0; /* first TX rate */
int bfs_try0; /* first try count */
uint8_t bfs_ctsrate0; /* Non-zero - use this as ctsrate */
int bfs_keyix; /* crypto key index */
int bfs_txpower; /* tx power */
int bfs_txantenna; /* TX antenna config */
enum ieee80211_protmode bfs_protmode;
HAL_11N_RATE_SERIES bfs_rc11n[ATH_RC_NUM]; /* 11n TX series */
int bfs_ctsrate; /* CTS rate */
int bfs_ctsduration; /* CTS duration (pre-11n NICs) */
struct ath_rc_series bfs_rc[ATH_RC_NUM]; /* non-11n TX series */
} bf_state;
};
typedef STAILQ_HEAD(, ath_buf) ath_bufhead;
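
A hedged sketch of the bf_comp contract noted above: the handler name is hypothetical, DMA unmap and mbuf release are omitted for brevity, and the free-list handling simply mirrors the ATH_TXBUF_LOCK usage shown in the sysctl handler earlier.

/*
 * Hypothetical completion handler.  "fail" is non-zero when the buffer
 * was pulled off a queue via ath_tx_draintxq() rather than completing
 * in hardware, so any per-TID/BAW accounting still has to be unwound.
 */
static void
ath_tx_example_comp(struct ath_softc *sc, struct ath_buf *bf, int fail)
{
    struct ieee80211_node *ni = bf->bf_node;

    if (! fail) {
        /* Normal completion: hand TX status to rate control, etc. */
    }

    /* In both cases, release the node reference ... */
    if (ni != NULL)
        ieee80211_free_node(ni);
    bf->bf_node = NULL;

    /* ... and return the buffer to the free list (DMA unmap/mbuf free omitted). */
    ATH_TXBUF_LOCK(sc);
    STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
    ATH_TXBUF_UNLOCK(sc);
}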
@ -151,19 +267,27 @@ struct ath_descdma {
* hardware queue).
*/
struct ath_txq {
struct ath_softc *axq_softc; /* Needed for scheduling */
u_int axq_qnum; /* hardware q number */
#define ATH_TXQ_SWQ (HAL_NUM_TX_QUEUES+1) /* qnum for s/w only queue */
u_int axq_ac; /* WME AC */
u_int axq_flags;
#define ATH_TXQ_PUTPENDING 0x0001 /* ath_hal_puttxbuf pending */
u_int axq_depth; /* queue depth (stat only) */
u_int axq_aggr_depth; /* how many aggregates are queued */
u_int axq_intrcnt; /* interrupt count */
u_int32_t *axq_link; /* link ptr in last TX desc */
STAILQ_HEAD(, ath_buf) axq_q; /* transmit queue */
struct mtx axq_lock; /* lock on q and link */
char axq_name[12]; /* e.g. "ath0_txq4" */
/* Per-TID traffic queue for software -> hardware TX */
TAILQ_HEAD(axq_t_s,ath_tid) axq_tidq;
};
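
The axq_tidq list and the per-TID sched flag support scheduling TIDs onto their hardware queue. The following is an illustrative sketch only; the real scheduling calls arrive with the upcoming aggregation TX code.

/*
 * Illustrative only: mark a TID for servicing on its hardware queue.
 * Assumes the caller holds the TXQ lock.
 */
static void
ath_tx_tid_sched(struct ath_txq *txq, struct ath_tid *tid)
{
    ATH_TXQ_LOCK_ASSERT(txq);

    if (tid->paused || tid->sched || tid->axq_depth == 0)
        return;

    tid->sched = 1;
    TAILQ_INSERT_TAIL(&txq->axq_tidq, tid, axq_qelem);
}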
#define ATH_NODE_LOCK(_an) mtx_lock(&(_an)->an_mtx)
#define ATH_NODE_UNLOCK(_an) mtx_unlock(&(_an)->an_mtx)
#define ATH_NODE_LOCK_ASSERT(_an) mtx_assert(&(_an)->an_mtx, MA_OWNED)
#define ATH_TXQ_LOCK_INIT(_sc, _tq) do { \
snprintf((_tq)->axq_name, sizeof((_tq)->axq_name), "%s_txq%u", \
device_get_nameunit((_sc)->sc_dev), (_tq)->axq_qnum); \
@ -173,6 +297,7 @@ struct ath_txq {
#define ATH_TXQ_LOCK(_tq) mtx_lock(&(_tq)->axq_lock)
#define ATH_TXQ_UNLOCK(_tq) mtx_unlock(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_ASSERT(_tq) mtx_assert(&(_tq)->axq_lock, MA_OWNED)
#define ATH_TXQ_IS_LOCKED(_tq) mtx_owned(&(_tq)->axq_lock)
#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
STAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
@ -205,9 +330,20 @@ struct ath_vap {
struct taskqueue;
struct ath_tx99;
/*
* Whether to reset the TX/RX queue with or without
* a queue flush.
*/
typedef enum {
ATH_RESET_DEFAULT = 0,
ATH_RESET_NOLOSS = 1,
ATH_RESET_FULL = 2,
} ATH_RESET_TYPE;
struct ath_softc {
struct ifnet *sc_ifp; /* interface common */
struct ath_stats sc_stats; /* interface statistics */
struct ath_tx_aggr_stats sc_aggr_stats;
int sc_debug;
int sc_nvaps; /* # vaps */
int sc_nstavaps; /* # station vaps */
@ -216,12 +352,15 @@ struct ath_softc {
u_int8_t sc_nbssid0; /* # vap's using base mac */
uint32_t sc_bssidmask; /* bssid mask */
void (*sc_node_cleanup)(struct ieee80211_node *);
void (*sc_node_free)(struct ieee80211_node *);
device_t sc_dev;
HAL_BUS_TAG sc_st; /* bus space tag */
HAL_BUS_HANDLE sc_sh; /* bus space handle */
bus_dma_tag_t sc_dmat; /* bus DMA tag */
struct mtx sc_mtx; /* master lock (recursive) */
struct mtx sc_pcu_mtx; /* PCU access mutex */
char sc_pcu_mtx_name[32];
struct taskqueue *sc_tq; /* private task queue */
struct ath_hal *sc_ah; /* Atheros HAL */
struct ath_ratectrl *sc_rc; /* tx rate control support */
@ -360,10 +499,38 @@ struct ath_softc {
int sc_txchainmask; /* currently configured TX chainmask */
int sc_rxchainmask; /* currently configured RX chainmask */
/*
* Aggregation twiddles
*
* hwq_limit: how deep the hardware queue is allowed to get before no
* further packets are scheduled to it, regardless of the TID
* tid_hwq_lo: how low the per-TID hwq count has to fall before the
* TID will be scheduled again
* tid_hwq_hi: how many frames may be queued to the HWQ before the TID
* stops being scheduled
*
* (An illustrative sketch of this logic follows the struct below.)
*/
int sc_hwq_limit;
int sc_tid_hwq_lo;
int sc_tid_hwq_hi;
/* DFS related state */
void *sc_dfs; /* Used by an optional DFS module */
int sc_dodfs; /* Whether to enable DFS rx filter bits */
struct task sc_dfstask; /* DFS processing task */
/* TX AMPDU handling */
int (*sc_addba_request)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
int (*sc_addba_response)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *, int, int, int);
void (*sc_addba_stop)(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
void (*sc_addba_response_timeout)
(struct ieee80211_node *,
struct ieee80211_tx_ampdu *);
void (*sc_bar_response)(struct ieee80211_node *ni,
struct ieee80211_tx_ampdu *tap,
int status);
};
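
The sketch below illustrates the hysteresis implied by the sc_hwq_limit / sc_tid_hwq_lo / sc_tid_hwq_hi knobs described above; the helper names are hypothetical and only restate the comment in code form.

/*
 * Illustrative only: a TID stops being given new work once it has
 * sc_tid_hwq_hi frames in flight and becomes eligible again when it
 * drains below sc_tid_hwq_lo; the hardware queue as a whole is left
 * alone once its depth reaches sc_hwq_limit.
 */
static int
ath_tx_tid_can_queue(struct ath_softc *sc, struct ath_txq *txq,
    struct ath_tid *tid)
{
    if (txq->axq_depth >= sc->sc_hwq_limit)
        return (0);     /* hardware queue is busy enough */
    if (tid->hwq_depth >= sc->sc_tid_hwq_hi)
        return (0);     /* this TID has enough frames in flight */
    return (1);
}

static int
ath_tx_tid_hw_drained(struct ath_softc *sc, struct ath_tid *tid)
{
    return (tid->hwq_depth < sc->sc_tid_hwq_lo);
}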
#define ATH_LOCK_INIT(_sc) \
@ -374,6 +541,37 @@ struct ath_softc {
#define ATH_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
#define ATH_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
/*
* The PCU lock is non-recursive and should be treated as a spinlock.
* Although currently the interrupt code is run in netisr context and
* doesn't require this, this may change in the future.
* Please keep this in mind when protecting certain code paths
* with the PCU lock.
*
* The PCU lock is used to serialise access to the PCU so things such
* as TX, RX, state change (eg channel change), channel reset and updates
* from interrupt context (eg kickpcu, txqactive bits) do not clash.
*
* Although the current single-thread taskqueue mechanism protects the
* majority of these situations by simply serialising them, there are
* a few others which occur at the same time. These include the TX path
* (which only acquires ATH_LOCK when recycling buffers to the free list),
* ath_set_channel, the channel scanning API and perhaps quite a bit more.
*/
#define ATH_PCU_LOCK_INIT(_sc) do { \
snprintf((_sc)->sc_pcu_mtx_name, \
sizeof((_sc)->sc_pcu_mtx_name), \
"%s PCU lock", \
device_get_nameunit((_sc)->sc_dev)); \
mtx_init(&(_sc)->sc_pcu_mtx, (_sc)->sc_pcu_mtx_name, \
NULL, MTX_DEF); \
} while (0)
#define ATH_PCU_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_LOCK(_sc) mtx_lock(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_pcu_mtx)
#define ATH_PCU_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_pcu_mtx, \
MA_OWNED)
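
A minimal sketch of how a driver path might use these macros to keep, say, a channel change from racing interrupt-side PCU updates; illustrative only, since the paths that actually take this lock land in later commits.

/*
 * Illustrative only: serialise a PCU-touching operation against
 * TX/RX/interrupt paths using the new lock.
 */
static void
ath_example_pcu_op(struct ath_softc *sc)
{
    ATH_PCU_LOCK(sc);
    /*
     * Stop TX/RX DMA, reprogram the PCU/channel via the HAL and
     * restart DMA, without interrupt-context updates (e.g. kickpcu
     * handling) interleaving.
     */
    ATH_PCU_UNLOCK(sc);
}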
#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
#define ATH_TXBUF_LOCK_INIT(_sc) do { \
@ -686,24 +884,33 @@ void ath_intr(void *);
#define ath_hal_gettxcompletionrates(_ah, _ds, _rates, _tries) \
((*(_ah)->ah_getTxCompletionRates)((_ah), (_ds), (_rates), (_tries)))
#define ath_hal_chaintxdesc(_ah, _ds, _pktlen, _hdrlen, _type, _keyix, \
_cipher, _delims, _seglen, _first, _last) \
((*(_ah)->ah_chainTxDesc((_ah), (_ds), (_pktlen), (_hdrlen), \
(_type), (_keyix), (_cipher), (_delims), (_seglen), \
(_first), (_last))))
#define ath_hal_setupfirsttxdesc(_ah, _ds, _aggrlen, _flags, _txpower, \
_txr0, _txtr0, _antm, _rcr, _rcd) \
((*(_ah)->ah_setupFirstTxDesc)((_ah), (_ds), (_aggrlen), (_flags), \
(_txpower), (_txr0), (_txtr0), (_antm), (_rcr), (_rcd)))
#define ath_hal_chaintxdesc(_ah, _ds, _pktlen, _hdrlen, _type, _keyix, \
_cipher, _delims, _seglen, _first, _last) \
((*(_ah)->ah_chainTxDesc)((_ah), (_ds), (_pktlen), (_hdrlen), \
(_type), (_keyix), (_cipher), (_delims), (_seglen), \
(_first), (_last)))
#define ath_hal_setuplasttxdesc(_ah, _ds, _ds0) \
((*(_ah)->ah_setupLastTxDesc)((_ah), (_ds), (_ds0)))
#define ath_hal_set11nratescenario(_ah, _ds, _dur, _rt, _series, _ns, _flags) \
((*(_ah)->ah_set11nRateScenario)((_ah), (_ds), (_dur), (_rt), \
(_series), (_ns), (_flags)))
#define ath_hal_set11n_aggr_first(_ah, _ds, _len, _num) \
((*(_ah)->ah_set11nAggrFirst)((_ah), (_ds), (_len), (_num)))
#define ath_hal_set11naggrmiddle(_ah, _ds, _num) \
((*(_ah)->ah_set11nAggrMiddle((_ah), (_ds), (_num))))
((*(_ah)->ah_set11nAggrMiddle)((_ah), (_ds), (_num)))
#define ath_hal_set11n_aggr_last(_ah, _ds) \
((*(_ah)->ah_set11nAggrLast)((_ah), (_ds)))
#define ath_hal_set11nburstduration(_ah, _ds, _dur) \
((*(_ah)->ah_set11nBurstDuration)((_ah), (_ds), (_dur)))
#define ath_hal_clr11n_aggr(_ah, _ds) \
((*(_ah)->ah_clr11nAggr)((_ah), (_ds)))
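
Putting the aggregate macros above together, marking the buffers of a formed A-MPDU might look roughly like the sketch below. This is not part of the commit: bf_desc is the buffer's existing descriptor pointer, and aggr_len/ndelim are assumed to come from the upcoming aggregate formation code.

/*
 * Illustrative only: tag the descriptors of an aggregate using the
 * first/middle/last helpers.  A lone frame would instead have its
 * aggregate bits cleared with ath_hal_clr11n_aggr().
 */
static void
ath_tx_example_mark_aggr(struct ath_hal *ah, struct ath_buf *bf_first,
    int aggr_len, int ndelim)
{
    struct ath_buf *bf;

    for (bf = bf_first; bf != NULL; bf = bf->bf_next) {
        if (bf == bf_first)
            ath_hal_set11n_aggr_first(ah, bf->bf_desc,
                aggr_len, ndelim);
        else if (bf->bf_next == NULL)
            ath_hal_set11n_aggr_last(ah, bf->bf_desc);
        else
            ath_hal_set11naggrmiddle(ah, bf->bf_desc, ndelim);
    }
}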
/*
* This is badly-named; you need to set the correct parameters