Further preparations for the RX EDMA support.
Break out the DMA descriptor setup/teardown code into a method. The EDMA RX code doesn't allocate descriptors, just ath_buf entries.

commit 3d184db2f8
parent ef23194991
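
The change hangs RX descriptor setup/teardown off the per-instance sc_rx method table, so a later EDMA commit can plug in its own routines without touching ath_attach()/ath_detach(). Below is a small standalone sketch of that dispatch pattern; the struct and function names (softc, rx_methods, legacy_rxsetup, ...) are illustrative stand-ins, not the driver's real definitions.

    /*
     * Standalone sketch of the method-table dispatch this commit introduces.
     * Illustrative names only; the real driver uses struct ath_softc,
     * struct ath_rx_methods and the ath_rxdma_setup()/ath_rxdma_teardown()
     * macros shown in the diff below.
     */
    #include <stdio.h>

    struct softc;

    struct rx_methods {
        int (*recv_setup)(struct softc *);     /* allocate RX descriptors/buffers */
        int (*recv_teardown)(struct softc *);  /* free them again */
    };

    struct softc {
        int isedma;                 /* chip supports EDMA RX? */
        struct rx_methods rx;
    };

    static int
    legacy_rxsetup(struct softc *sc)
    {
        /* legacy path: one descriptor ring plus ath_buf entries */
        printf("legacy rx setup\n");
        return (0);
    }

    static int
    legacy_rxteardown(struct softc *sc)
    {
        printf("legacy rx teardown\n");
        return (0);
    }

    static void
    recv_setup_legacy(struct softc *sc)
    {
        sc->rx.recv_setup = legacy_rxsetup;
        sc->rx.recv_teardown = legacy_rxteardown;
    }

    int
    main(void)
    {
        struct softc sc = { .isedma = 0 };

        /* attach: choose the method set before any descriptors exist */
        recv_setup_legacy(&sc);

        /* attach/detach then call through the table, EDMA-agnostic */
        if (sc.rx.recv_setup(&sc) != 0)
            return (1);
        sc.rx.recv_teardown(&sc);
        return (0);
    }

In the driver itself, ath_attach() selects ath_recv_setup_legacy() or ath_recv_setup_edma() based on ath_hal_hasedma(), and the attach/detach paths call through the table via the ath_rxdma_setup()/ath_rxdma_teardown() macros, as the hunks below show.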

@@ -157,8 +157,6 @@ static void ath_update_promisc(struct ifnet *);
 static void ath_updateslot(struct ifnet *);
 static void ath_bstuck_proc(void *, int);
 static void ath_reset_proc(void *, int);
-static void ath_descdma_cleanup(struct ath_softc *sc,
-    struct ath_descdma *, ath_bufhead *);
 static int ath_desc_alloc(struct ath_softc *);
 static void ath_desc_free(struct ath_softc *);
 static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *,

@@ -239,15 +237,15 @@ static int ath_anicalinterval = 100; /* ANI calibration - 100 msec */
 SYSCTL_INT(_hw_ath, OID_AUTO, anical, CTLFLAG_RW, &ath_anicalinterval,
     0, "ANI calibration (msecs)");
 
-static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
+int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */
 SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf,
     0, "rx buffers allocated");
 TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf);
-static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
+int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */
 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf,
     0, "tx buffers allocated");
 TUNABLE_INT("hw.ath.txbuf", &ath_txbuf);
-static int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
+int ath_txbuf_mgmt = ATH_MGMT_TXBUF; /* # mgmt tx buffers to allocate */
 SYSCTL_INT(_hw_ath, OID_AUTO, txbuf_mgmt, CTLFLAG_RW, &ath_txbuf_mgmt,
     0, "tx (mgmt) buffers allocated");
 TUNABLE_INT("hw.ath.txbuf_mgmt", &ath_txbuf_mgmt);

@@ -308,9 +306,10 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
      *
      * This is required before the descriptors are allocated.
      */
-    if (ath_hal_hasedma(sc->sc_ah))
+    if (ath_hal_hasedma(sc->sc_ah)) {
+        sc->sc_isedma = 1;
         ath_recv_setup_edma(sc);
-    else
+    } else
         ath_recv_setup_legacy(sc);
 
     /*

@@ -378,6 +377,14 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
         if_printf(ifp, "failed to allocate descriptors: %d\n", error);
         goto bad;
     }
+
+    error = ath_rxdma_setup(sc);
+    if (error != 0) {
+        if_printf(ifp, "failed to allocate RX descriptors: %d\n",
+            error);
+        goto bad;
+    }
+
     callout_init_mtx(&sc->sc_cal_ch, &sc->sc_mtx, 0);
     callout_init_mtx(&sc->sc_wd_ch, &sc->sc_mtx, 0);
 

@@ -854,6 +861,7 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
 bad2:
     ath_tx_cleanup(sc);
     ath_desc_free(sc);
+    ath_rxdma_teardown(sc);
 bad:
     if (ah)
         ath_hal_detach(ah);

@@ -896,6 +904,7 @@ ath_detach(struct ath_softc *sc)
 
     ath_dfs_detach(sc);
     ath_desc_free(sc);
+    ath_rxdma_teardown(sc);
     ath_tx_cleanup(sc);
     ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */
     if_free(ifp);

@@ -2594,6 +2603,13 @@ ath_mode_init(struct ath_softc *sc)
     /* configure operational mode */
     ath_hal_setopmode(ah);
 
+    DPRINTF(sc, ATH_DEBUG_STATE | ATH_DEBUG_MODE,
+        "%s: ah=%p, ifp=%p, if_addr=%p\n",
+        __func__,
+        ah,
+        ifp,
+        (ifp == NULL) ? NULL : ifp->if_addr);
+
     /* handle any link-level address change */
     ath_hal_setmac(ah, IF_LLADDR(ifp));
 

@@ -2724,7 +2740,7 @@ ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
     *paddr = segs->ds_addr;
 }
 
-static int
+int
 ath_descdma_setup(struct ath_softc *sc,
     struct ath_descdma *dd, ath_bufhead *head,
     const char *name, int nbuf, int ndesc)

@@ -2863,7 +2879,7 @@ fail0:
 #undef ATH_DESC_4KB_BOUND_CHECK
 }
 
-static void
+void
 ath_descdma_cleanup(struct ath_softc *sc,
     struct ath_descdma *dd, ath_bufhead *head)
 {

@@ -2904,15 +2920,9 @@ ath_desc_alloc(struct ath_softc *sc)
 {
     int error;
 
-    error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
-        "rx", ath_rxbuf, 1);
-    if (error != 0)
-        return error;
-
     error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
         "tx", ath_txbuf, ATH_TXDESC);
     if (error != 0) {
-        ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
         return error;
     }
     sc->sc_txbuf_cnt = ath_txbuf;

@@ -2920,7 +2930,6 @@ ath_desc_alloc(struct ath_softc *sc)
     error = ath_descdma_setup(sc, &sc->sc_txdma_mgmt, &sc->sc_txbuf_mgmt,
         "tx_mgmt", ath_txbuf_mgmt, ATH_TXDESC);
     if (error != 0) {
-        ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
         ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
         return error;
     }

@@ -2933,7 +2942,6 @@ ath_desc_alloc(struct ath_softc *sc)
     error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
         "beacon", ATH_BCBUF, 1);
     if (error != 0) {
-        ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
         ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
         ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
             &sc->sc_txbuf_mgmt);

@@ -2950,8 +2958,6 @@ ath_desc_free(struct ath_softc *sc)
     ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
     if (sc->sc_txdma.dd_desc_len != 0)
         ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
-    if (sc->sc_rxdma.dd_desc_len != 0)
-        ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
     if (sc->sc_txdma_mgmt.dd_desc_len != 0)
         ath_descdma_cleanup(sc, &sc->sc_txdma_mgmt,
             &sc->sc_txbuf_mgmt);

@@ -48,6 +48,10 @@
 ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \
  (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)))
 
+extern int ath_rxbuf;
+extern int ath_txbuf;
+extern int ath_txbuf_mgmt;
+
 extern int ath_tx_findrix(const struct ath_softc *sc, uint8_t rate);
 
 extern struct ath_buf * ath_getbuf(struct ath_softc *sc,

@@ -80,6 +84,11 @@ extern void ath_setdefantenna(struct ath_softc *sc, u_int antenna);
 
 extern void ath_setslottime(struct ath_softc *sc);
 
+extern int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+    ath_bufhead *head, const char *name, int nbuf, int ndesc);
+extern void ath_descdma_cleanup(struct ath_softc *sc,
+    struct ath_descdma *dd, ath_bufhead *head);
+
 /*
  * This is only here so that the RX proc function can call it.
  * It's very likely that the "start TX after RX" call should be

@@ -1053,6 +1053,31 @@ ath_legacy_startrecv(struct ath_softc *sc)
     return 0;
 }
 
+static int
+ath_legacy_dma_rxsetup(struct ath_softc *sc)
+{
+    int error;
+
+    device_printf(sc->sc_dev, "%s: called\n", __func__);
+
+    error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+        "rx", ath_rxbuf, 1);
+    if (error != 0)
+        return (error);
+
+    return (0);
+}
+
+static int
+ath_legacy_dma_rxteardown(struct ath_softc *sc)
+{
+
+    device_printf(sc->sc_dev, "%s: called\n", __func__);
+
+    if (sc->sc_rxdma.dd_desc_len != 0)
+        ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+    return (0);
+}
+
 void
 ath_recv_setup_legacy(struct ath_softc *sc)

@@ -1065,4 +1090,7 @@ ath_recv_setup_legacy(struct ath_softc *sc)
     sc->sc_rx.recv_flush = ath_legacy_flushrecv;
     sc->sc_rx.recv_tasklet = ath_legacy_rx_tasklet;
     sc->sc_rx.recv_rxbuf_init = ath_legacy_rxbuf_init;
+
+    sc->sc_rx.recv_setup = ath_legacy_dma_rxsetup;
+    sc->sc_rx.recv_teardown = ath_legacy_dma_rxteardown;
 }

@@ -43,6 +43,10 @@ extern void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m,
     (_sc)->sc_rx.recv_flush((_sc))
 #define ath_rxbuf_init(_sc, _bf) \
     (_sc)->sc_rx.recv_rxbuf_init((_sc), (_bf))
+#define ath_rxdma_setup(_sc) \
+    (_sc)->sc_rx.recv_setup(_sc)
+#define ath_rxdma_teardown(_sc) \
+    (_sc)->sc_rx.recv_teardown(_sc)
 
 #if 0
 extern int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf);

@@ -379,6 +379,20 @@ struct ath_rx_methods {
     void (*recv_tasklet)(void *arg, int npending);
     int (*recv_rxbuf_init)(struct ath_softc *sc,
         struct ath_buf *bf);
+    int (*recv_setup)(struct ath_softc *sc);
+    int (*recv_teardown)(struct ath_softc *sc);
 };
 
+/*
+ * Represent the current state of the RX FIFO.
+ */
+struct ath_rx_edma {
+    struct ath_buf **m_fifo;
+    int m_fifolen;
+    int m_fifo_head;
+    int m_fifo_tail;
+    int m_fifo_depth;
+    struct mbuf *m_rxpending;
+};
+
 struct ath_softc {

@@ -395,6 +409,12 @@ struct ath_softc {
     uint32_t sc_bssidmask; /* bssid mask */
 
     struct ath_rx_methods sc_rx;
+    struct ath_rx_edma sc_rxedma[2]; /* HP/LP queues */
+    int sc_rx_statuslen;
+    int sc_tx_desclen;
+    int sc_tx_statuslen;
+    int sc_tx_nmaps; /* Number of TX maps */
+    int sc_edma_bufsize;
 
     void (*sc_node_cleanup)(struct ieee80211_node *);
     void (*sc_node_free)(struct ieee80211_node *);

@@ -439,7 +459,8 @@ struct ath_softc {
     sc_setcca : 1,/* set/clr CCA with TDMA */
     sc_resetcal : 1,/* reset cal state next trip */
     sc_rxslink : 1,/* do self-linked final descriptor */
-    sc_rxtsf32 : 1;/* RX dec TSF is 32 bits */
+    sc_rxtsf32 : 1,/* RX dec TSF is 32 bits */
+    sc_isedma : 1;/* supports EDMA */
     uint32_t sc_eerd; /* regdomain from EEPROM */
     uint32_t sc_eecc; /* country code from EEPROM */
     /* rate tables */