Create a new TX lock specifically for queuing frames.
This now separates out the act of queuing frames from the act of running TX and TX completion.
commit 1b3502e5a1
parent dae3dc73f6
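In short: ath_txq_qadd() now queues frames under the new IC TX lock only, and ath_txq_qrun() migrates the queued buffers out from under that lock with TAILQ_CONCAT() before transmitting them under the existing TX lock, so queuing from the upper layers never blocks behind a TX/completion pass. Below is a minimal user-space sketch of that two-lock split, not the driver code itself; the names (pkt_queue, pkt_run, ic_mtx, tx_mtx) are hypothetical, and it assumes a BSD-complete <sys/queue.h> that provides TAILQ_CONCAT, as on FreeBSD.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct pkt {
        int id;
        TAILQ_ENTRY(pkt) p_list;
};
TAILQ_HEAD(pkt_list, pkt);

static struct pkt_list pending = TAILQ_HEAD_INITIALIZER(pending);
static pthread_mutex_t ic_mtx = PTHREAD_MUTEX_INITIALIZER;  /* queue ("IC") lock */
static pthread_mutex_t tx_mtx = PTHREAD_MUTEX_INITIALIZER;  /* TX/completion lock */

/* Producer path: cheap, never contends with a running TX pass. */
static void
pkt_queue(struct pkt *p)
{
        pthread_mutex_lock(&ic_mtx);
        TAILQ_INSERT_TAIL(&pending, p, p_list);
        pthread_mutex_unlock(&ic_mtx);
}

/* Consumer path: steal the whole list, then transmit outside the queue lock. */
static void
pkt_run(void)
{
        struct pkt_list txlist = TAILQ_HEAD_INITIALIZER(txlist);
        struct pkt *p;

        pthread_mutex_lock(&ic_mtx);
        TAILQ_CONCAT(&txlist, &pending, p_list);        /* pending is now empty */
        pthread_mutex_unlock(&ic_mtx);

        pthread_mutex_lock(&tx_mtx);
        while ((p = TAILQ_FIRST(&txlist)) != NULL) {
                TAILQ_REMOVE(&txlist, p, p_list);
                printf("tx %d\n", p->id);       /* stands in for ath_tx_start() */
                free(p);
        }
        pthread_mutex_unlock(&tx_mtx);
}

int
main(void)
{
        for (int i = 0; i < 4; i++) {
                struct pkt *p = malloc(sizeof(*p));
                p->id = i;
                pkt_queue(p);
        }
        pkt_run();
        return (0);
}

Splicing the whole queue into a local list keeps the queue-lock hold time down to a couple of pointer updates, which is the same idiom ath_txq_qflush() and ath_txq_qrun() use in the hunks below.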
@@ -2687,7 +2687,7 @@ ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0)
 	struct mbuf *m;
 
 	/* XXX recursive TX completion -> TX? */
-	ATH_TX_UNLOCK_ASSERT(sc);
+	ATH_TX_IC_UNLOCK_ASSERT(sc);
 
 	/*
 	 * We grab the node pointer, but we don't deref
@@ -2749,7 +2749,7 @@ ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0)
 	 * into the driver.
 	 */
 
-	ATH_TX_LOCK(sc);
+	ATH_TX_IC_LOCK(sc);
 
 	/*
 	 * Throw the single frame onto the queue.
@@ -2797,7 +2797,7 @@ ath_txq_qadd(struct ifnet *ifp, struct mbuf *m0)
 
 		m = m->m_nextpkt;
 	}
-	ATH_TX_UNLOCK(sc);
+	ATH_TX_IC_UNLOCK(sc);
 
 	return (0);
 bad:
@@ -2825,13 +2825,13 @@ ath_txq_qflush(struct ifnet *ifp)
 	TAILQ_INIT(&txlist);
 
 	/* Grab lock */
-	ATH_TX_LOCK(sc);
+	ATH_TX_IC_LOCK(sc);
 
 	/* Copy everything out of sc_txbuf_list into txlist */
 	TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
 
 	/* Unlock */
-	ATH_TX_UNLOCK(sc);
+	ATH_TX_IC_UNLOCK(sc);
 
 	/* Now, walk the list, freeing things */
 	while ((bf = TAILQ_FIRST(&txlist)) != NULL) {
@@ -2879,16 +2879,9 @@ ath_txq_qrun(struct ifnet *ifp)
 	 */
 
 	/* Copy everything out of sc_txbuf_list into txlist */
-	ATH_TX_LOCK(sc);
+	ATH_TX_IC_LOCK(sc);
 	TAILQ_CONCAT(&txlist, &sc->sc_txbuf_list, bf_list);
-	ATH_TX_UNLOCK(sc);
-
-	/*
-	 * For now, the ath_tx_start() code sits behind the same lock;
-	 * worry about serialising this in a taskqueue later.
-	 */
-
-	ATH_TX_LOCK(sc);
+	ATH_TX_IC_UNLOCK(sc);
 
 	/*
 	 * Attempt to transmit each frame.
@@ -2899,6 +2892,7 @@ ath_txq_qrun(struct ifnet *ifp)
 	 * It would be nice to chain together TX fragments in this
 	 * way so they can be aborted together.
 	 */
+	ATH_TX_LOCK(sc);
 	TAILQ_FOREACH_SAFE(bf, &txlist, bf_list, bf_next) {
 		/*
 		 * Clear, because we're going to reuse this
@@ -195,6 +195,7 @@ ath_ahb_attach(device_t dev)
 	ATH_PCU_LOCK_INIT(sc);
 	ATH_RX_LOCK_INIT(sc);
 	ATH_TX_LOCK_INIT(sc);
+	ATH_TX_IC_LOCK_INIT(sc);
 	ATH_TXSTATUS_LOCK_INIT(sc);
 
 	error = ath_attach(AR9130_DEVID, sc);
@@ -204,6 +205,7 @@ ath_ahb_attach(device_t dev)
 	ATH_TXSTATUS_LOCK_DESTROY(sc);
 	ATH_RX_LOCK_DESTROY(sc);
 	ATH_TX_LOCK_DESTROY(sc);
+	ATH_TX_IC_LOCK_DESTROY(sc);
 	ATH_PCU_LOCK_DESTROY(sc);
 	ATH_LOCK_DESTROY(sc);
 	bus_dma_tag_destroy(sc->sc_dmat);
@@ -247,6 +249,7 @@ ath_ahb_detach(device_t dev)
 	ATH_TXSTATUS_LOCK_DESTROY(sc);
 	ATH_RX_LOCK_DESTROY(sc);
 	ATH_TX_LOCK_DESTROY(sc);
+	ATH_TX_IC_LOCK_DESTROY(sc);
 	ATH_PCU_LOCK_DESTROY(sc);
 	ATH_LOCK_DESTROY(sc);
 
@@ -251,6 +251,7 @@ ath_pci_attach(device_t dev)
 	ATH_PCU_LOCK_INIT(sc);
 	ATH_RX_LOCK_INIT(sc);
 	ATH_TX_LOCK_INIT(sc);
+	ATH_TX_IC_LOCK_INIT(sc);
 	ATH_TXSTATUS_LOCK_INIT(sc);
 
 	error = ath_attach(pci_get_device(dev), sc);
@@ -260,6 +261,7 @@ ath_pci_attach(device_t dev)
 	ATH_TXSTATUS_LOCK_DESTROY(sc);
 	ATH_PCU_LOCK_DESTROY(sc);
 	ATH_RX_LOCK_DESTROY(sc);
+	ATH_TX_IC_LOCK_DESTROY(sc);
 	ATH_TX_LOCK_DESTROY(sc);
 	ATH_LOCK_DESTROY(sc);
 	bus_dma_tag_destroy(sc->sc_dmat);
@@ -302,6 +304,7 @@ ath_pci_detach(device_t dev)
 	ATH_TXSTATUS_LOCK_DESTROY(sc);
 	ATH_PCU_LOCK_DESTROY(sc);
 	ATH_RX_LOCK_DESTROY(sc);
+	ATH_TX_IC_LOCK_DESTROY(sc);
 	ATH_TX_LOCK_DESTROY(sc);
 	ATH_LOCK_DESTROY(sc);
 
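Both bus front-ends follow the usual pairing rule: every *_LOCK_INIT() in attach needs a matching *_LOCK_DESTROY() on the attach failure path and in detach, which is why ATH_TX_IC_LOCK_INIT()/ATH_TX_IC_LOCK_DESTROY() each appear three times per file above. A minimal sketch of that unwind idiom, with hypothetical names and pthread mutexes standing in for mtx(9):

#include <pthread.h>
#include <stdio.h>

struct softc {
        pthread_mutex_t tx_mtx;
        pthread_mutex_t tx_ic_mtx;
};

static int
hw_attach(struct softc *sc)
{
        (void)sc;
        return (0);             /* pretend the hardware probe succeeded */
}

static int
dev_attach(struct softc *sc)
{
        int error;

        pthread_mutex_init(&sc->tx_mtx, NULL);
        pthread_mutex_init(&sc->tx_ic_mtx, NULL);

        error = hw_attach(sc);
        if (error != 0)
                goto bad;
        return (0);
bad:
        /* unwind in reverse order of initialisation */
        pthread_mutex_destroy(&sc->tx_ic_mtx);
        pthread_mutex_destroy(&sc->tx_mtx);
        return (error);
}

int
main(void)
{
        struct softc sc;

        printf("attach: %d\n", dev_attach(&sc));
        return (0);
}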
@@ -520,8 +520,10 @@ struct ath_softc {
 	char            sc_pcu_mtx_name[32];
 	struct mtx      sc_rx_mtx;      /* RX access mutex */
 	char            sc_rx_mtx_name[32];
-	struct mtx      sc_tx_mtx;      /* TX access mutex */
+	struct mtx      sc_tx_mtx;      /* TX handling/comp mutex */
 	char            sc_tx_mtx_name[32];
+	struct mtx      sc_tx_ic_mtx;   /* TX queue mutex */
+	char            sc_tx_ic_mtx_name[32];
 	struct taskqueue *sc_tq;        /* private task queue */
 	struct taskqueue *sc_tx_tq;     /* private TX task queue */
 	struct ath_hal  *sc_ah;         /* Atheros HAL */
@@ -795,10 +797,8 @@ struct ath_softc {
 #define ATH_UNLOCK_ASSERT(_sc)      mtx_assert(&(_sc)->sc_mtx, MA_NOTOWNED)
 
 /*
- * The TX lock is non-reentrant and serialises the TX send operations.
- * (ath_start(), ath_raw_xmit().)  It doesn't yet serialise the TX
- * completion operations; thus it can't be used (yet!) to protect
- * hardware / software TXQ operations.
+ * The TX lock is non-reentrant and serialises the TX frame send
+ * and completion operations.
  */
 #define ATH_TX_LOCK_INIT(_sc) do {\
 	snprintf((_sc)->sc_tx_mtx_name, \
@@ -816,6 +816,26 @@ struct ath_softc {
 #define ATH_TX_UNLOCK_ASSERT(_sc)   mtx_assert(&(_sc)->sc_tx_mtx, \
 		MA_NOTOWNED)
 
+/*
+ * The IC TX lock is non-reentrant and serialises packet queuing from
+ * the upper layers.
+ */
+#define ATH_TX_IC_LOCK_INIT(_sc) do {\
+	snprintf((_sc)->sc_tx_ic_mtx_name, \
+	    sizeof((_sc)->sc_tx_ic_mtx_name), \
+	    "%s IC TX lock", \
+	    device_get_nameunit((_sc)->sc_dev)); \
+	mtx_init(&(_sc)->sc_tx_ic_mtx, (_sc)->sc_tx_ic_mtx_name, \
+	    NULL, MTX_DEF); \
+	} while (0)
+#define ATH_TX_IC_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_tx_ic_mtx)
+#define ATH_TX_IC_LOCK(_sc)         mtx_lock(&(_sc)->sc_tx_ic_mtx)
+#define ATH_TX_IC_UNLOCK(_sc)       mtx_unlock(&(_sc)->sc_tx_ic_mtx)
+#define ATH_TX_IC_LOCK_ASSERT(_sc)  mtx_assert(&(_sc)->sc_tx_ic_mtx, \
+		MA_OWNED)
+#define ATH_TX_IC_UNLOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_tx_ic_mtx, \
+		MA_NOTOWNED)
+
 /*
  * The PCU lock is non-recursive and should be treated as a spinlock.
  * Although currently the interrupt code is run in netisr context and
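The new ATH_TX_IC_* macros mirror the existing ATH_TX_* set: a non-reentrant MTX_DEF mutex plus OWNED/NOTOWNED assertions, which is what lets ath_txq_qadd() assert ATH_TX_IC_UNLOCK_ASSERT() on entry to catch the recursive TX-completion -> TX case flagged earlier. A user-space approximation of that non-reentrancy check, using an error-checking pthread mutex in place of mtx_assert(9) (hypothetical example, not driver code):

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t m;

        /* Error-checking mutexes report recursion instead of deadlocking. */
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        assert(pthread_mutex_lock(&m) == 0);
        /* A second lock by the same thread is the "recursive entry" bug. */
        assert(pthread_mutex_lock(&m) == EDEADLK);

        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&attr);
        printf("recursive lock attempt was caught\n");
        return (0);
}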