Abstract the locking in fxp(4) a bit more by using macros for mtx_assert() and mtx_owned(), as it is done in other places, for instance proc locking.
mux 2004-06-02 22:59:57 +00:00
commit c04518013a (parent 9fb70f161a)
2 changed files with 11 additions and 8 deletions
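For readers unfamiliar with the idiom, here is a minimal sketch of how the abstracted macros are meant to be used by driver code. It is illustrative only and not part of the commit: fxp_example_locked() and fxp_example_entry() are invented names, and the fragment is written as if it sat inside if_fxp.c, where the driver's usual headers are already included. FXP_LOCK() and FXP_UNLOCK() already existed; FXP_LOCKED() and FXP_LOCK_ASSERT() are the macros this commit adds.

/* Illustrative sketch only, not part of the commit. */
/* (as it would appear inside if_fxp.c, after the driver's usual includes) */

/*
 * A helper that requires the caller to hold the softc mutex; the assert
 * only fires in INVARIANTS kernels and compiles away otherwise.
 */
static void
fxp_example_locked(struct fxp_softc *sc)
{

        FXP_LOCK_ASSERT(sc, MA_OWNED);
        /* ... access sc and the chip registers safely here ... */
}

/*
 * An entry point that takes and drops the lock itself, refusing to run
 * if the mutex is already held (the same trick fxp_ioctl() uses).
 */
static int
fxp_example_entry(struct fxp_softc *sc)
{

        if (FXP_LOCKED(sc))
                return (EBUSY);
        FXP_LOCK(sc);
        fxp_example_locked(sc);
        FXP_UNLOCK(sc);
        return (0);
}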

sys/dev/fxp/if_fxp.c

@@ -863,7 +863,7 @@ fxp_release(struct fxp_softc *sc)
         struct fxp_tx *txp;
         int i;
 
-        mtx_assert(&sc->sc_mtx, MA_NOTOWNED);
+        FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
         KASSERT(sc->ih == NULL,
             ("fxp_release() called with intr handle still active"));
         if (sc->miibus)
@@ -1295,7 +1295,7 @@ fxp_start_body(struct ifnet *ifp)
         struct mbuf *mb_head;
         int error;
 
-        mtx_assert(&sc->sc_mtx, MA_OWNED);
+        FXP_LOCK_ASSERT(sc, MA_OWNED);
         /*
          * See if we need to suspend xmit until the multicast filter
          * has been reprogrammed (which can only be done at the head
@@ -1634,7 +1634,7 @@ fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, u_int8_t statack,
         struct fxp_rfa *rfa;
         int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
 
-        mtx_assert(&sc->sc_mtx, MA_OWNED);
+        FXP_LOCK_ASSERT(sc, MA_OWNED);
         if (rnr)
                 sc->rnr++;
 #ifdef DEVICE_POLLING
@@ -1993,7 +1993,7 @@ fxp_init_body(struct fxp_softc *sc)
         struct fxp_cb_mcs *mcsp;
         int i, prm, s;
 
-        mtx_assert(&sc->sc_mtx, MA_OWNED);
+        FXP_LOCK_ASSERT(sc, MA_OWNED);
         s = splimp();
         /*
          * Cancel any pending I/O
@@ -2434,7 +2434,7 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
          * Detaching causes us to call ioctl with the mutex owned. Preclude
          * that by saying we're busy if the lock is already held.
          */
-        if (mtx_owned(&sc->sc_mtx))
+        if (FXP_LOCKED(sc))
                 return (EBUSY);
 
         FXP_LOCK(sc);
@@ -2517,7 +2517,7 @@ fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
                 FXP_UNLOCK(sc);
                 error = ether_ioctl(ifp, command, data);
         }
-        if (mtx_owned(&sc->sc_mtx))
+        if (FXP_LOCKED(sc))
                 FXP_UNLOCK(sc);
         splx(s);
         return (error);
@@ -2579,6 +2579,7 @@ fxp_mc_setup(struct fxp_softc *sc)
         struct fxp_tx *txp;
         int count;
 
+        FXP_LOCK_ASSERT(sc, MA_OWNED);
         /*
          * If there are queued commands, we must wait until they are all
          * completed. If we are already waiting, then add a NOP command

sys/dev/fxp/if_fxpvar.h

@@ -104,15 +104,17 @@
 #if __FreeBSD_version < 500000
 #define FXP_LOCK(_sc)
 #define FXP_UNLOCK(_sc)
+#define FXP_LOCKED(_sc)
+#define FXP_LOCK_ASSERT(_sc, _what)
 #define INTR_MPSAFE             0
-#define mtx_owned(a)            0
-#define mtx_assert(a, b)
 #define mtx_init(a, b, c, d)
 #define mtx_destroy(a)
 struct mtx { int dummy; };
 #else
 #define FXP_LOCK(_sc)           mtx_lock(&(_sc)->sc_mtx)
 #define FXP_UNLOCK(_sc)         mtx_unlock(&(_sc)->sc_mtx)
+#define FXP_LOCKED(_sc)         mtx_owned(&(_sc)->sc_mtx)
+#define FXP_LOCK_ASSERT(_sc, _what)     mtx_assert(&(_sc)->sc_mtx, (_what))
 #endif
 
 /*
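For completeness, a hedged sketch of where the wrapped mutex itself comes from, assuming the usual newbus attach/detach pattern: fxp_attach_sketch() and fxp_detach_sketch() are invented names, and the MTX_NETWORK_LOCK/MTX_DEF arguments follow the common network-driver convention rather than being taken from this commit. On FreeBSD < 5.0 the stubbed mtx_init()/mtx_destroy() above turn these calls into no-ops, so the same source still builds there.

/* Illustrative sketch only, not from the commit (inside if_fxp.c). */
static int
fxp_attach_sketch(device_t dev)
{
        struct fxp_softc *sc = device_get_softc(dev);

        /* The mutex every FXP_* macro wraps; a stub on FreeBSD < 5.0. */
        mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        /* ... map registers, set up the ifnet, hook the interrupt ... */
        return (0);
}

static int
fxp_detach_sketch(device_t dev)
{
        struct fxp_softc *sc = device_get_softc(dev);

        /* ... stop the chip under FXP_LOCK()/FXP_UNLOCK(), tear down ... */
        mtx_destroy(&sc->sc_mtx);
        return (0);
}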