Wrap mutex(9), rwlock(9) and sx(9) macros into __extension__ ({})

instead of do {} while (0).

This makes them real void expressions, so they can be used anywhere
a void function call can be used, for example in a conditional
operator.
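
As a userland sketch of the difference (illustrative only, not part of
the commit; any GNU-C compiler such as GCC or Clang), a do/while macro
is a statement and is rejected in expression position, while the
statement-expression form is a void expression:

#include <stdio.h>

/* Classic statement form: legal only where a statement is legal. */
#define LOG_STMT(msg) do { printf("%s\n", (msg)); } while (0)

/* Statement-expression form: a void expression (GNU C extension). */
#define LOG_EXPR(msg) __extension__ ({ \
	printf("%s\n", (msg)); \
	(void)0; /* ensure void type for expression */ \
})

int
main(void)
{
	int quiet = 0;

	/* OK: both arms of the conditional operator are void expressions. */
	quiet ? (void)0 : LOG_EXPR("hello");

	/* Does not compile: a do/while statement is not an expression.
	 * quiet ? (void)0 : LOG_STMT("hello");
	 */
	return (0);
}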

Reviewed by:		kib, mjg
Differential revision:	https://reviews.freebsd.org/D32696
commit 840680e601
parent 63d24336fd
Author: Gleb Smirnoff
Date:   2021-10-27 10:33:01 -07:00

3 changed files with 28 additions and 20 deletions

diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -236,14 +236,15 @@ void _thread_lock(struct thread *);
  */
 
 /* Lock a normal mutex. */
-#define __mtx_lock(mp, tid, opts, file, line) do { \
+#define __mtx_lock(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	uintptr_t _v = MTX_UNOWNED; \
 	\
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__acquire) ||\
 	    !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \
 		_mtx_lock_sleep((mp), _v, (opts), (file), (line)); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 
 /*
  * Lock a spin mutex.  For spinlocks, we handle recursion inline (it
@@ -252,7 +253,7 @@ void _thread_lock(struct thread *);
  * inlining this code is not too big a deal.
  */
 #ifdef SMP
-#define __mtx_lock_spin(mp, tid, opts, file, line) do { \
+#define __mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	uintptr_t _v = MTX_UNOWNED; \
 	\
@@ -260,7 +261,8 @@ void _thread_lock(struct thread *);
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \
 	    !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \
 		_mtx_lock_spin((mp), _v, (opts), (file), (line)); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	int _ret; \
@@ -277,7 +279,7 @@ void _thread_lock(struct thread *);
 	_ret; \
 })
 #else /* SMP */
-#define __mtx_lock_spin(mp, tid, opts, file, line) do { \
+#define __mtx_lock_spin(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	\
 	spinlock_enter(); \
@@ -287,7 +289,8 @@ void _thread_lock(struct thread *);
 		KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \
 		(mp)->mtx_lock = _tid; \
 	} \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 #define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	int _ret; \
@@ -305,13 +308,14 @@ void _thread_lock(struct thread *);
 #endif /* SMP */
 
 /* Unlock a normal mutex. */
-#define __mtx_unlock(mp, tid, opts, file, line) do { \
+#define __mtx_unlock(mp, tid, opts, file, line) __extension__ ({ \
 	uintptr_t _v = (uintptr_t)(tid); \
 	\
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(adaptive__release) ||\
 	    !_mtx_release_lock_fetch((mp), &_v))) \
 		_mtx_unlock_sleep((mp), _v, (opts), (file), (line)); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 
 /*
  * Unlock a spin mutex.  For spinlocks, we can handle everything
@@ -324,7 +328,7 @@ void _thread_lock(struct thread *);
  * releasing a spin lock.  This includes the recursion cases.
  */
 #ifdef SMP
-#define __mtx_unlock_spin(mp) do { \
+#define __mtx_unlock_spin(mp) __extension__ ({ \
 	if (mtx_recursed((mp))) \
 		(mp)->mtx_recurse--; \
 	else { \
@@ -332,9 +336,9 @@ void _thread_lock(struct thread *);
 		_mtx_release_lock_quick((mp)); \
 	} \
 	spinlock_exit(); \
-} while (0)
+})
 #else /* SMP */
-#define __mtx_unlock_spin(mp) do { \
+#define __mtx_unlock_spin(mp) __extension__ ({ \
 	if (mtx_recursed((mp))) \
 		(mp)->mtx_recurse--; \
 	else { \
@@ -342,7 +346,7 @@ void _thread_lock(struct thread *);
 		(mp)->mtx_lock = MTX_UNOWNED; \
 	} \
 	spinlock_exit(); \
-} while (0)
+})
 #endif /* SMP */
 
 /*
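
A note on the new trailing (void)0; lines: a GNU statement expression
takes the type of its last expression statement, so a body that happens
to end in a non-void expression would silently yield a value. The
__mtx_unlock_spin() variants already end in spinlock_exit(), a void
call, so they need no cast. A minimal userland sketch (hypothetical
BUMP macros, not from the tree) shows what the cast guards against:

#include <assert.h>

/* Last statement is (x)++, so the whole construct has type int. */
#define BUMP(x) __extension__ ({ (x)++; })

/* The trailing (void)0; forces the construct to type void. */
#define BUMP_VOID(x) __extension__ ({ (x)++; (void)0; })

int
main(void)
{
	int n = 0;
	int old = BUMP(n);	/* legal: yields the pre-increment value */

	assert(old == 0 && n == 1);
	BUMP_VOID(n);		/* fine as a statement */
	/* int bad = BUMP_VOID(n);	-- would not compile: void value */
	assert(n == 2);
	return (0);
}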

diff --git a/sys/sys/rwlock.h b/sys/sys/rwlock.h
--- a/sys/sys/rwlock.h
+++ b/sys/sys/rwlock.h
@@ -103,23 +103,25 @@
  */
 
 /* Acquire a write lock. */
-#define __rw_wlock(rw, tid, file, line) do { \
+#define __rw_wlock(rw, tid, file, line) __extension__ ({ \
 	uintptr_t _tid = (uintptr_t)(tid); \
 	uintptr_t _v = RW_UNLOCKED; \
 	\
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__acquire) || \
 	    !_rw_write_lock_fetch((rw), &_v, _tid))) \
 		_rw_wlock_hard((rw), _v, (file), (line)); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 
 /* Release a write lock. */
-#define __rw_wunlock(rw, tid, file, line) do { \
+#define __rw_wunlock(rw, tid, file, line) __extension__ ({ \
 	uintptr_t _v = (uintptr_t)(tid); \
 	\
 	if (__predict_false(LOCKSTAT_PROFILE_ENABLED(rw__release) || \
 	    !_rw_write_unlock_fetch((rw), &_v))) \
 		_rw_wunlock_hard((rw), _v, (file), (line)); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 
 /*
  * Function prototypes.  Routines that start with _ are not part of the
@@ -231,12 +233,13 @@ void __rw_assert(const volatile uintptr_t *c, int what, const char *file,
 #define rw_try_upgrade(rw) _rw_try_upgrade((rw), LOCK_FILE, LOCK_LINE)
 #define rw_try_wlock(rw) _rw_try_wlock((rw), LOCK_FILE, LOCK_LINE)
 #define rw_downgrade(rw) _rw_downgrade((rw), LOCK_FILE, LOCK_LINE)
-#define rw_unlock(rw) do { \
+#define rw_unlock(rw) __extension__ ({ \
 	if (rw_wowned(rw)) \
 		rw_wunlock(rw); \
 	else \
 		rw_runlock(rw); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 #define rw_sleep(chan, rw, pri, wmesg, timo) \
 	_sleep((chan), &(rw)->lock_object, (pri), (wmesg), \
 	    tick_sbt * (timo), 0, C_HARDCLOCK)

diff --git a/sys/sys/sx.h b/sys/sys/sx.h
--- a/sys/sys/sx.h
+++ b/sys/sys/sx.h
@@ -253,12 +253,13 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
 #define sx_xlocked(sx) \
 	(((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \
 	    (uintptr_t)curthread)
-#define sx_unlock_(sx, file, line) do { \
+#define sx_unlock_(sx, file, line) __extension__ ({ \
 	if (sx_xlocked(sx)) \
 		sx_xunlock_(sx, file, line); \
 	else \
 		sx_sunlock_(sx, file, line); \
-} while (0)
+	(void)0; /* ensure void type for expression */ \
+})
 
 #define sx_unlock(sx) sx_unlock_((sx), LOCK_FILE, LOCK_LINE)
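
rw_unlock() and sx_unlock_() show the same conversion applied to
if/else dispatch macros, which as do/while statements could never
appear in expression position. A userland sketch of that shape
(hypothetical M_UNLOCK macro over pthreads, not from the tree; build
with cc -pthread):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static int recursed;

/* if/else dispatch inside a statement expression, in the shape of
 * rw_unlock() and sx_unlock_(); the trailing (void)0; keeps it void. */
#define M_UNLOCK() __extension__ ({ \
	if (recursed > 0) \
		recursed--; \
	else \
		(void)pthread_mutex_unlock(&m); \
	(void)0; /* ensure void type for expression */ \
})

int
main(void)
{
	bool keep_held = false;

	(void)pthread_mutex_lock(&m);
	/* Legal in expression position thanks to the void type. */
	keep_held ? (void)0 : M_UNLOCK();
	return (0);
}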