- Placing the 'volatile' on the right side of the * in the td_lock
  declaration removes the need for __DEVOLATILE().

Pointed out by:	tegge
parent bca072af62
commit 710eacdc5f
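The type-level difference is why the casts go away: 'volatile struct mtx *td_lock' declares a pointer to a volatile-qualified mutex, so every load of td_lock yields a volatile-qualified pointer that must be stripped with __DEVOLATILE() before it can be handed to code expecting a plain 'struct mtx *'. 'struct mtx *volatile td_lock' makes the pointer itself volatile, which is what td_lock actually needs, since other code swaps the pointer out from under the thread (e.g. thread_lock_block() pointing it at blocked_lock); the loaded value then already has type 'struct mtx *'. A minimal sketch of the two forms follows; it is not taken from the tree, and the struct names, take_lock(), and demo() are made up for illustration:

#include <stdint.h>

struct mtx;				/* opaque for the sketch */

struct thread_a {
	volatile struct mtx *td_lock;	/* pointer to volatile struct mtx */
};

struct thread_b {
	struct mtx *volatile td_lock;	/* volatile pointer to struct mtx */
};

/* Stand-in for a routine that wants an unqualified pointer. */
static void
take_lock(struct mtx *m)
{
	(void)m;
}

static void
demo(struct thread_a *a, struct thread_b *b)
{
	/*
	 * take_lock(a->td_lock) would discard the volatile qualifier;
	 * __DEVOLATILE() hides the equivalent of this uintptr_t round trip.
	 */
	take_lock((struct mtx *)(uintptr_t)a->td_lock);

	/* The load of b->td_lock is volatile, but its type is plain. */
	take_lock(b->td_lock);
}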
@@ -504,7 +504,7 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
 	for (;;) {
 retry:
 		spinlock_enter();
-		m = __DEVOLATILE(struct mtx *, td->td_lock);
+		m = td->td_lock;
 		WITNESS_CHECKORDER(&m->lock_object,
 		    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line);
 		while (!_obtain_lock(m, tid)) {
@@ -542,7 +542,7 @@ thread_lock_block(struct thread *td)
 
 	spinlock_enter();
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = __DEVOLATILE(struct mtx *, td->td_lock);
+	lock = td->td_lock;
 	td->td_lock = &blocked_lock;
 	mtx_unlock_spin(lock);
@@ -565,7 +565,7 @@ thread_lock_set(struct thread *td, struct mtx *new)
 
 	mtx_assert(new, MA_OWNED);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = __DEVOLATILE(struct mtx *, td->td_lock);
+	lock = td->td_lock;
 	td->td_lock = new;
 	mtx_unlock_spin(lock);
 }
@@ -903,7 +903,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 #endif
 
 	/* I feel sleepy */
-	cpu_switch(td, newtd, __DEVOLATILE(struct mtx *, td->td_lock));
+	cpu_switch(td, newtd, td->td_lock);
 	/*
 	 * Where am I? What year is it?
 	 * We are in the same thread that went to sleep above,
@@ -1487,7 +1487,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
 #endif
 
-	cpu_switch(td, newtd, __DEVOLATILE(struct mtx *, td->td_lock));
+	cpu_switch(td, newtd, td->td_lock);
 #ifdef HWPMC_HOOKS
 	if (PMC_PROC_IS_USING_PMCS(td->td_proc))
 		PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
@@ -132,7 +132,7 @@ void _thread_lock_flags(struct thread *, int, const char *, int);
 #define	thread_lock_flags(tdp, opt)					\
 	_thread_lock_flags((tdp), (opt), __FILE__, __LINE__)
 #define	thread_unlock(tdp)						\
-	mtx_unlock_spin(__DEVOLATILE(struct mtx *, (tdp)->td_lock))
+	mtx_unlock_spin((tdp)->td_lock)
 
 /*
  * We define our machine-independent (unoptimized) mutex micro-operations
@@ -202,7 +202,7 @@ struct mqueue_notifier;
  * Thread context. Processes may have multiple threads.
  */
 struct thread {
-	volatile struct mtx *td_lock;	/* replaces sched lock */
+	struct mtx	*volatile td_lock; /* replaces sched lock */
	struct proc	*td_proc;	/* (*) Associated process. */
	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
@@ -306,7 +306,7 @@ void thread_lock_unblock(struct thread *, struct mtx *);
 void	thread_lock_set(struct thread *, struct mtx *);
 #define	THREAD_LOCK_ASSERT(td, type)					\
 do {									\
-	struct mtx *__m = __DEVOLATILE(struct mtx *, (td)->td_lock);	\
+	struct mtx *__m = (td)->td_lock;				\
 	if (__m != &blocked_lock)					\
 		mtx_assert(__m, (type));				\
 } while (0)
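For context on what is being dropped at each of these sites: __DEVOLATILE() comes from <sys/cdefs.h> and is, roughly, a cast that strips the volatile qualifier by round-tripping through __uintptr_t, along the lines of

#define	__DEVOLATILE(type, var)	((type)(__uintptr_t)(volatile void *)(var))

With the qualifier moved onto the pointer itself, loads of td_lock already have type 'struct mtx *', so the macro is no longer needed at these call sites.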