* Change the scope of ASSERT_ATOMIC_LOAD() from a generic check to a
  pointer-fetching specific operation check.  Consequently, rename the
  operation ASSERT_ATOMIC_LOAD_PTR().
* Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by checking
  alignment directly on the word boundary, for all the given specific
  architectures.  That is a bit too strict for some common cases, but it
  assures safety.
* Add a comment explaining the scope of the macro.
* Add a new stub in the lockmgr specific implementation.

Tested by:	marcel (initial version), marius
Reviewed by:	rwatson, jhb (comment specific review)
Approved by:	re (kib)
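As an aside on the scope change, the short userland sketch below is not part of the commit: the ATOMIC_LOAD_PTR_OK() helper and the two demo structures are invented for illustration. It evaluates the condition the renamed macro tests; on an LP64 machine a plain int field would have satisfied the old generic size check but is rejected by the pointer-fetching specific one.

#include <stdint.h>
#include <stdio.h>

/*
 * Userland copy of the condition ASSERT_ATOMIC_LOAD_PTR() tests: the
 * object must be exactly pointer-sized and sit on a word boundary.
 */
#define	ATOMIC_LOAD_PTR_OK(var)						\
	(sizeof(var) == sizeof(void *) &&				\
	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

/* Hypothetical lock-like structures, used only for this demonstration. */
struct ptr_cookie {
	uintptr_t	pc_lock;	/* pointer-sized, like a lock cookie */
};

struct int_cookie {
	int		ic_lock;	/* would pass a generic size check on
					   LP64, but is not pointer-sized */
};

int
main(void)
{
	struct ptr_cookie pc;
	struct int_cookie ic;

	/* Prints 1: pointer-sized and naturally aligned. */
	printf("uintptr_t field: %d\n", ATOMIC_LOAD_PTR_OK(pc.pc_lock));
	/* Prints 0 on LP64: the field is not pointer-sized. */
	printf("int field:       %d\n", ATOMIC_LOAD_PTR_OK(ic.ic_lock));
	return (0);
}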
commit 353998acc3
parent 8530137252
@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 	int iflags;
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
+	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+	    &lk->lk_lock));
 
 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (flags & LK_CANRECURSE)
@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
-	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-	    __func__, name, &m->mtx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+	    &m->mtx_lock));
 
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */
@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
 
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
-	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-	    __func__, name, &rw->rw_lock));
+	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
+	    &rw->rw_lock));
 
 	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-	ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-	    __func__, description, &sx->sx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
+	    &sx->sx_lock));
 
 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
@@ -89,9 +89,16 @@ extern int maxusers;		/* system tune hint */
 #define	__CTASSERT(x, y)	typedef char __assert ## y[(x) ? 1 : -1]
 #endif
 
-#define	ASSERT_ATOMIC_LOAD(var,msg)				\
-	KASSERT(sizeof(var) <= sizeof(uintptr_t) &&		\
-	    ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary.  For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)				\
+	KASSERT(sizeof(var) == sizeof(void *) &&			\
+	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
  * XXX the hints declarations are even more misplaced than most declarations
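As a side note on the "stronger alignment than necessary" wording in the new comment, the hedged userland sketch below is not part of the commit; the ATOMIC_LOAD_PTR_OK() helper and the packed structure are invented for illustration. It shows a pointer-sized field that fails the word-boundary test even though, on some machines, the load might still happen to be atomic when it does not cross a cache line, which is exactly the over-strictness the comment acknowledges.

#include <stdint.h>
#include <stdio.h>

/* Invented userland stand-in for the condition the kernel macro tests. */
#define	ATOMIC_LOAD_PTR_OK(var)						\
	(sizeof(var) == sizeof(void *) &&				\
	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

/*
 * A deliberately mis-packed layout: the leading byte pushes ml_lock one
 * byte past the word boundary the structure itself is aligned to.
 */
struct misaligned_lock {
	char		ml_pad;
	uintptr_t	ml_lock;
} __attribute__((__packed__, __aligned__(sizeof(uintptr_t))));

int
main(void)
{
	struct misaligned_lock ml;

	/*
	 * Prints 0: the word-boundary test fails.  Depending on the
	 * architecture the load could still happen to be atomic, for
	 * instance when it stays within one cache line, but rejecting
	 * it unconditionally is the safe choice.
	 */
	printf("packed field: %d\n", ATOMIC_LOAD_PTR_OK(ml.ml_lock));
	return (0);
}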