* Change the scope of ASSERT_ATOMIC_LOAD() from a generic check to a
  check specific to pointer-fetching operations.  Consequently, rename
  the operation to ASSERT_ATOMIC_LOAD_PTR().
* Fix the implementation of ASSERT_ATOMIC_LOAD_PTR() by checking
  alignment on the word boundary directly for all the affected
  architectures (a standalone sketch of the check follows the commit
  message).  That's a bit too strict for some common cases, but it
  ensures safety.
* Add a comment explaining the scope of the macro.
* Add a new stub in the lockmgr-specific implementation.

Tested by: marcel (initial version), marius
Reviewed by: rwatson, jhb (comment specific review)
Approved by: re (kib)
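
As a rough userland illustration of the check described above (not part of
the commit; ASSERT_WORD_ALIGNED and struct demo_lock are made-up names), the
word-boundary test simply masks the low bits of the variable's address:

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for the kernel assertion: the field must be
 * pointer-sized and its address must fall on a word boundary.
 */
#define ASSERT_WORD_ALIGNED(var)                                \
        assert(sizeof(var) == sizeof(void *) &&                 \
            ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

struct demo_lock {
        uintptr_t       dl_lock;        /* word-sized lock word */
};

int
main(void)
{
        struct demo_lock dl;

        ASSERT_WORD_ALIGNED(dl.dl_lock);        /* naturally aligned: passes */
        return (0);
}
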
Attilio Rao 2009-08-17 16:17:21 +00:00
parent 8530137252
commit 353998acc3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=196334
5 changed files with 22 additions and 9 deletions

@@ -334,6 +334,9 @@ lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags)
 	int iflags;
 	MPASS((flags & ~LK_INIT_MASK) == 0);
+	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+	    &lk->lk_lock));
 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (flags & LK_CANRECURSE)

@@ -783,8 +783,9 @@ mtx_init(struct mtx *m, const char *name, const char *type, int opts)
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
 	    MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
-	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-	    __func__, name, &m->mtx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+	    &m->mtx_lock));
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */

@@ -174,8 +174,9 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
-	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-	    __func__, name, &rw->rw_lock));
+	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
+	    &rw->rw_lock));
 	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)

@@ -205,8 +205,9 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-	ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-	    __func__, description, &sx->sx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
+	    &sx->sx_lock));
 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)

@@ -89,9 +89,16 @@ extern int maxusers;	/* system tune hint */
 #define	__CTASSERT(x, y)	typedef char __assert ## y[(x) ? 1 : -1]
 #endif
-#define	ASSERT_ATOMIC_LOAD(var,msg)				\
-	KASSERT(sizeof(var) <= sizeof(uintptr_t) &&		\
-	    ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded from memory atomically.
+ *
+ * This assertion enforces stronger alignment than necessary. For example,
+ * on some architectures, atomicity for unaligned loads will depend on
+ * whether or not the load spans multiple cache lines.
+ */
+#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)			\
+	KASSERT(sizeof(var) == sizeof(void *) &&		\
+	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 /*
  * XXX the hints declarations are even more misplaced than most declarations
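
As a complementary hypothetical example (again not FreeBSD code; struct
packed_lock and its fields are invented, and __attribute__((packed)) is a
GCC/Clang extension), here is the kind of layout the stricter word-boundary
check is meant to reject: a pointer-sized lock word forced off its natural
alignment inside a packed structure.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical packed layout: the char member pushes the lock word to offset 1. */
struct packed_lock {
        char            pl_pad;
        uintptr_t       pl_lock;
} __attribute__((packed));

int
main(void)
{
        size_t off = offsetof(struct packed_lock, pl_lock);

        /* The same low-bits test as the macro body, applied to the field offset. */
        printf("lock word at offset %zu, word aligned: %s\n", off,
            (off & (sizeof(void *) - 1)) == 0 ? "yes" : "no");
        return (0);
}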