When releasing a read/shared lock we need to use a write memory barrier
in order to avoid CPU instruction reordering on architectures that do
not have strongly ordered writes.

Diagnosed by:	fabio
Reviewed by:	jhb
Tested by:	Giovanni Trematerra
		<giovanni dot trematerra at gmail dot com>
Attilio Rao 2009-09-30 13:26:31 +00:00
parent 78edb09e67
commit ddce63ca73
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=197643
4 changed files with 9 additions and 22 deletions
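
For context on the primitive being swapped in: atomic_cmpset_rel_ptr() is the atomic(9) compare-and-set with release semantics, while plain atomic_cmpset_ptr() guarantees no ordering. The following standalone C11 sketch (illustrative only, not part of this commit; the lock layout and protected_data are invented) shows the reader-unlock path and why release ordering matters on weakly ordered CPUs:

/*
 * Standalone C11 sketch, not kernel code: a reader count in "lock"
 * and a datum that is only read while the shared lock is held.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t lock;		/* number of readers, 0 == unlocked */
static int protected_data;		/* read only inside the critical section */

int
read_and_unlock(void)
{
	int v = protected_data;		/* load done while the lock is held */
	uintptr_t x = atomic_load_explicit(&lock, memory_order_relaxed);

	/*
	 * Roughly what atomic_cmpset_rel_ptr() provides: a compare-and-set
	 * with release semantics.  If this CAS were relaxed, a weakly
	 * ordered CPU could defer the load of protected_data until after
	 * the lock word has been decremented, i.e. until a writer may
	 * already own the lock and be modifying the data.
	 */
	while (!atomic_compare_exchange_weak_explicit(&lock, &x, x - 1,
	    memory_order_release, memory_order_relaxed))
		;			/* x was refreshed by the failed CAS; retry */
	return (v);
}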

sys/kern/kern_rwlock.c

@@ -541,7 +541,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
 		 */
 		x = rw->rw_lock;
 		if (RW_READERS(x) > 1) {
-			if (atomic_cmpset_ptr(&rw->rw_lock, x,
+			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
 			    x - RW_ONE_READER)) {
 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -559,7 +559,8 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
 		if (!(x & RW_LOCK_WAITERS)) {
 			MPASS((x & ~RW_LOCK_WRITE_SPINNER) ==
 			    RW_READERS_LOCK(1));
-			if (atomic_cmpset_ptr(&rw->rw_lock, x, RW_UNLOCKED)) {
+			if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
+			    RW_UNLOCKED)) {
 				if (LOCK_LOG_TEST(&rw->lock_object, 0))
 					CTR2(KTR_LOCK, "%s: %p last succeeded",
 					    __func__, rw);
@@ -597,7 +598,7 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
 			x |= (v & RW_LOCK_READ_WAITERS);
 		} else
 			queue = TS_SHARED_QUEUE;
-		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
+		if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
 		    x)) {
 			turnstile_chain_unlock(&rw->lock_object);
 			continue;

sys/kern/kern_sx.c

@@ -931,7 +931,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 		 * so, just drop one and return.
 		 */
 		if (SX_SHARERS(x) > 1) {
-			if (atomic_cmpset_ptr(&sx->sx_lock, x,
+			if (atomic_cmpset_rel_ptr(&sx->sx_lock, x,
 			    x - SX_ONE_SHARER)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR4(KTR_LOCK,
@@ -949,8 +949,8 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 		 */
 		if (!(x & SX_LOCK_EXCLUSIVE_WAITERS)) {
 			MPASS(x == SX_SHARERS_LOCK(1));
-			if (atomic_cmpset_ptr(&sx->sx_lock, SX_SHARERS_LOCK(1),
-			    SX_LOCK_UNLOCKED)) {
+			if (atomic_cmpset_rel_ptr(&sx->sx_lock,
+			    SX_SHARERS_LOCK(1), SX_LOCK_UNLOCKED)) {
 				if (LOCK_LOG_TEST(&sx->lock_object, 0))
 					CTR2(KTR_LOCK, "%s: %p last succeeded",
 					    __func__, sx);
@@ -973,7 +973,7 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 		 * Note that the state of the lock could have changed,
 		 * so if it fails loop back and retry.
 		 */
-		if (!atomic_cmpset_ptr(&sx->sx_lock,
+		if (!atomic_cmpset_rel_ptr(&sx->sx_lock,
 		    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
 		    SX_LOCK_UNLOCKED)) {
 			sleepq_release(&sx->lock_object);

sys/sys/rwlock.h

@@ -55,13 +55,6 @@
  *
  * When the lock is not locked by any thread, it is encoded as a read lock
  * with zero waiters.
- *
- * A note about memory barriers.  Write locks need to use the same memory
- * barriers as mutexes: _acq when acquiring a write lock and _rel when
- * releasing a write lock.  Read locks also need to use an _acq barrier when
- * acquiring a read lock.  However, since read locks do not update any
- * locked data (modulo bugs of course), no memory barrier is needed when
- * releasing a read lock.
  */
 
 #define	RW_LOCK_READ		0x01

sys/sys/sx.h

@@ -63,13 +63,6 @@
  *
  * When the lock is not locked by any thread, it is encoded as a
  * shared lock with zero waiters.
- *
- * A note about memory barriers.  Exclusive locks need to use the same
- * memory barriers as mutexes: _acq when acquiring an exclusive lock
- * and _rel when releasing an exclusive lock.  On the other side,
- * shared lock needs to use an _acq barrier when acquiring the lock
- * but, since they don't update any locked data, no memory barrier is
- * needed when releasing a shared lock.
  */
 
 #define	SX_LOCK_SHARED			0x01
@@ -200,7 +193,7 @@ __sx_sunlock(struct sx *sx, const char *file, int line)
 	uintptr_t x = sx->sx_lock;
 
 	if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
-	    !atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
+	    !atomic_cmpset_rel_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
 		_sx_sunlock_hard(sx, file, line);
 }
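
The header comments removed above argued that, because readers never modify the protected data, no barrier is needed when a shared lock is released. The flaw is in the ordering of the reader's own loads: without release semantics on the unlock, those loads can be performed after the lock word has already been cleared, by which time a writer may legitimately hold the lock and be changing the data. Below is a matching writer-side sketch (again plain C11 with invented names, not the kernel API) whose acquire CAS pairs with the release CAS in the earlier reader sketch, giving the happens-before edge that keeps the reader's loads ahead of the writer's stores:

/*
 * Standalone C11 sketch, not kernel code.  Same toy state as the reader
 * sketch: in this simplification the writer only takes the lock when the
 * reader count is 0, and stores 1 to mark it write-locked.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t lock;		/* 0 == unlocked, 1 == write-locked */
static int protected_data;

void
write_locked_update(void)
{
	uintptr_t expected = 0;

	/*
	 * Acquire CAS: pairs with the reader's release CAS, so everything
	 * the last reader did before unlocking happens-before this point.
	 */
	while (!atomic_compare_exchange_weak_explicit(&lock, &expected, 1,
	    memory_order_acquire, memory_order_relaxed))
		expected = 0;		/* lock still held; retry */

	protected_data++;		/* the last reader's loads cannot be
					   reordered past its release, so they
					   cannot observe this store */

	/* Drop the write lock, publishing the update. */
	atomic_store_explicit(&lock, 0, memory_order_release);
}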