Consistently use the same value to indicate exclusively-held and
shared-held locks for all the primitives in the lc_lock/lc_unlock
routines.  This fixes the problems introduced in r255747, which
inverted the logic.

Reported by:	many
Tested by:	bdrewery, pho, lme, Adam McDougall, O. Hartmann
Approved by:	re (glebius)
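
For illustration only (this code is not part of the commit, and the
names are hypothetical): the invariant being restored is that
lc_unlock() must report the mode it released using exactly the
encoding lc_lock() consumes, i.e. nonzero for shared, zero for
exclusive.  A standalone toy model of the round trip:

	#include <assert.h>
	#include <stdio.h>

	static char mode;	/* 'S' shared, 'X' exclusive, 0 unheld */

	/* Mirrors lc_lock(): nonzero 'how' acquires shared, zero exclusive. */
	static void
	toy_lock(unsigned long how)
	{
		assert(mode == 0);
		mode = how ? 'S' : 'X';
	}

	/*
	 * Mirrors lc_unlock(): must return the value that makes toy_lock()
	 * reacquire in the mode just released.  r255747 had the two return
	 * values swapped, so a drop/reacquire cycle changed the lock mode.
	 */
	static unsigned long
	toy_unlock(void)
	{
		char released = mode;

		mode = 0;
		return (released == 'S' ? 1 : 0);
	}

	int
	main(void)
	{
		unsigned long how;

		toy_lock(1);		/* acquire shared */
		how = toy_unlock();	/* drop, remembering the mode */
		toy_lock(how);		/* reacquire */
		printf("reacquired %c\n", mode);	/* prints S */
		assert(mode == 'S');
		toy_unlock();
		return (0);
	}

With the swapped returns of r255747, the final assert would fire: the
shared lock would come back exclusive.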
commit cf6b879fad
parent 255c1caae3
Author:	Davide Italiano
Date:	2013-09-22 14:09:07 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=255788

2 changed files with 8 additions and 8 deletions

sys/kern/kern_rwlock.c

@@ -147,9 +147,9 @@ lock_rw(struct lock_object *lock, uintptr_t how)
 
 	rw = (struct rwlock *)lock;
 	if (how)
-		rw_wlock(rw);
-	else
 		rw_rlock(rw);
+	else
+		rw_wlock(rw);
 }
 
 uintptr_t
@@ -161,10 +161,10 @@ unlock_rw(struct lock_object *lock)
 	rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
 	if (rw->rw_lock & RW_LOCK_READ) {
 		rw_runlock(rw);
-		return (0);
+		return (1);
 	} else {
 		rw_wunlock(rw);
-		return (1);
+		return (0);
 	}
 }

sys/kern/kern_sx.c

@@ -162,9 +162,9 @@ lock_sx(struct lock_object *lock, uintptr_t how)
 
 	sx = (struct sx *)lock;
 	if (how)
-		sx_xlock(sx);
-	else
 		sx_slock(sx);
+	else
+		sx_xlock(sx);
 }
 
 uintptr_t
@@ -176,10 +176,10 @@ unlock_sx(struct lock_object *lock)
 	sx_assert(sx, SA_LOCKED | SA_NOTRECURSED);
 	if (sx_xlocked(sx)) {
 		sx_xunlock(sx);
-		return (1);
+		return (0);
 	} else {
 		sx_sunlock(sx);
-		return (0);
+		return (1);
 	}
 }
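
Why the return values matter: kernel code that drops an arbitrary lock
and later reacquires it feeds lc_unlock()'s return value straight back
into lc_lock().  A minimal sketch of that round trip, assuming only the
lock_class interface from sys/lock.h (drop_and_reacquire() is a
hypothetical name, not a function in the tree):

	static void
	drop_and_reacquire(struct lock_object *lock)
	{
		struct lock_class *class;
		uintptr_t how;

		class = LOCK_CLASS(lock);
		how = class->lc_unlock(lock);	/* nonzero if held shared */
		/* ... sleep or yield here ... */
		class->lc_lock(lock, how);	/* reacquire in the original mode */
	}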