Add functions sx_xlock_sig() and sx_slock_sig().

These functions are intended to perform the same actions as sx_xlock() and
sx_slock(), but they use an interruptible sleep, so that the sleep can be
interrupted by external events (such as signal delivery).
In order to support these new features some code restructuring is needed,
but the external API is not affected at all.

Note: use a "void" cast for "int"-returning functions where the result is
ignored, in order to keep tools like Coverity Prevent from whining.
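
For illustration, a minimal caller-side sketch of the new interface; the
foo_* names and the lock itself are made up for the example, the lock is
assumed to have been set up elsewhere with sx_init(), and the error value
is assumed to be the EINTR/ERESTART-style value propagated up from
sleepq_wait_sig():

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/sx.h>

    static struct sx foo_lock;      /* illustrative; sx_init()ed elsewhere */

    static int
    foo_modify(void)
    {
            int error;

            /* Sleep for the lock, but give up if a signal arrives. */
            error = sx_xlock_sig(&foo_lock);
            if (error != 0)
                    return (error);
            /* ... modify data protected by foo_lock ... */
            sx_xunlock(&foo_lock);
            return (0);
    }

    static void
    foo_read(void)
    {

            /*
             * The plain variants still sleep uninterruptibly; their macros
             * now cast the underlying int return value to "void", which is
             * the cast the note above refers to.
             */
            sx_slock(&foo_lock);
            /* ... read data protected by foo_lock ... */
            sx_sunlock(&foo_lock);
    }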

Requested by: rwatson
Tested by: rwatson
Reviewed by: jhb
Approved by: jeff (mentor)
Attilio Rao 2007-05-31 09:14:48 +00:00
parent 345a0942e9
commit f9819486e5
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=170149
3 changed files with 154 additions and 92 deletions

sys/kern/kern_sx.c

@@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/lock_profile.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
@@ -191,18 +190,23 @@ sx_destroy(struct sx *sx)
lock_destroy(&sx->lock_object);
}
void
_sx_slock(struct sx *sx, const char *file, int line)
int
_sx_slock(struct sx *sx, int opts, const char *file, int line)
{
int error = 0;
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_slock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER, file, line);
__sx_slock(sx, file, line);
LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
WITNESS_LOCK(&sx->lock_object, 0, file, line);
curthread->td_locks++;
error = __sx_slock(sx, opts, file, line);
if (!error) {
LOCK_LOG_LOCK("SLOCK", &sx->lock_object, 0, 0, file, line);
WITNESS_LOCK(&sx->lock_object, 0, file, line);
curthread->td_locks++;
}
return (error);
}
int
@@ -225,19 +229,25 @@ _sx_try_slock(struct sx *sx, const char *file, int line)
return (0);
}
void
_sx_xlock(struct sx *sx, const char *file, int line)
int
_sx_xlock(struct sx *sx, int opts, const char *file, int line)
{
int error = 0;
MPASS(curthread != NULL);
KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
("sx_xlock() of destroyed sx @ %s:%d", file, line));
WITNESS_CHECKORDER(&sx->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file,
line);
__sx_xlock(sx, curthread, file, line);
LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse, file, line);
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
error = __sx_xlock(sx, curthread, opts, file, line);
if (!error) {
LOCK_LOG_LOCK("XLOCK", &sx->lock_object, 0, sx->sx_recurse,
file, line);
WITNESS_LOCK(&sx->lock_object, LOP_EXCLUSIVE, file, line);
curthread->td_locks++;
}
return (error);
}
int
@@ -394,15 +404,16 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
* that ideally this would be a static function, but it needs to be
* accessible from at least sx.h.
*/
void
_sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
int
_sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
int line)
{
GIANT_DECLARE;
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
#endif
uintptr_t x;
int contested = 0;
int contested = 0, error = 0;
uint64_t waitstart = 0;
/* If we already hold an exclusive lock, then recurse. */
@@ -414,7 +425,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p recursing", __func__, sx);
return;
return (0);
}
lock_profile_obtain_lock_failed(&(sx)->lock_object,
&contested, &waitstart);
@@ -528,17 +539,30 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX, SQ_EXCLUSIVE_QUEUE);
sleepq_wait(&sx->lock_object);
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_EXCLUSIVE_QUEUE);
if (!(opts & SX_INTERRUPTIBLE))
sleepq_wait(&sx->lock_object);
else
error = sleepq_wait_sig(&sx->lock_object);
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK,
"%s: interruptible sleep by %p suspended by signal",
__func__, sx);
break;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
}
GIANT_RESTORE();
lock_profile_obtain_lock_success(&(sx)->lock_object, contested,
waitstart, file, line);
if (!error)
lock_profile_obtain_lock_success(&(sx)->lock_object, contested,
waitstart, file, line);
return (error);
}
/*
@@ -598,8 +622,8 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
* that ideally this would be a static function, but it needs to be
* accessible from at least sx.h.
*/
void
_sx_slock_hard(struct sx *sx, const char *file, int line)
int
_sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
{
GIANT_DECLARE;
#ifdef ADAPTIVE_SX
@@ -607,7 +631,7 @@ _sx_slock_hard(struct sx *sx, const char *file, int line)
#endif
uintptr_t x;
uint64_t waitstart = 0;
int contested = 0;
int contested = 0, error = 0;
/*
* As with rwlocks, we don't make any attempt to try to block
* shared locks once there is an exclusive waiter.
@@ -729,15 +753,27 @@ _sx_slock_hard(struct sx *sx, const char *file, int line)
GIANT_SAVE();
sleepq_add(&sx->lock_object, NULL, sx->lock_object.lo_name,
SLEEPQ_SX, SQ_SHARED_QUEUE);
sleepq_wait(&sx->lock_object);
SLEEPQ_SX | ((opts & SX_INTERRUPTIBLE) ?
SLEEPQ_INTERRUPTIBLE : 0), SQ_SHARED_QUEUE);
if (!(opts & SX_INTERRUPTIBLE))
sleepq_wait(&sx->lock_object);
else
error = sleepq_wait_sig(&sx->lock_object);
if (error) {
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK,
"%s: interruptible sleep by %p suspended by signal",
__func__, sx);
break;
}
if (LOCK_LOG_TEST(&sx->lock_object, 0))
CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
__func__, sx);
}
GIANT_RESTORE();
return (error);
}
/*

sys/sys/sx.h

@@ -34,6 +34,7 @@
#include <sys/_lock.h>
#include <sys/_sx.h>
#include <sys/lock_profile.h>
#ifdef _KERNEL
#include <machine/atomic.h>
@@ -90,61 +91,6 @@
#ifdef _KERNEL
/*
* Full lock operations that are suitable to be inlined in non-debug kernels.
* If the lock can't be acquired or released trivially then the work is
* deferred to 'tougher' functions.
*/
/* Acquire an exclusive lock. */
#define __sx_xlock(sx, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
if (!atomic_cmpset_acq_ptr(&(sx)->sx_lock, SX_LOCK_UNLOCKED, \
_tid)) \
_sx_xlock_hard((sx), _tid, (file), (line)); \
else \
lock_profile_obtain_lock_success(&(sx)->lock_object, 0, \
0, (file), (line)); \
} while (0)
/* Release an exclusive lock. */
#define __sx_xunlock(sx, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
if (!atomic_cmpset_rel_ptr(&(sx)->sx_lock, _tid, \
SX_LOCK_UNLOCKED)) \
_sx_xunlock_hard((sx), _tid, (file), (line)); \
} while (0)
/* Acquire a shared lock. */
#define __sx_slock(sx, file, line) do { \
uintptr_t x = (sx)->sx_lock; \
\
if (!(x & SX_LOCK_SHARED) || \
!atomic_cmpset_acq_ptr(&(sx)->sx_lock, x, \
x + SX_ONE_SHARER)) \
_sx_slock_hard((sx), (file), (line)); \
else \
lock_profile_obtain_lock_success(&(sx)->lock_object, 0, \
0, (file), (line)); \
} while (0)
/*
* Release a shared lock. We can just drop a single shared lock so
* long as we aren't trying to drop the last shared lock when other
* threads are waiting for an exclusive lock. This takes advantage of
* the fact that an unlocked lock is encoded as a shared lock with a
* count of 0.
*/
#define __sx_sunlock(sx, file, line) do { \
uintptr_t x = (sx)->sx_lock; \
\
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) || \
!atomic_cmpset_ptr(&(sx)->sx_lock, x, x - SX_ONE_SHARER)) \
_sx_sunlock_hard((sx), (file), (line)); \
} while (0)
/*
* Function prototipes. Routines that start with an underscore are not part
* of the public interface and are wrappered with a macro.
@@ -153,17 +99,17 @@ void sx_sysinit(void *arg);
#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0)
void sx_init_flags(struct sx *sx, const char *description, int opts);
void sx_destroy(struct sx *sx);
void _sx_slock(struct sx *sx, const char *file, int line);
void _sx_xlock(struct sx *sx, const char *file, int line);
int _sx_slock(struct sx *sx, int opts, const char *file, int line);
int _sx_xlock(struct sx *sx, int opts, const char *file, int line);
int _sx_try_slock(struct sx *sx, const char *file, int line);
int _sx_try_xlock(struct sx *sx, const char *file, int line);
void _sx_sunlock(struct sx *sx, const char *file, int line);
void _sx_xunlock(struct sx *sx, const char *file, int line);
int _sx_try_upgrade(struct sx *sx, const char *file, int line);
void _sx_downgrade(struct sx *sx, const char *file, int line);
void _sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
line);
void _sx_slock_hard(struct sx *sx, const char *file, int line);
int _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts,
const char *file, int line);
int _sx_slock_hard(struct sx *sx, int opts, const char *file, int line);
void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
line);
void _sx_sunlock_hard(struct sx *sx, const char *file, int line);
@@ -189,6 +135,73 @@ struct sx_args {
SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
sx_destroy, (sxa))
/*
* Full lock operations that are suitable to be inlined in non-debug kernels.
* If the lock can't be acquired or released trivially then the work is
* deferred to 'tougher' functions.
*/
/* Acquire an exclusive lock. */
static __inline int
__sx_xlock(struct sx *sx, struct thread *td, int opts, const char *file,
int line)
{
uintptr_t tid = (uintptr_t)td;
int error = 0;
if (!atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
error = _sx_xlock_hard(sx, tid, opts, file, line);
else
lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file,
line);
return (error);
}
/* Release an exclusive lock. */
static __inline void
__sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
{
uintptr_t tid = (uintptr_t)td;
if (!atomic_cmpset_rel_ptr(&sx->sx_lock, tid, SX_LOCK_UNLOCKED))
_sx_xunlock_hard(sx, tid, file, line);
}
/* Acquire a shared lock. */
static __inline int
__sx_slock(struct sx *sx, int opts, const char *file, int line)
{
uintptr_t x = sx->sx_lock;
int error = 0;
if (!(x & SX_LOCK_SHARED) ||
!atomic_cmpset_acq_ptr(&sx->sx_lock, x, x + SX_ONE_SHARER))
error = _sx_slock_hard(sx, opts, file, line);
else
lock_profile_obtain_lock_success(&sx->lock_object, 0, 0, file,
line);
return (error);
}
/*
* Release a shared lock. We can just drop a single shared lock so
* long as we aren't trying to drop the last shared lock when other
* threads are waiting for an exclusive lock. This takes advantage of
* the fact that an unlocked lock is encoded as a shared lock with a
* count of 0.
*/
static __inline void
__sx_sunlock(struct sx *sx, const char *file, int line)
{
uintptr_t x = sx->sx_lock;
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) ||
!atomic_cmpset_ptr(&sx->sx_lock, x, x - SX_ONE_SHARER))
_sx_sunlock_hard(sx, file, line);
}
/*
* Public interface for lock operations.
*/
@@ -196,16 +209,24 @@ struct sx_args {
#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
#endif
#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE)
#define sx_xlock(sx) _sx_xlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_xlock(sx) (void)_sx_xlock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_xlock_sig(sx) \
_sx_xlock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) _sx_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) (void)_sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_slock_sig(sx) \
_sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#else
#define sx_xlock(sx) \
__sx_xlock((sx), curthread, LOCK_FILE, LOCK_LINE)
(void)__sx_xlock((sx), curthread, 0, LOCK_FILE, LOCK_LINE)
#define sx_xlock_sig(sx) \
__sx_xlock((sx), curthread, SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) \
__sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) __sx_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) (void)__sx_slock((sx), 0, LOCK_FILE, LOCK_LINE)
#define sx_slock_sig(sx) \
__sx_slock((sx), SX_INTERRUPTIBLE, LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx) __sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */
#define sx_try_slock(sx) _sx_try_slock((sx), LOCK_FILE, LOCK_LINE)
@@ -245,6 +266,11 @@ struct sx_args {
#define SX_ADAPTIVESPIN 0x10
#define SX_RECURSE 0x20
/*
* Options passed to sx_*lock_hard().
*/
#define SX_INTERRUPTIBLE 0x40
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define SA_LOCKED LA_LOCKED
#define SA_SLOCKED LA_SLOCKED

sys/vm/vm_map.c

@@ -423,7 +423,7 @@ _vm_map_lock(vm_map_t map, const char *file, int line)
if (map->system_map)
_mtx_lock_flags(&map->system_mtx, 0, file, line);
else
_sx_xlock(&map->lock, file, line);
(void)_sx_xlock(&map->lock, 0, file, line);
map->timestamp++;
}
@@ -444,7 +444,7 @@ _vm_map_lock_read(vm_map_t map, const char *file, int line)
if (map->system_map)
_mtx_lock_flags(&map->system_mtx, 0, file, line);
else
_sx_xlock(&map->lock, file, line);
(void)_sx_xlock(&map->lock, 0, file, line);
}
void
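
The vm_map callers above deliberately keep the old blocking behaviour,
passing 0 for the options and discarding the return value. Purely as an
illustrative sketch (not something this commit adds), a subsystem that
wanted to expose the interruptible behaviour could forward the error
instead, along the lines of the hypothetical helper below, which would sit
next to _vm_map_lock() in vm_map.c and reuse its includes and types:

    /*
     * Hypothetical: interruptible flavour of _vm_map_lock().  System
     * maps keep using the mutex, which blocks as before.
     */
    static int
    _vm_map_lock_sig(vm_map_t map, const char *file, int line)
    {
            int error = 0;

            if (map->system_map)
                    _mtx_lock_flags(&map->system_mtx, 0, file, line);
            else
                    error = _sx_xlock(&map->lock, SX_INTERRUPTIBLE, file, line);
            if (error == 0)
                    map->timestamp++;
            return (error);
    }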