Optimize sx locks to use simple atomic operations for the common cases of

obtaining and releasing shared and exclusive locks.  The algorithms for
manipulating the lock cookie are very similar to that of rwlocks.  This patch
also adds support for exclusive locks using the same algorithm as mutexes.

A new sx_init_flags() function has been added so that optional flags can be
specified to alter a given lock's behavior.  The flags include SX_DUPOK,
SX_NOWITNESS, SX_NOPROFILE, and SX_QUIET, which are all identical in nature
to the similar flags for mutexes.

Adaptive spinning on select locks may be enabled by enabling the
ADAPTIVE_SX kernel option.  Only locks initialized with the SX_ADAPTIVESPIN
flag via sx_init_flags() will adaptively spin.

The common cases for sx_slock(), sx_sunlock(), sx_xlock(), and sx_xunlock()
are now performed inline in non-debug kernels.  As a result, <sys/sx.h> now
requires <sys/lock.h> to be included prior to <sys/sx.h>.

The new kernel option SX_NOINLINE can be used to disable the aforementioned
inlining in non-debug kernels.

The size of struct sx has changed, so the kernel ABI is probably greatly
disturbed.

MFC after:	1 month
Submitted by:	attilio
Tested by:	kris, pjd
This commit is contained in:
John Baldwin 2007-03-31 23:23:42 +00:00
parent 511cecafd6
commit 4e7f640dfb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=168191
16 changed files with 998 additions and 334 deletions

View File

@ -1058,6 +1058,7 @@ MLINKS+=sx.9 sx_assert.9 \
sx.9 sx_destroy.9 \
sx.9 sx_downgrade.9 \
sx.9 sx_init.9 \
sx.9 sx_init_flags.9 \
sx.9 sx_slock.9 \
sx.9 sx_sunlock.9 \
sx.9 SX_SYSINIT.9 \

View File

@ -32,6 +32,7 @@
.Sh NAME
.Nm sx ,
.Nm sx_init ,
.Nm sx_init_flags ,
.Nm sx_destroy ,
.Nm sx_slock ,
.Nm sx_xlock ,
@ -54,6 +55,8 @@
.Ft void
.Fn sx_init "struct sx *sx" "const char *description"
.Ft void
.Fn sx_init_flags "struct sx *sx" "const char *description" "int opts"
.Ft void
.Fn sx_destroy "struct sx *sx"
.Ft void
.Fn sx_slock "struct sx *sx"
@ -87,11 +90,14 @@
.Sh DESCRIPTION
Shared/exclusive locks are used to protect data that are read far more often
than they are written.
Mutexes are inherently more efficient than shared/exclusive locks, so
Shared/exclusive locks do not implement priority propagation like mutexes and
reader/writer locks to prevent priority inversions, so
shared/exclusive locks should be used prudently.
.Pp
Shared/exclusive locks are created with
.Fn sx_init ,
Shared/exclusive locks are created with either
.Fn sx_init
or
.Fn sx_init_flags
where
.Fa sx
is a pointer to space for a
@ -100,8 +106,40 @@ and
.Fa description
is a pointer to a null-terminated character string that describes the
shared/exclusive lock.
The
.Fa opts
argument to
.Fn sx_init_flags
specifies a set of optional flags to alter the behavior of
.Fa sx .
It contains one or more of the following flags:
.Bl -tag -width SX_ADAPTIVESPIN
.It Dv SX_ADAPTIVESPIN
If the kernel is compiled with
.Cd "options ADAPTIVE_SX" ,
then lock operations for
.Fa sx
will spin instead of sleeping while an exclusive lock holder is executing on
another CPU.
.It Dv SX_DUPOK
Witness should not log messages about duplicate locks being acquired.
.It Dv SX_NOWITNESS
Instruct
.Xr witness 4
to ignore this lock.
.It Dv SX_NOPROFILE
Do not profile this lock.
.It Dv SX_QUIET
Do not log any operations for this lock via
.Xr ktr 4 .
.El
.Pp
Shared/exclusive locks are destroyed with
.Fn sx_destroy .
The lock
.Fa sx
must not be locked by any thread when it is destroyed.
.Pp
Threads acquire and release a shared lock by calling
.Fn sx_slock
or
@ -155,7 +193,7 @@ function tests
for the assertions specified in
.Fa what ,
and panics if they are not met.
The following assertions are supported:
One of the following assertions must be specified:
.Bl -tag -width ".Dv SX_UNLOCKED"
.It Dv SX_LOCKED
Assert that the current thread has either a shared or an exclusive lock on the
@ -178,6 +216,22 @@ lock pointed to
by the first argument.
.El
.Pp
In addition, one of the following optional assertions may be included with
either an
.Dv SX_LOCKED ,
.Dv SX_SLOCKED ,
or
.Dv SX_XLOCKED
assertion:
.Bl -tag -width ".Dv SX_NOTRECURSED"
.It Dv SX_RECURSED
Assert that the current thread has a recursed lock on
.Fa sx .
.It Dv SX_NOTRECURSED
Assert that the current thread does not have a recursed lock on
.Fa sx .
.El
.Pp
.Fn sx_xlocked
will return non-zero if the current thread holds the exclusive lock;
otherwise, it will return zero.

View File

@ -214,6 +214,12 @@ options NO_ADAPTIVE_RWLOCKS
# to sleep rather than spinning.
options ADAPTIVE_GIANT
# ADAPTIVE_SX changes the behavior of sx locks to spin if the thread
# that currently owns the lock is executing on another CPU. Note that
# in addition to enabling this option, individual sx locks must be
# initialized with the SX_ADAPTIVESPIN flag.
options ADAPTIVE_SX
# MUTEX_NOINLINE forces mutex operations to call functions to perform each
# operation rather than inlining the simple cases. This can be used to
# shrink the size of the kernel text segment. Note that this behavior is
@ -233,6 +239,13 @@ options MUTEX_WAKE_ALL
# and WITNESS options.
options RWLOCK_NOINLINE
# SX_NOINLINE forces sx lock operations to call functions to perform each
# operation rather than inlining the simple cases. This can be used to
# shrink the size of the kernel text segment. Note that this behavior is
# already implied by the INVARIANT_SUPPORT, INVARIANTS, KTR, LOCK_PROFILING,
# and WITNESS options.
options SX_NOINLINE
# SMP Debugging Options:
#
# PREEMPTION allows the threads that are in the kernel to be preempted

View File

@ -59,6 +59,7 @@ NO_SYSCTL_DESCR opt_global.h
# Miscellaneous options.
ADAPTIVE_GIANT opt_adaptive_mutexes.h
ADAPTIVE_SX
ALQ
AUDIT opt_global.h
CODA_COMPAT_5 opt_coda.h
@ -555,6 +556,7 @@ MSIZE opt_global.h
REGRESSION opt_global.h
RESTARTABLE_PANICS opt_global.h
RWLOCK_NOINLINE opt_global.h
SX_NOINLINE opt_global.h
VFS_BIO_DEBUG opt_global.h
# These are VM related options

View File

@ -142,6 +142,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sx.h>

View File

@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/memrange.h>
#include <sys/socket.h>

View File

@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$");
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/kdb.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sx.h>

View File

@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>

View File

@ -1,49 +1,14 @@
#include <sys/param.h>
#include <machine/pcpu.h>
#include <support/debug.h>
#include <support/mrlock.h>
/*
 * Tear down an XFS mrlock.  If the caller still holds the lock
 * exclusively (sx_cnt == -1 encodes an xlock), drop that hold first so
 * that sx_destroy() operates on an unlocked lock.
 */
void
_sx_xfs_destroy(struct sx *sx)
{
	if (sx->sx_cnt == -1) {
		/* Release the lingering exclusive hold before destruction. */
		sx_xunlock(sx);
	}
	sx_destroy(sx);
}
/*
 * Acquire an XFS mrlock: MR_ACCESS requests a shared (reader) lock and
 * MR_UPDATE requests an exclusive (writer) lock.  Any other type is a
 * caller programming error and panics.
 */
void
_sx_xfs_lock(struct sx *sx, int type, const char *file, int line)
{
	if (type == MR_ACCESS)
		_sx_slock(sx, file, line);
	else if (type == MR_UPDATE)
		/*
		 * Fix: MR_UPDATE must acquire the exclusive lock; the old
		 * code released a shared lock (_sx_sunlock) here instead.
		 */
		_sx_xlock(sx, file, line);
	else
		panic("Invalid lock type passed");
}
/*
 * Release an XFS mrlock, dropping whichever flavor of sx lock (exclusive
 * or shared) is currently held.  Panics if the lock is not held at all.
 */
void
_sx_xfs_unlock(struct sx *sx, const char *file, int line)
{
	if (_sx_xfs_xowned(sx)) {
		/* Held exclusively. */
		_sx_xunlock(sx, file, line);
	} else if (_sx_xfs_sowned(sx)) {
		/* Held shared. */
		_sx_sunlock(sx, file, line);
	} else {
		panic("lock is not locked");
	}
}
int
ismrlocked(mrlock_t *mrp, int type)
{
if (type == MR_ACCESS)
return _sx_xfs_sowned(mrp); /* Read lock */
else if (type == MR_UPDATE)
return _sx_xfs_xowned(mrp); /* Write lock */
else if (type == (MR_UPDATE | MR_ACCESS))
return _sx_xfs_sowned(mrp) ||
_sx_xfs_xowned(mrp); /* Any type of lock held */
return (mrp->sx_shrd_wcnt > 0 || mrp->sx_excl_wcnt > 0);
{
sx_assert(mrp, SX_LOCKED);
if (type == MR_UPDATE)
return sx_xlocked(mrp);
return 1;
}

View File

@ -4,62 +4,38 @@
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <support/debug.h>
/*
* Implement mrlocks on FreeBSD that work for XFS.
* Use FreeBSD sx lock and add necessary functions
* if additional functionality is requested
* Map mrlock functions to corresponding equivalents in
* sx.
*/
typedef struct sx mrlock_t;
#define MR_ACCESS 1
#define MR_UPDATE 2
/*
/*
* Compatibility defines, not really used
*/
#define MRLOCK_BARRIER 0x1
#define MRLOCK_ALLOW_EQUAL_PRI 0x8
/*
* mraccessf/mrupdatef take flags to be passed in while sleeping;
* only PLTWAIT is currently supported.
*/
#define mrinit(lock, name) sx_init(lock, name)
#define mrlock_init(lock, type, name, seq) sx_init(lock, name)
#define mrfree(lock) _sx_xfs_destroy(lock)
#define mraccessf(lock, f) sx_slock(lock)
#define mrupdatef(lock, f) sx_xlock(lock)
#define mraccunlock(lock) sx_sunlock(lock)
#define mrtryaccess(lock) sx_try_slock(lock)
#define mrtryupdate(lock) sx_try_xlock(lock)
#define mraccess(mrp) mraccessf(mrp, 0)
#define mrupdate(mrp) mrupdatef(mrp, 0)
#define mrislocked_access(lock) _sx_xfs_xowned(lock)
#define mrislocked_update(lock) _sx_xfs_sowned(lock)
#define mrtrypromote(lock) sx_try_upgrade(lock)
#define mraccess(lock) sx_slock(lock)
#define mrupdate(lock) sx_xlock(lock)
#define mrdemote(lock) sx_downgrade(lock)
#define mrunlock(lock) sx_unlock(lock)
int ismrlocked(mrlock_t *, int);
void _sx_xfs_lock(struct sx *sx, int type, const char *file, int line);
void _sx_xfs_unlock(struct sx *sx, const char *file, int line);
void _sx_xfs_destroy(struct sx *sx);
#define _sx_xfs_xowned(lock) ((lock)->sx_cnt < 0)
#define _sx_xfs_sowned(lock) ((lock)->sx_cnt > 0)
/*
* Functions, not implemented in FreeBSD
*/
#define mrunlock(lock) \
_sx_xfs_unlock(lock, __FILE__, __LINE__)
#define mrlock(lock, type, flags) \
_sx_xfs_lock(lock, type, __FILE__, __LINE__)
#define mrfree(lock) do { \
if (sx_xlocked(lock)) \
sx_xunlock(lock); \
sx_destroy(lock); \
} while (0)
int ismrlocked(mrlock_t *mrp, int type);
#endif /* __XFS_SUPPORT_MRLOCK_H__ */

View File

@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>

File diff suppressed because it is too large Load Diff

View File

@ -66,6 +66,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/priv.h>

43
sys/sys/_sx.h Normal file
View File

@ -0,0 +1,43 @@
/*-
* Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS__SX_H_
#define _SYS__SX_H_
/*
* Shared/exclusive lock main structure definition.
*/
struct sx {
struct lock_object lock_object;	/* Common lock properties (name, flags, witness). */
volatile uintptr_t sx_lock;	/* Lock cookie: owning thread pointer (exclusive) or
				   sharer count (shared), plus low flag bits. */
volatile unsigned sx_recurse;	/* Exclusive-lock recursion depth. */
};
#endif /* !_SYS__SX_H_ */

View File

@ -33,8 +33,9 @@
#define _SYS_SLEEPQUEUE_H_
/*
* Sleep queue interface. Sleep/wakeup and condition variables use a sleep
* queue for the queue of threads blocked on a sleep channel.
* Sleep queue interface. Sleep/wakeup, condition variables, and sx
* locks use a sleep queue for the queue of threads blocked on a sleep
* channel.
*
* A thread calls sleepq_lock() to lock the sleep queue chain associated
* with a given wait channel. A thread can then call call sleepq_add() to
@ -85,6 +86,7 @@ struct thread;
#define SLEEPQ_SLEEP 0x00 /* Used by sleep/wakeup. */
#define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */
#define SLEEPQ_PAUSE 0x02 /* Used by pause. */
#define SLEEPQ_SX 0x03 /* Used by an sx lock. */
#define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */
void init_sleepqueues(void);

View File

@ -1,5 +1,7 @@
/*-
* Copyright (C) 2001 Jason Evans <jasone@freebsd.org>. All rights reserved.
* Copyright (c) 2007 Attilio Rao <attilio@freebsd.org>
* Copyright (c) 2001 Jason Evans <jasone@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -30,24 +32,132 @@
#ifndef _SYS_SX_H_
#define _SYS_SX_H_
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/condvar.h> /* XXX */
#include <sys/_sx.h>
struct sx {
struct lock_object lock_object; /* Common lock properties. */
struct mtx *sx_lock; /* General protection lock. */
int sx_cnt; /* -1: xlock, > 0: slock count. */
struct cv sx_shrd_cv; /* slock waiters. */
int sx_shrd_wcnt; /* Number of slock waiters. */
struct cv sx_excl_cv; /* xlock waiters. */
int sx_excl_wcnt; /* Number of xlock waiters. */
struct thread *sx_xholder; /* Thread presently holding xlock. */
};
#ifdef _KERNEL
#include <machine/atomic.h>
#endif
/*
* In general, the sx locks and rwlocks use very similar algorithms.
* The main difference in the implementations is how threads are
* blocked when a lock is unavailable. For this, sx locks use sleep
* queues which do not support priority propagation, and rwlocks use
* turnstiles which do.
*
* The sx_lock field consists of several fields. The low bit
* indicates if the lock is locked with a shared or exclusive lock. A
* value of 0 indicates an exclusive lock, and a value of 1 indicates
* a shared lock. Bit 1 is a boolean indicating if there are any
* threads waiting for a shared lock. Bit 2 is a boolean indicating
* if there are any threads waiting for an exclusive lock. Bit 3 is a
* boolean indicating if an exclusive lock is recursively held. The
* rest of the variable's definition is dependent on the value of the
* first bit. For an exclusive lock, it is a pointer to the thread
* holding the lock, similar to the mtx_lock field of mutexes. For
* shared locks, it is a count of read locks that are held.
*
* When the lock is not locked by any thread, it is encoded as a
* shared lock with zero waiters.
*
* A note about memory barriers. Exclusive locks need to use the same
* memory barriers as mutexes: _acq when acquiring an exclusive lock
* and _rel when releasing an exclusive lock. On the other side,
* shared lock needs to use an _acq barrier when acquiring the lock
* but, since they don't update any locked data, no memory barrier is
* needed when releasing a shared lock.
*/
#define SX_LOCK_SHARED 0x01
#define SX_LOCK_SHARED_WAITERS 0x02
#define SX_LOCK_EXCLUSIVE_WAITERS 0x04
#define SX_LOCK_RECURSED 0x08
#define SX_LOCK_FLAGMASK \
(SX_LOCK_SHARED | SX_LOCK_SHARED_WAITERS | \
SX_LOCK_EXCLUSIVE_WAITERS | SX_LOCK_RECURSED)
#define SX_OWNER(x) ((x) & ~SX_LOCK_FLAGMASK)
#define SX_SHARERS_SHIFT 4
#define SX_SHARERS(x) (SX_OWNER(x) >> SX_SHARERS_SHIFT)
#define SX_SHARERS_LOCK(x) \
((x) << SX_SHARERS_SHIFT | SX_LOCK_SHARED)
#define SX_ONE_SHARER (1 << SX_SHARERS_SHIFT)
#define SX_LOCK_UNLOCKED SX_SHARERS_LOCK(0)
#ifdef _KERNEL
/*
* Full lock operations that are suitable to be inlined in non-debug kernels.
* If the lock can't be acquired or released trivially then the work is
* deferred to 'tougher' functions.
*/
/* Acquire an exclusive lock. */
#define __sx_xlock(sx, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
int contested = 0; \
uint64_t waitstart = 0; \
\
if (!atomic_cmpset_acq_ptr(&(sx)->sx_lock, SX_LOCK_UNLOCKED, \
_tid)) { \
lock_profile_obtain_lock_failed(&(sx)->lock_object, \
&contested, &waitstart); \
_sx_xlock_hard((sx), _tid, (file), (line)); \
} \
lock_profile_obtain_lock_success(&(sx)->lock_object, contested, \
waitstart, (file), (line)); \
} while (0)
/* Release an exclusive lock. */
#define __sx_xunlock(sx, tid, file, line) do { \
uintptr_t _tid = (uintptr_t)(tid); \
\
if (!atomic_cmpset_rel_ptr(&(sx)->sx_lock, _tid, \
SX_LOCK_UNLOCKED)) \
_sx_xunlock_hard((sx), _tid, (file), (line)); \
} while (0)
/* Acquire a shared lock. */
#define __sx_slock(sx, file, line) do { \
uintptr_t x = (sx)->sx_lock; \
int contested = 0; \
uint64_t waitstart = 0; \
\
if (!(x & SX_LOCK_SHARED) || \
!atomic_cmpset_acq_ptr(&(sx)->sx_lock, x, \
x + SX_ONE_SHARER)) { \
lock_profile_obtain_lock_failed(&(sx)->lock_object, \
&contested, &waitstart); \
_sx_slock_hard((sx), (file), (line)); \
} \
lock_profile_obtain_lock_success(&(sx)->lock_object, contested, \
waitstart, (file), (line)); \
} while (0)
/*
* Release a shared lock. We can just drop a single shared lock so
* long as we aren't trying to drop the last shared lock when other
* threads are waiting for an exclusive lock. This takes advantage of
* the fact that an unlocked lock is encoded as a shared lock with a
* count of 0.
*/
#define __sx_sunlock(sx, file, line) do { \
uintptr_t x = (sx)->sx_lock; \
\
if (x == (SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS) || \
!atomic_cmpset_ptr(&(sx)->sx_lock, x, x - SX_ONE_SHARER)) \
_sx_sunlock_hard((sx), (file), (line)); \
} while (0)
/*
* Function prototypes. Routines that start with an underscore are not part
* of the public interface and are wrapped with a macro.
*/
void sx_sysinit(void *arg);
void sx_init(struct sx *sx, const char *description);
#define sx_init(sx, desc) sx_init_flags((sx), (desc), 0)
void sx_init_flags(struct sx *sx, const char *description, int opts);
void sx_destroy(struct sx *sx);
void _sx_slock(struct sx *sx, const char *file, int line);
void _sx_xlock(struct sx *sx, const char *file, int line);
@ -57,6 +167,12 @@ void _sx_sunlock(struct sx *sx, const char *file, int line);
void _sx_xunlock(struct sx *sx, const char *file, int line);
int _sx_try_upgrade(struct sx *sx, const char *file, int line);
void _sx_downgrade(struct sx *sx, const char *file, int line);
void _sx_xlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
line);
void _sx_slock_hard(struct sx *sx, const char *file, int line);
void _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int
line);
void _sx_sunlock_hard(struct sx *sx, const char *file, int line);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _sx_assert(struct sx *sx, int what, const char *file, int line);
#endif
@ -79,29 +195,63 @@ struct sx_args {
SYSUNINIT(name##_sx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
sx_destroy, (sxa))
#define sx_xlocked(sx) ((sx)->sx_cnt < 0 && (sx)->sx_xholder == curthread)
#define sx_slock(sx) _sx_slock((sx), LOCK_FILE, LOCK_LINE)
/*
* Public interface for lock operations.
*/
#ifndef LOCK_DEBUG
#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/sx.h>"
#endif
#if (LOCK_DEBUG > 0) || defined(SX_NOINLINE)
#define sx_xlock(sx) _sx_xlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) _sx_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#else
#define sx_xlock(sx) \
__sx_xlock((sx), curthread, LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) \
__sx_xunlock((sx), curthread, LOCK_FILE, LOCK_LINE)
#define sx_slock(sx) __sx_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx) __sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#endif /* LOCK_DEBUG > 0 || SX_NOINLINE */
#define sx_try_slock(sx) _sx_try_slock((sx), LOCK_FILE, LOCK_LINE)
#define sx_try_xlock(sx) _sx_try_xlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_sunlock(sx) _sx_sunlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_xunlock(sx) _sx_xunlock((sx), LOCK_FILE, LOCK_LINE)
#define sx_try_upgrade(sx) _sx_try_upgrade((sx), LOCK_FILE, LOCK_LINE)
#define sx_downgrade(sx) _sx_downgrade((sx), LOCK_FILE, LOCK_LINE)
#define sx_xlocked(sx) \
(((sx)->sx_lock & ~(SX_LOCK_FLAGMASK & ~SX_LOCK_SHARED)) == \
(uintptr_t)curthread)
#define sx_unlock(sx) do { \
if (sx_xlocked(sx)) \
sx_xunlock(sx); \
else \
sx_sunlock(sx); \
} while (0)
#define sx_sleep(chan, sx, pri, wmesg, timo) \
_sleep((chan), &(sx)->lock_object, (pri), (wmesg), (timo))
/*
* Options passed to sx_init_flags().
*/
#define SX_DUPOK 0x01
#define SX_NOPROFILE 0x02
#define SX_NOWITNESS 0x04
#define SX_QUIET 0x08
#define SX_ADAPTIVESPIN 0x10
/*
* XXX: These options should be renamed as SA_*
*/
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
#define SX_LOCKED LA_LOCKED
#define SX_SLOCKED LA_SLOCKED
#define SX_XLOCKED LA_XLOCKED
#define SX_UNLOCKED LA_UNLOCKED
#define SX_RECURSED LA_RECURSED
#define SX_NOTRECURSED LA_NOTRECURSED
#endif
#ifdef INVARIANTS