sx: retire SX_NOADAPTIVE

The flag has not been used by anything for years, and supporting it requires an
explicit read from the lock when entering the slow path.

The flag value is left unused on purpose.

Sponsored by:	The FreeBSD Foundation
This commit is contained in:
mjg 2018-12-05 16:43:03 +00:00
parent 9de0264534
commit f8fa891369
3 changed files with 11 additions and 37 deletions

View File

@ -162,10 +162,6 @@ struct mutex {
*/
typedef struct rwlock rwlock_t;
#if defined(SX_ADAPTIVESPIN) && !defined(SX_NOADAPTIVE)
#define SX_NOADAPTIVE SX_ADAPTIVESPIN
#endif
#define DEFINE_RWLOCK(name) \
struct rwlock name; \
SX_SYSINIT(name, &name, #name)

View File

@ -71,8 +71,6 @@ __FBSDID("$FreeBSD$");
#define ADAPTIVE_SX
#endif
CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
PMC_SOFT_DECLARE( , , lock, failed);
@ -233,7 +231,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
SX_NOPROFILE | SX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
("%s: sx_lock not aligned for %s: %p", __func__, description,
&sx->sx_lock));
@ -252,7 +250,6 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
if (opts & SX_NEW)
flags |= LO_NEW;
flags |= opts & SX_NOADAPTIVE;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
sx->sx_lock = SX_LOCK_UNLOCKED;
sx->sx_recurse = 0;
@ -572,7 +569,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
volatile struct thread *owner;
u_int i, n, spintries = 0;
enum { READERS, WRITER } sleep_reason = READERS;
bool adaptive;
bool in_critical = false;
#endif
#ifdef LOCK_PROFILING
@ -642,10 +638,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
#ifdef ADAPTIVE_SX
adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@ -669,8 +661,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
lda.spin_cnt++;
#endif
#ifdef ADAPTIVE_SX
if (__predict_false(!adaptive))
goto sleepq;
/*
* If the lock is write locked and the owner is
* running on another CPU, spin until the owner stops
@ -762,20 +752,18 @@ _sx_xlock_hard(struct sx *sx, uintptr_t x, int opts LOCK_FILE_LINE_ARG_DEF)
* chain lock. If so, drop the sleep queue lock and try
* again.
*/
if (adaptive) {
if (!(x & SX_LOCK_SHARED)) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
sx_drop_critical(x, &in_critical,
&extra_work);
continue;
}
} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
if (!(x & SX_LOCK_SHARED)) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);
sx_drop_critical(x, &in_critical, &extra_work);
sx_drop_critical(x, &in_critical,
&extra_work);
continue;
}
} else if (SX_SHARERS(x) > 0 && sleep_reason == WRITER) {
sleepq_release(&sx->lock_object);
sx_drop_critical(x, &in_critical, &extra_work);
continue;
}
#endif
@ -1021,7 +1009,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
#ifdef ADAPTIVE_SX
volatile struct thread *owner;
u_int i, n, spintries = 0;
bool adaptive;
#endif
#ifdef LOCK_PROFILING
uint64_t waittime = 0;
@ -1066,10 +1053,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
lock_delay_arg_init(&lda, NULL);
#endif
#ifdef ADAPTIVE_SX
adaptive = ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0);
#endif
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@ -1095,9 +1078,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
#endif
#ifdef ADAPTIVE_SX
if (__predict_false(!adaptive))
goto sleepq;
/*
* If the owner is running on another CPU, spin until
* the owner stops running or the state of the lock
@ -1154,7 +1134,6 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
continue;
}
}
sleepq:
#endif
/*
@ -1176,7 +1155,7 @@ _sx_slock_hard(struct sx *sx, int opts, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
* the owner stops running or the state of the lock
* changes.
*/
if (!(x & SX_LOCK_SHARED) && adaptive) {
if (!(x & SX_LOCK_SHARED)) {
owner = (struct thread *)SX_OWNER(x);
if (TD_IS_RUNNING(owner)) {
sleepq_release(&sx->lock_object);

View File

@ -273,7 +273,6 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
#define SX_NOPROFILE 0x02
#define SX_NOWITNESS 0x04
#define SX_QUIET 0x08
#define SX_NOADAPTIVE 0x10
#define SX_RECURSE 0x20
#define SX_NEW 0x40