lockmgr: implement fast path

The main lockmgr routine takes 8 arguments, which makes it impossible
for the intermediate vop_stdlock/unlock routines to tail-call it.
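
For illustration, a sketch of why the argument count matters (the
wrapper below is hypothetical, not part of the patch): once the
wrapper's argument list matches the callee's, the compiler can lower
the call into a plain jump instead of setting up a new frame.

/*
 * Hypothetical wrapper: with matching argument lists the call below
 * can be emitted as a sibling (tail) call, i.e. a bare jmp.
 */
static int
stdlock_wrapper(struct lock *lk, u_int flags, struct lock_object *ilk,
    const char *file, int line)
{

	return (lockmgr_lock_fast_path(lk, flags, ilk, file, line));
}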

The routine itself starts with an if-forest and reads the lock word
several times.
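
For the uncontended exclusive case the fast path reduces this to a
single read and one compare-and-swap (excerpted from the patch below):

	tid = (uintptr_t)curthread;
	if (lk->lk_lock == LK_UNLOCKED &&
	    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
		locked = true;	/* no further reads of the lock word */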

This slows things down both single- and multi-threaded. With the patch,
single-threaded fstats go up 4% and multithreaded ones by up to ~27%.

Note that there is still a lot of room for improvement.

Reviewed by:	kib
Tested by:	pho
Mateusz Guzik	2017-02-12 09:49:44 +00:00
commit c4a48867f1 (parent ad68ac431b)
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=313683
3 changed files with 256 additions and 65 deletions

sys/kern/kern_lock.c

@@ -168,6 +168,72 @@ SYSCTL_UINT(_debug_lockmgr, OID_AUTO, retries, CTLFLAG_RW, &alk_retries, 0, "");
SYSCTL_UINT(_debug_lockmgr, OID_AUTO, loops, CTLFLAG_RW, &alk_loops, 0, "");
#endif
static bool __always_inline lockmgr_slock_try(struct lock *lk, uintptr_t *xp,
int flags);
static bool __always_inline lockmgr_sunlock_try(struct lock *lk, uintptr_t x);
static void
lockmgr_note_shared_acquire(struct lock *lk, int contested,
uint64_t waittime, const char *file, int line, int flags)
{
lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
file, line);
LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line);
WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
TD_SLOCKS_INC(curthread);
STACK_SAVE(lk);
}
static void
lockmgr_note_shared_release(struct lock *lk, const char *file, int line)
{
lock_profile_release_lock(&lk->lock_object);
WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
TD_LOCKS_DEC(curthread);
TD_SLOCKS_DEC(curthread);
}
static void
lockmgr_note_exclusive_acquire(struct lock *lk, int contested,
uint64_t waittime, const char *file, int line, int flags)
{
lock_profile_obtain_lock_success(&lk->lock_object, contested, waittime,
file, line);
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE | LK_TRYWIT(flags), file,
line);
TD_LOCKS_INC(curthread);
STACK_SAVE(lk);
}
static void
lockmgr_note_exclusive_release(struct lock *lk, const char *file, int line)
{
lock_profile_release_lock(&lk->lock_object);
LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, lk->lk_recurse, file,
line);
WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
TD_LOCKS_DEC(curthread);
}
static void
lockmgr_note_exclusive_upgrade(struct lock *lk, const char *file, int line,
int flags)
{
LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
line);
WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_SLOCKS_DEC(curthread);
}
static __inline struct thread *
lockmgr_xholder(const struct lock *lk)
{
@@ -234,35 +300,11 @@ wakeupshlk(struct lock *lk, const char *file, int line)
u_int realexslp;
int queue, wakeup_swapper;
WITNESS_UNLOCK(&lk->lock_object, 0, file, line);
LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line);
wakeup_swapper = 0;
for (;;) {
x = lk->lk_lock;
/*
* If there is more than one shared lock held, just drop one
* and return.
*/
if (LK_SHARERS(x) > 1) {
if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
x - LK_ONE_SHARER))
break;
continue;
}
/*
* If there are no waiters on the exclusive queue, drop the
* lock quickly.
*/
if ((x & LK_ALL_WAITERS) == 0) {
MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
LK_SHARERS_LOCK(1));
if (atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_UNLOCKED))
break;
continue;
}
if (lockmgr_sunlock_try(lk, x))
break;
/*
* We should have a sharer with waiters, so enter the hard
@@ -332,9 +374,7 @@ wakeupshlk(struct lock *lk, const char *file, int line)
break;
}
lock_profile_release_lock(&lk->lock_object);
TD_LOCKS_DEC(curthread);
TD_SLOCKS_DEC(curthread);
lockmgr_note_shared_release(lk, file, line);
return (wakeup_swapper);
}
@@ -448,6 +488,165 @@ lockdestroy(struct lock *lk)
lock_destroy(&lk->lock_object);
}
static bool __always_inline
lockmgr_slock_try(struct lock *lk, uintptr_t *xp, int flags)
{
/*
* If no other thread has an exclusive lock, or
* no exclusive waiter is present, bump the count of
* sharers. Since we have to preserve the state of
* waiters, if we fail to acquire the shared lock
* loop back and retry.
*/
*xp = lk->lk_lock;
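/*
 * On failure atomic_fcmpset_acq_ptr() stores the observed lock word
 * back into *xp, so the retry loop does not re-read lk->lk_lock by
 * hand.
 */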
while (LK_CAN_SHARE(*xp, flags)) {
if (atomic_fcmpset_acq_ptr(&lk->lk_lock, xp,
*xp + LK_ONE_SHARER)) {
return (true);
}
}
return (false);
}
static bool __always_inline
lockmgr_sunlock_try(struct lock *lk, uintptr_t x)
{
for (;;) {
/*
* If there is more than one shared lock held, just drop one
* and return.
*/
if (LK_SHARERS(x) > 1) {
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
x - LK_ONE_SHARER))
return (true);
continue;
}
/*
* If there are no waiters on the exclusive queue, drop the
* lock quickly.
*/
if ((x & LK_ALL_WAITERS) == 0) {
MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
LK_SHARERS_LOCK(1));
if (atomic_fcmpset_rel_ptr(&lk->lk_lock, &x,
LK_UNLOCKED))
return (true);
continue;
}
break;
}
return (false);
}
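/*
 * Fast-path lock entry point: handle the uncontended shared, exclusive
 * and upgrade cases inline with a single atomic operation each, and
 * fall back to the fully armed __lockmgr_args() for everything else.
 */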
int
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *file, int line)
{
struct lock_class *class;
uintptr_t x, v, tid;
u_int op;
bool locked;
op = flags & LK_TYPE_MASK;
locked = false;
switch (op) {
case LK_SHARED:
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, flags & LK_INTERLOCK ? ilk : NULL);
if (__predict_false(lk->lock_object.lo_flags & LK_NOSHARE))
break;
if (lockmgr_slock_try(lk, &x, flags)) {
lockmgr_note_shared_acquire(lk, 0, 0,
file, line, flags);
locked = true;
}
break;
case LK_EXCLUSIVE:
if (LK_CAN_WITNESS(flags))
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
ilk : NULL);
tid = (uintptr_t)curthread;
if (lk->lk_lock == LK_UNLOCKED &&
atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
flags);
locked = true;
}
break;
case LK_UPGRADE:
case LK_TRYUPGRADE:
_lockmgr_assert(lk, KA_SLOCKED, file, line);
tid = (uintptr_t)curthread;
v = lk->lk_lock;
x = v & LK_ALL_WAITERS;
v &= LK_EXCLUSIVE_SPINNERS;
if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
tid | x)) {
lockmgr_note_exclusive_upgrade(lk, file, line, flags);
locked = true;
}
break;
default:
break;
}
if (__predict_true(locked)) {
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
}
return (0);
} else {
return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
}
}
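/*
 * Fast-path unlock: drop an uncontended shared or exclusive lock with
 * a single atomic operation; recursed locks and locks with waiters
 * fall back to __lockmgr_args() with LK_RELEASE set.
 */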
int
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
{
struct lock_class *class;
uintptr_t x, tid;
bool unlocked;
const char *file;
int line;
file = __FILE__;
line = __LINE__;
_lockmgr_assert(lk, KA_LOCKED, file, line);
unlocked = false;
x = lk->lk_lock;
if (__predict_true((x & LK_SHARE) != 0)) {
if (lockmgr_sunlock_try(lk, x)) {
lockmgr_note_shared_release(lk, file, line);
unlocked = true;
}
} else {
tid = (uintptr_t)curthread;
if (!lockmgr_recursed(lk) &&
atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
lockmgr_note_exclusive_release(lk, file, line);
unlocked = true;
}
}
if (__predict_true(unlocked)) {
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
}
return (0);
} else {
return (__lockmgr_args(lk, flags | LK_RELEASE, ilk, LK_WMESG_DEFAULT,
LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE));
}
}
int
__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int pri, int timo, const char *file, int line)
@@ -518,21 +717,8 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
file, line, flags & LK_INTERLOCK ? ilk : NULL);
for (;;) {
x = lk->lk_lock;
/*
* If no other thread has an exclusive lock, or
* no exclusive waiter is present, bump the count of
* sharers. Since we have to preserve the state of
* waiters, if we fail to acquire the shared lock
* loop back and retry.
*/
if (LK_CAN_SHARE(x, flags)) {
if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
x + LK_ONE_SHARER))
break;
continue;
}
if (lockmgr_slock_try(lk, &x, flags))
break;
#ifdef HWPMC_HOOKS
PMC_SOFT_CALL( , , lock, failed);
#endif
@@ -697,15 +883,13 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
__func__, lk);
}
if (error == 0) {
lock_profile_obtain_lock_success(&lk->lock_object,
contested, waittime, file, line);
LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file,
line);
WITNESS_LOCK(&lk->lock_object, LK_TRYWIT(flags), file,
line);
TD_LOCKS_INC(curthread);
TD_SLOCKS_INC(curthread);
STACK_SAVE(lk);
#ifdef LOCK_PROFILING
lockmgr_note_shared_acquire(lk, contested, waittime,
file, line, flags);
#else
lockmgr_note_shared_acquire(lk, 0, 0, file, line,
flags);
#endif
}
break;
case LK_UPGRADE:
@@ -968,14 +1152,13 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
__func__, lk);
}
if (error == 0) {
lock_profile_obtain_lock_success(&lk->lock_object,
contested, waittime, file, line);
LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0,
lk->lk_recurse, file, line);
WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
LK_TRYWIT(flags), file, line);
TD_LOCKS_INC(curthread);
STACK_SAVE(lk);
#ifdef LOCK_PROFILING
lockmgr_note_exclusive_acquire(lk, contested, waittime,
file, line, flags);
#else
lockmgr_note_exclusive_acquire(lk, 0, 0, file, line,
flags);
#endif
}
break;
case LK_DOWNGRADE:

sys/kern/vfs_default.c

@@ -520,10 +520,11 @@ vop_stdlock(ap)
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct mtx *ilk;
return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
ap->a_line));
ilk = VI_MTX(vp);
return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
(ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line));
}
/* See above. */
@@ -535,8 +536,11 @@ vop_stdunlock(ap)
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct mtx *ilk;
return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
ilk = VI_MTX(vp);
return (lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
(ilk != NULL) ? &ilk->lock_object : NULL));
}
/* See above. */

sys/sys/lockmgr.h

@@ -68,6 +68,10 @@ struct thread;
*/
int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int prio, int timo, const char *file, int line);
int lockmgr_lock_fast_path(struct lock *lk, u_int flags,
struct lock_object *ilk, const char *file, int line);
int lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
struct lock_object *ilk);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void _lockmgr_assert(const struct lock *lk, int what, const char *file, int line);
#endif