rmlock: partially depessimize lock/unlock fastpath

Previously the slow path was folded into the fast path and only partially jumped over in the common case.
mjg 2018-05-11 06:59:54 +00:00
parent fa401e141a
commit e6cfcf1248


@@ -344,7 +344,7 @@ rm_sysinit(void *arg)
 	rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags);
 }
 
-static int
+static __noinline int
 _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 {
 	struct pcpu *pc;
@@ -459,15 +459,15 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * Fast path to combine two common conditions into a single
 	 * conditional jump.
 	 */
-	if (0 == (td->td_owepreempt |
-	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
+	if (__predict_true(0 == (td->td_owepreempt |
+	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus))))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
 	return _rm_rlock_hard(rm, tracker, trylock);
 }
 
-static void
+static __noinline void
 _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
 {
@@ -518,7 +518,7 @@ _rm_runlock(struct rmlock *rm, struct rm_priotracker *tracker)
 	if (rm->lock_object.lo_flags & LO_SLEEPABLE)
 		THREAD_SLEEPING_OK();
 
-	if (0 == (td->td_owepreempt | tracker->rmp_flags))
+	if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags)))
 		return;
 
 	_rm_unlock_hard(td, tracker);
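
The change is an instance of a common two-part pattern: keep the rarely taken slow path in a separate __noinline function so it does not get inlined into and bloat the caller, and mark the fast-path test with __predict_true so the compiler lays out the expected case as the straight-line fall-through. Below is a minimal, self-contained sketch of that pattern using a toy spinlock, not the rmlock code above; the macro definitions mirror the usual sys/cdefs.h ones and assume a GCC/Clang-style compiler, and the toy_* names are made up for illustration.

/*
 * Illustrative sketch only: a toy spinlock with the same
 * fast-path/slow-path split as the rmlock change above.
 */
#include <stdatomic.h>
#include <stdio.h>

#define	__predict_true(exp)	__builtin_expect((exp), 1)
#define	__noinline		__attribute__((__noinline__))

struct toy_lock {
	atomic_int	busy;		/* 0 == free, 1 == held */
	unsigned long	slow_acquires;	/* demo counter; not itself synchronized */
};

/* Out-of-line slow path: spin until the lock is released. */
static __noinline int
toy_lock_hard(struct toy_lock *lk)
{

	lk->slow_acquires++;
	while (atomic_exchange(&lk->busy, 1) != 0)
		;			/* spin */
	return (1);
}

/* Fast path: a single predicted-true test, then return. */
static int
toy_lock(struct toy_lock *lk)
{

	if (__predict_true(atomic_exchange(&lk->busy, 1) == 0))
		return (1);
	return (toy_lock_hard(lk));
}

static void
toy_unlock(struct toy_lock *lk)
{

	atomic_store(&lk->busy, 0);
}

int
main(void)
{
	static struct toy_lock lk;	/* zero-initialized: unlocked */

	toy_lock(&lk);
	/* critical section would go here */
	toy_unlock(&lk);
	printf("slow path taken %lu times\n", lk.slow_acquires);
	return (0);
}

With optimization enabled, the fast path of toy_lock() typically compiles to the atomic exchange plus a forward, not-taken conditional branch, while the spin loop stays out of line in toy_lock_hard(); that is the same shape the rmlock read lock and unlock fast paths get after this commit.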