Inlined sched_userret.

The tested condition is rarely true and it induces a function call
on each return to userspace.

Bumps getuid rate by about 1% on Broadwell.
Mateusz Guzik 2018-05-07 23:36:16 +00:00
parent 75e9b455a9
commit 2824088536
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=333344
3 changed files with 33 additions and 38 deletions
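Before the per-file diffs, a minimal self-contained sketch of the pattern this commit applies may help: keep the rarely-true priority check inline so the common return-to-userspace path costs only a load and a compare, and call out of line only when work is actually needed. All names below (demo_thread, demo_userret, ...) are illustrative, not the kernel's; __predict_false in the real diff corresponds to __builtin_expect(..., 0) on GCC/Clang.

/*
 * Illustrative sketch of the fast-path/slow-path split; the names are
 * made up and do not appear in the kernel sources.
 */
#define	demo_predict_false(exp)	__builtin_expect((exp), 0)

struct demo_thread {
	int	td_priority;	/* current scheduling priority */
	int	td_user_pri;	/* priority to use in userspace */
};

/* Out-of-line slow path: reached only when the priorities differ. */
static void
demo_userret_slowpath(struct demo_thread *td)
{
	/* The real slow path also takes the thread lock around the update. */
	td->td_priority = td->td_user_pri;
}

/* Inline fast path: no function call on the common return to userspace. */
static inline void
demo_userret(struct demo_thread *td)
{
	if (demo_predict_false(td->td_priority != td->td_user_pri))
		demo_userret_slowpath(td);
}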

sys/kern/sched_4bsd.c

@@ -1481,25 +1481,13 @@ sched_preempt(struct thread *td)
 }
 
 void
-sched_userret(struct thread *td)
+sched_userret_slowpath(struct thread *td)
 {
-	/*
-	 * XXX we cheat slightly on the locking here to avoid locking in
-	 * the usual case. Setting td_priority here is essentially an
-	 * incomplete workaround for not setting it properly elsewhere.
-	 * Now that some interrupt handlers are threads, not setting it
-	 * properly elsewhere can clobber it in the window between setting
-	 * it here and returning to user mode, so don't waste time setting
-	 * it perfectly here.
-	 */
-	KASSERT((td->td_flags & TDF_BORROWING) == 0,
-	    ("thread with borrowed priority returning to userland"));
-	if (td->td_priority != td->td_user_pri) {
-		thread_lock(td);
-		td->td_priority = td->td_user_pri;
-		td->td_base_pri = td->td_user_pri;
-		thread_unlock(td);
-	}
+
+	thread_lock(td);
+	td->td_priority = td->td_user_pri;
+	td->td_base_pri = td->td_user_pri;
+	thread_unlock(td);
 }
 
 void

sys/kern/sched_ule.c

@@ -2356,26 +2356,14 @@ sched_preempt(struct thread *td)
  * to static priorities in msleep() or similar.
  */
 void
-sched_userret(struct thread *td)
+sched_userret_slowpath(struct thread *td)
 {
-	/*
-	 * XXX we cheat slightly on the locking here to avoid locking in
-	 * the usual case. Setting td_priority here is essentially an
-	 * incomplete workaround for not setting it properly elsewhere.
-	 * Now that some interrupt handlers are threads, not setting it
-	 * properly elsewhere can clobber it in the window between setting
-	 * it here and returning to user mode, so don't waste time setting
-	 * it perfectly here.
-	 */
-	KASSERT((td->td_flags & TDF_BORROWING) == 0,
-	    ("thread with borrowed priority returning to userland"));
-	if (td->td_priority != td->td_user_pri) {
-		thread_lock(td);
-		td->td_priority = td->td_user_pri;
-		td->td_base_pri = td->td_user_pri;
-		tdq_setlowpri(TDQ_SELF(), td);
-		thread_unlock(td);
-	}
+
+	thread_lock(td);
+	td->td_priority = td->td_user_pri;
+	td->td_base_pri = td->td_user_pri;
+	tdq_setlowpri(TDQ_SELF(), td);
+	thread_unlock(td);
 }
 
 /*

sys/sys/sched.h

@@ -103,7 +103,7 @@ void sched_switch(struct thread *td, struct thread *newtd, int flags);
 void sched_throw(struct thread *td);
 void sched_unlend_prio(struct thread *td, u_char prio);
 void sched_user_prio(struct thread *td, u_char prio);
-void sched_userret(struct thread *td);
+void sched_userret_slowpath(struct thread *td);
 void sched_wakeup(struct thread *td);
 #ifdef RACCT
 #ifdef SCHED_4BSD
@@ -111,6 +111,25 @@ fixpt_t sched_pctcpu_delta(struct thread *td);
 #endif
 #endif
 
+static inline void
+sched_userret(struct thread *td)
+{
+
+	/*
+	 * XXX we cheat slightly on the locking here to avoid locking in
+	 * the usual case. Setting td_priority here is essentially an
+	 * incomplete workaround for not setting it properly elsewhere.
+	 * Now that some interrupt handlers are threads, not setting it
+	 * properly elsewhere can clobber it in the window between setting
+	 * it here and returning to user mode, so don't waste time setting
+	 * it perfectly here.
+	 */
+	KASSERT((td->td_flags & TDF_BORROWING) == 0,
+	    ("thread with borrowed priority returning to userland"));
+	if (__predict_false(td->td_priority != td->td_user_pri))
+		sched_userret_slowpath(td);
+}
+
 /*
  * Threads are moved on and off of run queues
  */