- Add a flags parameter to mi_switch.  The value of flags may be SW_VOL or
  SW_INVOL.  Assert that one of these is set in mi_switch() and properly
  adjust the rusage statistics.  This is to simplify the large number of
  users of this interface which were previously all required to adjust the
  proper counter prior to calling mi_switch().  This also facilitates more
  switch and locking optimizations.
- Change all callers of mi_switch() to pass the appropriate parameter and
  remove direct references to the process statistics.
Jeff Roberson 2004-01-25 03:54:52 +00:00
parent 8dc10be885
commit 29bcc4514f
16 changed files with 37 additions and 48 deletions
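
The shape of the change at each call site, condensed from the hunks below
into a before/after sketch (illustrative fragment, not part of the diff):

	/* Before: every caller adjusted the rusage counter by hand. */
	mtx_lock_spin(&sched_lock);
	td->td_proc->p_stats->p_ru.ru_nvcsw++;	/* voluntary switch */
	mi_switch();
	mtx_unlock_spin(&sched_lock);

	/* After: mi_switch() does the accounting based on the flag. */
	mtx_lock_spin(&sched_lock);
	mi_switch(SW_VOL);	/* or SW_INVOL for involuntary switches */
	mtx_unlock_spin(&sched_lock);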


@@ -119,8 +119,7 @@ static __inline void
 cv_switch(struct thread *td)
 {
 	TD_SET_SLEEPING(td);
-	td->td_proc->p_stats->p_ru.ru_nvcsw++;
-	mi_switch();
+	mi_switch(SW_VOL);
 	CTR3(KTR_PROC, "cv_switch: resume thread %p (pid %d, %s)", td,
 	    td->td_proc->p_pid, td->td_proc->p_comm);
 }
@@ -370,8 +369,7 @@ cv_timedwait(struct cv *cvp, struct mtx *mp, int timo)
 		 * Go back to sleep.
 		 */
 		TD_SET_SLEEPING(td);
-		td->td_proc->p_stats->p_ru.ru_nivcsw++;
-		mi_switch();
+		mi_switch(SW_INVOL);
 		td->td_flags &= ~TDF_TIMOFAIL;
 	}
@@ -447,8 +445,7 @@ cv_timedwait_sig(struct cv *cvp, struct mtx *mp, int timo)
 		 * Go back to sleep.
 		 */
 		TD_SET_SLEEPING(td);
-		td->td_proc->p_stats->p_ru.ru_nivcsw++;
-		mi_switch();
+		mi_switch(SW_INVOL);
 		td->td_flags &= ~TDF_TIMOFAIL;
 	}
 	mtx_unlock_spin(&sched_lock);


@@ -87,8 +87,7 @@ idle_proc(void *dummy)
 		mtx_lock_spin(&sched_lock);
 		td->td_state = TDS_CAN_RUN;
-		p->p_stats->p_ru.ru_nvcsw++;
-		mi_switch();
+		mi_switch(SW_VOL);
 		mtx_unlock_spin(&sched_lock);
 	}
 }


@@ -407,10 +407,9 @@ ithread_schedule(struct ithd *ithread, int do_switch)
 		    (ctd->td_critnest == 1) ) {
 			KASSERT((TD_IS_RUNNING(ctd)),
 			    ("ithread_schedule: Bad state for curthread."));
-			ctd->td_proc->p_stats->p_ru.ru_nivcsw++;
 			if (ctd->td_flags & TDF_IDLETD)
 				ctd->td_state = TDS_CAN_RUN; /* XXXKSE */
-			mi_switch();
+			mi_switch(SW_INVOL);
 		} else {
 			curthread->td_flags |= TDF_NEEDRESCHED;
 		}
@@ -566,9 +565,8 @@ ithread_loop(void *arg)
 		if (ithd->it_enable != NULL)
 			ithd->it_enable(ithd->it_vector);
 		TD_SET_IWAIT(td); /* we're idle */
-		p->p_stats->p_ru.ru_nvcsw++;
 		CTR2(KTR_INTR, "%s: pid %d: done", __func__, p->p_pid);
-		mi_switch();
+		mi_switch(SW_VOL);
 		CTR2(KTR_INTR, "%s: pid %d: resumed", __func__, p->p_pid);
 	}
 	mtx_unlock_spin(&sched_lock);


@@ -1939,8 +1939,7 @@ thread_single(int force_exit)
 		thread_suspend_one(td);
 		DROP_GIANT();
 		PROC_UNLOCK(p);
-		p->p_stats->p_ru.ru_nvcsw++;
-		mi_switch();
+		mi_switch(SW_VOL);
 		mtx_unlock_spin(&sched_lock);
 		PICKUP_GIANT();
 		PROC_LOCK(p);
@@ -2042,8 +2041,7 @@ thread_suspend_check(int return_instead)
 		}
 		DROP_GIANT();
 		PROC_UNLOCK(p);
-		p->p_stats->p_ru.ru_nivcsw++;
-		mi_switch();
+		mi_switch(SW_INVOL);
 		mtx_unlock_spin(&sched_lock);
 		PICKUP_GIANT();
 		PROC_LOCK(p);


@@ -654,8 +654,7 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 		    "_mtx_unlock_sleep: %p switching out lock=%p", m,
 		    (void *)m->mtx_lock);
-	td->td_proc->p_stats->p_ru.ru_nivcsw++;
-	mi_switch();
+	mi_switch(SW_INVOL);
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
 		CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p",
 		    m, (void *)m->mtx_lock);


@@ -504,8 +504,7 @@ poll_idle(void)
 			mtx_unlock(&Giant);
 			mtx_assert(&Giant, MA_NOTOWNED);
 			mtx_lock_spin(&sched_lock);
-			td->td_proc->p_stats->p_ru.ru_nvcsw++;
-			mi_switch();
+			mi_switch(SW_VOL);
 			mtx_unlock_spin(&sched_lock);
 		} else {
 			idlepoll_sleeping = 1;


@@ -308,8 +308,10 @@ boot(int howto)
 			DROP_GIANT();
 			for (subiter = 0; subiter < 50 * iter; subiter++) {
 				mtx_lock_spin(&sched_lock);
-				curthread->td_proc->p_stats->p_ru.ru_nvcsw++;
-				mi_switch();	/* Allow interrupt threads to run */
+				/*
+				 * Allow interrupt threads to run
+				 */
+				mi_switch(SW_VOL);
 				mtx_unlock_spin(&sched_lock);
 				DELAY(1000);
 			}


@@ -2036,8 +2036,7 @@ ptracestop(struct thread *td, int sig)
 	thread_suspend_one(td);
 	PROC_UNLOCK(p);
 	DROP_GIANT();
-	p->p_stats->p_ru.ru_nivcsw++;
-	mi_switch();
+	mi_switch(SW_INVOL);
 	mtx_unlock_spin(&sched_lock);
 	PICKUP_GIANT();
 }
@@ -2186,8 +2185,7 @@ issignal(td)
 			thread_suspend_one(td);
 			PROC_UNLOCK(p);
 			DROP_GIANT();
-			p->p_stats->p_ru.ru_nivcsw++;
-			mi_switch();
+			mi_switch(SW_INVOL);
 			mtx_unlock_spin(&sched_lock);
 			PICKUP_GIANT();
 			PROC_LOCK(p);


@@ -441,8 +441,7 @@ uio_yield(void)
 	mtx_lock_spin(&sched_lock);
 	DROP_GIANT();
 	sched_prio(td, td->td_ksegrp->kg_user_pri); /* XXXKSE */
-	td->td_proc->p_stats->p_ru.ru_nivcsw++;
-	mi_switch();
+	mi_switch(SW_INVOL);
 	mtx_unlock_spin(&sched_lock);
 	PICKUP_GIANT();
 }


@@ -250,9 +250,8 @@ msleep(ident, mtx, priority, wmesg, timo)
 	sched_sleep(td, priority & PRIMASK);
 	if (TD_ON_SLEEPQ(td)) {
-		p->p_stats->p_ru.ru_nvcsw++;
 		TD_SET_SLEEPING(td);
-		mi_switch();
+		mi_switch(SW_VOL);
 	}
 	/*
 	 * We're awake from voluntary sleep.
@@ -278,8 +277,7 @@ msleep(ident, mtx, priority, wmesg, timo)
 		 * the wrong msleep().  Yuck.
 		 */
 		TD_SET_SLEEPING(td);
-		p->p_stats->p_ru.ru_nivcsw++;
-		mi_switch();
+		mi_switch(SW_INVOL);
 		td->td_flags &= ~TDF_TIMOFAIL;
 	}
 	if ((td->td_flags & TDF_INTERRUPT) && (priority & PCATCH) &&
@@ -453,7 +451,7 @@ wakeup_one(ident)
  * The machine independent parts of mi_switch().
  */
 void
-mi_switch(void)
+mi_switch(int flags)
 {
 	struct bintime new_switchtime;
 	struct thread *td;
@@ -469,7 +467,13 @@ mi_switch(void)
 #endif
 	KASSERT(td->td_critnest == 1,
 	    ("mi_switch: switch in a critical section"));
+	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
+	    ("mi_switch: switch must be voluntary or involuntary"));
+	if (flags & SW_VOL)
+		p->p_stats->p_ru.ru_nvcsw++;
+	else
+		p->p_stats->p_ru.ru_nivcsw++;
 	/*
 	 * Compute the amount of time during which the current
 	 * process was running, and add that to its total so far.
@@ -647,9 +651,8 @@ yield(struct thread *td, struct yield_args *uap)
 	kg = td->td_ksegrp;
 	mtx_assert(&Giant, MA_NOTOWNED);
 	mtx_lock_spin(&sched_lock);
-	kg->kg_proc->p_stats->p_ru.ru_nvcsw++;
 	sched_prio(td, PRI_MAX_TIMESHARE);
-	mi_switch();
+	mi_switch(SW_VOL);
 	mtx_unlock_spin(&sched_lock);
 	td->td_retval[0] = 0;
 	return (0);



@@ -1707,8 +1707,7 @@ sched_bind(struct thread *td, int cpu)
 	kseq_load_rem(KSEQ_CPU(ke->ke_cpu), ke);
 	kseq_notify(ke, cpu);
 	/* When we return from mi_switch we'll be on the correct cpu. */
-	td->td_proc->p_stats->p_ru.ru_nvcsw++;
-	mi_switch();
+	mi_switch(SW_VOL);
 #endif
 }


@@ -247,8 +247,7 @@ ast(struct trapframe *framep)
 #endif
 		mtx_lock_spin(&sched_lock);
 		sched_prio(td, kg->kg_user_pri);
-		p->p_stats->p_ru.ru_nivcsw++;
-		mi_switch();
+		mi_switch(SW_INVOL);
 		mtx_unlock_spin(&sched_lock);
 #ifdef KTRACE
 		if (KTRPOINT(td, KTR_CSW))


@@ -513,8 +513,7 @@ turnstile_wait(struct turnstile *ts, struct lock_object *lock,
 		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
 		    lock, lock->lo_name);
-	td->td_proc->p_stats->p_ru.ru_nvcsw++;
-	mi_switch();
+	mi_switch(SW_VOL);
 	if (LOCK_LOG_TEST(lock, 0))
 		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",


@@ -841,7 +841,10 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
 void	fork_return(struct thread *, struct trapframe *);
 int	inferior(struct proc *p);
 int	leavepgrp(struct proc *p);
-void	mi_switch(void);
+void	mi_switch(int flags);
+/* Flags for mi_switch(). */
+#define	SW_VOL		0x0001		/* Voluntary switch. */
+#define	SW_INVOL	0x0002		/* Involuntary switch. */
 int	p_candebug(struct thread *td, struct proc *p);
 int	p_cansee(struct thread *td, struct proc *p);
 int	p_cansched(struct thread *td, struct proc *p);
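
SW_VOL and SW_INVOL are distinct bits, so the KASSERT added to mi_switch()
only demands that at least one of them is set; if both were passed, the
accounting would count the switch as voluntary, since SW_VOL is tested
first. A minimal standalone illustration of the flag test (hypothetical
snippet, not part of the diff):

	#include <assert.h>

	#define SW_VOL   0x0001	/* Voluntary switch. */
	#define SW_INVOL 0x0002	/* Involuntary switch. */

	/* Mirrors the KASSERT in mi_switch(): at least one flag required. */
	static void
	check_switch_flags(int flags)
	{
		assert((flags & (SW_VOL | SW_INVOL)) != 0);
	}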


@@ -155,8 +155,7 @@ vm_pagezero(void)
 		pages += vm_page_zero_idle();
 		if (pages > idlezero_maxrun || sched_runnable()) {
 			mtx_lock_spin(&sched_lock);
-			td->td_proc->p_stats->p_ru.ru_nvcsw++;
-			mi_switch();
+			mi_switch(SW_VOL);
 			mtx_unlock_spin(&sched_lock);
 			pages = 0;
 		}