- Move TDF_OWEPREEMPT, TDF_OWEUPC, and TDF_USTATCLOCK over to td_pflags
  since they are only accessed by curthread and thus do not need any
  locking.
- Move pr_addr and pr_ticks out of struct uprof (which is per-process)
  and directly into struct thread as td_profil_addr and td_profil_ticks
  as these variables are really per-thread.  (They are used to defer an
  addupc_intr() that was too "hard" until ast()).
commit 0cb3276d57
parent 1115416b3b
Author: jhb
Date:   2004-07-16 21:04:55 +00:00

9 changed files with 31 additions and 24 deletions
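
The rule the first bullet of the commit message relies on: bits in td_flags can be set by other threads (for example, another CPU posting TDF_ASTPENDING), so every read-modify-write of that word must hold sched_lock, whereas td_pflags is read and written only by the owning thread, so plain loads and stores suffice. Below is a minimal userspace sketch of that distinction, not kernel code: the struct, functions, and pthread mutex are hypothetical stand-ins, and only the flag values echo the diffs that follow.

#include <pthread.h>
#include <stdio.h>

#define TDF_ASTPENDING 0x000800        /* shared: other CPUs may post this */
#define TDP_OWEUPC     0x0200          /* private: only the owner touches it */

struct thread {
        int td_flags;                  /* protected by sched_lock */
        int td_pflags;                 /* private to the owning thread */
};

static pthread_mutex_t sched_lock = PTHREAD_MUTEX_INITIALIZER;

/* Shared word: the whole read-modify-write must happen under the lock. */
static void
set_shared_flag(struct thread *td, int flag)
{
        pthread_mutex_lock(&sched_lock);
        td->td_flags |= flag;
        pthread_mutex_unlock(&sched_lock);
}

/* Private word: only the owning thread runs this, so no lock is taken. */
static void
set_private_flag(struct thread *td, int flag)
{
        td->td_pflags |= flag;
}

int
main(void)
{
        struct thread td = { 0, 0 };

        set_private_flag(&td, TDP_OWEUPC);      /* cheap: no locking */
        set_shared_flag(&td, TDF_ASTPENDING);   /* needs sched_lock */
        printf("td_flags=%#x td_pflags=%#x\n",
            (unsigned)td.td_flags, (unsigned)td.td_pflags);
        return (0);
}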

@@ -838,8 +838,9 @@ thread_statclock(int user)
                 return (0);
         if (user) {
                 /* Current always do via ast() */
+                td->td_pflags |= TDP_USTATCLOCK;
                 mtx_lock_spin(&sched_lock);
-                td->td_flags |= (TDF_USTATCLOCK|TDF_ASTPENDING);
+                td->td_flags |= TDF_ASTPENDING;
                 mtx_unlock_spin(&sched_lock);
                 td->td_uuticks++;
         } else if (td->td_mailbox != NULL)
@@ -1129,11 +1130,9 @@ thread_userret(struct thread *td, struct trapframe *frame)
                  * is returning from interrupt, charge thread's
                  * userland time for UTS.
                  */
-                if (td->td_flags & TDF_USTATCLOCK) {
+                if (td->td_pflags & TDP_USTATCLOCK) {
                         thread_update_usr_ticks(td, 1);
-                        mtx_lock_spin(&sched_lock);
-                        td->td_flags &= ~TDF_USTATCLOCK;
-                        mtx_unlock_spin(&sched_lock);
+                        td->td_pflags &= ~TDP_USTATCLOCK;
                 }
                 /*

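These two hunks are the defer-to-AST pattern in miniature: the statclock interrupt records the hit in the private flag word plus the shared TDF_ASTPENDING bit, and thread_userret() settles it on the way back to user mode, now without taking sched_lock to clear the flag. A hedged userspace sketch of the same shape follows; all names are hypothetical, locking is elided, and thread_update_usr_ticks() stands in for the real accounting.

#include <stdio.h>

#define TDF_ASTPENDING 0x000800
#define TDP_USTATCLOCK 0x0400

struct thread {
        int td_flags;          /* shared; locking elided in this sketch */
        int td_pflags;         /* private to the owning thread */
        int td_uuticks;        /* user ticks awaiting collection */
};

/* Stand-in for the real accounting done at AST time. */
static void
thread_update_usr_ticks(struct thread *td)
{
        printf("charging %d user tick(s)\n", td->td_uuticks);
        td->td_uuticks = 0;
}

/* Interrupt side: note the hit privately and request an AST. */
static void
statclock_hit(struct thread *td)
{
        td->td_pflags |= TDP_USTATCLOCK;   /* no lock needed */
        td->td_flags |= TDF_ASTPENDING;    /* under sched_lock in the kernel */
        td->td_uuticks++;
}

/* Return-to-user side: settle the deferred hit, now lock-free. */
static void
userret(struct thread *td)
{
        if (td->td_pflags & TDP_USTATCLOCK) {
                thread_update_usr_ticks(td);
                td->td_pflags &= ~TDP_USTATCLOCK;
        }
}

int
main(void)
{
        struct thread td = { 0, 0, 0 };

        statclock_hit(&td);
        userret(&td);
        return (0);
}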

@@ -451,7 +451,8 @@ critical_exit(void)
             ("critical_exit: td_critnest == 0"));
         if (td->td_critnest == 1) {
 #ifdef PREEMPTION
-                if (td->td_flags & TDF_OWEPREEMPT) {
+                mtx_assert(&sched_lock, MA_NOTOWNED);
+                if (td->td_pflags & TDP_OWEPREEMPT) {
                         mtx_lock_spin(&sched_lock);
                         mi_switch(SW_INVOL, NULL);
                         mtx_unlock_spin(&sched_lock);
@@ -485,7 +486,9 @@ maybe_preempt(struct thread *td)
          * The new thread should not preempt the current thread if any of the
          * following conditions are true:
          *
-         *  - The current thread has a higher (numerically lower) priority.
+         *  - The current thread has a higher (numerically lower) or
+         *    equivalent priority.  Note that this prevents curthread from
+         *    trying to preempt to itself.
          *  - It is too early in the boot for context switches (cold is set).
          *  - The current thread has an inhibitor set or is in the process of
          *    exiting.  In this case, the current thread is about to switch
@@ -515,7 +518,7 @@ maybe_preempt(struct thread *td)
         if (ctd->td_critnest > 1) {
                 CTR1(KTR_PROC, "maybe_preempt: in critical section %d",
                     ctd->td_critnest);
-                ctd->td_flags |= TDF_OWEPREEMPT;
+                ctd->td_pflags |= TDP_OWEPREEMPT;
                 return (0);
         }

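The owed-preemption logic above follows the same idea: a preemption that arrives while td_critnest > 1 is only recorded, and critical_exit() pays it off when the outermost section ends; since only curthread ever examines the bit, recording it needs no lock. A simplified userspace sketch under hypothetical names (note the real kernel clears TDP_OWEPREEMPT in sched_switch(), not in critical_exit() as done here):

#include <stdio.h>

#define TDP_OWEPREEMPT 0x0100

struct thread {
        int td_critnest;       /* critical-section nesting depth */
        int td_pflags;         /* private to the owning thread */
};

static void
do_switch(void)
{
        printf("mi_switch: involuntary context switch\n");
}

/* A preemption arriving inside a critical section is only recorded. */
static void
maybe_preempt(struct thread *ctd)
{
        if (ctd->td_critnest > 1) {
                ctd->td_pflags |= TDP_OWEPREEMPT;  /* no lock needed */
                return;
        }
        do_switch();
}

/* Leaving the outermost critical section pays off the owed preemption. */
static void
critical_exit(struct thread *td)
{
        if (td->td_critnest == 1 && (td->td_pflags & TDP_OWEPREEMPT)) {
                td->td_pflags &= ~TDP_OWEPREEMPT;
                do_switch();
        }
        td->td_critnest--;
}

int
main(void)
{
        struct thread td = { 2, 0 };    /* two nested critical sections */

        maybe_preempt(&td);             /* deferred: td_critnest > 1 */
        critical_exit(&td);             /* inner exit: still critical */
        critical_exit(&td);             /* outer exit: switch happens */
        return (0);
}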

@@ -297,11 +297,12 @@ mi_switch(int flags, struct thread *newtd)
         mtx_assert(&Giant, MA_NOTOWNED);
 #endif
         KASSERT(td->td_critnest == 1 || (td->td_critnest == 2 &&
-            (td->td_flags & TDF_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
+            (td->td_pflags & TDP_OWEPREEMPT) != 0 && (flags & SW_INVOL) != 0 &&
             newtd == NULL),
             ("mi_switch: switch in a critical section"));
         KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
             ("mi_switch: switch must be voluntary or involuntary"));
+        KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));
         if (flags & SW_VOL)
                 p->p_stats->p_ru.ru_nvcsw++;

@@ -654,7 +654,8 @@ sched_switch(struct thread *td, struct thread *newtd)
                 sched_tdcnt++;
         td->td_lastcpu = td->td_oncpu;
         td->td_last_kse = ke;
-        td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+        td->td_flags &= ~TDF_NEEDRESCHED;
+        td->td_pflags &= ~TDP_OWEPREEMPT;
         td->td_oncpu = NOCPU;
         /*
          * At the last moment, if this thread is still marked RUNNING,

@@ -1138,7 +1138,8 @@ sched_switch(struct thread *td, struct thread *newtd)
         td->td_last_kse = ke;
         td->td_lastcpu = td->td_oncpu;
         td->td_oncpu = NOCPU;
-        td->td_flags &= ~(TDF_NEEDRESCHED | TDF_OWEPREEMPT);
+        td->td_flags &= ~TDF_NEEDRESCHED;
+        td->td_pflags &= ~TDP_OWEPREEMPT;
         /*
          * If the KSE has been assigned it may be in the process of switching

@@ -481,10 +481,11 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
         addr = prof->pr_base + i;
         mtx_unlock_spin(&sched_lock);
         if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
-                prof->pr_addr = pc;
-                prof->pr_ticks = ticks;
+                td->td_profil_addr = pc;
+                td->td_profil_ticks = ticks;
+                td->td_pflags |= TDP_OWEUPC;
                 mtx_lock_spin(&sched_lock);
-                td->td_flags |= TDF_OWEUPC | TDF_ASTPENDING;
+                td->td_flags |= TDF_ASTPENDING;
                 mtx_unlock_spin(&sched_lock);
         }
 }

@@ -178,7 +178,7 @@ ast(struct trapframe *framep)
         p->p_sflag &= ~PS_MACPEND;
 #endif
         td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
-            TDF_NEEDRESCHED | TDF_OWEUPC | TDF_INTERRUPT);
+            TDF_NEEDRESCHED | TDF_INTERRUPT);
         cnt.v_soft++;
         mtx_unlock_spin(&sched_lock);
         /*
@@ -191,10 +191,10 @@ ast(struct trapframe *framep)
         if (td->td_ucred != p->p_ucred)
                 cred_update_thread(td);
-        if (flags & TDF_OWEUPC && p->p_flag & P_PROFIL) {
-                addupc_task(td, p->p_stats->p_prof.pr_addr,
-                    p->p_stats->p_prof.pr_ticks);
-                p->p_stats->p_prof.pr_ticks = 0;
+        if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
+                addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
+                td->td_profil_ticks = 0;
+                td->td_pflags &= ~TDP_OWEUPC;
         }
         if (sflag & PS_ALRMPEND) {
                 PROC_LOCK(p);

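Taken together, the addupc_intr() and ast() hunks show why td_profil_addr and td_profil_ticks exist: fuswintr()/suswintr() may fail from interrupt context (the profile buffer page can be non-resident), so the sample is parked in per-thread fields and charged later via addupc_task(), where faulting is allowed. A userspace sketch of that hand-off under the same hypothetical setup; try_update_profile_buffer() is an invented stand-in for the fuswintr()/suswintr() attempt.

#include <stdint.h>
#include <stdio.h>

#define TDP_OWEUPC 0x0200

struct thread {
        int       td_pflags;        /* private to the owning thread */
        uintptr_t td_profil_addr;   /* deferred sample pc */
        unsigned  td_profil_ticks;  /* deferred sample ticks */
};

/* Stand-in for fuswintr()/suswintr() failing from interrupt context. */
static int
try_update_profile_buffer(uintptr_t pc, unsigned ticks)
{
        (void)pc;
        (void)ticks;
        return (-1);                /* pretend the page is not resident */
}

/* Interrupt side: park the sample in per-thread fields, unlocked. */
static void
addupc_intr(struct thread *td, uintptr_t pc, unsigned ticks)
{
        if (try_update_profile_buffer(pc, ticks) == -1) {
                td->td_profil_addr = pc;
                td->td_profil_ticks = ticks;
                td->td_pflags |= TDP_OWEUPC;
        }
}

/* AST side: charge the deferred sample where faulting is allowed. */
static void
ast(struct thread *td)
{
        if (td->td_pflags & TDP_OWEUPC) {
                printf("addupc_task(pc=%#lx, ticks=%u)\n",
                    (unsigned long)td->td_profil_addr, td->td_profil_ticks);
                td->td_profil_ticks = 0;
                td->td_pflags &= ~TDP_OWEUPC;
        }
}

int
main(void)
{
        struct thread td = { 0, 0, 0 };

        addupc_intr(&td, 0x401000, 1);
        ast(&td);
        return (0);
}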

@@ -304,6 +304,9 @@ struct thread {
         stack_t td_sigstk;       /* (k) Stack ptr and on-stack flag. */
         int td_kflags;           /* (c) Flags for KSE threading. */
         int td_xsig;             /* (c) Signal for ptrace */
+        u_long td_profil_addr;   /* (k) Temporary addr until AST. */
+        u_int td_profil_ticks;   /* (k) Temporary ticks until AST. */
 #define td_endzero td_base_pri

 /* Copied during fork1() or thread_sched_upcall(). */
@@ -346,12 +349,9 @@ struct thread {
 #define TDF_IDLETD      0x000020 /* This is one of the per-CPU idle threads. */
 #define TDF_SELECT      0x000040 /* Selecting; wakeup/waiting danger. */
 #define TDF_TSNOBLOCK   0x000100 /* Don't block on a turnstile due to race. */
-#define TDF_OWEPREEMPT  0x000200 /* Thread has a pending preemption. */
 #define TDF_ASTPENDING  0x000800 /* Thread has some asynchronous events. */
 #define TDF_TIMOFAIL    0x001000 /* Timeout from sleep after we were awake. */
 #define TDF_INTERRUPT   0x002000 /* Thread is marked as interrupted. */
-#define TDF_USTATCLOCK  0x004000 /* Finish user statclock hit at next AST. */
-#define TDF_OWEUPC      0x008000 /* Owe thread an addupc() call at next AST. */
 #define TDF_NEEDRESCHED 0x010000 /* Thread needs to yield. */
 #define TDF_NEEDSIGCHK  0x020000 /* Thread may need signal delivery. */
 #define TDF_XSIG        0x040000 /* Thread is exchanging signal under traced */
@@ -368,6 +368,9 @@ struct thread {
 #define TDP_ALTSTACK    0x0020 /* Have alternate signal stack. */
 #define TDP_DEADLKTREAT 0x0040 /* Lock aquisition - deadlock treatment. */
 #define TDP_SA          0x0080 /* A scheduler activation based thread. */
+#define TDP_OWEPREEMPT  0x0100 /* Thread has a pending preemption. */
+#define TDP_OWEUPC      0x0200 /* Owe thread an addupc() call at next AST. */
+#define TDP_USTATCLOCK  0x0400 /* Finish user statclock hit at next AST. */

 #define TDI_SUSPENDED   0x0001 /* On suspension queue. */
 #define TDI_SLEEPING    0x0002 /* Actually asleep! (tricky). */

@ -63,8 +63,6 @@ struct pstats {
u_long pr_size; /* (c + j) Buffer size. */
u_long pr_off; /* (c + j) PC offset. */
u_long pr_scale; /* (c + j) PC scaling. */
u_long pr_addr; /* (k) Temporary addr until AST. */
u_int pr_ticks; /* (k) Temporary ticks until AST. */
} p_prof;
#define pstat_endcopy p_start
struct timeval p_start; /* (b) Starting time. */
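
The deletion above is a correctness point as well as tidying: with pr_addr and pr_ticks shared process-wide, two threads deferring samples concurrently could clobber each other's values before either AST ran. A hypothetical sketch of the lost update that the per-thread fields prevent (single-threaded here, with the two "threads" interleaved by hand):

#include <stdint.h>
#include <stdio.h>

struct uprof_old {                  /* the old, per-process layout */
        uintptr_t pr_addr;
        unsigned  pr_ticks;
};

int
main(void)
{
        struct uprof_old prof = { 0, 0 };

        /* Thread A defers a sample... */
        prof.pr_addr = 0x401000;
        prof.pr_ticks = 1;

        /* ...and thread B defers its own before A's AST runs. */
        prof.pr_addr = 0x402000;
        prof.pr_ticks = 1;

        /* A's addupc_task() now charges B's pc; A's sample is lost. */
        printf("AST sees pc=%#lx\n", (unsigned long)prof.pr_addr);
        return (0);
}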