Reversion of the commit by Davidxu, plus the fixes applied since.

I'm not convinced there is anything major wrong with the patch, but
them's the rules...

I am using my "David's mentor" hat to revert this, as he's
offline for a while.
Julian Elischer 2003-02-01 12:17:09 +00:00
parent 3c62126d84
commit 6f8132a867
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=110190
50 changed files with 1551 additions and 1698 deletions

View File

@ -479,7 +479,7 @@ alpha_clock_interrupt(struct trapframe *framep)
mtx_lock_spin(&sched_lock);
hardclock_process(curthread, TRAPF_USERMODE(framep));
if ((schedclk2 & 0x7) == 0)
statclock_process(curthread, TRAPF_PC(framep),
statclock_process(curkse, TRAPF_PC(framep),
TRAPF_USERMODE(framep));
mtx_unlock_spin(&sched_lock);
}
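
The paired statclock_process() lines in this and several later hunks show the heart of the revert: statistics-clock ticks are charged to the KSE again rather than to the thread. A minimal sketch of the two shapes (not kernel source; the struct layouts are stubs standing in for the real struct thread and struct kse):

/*
 * Sketch only: the statclock_process() signature change this revert
 * undoes, with stub types.
 */
#include <stdint.h>

typedef uintptr_t register_t;

struct kse    { uint64_t ke_sticks; };
struct thread { struct kse *td_kse; uint64_t td_sticks; };

/* Shape being reverted: ticks are charged to the thread itself. */
static void
statclock_process_td(struct thread *td, register_t pc, int user)
{
    (void)pc; (void)user;
    td->td_sticks++;
}

/* Shape being restored: ticks are charged to the thread's KSE. */
static void
statclock_process_ke(struct kse *ke, register_t pc, int user)
{
    (void)pc; (void)user;
    ke->ke_sticks++;
}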

View File

@ -296,7 +296,7 @@ trap(a0, a1, a2, entry, framep)
CTR5(KTR_TRAP, "%s trap: pid %d, (%lx, %lx, %lx)",
user ? "user" : "kernel", p->p_pid, a0, a1, a2);
if (user) {
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = framep;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -666,7 +666,7 @@ syscall(code, framep)
cnt.v_syscall++;
td->td_frame = framep;
opc = framep->tf_regs[FRAME_PC] - 4;
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
if (p->p_flag & P_KSES)

View File

@ -309,7 +309,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
/* XXX */

View File

@ -137,7 +137,7 @@ i386_unpend(void)
break;
case 1: /* bit 1 - statclock */
mtx_lock_spin(&sched_lock);
statclock_process(curthread,
statclock_process(curthread->td_kse,
(register_t)i386_unpend, 0);
mtx_unlock_spin(&sched_lock);
break;

View File

@ -280,12 +280,9 @@ doreti_ast:
*/
cli
movl PCPU(CURTHREAD),%eax
testl $TDF_ASTPENDING, TD_FLAGS(%eax)
jnz call_ast
movl TD_KSE(%eax), %eax
testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
je doreti_exit
call_ast:
sti
pushl %esp /* pass a pointer to the trapframe */
call ast
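
This doreti_ast hunk (and its duplicates below) moves the pending-AST test back from a single thread flag to a pair of flags on the thread's KSE, at the cost of an extra load through TD_KSE. In C terms the two tests look roughly like the sketch below; the flag values are taken from the sys/proc.h hunk at the end of this diff, and the struct layouts are stubs:

#define TDF_ASTPENDING  0x000800  /* thread flag used by the patch being reverted */
#define KEF_ASTPENDING  0x00400   /* KSE flags restored by the revert */
#define KEF_NEEDRESCHED 0x00800

struct kse    { int ke_flags; };
struct thread { int td_flags; struct kse *td_kse; };

/* Patch form: one flag, tested on the thread itself. */
static int
want_ast_thread(struct thread *td)
{
    return ((td->td_flags & TDF_ASTPENDING) != 0);
}

/* Restored form: two flags, tested on the thread's KSE. */
static int
want_ast_kse(struct thread *td)
{
    return ((td->td_kse->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0);
}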

View File

@ -280,12 +280,9 @@ doreti_ast:
*/
cli
movl PCPU(CURTHREAD),%eax
testl $TDF_ASTPENDING, TD_FLAGS(%eax)
jnz call_ast
movl TD_KSE(%eax), %eax
testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
je doreti_exit
call_ast:
sti
pushl %esp /* pass a pointer to the trapframe */
call ast

View File

@ -92,7 +92,6 @@ ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));
ASSYM(TD_SWITCHIN, offsetof(struct thread, td_switchin));
ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -264,7 +264,7 @@ trap(frame)
!(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) {
/* user trap */
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -957,7 +957,7 @@ syscall(frame)
KASSERT((td->td_kse != NULL), ("syscall: kse/thread UNLINKED"));
KASSERT((td->td_kse->ke_thread == td), ("syscall:kse/thread mismatch"));
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -312,6 +312,8 @@ cpu_set_upcall(struct thread *td, void *pcb)
{
struct pcb *pcb2;
td->td_flags |= TDF_UPCALLING;
/* Point the pcb to the top of the stack. */
pcb2 = td->td_pcb;
@ -368,7 +370,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
* in thread_userret() itself can be done as well.
*/
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
/*
@ -385,15 +387,15 @@ cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
* function.
*/
td->td_frame->tf_esp =
(int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ku->ku_func;
(int)ke->ke_stack.ss_sp + ke->ke_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ke->ke_upcall;
/*
* Pass the address of the mailbox for this kse to the uts
* function as a parameter on the stack.
*/
suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
(int)ku->ku_mailbox);
(int)ke->ke_mailbox);
}
void
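
The i386 vm_machdep.c hunks in this diff restore cpu_set_upcall_kse() taking the KSE itself: the user trapframe is aimed at the upcall stack and the UTS entry point, and the mailbox address is stored where the upcall expects its first argument. A rough, i386-only C sketch with stub types and a stand-in for suword() that merely pretends the user-space store succeeded:

#include <stdint.h>
#include <stddef.h>

typedef struct { void *ss_sp; size_t ss_size; } ustack_t;

struct trapframe   { int tf_esp, tf_eip; };
struct kse_mailbox { int km_dummy; };
struct kse {
    ustack_t            ke_stack;    /* userland upcall stack */
    void               *ke_upcall;   /* userland upcall entry point */
    struct kse_mailbox *ke_mailbox;  /* userland mailbox address */
};
struct thread { struct trapframe *td_frame; };

/* Stand-in for suword(): store one word at a user address (no-op here). */
static int
suword_stub(void *addr, int word)
{
    (void)addr; (void)word;
    return (0);
}

static void
cpu_set_upcall_kse_sketch(struct thread *td, struct kse *ke)
{
    /* Stack pointer: top of the upcall stack, minus 16 bytes of slop. */
    td->td_frame->tf_esp =
        (int)(intptr_t)ke->ke_stack.ss_sp + (int)ke->ke_stack.ss_size - 16;
    /* Program counter: the UTS upcall function itself. */
    td->td_frame->tf_eip = (int)(intptr_t)ke->ke_upcall;
    /* One slot above the (absent) return address: the mailbox pointer. */
    suword_stub((void *)(intptr_t)(td->td_frame->tf_esp + sizeof(void *)),
        (int)(intptr_t)ke->ke_mailbox);
}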

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -170,6 +170,15 @@ dumpthread(volatile struct proc *p, volatile struct thread *td)
if (TD_AWAITING_INTR(td)) {
db_printf("[IWAIT]");
}
if (TD_LENDER(td)) {
db_printf("[LOAN]");
}
if (TD_IS_IDLE(td)) {
db_printf("[IDLE]");
}
if (TD_IS_EXITING(td)) {
db_printf("[EXIT]");
}
break;
case TDS_CAN_RUN:
db_printf("[Can run]");

View File

@ -137,7 +137,7 @@ i386_unpend(void)
break;
case 1: /* bit 1 - statclock */
mtx_lock_spin(&sched_lock);
statclock_process(curthread,
statclock_process(curthread->td_kse,
(register_t)i386_unpend, 0);
mtx_unlock_spin(&sched_lock);
break;

View File

@ -280,12 +280,9 @@ doreti_ast:
*/
cli
movl PCPU(CURTHREAD),%eax
testl $TDF_ASTPENDING, TD_FLAGS(%eax)
jnz call_ast
movl TD_KSE(%eax), %eax
testl $KEF_ASTPENDING | KEF_NEEDRESCHED,KE_FLAGS(%eax)
je doreti_exit
call_ast:
sti
pushl %esp /* pass a pointer to the trapframe */
call ast

View File

@ -92,7 +92,6 @@ ASSYM(TD_INTR_NESTING_LEVEL, offsetof(struct thread, td_intr_nesting_level));
ASSYM(TD_CRITNEST, offsetof(struct thread, td_critnest));
ASSYM(TD_SWITCHIN, offsetof(struct thread, td_switchin));
ASSYM(TD_MD, offsetof(struct thread, td_md));
ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(P_MD, offsetof(struct proc, p_md));
ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt));

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -264,7 +264,7 @@ trap(frame)
!(PCPU_GET(curpcb)->pcb_flags & PCB_VM86CALL))) {
/* user trap */
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -957,7 +957,7 @@ syscall(frame)
KASSERT((td->td_kse != NULL), ("syscall: kse/thread UNLINKED"));
KASSERT((td->td_kse->ke_thread == td), ("syscall:kse/thread mismatch"));
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = &frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -312,6 +312,8 @@ cpu_set_upcall(struct thread *td, void *pcb)
{
struct pcb *pcb2;
td->td_flags |= TDF_UPCALLING;
/* Point the pcb to the top of the stack. */
pcb2 = td->td_pcb;
@ -368,7 +370,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
* in thread_userret() itself can be done as well.
*/
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
/*
@ -385,15 +387,15 @@ cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
* function.
*/
td->td_frame->tf_esp =
(int)ku->ku_stack.ss_sp + ku->ku_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ku->ku_func;
(int)ke->ke_stack.ss_sp + ke->ke_stack.ss_size - 16;
td->td_frame->tf_eip = (int)ke->ke_upcall;
/*
* Pass the address of the mailbox for this kse to the uts
* function as a parameter on the stack.
*/
suword((void *)(td->td_frame->tf_esp + sizeof(void *)),
(int)ku->ku_mailbox);
(int)ke->ke_mailbox);
}
void

View File

@ -2611,7 +2611,8 @@ forwarded_statclock(struct trapframe frame)
{
mtx_lock_spin(&sched_lock);
statclock_process(curthread, TRAPF_PC(&frame), TRAPF_USERMODE(&frame));
statclock_process(curthread->td_kse, TRAPF_PC(&frame),
TRAPF_USERMODE(&frame));
mtx_unlock_spin(&sched_lock);
}

View File

@ -139,7 +139,7 @@ interrupt(u_int64_t vector, struct trapframe *framep)
mtx_lock_spin(&sched_lock);
hardclock_process(curthread, TRAPF_USERMODE(framep));
if ((schedclk2 & 0x7) == 0)
statclock_process(curthread, TRAPF_PC(framep),
statclock_process(curkse, TRAPF_PC(framep),
TRAPF_USERMODE(framep));
mtx_unlock_spin(&sched_lock);
}

View File

@ -331,7 +331,7 @@ trap(int vector, int imm, struct trapframe *framep)
user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
if (user) {
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = framep;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -792,7 +792,7 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
p = td->td_proc;
td->td_frame = framep;
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -939,7 +939,7 @@ ia32_syscall(struct trapframe *framep)
*/
cnt.v_syscall++;
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = framep;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -117,7 +117,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
}

View File

@ -379,6 +379,7 @@ proc0_init(void *dummy __unused)
ke->ke_oncpu = 0;
ke->ke_state = KES_THREAD;
ke->ke_thread = td;
ke->ke_owner = td;
p->p_peers = 0;
p->p_leader = p;

View File

@ -320,10 +320,6 @@ startprofclock(p)
* cover psdiv, etc. as well.
*/
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_STOPPROF) {
mtx_unlock_spin(&sched_lock);
return;
}
if ((p->p_sflag & PS_PROFIL) == 0) {
p->p_sflag |= PS_PROFIL;
if (++profprocs == 1 && stathz != 0) {
@ -345,19 +341,9 @@ stopprofclock(p)
{
int s;
PROC_LOCK_ASSERT(p, MA_OWNED);
retry:
mtx_lock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL) {
if (p->p_profthreads) {
p->p_sflag |= PS_STOPPROF;
mtx_unlock_spin(&sched_lock);
msleep(&p->p_profthreads, &p->p_mtx, PPAUSE,
"stopprof", NULL);
goto retry;
}
p->p_sflag &= ~(PS_PROFIL|PS_STOPPROF);
p->p_sflag &= ~PS_PROFIL;
if (--profprocs == 0 && stathz != 0) {
s = splstatclock();
psdiv = pscnt = 1;
@ -377,7 +363,10 @@ stopprofclock(p)
* this function's relationship to statclock.
*/
void
statclock_process(struct thread *td, register_t pc, int user)
statclock_process(ke, pc, user)
struct kse *ke;
register_t pc;
int user;
{
#ifdef GPROF
struct gmonparam *g;
@ -387,31 +376,27 @@ statclock_process(struct thread *td, register_t pc, int user)
long rss;
struct rusage *ru;
struct vmspace *vm;
struct proc *p = td->td_proc;
struct proc *p = ke->ke_proc;
struct thread *td = ke->ke_thread; /* current thread */
KASSERT(ke == curthread->td_kse, ("statclock_process: td != curthread"));
mtx_assert(&sched_lock, MA_OWNED);
if (user) {
/*
* Came from user mode; CPU was in user state.
* If this process is being profiled, record the tick.
*/
if (p->p_sflag & PS_PROFIL) {
/* Only when thread is not in transition */
if (!(td->td_flags & TDF_UPCALLING))
addupc_intr(td, pc, 1);
}
if (p->p_sflag & PS_PROFIL)
addupc_intr(ke, pc, 1);
if (pscnt < psdiv)
return;
/*
* Charge the time as appropriate.
*/
if (p->p_flag & P_KSES)
thread_statclock(1);
/*
td->td_uticks++;
*/
p->p_uticks++;
if (td->td_ksegrp->kg_nice > NZERO)
thread_add_ticks_intr(1, 1);
ke->ke_uticks++;
if (ke->ke_ksegrp->kg_nice > NZERO)
cp_time[CP_NICE]++;
else
cp_time[CP_USER]++;
@ -444,16 +429,12 @@ statclock_process(struct thread *td, register_t pc, int user)
* in ``non-process'' (i.e., interrupt) work.
*/
if ((td->td_ithd != NULL) || td->td_intr_nesting_level >= 2) {
p->p_iticks++;
/*
td->td_iticks++;
*/
ke->ke_iticks++;
cp_time[CP_INTR]++;
} else {
if (p->p_flag & P_KSES)
thread_statclock(0);
td->td_sticks++;
p->p_sticks++;
thread_add_ticks_intr(0, 1);
ke->ke_sticks++;
if (p != PCPU_GET(idlethread)->td_proc)
cp_time[CP_SYS]++;
else
@ -461,7 +442,7 @@ statclock_process(struct thread *td, register_t pc, int user)
}
}
sched_clock(td);
sched_clock(ke->ke_thread);
/* Update resource usage integrals and maximums. */
if ((pstats = p->p_stats) != NULL &&
@ -491,7 +472,7 @@ statclock(frame)
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
if (--pscnt == 0)
pscnt = psdiv;
statclock_process(curthread, CLKF_PC(frame), CLKF_USERMODE(frame));
statclock_process(curthread->td_kse, CLKF_PC(frame), CLKF_USERMODE(frame));
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
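
Condensed, the tick classification that the restored statclock_process() performs looks like the sketch below (kg_nice is flattened onto the stub KSE instead of being reached through ke_ksegrp, and the profiling, GPROF, and rusage parts are omitted):

#include <stdint.h>

enum { CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES };

#define NZERO 0

struct kse {
    uint64_t ke_uticks, ke_sticks, ke_iticks;
    int      kg_nice;   /* stands in for ke->ke_ksegrp->kg_nice */
};

static long cp_time[CPUSTATES];

/* Bank one statistics-clock tick on the KSE and in the global buckets. */
static void
stat_tick(struct kse *ke, int usermode, int in_interrupt, int idle)
{
    if (usermode) {
        ke->ke_uticks++;
        cp_time[ke->kg_nice > NZERO ? CP_NICE : CP_USER]++;
    } else if (in_interrupt) {
        ke->ke_iticks++;
        cp_time[CP_INTR]++;
    } else {
        ke->ke_sticks++;
        cp_time[idle ? CP_IDLE : CP_SYS]++;
    }
}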

View File

@ -210,7 +210,10 @@ kern_execve(td, fname, argv, envv, mac_p)
* so unset the associated flags and lose KSE mode.
*/
p->p_flag &= ~P_KSES;
td->td_flags &= ~TDF_UNBOUND;
td->td_mailbox = NULL;
td->td_kse->ke_mailbox = NULL;
td->td_kse->ke_flags &= ~KEF_DOUPCALL;
thread_single_end();
}
p->p_flag |= P_INEXEC;

View File

@ -147,7 +147,7 @@ exit1(td, rv)
}
/*
* XXXKSE: MUST abort all other threads before proceeding past here.
* XXXXKSE: MUST abort all other threads before proceeding past here.
*/
PROC_LOCK(p);
if (p->p_flag & P_KSES) {
@ -156,6 +156,17 @@ exit1(td, rv)
* if so, act apropriatly, (exit or suspend);
*/
thread_suspend_check(0);
/*
* Here is a trick..
* We need to free up our KSE to process other threads
* so that we can safely set the UNBOUND flag
* (whether or not we have a mailbox) as we are NEVER
* going to return to the user.
* The flag will not be set yet if we are exiting
* because of a signal, pagefault, or similar
* (or even an exit(2) from the UTS).
*/
td->td_flags |= TDF_UNBOUND;
/*
* Kill off the other threads. This requires
@ -181,6 +192,7 @@ exit1(td, rv)
* Turn off threading support.
*/
p->p_flag &= ~P_KSES;
td->td_flags &= ~TDF_UNBOUND;
thread_single_end(); /* Don't need this any more. */
}
/*
@ -225,10 +237,8 @@ exit1(td, rv)
*/
TAILQ_FOREACH(ep, &exit_list, next)
(*ep->function)(p);
PROC_LOCK(p);
stopprofclock(p);
PROC_UNLOCK(p);
MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
M_ZOMBIE, 0);

View File

@ -492,7 +492,9 @@ fork1(td, flags, pages, procp)
/* Set up the thread as an active thread (as if runnable). */
ke2->ke_state = KES_THREAD;
ke2->ke_thread = td2;
ke2->ke_owner = td2;
td2->td_kse = ke2;
td2->td_flags &= ~TDF_UNBOUND; /* For the rest of this syscall. */
/*
* Duplicate sub-structures as needed.

File diff suppressed because it is too large

View File

@ -219,7 +219,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
#endif
{
int error;
struct thread *thr;
pid_t pid;
int extflags, lockflags;
CTR5(KTR_LOCKMGR,
@ -228,9 +228,9 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
error = 0;
if (td == NULL)
thr = LK_KERNPROC;
pid = LK_KERNPROC;
else
thr = td;
pid = td->td_proc->p_pid;
mtx_lock(lkp->lk_interlock);
if (flags & LK_INTERLOCK) {
@ -257,7 +257,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* lock requests or upgrade requests ( but not the exclusive
* lock itself ).
*/
if (lkp->lk_lockholder != thr) {
if (lkp->lk_lockholder != pid) {
lockflags = LK_HAVE_EXCL;
mtx_lock_spin(&sched_lock);
if (td != NULL && !(td->td_flags & TDF_DEADLKTREAT))
@ -268,7 +268,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
break;
sharelock(lkp, 1);
#if defined(DEBUG_LOCKS)
lkp->lk_slockholder = thr;
lkp->lk_slockholder = pid;
lkp->lk_sfilename = file;
lkp->lk_slineno = line;
lkp->lk_slockername = name;
@ -283,14 +283,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
/* FALLTHROUGH downgrade */
case LK_DOWNGRADE:
KASSERT(lkp->lk_lockholder == thr && lkp->lk_exclusivecount != 0,
KASSERT(lkp->lk_lockholder == pid && lkp->lk_exclusivecount != 0,
("lockmgr: not holding exclusive lock "
"(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
lkp->lk_lockholder, thr, lkp->lk_exclusivecount));
"(owner pid (%d) != pid (%d), exlcnt (%d) != 0",
lkp->lk_lockholder, pid, lkp->lk_exclusivecount));
sharelock(lkp, lkp->lk_exclusivecount);
lkp->lk_exclusivecount = 0;
lkp->lk_flags &= ~LK_HAVE_EXCL;
lkp->lk_lockholder = (struct thread *)LK_NOPROC;
lkp->lk_lockholder = LK_NOPROC;
if (lkp->lk_waitcount)
wakeup((void *)lkp);
break;
@ -317,7 +317,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* after the upgrade). If we return an error, the file
* will always be unlocked.
*/
if ((lkp->lk_lockholder == thr) || (lkp->lk_sharecount <= 0))
if ((lkp->lk_lockholder == pid) || (lkp->lk_sharecount <= 0))
panic("lockmgr: upgrade exclusive lock");
shareunlock(lkp, 1);
/*
@ -342,7 +342,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = pid;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@ -364,7 +364,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
/* FALLTHROUGH exclusive request */
case LK_EXCLUSIVE:
if (lkp->lk_lockholder == thr && thr != LK_KERNPROC) {
if (lkp->lk_lockholder == pid && pid != LK_KERNPROC) {
/*
* Recursive lock.
*/
@ -398,7 +398,7 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
if (error)
break;
lkp->lk_flags |= LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = pid;
if (lkp->lk_exclusivecount != 0)
panic("lockmgr: non-zero exclusive count");
lkp->lk_exclusivecount = 1;
@ -411,10 +411,10 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
case LK_RELEASE:
if (lkp->lk_exclusivecount != 0) {
if (lkp->lk_lockholder != thr &&
if (lkp->lk_lockholder != pid &&
lkp->lk_lockholder != LK_KERNPROC) {
panic("lockmgr: thread %p, not %s %p unlocking",
thr, "exclusive lock holder",
panic("lockmgr: pid %d, not %s %d unlocking",
pid, "exclusive lock holder",
lkp->lk_lockholder);
}
if (lkp->lk_exclusivecount == 1) {
@ -437,14 +437,14 @@ debuglockmgr(lkp, flags, interlkp, td, name, file, line)
* check for holding a shared lock, but at least we can
* check for an exclusive one.
*/
if (lkp->lk_lockholder == thr)
if (lkp->lk_lockholder == pid)
panic("lockmgr: draining against myself");
error = acquiredrain(lkp, extflags);
if (error)
break;
lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL;
lkp->lk_lockholder = thr;
lkp->lk_lockholder = pid;
lkp->lk_exclusivecount = 1;
#if defined(DEBUG_LOCKS)
lkp->lk_filename = file;
@ -589,7 +589,7 @@ lockstatus(lkp, td)
mtx_lock(lkp->lk_interlock);
if (lkp->lk_exclusivecount != 0) {
if (td == NULL || lkp->lk_lockholder == td)
if (td == NULL || lkp->lk_lockholder == td->td_proc->p_pid)
lock_type = LK_EXCLUSIVE;
else
lock_type = LK_EXCLOTHER;
@ -627,7 +627,7 @@ lockmgr_printinfo(lkp)
printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg,
lkp->lk_sharecount);
else if (lkp->lk_flags & LK_HAVE_EXCL)
printf(" lock type %s: EXCL (count %d) by thread %p",
printf(" lock type %s: EXCL (count %d) by pid %d",
lkp->lk_wmesg, lkp->lk_exclusivecount, lkp->lk_lockholder);
if (lkp->lk_waitcount > 0)
printf(" with %d pending", lkp->lk_waitcount);

View File

@ -671,23 +671,32 @@ calcru(p, up, sp, ip)
{
/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
u_int64_t uut = 0, sut = 0, iut = 0;
int s;
struct timeval tv;
struct bintime bt;
struct kse *ke;
struct ksegrp *kg;
mtx_assert(&sched_lock, MA_OWNED);
/* XXX: why spl-protect ? worst case is an off-by-one report */
ut = p->p_uticks;
st = p->p_sticks;
it = p->p_iticks;
FOREACH_KSEGRP_IN_PROC(p, kg) {
/* we could accumulate per ksegrp and per process here*/
FOREACH_KSE_IN_GROUP(kg, ke) {
s = splstatclock();
ut = ke->ke_uticks;
st = ke->ke_sticks;
it = ke->ke_iticks;
splx(s);
tt = ut + st + it;
if (tt == 0) {
st = 1;
tt = 1;
}
tt = ut + st + it;
if (tt == 0) {
st = 1;
tt = 1;
}
if (curthread->td_proc == p) {
if (ke == curthread->td_kse) {
/*
* Adjust for the current time slice. This is actually fairly
* important since the error here is on the order of a time
@ -696,59 +705,64 @@ calcru(p, up, sp, ip)
* processors also being 'current'.
*/
binuptime(&bt);
bintime_sub(&bt, PCPU_PTR(switchtime));
bintime_add(&bt, &p->p_runtime);
} else {
bt = p->p_runtime;
}
bintime2timeval(&bt, &tv);
tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
ptu = p->p_uu + p->p_su + p->p_iu;
if (tu < ptu || (int64_t)tu < 0) {
/* XXX no %qd in kernel. Truncate. */
printf("calcru: negative time of %ld usec for pid %d (%s)\n",
(long)tu, p->p_pid, p->p_comm);
tu = ptu;
}
binuptime(&bt);
bintime_sub(&bt, PCPU_PTR(switchtime));
bintime_add(&bt, &p->p_runtime);
} else {
bt = p->p_runtime;
}
bintime2timeval(&bt, &tv);
tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
if (tu < ptu || (int64_t)tu < 0) {
/* XXX no %qd in kernel. Truncate. */
printf("calcru: negative time of %ld usec for pid %d (%s)\n",
(long)tu, p->p_pid, p->p_comm);
tu = ptu;
}
/* Subdivide tu. */
uu = (tu * ut) / tt;
su = (tu * st) / tt;
iu = tu - uu - su;
/* Subdivide tu. */
uu = (tu * ut) / tt;
su = (tu * st) / tt;
iu = tu - uu - su;
/* Enforce monotonicity. */
if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
if (uu < p->p_uu)
uu = p->p_uu;
else if (uu + p->p_su + p->p_iu > tu)
uu = tu - p->p_su - p->p_iu;
if (st == 0)
su = p->p_su;
else {
su = ((tu - uu) * st) / (st + it);
if (su < p->p_su)
su = p->p_su;
else if (uu + su + p->p_iu > tu)
su = tu - uu - p->p_iu;
}
KASSERT(uu + su + p->p_iu <= tu,
("calcru: monotonisation botch 1"));
iu = tu - uu - su;
KASSERT(iu >= p->p_iu,
("calcru: monotonisation botch 2"));
}
p->p_uu = uu;
p->p_su = su;
p->p_iu = iu;
up->tv_sec = uu / 1000000;
up->tv_usec = uu % 1000000;
sp->tv_sec = su / 1000000;
sp->tv_usec = su % 1000000;
/* Enforce monotonicity. */
if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) {
if (uu < ke->ke_uu)
uu = ke->ke_uu;
else if (uu + ke->ke_su + ke->ke_iu > tu)
uu = tu - ke->ke_su - ke->ke_iu;
if (st == 0)
su = ke->ke_su;
else {
su = ((tu - uu) * st) / (st + it);
if (su < ke->ke_su)
su = ke->ke_su;
else if (uu + su + ke->ke_iu > tu)
su = tu - uu - ke->ke_iu;
}
KASSERT(uu + su + ke->ke_iu <= tu,
("calcru: monotonisation botch 1"));
iu = tu - uu - su;
KASSERT(iu >= ke->ke_iu,
("calcru: monotonisation botch 2"));
}
ke->ke_uu = uu;
ke->ke_su = su;
ke->ke_iu = iu;
uut += uu;
sut += su;
iut += iu;
} /* end kse loop */
} /* end kseg loop */
up->tv_sec = uut / 1000000;
up->tv_usec = uut % 1000000;
sp->tv_sec = sut / 1000000;
sp->tv_usec = sut % 1000000;
if (ip != NULL) {
ip->tv_sec = iu / 1000000;
ip->tv_usec = iu % 1000000;
ip->tv_sec = iut / 1000000;
ip->tv_usec = iut % 1000000;
}
}
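
calcru() goes back to per-KSE accounting here: instead of reading three process-wide tick counters once, it walks every KSE in every ksegrp, subdivides that KSE's share of run time by its tick ratios, and accumulates the totals. A heavily simplified sketch, with hand-rolled lists in place of the kernel's queues and the per-KSE microsecond total passed in rather than derived from p_runtime and the switch time:

#include <stdint.h>

struct kse    { uint64_t ke_uticks, ke_sticks, ke_iticks; struct kse *ke_next; };
struct ksegrp { struct kse *kg_kses; struct ksegrp *kg_next; };
struct proc   { struct ksegrp *p_ksegrps; };

static void
calcru_sketch(struct proc *p, uint64_t tu_per_kse,
    uint64_t *uut, uint64_t *sut, uint64_t *iut)
{
    for (struct ksegrp *kg = p->p_ksegrps; kg; kg = kg->kg_next) {
        for (struct kse *ke = kg->kg_kses; ke; ke = ke->ke_next) {
            uint64_t ut = ke->ke_uticks, st = ke->ke_sticks,
                     it = ke->ke_iticks, tt = ut + st + it;

            if (tt == 0) {      /* avoid dividing by zero, as the source does */
                st = 1;
                tt = 1;
            }
            /* Subdivide this KSE's microseconds by its tick ratios. */
            uint64_t uu = tu_per_kse * ut / tt;
            uint64_t su = tu_per_kse * st / tt;
            uint64_t iu = tu_per_kse - uu - su;

            *uut += uu; *sut += su; *iut += iu;
        }
    }
}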

View File

@ -1522,6 +1522,9 @@ psignal(p, sig)
if (TD_IS_SLEEPING(td) &&
(td->td_flags & TDF_SINTR))
thread_suspend_one(td);
else if (TD_IS_IDLE(td)) {
thread_suspend_one(td);
}
}
if (p->p_suspcount == p->p_numthreads) {
mtx_unlock_spin(&sched_lock);
@ -1634,6 +1637,9 @@ tdsignal(struct thread *td, int sig, sig_t action)
cv_abort(td);
else
abortsleep(td);
} else if (TD_IS_IDLE(td)) {
TD_CLR_IDLE(td);
setrunnable(td);
}
#ifdef SMP
else {

View File

@ -111,7 +111,7 @@ static void runq_readjust(struct runq *rq, struct kse *ke);
* Functions that manipulate runnability from a thread perspective. *
************************************************************************/
/*
* Select the KSE that will be run next. From that find the thread, and
* Select the KSE that will be run next. From that find the thread, and x
* remove it from the KSEGRP's run queue. If there is thread clustering,
* this will be what does it.
*/
@ -127,7 +127,7 @@ choosethread(void)
td = ke->ke_thread;
KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
kg = ke->ke_ksegrp;
if (td->td_proc->p_flag & P_KSES) {
if (TD_IS_UNBOUND(td)) {
TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
if (kg->kg_last_assigned == td) {
kg->kg_last_assigned = TAILQ_PREV(td,
@ -158,8 +158,9 @@ choosethread(void)
}
/*
* Given a surplus KSE, either assign a new runable thread to it
* (and put it in the run queue) or put it in the ksegrp's idle KSE list.
* Given a KSE (now surplus or at least loanable), either assign a new
* runable thread to it (and put it in the run queue) or put it in
* the ksegrp's idle KSE list.
* Or maybe give it back to its owner if it's been loaned.
* Assumes that the original thread is either not runnable or
* already on the run queue
@ -169,54 +170,108 @@ kse_reassign(struct kse *ke)
{
struct ksegrp *kg;
struct thread *td;
struct thread *owner;
struct thread *original;
struct kse_upcall *ku;
int loaned;
KASSERT((ke->ke_owner), ("reassigning KSE with no owner"));
KASSERT((ke->ke_thread && TD_IS_INHIBITED(ke->ke_thread)),
("reassigning KSE with no or runnable thread"));
mtx_assert(&sched_lock, MA_OWNED);
original = ke->ke_thread;
KASSERT(original == NULL || TD_IS_INHIBITED(original),
("reassigning KSE with runnable thread"));
kg = ke->ke_ksegrp;
if (original) {
owner = ke->ke_owner;
loaned = TD_LENDER(owner);
original = ke->ke_thread;
if (TD_CAN_UNBIND(original) && (original->td_standin)) {
KASSERT((owner == original),
("Early thread borrowing?"));
/*
* If the outgoing thread is in threaded group and has never
* scheduled an upcall, decide whether this is a short
* or long term event and thus whether or not to schedule
* an upcall.
* If it is a short term event, just suspend it in
* The outgoing thread is "threaded" and has never
* scheduled an upcall.
* decide whether this is a short or long term event
* and thus whether or not to schedule an upcall.
* if it is a short term event, just suspend it in
* a way that takes its KSE with it.
* Select the events for which we want to schedule upcalls.
* For now it's just sleep.
* XXXKSE eventually almost any inhibition could do.
* Other threads that still have not fired an upcall
* are held to their KSE using the temorary Binding.
*/
if (TD_CAN_UNBIND(original) && (original->td_standin) &&
TD_ON_SLEEPQ(original)) {
/*
* Release ownership of upcall, and schedule an upcall
* thread, this new upcall thread becomes the owner of
* the upcall structure.
if (TD_ON_SLEEPQ(original)) {
/*
* An bound thread that can still unbind itself
* has been scheduled out.
* If it is sleeping, then we need to schedule an
* upcall.
* XXXKSE eventually almost any inhibition could do.
*/
ku = original->td_upcall;
ku->ku_owner = NULL;
original->td_upcall = NULL;
original->td_flags &= ~TDF_CAN_UNBIND;
thread_schedule_upcall(original, ku);
original->td_flags |= TDF_UNBOUND;
thread_schedule_upcall(original, ke);
owner = ke->ke_owner;
loaned = 1;
}
original->td_kse = NULL;
}
/*
* If the current thread was borrowing, then make things consistent
* by giving it back to the owner for the moment. The original thread
* must be unbound and have already used its chance for
* firing off an upcall. Threads that have not yet made an upcall
* can not borrow KSEs.
*/
if (loaned) {
TD_CLR_LOAN(owner);
ke->ke_thread = owner;
original->td_kse = NULL; /* give it amnesia */
/*
* Upcalling threads have lower priority than all
* in-kernel threads, However threads that have loaned out
* their KSE and are NOT upcalling have the priority that
* they have. In other words, only look for other work if
* the owner is not runnable, OR is upcalling.
*/
if (TD_CAN_RUN(owner) &&
((owner->td_flags & TDF_UPCALLING) == 0)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
}
/*
* Either the owner is not runnable, or is an upcall.
* Find the first unassigned thread
* If there is a 'last assigned' then see what's next.
* otherwise look at what is first.
*/
if ((td = kg->kg_last_assigned)) {
td = TAILQ_NEXT(td, td_runq);
} else {
td = TAILQ_FIRST(&kg->kg_runq);
}
/*
* Find the first unassigned thread
*/
if ((td = kg->kg_last_assigned) != NULL)
td = TAILQ_NEXT(td, td_runq);
else
td = TAILQ_FIRST(&kg->kg_runq);
/*
* If we found one, assign it the kse, otherwise idle the kse.
* If we found one assign it the kse, otherwise idle the kse.
*/
if (td) {
/*
* Assign the new thread to the KSE.
* and make the KSE runnable again,
*/
if (TD_IS_BOUND(owner)) {
/*
* If there is a reason to keep the previous
* owner, do so.
*/
TD_SET_LOAN(owner);
} else {
/* otherwise, cut it free */
ke->ke_owner = td;
owner->td_kse = NULL;
}
kg->kg_last_assigned = td;
td->td_kse = ke;
ke->ke_thread = td;
@ -225,11 +280,43 @@ kse_reassign(struct kse *ke)
return;
}
ke->ke_state = KES_IDLE;
ke->ke_thread = NULL;
TAILQ_INSERT_TAIL(&kg->kg_iq, ke, ke_kgrlist);
kg->kg_idle_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on idle queue", ke);
/*
* Now handle any waiting upcall.
* Since we didn't make them runnable before.
*/
if (TD_CAN_RUN(owner)) {
setrunnable(owner);
CTR2(KTR_RUNQ, "kse_reassign: ke%p -> td%p (give back)",
ke, owner);
return;
}
/*
* It is possible that this is the last thread in the group
* because the KSE is being shut down or the process
* is exiting.
*/
if (TD_IS_EXITING(owner) || (ke->ke_flags & KEF_EXIT)) {
ke->ke_thread = NULL;
owner->td_kse = NULL;
kse_unlink(ke);
return;
}
/*
* At this stage all we know is that the owner
* is the same as the 'active' thread in the KSE
* and that it is
* Presently NOT loaned out.
* Put it on the loanable queue. Make it fifo
* so that long term sleepers donate their KSE's first.
*/
KASSERT((TD_IS_BOUND(owner)), ("kse_reassign: UNBOUND lender"));
ke->ke_state = KES_THREAD;
ke->ke_flags |= KEF_ONLOANQ;
TAILQ_INSERT_TAIL(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses++;
CTR1(KTR_RUNQ, "kse_reassign: ke%p on loan queue", ke);
return;
}
@ -238,7 +325,7 @@ kse_reassign(struct kse *ke)
* Remove a thread from its KSEGRP's run queue.
* This in turn may remove it from a KSE if it was already assigned
* to one, possibly causing a new thread to be assigned to the KSE
* and the KSE getting a new priority.
* and the KSE getting a new priority (unless it's a BOUND thread/KSE pair).
*/
static void
remrunqueue(struct thread *td)
@ -248,16 +335,17 @@ remrunqueue(struct thread *td)
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
KASSERT ((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
kg = td->td_ksegrp;
ke = td->td_kse;
/*
* If it's a bound thread/KSE pair, take the shortcut. All non-KSE
* threads are BOUND.
*/
CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
/*
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_KSES) == 0) {
if (TD_IS_BOUND(td)) {
/* Bring its kse with it, leave the thread attached */
sched_rem(ke);
ke->ke_state = KES_THREAD;
@ -275,7 +363,7 @@ remrunqueue(struct thread *td)
sched_rem(ke);
ke->ke_state = KES_THREAD;
td2 = kg->kg_last_assigned;
KASSERT((td2 != NULL), ("last assigned has wrong value"));
KASSERT((td2 != NULL), ("last assigned has wrong value "));
if (td2 == td)
kg->kg_last_assigned = td3;
kse_reassign(ke);
@ -293,14 +381,14 @@ adjustrunqueue( struct thread *td, int newpri)
struct kse *ke;
mtx_assert(&sched_lock, MA_OWNED);
KASSERT((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
KASSERT ((TD_ON_RUNQ(td)), ("adjustrunqueue: Bad state on run queue"));
/*
* If it's a bound thread/KSE pair, take the shortcut. All non-KSE
* threads are BOUND.
*/
ke = td->td_kse;
CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
/*
* If it is not a threaded process, take the shortcut.
*/
if ((td->td_proc->p_flag & P_KSES) == 0) {
if (TD_IS_BOUND(td)) {
/* We only care about the kse in the run queue. */
td->td_priority = newpri;
if (ke->ke_rqindex != (newpri / RQ_PPQ)) {
@ -309,8 +397,9 @@ adjustrunqueue( struct thread *td, int newpri)
}
return;
}
/* It is a threaded process */
/*
* An unbound thread. This is not optimised yet.
*/
kg = td->td_ksegrp;
kg->kg_runnable--;
TD_SET_CAN_RUN(td);
@ -350,17 +439,48 @@ setrunqueue(struct thread *td)
sched_add(td->td_kse);
return;
}
/*
* If the process is threaded but the thread is bound then
* there is still a little extra to do re. KSE loaning.
*/
if (TD_IS_BOUND(td)) {
KASSERT((td->td_kse != NULL),
("queueing BAD thread to run queue"));
ke = td->td_kse;
KASSERT((ke->ke_owner == ke->ke_thread),
("setrunqueue: Hey KSE loaned out"));
if (ke->ke_flags & KEF_ONLOANQ) {
ke->ke_flags &= ~KEF_ONLOANQ;
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
kg->kg_loan_kses--;
}
sched_add(td->td_kse);
return;
}
/*
* Ok, so we are threading with this thread.
* We don't have a KSE, see if we can get one..
*/
tda = kg->kg_last_assigned;
if ((ke = td->td_kse) == NULL) {
if (kg->kg_idle_kses) {
/*
* We will need a KSE, see if there is one..
* First look for a free one, before getting desperate.
* If we can't get one, our priority is not high enough..
* that's ok..
*/
if (kg->kg_loan_kses) {
/*
* There is a free one so it's ours for the asking..
* Failing that see if we can borrow one.
*/
ke = TAILQ_FIRST(&kg->kg_iq);
TAILQ_REMOVE(&kg->kg_iq, ke, ke_kgrlist);
ke = TAILQ_FIRST(&kg->kg_lq);
TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
ke->ke_flags &= ~KEF_ONLOANQ;
ke->ke_state = KES_THREAD;
kg->kg_idle_kses--;
TD_SET_LOAN(ke->ke_owner);
ke->ke_thread = NULL;
kg->kg_loan_kses--;
} else if (tda && (tda->td_priority > td->td_priority)) {
/*
* None free, but there is one we can commandeer.
@ -375,7 +495,11 @@ setrunqueue(struct thread *td)
} else {
/*
* Temporarily disassociate so it looks like the other cases.
* If the owner wasn't lending before, then it is now..
*/
if (!TD_LENDER(ke->ke_owner)) {
TD_SET_LOAN(ke->ke_owner);
}
ke->ke_thread = NULL;
td->td_kse = NULL;
}
@ -707,7 +831,6 @@ thread_sanity_check(struct thread *td, char *string)
if (kg->kg_last_assigned && (saw_lastassigned == 0)) {
panc(string, "where on earth does lastassigned point?");
}
#if 0
FOREACH_THREAD_IN_GROUP(kg, td2) {
if (((td2->td_flags & TDF_UNBOUND) == 0) &&
(TD_ON_RUNQ(td2))) {
@ -717,7 +840,6 @@ thread_sanity_check(struct thread *td, char *string)
}
}
}
#endif
#if 0
if ((unassigned + assigned) != kg->kg_runnable) {
panc(string, "wrong number in runnable");
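
The setrunqueue() portion of this hunk restores KSE loaning: an unbound thread that arrives runnable without a KSE first tries to take one off the ksegrp's FIFO loan queue, marking the lender so it can reclaim its KSE later. A sketch of that borrow path using sys/queue.h; the flag values come from the sys/proc.h hunk at the end of the diff, everything else is stubbed:

#include <sys/queue.h>
#include <stddef.h>

struct thread;
struct kse {
    TAILQ_ENTRY(kse) ke_kgrlist;
    struct thread *ke_owner;
    struct thread *ke_thread;
    int ke_flags;
#define KEF_ONLOANQ 0x01000
};
struct thread { struct kse *td_kse; int td_inhibitors; };
#define TDI_LOAN 0x0020
#define TD_SET_LOAN(td) ((td)->td_inhibitors |= TDI_LOAN)

struct ksegrp {
    TAILQ_HEAD(, kse) kg_lq;   /* loanable KSEs, FIFO */
    int kg_loan_kses;
};

/* Try to borrow a KSE for td; returns NULL when none is loanable. */
static struct kse *
borrow_kse(struct ksegrp *kg, struct thread *td)
{
    struct kse *ke = TAILQ_FIRST(&kg->kg_lq);

    if (ke == NULL)
        return (NULL);
    TAILQ_REMOVE(&kg->kg_lq, ke, ke_kgrlist);
    ke->ke_flags &= ~KEF_ONLOANQ;
    kg->kg_loan_kses--;
    TD_SET_LOAN(ke->ke_owner); /* mark the owner as having lent it out */
    ke->ke_thread = NULL;      /* detach from the owner... */
    td->td_kse = ke;           /* ...and hand it to the borrower */
    return (ke);
}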

File diff suppressed because it is too large

View File

@ -358,9 +358,7 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
return (0);
if (state == GMON_PROF_OFF) {
gp->state = state;
PROC_LOCK(&proc0);
stopprofclock(&proc0);
PROC_UNLOCK(&proc0);
stopguprof(gp);
} else if (state == GMON_PROF_ON) {
gp->state = GMON_PROF_OFF;
@ -371,9 +369,7 @@ sysctl_kern_prof(SYSCTL_HANDLER_ARGS)
#ifdef GUPROF
} else if (state == GMON_PROF_HIRES) {
gp->state = GMON_PROF_OFF;
PROC_LOCK(&proc0);
stopprofclock(&proc0);
PROC_UNLOCK(&proc0);
startguprof(gp);
gp->state = state;
#endif
@ -423,7 +419,7 @@ profil(td, uap)
struct thread *td;
register struct profil_args *uap;
{
struct uprof *upp;
register struct uprof *upp;
int s;
int error = 0;
@ -434,9 +430,7 @@ profil(td, uap)
goto done2;
}
if (uap->scale == 0) {
PROC_LOCK(td->td_proc);
stopprofclock(td->td_proc);
PROC_UNLOCK(td->td_proc);
goto done2;
}
upp = &td->td_proc->p_stats->p_prof;
@ -478,16 +472,19 @@ profil(td, uap)
* inaccurate.
*/
void
addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
addupc_intr(ke, pc, ticks)
register struct kse *ke;
register uintptr_t pc;
u_int ticks;
{
struct uprof *prof;
caddr_t addr;
u_int i;
int v;
register struct uprof *prof;
register caddr_t addr;
register u_int i;
register int v;
if (ticks == 0)
return;
prof = &td->td_proc->p_stats->p_prof;
prof = &ke->ke_proc->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
return; /* out of range; ignore */
@ -495,9 +492,9 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
addr = prof->pr_base + i;
if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
mtx_lock_spin(&sched_lock);
td->td_praddr = pc;
td->td_prticks = ticks;
td->td_flags |= (TDF_OWEUPC | TDF_ASTPENDING);
prof->pr_addr = pc;
prof->pr_ticks = ticks;
ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING ;
mtx_unlock_spin(&sched_lock);
}
}
@ -505,56 +502,34 @@ addupc_intr(struct thread *td, uintptr_t pc, u_int ticks)
/*
* Much like before, but we can afford to take faults here. If the
* update fails, we simply turn off profiling.
* XXXKSE, don't use kse unless we got sched lock.
*/
void
addupc_task(struct thread *td, uintptr_t pc, u_int ticks)
addupc_task(ke, pc, ticks)
register struct kse *ke;
register uintptr_t pc;
u_int ticks;
{
struct proc *p = td->td_proc;
struct proc *p = ke->ke_proc;
register struct uprof *prof;
register caddr_t addr;
register u_int i;
u_short v;
int stop = 0;
if (ticks == 0)
return;
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
if (!(p->p_sflag & PS_PROFIL)) {
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
return;
}
p->p_profthreads++;
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
prof = &p->p_stats->p_prof;
if (pc < prof->pr_off ||
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size) {
goto out;
}
(i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
return;
addr = prof->pr_base + i;
if (copyin(addr, &v, sizeof(v)) == 0) {
v += ticks;
if (copyout(&v, addr, sizeof(v)) == 0)
goto out;
return;
}
stop = 1;
out:
PROC_LOCK(p);
if (--p->p_profthreads == 0) {
if (p->p_sflag & PS_STOPPROF) {
wakeup(&p->p_profthreads);
stop = 0;
}
}
if (stop)
stopprofclock(p);
PROC_UNLOCK(p);
stopprofclock(p);
}
#if defined(__i386__) && __GNUC__ >= 2
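
The addupc_intr() being restored takes the KSE and, when the profile buffer page is not resident (fuswintr()/suswintr() fail rather than sleep in interrupt context), parks the sample in p_stats->p_prof and flags the KSE so ast() can retry the update with copyin()/copyout(). A sketch of that deferral, with stub fetch/store helpers that always "fail":

#include <stdint.h>

struct uprof {
    char         *pr_base;   /* profile buffer base */
    unsigned long pr_size;
    uintptr_t     pr_addr;   /* temp storage for addr until AST */
    unsigned int  pr_ticks;  /* temp storage for ticks until AST */
};

#define KEF_OWEUPC     0x00002
#define KEF_ASTPENDING 0x00400

struct kse { int ke_flags; struct uprof *ke_prof; };

/* Stand-ins for fuswintr()/suswintr(): fault-safe word fetch/store that
 * return -1 instead of sleeping; in this sketch they always fail. */
static int fuswintr(void *addr) { (void)addr; return (-1); }
static int suswintr(void *addr, int v) { (void)addr; (void)v; return (-1); }

static void
addupc_intr_sketch(struct kse *ke, uintptr_t pc, unsigned int ticks, char *addr)
{
    int v;

    if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
        /* Can't touch user memory now: park the sample and let ast()
         * retry with copyin()/copyout(), which may fault safely. */
        ke->ke_prof->pr_addr = pc;
        ke->ke_prof->pr_ticks = ticks;
        ke->ke_flags |= KEF_OWEUPC | KEF_ASTPENDING;
    }
}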

View File

@ -73,22 +73,15 @@ userret(td, frame, oticks)
u_int oticks;
{
struct proc *p = td->td_proc;
#ifdef INVARIANTS
struct kse *ke;
#endif
u_int64_t eticks;
struct kse *ke = td->td_kse;
CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
p->p_comm);
#ifdef INVARIANTS
/*
* Check that we called signotify() enough.
* XXXKSE this checking is bogus for threaded program,
*/
/* Check that we called signotify() enough. */
mtx_lock(&Giant);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
if (SIGPENDING(p) && ((p->p_sflag & PS_NEEDSIGCHK) == 0 ||
(td->td_kse->ke_flags & KEF_ASTPENDING) == 0))
printf("failed to set signal flags properly for ast()\n");
@ -102,18 +95,6 @@ userret(td, frame, oticks)
*/
sched_userret(td);
/*
* Charge system time if profiling.
*
* XXX should move PS_PROFIL to a place that can obviously be
* accessed safely without sched_lock.
*/
if (p->p_sflag & PS_PROFIL) {
eticks = td->td_sticks - oticks;
addupc_task(td, TRAPF_PC(frame), (u_int)eticks * psratio);
}
/*
* We need to check to see if we have to exit or wait due to a
* single threading requirement or some other STOP condition.
@ -132,6 +113,21 @@ userret(td, frame, oticks)
if (p->p_flag & P_KSES) {
thread_userret(td, frame);
}
/*
* Charge system time if profiling.
*
* XXX should move PS_PROFIL to a place that can obviously be
* accessed safely without sched_lock.
*/
if (p->p_sflag & PS_PROFIL) {
quad_t ticks;
mtx_lock_spin(&sched_lock);
ticks = ke->ke_sticks - oticks;
mtx_unlock_spin(&sched_lock);
addupc_task(ke, TRAPF_PC(frame), (u_int)ticks * psratio);
}
}
/*
@ -150,7 +146,6 @@ ast(struct trapframe *framep)
u_int prticks, sticks;
int sflag;
int flags;
int tflags;
int sig;
#if defined(DEV_NPX) && !defined(SMP)
int ucode;
@ -180,21 +175,19 @@ ast(struct trapframe *framep)
*/
mtx_lock_spin(&sched_lock);
ke = td->td_kse;
sticks = td->td_sticks;
tflags = td->td_flags;
sticks = ke->ke_sticks;
flags = ke->ke_flags;
sflag = p->p_sflag;
p->p_sflag &= ~(PS_ALRMPEND | PS_NEEDSIGCHK | PS_PROFPEND | PS_XCPU);
#ifdef MAC
p->p_sflag &= ~PS_MACPEND;
#endif
ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED);
td->td_flags &= ~(TDF_ASTPENDING | TDF_OWEUPC);
ke->ke_flags &= ~(KEF_ASTPENDING | KEF_NEEDRESCHED | KEF_OWEUPC);
cnt.v_soft++;
prticks = 0;
if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
prticks = td->td_prticks;
td->td_prticks = 0;
if (flags & KEF_OWEUPC && sflag & PS_PROFIL) {
prticks = p->p_stats->p_prof.pr_ticks;
p->p_stats->p_prof.pr_ticks = 0;
}
mtx_unlock_spin(&sched_lock);
/*
@ -207,9 +200,8 @@ ast(struct trapframe *framep)
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
if (tflags & TDF_OWEUPC && sflag & PS_PROFIL) {
addupc_task(td, td->td_praddr, prticks);
}
if (flags & KEF_OWEUPC && sflag & PS_PROFIL)
addupc_task(ke, p->p_stats->p_prof.pr_addr, prticks);
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
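
In the userret() part of this hunk, the profiling charge moves to after thread_userret() and is based on the KSE's system-tick counter: the delta since kernel entry, scaled by psratio (the profhz/stathz ratio in the real kernel), is handed to addupc_task(). Stripped of locking, roughly:

#include <stdint.h>

struct kse { uint64_t ke_sticks; };

/* Stand-in for addupc_task(). */
static void
addupc_task_stub(struct kse *ke, uintptr_t pc, unsigned int ticks)
{
    (void)ke; (void)pc; (void)ticks;
}

/* oticks is ke_sticks as sampled on kernel entry. */
static void
charge_profiling(struct kse *ke, uint64_t oticks, uintptr_t pc, int psratio)
{
    if (ke->ke_sticks > oticks)
        addupc_task_stub(ke, pc,
            (unsigned int)(ke->ke_sticks - oticks) * psratio);
}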

View File

@ -244,7 +244,7 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "clk", &lock_class_mtx_spin },
{ "mutex profiling lock", &lock_class_mtx_spin },
{ "kse zombie lock", &lock_class_mtx_spin },
{ "zombie_thread_lock", &lock_class_mtx_spin },
{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__
{ "MCA spin lock", &lock_class_mtx_spin },

View File

@ -169,7 +169,7 @@ trap(struct trapframe *frame)
trapname(type), user ? "user" : "kernel");
if (user) {
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -285,7 +285,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
return;

View File

@ -169,7 +169,7 @@ trap(struct trapframe *frame)
trapname(type), user ? "user" : "kernel");
if (user) {
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -285,7 +285,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
return;

View File

@ -71,7 +71,8 @@ tick_process(struct clockframe *cf)
CTR1(KTR_CLK, "tick_process: AP, cpuid=%d", PCPU_GET(cpuid));
mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
hardclock_process(curthread, CLKF_USERMODE(cf));
statclock_process(curthread, CLKF_PC(cf), CLKF_USERMODE(cf));
statclock_process(curthread->td_kse, CLKF_PC(cf),
CLKF_USERMODE(cf));
mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);
}
#else

View File

@ -243,7 +243,7 @@ trap(struct trapframe *tf)
KASSERT(td->td_proc != NULL, ("trap: curproc NULL"));
p = td->td_proc;
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = tf;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
@ -495,7 +495,7 @@ syscall(struct trapframe *tf)
reg = 0;
regcnt = REG_MAXARGS;
sticks = td->td_sticks;
sticks = td->td_kse->ke_sticks;
td->td_frame = tf;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);

View File

@ -141,7 +141,7 @@ cpu_set_upcall(struct thread *td, void *pcb)
}
void
cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku)
cpu_set_upcall_kse(struct thread *td, struct kse *ke)
{
}

View File

@ -353,7 +353,7 @@ BUF_KERNPROC(struct buf *bp)
struct thread *td = curthread;
if ((td != PCPU_GET(idlethread))
&& bp->b_lock.lk_lockholder == td)
&& bp->b_lock.lk_lockholder == td->td_proc->p_pid)
td->td_locks--;
bp->b_lock.lk_lockholder = LK_KERNPROC;
}

View File

@ -57,14 +57,14 @@ struct lock {
short lk_prio; /* priority at which to sleep */
const char *lk_wmesg; /* resource sleeping (for tsleep) */
int lk_timo; /* maximum sleep time (for tsleep) */
struct thread *lk_lockholder; /* thread of exclusive lock holder */
pid_t lk_lockholder; /* pid of exclusive lock holder */
struct lock *lk_newlock; /* lock taking over this lock */
#ifdef DEBUG_LOCKS
const char *lk_filename;
const char *lk_lockername;
int lk_lineno;
struct thread *lk_slockholder;
pid_t lk_slockholder;
const char *lk_sfilename;
const char *lk_slockername;
int lk_slineno;
@ -176,8 +176,8 @@ struct lock {
/*
* Indicator that no process holds exclusive lock
*/
#define LK_KERNPROC ((struct thread *)-2)
#define LK_NOPROC ((struct thread *)-1)
#define LK_KERNPROC ((pid_t) -2)
#define LK_NOPROC ((pid_t) -1)
#ifdef INVARIANTS
#define LOCKMGR_ASSERT(lkp, what, p) do { \

View File

@ -185,7 +185,7 @@ struct trapframe;
*
* It is important to remember that a particular thread structure only
* exists as long as the system call or kernel entrance (e.g. by pagefault)
* which it is currently executing. It should therefore NEVER be referenced
* which it is currently executing. It should threfore NEVER be referenced
* by pointers in long lived structures that live longer than a single
* request. If several threads complete their work at the same time,
* they will all rewind their stacks to the user boundary, report their
@ -216,7 +216,7 @@ struct kse;
/*
* The KSEGRP is allocated resources across a number of CPUs.
* (Including a number of CPUxQUANTA. It parcels these QUANTA up among
* its KSEs, each of which should be running in a different CPU.
* Its KSEs, each of which should be running in a different CPU.
* BASE priority and total available quanta are properties of a KSEGRP.
* Multiple KSEGRPs in a single process compete against each other
* for total quanta in the same way that a forked child competes against
@ -258,7 +258,7 @@ They would be given priorities calculated from the KSEG.
* This is what is put to sleep and reactivated.
* The first KSE available in the correct group will run this thread.
* If several are available, use the one on the same CPU as last time.
* When waiting to be run, threads are hung off the KSEGRP in priority order.
* When waing to be run, threads are hung off the KSEGRP in priority order.
* with N runnable and queued KSEs in the KSEGRP, the first N threads
* are linked to them. Other threads are not yet assigned.
*/
@ -298,16 +298,7 @@ struct thread {
struct ucred *td_ucred; /* (k) Reference to credentials. */
void (*td_switchin)(void); /* (k) Switchin special func. */
struct thread *td_standin; /* (?) Use this for an upcall */
struct kse_upcall *td_upcall; /* owned upcall structure. */
u_int64_t td_sticks; /* (j) Statclock hits in system mode. */
#if 0
u_int64_t td_uticks; /* (j) Statclock hits in user mode. */
u_int64_t td_iticks; /* (j) Statclock hits in intr. */
#endif
u_long td_praddr; /* temp storage for addr util AST */
u_int td_prticks; /* temp storage for ticks until AST */
u_int td_usticks; /* Stateclock kern mode hits, UTS */
u_int td_uuticks; /* Stateclock user mode hits, UTS */
u_int td_usticks; /* (?) Statclock kernel hits, for UTS */
u_int td_critnest; /* (k) Critical section nest level. */
#define td_endzero td_base_pri
@ -342,6 +333,7 @@ struct thread {
struct td_sched *td_sched; /* Scheduler specific data */
};
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* May give away the kse, uses the kg runq. */
#define TDF_INPANIC 0x000002 /* Caused a panic, let it drive crashdump. */
#define TDF_CAN_UNBIND 0x000004 /* Only temporarily bound. */
#define TDF_SINTR 0x000008 /* Sleep is interruptible. */
@ -351,11 +343,8 @@ struct thread {
#define TDF_UPCALLING 0x000100 /* This thread is doing an upcall. */
#define TDF_ONSLEEPQ 0x000200 /* On the sleep queue. */
#define TDF_INMSLEEP 0x000400 /* Don't recurse in msleep(). */
#define TDF_ASTPENDING 0x000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x001000 /* Timeout from sleep after we were awake. */
#define TDF_INTERRUPT 0x002000 /* Thread is marked as interrupted. */
#define TDF_USTATCLOCK 0x004000 /* State clock hits in userland. */
#define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next ast. */
#define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */
#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
@ -363,17 +352,25 @@ struct thread {
#define TDI_SWAPPED 0x0004 /* Stack not in mem.. bad juju if run. */
#define TDI_LOCK 0x0008 /* Stopped on a lock. */
#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
#define TDI_LOAN 0x0020 /* bound thread's KSE is lent */
#define TDI_IDLE 0x0040 /* kse_release() made us surplus */
#define TDI_EXITING 0x0080 /* Thread is in exit processing */
#define TD_CAN_UNBIND(td) \
(((td)->td_flags & TDF_CAN_UNBIND) == TDF_CAN_UNBIND && \
((td)->td_upcall != NULL))
#define TD_IS_UNBOUND(td) ((td)->td_flags & TDF_UNBOUND)
#define TD_IS_BOUND(td) (!TD_IS_UNBOUND(td))
#define TD_CAN_UNBIND(td) \
(((td)->td_flags & (TDF_UNBOUND|TDF_CAN_UNBIND)) == TDF_CAN_UNBIND)
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
#define TD_IS_SWAPPED(td) ((td)->td_inhibitors & TDI_SWAPPED)
#define TD_ON_LOCK(td) ((td)->td_inhibitors & TDI_LOCK)
#define TD_LENDER(td) ((td)->td_inhibitors & TDI_LOAN)
#define TD_AWAITING_INTR(td) ((td)->td_inhibitors & TDI_IWAIT)
#define TD_IS_IDLE(td) ((td)->td_inhibitors & TDI_IDLE)
#define TD_IS_EXITING(td) ((td)->td_inhibitors & TDI_EXITING)
#define TD_IS_RUNNING(td) ((td)->td_state == TDS_RUNNING)
#define TD_ON_RUNQ(td) ((td)->td_state == TDS_RUNQ)
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
@ -381,12 +378,12 @@ struct thread {
#define TD_SET_INHIB(td, inhib) do { \
(td)->td_state = TDS_INHIBITED; \
(td)->td_inhibitors |= (inhib); \
(td)->td_inhibitors |= inhib; \
} while (0)
#define TD_CLR_INHIB(td, inhib) do { \
if (((td)->td_inhibitors & (inhib)) && \
(((td)->td_inhibitors &= ~(inhib)) == 0)) \
if (((td)->td_inhibitors & inhib) && \
(((td)->td_inhibitors &= ~inhib) == 0)) \
(td)->td_state = TDS_CAN_RUN; \
} while (0)
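
One detail in the TD_SET_INHIB/TD_CLR_INHIB hunk just above: the revert also drops the parentheses around the inhib macro argument. That is harmless for the current callers, which all pass a single TDI_* constant, but the parenthesized form is the defensive one; a standalone demonstration of what goes wrong with a compound argument:

#include <assert.h>

#define TDI_LOAN 0x0020
#define TDI_IDLE 0x0040

#define CLR_UNPARENED(x, inhib)  ((x) &= ~inhib)
#define CLR_PARENED(x, inhib)    ((x) &= ~(inhib))

int
main(void)
{
    int a = TDI_LOAN | TDI_IDLE, b = a;

    /* ~ binds tighter than |, so ~TDI_LOAN | TDI_IDLE is not
     * ~(TDI_LOAN | TDI_IDLE): the unparenthesized form clears too little. */
    CLR_UNPARENED(a, TDI_LOAN | TDI_IDLE);
    CLR_PARENED(b, TDI_LOAN | TDI_IDLE);
    assert(a != 0 && b == 0);
    return (0);
}
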
@ -395,6 +392,8 @@ struct thread {
#define TD_SET_LOCK(td) TD_SET_INHIB((td), TDI_LOCK)
#define TD_SET_SUSPENDED(td) TD_SET_INHIB((td), TDI_SUSPENDED)
#define TD_SET_IWAIT(td) TD_SET_INHIB((td), TDI_IWAIT)
#define TD_SET_LOAN(td) TD_SET_INHIB((td), TDI_LOAN)
#define TD_SET_IDLE(td) TD_SET_INHIB((td), TDI_IDLE)
#define TD_SET_EXITING(td) TD_SET_INHIB((td), TDI_EXITING)
#define TD_CLR_SLEEPING(td) TD_CLR_INHIB((td), TDI_SLEEPING)
@ -402,6 +401,8 @@ struct thread {
#define TD_CLR_LOCK(td) TD_CLR_INHIB((td), TDI_LOCK)
#define TD_CLR_SUSPENDED(td) TD_CLR_INHIB((td), TDI_SUSPENDED)
#define TD_CLR_IWAIT(td) TD_CLR_INHIB((td), TDI_IWAIT)
#define TD_CLR_LOAN(td) TD_CLR_INHIB((td), TDI_LOAN)
#define TD_CLR_IDLE(td) TD_CLR_INHIB((td), TDI_IDLE)
#define TD_SET_RUNNING(td) do {(td)->td_state = TDS_RUNNING; } while (0)
#define TD_SET_RUNQ(td) do {(td)->td_state = TDS_RUNQ; } while (0)
@ -412,6 +413,16 @@ struct thread {
(td)->td_wchan = NULL; \
} while (0)
/*
* Traps for young players:
* The main thread variable that controls whether a thread acts as a threaded
* or unthreaded thread is the TDF_UNBOUND flag.
* i.e. they bind themselves to whatever thread thay are first scheduled with.
* You may see BOUND threads in KSE processes but you should never see
* UNBOUND threads in non KSE processes.
*/
/*
* The schedulable entity that can be given a context to run.
* A process may have several of these. Probably one per processor
@ -429,47 +440,54 @@ struct kse {
#define ke_startzero ke_flags
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* Active associated thread. */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
struct thread *ke_owner; /* Always points to the owner */
fixpt_t ke_pctcpu; /* (j) %cpu during p_swtime. */
u_int64_t ke_uu; /* (j) Previous user time in usec. */
u_int64_t ke_su; /* (j) Previous system time in usec. */
u_int64_t ke_iu; /* (j) Previous intr time in usec. */
u_int64_t ke_uticks; /* (j) Statclock hits in user mode. */
u_int64_t ke_sticks; /* (j) Statclock hits in system mode. */
u_int64_t ke_iticks; /* (j) Statclock hits in intr. */
u_int ke_uuticks; /* Statclock hits in user, for UTS */
u_int ke_usticks; /* Statclock hits in kernel, for UTS */
u_char ke_oncpu; /* (j) Which cpu we are on. */
char ke_rqindex; /* (j) Run queue index. */
enum {
KES_UNUSED = 0x0,
KES_IDLE,
KES_ONRUNQ,
KES_UNQUEUED, /* in transit */
KES_THREAD /* slaved to thread state */
} ke_state; /* (j) S* process status. */
struct kse_mailbox *ke_mailbox; /* the userland mailbox address */
stack_t ke_stack;
void *ke_upcall;
struct thread *ke_tdspare; /* spare thread for upcalls */
#define ke_endzero ke_dummy
u_char ke_dummy;
struct ke_sched *ke_sched; /* Scheduler specific data */
};
/* flags kept in ke_flags */
#define KEF_OWEUPC 0x00002 /* Owe process an addupc() call at next ast. */
#define KEF_IDLEKSE 0x00004 /* A 'Per CPU idle process'.. has one thread */
#define KEF_LOANED 0x00008 /* On loan from the bound thread to another */
#define KEF_USER 0x00200 /* Process is not officially in the kernel */
#define KEF_ASTPENDING 0x00400 /* KSE has a pending ast. */
#define KEF_NEEDRESCHED 0x00800 /* Process needs to yield. */
#define KEF_ONLOANQ 0x01000 /* KSE is on loan queue. */
#define KEF_DIDRUN 0x02000 /* KSE actually ran. */
#define KEF_EXIT 0x04000 /* KSE is being killed. */
#define KEF_DOUPCALL 0x08000 /* KSE should do upcall now. */
/*
* The upcall management structure. Any thread owns an upcall structure
* can goto userland, it is various whether it uses the upcall or not.
* Any thread does not own an upcall should export its context and
* suicide at user boundary, they do not directly return to userland,
* an upcall thread takes them back to userland.
* (*) A bound KSE with a bound thread in a KSE process may be lent to
* Other threads, as long as those threads do not leave the kernel.
* The other threads must be either exiting, or be unbound with a valid
* mailbox so that they can save their state there rather than going
* to user space. While this happens the real bound thread is still linked
* to the kse via the ke_bound field, and the KSE has its "KEF_LOANED
* flag set.
*/
struct kse_upcall {
TAILQ_ENTRY(kse_upcall) ku_link;/* List of upcall in KSEG. */
struct ksegrp *ku_ksegrp; /* Associated KSEG. */
struct thread *ku_owner; /* Thread owns the upcall. */
int ku_flags; /* KUF_* flags. */
struct kse_mailbox *ku_mailbox; /* The userland mailbox address. */
stack_t ku_stack; /* The userland upcall stack. */
void *ku_func; /* The userland upcall function. */
};
#define KUF_DOUPCALL 0x00001 /* Do upcall now, don't wait */
/*
* Kernel-scheduled entity group (KSEG). The scheduler considers each KSEG to
@ -480,20 +498,18 @@ struct ksegrp {
struct proc *kg_proc; /* Process that contains this KSEG. */
TAILQ_ENTRY(ksegrp) kg_ksegrp; /* Queue of KSEGs in kg_proc. */
TAILQ_HEAD(, kse) kg_kseq; /* (ke_kglist) All KSEs. */
TAILQ_HEAD(, kse) kg_iq; /* (ke_kgrlist) All idle KSEs. */
TAILQ_HEAD(, kse) kg_lq; /* (ke_kgrlist) Loan KSEs. */
TAILQ_HEAD(, thread) kg_threads;/* (td_kglist) All threads. */
TAILQ_HEAD(, thread) kg_runq; /* (td_runq) waiting RUNNABLE threads */
TAILQ_HEAD(, thread) kg_slpq; /* (td_runq) NONRUNNABLE threads. */
TAILQ_HEAD(, kse_upcall) kg_upcalls; /* All upcalls in the group */
#define kg_startzero kg_estcpu
u_int kg_estcpu; /* Sum of the same field in KSEs. */
u_int kg_slptime; /* (j) How long completely blocked. */
struct thread *kg_last_assigned; /* (j) Last thread assigned to a KSE */
int kg_runnable; /* (j) Num runnable threads on queue. */
int kg_runq_kses; /* (j) Num KSEs on runq. */
int kg_idle_kses; /* (j) Num KSEs on iq */
int kg_numupcalls; /* (j) Num upcalls */
int kg_upsleeps; /* (c) Num threads in kse_release() */
struct thread *kg_last_assigned; /* Last thread assigned to a KSE */
int kg_runnable; /* Num runnable threads on queue. */
int kg_runq_kses; /* Num KSEs on runq. */
int kg_loan_kses; /* Num KSEs on loan queue. */
struct kse_thr_mailbox *kg_completed; /* (c) completed thread mboxes */
#define kg_endzero kg_pri_class
@ -502,8 +518,8 @@ struct ksegrp {
u_char kg_user_pri; /* (j) User pri from estcpu and nice. */
char kg_nice; /* (j?/k?) Process "nice" value. */
#define kg_endcopy kg_numthreads
int kg_numthreads; /* (j) Num threads in total */
int kg_kses; /* (j) Num KSEs in group. */
int kg_numthreads; /* Num threads in total */
int kg_kses; /* Num KSEs in group. */
struct kg_sched *kg_sched; /* Scheduler specific data */
};
@ -553,19 +569,12 @@ struct proc {
u_int p_swtime; /* (j) Time swapped in or out. */
struct itimerval p_realtimer; /* (h?/k?) Alarm timer. */
struct bintime p_runtime; /* (j) Real time. */
u_int64_t p_uu; /* (j) Previous user time in usec. */
u_int64_t p_su; /* (j) Previous system time in usec. */
u_int64_t p_iu; /* (j) Previous intr time in usec. */
u_int64_t p_uticks; /* (j) Statclock hits in user mode. */
u_int64_t p_sticks; /* (j) Statclock hits in system mode. */
u_int64_t p_iticks; /* (j) Statclock hits in intr. */
int p_profthreads; /* (c) Num threads in addupc_task */
int p_traceflag; /* (o) Kernel trace points. */
struct vnode *p_tracep; /* (c + o) Trace to vnode. */
sigset_t p_siglist; /* (c) Sigs arrived, not delivered. */
struct vnode *p_textvp; /* (b) Vnode of executable. */
char p_lock; /* (c) Proclock (prevent swap) count. */
struct klist p_klist; /* (c) Knotes attached to this proc. */
struct klist p_klist; /* (c) Knotes attached to this proc. */
struct sigiolst p_sigiolst; /* (c) List of sigio sources. */
int p_sigparent; /* (c) Signal to parent on exit. */
sigset_t p_oldsigmask; /* (c) Saved mask from pre sigpause. */
@ -660,7 +669,6 @@ struct proc {
#define PS_INMEM 0x00001 /* Loaded into memory. */
#define PS_XCPU 0x00002 /* Exceeded CPU limit. */
#define PS_PROFIL 0x00004 /* Has started profiling. */
#define PS_STOPPROF 0x00008 /* Has thread in requesting to stop prof */
#define PS_ALRMPEND 0x00020 /* Pending SIGVTALRM needs to be posted. */
#define PS_PROFPEND 0x00040 /* Pending SIGPROF needs to be posted. */
#define PS_SWAPINREQ 0x00100 /* Swapin request due to wakeup. */
@ -698,8 +706,6 @@ MALLOC_DECLARE(M_ZOMBIE);
TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define FOREACH_KSE_IN_GROUP(kg, ke) \
TAILQ_FOREACH((ke), &(kg)->kg_kseq, ke_kglist)
#define FOREACH_UPCALL_IN_GROUP(kg, ku) \
TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#define FOREACH_THREAD_IN_PROC(p, td) \
TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
@ -914,7 +920,7 @@ struct kse *kse_alloc(void);
void kse_free(struct kse *ke);
void kse_stash(struct kse *ke);
void cpu_set_upcall(struct thread *td, void *pcb);
void cpu_set_upcall_kse(struct thread *td, struct kse_upcall *ku);
void cpu_set_upcall_kse(struct thread *td, struct kse *ke);
void cpu_thread_clean(struct thread *);
void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
@ -923,6 +929,7 @@ void kse_link(struct kse *ke, struct ksegrp *kg);
void kse_unlink(struct kse *ke);
void ksegrp_link(struct ksegrp *kg, struct proc *p);
void ksegrp_unlink(struct ksegrp *kg);
void make_kse_runnable(struct kse *ke);
struct thread *signal_upcall(struct proc *p, int sig);
struct thread *thread_alloc(void);
void thread_exit(void) __dead2;
@ -931,7 +938,7 @@ void thread_free(struct thread *td);
void thread_getcontext(struct thread *td, ucontext_t *uc);
void thread_link(struct thread *td, struct ksegrp *kg);
void thread_reap(void);
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
struct thread *thread_schedule_upcall(struct thread *td, struct kse *ke);
int thread_setcontext(struct thread *td, ucontext_t *uc);
int thread_single(int how);
#define SINGLE_NO_EXIT 0 /* values for 'how' */
@ -945,13 +952,8 @@ void thread_unsuspend_one(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct proc *p, struct thread *td);
void thread_wait(struct proc *p);
int thread_statclock(int user);
struct kse_upcall *upcall_alloc(void);
void upcall_free(struct kse_upcall *ku);
void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
int thread_add_ticks_intr(int user, uint ticks);
void thread_sanity_check(struct thread *td, char *);
#endif /* _KERNEL */

View File

@ -62,6 +62,8 @@ struct pstats {
u_long pr_size; /* buffer size */
u_long pr_off; /* pc offset */
u_long pr_scale; /* pc scaling */
u_long pr_addr; /* temp storage for addr until AST */
u_int pr_ticks; /* temp storage for ticks until AST */
} p_prof;
#define pstat_endcopy p_start
struct timeval p_start; /* starting time */
@ -103,8 +105,8 @@ struct thread;
struct kse;
struct proc;
void addupc_intr(struct thread *td, uintptr_t pc, u_int ticks);
void addupc_task(struct thread *td, uintptr_t pc, u_int ticks);
void addupc_intr(struct kse *ke, uintptr_t pc, u_int ticks);
void addupc_task(struct kse *ke, uintptr_t pc, u_int ticks);
void calcru(struct proc *p, struct timeval *up, struct timeval *sp,
struct timeval *ip);
int chgproccnt(struct uidinfo *uip, int diff, int max);

View File

@ -202,7 +202,7 @@ void hardclock(struct clockframe *frame);
void hardclock_process(struct thread *td, int user);
void softclock(void *);
void statclock(struct clockframe *frame);
void statclock_process(struct thread *td, register_t pc, int user);
void statclock_process(struct kse *ke, register_t pc, int user);
void startprofclock(struct proc *);
void stopprofclock(struct proc *);