Move TDF_SA from td_flags to td_pflags (and rename it accordingly)

so that it is no longer necessary to hold sched_lock while
manipulating it.

Reviewed by:	davidxu
Tim J. Robbins 2004-06-02 07:52:36 +00:00
parent cd4dc87a34
commit aa0aa7a113
8 changed files with 30 additions and 34 deletions
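
The reasoning behind the change, in code form: td_flags is shared state that may be updated from other contexts (for example the scheduler setting reschedule/AST bits), so read-modify-write of it must be serialized with sched_lock, whereas td_pflags is only ever read and written by the owning thread itself. A minimal before/after sketch, using only identifiers that appear in the hunks below (the actual surrounding code is the kern_execve()/exit1() cleanup path shown further down):

	/*
	 * Before: TDF_SA lived in the shared td_flags word, so clearing
	 * it had to be serialized with the scheduler spin lock.
	 */
	mtx_lock_spin(&sched_lock);
	td->td_flags &= ~TDF_SA;
	mtx_unlock_spin(&sched_lock);

	/*
	 * After: TDP_SA lives in td_pflags, which only the owning thread
	 * (curthread) touches, so a plain read-modify-write is safe.
	 */
	td->td_pflags &= ~TDP_SA;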

View File

@@ -262,7 +262,7 @@ trap(frame)
break;
case T_PAGEFLT: /* page fault */
-if (td->td_flags & TDF_SA)
+if (td->td_pflags & TDP_SA)
thread_user_enter(p, td);
i = trap_pfault(&frame, TRUE);
if (i == -1)

View File

@@ -313,7 +313,7 @@ trap(frame)
break;
case T_PAGEFLT: /* page fault */
-if (td->td_flags & TDF_SA)
+if (td->td_pflags & TDP_SA)
thread_user_enter(p, td);
i = trap_pfault(&frame, TRUE, eva);

View File

@@ -266,9 +266,7 @@ kern_execve(td, fname, argv, envv, mac_p)
*/
p->p_flag &= ~P_SA;
td->td_mailbox = NULL;
-mtx_lock_spin(&sched_lock);
-td->td_flags &= ~TDF_SA;
-mtx_unlock_spin(&sched_lock);
+td->td_pflags &= ~TDP_SA;
thread_single_end();
}
p->p_flag |= P_INEXEC;

View File

@@ -162,9 +162,7 @@ exit1(struct thread *td, int rv)
* Turn off threading support.
*/
p->p_flag &= ~P_SA;
-mtx_lock_spin(&sched_lock);
-td->td_flags &= ~TDF_SA;
-mtx_unlock_spin(&sched_lock);
+td->td_pflags &= ~TDP_SA;
thread_single_end(); /* Don't need this any more. */
}

View File

@@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap)
return (error);
TIMESPEC_TO_TIMEVAL(&tv, &timeout);
}
-if (td->td_flags & TDF_SA)
+if (td->td_pflags & TDP_SA)
td->td_pflags |= TDP_UPCALLING;
else {
ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
@@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (virtual_cpu != 0)
ncpus = virtual_cpu;
if (!(mbx.km_flags & KMF_BOUND))
-sa = TDF_SA;
+sa = TDP_SA;
else
ncpus = 1;
PROC_LOCK(p);
@@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
-if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
+if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0)
return (EINVAL);
newkg = kg;
}
@@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)
}
if (!sa) {
newtd->td_mailbox = mbx.km_curthread;
-newtd->td_flags &= ~TDF_SA;
+newtd->td_pflags &= ~TDP_SA;
if (newtd != td) {
mtx_unlock_spin(&sched_lock);
cpu_set_upcall_kse(newtd, newku);
mtx_lock_spin(&sched_lock);
}
} else {
-newtd->td_flags |= TDF_SA;
+newtd->td_pflags |= TDP_SA;
}
if (newtd != td)
setrunqueue(newtd);
@@ -1263,7 +1263,7 @@ thread_statclock(int user)
struct thread *td = curthread;
struct ksegrp *kg = td->td_ksegrp;
-if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
+if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA))
return (0);
if (user) {
/* Current always do via ast() */
@@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
-td2->td_flags = TDF_SA;
-td2->td_pflags = TDP_UPCALLING;
+td2->td_flags = 0;
+td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_kse = NULL;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
@@ -1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct thread *td)
* but for now do it every time.
*/
kg = td->td_ksegrp;
-if (td->td_flags & TDF_SA) {
+if (td->td_pflags & TDP_SA) {
ku = td->td_upcall;
KASSERT(ku, ("%s: no upcall owned", __func__));
KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
@@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
/* Nothing to do with bound thread */
-if (!(td->td_flags & TDF_SA))
+if (!(td->td_pflags & TDP_SA))
return (0);
/*

View File

@@ -1490,7 +1490,7 @@ trapsignal(struct thread *td, int sig, u_long code)
int error;
p = td->td_proc;
-if (td->td_flags & TDF_SA) {
+if (td->td_pflags & TDP_SA) {
if (td->td_mailbox == NULL)
thread_user_enter(p, td);
PROC_LOCK(p);
@@ -1524,7 +1524,7 @@ trapsignal(struct thread *td, int sig, u_long code)
ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
&td->td_sigmask, code);
#endif
-if (!(td->td_flags & TDF_SA))
+if (!(td->td_pflags & TDP_SA))
(*p->p_sysent->sv_sendsig)(
ps->ps_sigact[_SIG_IDX(sig)], sig,
&td->td_sigmask, code);
@@ -2291,7 +2291,7 @@ postsig(sig)
mtx_lock(&ps->ps_mtx);
}
-if (!(td->td_flags & TDF_SA && td->td_mailbox) &&
+if (!(td->td_pflags & TDP_SA && td->td_mailbox) &&
action == SIG_DFL) {
/*
* Default action, where the default is to kill
@@ -2301,7 +2301,7 @@ postsig(sig)
sigexit(td, sig);
/* NOTREACHED */
} else {
-if (td->td_flags & TDF_SA && td->td_mailbox) {
+if (td->td_pflags & TDP_SA && td->td_mailbox) {
if (sig == SIGKILL) {
mtx_unlock(&ps->ps_mtx);
sigexit(td, sig);
@@ -2350,7 +2350,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
-if (td->td_flags & TDF_SA && td->td_mailbox)
+if (td->td_pflags & TDP_SA && td->td_mailbox)
thread_signal_add(curthread, sig);
else
(*p->p_sysent->sv_sendsig)(action, sig,

View File

@@ -609,7 +609,7 @@ kse_release(struct thread *td, struct kse_release_args *uap)
return (error);
TIMESPEC_TO_TIMEVAL(&tv, &timeout);
}
-if (td->td_flags & TDF_SA)
+if (td->td_pflags & TDP_SA)
td->td_pflags |= TDP_UPCALLING;
else {
ku->ku_mflags = fuword(&ku->ku_mailbox->km_flags);
@@ -748,7 +748,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
if (virtual_cpu != 0)
ncpus = virtual_cpu;
if (!(mbx.km_flags & KMF_BOUND))
-sa = TDF_SA;
+sa = TDP_SA;
else
ncpus = 1;
PROC_LOCK(p);
@@ -787,7 +787,7 @@ kse_create(struct thread *td, struct kse_create_args *uap)
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
} else {
-if (!first && ((td->td_flags & TDF_SA) ^ sa) != 0)
+if (!first && ((td->td_pflags & TDP_SA) ^ sa) != 0)
return (EINVAL);
newkg = kg;
}
@@ -891,14 +891,14 @@ kse_create(struct thread *td, struct kse_create_args *uap)
}
if (!sa) {
newtd->td_mailbox = mbx.km_curthread;
-newtd->td_flags &= ~TDF_SA;
+newtd->td_pflags &= ~TDP_SA;
if (newtd != td) {
mtx_unlock_spin(&sched_lock);
cpu_set_upcall_kse(newtd, newku);
mtx_lock_spin(&sched_lock);
}
} else {
-newtd->td_flags |= TDF_SA;
+newtd->td_pflags |= TDP_SA;
}
if (newtd != td)
setrunqueue(newtd);
@@ -1263,7 +1263,7 @@ thread_statclock(int user)
struct thread *td = curthread;
struct ksegrp *kg = td->td_ksegrp;
-if (kg->kg_numupcalls == 0 || !(td->td_flags & TDF_SA))
+if (kg->kg_numupcalls == 0 || !(td->td_pflags & TDP_SA))
return (0);
if (user) {
/* Current always do via ast() */
@@ -1621,8 +1621,8 @@ thread_schedule_upcall(struct thread *td, struct kse_upcall *ku)
/* Let the new thread become owner of the upcall */
ku->ku_owner = td2;
td2->td_upcall = ku;
-td2->td_flags = TDF_SA;
-td2->td_pflags = TDP_UPCALLING;
+td2->td_flags = 0;
+td2->td_pflags = TDP_SA|TDP_UPCALLING;
td2->td_kse = NULL;
td2->td_state = TDS_CAN_RUN;
td2->td_inhibitors = 0;
@@ -1729,7 +1729,7 @@ thread_user_enter(struct proc *p, struct thread *td)
* but for now do it every time.
*/
kg = td->td_ksegrp;
-if (td->td_flags & TDF_SA) {
+if (td->td_pflags & TDP_SA) {
ku = td->td_upcall;
KASSERT(ku, ("%s: no upcall owned", __func__));
KASSERT((ku->ku_owner == td), ("%s: wrong owner", __func__));
@@ -1788,7 +1788,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
ku = td->td_upcall;
/* Nothing to do with bound thread */
-if (!(td->td_flags & TDF_SA))
+if (!(td->td_pflags & TDP_SA))
return (0);
/*

View File

@@ -353,7 +353,6 @@ struct thread {
#define TDF_OWEUPC 0x008000 /* Owe thread an addupc() call at next AST. */
#define TDF_NEEDRESCHED 0x010000 /* Thread needs to yield. */
#define TDF_NEEDSIGCHK 0x020000 /* Thread may need signal delivery. */
-#define TDF_SA 0x040000 /* A scheduler activation based thread. */
#define TDF_UMTXWAKEUP 0x080000 /* Libthr thread must not sleep on a umtx. */
#define TDF_THRWAKEUP 0x100000 /* Libthr thread must not suspend itself. */
#define TDF_DEADLKTREAT 0x800000 /* Lock aquisition - deadlock treatment. */
@@ -365,6 +364,7 @@ struct thread {
#define TDP_UPCALLING 0x0008 /* This thread is doing an upcall. */
#define TDP_COWINPROGRESS 0x0010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK 0x0020 /* Have alternate signal stack. */
+#define TDP_SA 0x0080 /* A scheduler activation based thread. */
#define TDI_SUSPENDED 0x0001 /* On suspension queue. */
#define TDI_SLEEPING 0x0002 /* Actually asleep! (tricky). */