Move TDF_CAN_UNBIND to the thread-private flags field td_pflags; this

eliminates the need for sched_lock in some places. Also, in thread_userret,
remove the spare-thread allocation code, since it is already done in thread_user_enter.

Reviewed by: julian
This commit is contained in:
David Xu 2004-08-28 04:08:05 +00:00
parent b7131a2672
commit ad1280b593
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=134425
2 changed files with 7 additions and 23 deletions

View File

@ -152,9 +152,7 @@ kse_switchin(struct thread *td, struct kse_switchin_args *uap)
suword32(&uap->tmbx->tm_lwp, td->td_tid);
if (uap->flags & KSE_SWITCHIN_SETTMBX) {
td->td_mailbox = uap->tmbx;
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
td->td_pflags |= TDP_CAN_UNBIND;
}
if (td->td_proc->p_flag & P_TRACED) {
if (tmbx.tm_dflags & TMDF_SSTEP)
@ -1134,7 +1132,7 @@ thread_switchout(struct thread *td)
ku = td->td_upcall;
ku->ku_owner = NULL;
td->td_upcall = NULL;
td->td_flags &= ~TDF_CAN_UNBIND;
td->td_pflags &= ~TDP_CAN_UNBIND;
td2 = thread_schedule_upcall(td, ku);
setrunqueue(td2);
}
@ -1186,9 +1184,7 @@ thread_user_enter(struct proc *p, struct thread *td)
td->td_mailbox = NULL;
} else {
td->td_mailbox = tmbx;
mtx_lock_spin(&sched_lock);
td->td_flags |= TDF_CAN_UNBIND;
mtx_unlock_spin(&sched_lock);
td->td_pflags |= TDP_CAN_UNBIND;
if (__predict_false(p->p_flag & P_TRACED)) {
flags = fuword32(&tmbx->tm_dflags);
if (flags & TMDF_SUSPEND) {
@ -1250,13 +1246,11 @@ thread_userret(struct thread *td, struct trapframe *frame)
* then it can return direct to userland.
*/
if (TD_CAN_UNBIND(td)) {
mtx_lock_spin(&sched_lock);
td->td_flags &= ~TDF_CAN_UNBIND;
td->td_pflags &= ~TDP_CAN_UNBIND;
if ((td->td_flags & TDF_NEEDSIGCHK) == 0 &&
(kg->kg_completed == NULL) &&
(ku->ku_flags & KUF_DOUPCALL) == 0 &&
(kg->kg_upquantum && ticks < kg->kg_nextupcall)) {
mtx_unlock_spin(&sched_lock);
thread_update_usr_ticks(td, 0);
nanotime(&ts);
error = copyout(&ts,
@ -1268,7 +1262,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
goto out;
return (0);
}
mtx_unlock_spin(&sched_lock);
thread_export_context(td, 0);
/*
* There is something to report, and we own an upcall
@ -1378,14 +1371,6 @@ thread_userret(struct thread *td, struct trapframe *frame)
PROC_LOCK(td->td_proc);
psignal(td->td_proc, SIGSEGV);
PROC_UNLOCK(td->td_proc);
} else {
/*
* Optimisation:
* Ensure that we have a spare thread available,
* for when we re-enter the kernel.
*/
if (td->td_standin == NULL)
thread_alloc_spare(td);
}
ku->ku_mflags = 0;

View File

@ -347,7 +347,6 @@ struct thread {
*/
#define TDF_UNUSED0 0x00000001 /* --available -- */
#define TDF_INPANIC 0x00000002 /* Caused a panic, let it drive crashdump. */
#define TDF_CAN_UNBIND 0x00000004 /* Only temporarily bound. */
#define TDF_SINTR 0x00000008 /* Sleep is interruptible. */
#define TDF_TIMEOUT 0x00000010 /* Timing out during sleep. */
#define TDF_IDLETD 0x00000020 /* This is a per-CPU idle thread. */
@ -389,7 +388,7 @@ struct thread {
#define TDP_OWEPREEMPT 0x00000100 /* Thread has a pending preemption. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_USTATCLOCK 0x00000400 /* Finish user statclock hit at next AST. */
#define TDP_UNUSED11 0x00000800 /* -- available-- */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
@ -412,8 +411,8 @@ struct thread {
#define TDK_KSERELSIG 0x0002 /* Blocked in msleep on p->p_siglist. */
#define TDK_WAKEUP 0x0004 /* Thread has been woken by kse_wakeup. */
#define TD_CAN_UNBIND(td) \
(((td)->td_flags & TDF_CAN_UNBIND) == TDF_CAN_UNBIND && \
#define TD_CAN_UNBIND(td) \
(((td)->td_pflags & TDP_CAN_UNBIND) && \
((td)->td_upcall != NULL))
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)