From a8b491c121bc27e671efda1f96cf2ca3fa2d4aa9 Mon Sep 17 00:00:00 2001
From: Julian Elischer
Date: Tue, 7 Sep 2004 06:33:39 +0000
Subject: [PATCH] Give libthr a choice (per system) of scope_system or
 scope_thread scheduling.

MFC after:	4 days
---
 sys/kern/kern_thr.c | 47 +++++++++++++++++++++++++++++++++++----------
 1 file changed, 37 insertions(+), 10 deletions(-)

diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index e4ec7be15eaa..975239cab262 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -34,6 +34,7 @@
 #include
 #include
 #include
+#include <sys/smp.h>
 #include
 #include
 #include
@@ -47,6 +48,15 @@ __FBSDID("$FreeBSD$");
 extern int max_threads_per_proc;
 extern int max_groups_per_proc;
 
+SYSCTL_DECL(_kern_threads);
+static int thr_scope_sys = 0;
+SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope_sys, CTLFLAG_RW,
+    &thr_scope_sys, 0, "sys or proc scope scheduling");
+
+static int thr_concurrency = 0;
+SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
+    &thr_concurrency, 0, "a concurrency value if not default");
+
 /*
  * Back end support functions.
  */
@@ -79,14 +89,18 @@ thr_create(struct thread *td, struct thr_create_args *uap)
 	}
 
 	/* Initialize our td and new ksegrp.. */
 	newtd = thread_alloc();
-	newkg = ksegrp_alloc();
+	if (thr_scope_sys)
+		newkg = ksegrp_alloc();
+	else
+		newkg = kg;
 	/*
 	 * Try the copyout as soon as we allocate the td so we don't have to
 	 * tear things down in a failure case below.
 	 */
 	id = newtd->td_tid;
 	if ((error = copyout(&id, uap->id, sizeof(long)))) {
-		ksegrp_free(newkg);
+		if (thr_scope_sys)
+			ksegrp_free(newkg);
 		thread_free(newtd);
 		return (error);
 	}
@@ -96,10 +110,12 @@ thr_create(struct thread *td, struct thr_create_args *uap)
 	bcopy(&td->td_startcopy, &newtd->td_startcopy,
 	    (unsigned) RANGEOF(struct thread, td_startcopy, td_endcopy));
 
-	bzero(&newkg->kg_startzero,
-	    (unsigned) RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
-	bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
-	    (unsigned) RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+	if (thr_scope_sys) {
+		bzero(&newkg->kg_startzero,
+		    (unsigned)RANGEOF(struct ksegrp, kg_startzero, kg_endzero));
+		bcopy(&kg->kg_startcopy, &newkg->kg_startcopy,
+		    (unsigned)RANGEOF(struct ksegrp, kg_startcopy, kg_endcopy));
+	}
 
 	newtd->td_proc = td->td_proc;
 	newtd->td_ucred = crhold(td->td_ucred);
@@ -108,7 +124,8 @@ thr_create(struct thread *td, struct thr_create_args *uap)
 	cpu_set_upcall(newtd, td);
 	error = set_mcontext(newtd, &ctx.uc_mcontext);
 	if (error != 0) {
-		ksegrp_free(newkg);
+		if (thr_scope_sys)
+			ksegrp_free(newkg);
 		thread_free(newtd);
 		crfree(td->td_ucred);
 		goto out;
@@ -116,18 +133,28 @@ thr_create(struct thread *td, struct thr_create_args *uap)
 
 	/* Link the thread and kse into the ksegrp and make it runnable. */
 	PROC_LOCK(td->td_proc);
+	if (thr_scope_sys) {
+		sched_init_concurrency(newkg);
+	} else {
+		if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
+			sched_set_concurrency(kg,
+			    thr_concurrency ? thr_concurrency : (2*mp_ncpus));
+		}
+	}
+	td->td_proc->p_flag |= P_HADTHREADS;
 	newtd->td_sigmask = td->td_sigmask;
 	mtx_lock_spin(&sched_lock);
-	ksegrp_link(newkg, p);
+	if (thr_scope_sys)
+		ksegrp_link(newkg, p);
 	thread_link(newtd, newkg);
 	mtx_unlock_spin(&sched_lock);
 	PROC_UNLOCK(p);
 
-	sched_init_concurrency(newkg);
 
 	/* let the scheduler know about these things. */
 	mtx_lock_spin(&sched_lock);
-	sched_fork_ksegrp(td, newkg);
+	if (thr_scope_sys)
+		sched_fork_ksegrp(td, newkg);
 	sched_fork_thread(td, newtd);
 	TD_SET_CAN_RUN(newtd);
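
For illustration only (not part of the patch): given SYSCTL_DECL(_kern_threads) and the OID_AUTO entries added above, the new knobs should appear as kern.threads.thr_scope_sys and kern.threads.thr_concurrency. A minimal userland sketch that reads them via sysctlbyname(3), assuming a kernel built with this change, might look like:

/*
 * Illustrative sketch, not part of the commit: read the two knobs added
 * by the patch.  MIB names are assumed from the SYSCTL entries in the diff.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>

int
main(void)
{
	int scope_sys, concurrency;
	size_t len;

	len = sizeof(scope_sys);
	if (sysctlbyname("kern.threads.thr_scope_sys", &scope_sys, &len,
	    NULL, 0) == -1) {
		perror("kern.threads.thr_scope_sys");
		return (1);
	}

	len = sizeof(concurrency);
	if (sysctlbyname("kern.threads.thr_concurrency", &concurrency, &len,
	    NULL, 0) == -1) {
		perror("kern.threads.thr_concurrency");
		return (1);
	}

	printf("thr_scope_sys=%d thr_concurrency=%d\n",
	    scope_sys, concurrency);
	return (0);
}

Per the diff, a non-zero thr_scope_sys makes thr_create() allocate a separate ksegrp per thread (system scope); leaving it at 0 keeps new threads in the parent's ksegrp and sizes its concurrency from thr_concurrency, or 2*mp_ncpus when that is 0.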