Make KSE a kernel option, turned on by default in all GENERIC
kernel configs except sun4v (which doesn't process signals properly
with KSE).

Reviewed by:	davidxu@
This commit is contained in:
parent	b3e38fbc69
commit	f82c799735
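In practice, the switch described in the UPDATING note below comes down to one line in either place. As a sketch, using exactly the names the note gives:

    # custom kernel configuration file: keep M:N threading support
    options 	KSE			# KSE support

    # /etc/libmap.conf: on a kernel built without KSE, redirect
    # libpthread consumers to the 1:1 thread library instead
    libpthread.so.2	libthr.so.2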
@@ -21,6 +21,15 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 7.x IS SLOW:
    developers choose to disable these features on build machines
    to maximize performance.

20061025:
    KSE in the kernel has now been made optional and turned on by
    default in the GENERIC kernels. Either add 'options KSE' to your
    custom kernel or edit /etc/libmap.conf to redirect libpthread.so.2
    to libthr.so.2. All kernel modules *must* be recompiled after
    this change. Thereafter, modules from a KSE kernel should be
    compatible with modules from a NOKSE kernel due to the temporary
    padding fields added to 'struct proc'.

20060929:
    mrouted and its utilities have been removed from the base system.
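The module-compatibility claim above hinges on 'struct proc' keeping the same size and field offsets whether or not KSE is compiled in. A minimal, self-contained sketch of that padding idea (field names and sizes here are illustrative, not the kernel's actual layout):

    #include <stdio.h>
    #include <sys/queue.h>

    struct ksegrp;                  /* opaque; the real one lives in sys/proc.h */

    /* Layout with 'options KSE': the real list head and counter. */
    struct proc_kse {
        TAILQ_HEAD(, ksegrp) p_ksegrps;
        int p_numksegrps;
    };

    /* Layout without KSE: same-sized filler where those fields were,
     * so KSE and NOKSE modules agree on every other field's offset. */
    struct proc_nokse {
        void *p_pad0[2];            /* a TAILQ_HEAD is two pointers */
        int p_pad1;
    };

    int
    main(void)
    {
        printf("KSE %zu bytes, NOKSE %zu bytes\n",
            sizeof(struct proc_kse), sizeof(struct proc_nokse));
        return (sizeof(struct proc_kse) == sizeof(struct proc_nokse) ? 0 : 1);
    }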
@@ -114,7 +114,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
    struct prison pr;
    struct thread mtd;
    /*struct kse mke;*/
    struct ksegrp mkg;
    /*struct ksegrp mkg;*/
    struct proc proc;
    struct proc pproc;
    struct timeval tv;
@@ -137,6 +137,7 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
                TAILQ_FIRST(&proc.p_threads));
            return (-1);
        }
#if 0
        if ((proc.p_flag & P_SA) == 0) {
            if (KREAD(kd,
                (u_long)TAILQ_FIRST(&proc.p_ksegrps),
@@ -146,7 +147,6 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
                    TAILQ_FIRST(&proc.p_ksegrps));
                return (-1);
            }
#if 0
            if (KREAD(kd,
                (u_long)TAILQ_FIRST(&mkg.kg_kseq), &mke)) {
                _kvm_err(kd, kd->program,
@@ -154,8 +154,8 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
                    TAILQ_FIRST(&mkg.kg_kseq));
                return (-1);
            }
#endif
        }
#endif
    }
    if (KREAD(kd, (u_long)proc.p_ucred, &ucred) == 0) {
        kp->ki_ruid = ucred.cr_ruid;
@@ -425,13 +425,13 @@ kvm_proclist(kd, what, arg, p, bp, maxcnt)
    kp->ki_oncpu = mtd.td_oncpu;

    if (!(proc.p_flag & P_SA)) {
#if 0
        /* stuff from the ksegrp */
        kp->ki_slptime = mkg.kg_slptime;
        kp->ki_pri.pri_class = mkg.kg_pri_class;
        kp->ki_pri.pri_user = mkg.kg_user_pri;
        kp->ki_estcpu = mkg.kg_estcpu;

#if 0
        /* Stuff from the kse */
        kp->ki_pctcpu = mke.ke_pctcpu;
        kp->ki_rqindex = mke.ke_rqindex;
@@ -1120,7 +1120,11 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
     * This may be done better later if it gets more high level
     * components in it. If so just link td->td_proc here.
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif

    preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
    preload_bootstrap_relocate(KERNBASE);
@@ -299,8 +299,10 @@ trap(frame)

    case T_PAGEFLT:        /* page fault */
        addr = frame.tf_addr;
#ifdef KSE
        if (td->td_pflags & TDP_SA)
            thread_user_enter(td);
#endif
        i = trap_pfault(&frame, TRUE);
        if (i == -1)
            goto userout;
@@ -757,8 +759,10 @@ syscall(frame)
    td->td_frame = &frame;
    if (td->td_ucred != p->p_ucred)
        cred_update_thread(td);
#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif
    params = (caddr_t)frame.tf_rsp + sizeof(register_t);
    code = frame.tf_rax;
    orig_tf_rflags = frame.tf_rflags;
@@ -58,6 +58,7 @@ options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
options 	KBD_INSTALL_CDEV	# install a CDEV entry in /dev
options 	ADAPTIVE_GIANT		# Giant mutex is adaptive.
options 	STOP_NMI		# Stop CPUS using NMI instead of IPI
options 	KSE			# KSE support

# Debugging for use in -current
options 	KDB			# Enable kernel debugger support.
@@ -263,8 +263,10 @@ data_abort_handler(trapframe_t *tf)
        td->td_frame = tf;
        if (td->td_ucred != td->td_proc->p_ucred)
            cred_update_thread(td);
#ifdef KSE
        if (td->td_pflags & TDP_SA)
            thread_user_enter(td);
#endif

    }
    /* Grab the current pcb */
@@ -730,8 +732,10 @@ prefetch_abort_handler(trapframe_t *tf)
        td->td_frame = tf;
        if (td->td_ucred != td->td_proc->p_ucred)
            cred_update_thread(td);
#ifdef KSE
        if (td->td_proc->p_flag & P_SA)
            thread_user_enter(td);
#endif
    }
    fault_pc = tf->tf_pc;
    if (td->td_md.md_spinlock_count == 0) {
@@ -1005,8 +1009,10 @@ swi_handler(trapframe_t *frame)
    td->td_frame = frame;

    td->td_pticks = 0;
#ifdef KSE
    if (td->td_proc->p_flag & P_SA)
        thread_user_enter(td);
#endif
    /*
     * Make sure the program counter is correctly aligned so we
     * don't take an alignment fault trying to read the opcode.
@@ -456,7 +456,11 @@ initarm(void *arg, void *arg2)
    undefined_handler_address = (u_int)undefinedinstruction_bounce;
    undefined_init();

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_kstack = kernelstack.pv_va;
    thread0.td_pcb = (struct pcb *)
        (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
@@ -59,6 +59,7 @@ options KBD_INSTALL_CDEV # install a CDEV entry in /dev
options 	GEOM_GPT		# GUID Partition Tables.
options 	GEOM_MBR		# DOS/MBR partitioning
options 	GEOM_LABEL		# Providers labelization.
options 	KSE			# KSE support

options 	BOOTP
options 	BOOTP_NFSROOT
@@ -61,6 +61,7 @@ options BOOTP_NFSV3
options 	BOOTP_WIRED_TO=em0
options 	BOOTP_COMPAT
#options 	PREEMPTION
options 	KSE			# KSE support
device		genclock
device		loop
device		ether
@@ -60,6 +60,7 @@ options MUTEX_NOINLINE
options 	RWLOCK_NOINLINE
options 	NO_FFS_SNAPSHOT
options 	NO_SWAPPING
options 	KSE			# KSE support
device		genclock
device		random
device		pty
@@ -53,6 +53,7 @@ options SYSVSHM #SYSV-style shared memory
options 	SYSVMSG			#SYSV-style message queues
options 	SYSVSEM			#SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
options 	KSE			# KSE support
device		genclock
device		loop
device		ether
@@ -58,6 +58,7 @@ options SYSVMSG #SYSV-style message queues
options 	SYSVSEM			#SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
options 	KBD_INSTALL_CDEV	# install a CDEV entry in /dev
options 	KSE			# KSE support
device		genclock
device		loop
device		ether
@@ -423,7 +423,11 @@ initarm(void *arg, void *arg2)

    /* Set stack for exception handlers */

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_kstack = kernelstack.pv_va;
    thread0.td_pcb = (struct pcb *)
        (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
@@ -429,7 +429,11 @@ initarm(void *arg, void *arg2)
    undefined_handler_address = (u_int)undefinedinstruction_bounce;
    undefined_init();

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_kstack = kernelstack.pv_va;
    thread0.td_pcb = (struct pcb *)
        (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
@@ -427,7 +427,11 @@ initarm(void *arg, void *arg2)
    undefined_handler_address = (u_int)undefinedinstruction_bounce;
    undefined_init();

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_kstack = kernelstack.pv_va;
    thread0.td_pcb = (struct pcb *)
        (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
@@ -292,8 +292,12 @@ DB_SHOW_COMMAND(thread, db_show_thread)
        td = kdb_thread;

    db_printf("Thread %d at %p:\n", td->td_tid, td);
#ifdef KSE
    db_printf(" proc (pid %d): %p ", td->td_proc->p_pid, td->td_proc);
    db_printf(" ksegrp: %p\n", td->td_ksegrp);
#else
    db_printf(" proc (pid %d): %p\n", td->td_proc->p_pid, td->td_proc);
#endif
    if (td->td_name[0] != '\0')
        db_printf(" name: %s\n", td->td_name);
    db_printf(" flags: %#x ", td->td_flags);
@@ -113,6 +113,7 @@ procfs_doprocstatus(PFS_FILL_ARGS)
    }

    mtx_lock_spin(&sched_lock);
#ifdef KSE
    if (p->p_flag & P_SA)
        wmesg = "-kse- ";
    else {
@@ -124,6 +125,15 @@ procfs_doprocstatus(PFS_FILL_ARGS)
        } else
            wmesg = "nochan";
    }
#else
    tdfirst = FIRST_THREAD_IN_PROC(p);
    if (tdfirst->td_wchan != NULL) {
        KASSERT(tdfirst->td_wmesg != NULL,
            ("wchan %p has no wmesg", tdfirst->td_wchan));
        wmesg = tdfirst->td_wmesg;
    } else
        wmesg = "nochan";
#endif
    mtx_unlock_spin(&sched_lock);

    if (p->p_sflag & PS_INMEM) {
@@ -58,6 +58,7 @@ options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
options 	KBD_INSTALL_CDEV	# install a CDEV entry in /dev
options 	ADAPTIVE_GIANT		# Giant mutex is adaptive.
options 	STOP_NMI		# Stop CPUS using NMI instead of IPI
options 	KSE			# KSE support

# Debugging for use in -current
options 	KDB			# Enable kernel debugger support.
@@ -2057,7 +2057,11 @@ init386(first)
     * This may be done better later if it gets more high level
     * components in it. If so just link td->td_proc here.
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif

    metadata_missing = 0;
    if (bootinfo.bi_modulep) {
@@ -346,8 +346,10 @@ trap(frame)
        break;

    case T_PAGEFLT:        /* page fault */
#ifdef KSE
        if (td->td_pflags & TDP_SA)
            thread_user_enter(td);
#endif

        i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
@@ -935,8 +937,10 @@ syscall(frame)
    td->td_frame = &frame;
    if (td->td_ucred != p->p_ucred)
        cred_update_thread(td);
#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif
    params = (caddr_t)frame.tf_esp + sizeof(int);
    code = frame.tf_eax;
    orig_tf_eflags = frame.tf_eflags;
@@ -53,6 +53,7 @@ options SYSVSHM # SYSV-style shared memory
options 	UFS_ACL			# Support for access control lists
options 	UFS_DIRHASH		# Hash-based directory lookup scheme
options 	_KPOSIX_PRIORITY_SCHEDULING # Posix P1003_1B RT extensions
options 	KSE			# KSE support

# Various "busses"
device		firewire		# FireWire bus code
@@ -775,7 +775,11 @@ ia64_init(void)
    msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
    msgbufinit(msgbufp, MSGBUF_SIZE);

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    /*
     * Init mapping for kernel stack for proc 0
     */
@@ -975,8 +975,10 @@ syscall(struct trapframe *tf)
    td->td_pticks = 0;
    if (td->td_ucred != p->p_ucred)
        cred_update_thread(td);
#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif

    if (p->p_sysent->sv_prepsyscall) {
        /* (*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params); */
@@ -95,7 +95,9 @@ static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0 __aligned(8);
#ifdef KSE
struct ksegrp ksegrp0;
#endif
struct vmspace vmspace0;
struct proc *initproc;

@@ -364,18 +366,23 @@ proc0_init(void *dummy __unused)
    struct proc *p;
    unsigned i;
    struct thread *td;
#ifdef KSE
    struct ksegrp *kg;
#endif

    GIANT_REQUIRED;
    p = &proc0;
    td = &thread0;
#ifdef KSE
    kg = &ksegrp0;
#endif

    /*
     * Initialize magic number.
     */
    p->p_magic = P_MAGIC;

#ifdef KSE
    /*
     * Initialize thread, process and ksegrp structures.
     */
@@ -386,6 +393,18 @@ proc0_init(void *dummy __unused)
     * Initialise scheduler resources.
     * Add scheduler specific parts to proc, ksegrp, thread as needed.
     */
#else
    /*
     * Initialize thread and process structures.
     */
    procinit();        /* set up proc zone */
    threadinit();      /* set up UMA zones */

    /*
     * Initialise scheduler resources.
     * Add scheduler specific parts to proc, thread as needed.
     */
#endif
    schedinit();       /* scheduler gets its house in order */
    /*
     * Initialize sleep queue hash table
@@ -421,9 +440,14 @@ proc0_init(void *dummy __unused)
    STAILQ_INIT(&p->p_ktr);
    p->p_nice = NZERO;
    td->td_state = TDS_RUNNING;
#ifdef KSE
    kg->kg_pri_class = PRI_TIMESHARE;
    kg->kg_user_pri = PUSER;
    kg->kg_base_user_pri = PUSER;
#else
    td->td_pri_class = PRI_TIMESHARE;
    td->td_user_pri = PUSER;
#endif
    td->td_priority = PVM;
    td->td_base_pri = PUSER;
    td->td_oncpu = 0;
@@ -733,7 +757,11 @@ kick_init(const void *udata __unused)
    td = FIRST_THREAD_IN_PROC(initproc);
    mtx_lock_spin(&sched_lock);
    TD_SET_CAN_RUN(td);
#ifdef KSE
    setrunqueue(td, SRQ_BORING);    /* XXXKSE */
#else
    setrunqueue(td, SRQ_BORING);
#endif
    mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)
@@ -202,6 +202,7 @@ hardclock_cpu(int usermode)
     */
    mtx_lock_spin_flags(&sched_lock, MTX_QUIET);
    sched_tick();
#ifdef KSE
    if (p->p_flag & P_SA) {
        /* XXXKSE What to do? */
    } else {
@@ -218,6 +219,20 @@ hardclock_cpu(int usermode)
            td->td_flags |= TDF_ASTPENDING;
        }
    }
#else
    pstats = p->p_stats;
    if (usermode &&
        timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
        itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
        p->p_sflag |= PS_ALRMPEND;
        td->td_flags |= TDF_ASTPENDING;
    }
    if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
        itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
        p->p_sflag |= PS_PROFPEND;
        td->td_flags |= TDF_ASTPENDING;
    }
#endif
    mtx_unlock_spin_flags(&sched_lock, MTX_QUIET);

#ifdef HWPMC_HOOKS
@@ -414,8 +429,10 @@ statclock(int usermode)
        /*
         * Charge the time as appropriate.
         */
#ifdef KSE
        if (p->p_flag & P_SA)
            thread_statclock(1);
#endif
        td->td_uticks++;
        if (p->p_nice > NZERO)
            cp_time[CP_NICE]++;
@@ -439,8 +456,10 @@ statclock(int usermode)
            td->td_iticks++;
            cp_time[CP_INTR]++;
        } else {
#ifdef KSE
            if (p->p_flag & P_SA)
                thread_statclock(0);
#endif
            td->td_pticks++;
            td->td_sticks++;
            if (td != PCPU_GET(idlethread))
@@ -204,7 +204,9 @@ fork1(td, flags, pages, procp)
    struct filedesc *fd;
    struct filedesc_to_leader *fdtol;
    struct thread *td2;
#ifdef KSE
    struct ksegrp *kg2;
#endif
    struct sigacts *newsigacts;
    int error;

@@ -471,7 +473,9 @@ fork1(td, flags, pages, procp)
     * then copy the section that is copied directly from the parent.
     */
    td2 = FIRST_THREAD_IN_PROC(p2);
#ifdef KSE
    kg2 = FIRST_KSEGRP_IN_PROC(p2);
#endif

    /* Allocate and switch to an alternate kstack if specified. */
    if (pages != 0)
@@ -484,15 +488,19 @@ fork1(td, flags, pages, procp)
        __rangeof(struct proc, p_startzero, p_endzero));
    bzero(&td2->td_startzero,
        __rangeof(struct thread, td_startzero, td_endzero));
#ifdef KSE
    bzero(&kg2->kg_startzero,
        __rangeof(struct ksegrp, kg_startzero, kg_endzero));
#endif

    bcopy(&p1->p_startcopy, &p2->p_startcopy,
        __rangeof(struct proc, p_startcopy, p_endcopy));
    bcopy(&td->td_startcopy, &td2->td_startcopy,
        __rangeof(struct thread, td_startcopy, td_endcopy));
#ifdef KSE
    bcopy(&td->td_ksegrp->kg_startcopy, &kg2->kg_startcopy,
        __rangeof(struct ksegrp, kg_startcopy, kg_endcopy));
#endif

    td2->td_sigstk = td->td_sigstk;
    td2->td_sigmask = td->td_sigmask;
@@ -514,7 +522,11 @@ fork1(td, flags, pages, procp)

    mtx_unlock_spin(&sched_lock);
    p2->p_ucred = crhold(td->td_ucred);
#ifdef KSE
    td2->td_ucred = crhold(p2->p_ucred);    /* XXXKSE */
#else
    td2->td_ucred = crhold(p2->p_ucred);
#endif
#ifdef AUDIT
    audit_proc_fork(p1, p2);
#endif
@@ -79,7 +79,11 @@ idle_setup(void *dummy)
    td = FIRST_THREAD_IN_PROC(p);
    TD_SET_CAN_RUN(td);
    td->td_flags |= TDF_IDLETD;
#ifdef KSE
    sched_class(td->td_ksegrp, PRI_IDLE);
#else
    sched_class(td, PRI_IDLE);
#endif
    sched_prio(td, PRI_MAX_IDLE);
    mtx_unlock_spin(&sched_lock);
    PROC_UNLOCK(p);
@@ -118,7 +122,12 @@ idle_proc(void *dummy)
#ifdef SMP
        idle_cpus_mask &= ~mycpu;
#endif
#ifdef KSE
        mi_switch(SW_VOL, NULL);
#else
        if ((td = choosethread()) != curthread)
            mi_switch(SW_VOL, td);
#endif
#ifdef SMP
        idle_cpus_mask |= mycpu;
#endif
@@ -296,7 +296,11 @@ ithread_create(const char *name)
        panic("kthread_create() failed with %d", error);
    td = FIRST_THREAD_IN_PROC(p);    /* XXXKSE */
    mtx_lock_spin(&sched_lock);
#ifdef KSE
    td->td_ksegrp->kg_pri_class = PRI_ITHD;
#else
    td->td_pri_class = PRI_ITHD;
#endif
    TD_SET_IWAIT(td);
    mtx_unlock_spin(&sched_lock);
    td->td_pflags |= TDP_ITHREAD;
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ktr.h>
#include <vm/uma.h>

#ifdef KSE
/*
 * KSEGRP related storage.
 */
@@ -117,6 +118,7 @@ upcall_remove(struct thread *td)
        td->td_upcall = NULL;
    }
}
#endif

#ifndef _SYS_SYSPROTO_H_
struct kse_switchin_args {
@@ -128,6 +130,7 @@ struct kse_switchin_args {
int
kse_switchin(struct thread *td, struct kse_switchin_args *uap)
{
#ifdef KSE
    struct kse_thr_mailbox tmbx;
    struct kse_upcall *ku;
    int error;
@@ -167,6 +170,9 @@ kse_switchin(struct thread *td, struct kse_switchin_args *uap)
        PROC_UNLOCK(td->td_proc);
    }
    return ((error == 0) ? EJUSTRETURN : error);
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

/*
@@ -179,6 +185,7 @@ struct kse_thr_interrupt_args {
int
kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
{
#ifdef KSE
    struct kse_execve_args args;
    struct image_args iargs;
    struct proc *p;
@@ -283,6 +290,9 @@ kse_thr_interrupt(struct thread *td, struct kse_thr_interrupt_args *uap)
        return (EINVAL);
    }
    return (0);
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

/*
@@ -293,6 +303,7 @@ struct kse_exit_args {
int
kse_exit(struct thread *td, struct kse_exit_args *uap)
{
#ifdef KSE
    struct proc *p;
    struct ksegrp *kg;
    struct kse_upcall *ku, *ku2;
@@ -379,6 +390,9 @@ kse_exit(struct thread *td, struct kse_exit_args *uap)
#else
    exit1(td, 0);
#endif
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

/*
@@ -393,6 +407,7 @@ struct kse_release_args {
int
kse_release(struct thread *td, struct kse_release_args *uap)
{
#ifdef KSE
    struct proc *p;
    struct ksegrp *kg;
    struct kse_upcall *ku;
@@ -454,6 +469,9 @@ kse_release(struct thread *td, struct kse_release_args *uap)
        mtx_unlock_spin(&sched_lock);
    }
    return (0);
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

/* struct kse_wakeup_args {
@@ -462,6 +480,7 @@ kse_release(struct thread *td, struct kse_release_args *uap)
int
kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
{
#ifdef KSE
    struct proc *p;
    struct ksegrp *kg;
    struct kse_upcall *ku;
@@ -517,6 +536,9 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
    }
    PROC_UNLOCK(p);
    return (0);
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

/*
@@ -534,6 +556,7 @@ kse_wakeup(struct thread *td, struct kse_wakeup_args *uap)
int
kse_create(struct thread *td, struct kse_create_args *uap)
{
#ifdef KSE
    struct ksegrp *newkg;
    struct ksegrp *kg;
    struct proc *p;
@@ -805,8 +828,12 @@ kse_create(struct thread *td, struct kse_create_args *uap)
        mtx_unlock_spin(&sched_lock);
    }
    return (0);
#else /* !KSE */
    return (EOPNOTSUPP);
#endif
}

#ifdef KSE
/*
 * Initialize global thread allocation resources.
 */
@@ -1479,3 +1506,4 @@ thread_continued(struct proc *p)
        }
    }
}
#endif
@@ -581,7 +581,11 @@ poll_idle(void)
    rtp.prio = RTP_PRIO_MAX;    /* lowest priority */
    rtp.type = RTP_PRIO_IDLE;
    mtx_lock_spin(&sched_lock);
#ifdef KSE
    rtp_to_pri(&rtp, td->td_ksegrp);
#else
    rtp_to_pri(&rtp, td);
#endif
    mtx_unlock_spin(&sched_lock);

    for (;;) {
@@ -141,7 +141,7 @@ proc_dtor(void *mem, int size, void *arg)
{
    struct proc *p;
    struct thread *td;
#ifdef INVARIANTS
#if defined(INVARIANTS) && defined(KSE)
    struct ksegrp *kg;
#endif

@@ -151,10 +151,14 @@ proc_dtor(void *mem, int size, void *arg)
#ifdef INVARIANTS
    KASSERT((p->p_numthreads == 1),
        ("bad number of threads in exiting process"));
#ifdef KSE
    KASSERT((p->p_numksegrps == 1), ("free proc with > 1 ksegrp"));
#endif
    KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
#ifdef KSE
    kg = FIRST_KSEGRP_IN_PROC(p);
    KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
#endif
    KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif

@@ -177,17 +181,25 @@ proc_init(void *mem, int size, int flags)
{
    struct proc *p;
    struct thread *td;
#ifdef KSE
    struct ksegrp *kg;
#endif

    p = (struct proc *)mem;
    p->p_sched = (struct p_sched *)&p[1];
    td = thread_alloc();
#ifdef KSE
    kg = ksegrp_alloc();
#endif
    bzero(&p->p_mtx, sizeof(struct mtx));
    mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
    p->p_stats = pstats_alloc();
#ifdef KSE
    proc_linkup(p, kg, td);
    sched_newproc(p, kg, td);
#else
    proc_linkup(p, td);
#endif
    return (0);
}

@@ -203,7 +215,9 @@ proc_fini(void *mem, int size)

    p = (struct proc *)mem;
    pstats_free(p->p_stats);
#ifdef KSE
    ksegrp_free(FIRST_KSEGRP_IN_PROC(p));
#endif
    thread_free(FIRST_THREAD_IN_PROC(p));
    mtx_destroy(&p->p_mtx);
    if (p->p_ksi != NULL)
@@ -768,7 +782,9 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
static void
fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
{
#ifdef KSE
    struct ksegrp *kg;
#endif
    struct proc *p;

    p = td->td_proc;
@@ -808,6 +824,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
        kp->ki_stat = SIDL;
    }

#ifdef KSE
    kg = td->td_ksegrp;

    /* things in the KSE GROUP */
@@ -815,7 +832,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
    kp->ki_slptime = kg->kg_slptime;
    kp->ki_pri.pri_user = kg->kg_user_pri;
    kp->ki_pri.pri_class = kg->kg_pri_class;

#endif
    /* Things in the thread */
    kp->ki_wchan = td->td_wchan;
    kp->ki_pri.pri_level = td->td_priority;
@@ -828,6 +845,12 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp)
    kp->ki_pcb = td->td_pcb;
    kp->ki_kstack = (void *)td->td_kstack;
    kp->ki_pctcpu = sched_pctcpu(td);
#ifndef KSE
    kp->ki_estcpu = td->td_estcpu;
    kp->ki_slptime = td->td_slptime;
    kp->ki_pri.pri_class = td->td_pri_class;
    kp->ki_pri.pri_user = td->td_user_pri;
#endif

    /* We can't get this anymore but ps etc never used it anyway. */
    kp->ki_rqindex = 0;
@@ -318,7 +318,11 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
        else
            td1 = thread_find(p, uap->lwpid);
        if (td1 != NULL)
#ifdef KSE
            pri_to_rtp(td1->td_ksegrp, &rtp);
#else
            pri_to_rtp(td1, &rtp);
#endif
        else
            error = ESRCH;
        mtx_unlock_spin(&sched_lock);
@@ -354,7 +358,11 @@ rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
        else
            td1 = thread_find(p, uap->lwpid);
        if (td1 != NULL)
#ifdef KSE
            error = rtp_to_pri(&rtp, td1->td_ksegrp);
#else
            error = rtp_to_pri(&rtp, td1);
#endif
        else
            error = ESRCH;
        mtx_unlock_spin(&sched_lock);
@@ -387,7 +395,11 @@ rtprio(td, uap)
{
    struct proc *curp;
    struct proc *p;
#ifdef KSE
    struct ksegrp *kg;
#else
    struct thread *tdp;
#endif
    struct rtprio rtp;
    int cierror, error;

@@ -423,14 +435,23 @@ rtprio(td, uap)
         * as leaving it zero.
         */
        if (uap->pid == 0) {
#ifdef KSE
            pri_to_rtp(td->td_ksegrp, &rtp);
#else
            pri_to_rtp(td, &rtp);
#endif
        } else {
            struct rtprio rtp2;

            rtp.type = RTP_PRIO_IDLE;
            rtp.prio = RTP_PRIO_MAX;
#ifdef KSE
            FOREACH_KSEGRP_IN_PROC(p, kg) {
                pri_to_rtp(kg, &rtp2);
#else
            FOREACH_THREAD_IN_PROC(p, tdp) {
                pri_to_rtp(tdp, &rtp2);
#endif
                if (rtp2.type < rtp.type ||
                    (rtp2.type == rtp.type &&
                    rtp2.prio < rtp.prio)) {
@@ -471,20 +492,39 @@ rtprio(td, uap)
            }
        }

#ifdef KSE
        /*
         * If we are setting our own priority, set just our
         * KSEGRP but if we are doing another process,
         * do all the groups on that process. If we
         * specify our own pid we do the latter.
         */
#else
        /*
         * If we are setting our own priority, set just our
         * thread but if we are doing another process,
         * do all the threads on that process. If we
         * specify our own pid we do the latter.
         */
#endif
        mtx_lock_spin(&sched_lock);
        if (uap->pid == 0) {
#ifdef KSE
            error = rtp_to_pri(&rtp, td->td_ksegrp);
#else
            error = rtp_to_pri(&rtp, td);
#endif
        } else {
#ifdef KSE
            FOREACH_KSEGRP_IN_PROC(p, kg) {
                if ((error = rtp_to_pri(&rtp, kg)) != 0) {
                    break;
                }
#else
            FOREACH_THREAD_IN_PROC(p, td) {
                if ((error = rtp_to_pri(&rtp, td)) != 0)
                    break;
#endif
            }
        }
        mtx_unlock_spin(&sched_lock);
@@ -498,7 +538,11 @@ rtprio(td, uap)
}

int
#ifdef KSE
rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
#else
rtp_to_pri(struct rtprio *rtp, struct thread *td)
#endif
{

    mtx_assert(&sched_lock, MA_OWNED);
@@ -506,43 +550,85 @@ rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
        return (EINVAL);
    switch (RTP_PRIO_BASE(rtp->type)) {
    case RTP_PRIO_REALTIME:
#ifdef KSE
        kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
#else
        td->td_user_pri = PRI_MIN_REALTIME + rtp->prio;
#endif
        break;
    case RTP_PRIO_NORMAL:
#ifdef KSE
        kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
#else
        td->td_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
#endif
        break;
    case RTP_PRIO_IDLE:
#ifdef KSE
        kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
#else
        td->td_user_pri = PRI_MIN_IDLE + rtp->prio;
#endif
        break;
    default:
        return (EINVAL);
    }
#ifdef KSE
    sched_class(kg, rtp->type);
    if (curthread->td_ksegrp == kg) {
        sched_prio(curthread, kg->kg_user_pri);    /* XXX dubious */
    }
#else
    sched_class(td, rtp->type);    /* XXX fix */
    if (curthread == td)
        sched_prio(curthread, td->td_user_pri);    /* XXX dubious */
#endif
    return (0);
}

void
#ifdef KSE
pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
#else
pri_to_rtp(struct thread *td, struct rtprio *rtp)
#endif
{

    mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
    switch (PRI_BASE(kg->kg_pri_class)) {
#else
    switch (PRI_BASE(td->td_pri_class)) {
#endif
    case PRI_REALTIME:
#ifdef KSE
        rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
#else
        rtp->prio = td->td_user_pri - PRI_MIN_REALTIME;
#endif
        break;
    case PRI_TIMESHARE:
#ifdef KSE
        rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
#else
        rtp->prio = td->td_user_pri - PRI_MIN_TIMESHARE;
#endif
        break;
    case PRI_IDLE:
#ifdef KSE
        rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
#else
        rtp->prio = td->td_user_pri - PRI_MIN_IDLE;
#endif
        break;
    default:
        break;
    }
#ifdef KSE
    rtp->type = kg->kg_pri_class;
#else
    rtp->type = td->td_pri_class;
#endif
}

#if defined(COMPAT_43)
@@ -94,7 +94,9 @@ static int filt_sigattach(struct knote *kn);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
#ifdef KSE
static int do_tdsignal(struct proc *, struct thread *, int, ksiginfo_t *);
#endif
static void sigqueue_start(void);

static uma_zone_t ksiginfo_zone = NULL;
@@ -565,7 +567,11 @@ void
signotify(struct thread *td)
{
    struct proc *p;
#ifdef KSE
    sigset_t set, saved;
#else
    sigset_t set;
#endif

    p = td->td_proc;

@@ -576,8 +582,10 @@ signotify(struct thread *td)
     * previously masked by all threads to our sigqueue.
     */
    set = p->p_sigqueue.sq_signals;
#ifdef KSE
    if (p->p_flag & P_SA)
        saved = p->p_sigqueue.sq_signals;
#endif
    SIGSETNAND(set, td->td_sigmask);
    if (! SIGISEMPTY(set))
        sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set);
@@ -586,6 +594,7 @@ signotify(struct thread *td)
        td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
        mtx_unlock_spin(&sched_lock);
    }
#ifdef KSE
    if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
        if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
            /* pending set changed */
@@ -593,6 +602,7 @@ signotify(struct thread *td)
            wakeup(&p->p_siglist);
        }
    }
#endif
}

int
@@ -744,11 +754,13 @@ kern_sigaction(td, sig, act, oact, flags)
        if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
            (sigprop(sig) & SA_IGNORE &&
            ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
#ifdef KSE
            if ((p->p_flag & P_SA) &&
                SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) {
                p->p_flag |= P_SIGEVENT;
                wakeup(&p->p_siglist);
            }
#endif
            /* never to be seen again */
            sigqueue_delete_proc(p, sig);
            if (sig != SIGCONT)
@@ -1206,10 +1218,12 @@ kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
            continue;
        if (!SIGISMEMBER(td->td_sigqueue.sq_signals, i)) {
            if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) {
#ifdef KSE
                if (p->p_flag & P_SA) {
                    p->p_flag |= P_SIGEVENT;
                    wakeup(&p->p_siglist);
                }
#endif
                sigqueue_move(&p->p_sigqueue,
                    &td->td_sigqueue, i);
            } else
@@ -1882,7 +1896,9 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
{
    struct sigacts *ps;
    struct proc *p;
#ifdef KSE
    int error;
#endif
    int sig;
    int code;

@@ -1891,6 +1907,7 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
    code = ksi->ksi_code;
    KASSERT(_SIG_VALID(sig), ("invalid signal"));

#ifdef KSE
    if (td->td_pflags & TDP_SA) {
        if (td->td_mailbox == NULL)
            thread_user_enter(td);
@@ -1908,6 +1925,9 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
    } else {
        PROC_LOCK(p);
    }
#else
    PROC_LOCK(p);
#endif
    ps = p->p_sigacts;
    mtx_lock(&ps->ps_mtx);
    if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
@@ -1918,9 +1938,15 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
            ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
                &td->td_sigmask, code);
#endif
#ifdef KSE
        if (!(td->td_pflags & TDP_SA))
            (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
                ksi, &td->td_sigmask);
#else
        (*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
            ksi, &td->td_sigmask);
#endif
#ifdef KSE
        else if (td->td_mailbox == NULL) {
            mtx_unlock(&ps->ps_mtx);
            /* UTS caused a sync signal */
@@ -1939,6 +1965,7 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
            sigexit(td, SIGSEGV);
            mtx_lock(&ps->ps_mtx);
        }
#endif
        SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
        if (!SIGISMEMBER(ps->ps_signodefer, sig))
            SIGADDSET(td->td_sigmask, sig);
@@ -2052,6 +2079,7 @@ psignal_event(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
int
tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
{
#ifdef KSE
    sigset_t saved;
    int ret;

@@ -2071,6 +2099,7 @@ tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
static int
do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
{
#endif
    sig_t action;
    sigqueue_t *sigqueue;
    int prop;
@@ -2081,9 +2110,17 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
    PROC_LOCK_ASSERT(p, MA_OWNED);

    if (!_SIG_VALID(sig))
#ifdef KSE
        panic("do_tdsignal(): invalid signal");
#else
        panic("tdsignal(): invalid signal");
#endif

#ifdef KSE
    KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("do_tdsignal: ksi on queue"));
#else
    KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("tdsignal: ksi on queue"));
#endif

    /*
     * IEEE Std 1003.1-2001: return success when killing a zombie.
@@ -2240,6 +2277,7 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
        if (action == SIG_DFL) {
            sigqueue_delete(sigqueue, sig);
        } else if (action == SIG_CATCH) {
#ifdef KSE
            /*
             * The process wants to catch it so it needs
             * to run at least one thread, but which one?
@@ -2250,6 +2288,12 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
             * single thread is runnable asap.
             * XXXKSE for now however, make them all run.
             */
#else
            /*
             * The process wants to catch it so it needs
             * to run at least one thread, but which one?
             */
#endif
            goto runfast;
        }
        /*
@@ -2541,8 +2585,10 @@ issignal(td)
         */
        if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
            sigqueue_delete(&td->td_sigqueue, sig);
#ifdef KSE
            if (td->td_pflags & TDP_SA)
                SIGADDSET(td->td_sigmask, sig);
#endif
            continue;
        }
        if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
@@ -2553,9 +2599,11 @@ issignal(td)
            newsig = ptracestop(td, sig);
            mtx_lock(&ps->ps_mtx);

#ifdef KSE
            if (td->td_pflags & TDP_SA)
                SIGADDSET(td->td_sigmask, sig);

#endif
            if (sig != newsig) {
                ksiginfo_t ksi;
                /*
@@ -2579,8 +2627,10 @@ issignal(td)
                 * signal is being masked, look for other signals.
                 */
                SIGADDSET(td->td_sigqueue.sq_signals, sig);
#ifdef KSE
                if (td->td_pflags & TDP_SA)
                    SIGDELSET(td->td_sigmask, sig);
#endif
                if (SIGISMEMBER(td->td_sigmask, sig))
                    continue;
                signotify(td);
@@ -2743,7 +2793,11 @@ postsig(sig)
        mtx_lock(&ps->ps_mtx);
    }

#ifdef KSE
    if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
#else
    if (action == SIG_DFL) {
#endif
        /*
         * Default action, where the default is to kill
         * the process. (Other cases were ignored above.)
@@ -2752,6 +2806,7 @@ postsig(sig)
        sigexit(td, sig);
        /* NOTREACHED */
    } else {
#ifdef KSE
        if (td->td_pflags & TDP_SA) {
            if (sig == SIGKILL) {
                mtx_unlock(&ps->ps_mtx);
@@ -2759,6 +2814,7 @@ postsig(sig)
            }
        }

#endif
        /*
         * If we get here, the signal must be caught.
         */
@@ -2801,10 +2857,14 @@ postsig(sig)
            p->p_code = 0;
            p->p_sig = 0;
        }
#ifdef KSE
        if (td->td_pflags & TDP_SA)
            thread_signal_add(curthread, &ksi);
        else
            (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
#else
        (*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
#endif
    }
}

@@ -430,7 +430,11 @@ uio_yield(void)
    td = curthread;
    mtx_lock_spin(&sched_lock);
    DROP_GIANT();
#ifdef KSE
    sched_prio(td, td->td_ksegrp->kg_user_pri);    /* XXXKSE */
#else
    sched_prio(td, td->td_user_pri);
#endif
    mi_switch(SW_INVOL, NULL);
    mtx_unlock_spin(&sched_lock);
    PICKUP_GIANT();
@@ -24,6 +24,7 @@
 * SUCH DAMAGE.
 */

#ifdef KSE
/***
Here is the logic..

@@ -84,6 +85,7 @@ queued at the priorities they have inherrited from the M highest priority
threads for that KSEGROUP. If this situation changes, the KSEs are
reassigned to keep this true.
***/
#endif

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
@@ -142,31 +144,48 @@ SYSCTL_INT(_kern_sched, OID_AUTO, preemption, CTLFLAG_RD,
/************************************************************************
 * Functions that manipulate runnability from a thread perspective.    *
 ************************************************************************/
#ifdef KSE
/*
 * Select the KSE that will be run next. From that find the thread, and
 * remove it from the KSEGRP's run queue. If there is thread clustering,
 * this will be what does it.
 */
#else
/*
 * Select the thread that will be run next.
 */
#endif
struct thread *
choosethread(void)
{
#ifdef KSE
    struct kse *ke;
#endif
    struct thread *td;
#ifdef KSE
    struct ksegrp *kg;
#endif

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
    if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
        /* Shutting down, run idlethread on AP's */
        td = PCPU_GET(idlethread);
#ifdef KSE
        ke = td->td_kse;
#endif
        CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
#ifdef KSE
        ke->ke_flags |= KEF_DIDRUN;
#else
        td->td_kse->ke_flags |= KEF_DIDRUN;
#endif
        TD_SET_RUNNING(td);
        return (td);
    }
#endif

retry:
#ifdef KSE
    ke = sched_choose();
    if (ke) {
        td = ke->ke_thread;
@@ -179,15 +198,25 @@ choosethread(void)
            }
            TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
        }
#else
    td = sched_choose();
    if (td) {
#endif
        CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
            td, td->td_priority);
    } else {
        /* Simulate runq_choose() having returned the idle thread */
        td = PCPU_GET(idlethread);
#ifdef KSE
        ke = td->td_kse;
#endif
        CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
    }
#ifdef KSE
    ke->ke_flags |= KEF_DIDRUN;
#else
    td->td_kse->ke_flags |= KEF_DIDRUN;
#endif

    /*
     * If we are in panic, only allow system threads,
@@ -204,6 +233,7 @@ choosethread(void)
    return (td);
}

#ifdef KSE
/*
 * Given a surplus system slot, try assign a new runnable thread to it.
 * Called from:
@@ -287,6 +317,7 @@ remrunqueue(struct thread *td)
        }
    }
}
#endif
#endif

/*
 * Change the priority of a thread that is on the run queue.
@@ -294,7 +325,9 @@ remrunqueue(struct thread *td)
void
adjustrunqueue( struct thread *td, int newpri)
{
#ifdef KSE
    struct ksegrp *kg;
#endif
    struct kse *ke;

    mtx_assert(&sched_lock, MA_OWNED);
@@ -302,6 +335,7 @@ adjustrunqueue( struct thread *td, int newpri)

    ke = td->td_kse;
    CTR1(KTR_RUNQ, "adjustrunqueue: td%p", td);
#ifdef KSE
    /*
     * If it is not a threaded process, take the shortcut.
     */
@@ -338,8 +372,22 @@ adjustrunqueue( struct thread *td, int newpri)
        TD_SET_CAN_RUN(td);
        td->td_priority = newpri;
        setrunqueue(td, SRQ_BORING);
#else
    /* We only care about the kse in the run queue. */
    td->td_priority = newpri;
#ifndef SCHED_CORE
    if (ke->ke_rqindex != (newpri / RQ_PPQ))
#else
    if (ke->ke_rqindex != newpri)
#endif
    {
        sched_rem(td);
        sched_add(td, SRQ_BORING);
    }
#endif
}

#ifdef KSE
/*
 * This function is called when a thread is about to be put on a
 * ksegrp run queue because it has been made runnable or its
@@ -485,15 +533,21 @@ maybe_preempt_in_ksegrp(struct thread *td)


int limitcount;
#endif
void
setrunqueue(struct thread *td, int flags)
{
#ifdef KSE
    struct ksegrp *kg;
    struct thread *td2;
    struct thread *tda;

    CTR3(KTR_RUNQ, "setrunqueue: td:%p kg:%p pid:%d",
        td, td->td_ksegrp, td->td_proc->p_pid);
#else
    CTR2(KTR_RUNQ, "setrunqueue: td:%p pid:%d",
        td, td->td_proc->p_pid);
#endif
    CTR5(KTR_SCHED, "setrunqueue: %p(%s) prio %d by %p(%s)",
        td, td->td_proc->p_comm, td->td_priority, curthread,
        curthread->td_proc->p_comm);
@@ -503,6 +557,7 @@ setrunqueue(struct thread *td, int flags)
    KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
        ("setrunqueue: bad thread state"));
    TD_SET_RUNQ(td);
#ifdef KSE
    kg = td->td_ksegrp;
    if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
        /*
@@ -594,6 +649,9 @@ setrunqueue(struct thread *td, int flags)
        if ((flags & SRQ_YIELDING) == 0)
            maybe_preempt_in_ksegrp(td);
    }
#else
    sched_add(td, flags);
#endif
}

/*
@@ -705,6 +763,7 @@ maybe_preempt(struct thread *td)
     */
    MPASS(TD_ON_RUNQ(td));
    MPASS(td->td_sched->ke_state != KES_ONRUNQ);
#ifdef KSE
    if (td->td_proc->p_flag & P_HADTHREADS) {
        /*
         * If this is a threaded process we actually ARE on the
@@ -721,6 +780,7 @@ maybe_preempt(struct thread *td)
        TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
    }

#endif
    TD_SET_RUNNING(td);
    CTR3(KTR_PROC, "preempting to thread %p (pid %d, %s)\n", td,
        td->td_proc->p_pid, td->td_proc->p_comm);
@@ -926,7 +986,11 @@ runq_remove(struct runq *rq, struct kse *ke)
    struct rqhead *rqh;
    int pri;

#ifdef KSE
    KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
#else
    KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
#endif
        ("runq_remove: process swapped out"));
    pri = ke->ke_rqindex;
    rqh = &rq->rq_queues[pri];
@@ -944,6 +1008,7 @@ runq_remove(struct runq *rq, struct kse *ke)
#include <vm/uma.h>
extern struct mtx kse_zombie_lock;

#ifdef KSE
/*
 * Allocate scheduler specific per-process resources.
 * The thread and ksegrp have already been linked in.
@@ -959,6 +1024,7 @@ sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td)
    /* This can go in sched_fork */
    sched_init_concurrency(kg);
}
#endif

/*
 * thread is being either created or recycled.
@@ -980,6 +1046,7 @@ sched_newthread(struct thread *td)
    ke->ke_state = KES_THREAD;
}

#ifdef KSE
/*
 * Set up an initial concurrency of 1
 * and set the given thread (if given) to be using that
@@ -1036,5 +1103,6 @@ sched_thread_exit(struct thread *td)
    SLOT_RELEASE(td->td_ksegrp);
    slot_fill(td->td_ksegrp);
}
#endif

#endif /* KERN_SWITCH_INCLUDE */
@@ -405,8 +405,10 @@ mi_switch(int flags, struct thread *newtd)
    PCPU_SET(switchticks, ticks);
    CTR4(KTR_PROC, "mi_switch: old thread %p (kse %p, pid %ld, %s)",
        (void *)td, td->td_sched, (long)p->p_pid, p->p_comm);
#ifdef KSE
    if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA))
        newtd = thread_switchout(td, flags, newtd);
#endif
#if (KTR_COMPILE & KTR_SCHED) != 0
    if (td == PCPU_GET(idlethread))
        CTR3(KTR_SCHED, "mi_switch: %p(%s) prio %d idle",
@@ -142,14 +142,18 @@ create_thread(struct thread *td, mcontext_t *ctx,
{
    stack_t stack;
    struct thread *newtd;
#ifdef KSE
    struct ksegrp *kg, *newkg;
#endif
    struct proc *p;
    long id;
    int error;

    error = 0;
    p = td->td_proc;
#ifdef KSE
    kg = td->td_ksegrp;
#endif

    /* Have race condition but it is cheap. */
    if (p->p_numthreads >= max_threads_per_proc)
@@ -225,6 +229,7 @@ create_thread(struct thread *td, mcontext_t *ctx,
        }
    }

#ifdef KSE
    newkg = ksegrp_alloc();
    bzero(&newkg->kg_startzero,
        __rangeof(struct ksegrp, kg_startzero, kg_endzero));
@@ -238,7 +243,16 @@ create_thread(struct thread *td, mcontext_t *ctx,
    ksegrp_link(newkg, p);
    thread_link(newtd, newkg);
    PROC_UNLOCK(p);
#else
    PROC_LOCK(td->td_proc);
    td->td_proc->p_flag |= P_HADTHREADS;
    newtd->td_sigmask = td->td_sigmask;
    mtx_lock_spin(&sched_lock);
    thread_link(newtd, p);
    PROC_UNLOCK(p);
#endif

#ifdef KSE
    /* let the scheduler know about these things. */
    sched_fork_ksegrp(td, newkg);
    sched_fork_thread(td, newtd);
@@ -249,6 +263,16 @@ create_thread(struct thread *td, mcontext_t *ctx,
            sched_prio(newtd, newkg->kg_user_pri);
        } /* ignore timesharing class */
    }
#else
    sched_fork(td, newtd);
    if (rtp != NULL) {
        if (!(td->td_pri_class == PRI_TIMESHARE &&
            rtp->type == RTP_PRIO_NORMAL)) {
            rtp_to_pri(rtp, newtd);
            sched_prio(newtd, newtd->td_user_pri);
        } /* ignore timesharing class */
    }
#endif
    TD_SET_CAN_RUN(newtd);
    /* if ((flags & THR_SUSPENDED) == 0) */
    setrunqueue(newtd, SRQ_BORING);
@ -50,10 +50,16 @@ __FBSDID("$FreeBSD$");
|
||||
#include <vm/vm_extern.h>
|
||||
#include <vm/uma.h>
|
||||
|
||||
#ifdef KSE
|
||||
/*
|
||||
* KSEGRP related storage.
|
||||
*/
|
||||
static uma_zone_t ksegrp_zone;
|
||||
#else
|
||||
/*
|
||||
* thread related storage.
|
||||
*/
|
||||
#endif
|
||||
static uma_zone_t thread_zone;
|
||||
|
||||
/* DEBUG ONLY */
|
||||
@ -74,13 +80,18 @@ int max_threads_hits;
|
||||
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
|
||||
&max_threads_hits, 0, "");
|
||||
|
||||
#ifdef KSE
|
||||
int virtual_cpu;
|
||||
|
||||
#endif
|
||||
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
|
||||
#ifdef KSE
|
||||
TAILQ_HEAD(, ksegrp) zombie_ksegrps = TAILQ_HEAD_INITIALIZER(zombie_ksegrps);
|
||||
#endif
|
||||
struct mtx kse_zombie_lock;
|
||||
MTX_SYSINIT(kse_zombie_lock, &kse_zombie_lock, "kse zombie lock", MTX_SPIN);
|
||||
|
||||
#ifdef KSE
|
||||
static int
|
||||
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
|
||||
{
|
||||
@ -105,6 +116,7 @@ sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
|
||||
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
|
||||
0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
|
||||
"debug virtual cpus");
|
||||
#endif
|
||||
|
||||
struct mtx tid_lock;
|
||||
static struct unrhdr *tid_unrhdr;
|
||||
@ -216,6 +228,7 @@ thread_fini(void *mem, int size)
|
||||
vm_thread_dispose(td);
|
||||
}
|
||||
|
||||
#ifdef KSE
|
||||
/*
|
||||
* Initialize type-stable parts of a ksegrp (when newly created).
|
||||
*/
|
||||
@ -271,6 +284,7 @@ ksegrp_unlink(struct ksegrp *kg)
|
||||
if (p->p_procscopegrp == kg)
|
||||
p->p_procscopegrp = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For a newly created process,
|
||||
@ -281,10 +295,16 @@ ksegrp_unlink(struct ksegrp *kg)
|
||||
* proc_init()
|
||||
*/
|
||||
void
|
||||
#ifdef KSE
|
||||
proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
|
||||
#else
|
||||
proc_linkup(struct proc *p, struct thread *td)
|
||||
#endif
|
||||
{
|
||||
|
||||
#ifdef KSE
|
||||
TAILQ_INIT(&p->p_ksegrps); /* all ksegrps in proc */
|
||||
#endif
|
||||
TAILQ_INIT(&p->p_threads); /* all threads in proc */
|
||||
TAILQ_INIT(&p->p_suspended); /* Threads suspended */
|
||||
sigqueue_init(&p->p_sigqueue, p);
|
||||
@ -294,11 +314,17 @@ proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td)
|
||||
p->p_ksi->ksi_flags = KSI_EXT | KSI_INS;
|
||||
}
|
||||
LIST_INIT(&p->p_mqnotifier);
|
||||
#ifdef KSE
|
||||
p->p_numksegrps = 0;
|
||||
#endif
|
||||
p->p_numthreads = 0;
|
||||
|
||||
#ifdef KSE
|
||||
ksegrp_link(kg, p);
|
||||
thread_link(td, kg);
|
||||
#else
|
||||
thread_link(td, p);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
@ -314,10 +340,12 @@ threadinit(void)
|
||||
thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
|
||||
thread_ctor, thread_dtor, thread_init, thread_fini,
|
||||
UMA_ALIGN_CACHE, 0);
|
||||
#ifdef KSE
|
||||
ksegrp_zone = uma_zcreate("KSEGRP", sched_sizeof_ksegrp(),
|
||||
ksegrp_ctor, NULL, NULL, NULL,
|
||||
UMA_ALIGN_CACHE, 0);
|
||||
kseinit(); /* set up kse specific stuff e.g. upcall zone*/
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
@ -331,6 +359,7 @@ thread_stash(struct thread *td)
|
||||
mtx_unlock_spin(&kse_zombie_lock);
|
||||
}
|
||||
|
||||
#ifdef KSE
|
||||
/*
|
||||
* Stash an embarasingly extra ksegrp into the zombie ksegrp queue.
|
||||
*/
|
||||
@ -341,6 +370,7 @@ ksegrp_stash(struct ksegrp *kg)
|
||||
TAILQ_INSERT_HEAD(&zombie_ksegrps, kg, kg_ksegrp);
|
||||
mtx_unlock_spin(&kse_zombie_lock);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Reap zombie kse resource.
|
||||
@ -349,21 +379,31 @@ void
|
||||
thread_reap(void)
|
||||
{
|
||||
struct thread *td_first, *td_next;
|
||||
#ifdef KSE
|
||||
struct ksegrp *kg_first, * kg_next;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Don't even bother to lock if none at this instant,
|
||||
* we really don't care about the next instant..
|
||||
*/
|
||||
#ifdef KSE
|
||||
if ((!TAILQ_EMPTY(&zombie_threads))
|
||||
|| (!TAILQ_EMPTY(&zombie_ksegrps))) {
|
||||
#else
|
||||
if (!TAILQ_EMPTY(&zombie_threads)) {
|
||||
#endif
|
||||
mtx_lock_spin(&kse_zombie_lock);
|
||||
td_first = TAILQ_FIRST(&zombie_threads);
|
||||
#ifdef KSE
|
||||
kg_first = TAILQ_FIRST(&zombie_ksegrps);
|
||||
#endif
|
||||
if (td_first)
|
||||
TAILQ_INIT(&zombie_threads);
|
||||
#ifdef KSE
|
||||
if (kg_first)
|
||||
TAILQ_INIT(&zombie_ksegrps);
|
||||
#endif
|
||||
mtx_unlock_spin(&kse_zombie_lock);
|
||||
while (td_first) {
|
||||
td_next = TAILQ_NEXT(td_first, td_runq);
|
||||
@ -372,6 +412,7 @@ thread_reap(void)
|
||||
thread_free(td_first);
|
||||
td_first = td_next;
|
||||
}
|
||||
#ifdef KSE
|
||||
while (kg_first) {
|
||||
kg_next = TAILQ_NEXT(kg_first, kg_ksegrp);
|
||||
ksegrp_free(kg_first);
|
||||
@ -382,9 +423,11 @@ thread_reap(void)
|
||||
* is there.
|
||||
*/
|
||||
kse_GC();
|
||||
#endif
|
||||
}
|
||||
}

#ifdef KSE
/*
* Allocate a ksegrp.
*/
@@ -393,6 +436,7 @@ ksegrp_alloc(void)
{
return (uma_zalloc(ksegrp_zone, M_WAITOK));
}
#endif

/*
* Allocate a thread.
@@ -400,10 +444,12 @@ ksegrp_alloc(void)
struct thread *
thread_alloc(void)
{

thread_reap(); /* check if any zombies to get */
return (uma_zalloc(thread_zone, M_WAITOK));
}

#ifdef KSE
/*
* Deallocate a ksegrp.
*/
@@ -412,6 +458,7 @@ ksegrp_free(struct ksegrp *td)
{
uma_zfree(ksegrp_zone, td);
}
#endif

/*
* Deallocate a thread.
@@ -449,8 +496,10 @@ thread_free(struct thread *td)
* exit1()
* kse_exit()
* thr_exit()
* ifdef KSE
* thread_user_enter()
* thread_userret()
* endif
* thread_suspend_check()
*/
void
@@ -459,17 +508,23 @@ thread_exit(void)
uint64_t new_switchtime;
struct thread *td;
struct proc *p;
#ifdef KSE
struct ksegrp *kg;
#endif

td = curthread;
#ifdef KSE
kg = td->td_ksegrp;
#endif
p = td->td_proc;

mtx_assert(&sched_lock, MA_OWNED);
mtx_assert(&Giant, MA_NOTOWNED);
PROC_LOCK_ASSERT(p, MA_OWNED);
KASSERT(p != NULL, ("thread exiting without a process"));
#ifdef KSE
KASSERT(kg != NULL, ("thread exiting without a kse group"));
#endif
CTR3(KTR_PROC, "thread_exit: thread %p (pid %ld, %s)", td,
(long)p->p_pid, p->p_comm);
KASSERT(TAILQ_EMPTY(&td->td_sigqueue.sq_list), ("signal pending"));
@@ -478,6 +533,7 @@ thread_exit(void)
AUDIT_SYSCALL_EXIT(0, td);
#endif

#ifdef KSE
if (td->td_standin != NULL) {
/*
* Note that we don't need to free the cred here as it
@@ -486,6 +542,7 @@ thread_exit(void)
thread_stash(td->td_standin);
td->td_standin = NULL;
}
#endif

umtx_thread_exit(td);

@@ -496,6 +553,7 @@ thread_exit(void)
*/
cpu_thread_exit(td); /* XXXSMP */

#ifdef KSE
/*
* The thread is exiting. scheduler can release its stuff
* and collect stats etc.
@@ -503,6 +561,7 @@ thread_exit(void)
* need scheduler stuff.
*/
sched_thread_exit(td);
#endif

/* Do the same timestamp bookkeeping that mi_switch() would do. */
new_switchtime = cpu_ticks();
@@ -529,9 +588,13 @@ thread_exit(void)
if (p->p_flag & P_HADTHREADS) {
if (p->p_numthreads > 1) {
thread_unlink(td);
#ifdef KSE

/* XXX first arg not used in 4BSD or ULE */
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
#else
sched_exit(p, td);
#endif

/*
* The test below is NOT true if we are the
@@ -544,6 +607,7 @@ thread_exit(void)
}
}

#ifdef KSE
/*
* Because each upcall structure has an owner thread,
* owner thread exits only when process is in exiting
@@ -582,8 +646,11 @@ thread_exit(void)
ksegrp_unlink(kg);
ksegrp_stash(kg);
}
#endif
PROC_UNLOCK(p);
#ifdef KSE
td->td_ksegrp = NULL;
#endif
PCPU_SET(deadthread, td);
} else {
/*
@@ -593,8 +660,10 @@ thread_exit(void)
* exit1() - clears threading flags before coming here
* kse_exit() - treats last thread specially
* thr_exit() - treats last thread specially
* ifdef KSE
* thread_user_enter() - only if more exist
* thread_userret() - only if more exist
* endif
* thread_suspend_check() - only if more exist
*/
panic ("thread_exit: Last thread exiting on its own");
@@ -625,8 +694,11 @@ thread_wait(struct proc *p)

mtx_assert(&Giant, MA_NOTOWNED);
KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
#ifdef KSE
KASSERT((p->p_numksegrps == 1), ("Multiple ksegrps in wait1()"));
#endif
FOREACH_THREAD_IN_PROC(p, td) {
#ifdef KSE
if (td->td_standin != NULL) {
if (td->td_standin->td_ucred != NULL) {
crfree(td->td_standin->td_ucred);
@@ -635,6 +707,7 @@ thread_wait(struct proc *p)
thread_free(td->td_standin);
td->td_standin = NULL;
}
#endif
cpu_thread_clean(td);
crfree(td->td_ucred);
}
@@ -650,28 +723,46 @@ thread_wait(struct proc *p)
* The thread is linked as if running but no KSE assigned.
* Called from:
* proc_linkup()
* ifdef KSE
* thread_schedule_upcall()
* endif
* thr_create()
*/
void
#ifdef KSE
thread_link(struct thread *td, struct ksegrp *kg)
#else
thread_link(struct thread *td, struct proc *p)
#endif
{
#ifdef KSE
struct proc *p;
#endif

#ifdef KSE
p = kg->kg_proc;
#endif
td->td_state = TDS_INACTIVE;
td->td_proc = p;
#ifdef KSE
td->td_ksegrp = kg;
#endif
td->td_flags = 0;
#ifdef KSE
td->td_kflags = 0;
#endif

LIST_INIT(&td->td_contested);
sigqueue_init(&td->td_sigqueue, p);
callout_init(&td->td_slpcallout, CALLOUT_MPSAFE);
TAILQ_INSERT_HEAD(&p->p_threads, td, td_plist);
#ifdef KSE
TAILQ_INSERT_HEAD(&kg->kg_threads, td, td_kglist);
#endif
p->p_numthreads++;
#ifdef KSE
kg->kg_numthreads++;
#endif
}

/*
@@ -686,6 +777,7 @@ thread_unthread(struct thread *td)
struct proc *p = td->td_proc;

KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
upcall_remove(td);
p->p_flag &= ~(P_SA|P_HADTHREADS);
td->td_mailbox = NULL;
@@ -695,6 +787,9 @@ thread_unthread(struct thread *td)
td->td_standin = NULL;
}
sched_set_concurrency(td->td_ksegrp, 1);
#else
p->p_flag &= ~P_HADTHREADS;
#endif
}

/*
@@ -705,15 +800,23 @@ void
thread_unlink(struct thread *td)
{
struct proc *p = td->td_proc;
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif

mtx_assert(&sched_lock, MA_OWNED);
TAILQ_REMOVE(&p->p_threads, td, td_plist);
p->p_numthreads--;
#ifdef KSE
TAILQ_REMOVE(&kg->kg_threads, td, td_kglist);
kg->kg_numthreads--;
#endif
/* could clear a few other things here */
#ifdef KSE
/* Must NOT clear links to proc and ksegrp! */
#else
/* Must NOT clear links to proc! */
#endif
}

/*
@@ -1032,7 +1135,9 @@ thread_single_end(void)
p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
mtx_lock_spin(&sched_lock);
p->p_singlethread = NULL;
#ifdef KSE
p->p_procscopegrp = NULL;
#endif
/*
* If there are other threads they may now run,
* unless of course there is a blanket 'stop order'
@@ -166,9 +166,15 @@ struct umtxq_chain {
* if it is using 100%CPU, this is unfair to other processes.
*/

#ifdef KSE
#define UPRI(td) (((td)->td_ksegrp->kg_user_pri >= PRI_MIN_TIMESHARE &&\
(td)->td_ksegrp->kg_user_pri <= PRI_MAX_TIMESHARE) ?\
PRI_MAX_TIMESHARE : (td)->td_ksegrp->kg_user_pri)
#else
#define UPRI(td) (((td)->td_user_pri >= PRI_MIN_TIMESHARE &&\
(td)->td_user_pri <= PRI_MAX_TIMESHARE) ?\
PRI_MAX_TIMESHARE : (td)->td_user_pri)
#endif
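Both variants of UPRI() clamp a timesharing user priority to the worst timeshare value, so a CPU-hogging lock owner cannot queue ahead of other umtx waiters, while realtime and idle priorities pass through untouched. Illustrative values only, assuming the ranges in this era's sys/priority.h (PRI_MIN_TIMESHARE 160, PRI_MAX_TIMESHARE 223):

/*
 * user_pri = 180 (timeshare)  -> UPRI(td) = 223  (clamped to worst)
 * user_pri = 100 (realtime)   -> UPRI(td) = 100  (passed through)
 * user_pri = 250 (idle class) -> UPRI(td) = 250  (passed through)
 */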

#define GOLDEN_RATIO_PRIME 2654404609U
#define UMTX_CHAINS 128

@@ -106,7 +106,11 @@ getscheduler(struct ksched *ksched, struct thread *td, int *policy)
int e = 0;

mtx_lock_spin(&sched_lock);
#ifdef KSE
pri_to_rtp(td->td_ksegrp, &rtp);
#else
pri_to_rtp(td, &rtp);
#endif
mtx_unlock_spin(&sched_lock);
switch (rtp.type)
{
@@ -153,7 +157,11 @@ ksched_getparam(struct ksched *ksched,
struct rtprio rtp;

mtx_lock_spin(&sched_lock);
#ifdef KSE
pri_to_rtp(td->td_ksegrp, &rtp);
#else
pri_to_rtp(td, &rtp);
#endif
mtx_unlock_spin(&sched_lock);
if (RTP_PRIO_IS_REALTIME(rtp.type))
param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@@ -174,7 +182,9 @@ ksched_setscheduler(struct ksched *ksched,
{
int e = 0;
struct rtprio rtp;
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif

switch(policy)
{
@@ -189,6 +199,7 @@ ksched_setscheduler(struct ksched *ksched,
? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;

mtx_lock_spin(&sched_lock);
#ifdef KSE
rtp_to_pri(&rtp, kg);
FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
if (TD_IS_RUNNING(td)) {
@@ -199,6 +210,9 @@ ksched_setscheduler(struct ksched *ksched,
}
}
}
#else
rtp_to_pri(&rtp, td);
#endif
mtx_unlock_spin(&sched_lock);
}
else
@@ -212,6 +226,7 @@ ksched_setscheduler(struct ksched *ksched,
rtp.type = RTP_PRIO_NORMAL;
rtp.prio = p4prio_to_rtpprio(param->sched_priority);
mtx_lock_spin(&sched_lock);
#ifdef KSE
rtp_to_pri(&rtp, kg);

/* XXX Simply revert to whatever we had for last
@@ -230,6 +245,9 @@ ksched_setscheduler(struct ksched *ksched,
}

}
#else
rtp_to_pri(&rtp, td);
#endif
mtx_unlock_spin(&sched_lock);
}
break;

@@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$");
#endif
#define NICE_WEIGHT 1 /* Priorities per nice level. */

#ifdef KSE
/*
* The schedulable entity that can be given a context to run.
* A process may have several of these. Probably one per processor
@@ -82,6 +83,13 @@ __FBSDID("$FreeBSD$");
* with a KSEG that contains the priority and niceness
* for the group.
*/
#else
/*
* The schedulable entity that runs a context.
* A process may have several of these. Probably one per processor
* but possibly a few more.
*/
#endif
struct kse {
TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
struct thread *ke_thread; /* (*) Active associated thread. */
@@ -95,8 +103,10 @@ struct kse {
struct runq *ke_runq; /* runq the kse is currently on */
};

#ifdef KSE
#define ke_proc ke_thread->td_proc
#define ke_ksegrp ke_thread->td_ksegrp
#endif

#define td_kse td_sched

@@ -113,6 +123,7 @@ struct kse {
#define SKE_RUNQ_PCPU(ke) \
((ke)->ke_runq != 0 && (ke)->ke_runq != &runq)

#ifdef KSE
struct kg_sched {
struct thread *skg_last_assigned; /* (j) Last thread assigned to */
/* the system scheduler. */
@@ -144,6 +155,7 @@ do { \
/* KASSERT((kg->kg_avail_opennings >= 0), \
("slots out of whack"));*/ \
} while (0)
#endif

/*
* KSE_CAN_MIGRATE macro returns true if the kse can migrate between
@@ -153,7 +165,9 @@ do { \
((ke)->ke_thread->td_pinned == 0 && ((ke)->ke_flags & KEF_BOUND) == 0)

static struct kse kse0;
#ifdef KSE
static struct kg_sched kg_sched0;
#endif

static int sched_tdcnt; /* Total runnable threads in the system. */
static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */
@@ -161,8 +175,12 @@ static int sched_quantum; /* Roundrobin scheduling quantum in ticks. */

static struct callout roundrobin_callout;

#ifdef KSE
static void slot_fill(struct ksegrp *kg);
static struct kse *sched_choose(void); /* XXX Should be thread * */
#else
static struct thread *sched_choose(void);
#endif

static void setup_runqs(void);
static void roundrobin(void *arg);
@@ -171,9 +189,15 @@ static void schedcpu_thread(void);
static void sched_priority(struct thread *td, u_char prio);
static void sched_setup(void *dummy);
static void maybe_resched(struct thread *td);
#ifdef KSE
static void updatepri(struct ksegrp *kg);
static void resetpriority(struct ksegrp *kg);
static void resetpriority_thread(struct thread *td, struct ksegrp *kg);
#else
static void updatepri(struct thread *td);
static void resetpriority(struct thread *td);
static void resetpriority_thread(struct thread *td);
#endif
#ifdef SMP
static int forward_wakeup(int cpunum);
#endif
@@ -276,6 +300,7 @@ SYSCTL_INT(_kern_sched_ipiwakeup, OID_AUTO, htt2, CTLFLAG_RW,
"account for htt");

#endif
#ifdef KSE
static int sched_followon = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, followon, CTLFLAG_RW,
&sched_followon, 0,
@@ -290,6 +315,7 @@ static int sched_kgfollowons = 0;
SYSCTL_INT(_kern_sched, OID_AUTO, kgfollowons, CTLFLAG_RD,
&sched_kgfollowons, 0,
"number of followons done in a ksegrp");
#endif

static __inline void
sched_load_add(void)
@@ -340,20 +366,40 @@ roundrobin(void *arg)

/*
* Constants for digital decay and forget:
* ifdef KSE
* 90% of (kg_estcpu) usage in 5 * loadav time
* else
* 90% of (td_estcpu) usage in 5 * loadav time
* endif
* 95% of (ke_pctcpu) usage in 60 seconds (load insensitive)
* Note that, as ps(1) mentions, this can let percentages
* total over 100% (I've seen 137.9% for 3 processes).
*
* ifdef KSE
* Note that schedclock() updates kg_estcpu and p_cpticks asynchronously.
* else
* Note that schedclock() updates td_estcpu and p_cpticks asynchronously.
* endif
*
* ifdef KSE
* We wish to decay away 90% of kg_estcpu in (5 * loadavg) seconds.
* else
* We wish to decay away 90% of td_estcpu in (5 * loadavg) seconds.
* endif
* That is, the system wants to compute a value of decay such
* that the following for loop:
* for (i = 0; i < (5 * loadavg); i++)
* ifdef KSE
* kg_estcpu *= decay;
* else
* td_estcpu *= decay;
* endif
* will compute
* ifdef KSE
* kg_estcpu *= 0.1;
* else
* td_estcpu *= 0.1;
* endif
* for all values of loadavg:
*
* Mathematically this loop can be expressed by saying:
@@ -436,7 +482,9 @@ schedcpu(void)
struct thread *td;
struct proc *p;
struct kse *ke;
#ifdef KSE
struct ksegrp *kg;
#endif
int awake, realstathz;

realstathz = stathz ? stathz : hz;
@@ -451,8 +499,13 @@ schedcpu(void)
* 16-bit int's (remember them?) overflow takes 45 days.
*/
p->p_swtime++;
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p, kg) {
#else
FOREACH_THREAD_IN_PROC(p, td) {
#endif
awake = 0;
#ifdef KSE
FOREACH_THREAD_IN_GROUP(kg, td) {
ke = td->td_kse;
/*
@@ -502,12 +555,70 @@ schedcpu(void)
#endif
ke->ke_cpticks = 0;
} /* end of kse loop */
#else
ke = td->td_kse;
/*
* Increment sleep time (if sleeping). We
* ignore overflow, as above.
*/
/*
* The kse slptimes are not touched in wakeup
* because the thread may not HAVE a KSE.
*/
if (ke->ke_state == KES_ONRUNQ) {
awake = 1;
ke->ke_flags &= ~KEF_DIDRUN;
} else if ((ke->ke_state == KES_THREAD) &&
(TD_IS_RUNNING(td))) {
awake = 1;
/* Do not clear KEF_DIDRUN */
} else if (ke->ke_flags & KEF_DIDRUN) {
awake = 1;
ke->ke_flags &= ~KEF_DIDRUN;
}

/*
* ke_pctcpu is only for ps and ttyinfo().
* Do it per kse, and add them up at the end?
* XXXKSE
*/
ke->ke_pctcpu = (ke->ke_pctcpu * ccpu) >>
FSHIFT;
/*
* If the kse has been idle the entire second,
* stop recalculating its priority until
* it wakes up.
*/
if (ke->ke_cpticks == 0)
continue;
#if (FSHIFT >= CCPU_SHIFT)
ke->ke_pctcpu += (realstathz == 100)
? ((fixpt_t) ke->ke_cpticks) <<
(FSHIFT - CCPU_SHIFT) :
100 * (((fixpt_t) ke->ke_cpticks)
<< (FSHIFT - CCPU_SHIFT)) / realstathz;
#else
ke->ke_pctcpu += ((FSCALE - ccpu) *
(ke->ke_cpticks *
FSCALE / realstathz)) >> FSHIFT;
#endif
ke->ke_cpticks = 0;
#endif

/*
* ifdef KSE
* If there are ANY running threads in this KSEGRP,
* else
* If there are ANY running threads in this process,
* endif
* then don't count it as sleeping.
*/
if (awake) {
#ifdef KSE
if (kg->kg_slptime > 1) {
#else
if (td->td_slptime > 1) {
#endif
/*
* In an ideal world, this should not
* happen, because whoever woke us
@@ -517,6 +628,7 @@ schedcpu(void)
* priority. Should KASSERT at some
* point when all the cases are fixed.
*/
#ifdef KSE
updatepri(kg);
}
kg->kg_slptime = 0;
@@ -530,6 +642,19 @@ schedcpu(void)
resetpriority_thread(td, kg);
}
} /* end of ksegrp loop */
#else
updatepri(td);
}
td->td_slptime = 0;
} else
td->td_slptime++;
if (td->td_slptime > 1)
continue;
td->td_estcpu = decay_cpu(loadfac, td->td_estcpu);
resetpriority(td);
resetpriority_thread(td);
} /* end of thread loop */
#endif
mtx_unlock_spin(&sched_lock);
} /* end of process loop */
sx_sunlock(&allproc_lock);
@@ -551,24 +676,48 @@ schedcpu_thread(void)

/*
* Recalculate the priority of a process after it has slept for a while.
* ifdef KSE
* For all load averages >= 1 and max kg_estcpu of 255, sleeping for at
* least six times the loadfactor will decay kg_estcpu to zero.
* else
* For all load averages >= 1 and max td_estcpu of 255, sleeping for at
* least six times the loadfactor will decay td_estcpu to zero.
* endif
*/
static void
#ifdef KSE
updatepri(struct ksegrp *kg)
#else
updatepri(struct thread *td)
#endif
{
register fixpt_t loadfac;
register unsigned int newcpu;

loadfac = loadfactor(averunnable.ldavg[0]);
#ifdef KSE
if (kg->kg_slptime > 5 * loadfac)
kg->kg_estcpu = 0;
#else
if (td->td_slptime > 5 * loadfac)
td->td_estcpu = 0;
#endif
else {
#ifdef KSE
newcpu = kg->kg_estcpu;
kg->kg_slptime--; /* was incremented in schedcpu() */
while (newcpu && --kg->kg_slptime)
#else
newcpu = td->td_estcpu;
td->td_slptime--; /* was incremented in schedcpu() */
while (newcpu && --td->td_slptime)
#endif
newcpu = decay_cpu(loadfac, newcpu);
#ifdef KSE
kg->kg_estcpu = newcpu;
#else
td->td_estcpu = newcpu;
#endif
}
}
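The decay constant that satisfies the requirement described above works out, elsewhere in this file, to a per-second factor of 2*loadavg / (2*loadavg + 1), which loadfactor() and decay_cpu() apply in fixed point. A small userland check (floating point, illustrative only; the loadavg value is an assumption for the demo) that this indeed forgets roughly 90% of estcpu in 5 * loadavg seconds:

#include <stdio.h>

int
main(void)
{
	double loadavg = 4.0;	/* assumed load average for the demo */
	double decay = (2.0 * loadavg) / (2.0 * loadavg + 1.0);
	double estcpu = 1.0;
	int i;

	for (i = 0; i < 5 * (int)loadavg; i++)
		estcpu *= decay;	/* one schedcpu() second */
	printf("remaining fraction: %.3f\n", estcpu);	/* prints 0.095 */
	return (0);
}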

@@ -578,16 +727,30 @@ updatepri(struct ksegrp *kg)
* than that of the current process.
*/
static void
#ifdef KSE
resetpriority(struct ksegrp *kg)
#else
resetpriority(struct thread *td)
#endif
{
register unsigned int newpriority;

#ifdef KSE
if (kg->kg_pri_class == PRI_TIMESHARE) {
newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (kg->kg_proc->p_nice - PRIO_MIN);
#else
if (td->td_pri_class == PRI_TIMESHARE) {
newpriority = PUSER + td->td_estcpu / INVERSE_ESTCPU_WEIGHT +
NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
#endif
newpriority = min(max(newpriority, PRI_MIN_TIMESHARE),
PRI_MAX_TIMESHARE);
#ifdef KSE
sched_user_prio(kg, newpriority);
#else
sched_user_prio(td, newpriority);
#endif
}
}
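Worked examples of the formula above, assuming this era's constants (PUSER 160, INVERSE_ESTCPU_WEIGHT 8, NICE_WEIGHT 1 as defined earlier in this file, PRIO_MIN -20, timeshare range 160-223); illustrative only:

/*
 * estcpu =  80, nice =   0: 160 + 80/8 + 1 * (0 + 20)   = 190
 * estcpu =   0, nice = -20: 160 +  0   + 1 * (-20 + 20) = 160  (best timeshare)
 * estcpu = 255, nice = +20: 160 + 31   + 1 * (20 + 20)  = 231 -> clamped to 223
 */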

@@ -596,7 +759,11 @@ resetpriority(struct ksegrp *kg)
* priority changes.
*/
static void
#ifdef KSE
resetpriority_thread(struct thread *td, struct ksegrp *kg)
#else
resetpriority_thread(struct thread *td)
#endif
{

/* Only change threads with a time sharing user priority. */
@@ -607,7 +774,11 @@ resetpriority_thread(struct thread *td, struct ksegrp *kg)
/* XXX the whole needresched thing is broken, but not silly. */
maybe_resched(td);

#ifdef KSE
sched_prio(td, kg->kg_user_pri);
#else
sched_prio(td, td->td_user_pri);
#endif
}

/* ARGSUSED */
@@ -643,12 +814,16 @@ schedinit(void)
* Set up the scheduler specific parts of proc0.
*/
proc0.p_sched = NULL; /* XXX */
#ifdef KSE
ksegrp0.kg_sched = &kg_sched0;
#endif
thread0.td_sched = &kse0;
kse0.ke_thread = &thread0;
kse0.ke_state = KES_THREAD;
#ifdef KSE
kg_sched0.skg_concurrency = 1;
kg_sched0.skg_avail_opennings = 0; /* we are already running */
#endif
}

int
@@ -672,8 +847,13 @@ sched_rr_interval(void)
/*
* We adjust the priority of the current process. The priority of
* a process gets worse as it accumulates CPU time. The cpu usage
* ifdef KSE
* estimator (kg_estcpu) is increased here. resetpriority() will
* compute a different priority each time kg_estcpu increases by
* else
* estimator (td_estcpu) is increased here. resetpriority() will
* compute a different priority each time td_estcpu increases by
* endif
* INVERSE_ESTCPU_WEIGHT
* (until MAXPRI is reached). The cpu usage estimator ramps up
* quite quickly when the process is running (linearly), and decays
@@ -686,21 +866,33 @@ sched_rr_interval(void)
void
sched_clock(struct thread *td)
{
#ifdef KSE
struct ksegrp *kg;
#endif
struct kse *ke;

mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
kg = td->td_ksegrp;
#endif
ke = td->td_kse;

ke->ke_cpticks++;
#ifdef KSE
kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
resetpriority(kg);
resetpriority_thread(td, kg);
#else
td->td_estcpu = ESTCPULIM(td->td_estcpu + 1);
if ((td->td_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
resetpriority(td);
resetpriority_thread(td);
#endif
}
}
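A small illustration of the ramp this gives: with the statclock at an assumed 128 Hz, a thread that never leaves the CPU gains 128 estcpu per second (until ESTCPULIM caps it) and has its user priority recomputed on every 8th tick, assuming INVERSE_ESTCPU_WEIGHT is 8 as in this era's sources:

#include <stdio.h>

#define INVERSE_ESTCPU_WEIGHT	8	/* assumed, per sched_4bsd.c of this era */

int
main(void)
{
	unsigned int estcpu = 0;
	int tick, recomputes = 0;

	for (tick = 0; tick < 128; tick++) {	/* one second of statclock ticks */
		estcpu++;			/* sched_clock(): estcpu + 1 */
		if ((estcpu % INVERSE_ESTCPU_WEIGHT) == 0)
			recomputes++;		/* resetpriority() would run here */
	}
	printf("estcpu %u, %d recomputations\n", estcpu, recomputes);
	return (0);
}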

#ifdef KSE
/*
* charge child's scheduling cpu usage to parent.
*
@@ -709,13 +901,30 @@ sched_clock(struct thread *td)
* all ksegrps, this is strictly as expected. Assume that the child process
* aggregated all the estcpu into the 'built-in' ksegrp.
*/
#else
/*
* charge child's scheduling cpu usage to parent.
*/
#endif
void
sched_exit(struct proc *p, struct thread *td)
{
#ifdef KSE
sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), td);
sched_exit_thread(FIRST_THREAD_IN_PROC(p), td);
#else
struct thread *parent = FIRST_THREAD_IN_PROC(p);

CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
td, td->td_proc->p_comm, td->td_priority);

parent->td_estcpu = ESTCPULIM(parent->td_estcpu + td->td_estcpu);
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
#endif
}

#ifdef KSE
void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd)
{
@@ -732,14 +941,21 @@ sched_exit_thread(struct thread *td, struct thread *child)
if ((child->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_rem();
}
#endif

void
sched_fork(struct thread *td, struct thread *childtd)
{
#ifdef KSE
sched_fork_ksegrp(td, childtd->td_ksegrp);
sched_fork_thread(td, childtd);
#else
childtd->td_estcpu = td->td_estcpu;
sched_newthread(childtd);
#endif
}

#ifdef KSE
void
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
{
@@ -752,37 +968,61 @@ sched_fork_thread(struct thread *td, struct thread *childtd)
{
sched_newthread(childtd);
}
#endif

void
sched_nice(struct proc *p, int nice)
{
#ifdef KSE
struct ksegrp *kg;
#endif
struct thread *td;

PROC_LOCK_ASSERT(p, MA_OWNED);
mtx_assert(&sched_lock, MA_OWNED);
p->p_nice = nice;
#ifdef KSE
FOREACH_KSEGRP_IN_PROC(p, kg) {
resetpriority(kg);
FOREACH_THREAD_IN_GROUP(kg, td) {
resetpriority_thread(td, kg);
}
}
#else
FOREACH_THREAD_IN_PROC(p, td) {
resetpriority(td);
resetpriority_thread(td);
}
#endif
}

void
#ifdef KSE
sched_class(struct ksegrp *kg, int class)
#else
sched_class(struct thread *td, int class)
#endif
{
mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
kg->kg_pri_class = class;
#else
td->td_pri_class = class;
#endif
}

#ifdef KSE
/*
* Adjust the priority of a thread.
* This may include moving the thread within the KSEGRP,
* changing the assignment of a kse to the thread,
* and moving a KSE in the system run queue.
*/
#else
/*
* Adjust the priority of a thread.
*/
#endif
static void
sched_priority(struct thread *td, u_char prio)
{
@@ -827,7 +1067,11 @@ sched_unlend_prio(struct thread *td, u_char prio)

if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
td->td_base_pri <= PRI_MAX_TIMESHARE)
#ifdef KSE
base_pri = td->td_ksegrp->kg_user_pri;
#else
base_pri = td->td_user_pri;
#endif
else
base_pri = td->td_base_pri;
if (prio >= base_pri) {
@@ -865,11 +1109,18 @@ sched_prio(struct thread *td, u_char prio)
}

void
#ifdef KSE
sched_user_prio(struct ksegrp *kg, u_char prio)
#else
sched_user_prio(struct thread *td, u_char prio)
#endif
{
#ifdef KSE
struct thread *td;
#endif
u_char oldprio;

#ifdef KSE
kg->kg_base_user_pri = prio;

/* XXXKSE only for 1:1 */
@@ -885,6 +1136,12 @@ sched_user_prio(struct ksegrp *kg, u_char prio)

oldprio = kg->kg_user_pri;
kg->kg_user_pri = prio;
#else
td->td_base_user_pri = prio;

oldprio = td->td_user_pri;
td->td_user_pri = prio;
#endif

if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -897,8 +1154,13 @@ sched_lend_user_prio(struct thread *td, u_char prio)

td->td_flags |= TDF_UBORROWING;

#ifdef KSE
oldprio = td->td_ksegrp->kg_user_pri;
td->td_ksegrp->kg_user_pri = prio;
#else
oldprio = td->td_user_pri;
td->td_user_pri = prio;
#endif

if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -907,13 +1169,23 @@ sched_lend_user_prio(struct thread *td, u_char prio)
void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif
u_char base_pri;

#ifdef KSE
base_pri = kg->kg_base_user_pri;
#else
base_pri = td->td_base_user_pri;
#endif
if (prio >= base_pri) {
td->td_flags &= ~TDF_UBORROWING;
#ifdef KSE
sched_user_prio(kg, base_pri);
#else
sched_user_prio(td, base_pri);
#endif
} else
sched_lend_user_prio(td, prio);
}
@@ -923,16 +1195,24 @@ sched_sleep(struct thread *td)
{

mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
td->td_ksegrp->kg_slptime = 0;
#else
td->td_slptime = 0;
#endif
}

#ifdef KSE
static void remrunqueue(struct thread *td);
#endif

void
sched_switch(struct thread *td, struct thread *newtd, int flags)
{
struct kse *ke;
#ifdef KSE
struct ksegrp *kg;
#endif
struct proc *p;

ke = td->td_kse;
@@ -942,6 +1222,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)

if ((p->p_flag & P_NOLOAD) == 0)
sched_load_rem();
#ifdef KSE
/*
* We are volunteering to switch out so we get to nominate
* a successor for the rest of our quantum
@@ -967,6 +1248,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
}
}
}
#endif

if (newtd)
newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
@@ -984,12 +1266,15 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (td == PCPU_GET(idlethread))
TD_SET_CAN_RUN(td);
else {
#ifdef KSE
SLOT_RELEASE(td->td_ksegrp);
#endif
if (TD_IS_RUNNING(td)) {
/* Put us back on the run queue (kse and all). */
setrunqueue(td, (flags & SW_PREEMPT) ?
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
SRQ_OURSELF|SRQ_YIELDING);
#ifdef KSE
} else if (p->p_flag & P_HADTHREADS) {
/*
* We will not be on the run queue. So we must be
@@ -999,6 +1284,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
*/
if (newtd == NULL || newtd->td_ksegrp != td->td_ksegrp)
slot_fill(td->td_ksegrp);
#endif
}
}
if (newtd) {
@@ -1007,12 +1293,16 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
* as if it had been added to the run queue and selected.
* It came from:
* * A preemption
* ifdef KSE
* * An upcall
* endif
* * A followon
*/
KASSERT((newtd->td_inhibitors == 0),
("trying to run inhibitted thread"));
#ifdef KSE
SLOT_USE(newtd->td_ksegrp);
#endif
newtd->td_kse->ke_flags |= KEF_DIDRUN;
TD_SET_RUNNING(newtd);
if ((newtd->td_proc->p_flag & P_NOLOAD) == 0)
@@ -1026,6 +1316,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
#endif

cpu_switch(td, newtd);
#ifdef HWPMC_HOOKS
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
@@ -1040,15 +1331,25 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
void
sched_wakeup(struct thread *td)
{
#ifdef KSE
struct ksegrp *kg;
#endif

mtx_assert(&sched_lock, MA_OWNED);
#ifdef KSE
kg = td->td_ksegrp;
if (kg->kg_slptime > 1) {
updatepri(kg);
resetpriority(kg);
}
kg->kg_slptime = 0;
#else
if (td->td_slptime > 1) {
updatepri(td);
resetpriority(td);
}
td->td_slptime = 0;
#endif
setrunqueue(td, SRQ_BORING);
}
@ -1188,8 +1489,13 @@ sched_add(struct thread *td, int flags)
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
KASSERT(ke->ke_state != KES_ONRUNQ,
|
||||
("sched_add: kse %p (%s) already in run queue", ke,
|
||||
#ifdef KSE
|
||||
ke->ke_proc->p_comm));
|
||||
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
|
||||
#else
|
||||
td->td_proc->p_comm));
|
||||
KASSERT(td->td_proc->p_sflag & PS_INMEM,
|
||||
#endif
|
||||
("sched_add: process swapped out"));
|
||||
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
|
||||
td, td->td_proc->p_comm, td->td_priority, curthread,
|
||||
@ -1239,7 +1545,9 @@ sched_add(struct thread *td, int flags)
|
||||
|
||||
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
|
||||
sched_load_add();
|
||||
#ifdef KSE
|
||||
SLOT_USE(td->td_ksegrp);
|
||||
#endif
|
||||
runq_add(ke->ke_runq, ke, flags);
|
||||
ke->ke_state = KES_ONRUNQ;
|
||||
}
|
||||
@ -1250,8 +1558,13 @@ sched_add(struct thread *td, int flags)
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
KASSERT(ke->ke_state != KES_ONRUNQ,
|
||||
("sched_add: kse %p (%s) already in run queue", ke,
|
||||
#ifdef KSE
|
||||
ke->ke_proc->p_comm));
|
||||
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
|
||||
#else
|
||||
td->td_proc->p_comm));
|
||||
KASSERT(td->td_proc->p_sflag & PS_INMEM,
|
||||
#endif
|
||||
("sched_add: process swapped out"));
|
||||
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
|
||||
td, td->td_proc->p_comm, td->td_priority, curthread,
|
||||
@ -1276,7 +1589,9 @@ sched_add(struct thread *td, int flags)
|
||||
}
|
||||
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
|
||||
sched_load_add();
|
||||
#ifdef KSE
|
||||
SLOT_USE(td->td_ksegrp);
|
||||
#endif
|
||||
runq_add(ke->ke_runq, ke, flags);
|
||||
ke->ke_state = KES_ONRUNQ;
|
||||
maybe_resched(td);
|
||||
@ -1289,7 +1604,11 @@ sched_rem(struct thread *td)
|
||||
struct kse *ke;
|
||||
|
||||
ke = td->td_kse;
|
||||
#ifdef KSE
|
||||
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
|
||||
#else
|
||||
KASSERT(td->td_proc->p_sflag & PS_INMEM,
|
||||
#endif
|
||||
("sched_rem: process swapped out"));
|
||||
KASSERT((ke->ke_state == KES_ONRUNQ),
|
||||
("sched_rem: KSE not on run queue"));
|
||||
@ -1300,7 +1619,9 @@ sched_rem(struct thread *td)
|
||||
|
||||
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
|
||||
sched_load_rem();
|
||||
#ifdef KSE
|
||||
SLOT_RELEASE(td->td_ksegrp);
|
||||
#endif
|
||||
runq_remove(ke->ke_runq, ke);
|
||||
|
||||
ke->ke_state = KES_THREAD;
|
||||
@ -1310,7 +1631,11 @@ sched_rem(struct thread *td)
|
||||
* Select threads to run.
|
||||
* Notice that the running threads still consume a slot.
|
||||
*/
|
||||
#ifdef KSE
|
||||
struct kse *
|
||||
#else
|
||||
struct thread *
|
||||
#endif
|
||||
sched_choose(void)
|
||||
{
|
||||
struct kse *ke;
|
||||
@ -1339,20 +1664,36 @@ sched_choose(void)
|
||||
ke = runq_choose(&runq);
|
||||
#endif
|
||||
|
||||
#ifdef KSE
|
||||
if (ke != NULL) {
|
||||
#else
|
||||
if (ke) {
|
||||
#endif
|
||||
runq_remove(rq, ke);
|
||||
ke->ke_state = KES_THREAD;
|
||||
|
||||
#ifdef KSE
|
||||
KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
|
||||
("sched_choose: process swapped out"));
|
||||
#else
|
||||
KASSERT(ke->ke_thread->td_proc->p_sflag & PS_INMEM,
|
||||
("sched_choose: process swapped out"));
|
||||
return (ke->ke_thread);
|
||||
#endif
|
||||
}
|
||||
#ifdef KSE
|
||||
return (ke);
|
||||
#else
|
||||
return (NULL);
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
sched_userret(struct thread *td)
|
||||
{
|
||||
#ifdef KSE
|
||||
struct ksegrp *kg;
|
||||
#endif
|
||||
/*
|
||||
* XXX we cheat slightly on the locking here to avoid locking in
|
||||
* the usual case. Setting td_priority here is essentially an
|
||||
@ -1364,6 +1705,7 @@ sched_userret(struct thread *td)
|
||||
*/
|
||||
KASSERT((td->td_flags & TDF_BORROWING) == 0,
|
||||
("thread with borrowed priority returning to userland"));
|
||||
#ifdef KSE
|
||||
kg = td->td_ksegrp;
|
||||
if (td->td_priority != kg->kg_user_pri) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
@ -1371,6 +1713,14 @@ sched_userret(struct thread *td)
|
||||
td->td_base_pri = kg->kg_user_pri;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
#else
|
||||
if (td->td_priority != td->td_user_pri) {
|
||||
mtx_lock_spin(&sched_lock);
|
||||
td->td_priority = td->td_user_pri;
|
||||
td->td_base_pri = td->td_user_pri;
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
void
|
||||
@ -1413,11 +1763,17 @@ sched_is_bound(struct thread *td)
|
||||
void
|
||||
sched_relinquish(struct thread *td)
|
||||
{
|
||||
#ifdef KSE
|
||||
struct ksegrp *kg;
|
||||
|
||||
kg = td->td_ksegrp;
|
||||
#endif
|
||||
mtx_lock_spin(&sched_lock);
|
||||
#ifdef KSE
|
||||
if (kg->kg_pri_class == PRI_TIMESHARE)
|
||||
#else
|
||||
if (td->td_pri_class == PRI_TIMESHARE)
|
||||
#endif
|
||||
sched_prio(td, PRI_MAX_TIMESHARE);
|
||||
mi_switch(SW_VOL, NULL);
|
||||
mtx_unlock_spin(&sched_lock);
|
||||
@ -1429,11 +1785,13 @@ sched_load(void)
|
||||
return (sched_tdcnt);
|
||||
}
|
||||
|
||||
#ifdef KSE
|
||||
int
|
||||
sched_sizeof_ksegrp(void)
|
||||
{
|
||||
return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
|
||||
}
|
||||
#endif
|
||||
|
||||
int
|
||||
sched_sizeof_proc(void)
|
||||
|
@@ -95,7 +95,7 @@ int tickincr = 1 << 10;
* The schedulable entity that can be given a context to run. A process may
* have several of these.
*/
struct kse {
struct td_sched { /* really kse */
TAILQ_ENTRY(kse) ke_procq; /* (j/z) Run queue. */
int ke_flags; /* (j) KEF_* flags. */
struct thread *ke_thread; /* (*) Active associated thread. */
@@ -114,11 +114,11 @@ struct kse {
int ke_ftick; /* First tick that we were running on */
int ke_ticks; /* Tick count */

/* originally from kg_sched */
int skg_slptime; /* Number of ticks we vol. slept */
int skg_runtime; /* Number of ticks we were running */
};
#define td_kse td_sched
#define td_slptime td_kse->ke_slptime
#define ke_proc ke_thread->td_proc
#define ke_ksegrp ke_thread->td_ksegrp
#define ke_assign ke_procq.tqe_next
/* flags kept in ke_flags */
#define KEF_ASSIGNED 0x0001 /* Thread is being migrated. */
@@ -131,25 +131,7 @@ struct kse {
#define KEF_DIDRUN 0x02000 /* Thread actually ran. */
#define KEF_EXIT 0x04000 /* Thread is being killed. */

struct kg_sched {
struct thread *skg_last_assigned; /* (j) Last thread assigned to */
/* the system scheduler */
int skg_slptime; /* Number of ticks we vol. slept */
int skg_runtime; /* Number of ticks we were running */
int skg_avail_opennings; /* (j) Num unfilled slots in group.*/
int skg_concurrency; /* (j) Num threads requested in group.*/
};
#define kg_last_assigned kg_sched->skg_last_assigned
#define kg_avail_opennings kg_sched->skg_avail_opennings
#define kg_concurrency kg_sched->skg_concurrency
#define kg_runtime kg_sched->skg_runtime
#define kg_slptime kg_sched->skg_slptime

#define SLOT_RELEASE(kg) (kg)->kg_avail_opennings++
#define SLOT_USE(kg) (kg)->kg_avail_opennings--

static struct kse kse0;
static struct kg_sched kg_sched0;

/*
* The priority is primarily determined by the interactivity score. Thus, we
@@ -207,11 +189,11 @@ static struct kg_sched kg_sched0;
* This macro determines whether or not the thread belongs on the current or
* next run queue.
*/
#define SCHED_INTERACTIVE(kg) \
(sched_interact_score(kg) < SCHED_INTERACT_THRESH)
#define SCHED_CURR(kg, ke) \
#define SCHED_INTERACTIVE(td) \
(sched_interact_score(td) < SCHED_INTERACT_THRESH)
#define SCHED_CURR(td, ke) \
((ke->ke_thread->td_flags & TDF_BORROWING) || \
(ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(kg))
(ke->ke_flags & KEF_PREEMPTED) || SCHED_INTERACTIVE(td))

/*
* Cpu percentage computation macros and defines.
@@ -288,14 +270,13 @@ static struct kseq kseq_cpu;
#define KSEQ_CPU(x) (&kseq_cpu)
#endif

static void slot_fill(struct ksegrp *);
static struct kse *sched_choose(void); /* XXX Should be thread * */
static void sched_slice(struct kse *);
static void sched_priority(struct ksegrp *);
static void sched_priority(struct thread *);
static void sched_thread_priority(struct thread *, u_char);
static int sched_interact_score(struct ksegrp *);
static void sched_interact_update(struct ksegrp *);
static void sched_interact_fork(struct ksegrp *);
static int sched_interact_score(struct thread *);
static void sched_interact_update(struct thread *);
static void sched_interact_fork(struct thread *);
static void sched_pctcpu_update(struct kse *);

/* Operations on per processor queues */
@@ -379,19 +360,19 @@ kseq_load_add(struct kseq *kseq, struct kse *ke)
{
int class;
mtx_assert(&sched_lock, MA_OWNED);
class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
class = PRI_BASE(ke->ke_thread->td_pri_class);
if (class == PRI_TIMESHARE)
kseq->ksq_load_timeshare++;
kseq->ksq_load++;
CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
if (class != PRI_ITHD && (ke->ke_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
kseq->ksq_group->ksg_load++;
#else
kseq->ksq_sysload++;
#endif
if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
kseq_nice_add(kseq, ke->ke_proc->p_nice);
if (ke->ke_thread->td_pri_class == PRI_TIMESHARE)
kseq_nice_add(kseq, ke->ke_thread->td_proc->p_nice);
}

static void
@@ -399,10 +380,10 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke)
{
int class;
mtx_assert(&sched_lock, MA_OWNED);
class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
class = PRI_BASE(ke->ke_thread->td_pri_class);
if (class == PRI_TIMESHARE)
kseq->ksq_load_timeshare--;
if (class != PRI_ITHD && (ke->ke_proc->p_flag & P_NOLOAD) == 0)
if (class != PRI_ITHD && (ke->ke_thread->td_proc->p_flag & P_NOLOAD) == 0)
#ifdef SMP
kseq->ksq_group->ksg_load--;
#else
@@ -411,8 +392,8 @@ kseq_load_rem(struct kseq *kseq, struct kse *ke)
kseq->ksq_load--;
CTR1(KTR_SCHED, "load: %d", kseq->ksq_load);
ke->ke_runq = NULL;
if (ke->ke_ksegrp->kg_pri_class == PRI_TIMESHARE)
kseq_nice_rem(kseq, ke->ke_proc->p_nice);
if (ke->ke_thread->td_pri_class == PRI_TIMESHARE)
kseq_nice_rem(kseq, ke->ke_thread->td_proc->p_nice);
}

static void
@@ -686,7 +667,7 @@ kseq_notify(struct kse *ke, int cpu)

kseq = KSEQ_CPU(cpu);
/* XXX */
class = PRI_BASE(ke->ke_ksegrp->kg_pri_class);
class = PRI_BASE(ke->ke_thread->td_pri_class);
if ((class == PRI_TIMESHARE || class == PRI_REALTIME) &&
(kseq_idle & kseq->ksq_group->ksg_mask))
atomic_clear_int(&kseq_idle, kseq->ksq_group->ksg_mask);
@@ -889,10 +870,10 @@ kseq_choose(struct kseq *kseq)
* TIMESHARE kse group and its nice was too far out
* of the range that receives slices.
*/
nice = ke->ke_proc->p_nice + (0 - kseq->ksq_nicemin);
nice = ke->ke_thread->td_proc->p_nice + (0 - kseq->ksq_nicemin);
#if 0
if (ke->ke_slice == 0 || (nice > SCHED_SLICE_NTHRESH &&
ke->ke_proc->p_nice != 0)) {
ke->ke_thread->td_proc->p_nice != 0)) {
runq_remove(ke->ke_runq, ke);
sched_slice(ke);
ke->ke_runq = kseq->ksq_next;
@@ -1045,41 +1026,45 @@ sched_initticks(void *dummy)
* process.
*/
static void
sched_priority(struct ksegrp *kg)
sched_priority(struct thread *td)
{
int pri;

if (kg->kg_pri_class != PRI_TIMESHARE)
if (td->td_pri_class != PRI_TIMESHARE)
return;

pri = SCHED_PRI_INTERACT(sched_interact_score(kg));
pri = SCHED_PRI_INTERACT(sched_interact_score(td));
pri += SCHED_PRI_BASE;
pri += kg->kg_proc->p_nice;
pri += td->td_proc->p_nice;

if (pri > PRI_MAX_TIMESHARE)
pri = PRI_MAX_TIMESHARE;
else if (pri < PRI_MIN_TIMESHARE)
pri = PRI_MIN_TIMESHARE;

#ifdef KSE
sched_user_prio(kg, pri);
#else
sched_user_prio(td, pri);
#endif

return;
}

/*
* Calculate a time slice based on the properties of the kseg and the runq
* that we're on. This is only for PRI_TIMESHARE ksegrps.
* that we're on. This is only for PRI_TIMESHARE threads.
*/
static void
sched_slice(struct kse *ke)
{
struct kseq *kseq;
struct ksegrp *kg;
struct thread *td;

kg = ke->ke_ksegrp;
td = ke->ke_thread;
kseq = KSEQ_CPU(ke->ke_cpu);

if (ke->ke_thread->td_flags & TDF_BORROWING) {
if (td->td_flags & TDF_BORROWING) {
ke->ke_slice = SCHED_SLICE_MIN;
return;
}
@@ -1099,7 +1084,7 @@ sched_slice(struct kse *ke)
*
* There is 20 point window that starts relative to the least
* nice kse on the run queue. Slice size is determined by
* the kse distance from the last nice ksegrp.
* the kse distance from the last nice thread.
*
* If the kse is outside of the window it will get no slice
* and will be reevaluated each time it is selected on the
@@ -1107,16 +1092,16 @@ sched_slice(struct kse *ke)
* a nice -20 is running. They are always granted a minimum
* slice.
*/
if (!SCHED_INTERACTIVE(kg)) {
if (!SCHED_INTERACTIVE(td)) {
int nice;

nice = kg->kg_proc->p_nice + (0 - kseq->ksq_nicemin);
nice = td->td_proc->p_nice + (0 - kseq->ksq_nicemin);
if (kseq->ksq_load_timeshare == 0 ||
kg->kg_proc->p_nice < kseq->ksq_nicemin)
td->td_proc->p_nice < kseq->ksq_nicemin)
ke->ke_slice = SCHED_SLICE_MAX;
else if (nice <= SCHED_SLICE_NTHRESH)
ke->ke_slice = SCHED_SLICE_NICE(nice);
else if (kg->kg_proc->p_nice == 0)
else if (td->td_proc->p_nice == 0)
ke->ke_slice = SCHED_SLICE_MIN;
else
ke->ke_slice = SCHED_SLICE_MIN; /* 0 */
@@ -1133,11 +1118,11 @@ sched_slice(struct kse *ke)
* adjusted to more than double their maximum.
*/
static void
sched_interact_update(struct ksegrp *kg)
sched_interact_update(struct thread *td)
{
int sum;

sum = kg->kg_runtime + kg->kg_slptime;
sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
if (sum < SCHED_SLP_RUN_MAX)
return;
/*
@@ -1146,40 +1131,40 @@ sched_interact_update(struct ksegrp *kg)
* us into the range of [4/5 * SCHED_INTERACT_MAX, SCHED_INTERACT_MAX]
*/
if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
kg->kg_runtime /= 2;
kg->kg_slptime /= 2;
td->td_sched->skg_runtime /= 2;
td->td_sched->skg_slptime /= 2;
return;
}
kg->kg_runtime = (kg->kg_runtime / 5) * 4;
kg->kg_slptime = (kg->kg_slptime / 5) * 4;
td->td_sched->skg_runtime = (td->td_sched->skg_runtime / 5) * 4;
td->td_sched->skg_slptime = (td->td_sched->skg_slptime / 5) * 4;
}
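Illustrative arithmetic for the two branches above, with M standing for SCHED_SLP_RUN_MAX (hypothetical M = 1000 for readable numbers):

/*
 * runtime 900, slptime 400 (sum 1300 > 6/5 * M): halve both -> 450/200;
 *     a large overshoot is corrected quickly at the cost of some history.
 * runtime 700, slptime 400 (sum 1100, between M and 6/5 * M): scale both
 *     by 4/5 -> 560/320, landing the sum at 880, inside [4/5 * M, M].
 * Both transforms preserve the runtime:slptime ratio, which is all the
 * interactivity score below consumes, so scoring is unaffected.
 */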

static void
sched_interact_fork(struct ksegrp *kg)
sched_interact_fork(struct thread *td)
{
int ratio;
int sum;

sum = kg->kg_runtime + kg->kg_slptime;
sum = td->td_sched->skg_runtime + td->td_sched->skg_slptime;
if (sum > SCHED_SLP_RUN_FORK) {
ratio = sum / SCHED_SLP_RUN_FORK;
kg->kg_runtime /= ratio;
kg->kg_slptime /= ratio;
td->td_sched->skg_runtime /= ratio;
td->td_sched->skg_slptime /= ratio;
}
}

static int
sched_interact_score(struct ksegrp *kg)
sched_interact_score(struct thread *td)
{
int div;

if (kg->kg_runtime > kg->kg_slptime) {
div = max(1, kg->kg_runtime / SCHED_INTERACT_HALF);
if (td->td_sched->skg_runtime > td->td_sched->skg_slptime) {
div = max(1, td->td_sched->skg_runtime / SCHED_INTERACT_HALF);
return (SCHED_INTERACT_HALF +
(SCHED_INTERACT_HALF - (kg->kg_slptime / div)));
} if (kg->kg_slptime > kg->kg_runtime) {
div = max(1, kg->kg_slptime / SCHED_INTERACT_HALF);
return (kg->kg_runtime / div);
(SCHED_INTERACT_HALF - (td->td_sched->skg_slptime / div)));
} if (td->td_sched->skg_slptime > td->td_sched->skg_runtime) {
div = max(1, td->td_sched->skg_slptime / SCHED_INTERACT_HALF);
return (td->td_sched->skg_runtime / div);
}
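Worked examples of the score, assuming SCHED_INTERACT_HALF is 50 and the interactive threshold is 30, as in this era's sched_ule.c; illustrative only:

/*
 * runtime 400, slptime 100: div = max(1, 400/50) = 8,
 *     score = 50 + (50 - 100/8) = 88 -> CPU-bound, queued as non-interactive.
 * runtime 100, slptime 400: div = max(1, 400/50) = 8,
 *     score = 100/8 = 12             -> below 30, treated as interactive.
 * Equal counters fall through to the tail of the function (not shown here).
 */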

/*
@@ -1202,12 +1187,9 @@ schedinit(void)
* Set up the scheduler specific parts of proc0.
*/
proc0.p_sched = NULL; /* XXX */
ksegrp0.kg_sched = &kg_sched0;
thread0.td_sched = &kse0;
kse0.ke_thread = &thread0;
kse0.ke_state = KES_THREAD;
kg_sched0.skg_concurrency = 1;
kg_sched0.skg_avail_opennings = 0; /* we are already running */
}

/*
@@ -1307,7 +1289,7 @@ sched_unlend_prio(struct thread *td, u_char prio)

if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
td->td_base_pri <= PRI_MAX_TIMESHARE)
base_pri = td->td_ksegrp->kg_user_pri;
base_pri = td->td_user_pri;
else
base_pri = td->td_base_pri;
if (prio >= base_pri) {
@@ -1345,11 +1327,18 @@ sched_prio(struct thread *td, u_char prio)
}

void
#ifdef KSE
sched_user_prio(struct ksegrp *kg, u_char prio)
#else
sched_user_prio(struct thread *td, u_char prio)
#endif
{
#ifdef KSE
struct thread *td;
#endif
u_char oldprio;

#ifdef KSE
kg->kg_base_user_pri = prio;

/* XXXKSE only for 1:1 */
@@ -1365,6 +1354,12 @@ sched_user_prio(struct ksegrp *kg, u_char prio)

oldprio = kg->kg_user_pri;
kg->kg_user_pri = prio;
#else
td->td_base_user_pri = prio;

oldprio = td->td_user_pri;
td->td_user_pri = prio;
#endif

if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1377,8 +1372,13 @@ sched_lend_user_prio(struct thread *td, u_char prio)

td->td_flags |= TDF_UBORROWING;

#ifdef KSE
oldprio = td->td_ksegrp->kg_user_pri;
td->td_ksegrp->kg_user_pri = prio;
#else
oldprio = td->td_user_pri;
td->td_user_pri = prio;
#endif

if (TD_ON_UPILOCK(td) && oldprio != prio)
umtx_pi_adjust(td, oldprio);
@@ -1387,13 +1387,23 @@ sched_lend_user_prio(struct thread *td, u_char prio)
void
sched_unlend_user_prio(struct thread *td, u_char prio)
{
#ifdef KSE
struct ksegrp *kg = td->td_ksegrp;
#endif
u_char base_pri;

#ifdef KSE
base_pri = kg->kg_base_user_pri;
#else
base_pri = td->td_base_user_pri;
#endif
if (prio >= base_pri) {
td->td_flags &= ~TDF_UBORROWING;
#ifdef KSE
sched_user_prio(kg, base_pri);
#else
sched_user_prio(td, base_pri);
#endif
} else
sched_lend_user_prio(td, prio);
}
@ -1422,7 +1432,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
TD_SET_CAN_RUN(td);
|
||||
} else if ((ke->ke_flags & KEF_ASSIGNED) == 0) {
|
||||
/* We are ending our run so make our slot available again */
|
||||
SLOT_RELEASE(td->td_ksegrp);
|
||||
kseq_load_rem(ksq, ke);
|
||||
if (TD_IS_RUNNING(td)) {
|
||||
/*
|
||||
@ -1434,15 +1443,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
|
||||
SRQ_OURSELF|SRQ_YIELDING);
|
||||
ke->ke_flags &= ~KEF_HOLD;
|
||||
} else if ((td->td_proc->p_flag & P_HADTHREADS) &&
|
||||
(newtd == NULL || newtd->td_ksegrp != td->td_ksegrp))
|
||||
/*
|
||||
* We will not be on the run queue.
|
||||
* So we must be sleeping or similar.
|
||||
* Don't use the slot if we will need it
|
||||
* for newtd.
|
||||
*/
|
||||
slot_fill(td->td_ksegrp);
|
||||
}
|
||||
}
|
||||
if (newtd != NULL) {
|
||||
/*
|
||||
@ -1453,15 +1454,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
newtd->td_kse->ke_runq = ksq->ksq_curr;
|
||||
TD_SET_RUNNING(newtd);
|
||||
kseq_load_add(KSEQ_SELF(), newtd->td_kse);
|
||||
/*
|
||||
* XXX When we preempt, we've already consumed a slot because
|
||||
* we got here through sched_add(). However, newtd can come
|
||||
* from thread_switchout() which can't SLOT_USE() because
|
||||
* the SLOT code is scheduler dependent. We must use the
|
||||
* slot here otherwise.
|
||||
*/
|
||||
if ((flags & SW_PREEMPT) == 0)
|
||||
SLOT_USE(newtd->td_ksegrp);
|
||||
} else
|
||||
newtd = choosethread();
|
||||
if (td != newtd) {
|
||||
@ -1469,6 +1461,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
|
||||
PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
|
||||
#endif
|
||||
|
||||
cpu_switch(td, newtd);
|
||||
#ifdef HWPMC_HOOKS
|
||||
if (PMC_PROC_IS_USING_PMCS(td->td_proc))
|
||||
@ -1484,7 +1477,6 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
|
||||
void
|
||||
sched_nice(struct proc *p, int nice)
|
||||
{
|
||||
struct ksegrp *kg;
|
||||
struct kse *ke;
|
||||
struct thread *td;
|
||||
struct kseq *kseq;
|
||||
@ -1494,23 +1486,20 @@ sched_nice(struct proc *p, int nice)
|
||||
/*
|
||||
* We need to adjust the nice counts for running KSEs.
|
||||
*/
|
||||
FOREACH_KSEGRP_IN_PROC(p, kg) {
|
||||
if (kg->kg_pri_class == PRI_TIMESHARE) {
|
||||
FOREACH_THREAD_IN_GROUP(kg, td) {
|
||||
ke = td->td_kse;
|
||||
if (ke->ke_runq == NULL)
|
||||
continue;
|
||||
kseq = KSEQ_CPU(ke->ke_cpu);
|
||||
kseq_nice_rem(kseq, p->p_nice);
|
||||
kseq_nice_add(kseq, nice);
|
||||
}
|
||||
FOREACH_THREAD_IN_PROC(p, td) {
|
||||
if (td->td_pri_class == PRI_TIMESHARE) {
|
||||
ke = td->td_kse;
|
||||
if (ke->ke_runq == NULL)
|
||||
continue;
|
||||
kseq = KSEQ_CPU(ke->ke_cpu);
|
||||
kseq_nice_rem(kseq, p->p_nice);
|
||||
kseq_nice_add(kseq, nice);
|
||||
}
|
||||
}
|
||||
p->p_nice = nice;
|
||||
FOREACH_KSEGRP_IN_PROC(p, kg) {
|
||||
sched_priority(kg);
|
||||
FOREACH_THREAD_IN_GROUP(kg, td)
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
FOREACH_THREAD_IN_PROC(p, td) {
|
||||
sched_priority(td);
|
||||
td->td_flags |= TDF_NEEDRESCHED;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1519,7 +1508,7 @@ sched_sleep(struct thread *td)
|
||||
{
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
|
||||
td->td_slptime = ticks;
|
||||
td->td_kse->ke_slptime = ticks;
|
||||
}
|
||||
|
||||
void
|
||||
@ -1531,22 +1520,20 @@ sched_wakeup(struct thread *td)
|
||||
* Let the kseg know how long we slept for. This is because process
|
||||
* interactivity behavior is modeled in the kseg.
|
||||
*/
|
||||
if (td->td_slptime) {
|
||||
struct ksegrp *kg;
|
||||
if (td->td_kse->ke_slptime) {
|
||||
int hzticks;
|
||||
|
||||
kg = td->td_ksegrp;
|
||||
hzticks = (ticks - td->td_slptime) << 10;
|
||||
hzticks = (ticks - td->td_kse->ke_slptime) << 10;
|
||||
if (hzticks >= SCHED_SLP_RUN_MAX) {
|
||||
kg->kg_slptime = SCHED_SLP_RUN_MAX;
|
||||
kg->kg_runtime = 1;
|
||||
td->td_sched->skg_slptime = SCHED_SLP_RUN_MAX;
|
||||
td->td_sched->skg_runtime = 1;
|
||||
} else {
|
||||
kg->kg_slptime += hzticks;
|
||||
sched_interact_update(kg);
|
||||
td->td_sched->skg_slptime += hzticks;
|
||||
sched_interact_update(td);
|
||||
}
|
||||
sched_priority(kg);
|
||||
sched_priority(td);
|
||||
sched_slice(td->td_kse);
|
||||
td->td_slptime = 0;
|
||||
td->td_kse->ke_slptime = 0;
|
||||
}
|
||||
setrunqueue(td, SRQ_BORING);
|
||||
}
|
||||
@ -1556,37 +1543,23 @@ sched_wakeup(struct thread *td)
|
||||
* priority.
|
||||
*/
|
||||
void
|
||||
sched_fork(struct thread *td, struct thread *childtd)
|
||||
{
|
||||
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
|
||||
sched_fork_ksegrp(td, childtd->td_ksegrp);
|
||||
sched_fork_thread(td, childtd);
|
||||
}
|
||||
|
||||
void
|
||||
sched_fork_ksegrp(struct thread *td, struct ksegrp *child)
|
||||
{
|
||||
struct ksegrp *kg = td->td_ksegrp;
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
|
||||
child->kg_slptime = kg->kg_slptime;
|
||||
child->kg_runtime = kg->kg_runtime;
|
||||
child->kg_user_pri = kg->kg_user_pri;
|
||||
child->kg_base_user_pri = kg->kg_base_user_pri;
|
||||
sched_interact_fork(child);
|
||||
kg->kg_runtime += tickincr;
|
||||
sched_interact_update(kg);
|
||||
}
|
||||
|
||||
void
|
||||
sched_fork_thread(struct thread *td, struct thread *child)
|
||||
sched_fork(struct thread *td, struct thread *child)
|
||||
{
|
||||
struct kse *ke;
|
||||
struct kse *ke2;
|
||||
|
||||
mtx_assert(&sched_lock, MA_OWNED);
|
||||
|
||||
child->td_sched->skg_slptime = td->td_sched->skg_slptime;
|
||||
child->td_sched->skg_runtime = td->td_sched->skg_runtime;
|
||||
child->td_user_pri = td->td_user_pri;
|
||||
child->kg_base_user_pri = kg->kg_base_user_pri;
|
||||
sched_interact_fork(child);
|
||||
td->td_sched->skg_runtime += tickincr;
|
||||
sched_interact_update(td);
|
||||
|
||||
sched_newthread(child);
|
||||
|
||||
ke = td->td_kse;
|
||||
ke2 = child->td_kse;
|
||||
ke2->ke_slice = 1; /* Attempt to quickly learn interactivity. */
|
||||
@ -1600,55 +1573,52 @@ sched_fork_thread(struct thread *td, struct thread *child)
}

void
sched_class(struct ksegrp *kg, int class)
sched_class(struct thread *td, int class)
{
    struct kseq *kseq;
    struct kse *ke;
    struct thread *td;
    int nclass;
    int oclass;

    mtx_assert(&sched_lock, MA_OWNED);
    if (kg->kg_pri_class == class)
    if (td->td_pri_class == class)
        return;

    nclass = PRI_BASE(class);
    oclass = PRI_BASE(kg->kg_pri_class);
    FOREACH_THREAD_IN_GROUP(kg, td) {
        ke = td->td_kse;
        if ((ke->ke_state != KES_ONRUNQ &&
            ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
            continue;
        kseq = KSEQ_CPU(ke->ke_cpu);
    oclass = PRI_BASE(td->td_pri_class);
    ke = td->td_kse;
    if ((ke->ke_state != KES_ONRUNQ &&
        ke->ke_state != KES_THREAD) || ke->ke_runq == NULL)
        continue;
    kseq = KSEQ_CPU(ke->ke_cpu);

#ifdef SMP
    /*
     * On SMP if we're on the RUNQ we must adjust the transferable
     * count because could be changing to or from an interrupt
     * class.
     */
    if (ke->ke_state == KES_ONRUNQ) {
        if (KSE_CAN_MIGRATE(ke)) {
            kseq->ksq_transferable--;
            kseq->ksq_group->ksg_transferable--;
        }
        if (KSE_CAN_MIGRATE(ke)) {
            kseq->ksq_transferable++;
            kseq->ksq_group->ksg_transferable++;
        }
    /*
     * On SMP if we're on the RUNQ we must adjust the transferable
     * count because could be changing to or from an interrupt
     * class.
     */
    if (ke->ke_state == KES_ONRUNQ) {
        if (KSE_CAN_MIGRATE(ke)) {
            kseq->ksq_transferable--;
            kseq->ksq_group->ksg_transferable--;
        }
#endif
    if (oclass == PRI_TIMESHARE) {
        kseq->ksq_load_timeshare--;
        kseq_nice_rem(kseq, kg->kg_proc->p_nice);
    }
    if (nclass == PRI_TIMESHARE) {
        kseq->ksq_load_timeshare++;
        kseq_nice_add(kseq, kg->kg_proc->p_nice);
        if (KSE_CAN_MIGRATE(ke)) {
            kseq->ksq_transferable++;
            kseq->ksq_group->ksg_transferable++;
        }
    }
#endif
    if (oclass == PRI_TIMESHARE) {
        kseq->ksq_load_timeshare--;
        kseq_nice_rem(kseq, td->td_proc->p_nice);
    }
    if (nclass == PRI_TIMESHARE) {
        kseq->ksq_load_timeshare++;
        kseq_nice_add(kseq, td->td_proc->p_nice);
    }

    kg->kg_pri_class = class;
    td->td_pri_class = class;
}

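sched_class() above re-files a runnable thread's contribution to the per-CPU load counters when its scheduling class changes. The sketch below models only that accounting; the migratability rule is deliberately simplified (assume only interrupt-class threads are pinned), and all names here are stand-ins rather than the kernel's.

    #include <stdio.h>

    struct runq_stats {
        int load_timeshare; /* threads in the timesharing class */
        int transferable;   /* threads eligible for SMP migration */
    };

    enum { PRI_ITHD_M, PRI_REALTIME_M, PRI_TIMESHARE_M, PRI_IDLE_M };

    static void
    class_change(struct runq_stats *q, int oclass, int nclass)
    {
        /* simplified: interrupt-class threads are pinned, others migrate */
        if (oclass != PRI_ITHD_M)
            q->transferable--;
        if (nclass != PRI_ITHD_M)
            q->transferable++;
        if (oclass == PRI_TIMESHARE_M)
            q->load_timeshare--;
        if (nclass == PRI_TIMESHARE_M)
            q->load_timeshare++;
    }

    int
    main(void)
    {
        struct runq_stats q = { 1, 1 };

        class_change(&q, PRI_TIMESHARE_M, PRI_IDLE_M);
        printf("timeshare=%d transferable=%d\n",
            q.load_timeshare, q.transferable);
        return (0);
    }
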
/*
@ -1657,24 +1627,16 @@ sched_class(struct ksegrp *kg, int class)
void
sched_exit(struct proc *p, struct thread *childtd)
{
    struct thread *parent = FIRST_THREAD_IN_PROC(p);
    mtx_assert(&sched_lock, MA_OWNED);
    sched_exit_ksegrp(FIRST_KSEGRP_IN_PROC(p), childtd);
    sched_exit_thread(NULL, childtd);
}

void
sched_exit_ksegrp(struct ksegrp *kg, struct thread *td)
{
    /* kg->kg_slptime += td->td_ksegrp->kg_slptime; */
    kg->kg_runtime += td->td_ksegrp->kg_runtime;
    sched_interact_update(kg);
}

void
sched_exit_thread(struct thread *td, struct thread *childtd)
{
    CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
    CTR3(KTR_SCHED, "sched_exit: %p(%s) prio %d",
        childtd, childtd->td_proc->p_comm, childtd->td_priority);

    /* parent->td_sched->skg_slptime += childtd->td_sched->skg_slptime; */
    parent->td_sched->skg_runtime += childtd->td_sched->skg_runtime;
    sched_interact_update(parent);

    kseq_load_rem(KSEQ_CPU(childtd->td_kse->ke_cpu), childtd->td_kse);
}

@ -1682,7 +1644,6 @@ void
sched_clock(struct thread *td)
{
    struct kseq *kseq;
    struct ksegrp *kg;
    struct kse *ke;

    mtx_assert(&sched_lock, MA_OWNED);
@ -1700,7 +1661,6 @@ sched_clock(struct thread *td)
    kseq_assign(kseq);  /* Potentially sets NEEDRESCHED */
#endif
    ke = td->td_kse;
    kg = ke->ke_ksegrp;

    /* Adjust ticks for pctcpu */
    ke->ke_ticks++;
@ -1713,16 +1673,16 @@ sched_clock(struct thread *td)
    if (td->td_flags & TDF_IDLETD)
        return;
    /*
     * We only do slicing code for TIMESHARE ksegrps.
     * We only do slicing code for TIMESHARE threads.
     */
    if (kg->kg_pri_class != PRI_TIMESHARE)
    if (td->td_pri_class != PRI_TIMESHARE)
        return;
    /*
     * We used a tick charge it to the ksegrp so that we can compute our
     * We used a tick charge it to the thread so that we can compute our
     * interactivity.
     */
    kg->kg_runtime += tickincr;
    sched_interact_update(kg);
    td->td_sched->skg_runtime += tickincr;
    sched_interact_update(td);

    /*
     * We used up one time slice.
@ -1733,9 +1693,9 @@ sched_clock(struct thread *td)
     * We're out of time, recompute priorities and requeue.
     */
    kseq_load_rem(kseq, ke);
    sched_priority(kg);
    sched_priority(td);
    sched_slice(ke);
    if (SCHED_CURR(kg, ke))
    if (SCHED_CURR(td, ke))
        ke->ke_runq = kseq->ksq_curr;
    else
        ke->ke_runq = kseq->ksq_next;
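
The sched_clock() hunk charges each statclock tick to the thread and, once the slice is exhausted, recomputes priority and requeues on either the current or the next run queue. The sketch below models just that control flow; the interactivity test and slice length are invented stand-ins, not ULE's real formulas.

    #include <stdio.h>

    struct kthread {
        int runtime;    /* fixed-point CPU time feeding the interactivity score */
        int slptime;    /* fixed-point sleep time */
        int slice;      /* remaining ticks in the current slice */
    };

    static const int tickincr = 1 << 10;

    /* Stand-in interactivity test: more sleep than run looks interactive. */
    static int
    is_interactive(const struct kthread *kt)
    {
        return (kt->slptime > kt->runtime);
    }

    /* Returns 0 for the current queue, 1 for the next queue. */
    static int
    clock_tick(struct kthread *kt)
    {
        kt->runtime += tickincr;
        if (--kt->slice > 0)
            return (0);                 /* slice left: stay put */
        kt->slice = 10;                 /* illustrative new slice */
        return (is_interactive(kt) ? 0 : 1);
    }

    int
    main(void)
    {
        struct kthread kt = { 4096, 1024, 1 };

        printf("requeue on: %s\n", clock_tick(&kt) ? "ksq_next" : "ksq_curr");
        return (0);
    }
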
@ -1770,22 +1730,6 @@ sched_runnable(void)
    return (load);
}

void
sched_userret(struct thread *td)
{
    struct ksegrp *kg;

    KASSERT((td->td_flags & TDF_BORROWING) == 0,
        ("thread with borrowed priority returning to userland"));
    kg = td->td_ksegrp;
    if (td->td_priority != kg->kg_user_pri) {
        mtx_lock_spin(&sched_lock);
        td->td_priority = kg->kg_user_pri;
        td->td_base_pri = kg->kg_user_pri;
        mtx_unlock_spin(&sched_lock);
    }
}

struct kse *
sched_choose(void)
{
@ -1802,7 +1746,7 @@ sched_choose(void)
    ke = kseq_choose(kseq);
    if (ke) {
#ifdef SMP
        if (ke->ke_ksegrp->kg_pri_class == PRI_IDLE)
        if (ke->ke_thread->td_pri_class == PRI_IDLE)
            if (kseq_idled(kseq) == 0)
                goto restart;
#endif
@ -1822,7 +1766,6 @@ void
sched_add(struct thread *td, int flags)
{
    struct kseq *kseq;
    struct ksegrp *kg;
    struct kse *ke;
    int preemptive;
    int canmigrate;
@ -1833,13 +1776,10 @@ sched_add(struct thread *td, int flags)
        curthread->td_proc->p_comm);
    mtx_assert(&sched_lock, MA_OWNED);
    ke = td->td_kse;
    kg = td->td_ksegrp;
    canmigrate = 1;
    preemptive = !(flags & SRQ_YIELDING);
    class = PRI_BASE(kg->kg_pri_class);
    class = PRI_BASE(td->td_pri_class);
    kseq = KSEQ_SELF();
    if ((ke->ke_flags & KEF_INTERNAL) == 0)
        SLOT_USE(td->td_ksegrp);
    ke->ke_flags &= ~KEF_INTERNAL;
#ifdef SMP
    if (ke->ke_flags & KEF_ASSIGNED) {
@ -1859,8 +1799,8 @@ sched_add(struct thread *td, int flags)
#endif
    KASSERT(ke->ke_state != KES_ONRUNQ,
        ("sched_add: kse %p (%s) already in run queue", ke,
        ke->ke_proc->p_comm));
    KASSERT(ke->ke_proc->p_sflag & PS_INMEM,
        td->td_proc->p_comm));
    KASSERT(td->td_proc->p_sflag & PS_INMEM,
        ("sched_add: process swapped out"));
    KASSERT(ke->ke_runq == NULL,
        ("sched_add: KSE %p is still assigned to a run queue", ke));
@ -1875,7 +1815,7 @@ sched_add(struct thread *td, int flags)
        ke->ke_cpu = PCPU_GET(cpuid);
        break;
    case PRI_TIMESHARE:
        if (SCHED_CURR(kg, ke))
        if (SCHED_CURR(td, ke))
            ke->ke_runq = kseq->ksq_curr;
        else
            ke->ke_runq = kseq->ksq_next;
@ -1947,7 +1887,6 @@ sched_rem(struct thread *td)
        curthread->td_proc->p_comm);
    mtx_assert(&sched_lock, MA_OWNED);
    ke = td->td_kse;
    SLOT_RELEASE(td->td_ksegrp);
    ke->ke_flags &= ~KEF_PREEMPTED;
    if (ke->ke_flags & KEF_ASSIGNED) {
        ke->ke_flags |= KEF_REMOVED;
@ -1990,7 +1929,7 @@ sched_pctcpu(struct thread *td)
        pctcpu = (FSCALE * ((FSCALE * rtick)/realstathz)) >> FSHIFT;
    }

    ke->ke_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
    td->td_proc->p_swtime = ke->ke_ltick - ke->ke_ftick;
    mtx_unlock_spin(&sched_lock);

    return (pctcpu);
@ -2033,11 +1972,17 @@ sched_is_bound(struct thread *td)
void
sched_relinquish(struct thread *td)
{
#ifdef KSE
    struct ksegrp *kg;

    kg = td->td_ksegrp;
#endif
    mtx_lock_spin(&sched_lock);
#ifdef KSE
    if (kg->kg_pri_class == PRI_TIMESHARE)
#else
    if (td->td_pri_class == PRI_TIMESHARE)
#endif
        sched_prio(td, PRI_MAX_TIMESHARE);
    mi_switch(SW_VOL, NULL);
    mtx_unlock_spin(&sched_lock);
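
sched_relinquish() above is a compact example of the pattern this commit applies throughout: the same logical statement reads the scheduling class from the ksegrp in a KSE build and from the thread otherwise. A standalone model that compiles either way is sketched below -- toggle the KSE define to see both shapes; all types here are reduced stand-ins for the kernel's.

    #include <stdio.h>

    #define KSE /* comment out to model a kernel built without 'options KSE' */

    enum { PRI_TIMESHARE, PRI_REALTIME };

    struct ksegrp { int kg_pri_class; };
    struct thread {
    #ifdef KSE
        struct ksegrp *td_ksegrp;   /* class lives in the group */
    #else
        int td_pri_class;           /* class moved onto the thread */
    #endif
    };

    /* Model of sched_relinquish()'s class check under both configurations. */
    static int
    is_timeshare(struct thread *td)
    {
    #ifdef KSE
        return (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE);
    #else
        return (td->td_pri_class == PRI_TIMESHARE);
    #endif
    }

    int
    main(void)
    {
    #ifdef KSE
        struct ksegrp kg = { PRI_TIMESHARE };
        struct thread td = { &kg };
    #else
        struct thread td = { PRI_TIMESHARE };
    #endif
        printf("timeshare: %d\n", is_timeshare(&td));
        return (0);
    }
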
@ -2059,12 +2004,6 @@ sched_load(void)
#endif
}

int
sched_sizeof_ksegrp(void)
{
    return (sizeof(struct ksegrp) + sizeof(struct kg_sched));
}

int
sched_sizeof_proc(void)
{

@ -115,11 +115,13 @@ userret(struct thread *td, struct trapframe *frame)
        PROC_UNLOCK(p);
    }

#ifdef KSE
    /*
     * Do special thread processing, e.g. upcall tweaking and such.
     */
    if (p->p_flag & P_SA)
        thread_userret(td, frame);
#endif

    /*
     * Charge system time if profiling.
@ -147,7 +149,9 @@ ast(struct trapframe *framep)
{
    struct thread *td;
    struct proc *p;
#ifdef KSE
    struct ksegrp *kg;
#endif
    struct rlimit rlim;
    int sflag;
    int flags;
@ -159,7 +163,9 @@ ast(struct trapframe *framep)

    td = curthread;
    p = td->td_proc;
#ifdef KSE
    kg = td->td_ksegrp;
#endif

    CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
        p->p_comm);
@ -170,8 +176,10 @@ ast(struct trapframe *framep)
    td->td_frame = framep;
    td->td_pticks = 0;

#ifdef KSE
    if ((p->p_flag & P_SA) && (td->td_mailbox == NULL))
        thread_user_enter(td);
#endif

    /*
     * This updates the p_sflag's for the checks below in one
@ -256,7 +264,11 @@ ast(struct trapframe *framep)
            ktrcsw(1, 1);
#endif
        mtx_lock_spin(&sched_lock);
#ifdef KSE
        sched_prio(td, kg->kg_user_pri);
#else
        sched_prio(td, td->td_user_pri);
#endif
        mi_switch(SW_INVOL, NULL);
        mtx_unlock_spin(&sched_lock);
#ifdef KTRACE
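
The ast() hunk picks the priority to hand back on the way to user mode from kg_user_pri or td_user_pri depending on the build. Independent of where the value lives, the hand-back itself looks like the user-space model below (locking elided, field names simplified; this is a sketch, not the kernel code).

    #include <stdio.h>

    struct uthread {
        unsigned char priority; /* active priority */
        unsigned char base_pri; /* base kernel priority */
        unsigned char user_pri; /* priority for user mode */
    };

    /* Before returning to user mode, drop any kernel priority boost. */
    static void
    restore_user_pri(struct uthread *td)
    {
        if (td->priority != td->user_pri) {
            /* mtx_lock_spin(&sched_lock) in the real code */
            td->priority = td->user_pri;
            td->base_pri = td->user_pri;
            /* mtx_unlock_spin(&sched_lock) */
        }
    }

    int
    main(void)
    {
        struct uthread td = { 96, 96, 120 };

        restore_user_pri(&td);
        printf("priority=%u\n", td.priority);
        return (0);
    }
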
@ -802,7 +802,9 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
         * continuing process.
         */
        mtx_unlock_spin(&sched_lock);
#ifdef KSE
        thread_continued(p);
#endif
        p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
        mtx_lock_spin(&sched_lock);
        thread_unsuspend(p);
@ -940,6 +942,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
            pl->pl_event = PL_EVENT_SIGNAL;
        else
            pl->pl_event = 0;
#ifdef KSE
        if (td2->td_pflags & TDP_SA) {
            pl->pl_flags = PL_FLAG_SA;
            if (td2->td_upcall && !TD_CAN_UNBIND(td2))
@ -947,6 +950,9 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
        } else {
            pl->pl_flags = 0;
        }
#else
        pl->pl_flags = 0;
#endif
        pl->pl_sigmask = td2->td_sigmask;
        pl->pl_siglist = td2->td_siglist;
        break;
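
A consequence of the PT_LWPINFO hunk: pl_flags only ever reports PL_FLAG_SA on a kernel built with 'options KSE'; without it, the field is always 0. A debugger-side sketch of reading it against the FreeBSD ptrace(2) interface of this era (error handling kept minimal):

    #include <sys/types.h>
    #include <sys/ptrace.h>
    #include <stdio.h>

    static void
    show_lwpinfo(pid_t pid)
    {
        struct ptrace_lwpinfo pl;

        if (ptrace(PT_LWPINFO, pid, (caddr_t)&pl, sizeof(pl)) == -1) {
            perror("ptrace(PT_LWPINFO)");
            return;
        }
        printf("lwpid %d event %d%s\n", (int)pl.pl_lwpid, pl.pl_event,
            (pl.pl_flags & PL_FLAG_SA) ? " [scheduler activations]" : "");
    }
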
@ -2669,7 +2669,11 @@ proc_compare(struct proc *p1, struct proc *p2)
{

    int esta, estb;
#ifdef KSE
    struct ksegrp *kg;
#else
    struct thread *td;
#endif
    mtx_assert(&sched_lock, MA_OWNED);
    if (p1 == NULL)
        return (1);
@ -2690,12 +2694,19 @@ proc_compare(struct proc *p1, struct proc *p2)
     * tie - favor one with highest recent cpu utilization
     */
    esta = estb = 0;
#ifdef KSE
    FOREACH_KSEGRP_IN_PROC(p1,kg) {
        esta += kg->kg_estcpu;
    }
    FOREACH_KSEGRP_IN_PROC(p2,kg) {
        estb += kg->kg_estcpu;
    }
#else
    FOREACH_THREAD_IN_PROC(p1, td)
        esta += td->td_estcpu;
    FOREACH_THREAD_IN_PROC(p2, td)
        estb += td->td_estcpu;
#endif
    if (estb > esta)
        return (1);
    if (esta > estb)
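
proc_compare()'s tie-breaker totals recent CPU estimates across ksegrps or threads; either way it reduces to a sum-and-compare, as in this stand-in where plain arrays replace the kernel's list macros.

    #include <stdio.h>

    struct mthread { unsigned estcpu; };

    static unsigned
    proc_estcpu(const struct mthread *tds, int n)
    {
        unsigned est = 0;
        int i;

        for (i = 0; i < n; i++)
            est += tds[i].estcpu;
        return (est);
    }

    int
    main(void)
    {
        struct mthread p1[] = { {10}, {30} }, p2[] = { {25} };
        unsigned esta = proc_estcpu(p1, 2), estb = proc_estcpu(p2, 1);

        printf("winner: p%d\n", estb > esta ? 2 : 1);
        return (0);
    }
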
@ -59,6 +59,7 @@ options SYSVMSG # SYSV-style message queues
options 	SYSVSEM			# SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING	# POSIX P1003_1B real-time extensions
options 	KBD_INSTALL_CDEV	# install a CDEV entry in /dev
options 	KSE			# KSE support

# Debugging for use in -current
options 	KDB			# Enable kernel debugger support.

@ -1905,7 +1905,11 @@ init386(first)
     * This may be done better later if it gets more high level
     * components in it. If so just link td->td_proc here.
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif

    /*
     * Initialize DMAC

@ -106,7 +106,11 @@ getscheduler(struct ksched *ksched, struct thread *td, int *policy)
    int e = 0;

    mtx_lock_spin(&sched_lock);
#ifdef KSE
    pri_to_rtp(td->td_ksegrp, &rtp);
#else
    pri_to_rtp(td, &rtp);
#endif
    mtx_unlock_spin(&sched_lock);
    switch (rtp.type)
    {
@ -153,7 +157,11 @@ ksched_getparam(struct ksched *ksched,
    struct rtprio rtp;

    mtx_lock_spin(&sched_lock);
#ifdef KSE
    pri_to_rtp(td->td_ksegrp, &rtp);
#else
    pri_to_rtp(td, &rtp);
#endif
    mtx_unlock_spin(&sched_lock);
    if (RTP_PRIO_IS_REALTIME(rtp.type))
        param->sched_priority = rtpprio_to_p4prio(rtp.prio);
@ -174,7 +182,9 @@ ksched_setscheduler(struct ksched *ksched,
{
    int e = 0;
    struct rtprio rtp;
#ifdef KSE
    struct ksegrp *kg = td->td_ksegrp;
#endif

    switch(policy)
    {
@ -189,6 +199,7 @@ ksched_setscheduler(struct ksched *ksched,
            ? RTP_PRIO_FIFO : RTP_PRIO_REALTIME;

        mtx_lock_spin(&sched_lock);
#ifdef KSE
        rtp_to_pri(&rtp, kg);
        FOREACH_THREAD_IN_GROUP(kg, td) { /* XXXKSE */
            if (TD_IS_RUNNING(td)) {
@ -199,6 +210,9 @@ ksched_setscheduler(struct ksched *ksched,
                }
            }
        }
#else
        rtp_to_pri(&rtp, td);
#endif
        mtx_unlock_spin(&sched_lock);
    }
    else
@ -212,6 +226,7 @@ ksched_setscheduler(struct ksched *ksched,
        rtp.type = RTP_PRIO_NORMAL;
        rtp.prio = p4prio_to_rtpprio(param->sched_priority);
        mtx_lock_spin(&sched_lock);
#ifdef KSE
        rtp_to_pri(&rtp, kg);

        /* XXX Simply revert to whatever we had for last
@ -230,6 +245,9 @@ ksched_setscheduler(struct ksched *ksched,
            }

        }
#else
        rtp_to_pri(&rtp, td);
#endif
        mtx_unlock_spin(&sched_lock);
    }
    break;

@ -295,7 +295,11 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
    /*
     * Start initializing proc0 and thread0.
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_frame = &frame0;

    /*

@ -351,8 +351,10 @@ syscall(struct trapframe *frame)

    PCPU_LAZY_INC(cnt.v_syscall);

#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif

    code = frame->fixreg[0];
    params = (caddr_t)(frame->fixreg + FIRSTARG);

@ -56,6 +56,7 @@ options SYSVSHM #SYSV-style shared memory
options 	SYSVMSG		#SYSV-style message queues
options 	SYSVSEM		#SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions
options 	KSE		# KSE support

# Debugging for use in -current
options 	KDB		#Enable the kernel debugger

@ -295,7 +295,11 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
    /*
     * Start initializing proc0 and thread0.
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    thread0.td_frame = &frame0;

    /*

@ -351,8 +351,10 @@ syscall(struct trapframe *frame)

    PCPU_LAZY_INC(cnt.v_syscall);

#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif

    code = frame->fixreg[0];
    params = (caddr_t)(frame->fixreg + FIRSTARG);

@ -58,6 +58,7 @@ options SYSVMSG # SYSV-style message queues
options 	SYSVSEM			# SYSV-style semaphores
options 	_KPOSIX_PRIORITY_SCHEDULING	# POSIX P1003_1B real-time extensions
options 	ADAPTIVE_GIANT		# Giant mutex is adaptive.
options 	KSE			# KSE support

# Debugging for use in -current
options 	KDB			# Enable kernel debugger support.

@ -391,7 +391,11 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
    /*
     * Initialize proc0 stuff (p_contested needs to be done early).
     */
#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    proc0.p_md.md_sigtramp = NULL;
    proc0.p_md.md_utrap = NULL;
    thread0.td_kstack = kstack0;

@ -529,8 +529,10 @@ syscall(struct trapframe *tf)
    td->td_frame = tf;
    if (td->td_ucred != p->p_ucred)
        cred_update_thread(td);
#ifdef KSE
    if (p->p_flag & P_SA)
        thread_user_enter(td);
#endif
    code = tf->tf_global[1];

    /*

@ -376,7 +376,11 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
     * Initialize proc0 stuff (p_contested needs to be done early).
     */

#ifdef KSE
    proc_linkup(&proc0, &ksegrp0, &thread0);
#else
    proc_linkup(&proc0, &thread0);
#endif
    proc0.p_md.md_sigtramp = NULL;
    proc0.p_md.md_utrap = NULL;
    frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;

sys/sys/proc.h
@ -144,7 +144,7 @@ struct pargs {
 * q - td_contested lock
 * r - p_peers lock
 * x - created at fork, only changes during single threading in exec
 * z - zombie threads/ksegroup lock
 * z - zombie threads lock
 *
 * If the locking key specifies two identifiers (for example, p_pptr) then
 * either lock is sufficient for read access, but both locks must be held
@ -152,16 +152,26 @@ struct pargs {
 */
struct auditinfo;
struct kaudit_record;
#ifdef KSE
struct kg_sched;
#else
struct td_sched;
#endif
struct nlminfo;
struct kaioinfo;
struct p_sched;
struct proc;
struct sleepqueue;
#ifdef KSE
struct td_sched;
#else
struct thread;
#endif
struct trapframe;
struct turnstile;
struct mqueue_notifier;

#ifdef KSE
/*
 * Here we define the three structures used for process information.
 *
@ -230,7 +240,9 @@ They would be given priorities calculated from the KSEG.

 *
 *****************/
#endif

#ifdef KSE
/*
 * Kernel runnable context (thread).
 * This is what is put to sleep and reactivated.
@ -240,16 +252,33 @@ They would be given priorities calculated from the KSEG.
 * With N runnable and queued KSEs in the KSEGRP, the first N threads
 * are linked to them. Other threads are not yet assigned.
 */
#else
/*
 * Thread context. Processes may have multiple threads.
 */
#endif
struct thread {
    struct proc *td_proc;           /* (*) Associated process. */
#ifdef KSE
    struct ksegrp *td_ksegrp;       /* (*) Associated KSEG. */
#else
    void *was_td_ksegrp;            /* Temporary padding. */
#endif
    TAILQ_ENTRY(thread) td_plist;   /* (*) All threads in this proc. */
#ifdef KSE
    TAILQ_ENTRY(thread) td_kglist;  /* (*) All threads in this ksegrp. */
#else
    TAILQ_ENTRY(thread) was_td_kglist;  /* Temporary padding. */
#endif

    /* The two queues below should someday be merged. */
    TAILQ_ENTRY(thread) td_slpq;    /* (j) Sleep queue. */
    TAILQ_ENTRY(thread) td_lockq;   /* (j) Lock queue. */
#ifdef KSE
    TAILQ_ENTRY(thread) td_runq;    /* (j/z) Run queue(s). XXXKSE */
#else
    TAILQ_ENTRY(thread) td_runq;    /* (j/z) Run queue(s). */
#endif

    TAILQ_HEAD(, selinfo) td_selq;  /* (p) List of selinfos. */
    struct sleepqueue *td_sleepqueue;   /* (k) Associated sleep queue. */
@ -278,10 +307,23 @@ struct thread {
    struct lock_list_entry *td_sleeplocks;  /* (k) Held sleep locks. */
    int td_intr_nesting_level;      /* (k) Interrupt recursion. */
    int td_pinned;                  /* (k) Temporary cpu pin count. */
#ifdef KSE
    struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
#else
    void *was_td_mailbox;           /* Temporary padding. */
#endif
    struct ucred *td_ucred;         /* (k) Reference to credentials. */
#ifdef KSE
    struct thread *td_standin;      /* (k + a) Use this for an upcall. */
    struct kse_upcall *td_upcall;   /* (k + j) Upcall structure. */
    u_int new_td_estcpu;            /* Temporary padding. */
    u_int new_td_slptime;           /* Temporary padding. */
#else
    void *was_td_standin;           /* Temporary padding. */
    void *was_td_upcall;            /* Temporary padding. */
    u_int td_estcpu;                /* (j) Sum of the same field in KSEs. */
    u_int td_slptime;               /* (j) How long completely blocked. */
#endif
    u_int td_pticks;                /* (k) Statclock hits for profiling */
    u_int td_sticks;                /* (k) Statclock hits in system mode. */
    u_int td_iticks;                /* (k) Statclock hits in intr mode. */
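
The was_*/new_* fields above are the "temporary padding" that keeps struct thread the same size and layout whether or not the kernel was built with 'options KSE': each disabled pointer or counter is replaced by a placeholder of identical type, so recompiled modules work against either kernel. A toy demonstration of the invariant, with two hand-written structs standing in for the two preprocessed forms:

    #include <assert.h>
    #include <stdio.h>

    struct thread_kse {
        void *td_ksegrp;        /* real pointer in the KSE build */
        int td_kflags;
    };

    struct thread_nokse {
        void *was_td_ksegrp;    /* placeholder of identical size */
        int was_td_kflags;
    };

    int
    main(void)
    {
        /* Same size, same offsets: a module built against either header
         * finds the fields it knows at the same place. */
        assert(sizeof(struct thread_kse) == sizeof(struct thread_nokse));
        printf("layouts match: %zu bytes\n", sizeof(struct thread_kse));
        return (0);
    }
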
@ -293,7 +335,11 @@ struct thread {
    sigset_t td_sigmask;            /* (c) Current signal mask. */
    volatile u_int td_generation;   /* (k) For detection of preemption */
    stack_t td_sigstk;              /* (k) Stack ptr and on-stack flag. */
#ifdef KSE
    int td_kflags;                  /* (c) Flags for KSE threading. */
#else
    int was_td_kflags;              /* Temporary padding. */
#endif
    int td_xsig;                    /* (c) Signal for ptrace */
    u_long td_profil_addr;          /* (k) Temporary addr until AST. */
    u_int td_profil_ticks;          /* (k) Temporary ticks until AST. */
@ -304,6 +350,15 @@ struct thread {
#define td_startcopy td_endzero
    u_char td_base_pri;             /* (j) Thread base kernel priority. */
    u_char td_priority;             /* (j) Thread active priority. */
#ifdef KSE
    u_char new_td_pri_class;        /* Temporary padding. */
    u_char new_td_user_pri;         /* Temporary padding. */
    u_char new_td_base_user_pri;    /* Temporary padding. */
#else
    u_char td_pri_class;            /* (j) Scheduling class. */
    u_char td_user_pri;             /* (j) User pri from estcpu and nice. */
    u_char td_base_user_pri;        /* (j) Base user pri */
#endif
#define td_endcopy td_pcb

/*
@ -372,15 +427,27 @@ struct thread {
#define TDP_OLDMASK     0x00000001 /* Need to restore mask after suspend. */
#define TDP_INKTR       0x00000002 /* Thread is currently in KTR code. */
#define TDP_INKTRACE    0x00000004 /* Thread is currently in KTRACE code. */
#ifdef KSE
#define TDP_UPCALLING   0x00000008 /* This thread is doing an upcall. */
#else
/* 0x00000008 */
#endif
#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK    0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
#ifdef KSE
#define TDP_SA          0x00000080 /* A scheduler activation based thread. */
#else
/* 0x00000080 */
#endif
#define TDP_NOSLEEPING  0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC      0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD     0x00000400 /* Thread is an interrupt thread. */
#ifdef KSE
#define TDP_CAN_UNBIND  0x00000800 /* Only temporarily bound. */
#else
/* 0x00000800 */
#endif
#define TDP_SCHED1      0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2      0x00002000 /* Reserved for scheduler private use */
#define TDP_SCHED3      0x00004000 /* Reserved for scheduler private use */
@ -399,6 +466,7 @@ struct thread {
#define TDI_LOCK        0x0008 /* Stopped on a lock. */
#define TDI_IWAIT       0x0010 /* Awaiting interrupt. */

#ifdef KSE
/*
 * flags (in kflags) related to M:N threading.
 */
@ -409,6 +477,7 @@ struct thread {
#define TD_CAN_UNBIND(td) \
    (((td)->td_pflags & TDP_CAN_UNBIND) && \
     ((td)->td_upcall != NULL))
#endif

#define TD_IS_SLEEPING(td)  ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td)    ((td)->td_wchan != NULL)
@ -450,6 +519,7 @@ struct thread {
#define TD_SET_RUNQ(td)     (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td)  (td)->td_state = TDS_CAN_RUN

#ifdef KSE
/*
 * An upcall is used when returning to userland. If a thread does not have
 * an upcall on return to userland the thread exports its context and exits.
@ -498,6 +568,7 @@ struct ksegrp {
    int kg_numthreads;          /* (j) Num threads in total. */
    struct kg_sched *kg_sched;  /* (*) Scheduler-specific data. */
};
#endif

/*
 * XXX: Does this belong in resource.h or resourcevar.h instead?
@ -525,7 +596,9 @@ struct rusage_ext {
 */
struct proc {
    LIST_ENTRY(proc) p_list;        /* (d) List of all processes. */
#ifdef KSE
    TAILQ_HEAD(, ksegrp) p_ksegrps; /* (c)(kg_ksegrp) All KSEGs. */
#endif
    TAILQ_HEAD(, thread) p_threads; /* (j)(td_plist) Threads. (shortcut) */
    TAILQ_HEAD(, thread) p_suspended;   /* (td_runq) Suspended threads. */
    struct ucred *p_ucred;          /* (c) Process owner's identity. */
@ -588,7 +661,9 @@ struct proc {
    int p_suspcount;                /* (c) Num threads in suspended mode. */
    struct thread *p_xthread;       /* (c) Trap thread */
    int p_boundary_count;           /* (c) Num threads at user boundary */
#ifdef KSE
    struct ksegrp *p_procscopegrp;
#endif
    int p_pendingcnt;               /* how many signals are pending */
    struct itimers *p_itimers;      /* (c) POSIX interval timers. */
    /* End area that is zeroed on creation. */
@ -609,7 +684,9 @@ struct proc {
    u_short p_xstat;                /* (c) Exit status; also stop sig. */
    struct knlist p_klist;          /* (c) Knotes attached to this proc. */
    int p_numthreads;               /* (j) Number of threads. */
#ifdef KSE
    int p_numksegrps;               /* (c) Number of ksegrps. */
#endif
    struct mdproc p_md;             /* Any machine-dependent fields. */
    struct callout p_itcallout;     /* (h + c) Interval timer callout. */
    u_short p_acflag;               /* (c) Accounting flags. */
@ -718,18 +795,22 @@ MALLOC_DECLARE(M_ZOMBIE);

#define FOREACH_PROC_IN_SYSTEM(p) \
    LIST_FOREACH((p), &allproc, p_list)
#ifdef KSE
#define FOREACH_KSEGRP_IN_PROC(p, kg) \
    TAILQ_FOREACH((kg), &(p)->p_ksegrps, kg_ksegrp)
#define FOREACH_THREAD_IN_GROUP(kg, td) \
    TAILQ_FOREACH((td), &(kg)->kg_threads, td_kglist)
#define FOREACH_UPCALL_IN_GROUP(kg, ku) \
    TAILQ_FOREACH((ku), &(kg)->kg_upcalls, ku_link)
#endif
#define FOREACH_THREAD_IN_PROC(p, td) \
    TAILQ_FOREACH((td), &(p)->p_threads, td_plist)

/* XXXKSE the following lines should probably only be used in 1:1 code: */
#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
#ifdef KSE
#define FIRST_KSEGRP_IN_PROC(p) TAILQ_FIRST(&(p)->p_ksegrps)
#endif

/*
 * We use process IDs <= PID_MAX; PID_MAX + 1 must also fit in a pid_t,
@ -840,7 +921,9 @@ extern u_long pgrphash;
extern struct sx allproc_lock;
extern struct sx proctree_lock;
extern struct mtx ppeers_lock;
#ifdef KSE
extern struct ksegrp ksegrp0;   /* Primary ksegrp in proc0. */
#endif
extern struct proc proc0;       /* Process slot for swapper. */
extern struct thread thread0;   /* Primary thread in proc0. */
extern struct vmspace vmspace0; /* VM space for proc0. */
@ -891,7 +974,11 @@ void pargs_drop(struct pargs *pa);
void pargs_free(struct pargs *pa);
void pargs_hold(struct pargs *pa);
void procinit(void);
#ifdef KSE
void proc_linkup(struct proc *p, struct ksegrp *kg, struct thread *td);
#else
void proc_linkup(struct proc *p, struct thread *td);
#endif
void proc_reparent(struct proc *child, struct proc *newparent);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
@ -919,9 +1006,11 @@ void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);

/* New in KSE. */
#ifdef KSE
struct ksegrp *ksegrp_alloc(void);
void ksegrp_free(struct ksegrp *kg);
void ksegrp_stash(struct ksegrp *kg);
#endif
void kse_GC(void);
void kseinit(void);
void cpu_set_upcall(struct thread *td, struct thread *td0);
@ -932,16 +1021,24 @@ void cpu_thread_exit(struct thread *);
void cpu_thread_setup(struct thread *td);
void cpu_thread_swapin(struct thread *);
void cpu_thread_swapout(struct thread *);
#ifdef KSE
void ksegrp_link(struct ksegrp *kg, struct proc *p);
void ksegrp_unlink(struct ksegrp *kg);
#endif
struct thread *thread_alloc(void);
void thread_continued(struct proc *p);
void thread_exit(void) __dead2;
int thread_export_context(struct thread *td, int willexit);
void thread_free(struct thread *td);
#ifdef KSE
void thread_link(struct thread *td, struct ksegrp *kg);
#else
void thread_link(struct thread *td, struct proc *p);
#endif
void thread_reap(void);
#ifdef KSE
struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
#endif
void thread_signal_add(struct thread *td, ksiginfo_t *);
int thread_single(int how);
void thread_single_end(void);
@ -959,17 +1056,21 @@ void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
void thread_unthread(struct thread *td);
#ifdef KSE
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct thread *td);
#endif
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void thr_exit1(void);
#ifdef KSE
struct kse_upcall *upcall_alloc(void);
void upcall_free(struct kse_upcall *ku);
void upcall_link(struct kse_upcall *ku, struct ksegrp *kg);
void upcall_unlink(struct kse_upcall *ku);
void upcall_remove(struct thread *td);
void upcall_stash(struct kse_upcall *ke);
#endif

#endif /* _KERNEL */

@ -75,9 +75,15 @@ struct rtprio {
};

#ifdef _KERNEL
#ifdef KSE
struct ksegrp;
int rtp_to_pri(struct rtprio *, struct ksegrp *);
void pri_to_rtp(struct ksegrp *, struct rtprio *);
#else
struct thread;
int rtp_to_pri(struct rtprio *, struct thread *);
void pri_to_rtp(struct thread *, struct rtprio *);
#endif
#endif
#endif

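rtp_to_pri()/pri_to_rtp() translate between rtprio's (type, prio) pairs and a single global priority; only the argument type changes with KSE. A user-space model of such a round-trip follows -- the class bases are invented for illustration and do not match the kernel's PRI_MIN_* values.

    #include <stdio.h>

    struct rtprio_m { int type; int prio; };

    enum { RTP_REALTIME_M, RTP_NORMAL_M, RTP_IDLE_M };
    static const int base[] = { 0, 128, 224 }; /* per-class base priority */

    static int
    rtp_to_pri_m(const struct rtprio_m *rtp)
    {
        return (base[rtp->type] + rtp->prio);
    }

    static void
    pri_to_rtp_m(int pri, struct rtprio_m *rtp)
    {
        int t;

        for (t = RTP_IDLE_M; t > RTP_REALTIME_M && pri < base[t]; t--)
            ;
        rtp->type = t;
        rtp->prio = pri - base[t];
    }

    int
    main(void)
    {
        struct rtprio_m rtp = { RTP_NORMAL_M, 20 };
        struct rtprio_m back;

        pri_to_rtp_m(rtp_to_pri_m(&rtp), &back);
        printf("type=%d prio=%d\n", back.type, back.prio);
        return (0);
    }
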
@ -52,17 +52,23 @@ void sched_fork(struct thread *td, struct thread *childtd);
 * KSE Groups contain scheduling priority information. They record the
 * behavior of groups of KSEs and threads.
 */
#ifdef KSE
void sched_class(struct ksegrp *kg, int class);
void sched_exit_ksegrp(struct ksegrp *kg, struct thread *childtd);
void sched_fork_ksegrp(struct thread *td, struct ksegrp *child);
#else
void sched_class(struct thread *td, int class);
#endif
void sched_nice(struct proc *p, int nice);

/*
 * Threads are switched in and out, block on resources, have temporary
 * priorities inherited from their ksegs, and use up cpu time.
 */
#ifdef KSE
void sched_exit_thread(struct thread *td, struct thread *child);
void sched_fork_thread(struct thread *td, struct thread *child);
#endif
void sched_lend_prio(struct thread *td, u_char prio);
void sched_lend_user_prio(struct thread *td, u_char pri);
fixpt_t sched_pctcpu(struct thread *td);
@ -71,7 +77,11 @@ void sched_sleep(struct thread *td);
void sched_switch(struct thread *td, struct thread *newtd, int flags);
void sched_unlend_prio(struct thread *td, u_char prio);
void sched_unlend_user_prio(struct thread *td, u_char pri);
#ifdef KSE
void sched_user_prio(struct ksegrp *kg, u_char prio);
#else
void sched_user_prio(struct thread *td, u_char prio);
#endif
void sched_userret(struct thread *td);
void sched_wakeup(struct thread *td);

@ -98,7 +108,9 @@ int sched_is_bound(struct thread *td);
 * These procedures tell the process data structure allocation code how
 * many bytes to actually allocate.
 */
#ifdef KSE
int sched_sizeof_ksegrp(void);
#endif
int sched_sizeof_proc(void);
int sched_sizeof_thread(void);

@ -116,11 +128,15 @@ sched_unpin(void)

/* temporarily here */
void schedinit(void);
#ifdef KSE
void sched_init_concurrency(struct ksegrp *kg);
void sched_set_concurrency(struct ksegrp *kg, int cuncurrency);
#endif
void sched_schedinit(void);
#ifdef KSE
void sched_newproc(struct proc *p, struct ksegrp *kg, struct thread *td);
void sched_thread_exit(struct thread *td);
#endif
void sched_newthread(struct thread *td);

#endif /* !_SYS_SCHED_H_ */

@ -682,7 +682,9 @@ scheduler(dummy)
    ppri = INT_MIN;
    sx_slock(&allproc_lock);
    FOREACH_PROC_IN_SYSTEM(p) {
#ifdef KSE
        struct ksegrp *kg;
#endif
        if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
            continue;
        }
@ -694,14 +696,18 @@ scheduler(dummy)
             *
             */
            if (td->td_inhibitors == TDI_SWAPPED) {
#ifdef KSE
                kg = td->td_ksegrp;
                pri = p->p_swtime + kg->kg_slptime;
#else
                pri = p->p_swtime + td->td_slptime;
#endif
                if ((p->p_sflag & PS_SWAPINREQ) == 0) {
                    pri -= p->p_nice * 8;
                }

                /*
                 * if this ksegrp is higher priority
                 * if this ksegrp/thread is higher priority
                 * and there is enough space, then select
                 * this process instead of the previous
                 * selection.
@ -810,7 +816,9 @@ int action;
{
    struct proc *p;
    struct thread *td;
#ifdef KSE
    struct ksegrp *kg;
#endif
    int didswap = 0;

retry:
@ -884,15 +892,24 @@ int action;
             * do not swapout a realtime process
             * Check all the thread groups..
             */
#ifdef KSE
            FOREACH_KSEGRP_IN_PROC(p, kg) {
                if (PRI_IS_REALTIME(kg->kg_pri_class))
#else
            FOREACH_THREAD_IN_PROC(p, td) {
                if (PRI_IS_REALTIME(td->td_pri_class))
#endif
                    goto nextproc;

                /*
                 * Guarantee swap_idle_threshold1
                 * time in memory.
                 */
#ifdef KSE
                if (kg->kg_slptime < swap_idle_threshold1)
#else
                if (td->td_slptime < swap_idle_threshold1)
#endif
                    goto nextproc;

                /*
@ -904,11 +921,16 @@ int action;
                 * This could be refined to support
                 * swapping out a thread.
                 */
#ifdef KSE
                FOREACH_THREAD_IN_GROUP(kg, td) {
                    if ((td->td_priority) < PSOCK ||
                        !thread_safetoswapout(td))
                        goto nextproc;
                }
#else
                if ((td->td_priority) < PSOCK || !thread_safetoswapout(td))
                    goto nextproc;
#endif
                /*
                 * If the system is under memory stress,
                 * or if we are swapping
@ -917,11 +939,20 @@ int action;
                 */
                if (((action & VM_SWAP_NORMAL) == 0) &&
                    (((action & VM_SWAP_IDLE) == 0) ||
#ifdef KSE
                    (kg->kg_slptime < swap_idle_threshold2)))
#else
                    (td->td_slptime < swap_idle_threshold2)))
#endif
                    goto nextproc;

#ifdef KSE
                if (minslptime > kg->kg_slptime)
                    minslptime = kg->kg_slptime;
#else
                if (minslptime > td->td_slptime)
                    minslptime = td->td_slptime;
#endif
            }

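swapout_procs() above keeps a process resident if any ksegrp/thread is realtime, has not been asleep long enough, or is waiting at a sensitive priority. That screening reduces to a per-thread conjunction, modeled here with invented threshold values standing in for swap_idle_threshold1 and PSOCK.

    #include <stdio.h>

    struct sthread { int pri_class; int slptime; int priority; };

    enum { CLASS_REALTIME, CLASS_TIMESHARE };
    #define THRESH1 2   /* stand-in: guaranteed time in memory */
    #define PSOCK_M 96  /* stand-in: below this, a sensitive wait */

    static int
    swappable(const struct sthread *tds, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            if (tds[i].pri_class == CLASS_REALTIME)
                return (0);
            if (tds[i].slptime < THRESH1)
                return (0);
            if (tds[i].priority < PSOCK_M)
                return (0);
        }
        return (1);
    }

    int
    main(void)
    {
        struct sthread tds[] = { { CLASS_TIMESHARE, 5, 120 } };

        printf("swappable: %d\n", swappable(tds, 1));
        return (0);
    }
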
    /*

@ -179,7 +179,11 @@ pagezero_start(void __unused *arg)
    PROC_UNLOCK(pagezero_proc);
    mtx_lock_spin(&sched_lock);
    td = FIRST_THREAD_IN_PROC(pagezero_proc);
#ifdef KSE
    sched_class(td->td_ksegrp, PRI_IDLE);
#else
    sched_class(td, PRI_IDLE);
#endif
    sched_prio(td, PRI_MAX_IDLE);
    setrunqueue(td, SRQ_BORING);
    mtx_unlock_spin(&sched_lock);