Remove kernel support for M:N threading.

While the KSE project was quite successful in bringing threading to
FreeBSD, the M:N approach taken by the kse library was never developed
to its full potential.  Backwards compatibility will be provided via
libmap.conf for dynamically linked binaries, while statically linked
binaries will be broken.
This commit is contained in:
Jeff Roberson 2008-03-12 10:12:01 +00:00
parent 7f77f84497
commit 6617724c5f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=177091
69 changed files with 79 additions and 2343 deletions

View File

@ -1325,7 +1325,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);
/* setup proc 0's pcb */
thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
thread0.td_pcb->pcb_flags = 0;
thread0.td_pcb->pcb_cr3 = KPML4phys;
thread0.td_frame = &proc0_tf;

View File

@ -301,10 +301,6 @@ trap(struct trapframe *frame)
case T_PAGEFLT: /* page fault */
addr = frame->tf_addr;
#ifdef KSE
if (td->td_pflags & TDP_SA)
thread_user_enter(td);
#endif
i = trap_pfault(frame, TRUE);
if (i == -1)
goto userout;
@ -782,10 +778,6 @@ syscall(struct trapframe *frame)
td->td_frame = frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
params = (caddr_t)frame->tf_rsp + sizeof(register_t);
code = frame->tf_rax;
orig_tf_rflags = frame->tf_rflags;

View File

@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kse.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
@ -272,22 +271,12 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* Copy the upcall pcb. This loads kernel regs.
* Those not loaded individually below get their default
* values here.
*
* XXXKSE It might be a good idea to simply skip this as
* the values of the other registers may be unimportant.
* This would remove any requirement for knowing the KSE
* at this time (see the matching comment below for
* more analysis) (need a good safe default).
*/
bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
pcb2->pcb_flags &= ~PCB_FPUINITDONE;
/*
* Create a new fresh stack for the new thread.
* Don't forget to set this stack value into whatever supplies
* the address for the fault handlers.
* The contexts are filled in at the time we actually DO the
* upcall as only then do we know which KSE we got.
*/
bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));

View File

@ -18,6 +18,3 @@ device uart_ns8250
# Default partitioning schemes
options GEOM_BSD
options GEOM_MBR
# KSE support went from being default to a kernel option
options KSE

View File

@ -247,8 +247,7 @@ elf_linux_fixup(register_t **stack_base, struct image_params *imgp)
Elf32_Addr *base;
Elf32_Addr *pos;
KASSERT(curthread->td_proc == imgp->proc &&
(curthread->td_proc->p_flag & P_SA) == 0,
KASSERT(curthread->td_proc == imgp->proc,
("unsafe elf_linux_fixup(), should be curproc"));
base = (Elf32_Addr *)*stack_base;
args = (Elf32_Auxargs *)imgp->auxargs;

View File

@ -262,10 +262,6 @@ data_abort_handler(trapframe_t *tf)
td->td_frame = tf;
if (td->td_ucred != td->td_proc->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (td->td_pflags & TDP_SA)
thread_user_enter(td);
#endif
}
/* Grab the current pcb */
@ -731,10 +727,6 @@ prefetch_abort_handler(trapframe_t *tf)
td->td_frame = tf;
if (td->td_ucred != td->td_proc->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (td->td_proc->p_flag & P_SA)
thread_user_enter(td);
#endif
}
fault_pc = tf->tf_pc;
if (td->td_md.md_spinlock_count == 0) {
@ -1008,10 +1000,6 @@ swi_handler(trapframe_t *frame)
td->td_frame = frame;
td->td_pticks = 0;
#ifdef KSE
if (td->td_proc->p_flag & P_SA)
thread_user_enter(td);
#endif
/*
* Make sure the program counter is correctly aligned so we
* don't take an alignment fault trying to read the opcode.

View File

@ -49,7 +49,6 @@ options DDB #Enable the kernel debugger
#options DIAGNOSTIC
options SCHED_4BSD #4BSD scheduler
options KSE
options INET #InterNETworking
options INET6 #IPv6 communications protocols
options FFS #Berkeley Fast Filesystem

View File

@ -406,11 +406,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup0(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -284,11 +284,6 @@
#define FREEBSD32_SYS___setugid 374
#define FREEBSD32_SYS_eaccess 376
#define FREEBSD32_SYS_nmount 378
#define FREEBSD32_SYS_kse_exit 379
#define FREEBSD32_SYS_kse_wakeup 380
#define FREEBSD32_SYS_kse_create 381
#define FREEBSD32_SYS_kse_thr_interrupt 382
#define FREEBSD32_SYS_kse_release 383
#define FREEBSD32_SYS_kenv 390
#define FREEBSD32_SYS_lchflags 391
#define FREEBSD32_SYS_uuidgen 392

View File

@ -386,11 +386,11 @@ const char *freebsd32_syscallnames[] = {
"eaccess", /* 376 = eaccess */
"#377", /* 377 = afs_syscall */
"nmount", /* 378 = nmount */
"kse_exit", /* 379 = kse_exit */
"kse_wakeup", /* 380 = kse_wakeup */
"kse_create", /* 381 = kse_create */
"kse_thr_interrupt", /* 382 = kse_thr_interrupt */
"kse_release", /* 383 = kse_release */
"#379", /* 379 = kse_exit */
"#380", /* 380 = kse_wakeup */
"#381", /* 381 = kse_create */
"#382", /* 382 = kse_thr_interrupt */
"#383", /* 383 = kse_release */
"#384", /* 384 = __mac_get_proc */
"#385", /* 385 = __mac_set_proc */
"#386", /* 386 = __mac_get_fd */

View File

@ -418,11 +418,11 @@ struct sysent freebsd32_sysent[] = {
{ AS(eaccess_args), (sy_call_t *)eaccess, AUE_EACCESS, NULL, 0, 0 }, /* 376 = eaccess */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 377 = afs_syscall */
{ AS(nmount_args), (sy_call_t *)nmount, AUE_NMOUNT, NULL, 0, 0 }, /* 378 = nmount */
{ 0, (sy_call_t *)kse_exit, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */
{ AS(kse_wakeup_args), (sy_call_t *)kse_wakeup, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */
{ AS(kse_create_args), (sy_call_t *)kse_create, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */
{ AS(kse_thr_interrupt_args), (sy_call_t *)kse_thr_interrupt, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */
{ 0, (sy_call_t *)kse_release, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 384 = __mac_get_proc */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 385 = __mac_set_proc */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 386 = __mac_get_fd */

View File

@ -645,13 +645,11 @@
377 AUE_NULL UNIMPL afs_syscall
378 AUE_NMOUNT NOPROTO { int nmount(struct iovec *iovp, \
unsigned int iovcnt, int flags); }
379 AUE_NULL NOPROTO { int kse_exit(void); }
380 AUE_NULL NOPROTO { int kse_wakeup(struct kse_mailbox *mbx); }
381 AUE_NULL NOPROTO { int kse_create(struct kse_mailbox *mbx, \
int newgroup); }
382 AUE_NULL NOPROTO { int kse_thr_interrupt( \
struct kse_thr_mailbox *tmbx); }
383 AUE_NULL NOPROTO { int kse_release(void); }
379 AUE_NULL UNIMPL kse_exit
380 AUE_NULL UNIMPL kse_wakeup
381 AUE_NULL UNIMPL kse_create
382 AUE_NULL UNIMPL kse_thr_interrupt
383 AUE_NULL UNIMPL kse_release
384 AUE_NULL UNIMPL __mac_get_proc
385 AUE_NULL UNIMPL __mac_set_proc
386 AUE_NULL UNIMPL __mac_get_fd

View File

@ -1459,7 +1459,6 @@ kern/kern_fork.c standard
kern/kern_idle.c standard
kern/kern_intr.c standard
kern/kern_jail.c standard
kern/kern_kse.c standard
kern/kern_kthread.c standard
kern/kern_ktr.c optional ktr
kern/kern_ktrace.c standard

View File

@ -104,7 +104,6 @@ GEOM_UZIP opt_geom.h
GEOM_VIRSTOR opt_geom.h
GEOM_VOL opt_geom.h
GEOM_ZERO opt_geom.h
KSE opt_global.h
KSTACK_MAX_PAGES
KSTACK_PAGES
KTRACE

View File

@ -43,7 +43,7 @@
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/proc.h> /* XXXKSE */
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

View File

@ -189,7 +189,7 @@ procfs_control(struct thread *td, struct proc *p, int op)
/*
* do single-step fixup if needed
*/
FIX_SSTEP(FIRST_THREAD_IN_PROC(p)); /* XXXKSE */
FIX_SSTEP(FIRST_THREAD_IN_PROC(p));
#endif
/*
@ -245,7 +245,7 @@ procfs_control(struct thread *td, struct proc *p, int op)
* What does it mean to single step a threaded program?
*/
case PROCFS_CTL_STEP:
error = proc_sstep(FIRST_THREAD_IN_PROC(p)); /* XXXKSE */
error = proc_sstep(FIRST_THREAD_IN_PROC(p));
PROC_UNLOCK(p);
if (error)
return (error);
@ -335,14 +335,11 @@ procfs_doprocctl(PFS_FILL_ARGS)
printf("procfs: got a sig%s\n", sbuf_data(sb));
PROC_LOCK(p);
/* This is very broken XXXKSE: */
if (TRACE_WAIT_P(td->td_proc, p)) {
p->p_xstat = nm->nm_val;
#ifdef FIX_SSTEP
/* XXXKSE: */
FIX_SSTEP(FIRST_THREAD_IN_PROC(p));
#endif
/* XXXKSE: */
p->p_flag &= ~P_STOPPED_SIG;
PROC_SLOCK(p);
thread_unsuspend(p);

View File

@ -105,7 +105,6 @@ procfs_doprocdbregs(PFS_FILL_ARGS)
return (EPERM);
}
/* XXXKSE: */
td2 = FIRST_THREAD_IN_PROC(p);
#ifdef COMPAT_IA32
if (td->td_proc->p_sysent == &ia32_freebsd_sysvec) {

View File

@ -112,20 +112,13 @@ procfs_doprocstatus(PFS_FILL_ARGS)
sbuf_printf(sb, "noflags");
}
#ifdef KSE
if (p->p_flag & P_SA)
wmesg = "-kse- ";
else
#endif
{
tdfirst = FIRST_THREAD_IN_PROC(p);
if (tdfirst->td_wchan != NULL) {
KASSERT(tdfirst->td_wmesg != NULL,
("wchan %p has no wmesg", tdfirst->td_wchan));
wmesg = tdfirst->td_wmesg;
} else
wmesg = "nochan";
}
tdfirst = FIRST_THREAD_IN_PROC(p);
if (tdfirst->td_wchan != NULL) {
KASSERT(tdfirst->td_wmesg != NULL,
("wchan %p has no wmesg", tdfirst->td_wchan));
wmesg = tdfirst->td_wmesg;
} else
wmesg = "nochan";
if (p->p_flag & P_INMEM) {
struct timeval start, ut, st;

View File

@ -22,6 +22,3 @@ device uart_ns8250
# Default partitioning schemes
options GEOM_BSD
options GEOM_MBR
# KSE support went from being default to a kernel option
options KSE

View File

@ -233,9 +233,6 @@ i386_extend_pcb(struct thread *td)
0 /* granularity */
};
if (td->td_proc->p_flag & P_SA)
return (EINVAL); /* XXXKSE */
/* XXXKSE All the code below only works in 1:1 needs changing */
ext = (struct pcb_ext *)kmem_alloc(kernel_map, ctob(IOPAGES+1));
if (ext == 0)
return (ENOMEM);

View File

@ -348,10 +348,6 @@ trap(struct trapframe *frame)
break;
case T_PAGEFLT: /* page fault */
#ifdef KSE
if (td->td_pflags & TDP_SA)
thread_user_enter(td);
#endif
i = trap_pfault(frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
@ -959,10 +955,6 @@ syscall(struct trapframe *frame)
td->td_frame = frame;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
params = (caddr_t)frame->tf_esp + sizeof(int);
code = frame->tf_eax;
orig_tf_eflags = frame->tf_eflags;

View File

@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kse.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
@ -338,7 +337,6 @@ cpu_thread_clean(struct thread *td)
pcb = td->td_pcb;
if (pcb->pcb_ext != NULL) {
/* XXXKSE XXXSMP not SMP SAFE.. what locks do we have? */
/* if (pcb->pcb_ext->ext_refcount-- == 1) ?? */
/*
* XXX do we need to move the TSS off the allocated pages
@ -396,23 +394,12 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* Copy the upcall pcb. This loads kernel regs.
* Those not loaded individually below get their default
* values here.
*
* XXXKSE It might be a good idea to simply skip this as
* the values of the other registers may be unimportant.
* This would remove any requirement for knowing the KSE
* at this time (see the matching comment below for
* more analysis) (need a good safe default).
*/
bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));
pcb2->pcb_flags &= ~(PCB_NPXTRAP|PCB_NPXINITDONE);
/*
* Create a new fresh stack for the new thread.
* The -16 is so we can expand the trapframe if we go to vm86.
* Don't forget to set this stack value into whatever supplies
* the address for the fault handlers.
* The contexts are filled in at the time we actually DO the
* upcall as only then do we know which KSE we got.
*/
bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
@ -439,7 +426,7 @@ cpu_set_upcall(struct thread *td, struct thread *td0)
* pcb2->pcb_savefpu: cloned above.
* pcb2->pcb_flags: cloned above.
* pcb2->pcb_onfault: cloned above (always NULL here?).
* pcb2->pcb_gs: cloned above. XXXKSE ???
* pcb2->pcb_gs: cloned above.
* pcb2->pcb_ext: cleared below.
*/
pcb2->pcb_ext = NULL;

View File

@ -405,7 +405,7 @@ exec_coff_imgact(imgp)
DPRINTF(("%s(%d): shared library %s\n",
__FILE__, __LINE__, libname));
strlcpy(&libbuf[emul_path_len], libname, MAXPATHLEN);
/* XXXKSE only 1:1 in coff */ error = coff_load_file(
error = coff_load_file(
FIRST_THREAD_IN_PROC(imgp->proc), libbuf);
if (error)
error = coff_load_file(

View File

@ -239,8 +239,7 @@ elf_linux_fixup(register_t **stack_base, struct image_params *imgp)
Elf32_Auxargs *args;
register_t *pos;
KASSERT(curthread->td_proc == imgp->proc &&
(curthread->td_proc->p_flag & P_SA) == 0,
KASSERT(curthread->td_proc == imgp->proc,
("unsafe elf_linux_fixup(), should be curproc"));
args = (Elf32_Auxargs *)imgp->auxargs;
pos = *stack_base + (imgp->args->argc + imgp->args->envc + 2);

View File

@ -17,6 +17,3 @@ device uart_ns8250
options GEOM_PART_BSD
options GEOM_PART_GPT
options GEOM_PART_MBR
# KSE support went from being default to a kernel option
options KSE

View File

@ -1231,9 +1231,6 @@ set_mcontext(struct thread *td, const mcontext_t *mc)
restore_callee_saved(&mc->mc_preserved);
restore_callee_saved_fp(&mc->mc_preserved_fp);
if (mc->mc_flags & _MC_FLAGS_KSE_SET_MBOX)
suword((caddr_t)mc->mc_special.ifa, mc->mc_special.isr);
return (0);
}

View File

@ -987,10 +987,6 @@ syscall(struct trapframe *tf)
td->td_pticks = 0;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
if (p->p_sysent->sv_prepsyscall) {
/* (*p->p_sysent->sv_prepsyscall)(tf, args, &code, &params); */

View File

@ -79,7 +79,6 @@ typedef struct __mcontext {
unsigned long mc_flags;
#define _MC_FLAGS_ASYNC_CONTEXT 0x0001
#define _MC_FLAGS_HIGHFP_VALID 0x0002
#define _MC_FLAGS_KSE_SET_MBOX 0x0004 /* Undocumented. Has to go. */
#define _MC_FLAGS_SYSCALL_CONTEXT 0x0008
unsigned long _reserved_;
struct _special mc_special;

View File

@ -480,9 +480,6 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
u_long base_addr = 0;
int vfslocked, error, i, numsegs;
if (curthread->td_proc != p)
panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
nd = &tempdata->nd;
attr = &tempdata->attr;
@ -498,7 +495,6 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
imgp->object = NULL;
imgp->execlabel = NULL;
/* XXXKSE */
NDINIT(nd, LOOKUP, MPSAFE|LOCKLEAF|FOLLOW, UIO_SYSSPACE, file,
curthread);
vfslocked = 0;
@ -999,7 +995,7 @@ __elfN(coredump)(td, vp, limit)
(caddr_t)(uintptr_t)php->p_vaddr,
php->p_filesz, offset, UIO_USERSPACE,
IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
curthread); /* XXXKSE */
curthread);
if (error != 0)
break;
offset += php->p_filesz;
@ -1147,7 +1143,7 @@ __elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
/* Write it to the core file. */
return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
td)); /* XXXKSE */
td));
}
#if defined(COMPAT_IA32) && __ELF_WORD_SIZE == 32

View File

@ -408,11 +408,11 @@ struct sysent sysent[] = {
{ AS(eaccess_args), (sy_call_t *)eaccess, AUE_EACCESS, NULL, 0, 0 }, /* 376 = eaccess */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 377 = afs_syscall */
{ AS(nmount_args), (sy_call_t *)nmount, AUE_NMOUNT, NULL, 0, 0 }, /* 378 = nmount */
{ 0, (sy_call_t *)kse_exit, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */
{ AS(kse_wakeup_args), (sy_call_t *)kse_wakeup, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */
{ AS(kse_create_args), (sy_call_t *)kse_create, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */
{ AS(kse_thr_interrupt_args), (sy_call_t *)kse_thr_interrupt, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */
{ AS(kse_release_args), (sy_call_t *)kse_release, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 379 = kse_exit */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 380 = kse_wakeup */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 381 = kse_create */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 382 = kse_thr_interrupt */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 383 = kse_release */
{ AS(__mac_get_proc_args), (sy_call_t *)__mac_get_proc, AUE_NULL, NULL, 0, 0 }, /* 384 = __mac_get_proc */
{ AS(__mac_set_proc_args), (sy_call_t *)__mac_set_proc, AUE_NULL, NULL, 0, 0 }, /* 385 = __mac_set_proc */
{ AS(__mac_get_fd_args), (sy_call_t *)__mac_get_fd, AUE_NULL, NULL, 0, 0 }, /* 386 = __mac_get_fd */
@ -469,7 +469,7 @@ struct sysent sysent[] = {
{ AS(extattr_list_fd_args), (sy_call_t *)extattr_list_fd, AUE_EXTATTR_LIST_FD, NULL, 0, 0 }, /* 437 = extattr_list_fd */
{ AS(extattr_list_file_args), (sy_call_t *)extattr_list_file, AUE_EXTATTR_LIST_FILE, NULL, 0, 0 }, /* 438 = extattr_list_file */
{ AS(extattr_list_link_args), (sy_call_t *)extattr_list_link, AUE_EXTATTR_LIST_LINK, NULL, 0, 0 }, /* 439 = extattr_list_link */
{ AS(kse_switchin_args), (sy_call_t *)kse_switchin, AUE_NULL, NULL, 0, 0 }, /* 440 = kse_switchin */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0 }, /* 440 = kse_switchin */
{ AS(ksem_timedwait_args), (sy_call_t *)lkmressys, AUE_NULL, NULL, 0, 0 }, /* 441 = ksem_timedwait */
{ AS(thr_suspend_args), (sy_call_t *)thr_suspend, AUE_NULL, NULL, 0, 0 }, /* 442 = thr_suspend */
{ AS(thr_wake_args), (sy_call_t *)thr_wake, AUE_NULL, NULL, 0, 0 }, /* 443 = thr_wake */

View File

@ -477,10 +477,6 @@ statclock(int usermode)
/*
* Charge the time as appropriate.
*/
#ifdef KSE
if (p->p_flag & P_SA)
thread_statclock(1);
#endif
td->td_uticks++;
if (p->p_nice > NZERO)
cp_time[CP_NICE]++;
@ -504,10 +500,6 @@ statclock(int usermode)
td->td_iticks++;
cp_time[CP_INTR]++;
} else {
#ifdef KSE
if (p->p_flag & P_SA)
thread_statclock(0);
#endif
td->td_pticks++;
td->td_sticks++;
if (!TD_IS_IDLETHREAD(td))

View File

@ -1194,7 +1194,7 @@ exec_check_permissions(imgp)
struct thread *td;
int error;
td = curthread; /* XXXKSE */
td = curthread;
/* Get file attributes */
error = VOP_GETATTR(vp, attr, td->td_ucred, td);

View File

@ -764,7 +764,7 @@ fork_exit(callout, arg, frame)
p = td->td_proc;
KASSERT(p->p_state == PRS_NORMAL, ("executing process is still new"));
CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
CTR4(KTR_PROC, "fork_exit: new thread %p (td_sched %p, pid %d, %s)",
td, td->td_sched, p->p_pid, td->td_name);
sched_fork_exit(td);

View File

@ -905,7 +905,6 @@ swi_add(struct intr_event **eventp, const char *name, driver_intr_t handler,
}
return (intr_event_add_handler(ie, name, NULL, handler, arg,
(pri * RQ_PPQ) + PI_SOFT, flags, cookiep));
/* XXKSE.. think of a better way to get separate queues */
}
/*

File diff suppressed because it is too large Load Diff

View File

@ -645,7 +645,7 @@ fill_kinfo_proc_only(struct proc *p, struct kinfo_proc *kp)
kp->ki_structsize = sizeof(*kp);
kp->ki_paddr = p;
PROC_LOCK_ASSERT(p, MA_OWNED);
kp->ki_addr =/* p->p_addr; */0; /* XXXKSE */
kp->ki_addr =/* p->p_addr; */0; /* XXX */
kp->ki_args = p->p_args;
kp->ki_textvp = p->p_textvp;
#ifdef KTRACE
@ -794,7 +794,7 @@ fill_kinfo_thread(struct thread *td, struct kinfo_proc *kp, int preferthread)
bzero(kp->ki_lockname, sizeof(kp->ki_lockname));
}
if (p->p_state == PRS_NORMAL) { /* XXXKSE very approximate */
if (p->p_state == PRS_NORMAL) { /* approximate. */
if (TD_ON_RUNQ(td) ||
TD_CAN_RUN(td) ||
TD_IS_RUNNING(td)) {

View File

@ -407,8 +407,6 @@ rtprio(td, uap)
* or if one is, report the highest priority
* in the process. There isn't much more you can do as
* there is only room to return a single priority.
* XXXKSE: maybe need a new interface to report
* priorities of multiple system scope threads.
* Note: specifying our own pid is not the same
* as leaving it zero.
*/

View File

@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$");
#include <sys/event.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
@ -94,9 +93,6 @@ static int filt_sigattach(struct knote *kn);
static void filt_sigdetach(struct knote *kn);
static int filt_signal(struct knote *kn, long hint);
static struct thread *sigtd(struct proc *p, int sig, int prop);
#ifdef KSE
static int do_tdsignal(struct proc *, struct thread *, int, ksiginfo_t *);
#endif
static void sigqueue_start(void);
static uma_zone_t ksiginfo_zone = NULL;
@ -566,11 +562,7 @@ void
signotify(struct thread *td)
{
struct proc *p;
#ifdef KSE
sigset_t set, saved;
#else
sigset_t set;
#endif
p = td->td_proc;
@ -581,10 +573,6 @@ signotify(struct thread *td)
* previously masked by all threads to our sigqueue.
*/
set = p->p_sigqueue.sq_signals;
#ifdef KSE
if (p->p_flag & P_SA)
saved = p->p_sigqueue.sq_signals;
#endif
SIGSETNAND(set, td->td_sigmask);
if (! SIGISEMPTY(set))
sigqueue_move_set(&p->p_sigqueue, &td->td_sigqueue, &set);
@ -593,15 +581,6 @@ signotify(struct thread *td)
td->td_flags |= TDF_NEEDSIGCHK | TDF_ASTPENDING;
thread_unlock(td);
}
#ifdef KSE
if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
/* pending set changed */
p->p_flag |= P_SIGEVENT;
wakeup(&p->p_siglist);
}
}
#endif
}
int
@ -754,13 +733,6 @@ kern_sigaction(td, sig, act, oact, flags)
if (ps->ps_sigact[_SIG_IDX(sig)] == SIG_IGN ||
(sigprop(sig) & SA_IGNORE &&
ps->ps_sigact[_SIG_IDX(sig)] == SIG_DFL)) {
#ifdef KSE
if ((p->p_flag & P_SA) &&
SIGISMEMBER(p->p_sigqueue.sq_signals, sig)) {
p->p_flag |= P_SIGEVENT;
wakeup(&p->p_siglist);
}
#endif
/* never to be seen again */
PROC_SLOCK(p);
sigqueue_delete_proc(p, sig);
@ -1200,12 +1172,6 @@ kern_sigtimedwait(struct thread *td, sigset_t waitset, ksiginfo_t *ksi,
continue;
if (!SIGISMEMBER(td->td_sigqueue.sq_signals, i)) {
if (SIGISMEMBER(p->p_sigqueue.sq_signals, i)) {
#ifdef KSE
if (p->p_flag & P_SA) {
p->p_flag |= P_SIGEVENT;
wakeup(&p->p_siglist);
}
#endif
sigqueue_move(&p->p_sigqueue,
&td->td_sigqueue, i);
} else
@ -1842,9 +1808,6 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
{
struct sigacts *ps;
struct proc *p;
#ifdef KSE
int error;
#endif
int sig;
int code;
@ -1853,27 +1816,7 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
code = ksi->ksi_code;
KASSERT(_SIG_VALID(sig), ("invalid signal"));
#ifdef KSE
if (td->td_pflags & TDP_SA) {
if (td->td_mailbox == NULL)
thread_user_enter(td);
PROC_LOCK(p);
SIGDELSET(td->td_sigmask, sig);
thread_lock(td);
/*
* Force scheduling an upcall, so UTS has chance to
* process the signal before thread runs again in
* userland.
*/
if (td->td_upcall)
td->td_upcall->ku_flags |= KUF_DOUPCALL;
thread_unlock(td);
} else {
PROC_LOCK(p);
}
#else
PROC_LOCK(p);
#endif
ps = p->p_sigacts;
mtx_lock(&ps->ps_mtx);
if ((p->p_flag & P_TRACED) == 0 && SIGISMEMBER(ps->ps_sigcatch, sig) &&
@ -1884,34 +1827,8 @@ trapsignal(struct thread *td, ksiginfo_t *ksi)
ktrpsig(sig, ps->ps_sigact[_SIG_IDX(sig)],
&td->td_sigmask, code);
#endif
#ifdef KSE
if (!(td->td_pflags & TDP_SA))
(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
ksi, &td->td_sigmask);
#else
(*p->p_sysent->sv_sendsig)(ps->ps_sigact[_SIG_IDX(sig)],
ksi, &td->td_sigmask);
#endif
#ifdef KSE
else if (td->td_mailbox == NULL) {
mtx_unlock(&ps->ps_mtx);
/* UTS caused a sync signal */
p->p_code = code; /* XXX for core dump/debugger */
p->p_sig = sig; /* XXX to verify code */
sigexit(td, sig);
} else {
mtx_unlock(&ps->ps_mtx);
SIGADDSET(td->td_sigmask, sig);
PROC_UNLOCK(p);
error = copyout(&ksi->ksi_info, &td->td_mailbox->tm_syncsig,
sizeof(siginfo_t));
PROC_LOCK(p);
/* UTS memory corrupted */
if (error)
sigexit(td, SIGSEGV);
mtx_lock(&ps->ps_mtx);
}
#endif
SIGSETOR(td->td_sigmask, ps->ps_catchmask[_SIG_IDX(sig)]);
if (!SIGISMEMBER(ps->ps_signodefer, sig))
SIGADDSET(td->td_sigmask, sig);
@ -2024,27 +1941,6 @@ psignal_event(struct proc *p, struct sigevent *sigev, ksiginfo_t *ksi)
int
tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
{
#ifdef KSE
sigset_t saved;
int ret;
if (p->p_flag & P_SA)
saved = p->p_sigqueue.sq_signals;
ret = do_tdsignal(p, td, sig, ksi);
if ((p->p_flag & P_SA) && !(p->p_flag & P_SIGEVENT)) {
if (!SIGSETEQ(saved, p->p_sigqueue.sq_signals)) {
/* pending set changed */
p->p_flag |= P_SIGEVENT;
wakeup(&p->p_siglist);
}
}
return (ret);
}
static int
do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
{
#endif
sig_t action;
sigqueue_t *sigqueue;
int prop;
@ -2055,17 +1951,9 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
PROC_LOCK_ASSERT(p, MA_OWNED);
if (!_SIG_VALID(sig))
#ifdef KSE
panic("do_tdsignal(): invalid signal %d", sig);
#else
panic("tdsignal(): invalid signal %d", sig);
#endif
#ifdef KSE
KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("do_tdsignal: ksi on queue"));
#else
KASSERT(ksi == NULL || !KSI_ONQ(ksi), ("tdsignal: ksi on queue"));
#endif
/*
* IEEE Std 1003.1-2001: return success when killing a zombie.
@ -2232,18 +2120,6 @@ do_tdsignal(struct proc *p, struct thread *td, int sig, ksiginfo_t *ksi)
goto out;
}
if (action == SIG_CATCH) {
#ifdef KSE
/*
* The process wants to catch it so it needs
* to run at least one thread, but which one?
* It would seem that the answer would be to
* run an upcall in the next KSE to run, and
* deliver the signal that way. In a NON KSE
* process, we need to make sure that the
* single thread is runnable asap.
* XXXKSE for now however, make them all run.
*/
#endif
/*
* The process wants to catch it so it needs
* to run at least one thread, but which one?
@ -2540,10 +2416,6 @@ issignal(td)
*/
if (SIGISMEMBER(ps->ps_sigignore, sig) && (traced == 0)) {
sigqueue_delete(&td->td_sigqueue, sig);
#ifdef KSE
if (td->td_pflags & TDP_SA)
SIGADDSET(td->td_sigmask, sig);
#endif
continue;
}
if (p->p_flag & P_TRACED && (p->p_flag & P_PPWAIT) == 0) {
@ -2554,11 +2426,6 @@ issignal(td)
newsig = ptracestop(td, sig);
mtx_lock(&ps->ps_mtx);
#ifdef KSE
if (td->td_pflags & TDP_SA)
SIGADDSET(td->td_sigmask, sig);
#endif
if (sig != newsig) {
ksiginfo_t ksi;
/*
@ -2582,10 +2449,6 @@ issignal(td)
* signal is being masked, look for other signals.
*/
SIGADDSET(td->td_sigqueue.sq_signals, sig);
#ifdef KSE
if (td->td_pflags & TDP_SA)
SIGDELSET(td->td_sigmask, sig);
#endif
if (SIGISMEMBER(td->td_sigmask, sig))
continue;
signotify(td);
@ -2739,11 +2602,7 @@ postsig(sig)
mtx_lock(&ps->ps_mtx);
}
#ifdef KSE
if (!(td->td_pflags & TDP_SA) && action == SIG_DFL) {
#else
if (action == SIG_DFL) {
#endif
/*
* Default action, where the default is to kill
* the process. (Other cases were ignored above.)
@ -2752,15 +2611,6 @@ postsig(sig)
sigexit(td, sig);
/* NOTREACHED */
} else {
#ifdef KSE
if (td->td_pflags & TDP_SA) {
if (sig == SIGKILL) {
mtx_unlock(&ps->ps_mtx);
sigexit(td, sig);
}
}
#endif
/*
* If we get here, the signal must be caught.
*/
@ -2803,14 +2653,7 @@ postsig(sig)
p->p_code = 0;
p->p_sig = 0;
}
#ifdef KSE
if (td->td_pflags & TDP_SA)
thread_signal_add(curthread, &ksi);
else
(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
#else
(*p->p_sysent->sv_sendsig)(action, &ksi, &returnmask);
#endif
}
}

View File

@ -521,7 +521,7 @@ runq_choose_from(struct runq *rq, u_char idx)
ts = TAILQ_FIRST(rqh);
KASSERT(ts != NULL, ("runq_choose: no proc on busy queue"));
CTR4(KTR_RUNQ,
"runq_choose_from: pri=%d kse=%p idx=%d rqh=%p",
"runq_choose_from: pri=%d td_sched=%p idx=%d rqh=%p",
pri, ts, ts->ts_rqindex, rqh);
return (ts);
}

View File

@ -412,7 +412,7 @@ mi_switch(int flags, struct thread *newtd)
td->td_generation++; /* bump preempt-detect counter */
PCPU_INC(cnt.v_swtch);
PCPU_SET(switchticks, ticks);
CTR4(KTR_PROC, "mi_switch: old thread %ld (kse %p, pid %ld, %s)",
CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, td->td_name);
#if (KTR_COMPILE & KTR_SCHED) != 0
if (TD_IS_IDLETHREAD(td))
@ -428,20 +428,12 @@ mi_switch(int flags, struct thread *newtd)
"mi_switch: %p(%s) prio %d inhibit %d wmesg %s lock %s",
td, td->td_name, td->td_priority,
td->td_inhibitors, td->td_wmesg, td->td_lockname);
#endif
/*
* We call thread_switchout after the KTR_SCHED prints above so kse
* selecting a new thread to run does not show up as a preemption.
*/
#ifdef KSE
if ((flags & SW_VOL) && (td->td_proc->p_flag & P_SA))
newtd = thread_switchout(td, flags, newtd);
#endif
sched_switch(td, newtd, flags);
CTR3(KTR_SCHED, "mi_switch: running %p(%s) prio %d",
td, td->td_name, td->td_priority);
CTR4(KTR_PROC, "mi_switch: new thread %ld (kse %p, pid %ld, %s)",
CTR4(KTR_PROC, "mi_switch: new thread %ld (td_sched %p, pid %ld, %s)",
td->td_tid, td->td_sched, p->p_pid, td->td_name);
/*
@ -500,8 +492,6 @@ setrunnable(struct thread *td)
/*
* Compute a tenex style load average of a quantity on
* 1, 5 and 15 minute intervals.
* XXXKSE Needs complete rewrite when correct info is available.
* Completely Bogus.. only works with 1:1 (but compiles ok now :-)
*/
static void
loadav(void *arg)

View File

@ -68,43 +68,12 @@ int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
&max_threads_hits, 0, "");
#ifdef KSE
int virtual_cpu;
#endif
TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
static struct mtx zombie_lock;
MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
static void thread_zombie(struct thread *);
#ifdef KSE
static int
sysctl_kse_virtual_cpu(SYSCTL_HANDLER_ARGS)
{
int error, new_val;
int def_val;
def_val = mp_ncpus;
if (virtual_cpu == 0)
new_val = def_val;
else
new_val = virtual_cpu;
error = sysctl_handle_int(oidp, &new_val, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
if (new_val < 0)
return (EINVAL);
virtual_cpu = new_val;
return (0);
}
/* DEBUG ONLY */
SYSCTL_PROC(_kern_threads, OID_AUTO, virtual_cpu, CTLTYPE_INT|CTLFLAG_RW,
0, sizeof(virtual_cpu), sysctl_kse_virtual_cpu, "I",
"debug virtual cpus");
#endif
struct mtx tid_lock;
static struct unrhdr *tid_unrhdr;
@ -230,9 +199,6 @@ void
proc_linkup(struct proc *p, struct thread *td)
{
#ifdef KSE
TAILQ_INIT(&p->p_upcalls); /* upcall list */
#endif
sigqueue_init(&p->p_sigqueue, p);
p->p_ksi = ksiginfo_alloc(1);
if (p->p_ksi != NULL) {
@ -258,9 +224,6 @@ threadinit(void)
thread_zone = uma_zcreate("THREAD", sched_sizeof_thread(),
thread_ctor, thread_dtor, thread_init, thread_fini,
16 - 1, 0);
#ifdef KSE
kseinit(); /* set up kse specific stuff e.g. upcall zone*/
#endif
}
/*
@ -286,7 +249,7 @@ thread_stash(struct thread *td)
}
/*
* Reap zombie kse resource.
* Reap zombie resources.
*/
void
thread_reap(void)
@ -311,9 +274,6 @@ thread_reap(void)
td_first = td_next;
}
}
#ifdef KSE
upcall_reap();
#endif
}
/*
@ -343,12 +303,7 @@ thread_alloc(void)
void
thread_free(struct thread *td)
{
#ifdef KSE
if (td->td_cpuset != NULL)
cpuset_rel(td->td_cpuset);
#else
cpuset_rel(td->td_cpuset);
#endif
td->td_cpuset = NULL;
cpu_thread_free(td);
if (td->td_altkstack != 0)
@ -365,29 +320,7 @@ thread_free(struct thread *td)
* Because we can't free a thread while we're operating under its context,
* push the current thread into our CPU's deadthread holder. This means
* we needn't worry about someone else grabbing our context before we
* do a cpu_throw(). This may not be needed now as we are under schedlock.
* Maybe we can just do a thread_stash() as thr_exit1 does.
*/
/* XXX
* libthr expects its thread exit to return for the last
* thread, meaning that the program is back to non-threaded
* mode I guess. Because we do this (cpu_throw) unconditionally
* here, they have their own version of it. (thr_exit1())
* that doesn't do it all if this was the last thread.
* It is also called from thread_suspend_check().
* Of course in the end, they end up coming here through exit1
* anyhow.. After fixing 'thr' to play by the rules we should be able
* to merge these two functions together.
*
* called from:
* exit1()
* kse_exit()
* thr_exit()
* ifdef KSE
* thread_user_enter()
* thread_userret()
* endif
* thread_suspend_check()
* do a cpu_throw().
*/
void
thread_exit(void)
@ -413,17 +346,6 @@ thread_exit(void)
AUDIT_SYSCALL_EXIT(0, td);
#endif
#ifdef KSE
if (td->td_standin != NULL) {
/*
* Note that we don't need to free the cred here as it
* is done in thread_reap().
*/
thread_zombie(td->td_standin);
td->td_standin = NULL;
}
#endif
umtx_thread_exit(td);
/*
@ -453,11 +375,7 @@ thread_exit(void)
if (p->p_flag & P_HADTHREADS) {
if (p->p_numthreads > 1) {
thread_lock(td);
#ifdef KSE
kse_unlink(td);
#else
thread_unlink(td);
#endif
thread_unlock(td);
td2 = FIRST_THREAD_IN_PROC(p);
sched_exit_thread(td2, td);
@ -480,16 +398,6 @@ thread_exit(void)
} else {
/*
* The last thread is exiting.. but not through exit()
* what should we do?
* Theoretically this can't happen
* exit1() - clears threading flags before coming here
* kse_exit() - treats last thread specially
* thr_exit() - treats last thread specially
* ifdef KSE
* thread_user_enter() - only if more exist
* thread_userret() - only if more exist
* endif
* thread_suspend_check() - only if more exist
*/
panic ("thread_exit: Last thread exiting on its own");
}
@ -518,16 +426,6 @@ thread_wait(struct proc *p)
mtx_assert(&Giant, MA_NOTOWNED);
KASSERT((p->p_numthreads == 1), ("Multiple threads in wait1()"));
td = FIRST_THREAD_IN_PROC(p);
#ifdef KSE
if (td->td_standin != NULL) {
if (td->td_standin->td_ucred != NULL) {
crfree(td->td_standin->td_ucred);
td->td_standin->td_ucred = NULL;
}
thread_free(td->td_standin);
td->td_standin = NULL;
}
#endif
/* Lock the last thread so we spin until it exits cpu_throw(). */
thread_lock(td);
thread_unlock(td);
@ -545,13 +443,6 @@ thread_wait(struct proc *p)
* Link a thread to a process.
* set up anything that needs to be initialized for it to
* be used by the process.
*
* Note that we do not link to the proc's ucred here.
* The thread is linked as if running but no KSE assigned.
* Called from:
* proc_linkup()
* thread_schedule_upcall()
* thr_create()
*/
void
thread_link(struct thread *td, struct proc *p)
@ -577,9 +468,6 @@ thread_link(struct thread *td, struct proc *p)
/*
* Convert a process with one thread to an unthreaded process.
* Called from:
* thread_single(exit) (called from execve and exit)
* kse_exit() XXX may need cleaning up wrt KSE stuff
*/
void
thread_unthread(struct thread *td)
@ -587,20 +475,7 @@ thread_unthread(struct thread *td)
struct proc *p = td->td_proc;
KASSERT((p->p_numthreads == 1), ("Unthreading with >1 threads"));
#ifdef KSE
thread_lock(td);
upcall_remove(td);
thread_unlock(td);
p->p_flag &= ~(P_SA|P_HADTHREADS);
td->td_mailbox = NULL;
td->td_pflags &= ~(TDP_SA | TDP_CAN_UNBIND);
if (td->td_standin != NULL) {
thread_zombie(td->td_standin);
td->td_standin = NULL;
}
#else
p->p_flag &= ~P_HADTHREADS;
#endif
}
/*

View File

@ -156,7 +156,7 @@ sched_getparam(struct thread *td, struct sched_getparam_args *uap)
if (targetp == NULL) {
return (ESRCH);
}
targettd = FIRST_THREAD_IN_PROC(targetp); /* XXXKSE */
targettd = FIRST_THREAD_IN_PROC(targetp);
}
e = p_cansee(td, targetp);
@ -223,7 +223,7 @@ sched_getscheduler(struct thread *td, struct sched_getscheduler_args *uap)
e = ESRCH;
goto done2;
}
targettd = FIRST_THREAD_IN_PROC(targetp); /* XXXKSE */
targettd = FIRST_THREAD_IN_PROC(targetp);
}
e = p_cansee(td, targetp);

View File

@ -383,8 +383,6 @@ schedcpu(void)
/*
* ts_pctcpu is only for ps and ttyinfo().
* Do it per td_sched, and add them up at the end?
* XXXKSE
*/
ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
/*

View File

@ -2013,15 +2013,6 @@ sched_exit_thread(struct thread *td, struct thread *child)
CTR3(KTR_SCHED, "sched_exit_thread: %p(%s) prio %d",
child, child->td_name, child->td_priority);
#ifdef KSE
/*
* KSE forks and exits so often that this penalty causes short-lived
* threads to always be non-interactive. This causes mozilla to
* crawl under load.
*/
if ((td->td_pflags & TDP_SA) && td->td_proc == child->td_proc)
return;
#endif
/*
* Give the child's runtime to the parent without returning the
* sleep time as a penalty to the parent. This causes shells that

View File

@ -410,13 +410,8 @@ sleepq_catch_signals(void *wchan, int pri)
PROC_UNLOCK(p);
thread_lock(td);
if (ret == 0) {
if (!(td->td_flags & TDF_INTERRUPT)) {
sleepq_switch(wchan, pri);
return (0);
}
/* KSE threads tried unblocking us. */
ret = td->td_intrval;
MPASS(ret == EINTR || ret == ERESTART || ret == EWOULDBLOCK);
sleepq_switch(wchan, pri);
return (0);
}
/*
* There were pending signals and this thread is still
@ -540,9 +535,6 @@ sleepq_check_signals(void)
return (td->td_intrval);
}
if (td->td_flags & TDF_INTERRUPT)
return (td->td_intrval);
return (0);
}

View File

@ -119,15 +119,6 @@ userret(struct thread *td, struct trapframe *frame)
thread_suspend_check(0); /* Can suspend or kill */
PROC_UNLOCK(p);
}
#ifdef KSE
/*
* Do special thread processing, e.g. upcall tweaking and such.
*/
if (p->p_flag & P_SA)
thread_userret(td, frame);
#endif
/*
* Charge system time if profiling.
*/
@ -135,7 +126,6 @@ userret(struct thread *td, struct trapframe *frame)
addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);
}
/*
* Let the scheduler adjust our priority etc.
*/
@ -173,11 +163,6 @@ ast(struct trapframe *framep)
td->td_frame = framep;
td->td_pticks = 0;
#ifdef KSE
if ((p->p_flag & P_SA) && (td->td_mailbox == NULL))
thread_user_enter(td);
#endif
/*
* This updates the td_flag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
@ -188,18 +173,11 @@ ast(struct trapframe *framep)
thread_lock(td);
flags = td->td_flags;
td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK |
TDF_NEEDRESCHED | TDF_INTERRUPT | TDF_ALRMPEND | TDF_PROFPEND |
TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND |
TDF_MACPEND);
thread_unlock(td);
PCPU_INC(cnt.v_trap);
/*
* XXXKSE While the fact that we owe a user profiling
* tick is stored per thread in this code, the statistics
* themselves are still stored per process.
* This should probably change, by which I mean that
* possibly the location of both might change.
*/
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {

View File

@ -450,7 +450,6 @@ static struct witness_order_list_entry order_lists[] = {
#endif
{ "clk", &lock_class_mtx_spin },
{ "mprof lock", &lock_class_mtx_spin },
{ "kse lock", &lock_class_mtx_spin },
{ "zombie lock", &lock_class_mtx_spin },
{ "ALD Queue", &lock_class_mtx_spin },
#ifdef __ia64__

View File

@ -803,11 +803,6 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
* you should use PT_SUSPEND to suspend it before
* continuing process.
*/
#ifdef KSE
PROC_SUNLOCK(p);
thread_continued(p);
PROC_SLOCK(p);
#endif
p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
thread_unsuspend(p);
PROC_SUNLOCK(p);
@ -943,17 +938,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
pl->pl_event = PL_EVENT_SIGNAL;
else
pl->pl_event = 0;
#ifdef KSE
if (td2->td_pflags & TDP_SA) {
pl->pl_flags = PL_FLAG_SA;
if (td2->td_upcall && !TD_CAN_UNBIND(td2))
pl->pl_flags |= PL_FLAG_BOUND;
} else {
pl->pl_flags = 0;
}
#else
pl->pl_flags = 0;
#endif
pl->pl_sigmask = td2->td_sigmask;
pl->pl_siglist = td2->td_siglist;
break;

View File

@ -386,11 +386,11 @@ const char *syscallnames[] = {
"eaccess", /* 376 = eaccess */
"#377", /* 377 = afs_syscall */
"nmount", /* 378 = nmount */
"kse_exit", /* 379 = kse_exit */
"kse_wakeup", /* 380 = kse_wakeup */
"kse_create", /* 381 = kse_create */
"kse_thr_interrupt", /* 382 = kse_thr_interrupt */
"kse_release", /* 383 = kse_release */
"#379", /* 379 = kse_exit */
"#380", /* 380 = kse_wakeup */
"#381", /* 381 = kse_create */
"#382", /* 382 = kse_thr_interrupt */
"#383", /* 383 = kse_release */
"__mac_get_proc", /* 384 = __mac_get_proc */
"__mac_set_proc", /* 385 = __mac_set_proc */
"__mac_get_fd", /* 386 = __mac_get_fd */
@ -447,7 +447,7 @@ const char *syscallnames[] = {
"extattr_list_fd", /* 437 = extattr_list_fd */
"extattr_list_file", /* 438 = extattr_list_file */
"extattr_list_link", /* 439 = extattr_list_link */
"kse_switchin", /* 440 = kse_switchin */
"#440", /* 440 = kse_switchin */
"ksem_timedwait", /* 441 = ksem_timedwait */
"thr_suspend", /* 442 = thr_suspend */
"thr_wake", /* 443 = thr_wake */

View File

@ -664,14 +664,11 @@
377 AUE_NULL UNIMPL afs_syscall
378 AUE_NMOUNT STD { int nmount(struct iovec *iovp, \
unsigned int iovcnt, int flags); }
379 AUE_NULL STD { int kse_exit(void); }
380 AUE_NULL STD { int kse_wakeup(struct kse_mailbox *mbx); }
381 AUE_NULL STD { int kse_create(struct kse_mailbox *mbx, \
int newgroup); }
382 AUE_NULL STD { int kse_thr_interrupt( \
struct kse_thr_mailbox *tmbx, int cmd, \
long data); }
383 AUE_NULL STD { int kse_release(struct timespec *timeout); }
379 AUE_NULL UNIMPL kse_exit
380 AUE_NULL UNIMPL kse_wakeup
381 AUE_NULL UNIMPL kse_create
382 AUE_NULL UNIMPL kse_thr_interrupt
383 AUE_NULL UNIMPL kse_release
384 AUE_NULL STD { int __mac_get_proc(struct mac *mac_p); }
385 AUE_NULL STD { int __mac_set_proc(struct mac *mac_p); }
386 AUE_NULL STD { int __mac_get_fd(int fd, \
@ -772,9 +769,7 @@
439 AUE_EXTATTR_LIST_LINK STD { ssize_t extattr_list_link( \
const char *path, int attrnamespace, \
void *data, size_t nbytes); }
440 AUE_NULL STD { int kse_switchin( \
struct kse_thr_mailbox *tmbx, \
int flags); }
440 AUE_NULL UNIMPL kse_switchin
441 AUE_NULL NOSTD { int ksem_timedwait(semid_t id, \
const struct timespec *abstime); }
442 AUE_NULL STD { int thr_suspend( \

View File

@ -2067,42 +2067,6 @@ systrace_args(int sysnum, void *params, u_int64_t *uarg, int *n_args)
*n_args = 3;
break;
}
/* kse_exit */
case 379: {
*n_args = 0;
break;
}
/* kse_wakeup */
case 380: {
struct kse_wakeup_args *p = params;
uarg[0] = (intptr_t) p->mbx; /* struct kse_mailbox * */
*n_args = 1;
break;
}
/* kse_create */
case 381: {
struct kse_create_args *p = params;
uarg[0] = (intptr_t) p->mbx; /* struct kse_mailbox * */
iarg[1] = p->newgroup; /* int */
*n_args = 2;
break;
}
/* kse_thr_interrupt */
case 382: {
struct kse_thr_interrupt_args *p = params;
uarg[0] = (intptr_t) p->tmbx; /* struct kse_thr_mailbox * */
iarg[1] = p->cmd; /* int */
iarg[2] = p->data; /* long */
*n_args = 3;
break;
}
/* kse_release */
case 383: {
struct kse_release_args *p = params;
uarg[0] = (intptr_t) p->timeout; /* struct timespec * */
*n_args = 1;
break;
}
/* __mac_get_proc */
case 384: {
struct __mac_get_proc_args *p = params;
@ -2534,14 +2498,6 @@ systrace_args(int sysnum, void *params, u_int64_t *uarg, int *n_args)
*n_args = 4;
break;
}
/* kse_switchin */
case 440: {
struct kse_switchin_args *p = params;
uarg[0] = (intptr_t) p->tmbx; /* struct kse_thr_mailbox * */
iarg[1] = p->flags; /* int */
*n_args = 2;
break;
}
/* ksem_timedwait */
case 441: {
struct ksem_timedwait_args *p = params;

View File

@ -24,6 +24,3 @@ device uart_ns8250
# Default partitioning schemes
options GEOM_BSD
options GEOM_PC98
# KSE support went from being default to a kernel option
options KSE

View File

@ -2148,7 +2148,7 @@ init386(first)
_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
/* setup proc 0's pcb */
thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
thread0.td_pcb->pcb_flags = 0;
thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
thread0.td_pcb->pcb_ext = 0;
thread0.td_frame = &proc0_tf;

View File

@ -349,11 +349,6 @@ syscall(struct trapframe *frame)
PCPU_INC(cnt.v_syscall);
#ifdef KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
code = frame->fixreg[0];
params = (caddr_t)(frame->fixreg + FIRSTARG);
n = NARGREG;

View File

@ -264,7 +264,7 @@ is_physical_memory(addr)
}
/*
* KSE functions
* Threading functions
*/
void
cpu_thread_exit(struct thread *td)

View File

@ -343,11 +343,6 @@ syscall(struct trapframe *frame)
PCPU_INC(cnt.v_syscall);
#if KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
code = frame->fixreg[0];
params = (caddr_t)(frame->fixreg + FIRSTARG);
n = NARGREG;

View File

@ -408,7 +408,7 @@ is_physical_memory(vm_offset_t addr)
}
/*
* KSE functions
* Thread functions
*/
void
cpu_thread_exit(struct thread *td)

View File

@ -14,6 +14,3 @@ device uart_z8530
options GEOM_PART_APM
options GEOM_PART_MBR
# KSE support went from being default to a kernel option
options KSE

View File

@ -13,9 +13,6 @@ device uart_ns8250
device uart_sab82532
device uart_z8530
# KSE support went from being default to a kernel option
options KSE
# Default partitioning schemes
options GEOM_BSD
options GEOM_SUNLABEL

View File

@ -529,10 +529,6 @@ syscall(struct trapframe *tf)
td->td_frame = tf;
if (td->td_ucred != p->p_ucred)
cred_update_thread(td);
#ifdef KSE
if (p->p_flag & P_SA)
thread_user_enter(td);
#endif
code = tf->tf_global[1];
/*

View File

@ -1,135 +0,0 @@
/*-
* Copyright (C) 2001 Julian Elischer <julian@freebsd.org>
* for the FreeBSD Foundation.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_KSE_H_
#define _SYS_KSE_H_
#include <sys/ucontext.h>
#include <sys/time.h>
#include <sys/signal.h>
/*
* This file defines the structures needed for communication between
* the userland and the kernel when running a KSE-based threading system.
* The only programs that should see this file are the user thread
* scheduler (UTS) and the kernel.
*/
struct kse_mailbox;
typedef void kse_func_t(struct kse_mailbox *);
/*
* Thread mailbox.
*
* This describes a user thread to the kernel scheduler.
*/
struct kse_thr_mailbox {
ucontext_t tm_context; /* User and machine context */
uint32_t tm_flags; /* Thread flags */
struct kse_thr_mailbox *tm_next; /* Next thread in list */
void *tm_udata; /* For use by the UTS */
uint32_t tm_uticks; /* Time in userland */
uint32_t tm_sticks; /* Time in kernel */
siginfo_t tm_syncsig;
uint32_t tm_dflags; /* Debug flags */
lwpid_t tm_lwp; /* kernel thread UTS runs on */
uint32_t __spare__[6];
};
/*
* KSE mailbox.
*
* Communication path between the UTS and the kernel scheduler specific to
* a single KSE.
*/
struct kse_mailbox {
uint32_t km_version; /* Mailbox version */
struct kse_thr_mailbox *km_curthread; /* Currently running thread */
struct kse_thr_mailbox *km_completed; /* Threads back from kernel */
sigset_t km_sigscaught; /* Caught signals */
uint32_t km_flags; /* Mailbox flags */
kse_func_t *km_func; /* UTS function */
stack_t km_stack; /* UTS stack */
void *km_udata; /* For use by the UTS */
struct timespec km_timeofday; /* Time of day */
uint32_t km_quantum; /* Upcall quantum in msecs */
lwpid_t km_lwp; /* kernel thread UTS runs on */
uint32_t __spare2__[7];
};
#define KSE_VER_0 0
#define KSE_VERSION KSE_VER_0
/* These flags are kept in km_flags */
#define KMF_NOUPCALL 0x01
#define KMF_NOCOMPLETED 0x02
#define KMF_DONE 0x04
#define KMF_BOUND 0x08
#define KMF_WAITSIGEVENT 0x10
/* These flags are kept in tm_flags */
#define TMF_NOUPCALL 0x01
/* These flags are kept in tm_dlfags */
#define TMDF_SSTEP 0x01
#define TMDF_SUSPEND 0x02
/* Flags for kse_switchin */
#define KSE_SWITCHIN_SETTMBX 0x01
/* Commands for kse_thr_interrupt */
#define KSE_INTR_INTERRUPT 1
#define KSE_INTR_RESTART 2
#define KSE_INTR_SENDSIG 3
#define KSE_INTR_SIGEXIT 4
#define KSE_INTR_DBSUSPEND 5
#define KSE_INTR_EXECVE 6
struct kse_execve_args {
sigset_t sigmask;
sigset_t sigpend;
char *path;
char **argv;
char **envp;
void *reserved;
};
#ifndef _KERNEL
int kse_create(struct kse_mailbox *, int);
int kse_exit(void);
int kse_release(struct timespec *);
int kse_thr_interrupt(struct kse_thr_mailbox *, int, long);
int kse_wakeup(struct kse_mailbox *);
int kse_switchin(struct kse_thr_mailbox *, int flags);
#endif /* !_KERNEL */
#endif /* !_SYS_KSE_H_ */

View File

@ -165,37 +165,6 @@ struct turnstile;
struct mqueue_notifier;
struct cpuset;
/*
* Here we define the two structures used for process information.
*
* The first is the thread. It might be thought of as a "Kernel
* Schedulable Entity Context".
* This structure contains all the information as to where a thread of
* execution is now, or was when it was suspended, why it was suspended,
* and anything else that will be needed to restart it when it is
* rescheduled. It includes a scheduler specific substructure that is different
* for each scheduler.
*
* M:N notes.
* It is important to remember that when using M:N threading,
* a particular thread structure may only exist as long as
* the system call or kernel entrance (e.g. by pagefault)
* which it is currently executing. It should therefore NEVER be referenced
* by pointers in long lived structures that live longer than a single
* request. If several threads complete their work at the same time,
* they will all rewind their stacks to the user boundary, report their
* completion state, and all but one will be freed. That last one will
* be kept to provide a kernel stack and pcb for the NEXT syscall or kernel
* entrance (basically to save freeing and then re-allocating it). The existing
* thread keeps a cached spare thread available to allow it to quickly
* get one when it needs a new one. There is also a system
* cache of free threads. Threads have priority and partake in priority
* inheritance schemes.
*
* The second is the proc (process) which owns all the resources of a process
* other than CPU cycles, which are parceled out to the threads.
*/
/*
* Kernel runnable context (thread).
* This is what is put to sleep and reactivated.
@ -218,7 +187,7 @@ struct thread {
sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
#define td_siglist td_sigqueue.sq_signals
/* Cleared during fork1() or thread_schedule_upcall(). */
/* Cleared during fork1() */
#define td_startzero td_flags
int td_flags; /* (t) TDF_* flags. */
int td_inhibitors; /* (t) Why can not run. */
@ -239,10 +208,7 @@ struct thread {
struct lock_list_entry *td_sleeplocks; /* (k) Held sleep locks. */
int td_intr_nesting_level; /* (k) Interrupt recursion. */
int td_pinned; /* (k) Temporary cpu pin count. */
struct kse_thr_mailbox *td_mailbox; /* (*) Userland mailbox address. */
struct ucred *td_ucred; /* (k) Reference to credentials. */
struct thread *td_standin; /* (k + a) Use this for an upcall. */
struct kse_upcall *td_upcall; /* (k + t) Upcall structure. */
u_int td_estcpu; /* (t) estimated cpu utilization */
u_int td_slptick; /* (t) Time at sleep. */
struct rusage td_ru; /* (t) rusage information */
@ -254,12 +220,11 @@ struct thread {
u_int td_uticks; /* (t) Statclock hits in user mode. */
u_int td_uuticks; /* (k) Statclock hits (usr), for UTS. */
u_int td_usticks; /* (k) Statclock hits (sys), for UTS. */
int td_intrval; /* (t) Return value of TDF_INTERRUPT. */
int td_intrval; /* (t) Return value for sleepq. */
sigset_t td_oldsigmask; /* (k) Saved mask from pre sigpause. */
sigset_t td_sigmask; /* (c) Current signal mask. */
volatile u_int td_generation; /* (k) For detection of preemption */
stack_t td_sigstk; /* (k) Stack ptr and on-stack flag. */
int td_kflags; /* (c) Flags for KSE threading. */
int td_xsig; /* (c) Signal for ptrace */
u_long td_profil_addr; /* (k) Temporary addr until AST. */
u_int td_profil_ticks; /* (k) Temporary ticks until AST. */
@ -342,7 +307,7 @@ do { \
#define TDF_BOUNDARY 0x00000400 /* Thread suspended at user boundary */
#define TDF_ASTPENDING 0x00000800 /* Thread has some asynchronous events. */
#define TDF_TIMOFAIL 0x00001000 /* Timeout from sleep after we were awake. */
#define TDF_INTERRUPT 0x00002000 /* Thread is marked as interrupted. */
#define TDF_UNUSED2000 0x00002000 /* --available-- */
#define TDF_UPIBLOCKED 0x00004000 /* Thread blocked on user PI mutex. */
#define TDF_UNUSED15 0x00008000 /* --available-- */
#define TDF_NEEDRESCHED 0x00010000 /* Thread needs to yield. */
@ -368,15 +333,15 @@ do { \
#define TDP_OLDMASK 0x00000001 /* Need to restore mask after suspend. */
#define TDP_INKTR 0x00000002 /* Thread is currently in KTR code. */
#define TDP_INKTRACE 0x00000004 /* Thread is currently in KTRACE code. */
#define TDP_UPCALLING 0x00000008 /* This thread is doing an upcall. */
#define TDP_UNUSED8 0x00000008 /* available */
#define TDP_COWINPROGRESS 0x00000010 /* Snapshot copy-on-write in progress. */
#define TDP_ALTSTACK 0x00000020 /* Have alternate signal stack. */
#define TDP_DEADLKTREAT 0x00000040 /* Lock aquisition - deadlock treatment. */
#define TDP_SA 0x00000080 /* A scheduler activation based thread. */
#define TDP_UNUSED80 0x00000080 /* available. */
#define TDP_NOSLEEPING 0x00000100 /* Thread is not allowed to sleep on a sq. */
#define TDP_OWEUPC 0x00000200 /* Call addupc() at next AST. */
#define TDP_ITHREAD 0x00000400 /* Thread is an interrupt thread. */
#define TDP_CAN_UNBIND 0x00000800 /* Only temporarily bound. */
#define TDP_UNUSED800 0x00000800 /* available. */
#define TDP_SCHED1 0x00001000 /* Reserved for scheduler private use */
#define TDP_SCHED2 0x00002000 /* Reserved for scheduler private use */
#define TDP_SCHED3 0x00004000 /* Reserved for scheduler private use */
@ -399,17 +364,6 @@ do { \
#define TDI_LOCK 0x0008 /* Stopped on a lock. */
#define TDI_IWAIT 0x0010 /* Awaiting interrupt. */
/*
* flags (in kflags) related to M:N threading.
*/
#define TDK_KSEREL 0x0001 /* Blocked in msleep on p->p_completed. */
#define TDK_KSERELSIG 0x0002 /* Blocked in msleep on p->p_siglist. */
#define TDK_WAKEUP 0x0004 /* Thread has been woken by kse_wakeup. */
#define TD_CAN_UNBIND(td) \
(((td)->td_pflags & TDP_CAN_UNBIND) && \
((td)->td_upcall != NULL))
#define TD_IS_SLEEPING(td) ((td)->td_inhibitors & TDI_SLEEPING)
#define TD_ON_SLEEPQ(td) ((td)->td_wchan != NULL)
#define TD_IS_SUSPENDED(td) ((td)->td_inhibitors & TDI_SUSPENDED)
@ -421,11 +375,7 @@ do { \
#define TD_CAN_RUN(td) ((td)->td_state == TDS_CAN_RUN)
#define TD_IS_INHIBITED(td) ((td)->td_state == TDS_INHIBITED)
#define TD_ON_UPILOCK(td) ((td)->td_flags & TDF_UPIBLOCKED)
#if 0
#define TD_IS_IDLETHREAD(td) ((td) == pcpu(idlethread))
#else
#define TD_IS_IDLETHREAD(td) ((td)->td_flags & TDF_IDLETD)
#endif
#define TD_SET_INHIB(td, inhib) do { \
@ -456,24 +406,6 @@ do { \
#define TD_SET_RUNQ(td) (td)->td_state = TDS_RUNQ
#define TD_SET_CAN_RUN(td) (td)->td_state = TDS_CAN_RUN
/*
* An upcall is used when returning to userland. If a thread does not have
* an upcall on return to userland the thread exports its context and exits.
*/
struct kse_upcall {
TAILQ_ENTRY(kse_upcall) ku_link; /* List of upcalls in proc. */
struct proc *ku_proc; /* Associated proc. */
struct thread *ku_owner; /* Owning thread. */
int ku_flags; /* KUF_* flags. */
struct kse_mailbox *ku_mailbox; /* Userland mailbox address. */
stack_t ku_stack; /* Userland upcall stack. */
void *ku_func; /* Userland upcall function. */
unsigned int ku_mflags; /* Cached upcall mbox flags. */
};
#define KUF_DOUPCALL 0x00001 /* Do upcall now; don't wait. */
#define KUF_EXITING 0x00002 /* Upcall structure is exiting. */
/*
* XXX: Does this belong in resource.h or resourcevar.h instead?
* Resource usage extension. The times in rusage structs in the kernel are
@ -495,18 +427,15 @@ struct rusage_ext {
};
/*
* The old fashionned process. May have multiple threads.
* Starts off with a single embedded THREAD.
* Process structure.
*/
struct proc {
LIST_ENTRY(proc) p_list; /* (d) List of all processes. */
TAILQ_HEAD(, thread) p_threads; /* (j) all threads. */
TAILQ_HEAD(, kse_upcall) p_upcalls; /* (j) All upcalls in the proc. */
struct mtx p_slock; /* process spin lock */
struct ucred *p_ucred; /* (c) Process owner's identity. */
struct filedesc *p_fd; /* (b) Open files. */
struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
/* Accumulated stats for all threads? */
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
struct plimit *p_limit; /* (c) Process limits. */
struct callout p_limco; /* (c) Limit callout handle */
@ -565,11 +494,6 @@ struct proc {
int p_boundary_count;/* (c) Num threads at user boundary */
int p_pendingcnt; /* how many signals are pending */
struct itimers *p_itimers; /* (c) POSIX interval timers. */
int p_numupcalls; /* (j) Num upcalls. */
int p_upsleeps; /* (c) Num threads in kse_release(). */
struct kse_thr_mailbox *p_completed; /* (c) Completed thread mboxes. */
int p_nextupcall; /* (n) Next upcall time. */
int p_upquantum; /* (n) Quantum to schedule an upcall. */
/* End area that is zeroed on creation. */
#define p_endzero p_magic
@ -627,7 +551,7 @@ struct proc {
#define P_WAITED 0x01000 /* Someone is waiting for us. */
#define P_WEXIT 0x02000 /* Working on exiting. */
#define P_EXEC 0x04000 /* Process called exec. */
#define P_SA 0x08000 /* Using scheduler activations. */
#define P_UNUSED8000 0x08000 /* available. */
#define P_CONTINUED 0x10000 /* Proc has continued from a stopped state. */
#define P_STOPPED_SIG 0x20000 /* Stopped due to SIGSTOP/SIGTSTP. */
#define P_STOPPED_TRACE 0x40000 /* Stopped because of tracing. */
@ -673,8 +597,6 @@ struct proc {
#define SINGLE_EXIT 1
#define SINGLE_BOUNDARY 2
/* XXXKSE: Missing values for thread_suspend_check(). */
#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_PARGS);
MALLOC_DECLARE(M_PGRP);
@ -687,10 +609,7 @@ MALLOC_DECLARE(M_ZOMBIE);
LIST_FOREACH((p), &allproc, p_list)
#define FOREACH_THREAD_IN_PROC(p, td) \
TAILQ_FOREACH((td), &(p)->p_threads, td_plist)
#define FOREACH_UPCALL_IN_PROC(p, ku) \
TAILQ_FOREACH((ku), &(p)->p_upcalls, ku_link)
/* XXXKSE the following lines should probably only be used in 1:1 code: */
#define FIRST_THREAD_IN_PROC(p) TAILQ_FIRST(&(p)->p_threads)
/*
@ -884,15 +803,9 @@ void exit1(struct thread *, int) __dead2;
void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_set_fork_handler(struct thread *, void (*)(void *), void *);
/* New in KSE. */
#ifdef KSE
void kse_unlink(struct thread *);
void kseinit(void);
void upcall_reap(void);
void upcall_remove(struct thread *td);
#endif
void cpu_set_upcall(struct thread *td, struct thread *td0);
void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *, stack_t *);
void cpu_set_upcall_kse(struct thread *, void (*)(void *), void *,
stack_t *);
int cpu_set_user_tls(struct thread *, void *tls_base);
void cpu_thread_alloc(struct thread *);
void cpu_thread_clean(struct thread *);
@ -901,17 +814,13 @@ void cpu_thread_free(struct thread *);
void cpu_thread_swapin(struct thread *);
void cpu_thread_swapout(struct thread *);
struct thread *thread_alloc(void);
void thread_continued(struct proc *p);
void thread_exit(void) __dead2;
int thread_export_context(struct thread *td, int willexit);
void thread_free(struct thread *td);
void thread_link(struct thread *td, struct proc *p);
void thread_reap(void);
void thread_signal_add(struct thread *td, ksiginfo_t *);
int thread_single(int how);
void thread_single_end(void);
void thread_stash(struct thread *td);
int thread_statclock(int user);
void thread_stopped(struct proc *p);
void childproc_stopped(struct proc *child, int reason);
void childproc_continued(struct proc *child);
@ -919,14 +828,10 @@ void childproc_exited(struct proc *child);
int thread_suspend_check(int how);
void thread_suspend_switch(struct thread *);
void thread_suspend_one(struct thread *td);
struct thread *thread_switchout(struct thread *td, int flags,
struct thread *newtd);
void thread_unlink(struct thread *td);
void thread_unsuspend(struct proc *p);
void thread_unsuspend_one(struct thread *td);
void thread_unthread(struct thread *td);
int thread_userret(struct thread *td, struct trapframe *frame);
void thread_user_enter(struct thread *td);
void thread_wait(struct proc *p);
struct thread *thread_find(struct proc *p, lwpid_t tid);
void thr_exit1(void);

View File

@ -82,11 +82,6 @@ int sched_runnable(void);
void sched_exit(struct proc *p, struct thread *childtd);
void sched_fork(struct thread *td, struct thread *childtd);
void sched_fork_exit(struct thread *td);
/*
* KSE Groups contain scheduling priority information. They record the
* behavior of groups of KSEs and threads.
*/
void sched_class(struct thread *td, int class);
void sched_nice(struct proc *p, int nice);

View File

@ -306,11 +306,6 @@
#define SYS_nfsclnt 375
#define SYS_eaccess 376
#define SYS_nmount 378
#define SYS_kse_exit 379
#define SYS_kse_wakeup 380
#define SYS_kse_create 381
#define SYS_kse_thr_interrupt 382
#define SYS_kse_release 383
#define SYS___mac_get_proc 384
#define SYS___mac_set_proc 385
#define SYS___mac_get_fd 386
@ -363,7 +358,6 @@
#define SYS_extattr_list_fd 437
#define SYS_extattr_list_file 438
#define SYS_extattr_list_link 439
#define SYS_kse_switchin 440
#define SYS_ksem_timedwait 441
#define SYS_thr_suspend 442
#define SYS_thr_wake 443

View File

@ -255,11 +255,6 @@ MIASM = \
nfsclnt.o \
eaccess.o \
nmount.o \
kse_exit.o \
kse_wakeup.o \
kse_create.o \
kse_thr_interrupt.o \
kse_release.o \
__mac_get_proc.o \
__mac_set_proc.o \
__mac_get_fd.o \
@ -312,7 +307,6 @@ MIASM = \
extattr_list_fd.o \
extattr_list_file.o \
extattr_list_link.o \
kse_switchin.o \
ksem_timedwait.o \
thr_suspend.o \
thr_wake.o \

View File

@ -1094,24 +1094,6 @@ struct nmount_args {
char iovcnt_l_[PADL_(unsigned int)]; unsigned int iovcnt; char iovcnt_r_[PADR_(unsigned int)];
char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
};
struct kse_exit_args {
register_t dummy;
};
struct kse_wakeup_args {
char mbx_l_[PADL_(struct kse_mailbox *)]; struct kse_mailbox * mbx; char mbx_r_[PADR_(struct kse_mailbox *)];
};
struct kse_create_args {
char mbx_l_[PADL_(struct kse_mailbox *)]; struct kse_mailbox * mbx; char mbx_r_[PADR_(struct kse_mailbox *)];
char newgroup_l_[PADL_(int)]; int newgroup; char newgroup_r_[PADR_(int)];
};
struct kse_thr_interrupt_args {
char tmbx_l_[PADL_(struct kse_thr_mailbox *)]; struct kse_thr_mailbox * tmbx; char tmbx_r_[PADR_(struct kse_thr_mailbox *)];
char cmd_l_[PADL_(int)]; int cmd; char cmd_r_[PADR_(int)];
char data_l_[PADL_(long)]; long data; char data_r_[PADR_(long)];
};
struct kse_release_args {
char timeout_l_[PADL_(struct timespec *)]; struct timespec * timeout; char timeout_r_[PADR_(struct timespec *)];
};
struct __mac_get_proc_args {
char mac_p_l_[PADL_(struct mac *)]; struct mac * mac_p; char mac_p_r_[PADR_(struct mac *)];
};
@ -1335,10 +1317,6 @@ struct extattr_list_link_args {
char data_l_[PADL_(void *)]; void * data; char data_r_[PADR_(void *)];
char nbytes_l_[PADL_(size_t)]; size_t nbytes; char nbytes_r_[PADR_(size_t)];
};
struct kse_switchin_args {
char tmbx_l_[PADL_(struct kse_thr_mailbox *)]; struct kse_thr_mailbox * tmbx; char tmbx_r_[PADR_(struct kse_thr_mailbox *)];
char flags_l_[PADL_(int)]; int flags; char flags_r_[PADR_(int)];
};
struct ksem_timedwait_args {
char id_l_[PADL_(semid_t)]; semid_t id; char id_r_[PADR_(semid_t)];
char abstime_l_[PADL_(const struct timespec *)]; const struct timespec * abstime; char abstime_r_[PADR_(const struct timespec *)];
@ -1800,11 +1778,6 @@ int __setugid(struct thread *, struct __setugid_args *);
int nfsclnt(struct thread *, struct nfsclnt_args *);
int eaccess(struct thread *, struct eaccess_args *);
int nmount(struct thread *, struct nmount_args *);
int kse_exit(struct thread *, struct kse_exit_args *);
int kse_wakeup(struct thread *, struct kse_wakeup_args *);
int kse_create(struct thread *, struct kse_create_args *);
int kse_thr_interrupt(struct thread *, struct kse_thr_interrupt_args *);
int kse_release(struct thread *, struct kse_release_args *);
int __mac_get_proc(struct thread *, struct __mac_get_proc_args *);
int __mac_set_proc(struct thread *, struct __mac_set_proc_args *);
int __mac_get_fd(struct thread *, struct __mac_get_fd_args *);
@ -1857,7 +1830,6 @@ int jail_attach(struct thread *, struct jail_attach_args *);
int extattr_list_fd(struct thread *, struct extattr_list_fd_args *);
int extattr_list_file(struct thread *, struct extattr_list_file_args *);
int extattr_list_link(struct thread *, struct extattr_list_link_args *);
int kse_switchin(struct thread *, struct kse_switchin_args *);
int ksem_timedwait(struct thread *, struct ksem_timedwait_args *);
int thr_suspend(struct thread *, struct thr_suspend_args *);
int thr_wake(struct thread *, struct thr_wake_args *);
@ -2371,11 +2343,6 @@ int freebsd4_sigreturn(struct thread *, struct freebsd4_sigreturn_args *);
#define SYS_AUE_nfsclnt AUE_NULL
#define SYS_AUE_eaccess AUE_EACCESS
#define SYS_AUE_nmount AUE_NMOUNT
#define SYS_AUE_kse_exit AUE_NULL
#define SYS_AUE_kse_wakeup AUE_NULL
#define SYS_AUE_kse_create AUE_NULL
#define SYS_AUE_kse_thr_interrupt AUE_NULL
#define SYS_AUE_kse_release AUE_NULL
#define SYS_AUE___mac_get_proc AUE_NULL
#define SYS_AUE___mac_set_proc AUE_NULL
#define SYS_AUE___mac_get_fd AUE_NULL
@ -2428,7 +2395,6 @@ int freebsd4_sigreturn(struct thread *, struct freebsd4_sigreturn_args *);
#define SYS_AUE_extattr_list_fd AUE_EXTATTR_LIST_FD
#define SYS_AUE_extattr_list_file AUE_EXTATTR_LIST_FILE
#define SYS_AUE_extattr_list_link AUE_EXTATTR_LIST_LINK
#define SYS_AUE_kse_switchin AUE_NULL
#define SYS_AUE_ksem_timedwait AUE_NULL
#define SYS_AUE_thr_suspend AUE_NULL
#define SYS_AUE_thr_wake AUE_NULL

View File

@ -116,7 +116,6 @@ struct lock_object;
struct malloc_type;
struct mtx;
struct proc;
struct kse;
struct socket;
struct thread;
struct tty;

View File

@ -663,8 +663,6 @@ faultin(p)
* is enough space for them. Of course, if a process waits for a long
* time, it will be swapped in anyway.
*
* XXXKSE - process with the thread with highest priority counts..
*
* Giant is held on entry.
*/
/* ARGSUSED*/

View File

@ -3036,7 +3036,7 @@ vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
if (p == curthread->td_proc) /* XXXKSE ? */
if (p == curthread->td_proc)
pmap_activate(curthread);
vmspace_free(oldvmspace);
return (0);
@ -3060,7 +3060,7 @@ vmspace_unshare(struct proc *p)
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
if (p == curthread->td_proc) /* XXXKSE ? */
if (p == curthread->td_proc)
pmap_activate(curthread);
vmspace_free(oldvmspace);
return (0);