Move ast() and userret() to sys/kern/subr_trap.c now that they are MI.
commit cbc88996c6
parent 8d232144a3
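In practice the change deletes the per-architecture copies of userret() and ast() shown in the hunks below and leaves a single machine-independent implementation in sys/kern/subr_trap.c. What follows is a hypothetical sketch, not part of the commit, of the call pattern a machine-dependent syscall or trap handler is left with; the md_syscall() wrapper and its body are purely illustrative, and only the userret(p, frame, oticks) signature and the MI ast() prototype added to sys/proc.h are taken from this diff.

/*
 * Hypothetical sketch only, not part of this commit.  md_syscall() is an
 * invented example; the userret() prototype below is normally supplied by
 * the machine-dependent headers.
 */
#include <sys/param.h>
#include <sys/proc.h>			/* carries the MI ast() prototype */

struct trapframe;			/* MD register layout, opaque here */
void	userret(struct proc *p, struct trapframe *frame, u_quad_t oticks);

void
md_syscall(struct proc *p, struct trapframe *framep)
{
	u_quad_t sticks;

	sticks = p->p_sticks;		/* ticks at entry, for profiling */

	/* ... machine-dependent argument copy-in and dispatch ... */

	/*
	 * Shared MI epilogue (now in sys/kern/subr_trap.c): post pending
	 * signals, honor a requested reschedule and charge profiling
	 * time -- the same work each per-arch copy below used to do.
	 */
	userret(p, framep, sticks);
}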
@@ -132,59 +132,6 @@ static const char *mmfault_causes[] = {
	"store instruction"
};

/*
 * Define the code needed before returning to user mode, for
 * trap and syscall.
 */
void
userret(p, frame, oticks)
	register struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig;

	/* take pending signals */
	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK_NOSWITCH(p);
	p->p_pri.pri_level = p->p_pri.pri_user;
	if (resched_wanted(p)) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrunqueue ourselves but
		 * before we switch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK_NOSWITCH(p);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_sflag & PS_PROFIL) {
		mtx_unlock_spin(&sched_lock);
		addupc_task(p, TRAPF_PC(frame),
		    (int)(p->p_sticks - oticks) * psratio);
	} else
		mtx_unlock_spin(&sched_lock);
}

static void
printtrap(a0, a1, a2, entry, framep, isfatal, user)
	const unsigned long a0, a1, a2, entry;

@@ -880,66 +827,6 @@ syscall(code, framep)
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 */
void
ast(framep)
	struct trapframe *framep;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;

	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));

	/*
	 * We check for a pending AST here rather than in the assembly as
	 * acquiring and releasing mutexes in assembly is not fun.
	 */
	mtx_lock_spin(&sched_lock);
	if (!(astpending(p) || resched_wanted(p))) {
		mtx_unlock_spin(&sched_lock);
		return;
	}

	sticks = p->p_sticks;
	p->p_frame = framep;

	astoff(p);
	cnt.v_soft++;
	mtx_intr_enable(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_PROFPEND) {
		p->p_sflag &= ~PS_PROFPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	userret(p, framep, sticks);

	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
}

/*
 * Unaligned access handler. It's not clear that this can get much slower...
 *
@@ -134,7 +134,6 @@ void alpha_fpstate_check __P((struct proc *p));
void alpha_fpstate_save __P((struct proc *p, int write));
void alpha_fpstate_drop __P((struct proc *p));
void alpha_fpstate_switch __P((struct proc *p));
void ast __P((struct trapframe *));
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, void *));
u_int64_t console_restart __P((u_int64_t, u_int64_t, u_int64_t));
@@ -163,54 +163,6 @@ SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
extern char *syscallnames[];
#endif

void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig;

	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK_NOSWITCH(p);
	p->p_pri.pri_level = p->p_pri.pri_user;
	if (resched_wanted(p)) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * mi_switch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK_NOSWITCH(p);
	}

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_sflag & PS_PROFIL) {
		mtx_unlock_spin(&sched_lock);
		addupc_task(p, TRAPF_PC(frame),
		    (u_int)(p->p_sticks - oticks) * psratio);
	} else
		mtx_unlock_spin(&sched_lock);
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry

@@ -1255,75 +1207,3 @@ bad:
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}

void
ast(framep)
	struct trapframe *framep;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));

	/*
	 * We check for a pending AST here rather than in the assembly as
	 * acquiring and releasing mutexes in assembly is not fun.
	 */
	mtx_lock_spin(&sched_lock);
	if (!(astpending(p) || resched_wanted(p))) {
		mtx_unlock_spin(&sched_lock);
		return;
	}

	sticks = p->p_sticks;
	p->p_frame = framep;

	astoff(p);
	cnt.v_soft++;
	mtx_intr_enable(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
		mtx_lock_spin(&sched_lock);
	}
#if defined(DEV_NPX) && !defined(SMP)
	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
		PCPU_GET(curpcb)->pcb_flags &= ~PCB_NPXTRAP;
		mtx_unlock_spin(&sched_lock);
		ucode = npxtrap();
		if (ucode != -1) {
			if (!mtx_owned(&Giant))
				mtx_lock(&Giant);
			trapsignal(p, SIGFPE, ucode);
		}
		mtx_lock_spin(&sched_lock);
	}
#endif
	if (p->p_sflag & PS_PROFPEND) {
		p->p_sflag &= ~PS_PROFPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	userret(p, framep, sticks);

	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
}
@@ -806,6 +806,7 @@ kern/subr_sbuf.c standard
kern/subr_scanf.c standard
kern/subr_smp.c optional smp
kern/subr_taskqueue.c standard
kern/subr_trap.c standard
kern/subr_witness.c optional witness
kern/subr_xxx.c standard
kern/sys_generic.c standard
@@ -163,54 +163,6 @@ SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
extern char *syscallnames[];
#endif

void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig;

	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK_NOSWITCH(p);
	p->p_pri.pri_level = p->p_pri.pri_user;
	if (resched_wanted(p)) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * mi_switch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK_NOSWITCH(p);
	}

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_sflag & PS_PROFIL) {
		mtx_unlock_spin(&sched_lock);
		addupc_task(p, TRAPF_PC(frame),
		    (u_int)(p->p_sticks - oticks) * psratio);
	} else
		mtx_unlock_spin(&sched_lock);
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry

@@ -1255,75 +1207,3 @@ bad:
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}

void
ast(framep)
	struct trapframe *framep;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;
#if defined(DEV_NPX) && !defined(SMP)
	int ucode;
#endif

	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));

	/*
	 * We check for a pending AST here rather than in the assembly as
	 * acquiring and releasing mutexes in assembly is not fun.
	 */
	mtx_lock_spin(&sched_lock);
	if (!(astpending(p) || resched_wanted(p))) {
		mtx_unlock_spin(&sched_lock);
		return;
	}

	sticks = p->p_sticks;
	p->p_frame = framep;

	astoff(p);
	cnt.v_soft++;
	mtx_intr_enable(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
		mtx_lock_spin(&sched_lock);
	}
#if defined(DEV_NPX) && !defined(SMP)
	if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
		PCPU_GET(curpcb)->pcb_flags &= ~PCB_NPXTRAP;
		mtx_unlock_spin(&sched_lock);
		ucode = npxtrap();
		if (ucode != -1) {
			if (!mtx_owned(&Giant))
				mtx_lock(&Giant);
			trapsignal(p, SIGFPE, ucode);
		}
		mtx_lock_spin(&sched_lock);
	}
#endif
	if (p->p_sflag & PS_PROFPEND) {
		p->p_sflag &= ~PS_PROFPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	userret(p, framep, sticks);

	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
}
@@ -75,56 +75,6 @@ static int unaligned_fixup(struct trapframe *framep, struct proc *p);
extern char *syscallnames[];
#endif

/*
 * Define the code needed before returning to user mode, for
 * trap and syscall.
 */
void
userret(register struct proc *p, struct trapframe *frame, u_quad_t oticks)
{
	int sig;

	/* take pending signals */
	PROC_LOCK(p);
	while ((sig = CURSIG(p)) != 0)
		postsig(sig);

	mtx_lock_spin(&sched_lock);
	PROC_UNLOCK_NOSWITCH(p);
	p->p_pri.pri_level = p->p_pri.pri_user;
	if (resched_wanted(p)) {
		/*
		 * Since we are curproc, a clock interrupt could
		 * change our priority without changing run queues
		 * (the running process is not kept on a run queue).
		 * If this happened after we setrunqueue ourselves but
		 * before we switch()'ed, we might not be on the queue
		 * indicated by our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_unlock_spin(&sched_lock);
		PICKUP_GIANT();
		PROC_LOCK(p);
		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
		mtx_lock_spin(&sched_lock);
		PROC_UNLOCK_NOSWITCH(p);
	}

	/*
	 * If profiling, charge recent system time to the trapped pc.
	 */
	if (p->p_sflag & PS_PROFIL) {
		mtx_unlock_spin(&sched_lock);
		addupc_task(p, TRAPF_PC(frame),
		    (int)(p->p_sticks - oticks) * psratio);
	} else
		mtx_unlock_spin(&sched_lock);
}

static const char *ia64_vector_names[] = {
	"VHPT Translation", /* 0 */
	"Instruction TLB", /* 1 */

@@ -608,68 +558,6 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
	mtx_assert(&Giant, MA_NOTOWNED);
}

/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 */
void
ast(framep)
	struct trapframe *framep;
{
	register struct proc *p;
	u_quad_t sticks;

	p = curproc;

	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));

	/*
	 * We check for a pending AST here rather than in assembly as
	 * acquiring and release mutexes in assembly is not fun.
	 */
	mtx_lock_spin(&sched_lock);
	if (!(astpending(p) || resched_wanted(p))) {
		mtx_unlock_spin(&sched_lock);
		return;
	}

	sticks = p->p_sticks;
	p->p_frame = framep;

	astoff(p);
	cnt.v_soft++;
	mtx_intr_enable(&sched_lock);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_unlock_spin(&sched_lock);
		mtx_lock(&Giant);
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
		mtx_lock_spin(&sched_lock);
	}
	if (p->p_sflag & PS_PROFPEND) {
		p->p_sflag &= ~PS_PROFPEND;
		mtx_unlock_spin(&sched_lock);
		PROC_LOCK(p);
		psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	} else
		mtx_unlock_spin(&sched_lock);

	userret(p, framep, sticks);

	if (mtx_owned(&Giant))
		mtx_unlock(&Giant);
}

extern int ia64_unaligned_print, ia64_unaligned_fix;
extern int ia64_unaligned_sigbus;
@@ -120,7 +120,6 @@ struct trapframe;
extern struct rpb *hwrpb;
extern volatile int mc_expected, mc_received;

void ast __P((struct trapframe *));
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, void *));
void child_return __P((struct proc *p));
sys/kern/subr_trap.c (1179 lines)
File diff suppressed because it is too large
@@ -597,11 +597,3 @@ fix_unaligned(p, frame)
	return -1;
}
#endif

void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{

	/* XXX: Coming soon */
	return;
}
@@ -597,11 +597,3 @@ fix_unaligned(p, frame)
	return -1;
}
#endif

void
userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
{

	/* XXX: Coming soon */
	return;
}
@@ -506,6 +506,7 @@ struct proc *pfind __P((pid_t)); /* Find process by id. */
struct pgrp *pgfind __P((pid_t)); /* Find process group by id. */
struct proc *zpfind __P((pid_t)); /* Find zombie process by id. */

void ast __P((struct trapframe *framep));
struct proc *chooseproc __P((void));
int enterpgrp __P((struct proc *p, pid_t pgid, int mksess));
void faultin __P((struct proc *p));