- Close races with signals and other ASTs being triggered while we are in
  the process of exiting the kernel.  The ast() function now loops as long
  as the PS_ASTPENDING or PS_NEEDRESCHED flags are set.  It returns with
  preemption disabled so that any further ASTs that arrive via an
  interrupt will be delayed until the low-level MD code returns to user
  mode.  (A condensed sketch of this loop follows the changed-file summary
  below.)
- Use u_int to store the tick counts used for profiling so that we do not
  need sched_lock just to read p_sticks.  This also fixes a problem where
  the call to addupc_task() could corrupt the arithmetic due to non-atomic
  reads of p_sticks.
- Axe need_proftick(), aston(), astoff(), astpending(), need_resched(),
  clear_resched(), and resched_wanted() in favor of direct bit operations
  on p_sflag.
- Fix up the sched_lock locking a bit.  In addupc_intr(), use sched_lock
  to ensure pr_addr and pr_ticks are updated atomically with setting
  PS_OWEUPC.  In ast() we clear pr_ticks atomically with clearing
  PS_OWEUPC.  We also do not grab the lock just to test a flag.
- Simplify the handling of Giant in ast() slightly.

Reviewed by:	bde (mostly)
John Baldwin 2001-08-10 22:53:32 +00:00
parent 827dcaf663
commit 688ebe120c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=81493
29 changed files with 116 additions and 230 deletions
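
The first item above is the heart of the change: ast() now keeps handling ASTs until neither PS_ASTPENDING nor PS_NEEDRESCHED is set, and it leaves preemption disabled when it is done.  A condensed sketch of that loop, distilled from the kern/subr_trap.c hunk further down (the per-flag work is elided and the K&R declaration style is dropped; all identifiers are taken from that hunk):

void
ast(struct trapframe *framep)
{
    struct proc *p = CURPROC;
    u_int sticks;
    int sflag;
    critical_t s;

    s = critical_enter();           /* test the flags with interrupts blocked */
    while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
        critical_exit(s);           /* re-enable interrupts while doing the work */
        mtx_lock_spin(&sched_lock);
        sticks = p->p_sticks;
        sflag = p->p_sflag;         /* snapshot, then clear, in one locked step */
        p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND | PS_ASTPENDING);
        mtx_unlock_spin(&sched_lock);
        /* ... act on each bit saved in sflag: profiling, SIGVTALRM, SIGPROF ... */
        userret(p, framep, sticks);
        s = critical_enter();       /* loop re-tests with interrupts blocked */
    }
    /*
     * Return with preemption disabled; the MD return path performs the
     * equivalent of critical_exit() just before dropping to user mode, so
     * any AST posted by an interrupt after this point is delayed until the
     * next kernel entry.
     */
}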


@@ -259,7 +259,7 @@ trap(a0, a1, a2, entry, framep)
 register struct proc *p;
 register int i;
 u_int64_t ucode;
-u_quad_t sticks;
+u_int sticks;
 int user;
 #ifdef SMP
 critical_t s;
@@ -289,9 +289,7 @@ trap(a0, a1, a2, entry, framep)
 CTR5(KTR_TRAP, "%s trap: pid %d, (%lx, %lx, %lx)",
 user ? "user" : "kernel", p->p_pid, a0, a1, a2);
 if (user) {
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = framep;
 } else {
 sticks = 0; /* XXX bogus -Wuninitialized warning */
@@ -654,7 +652,7 @@ syscall(code, framep)
 struct proc *p;
 int error = 0;
 u_int64_t opc;
-u_quad_t sticks;
+u_int sticks;
 u_int64_t args[10]; /* XXX */
 u_int hidden = 0, nargs;
 #ifdef SMP
@@ -685,9 +683,7 @@ syscall(code, framep)
 cnt.v_syscall++;
 p->p_frame = framep;
 opc = framep->tf_regs[FRAME_PC] - 4;
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 #ifdef DIAGNOSTIC
 alpha_fpstate_check(p);
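
Dropping sched_lock around the p_sticks reads above (and in the matching trap/syscall paths below) is safe presumably because p_sticks is now int-sized: on every platform touched by this commit an aligned 32-bit load is a single access and cannot return a torn value, whereas the old 64-bit u_quad_t read could be split into two loads on 32-bit machines, which is the non-atomic read the second commit-message item blames for corrupting addupc_task()'s arithmetic.  In isolation the pattern is simply:

u_int sticks;

sticks = p->p_sticks;	/* one aligned word load; no lock needed just to read it */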


@@ -69,20 +69,6 @@ struct clockframe {
 #define CLKF_USERMODE(framep) TRAPF_USERMODE(&(framep)->cf_tf)
 #define CLKF_PC(framep) TRAPF_PC(&(framep)->cf_tf)
-/*
-* Arrange to handle pending profiling ticks before returning to user mode.
-*
-* XXX this is now poorly named and implemented. It used to handle only a
-* single tick and the PS_OWEUPC flag served as a counter. Now there is a
-* counter in the proc table and flag isn't really necessary.
-*/
-#define need_proftick(p) do { \
-mtx_lock_spin(&sched_lock); \
-(p)->p_sflag |= PS_OWEUPC; \
-aston(p); \
-mtx_unlock_spin(&sched_lock); \
-} while (0)
 /*
 * CTL_MACHDEP definitions.
 */


@@ -261,7 +261,7 @@ npx_intr(dummy)
 if (p != NULL) {
 p->p_addr->u_pcb.pcb_flags |= PCB_NPXTRAP;
 mtx_lock_spin(&sched_lock);
-aston(p);
+p->p_sflag |= PS_ASTPENDING;
 mtx_unlock_spin(&sched_lock);
 }
 }


@@ -157,7 +157,7 @@ i386_extend_pcb(struct proc *p)
 p->p_addr->u_pcb.pcb_ext = ext;
 /* switch to the new TSS after syscall completes */
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 return 0;


@@ -174,7 +174,7 @@ trap(frame)
 struct trapframe frame;
 {
 struct proc *p = curproc;
-u_quad_t sticks = 0;
+u_int sticks = 0;
 int i = 0, ucode = 0, type, code;
 vm_offset_t eva;
 #ifdef POWERFAIL_NMI
@@ -225,9 +225,7 @@ trap(frame)
 ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
 /* user trap */
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = &frame;
 switch (type) {
@@ -1031,7 +1029,7 @@ syscall(frame)
 int i;
 struct sysent *callp;
 struct proc *p = curproc;
-u_quad_t sticks;
+u_int sticks;
 int error;
 int narg;
 int args[8];
@@ -1047,10 +1045,7 @@ syscall(frame)
 }
 #endif
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = &frame;
 params = (caddr_t)frame.tf_esp + sizeof(int);
 code = frame.tf_eax;


@@ -67,20 +67,6 @@
 ((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
 #define CLKF_PC(framep) ((framep)->cf_eip)
-/*
-* Arrange to handle pending profiling ticks before returning to user mode.
-*
-* XXX this is now poorly named and implemented. It used to handle only a
-* single tick and the PS_OWEUPC flag served as a counter. Now there is a
-* counter in the proc table and flag isn't really necessary.
-*/
-#define need_proftick(p) do { \
-mtx_lock_spin(&sched_lock); \
-(p)->p_sflag |= PS_OWEUPC; \
-aston(p); \
-mtx_unlock_spin(&sched_lock); \
-} while (0)
 /*
 * CTL_MACHDEP definitions.
 */


@@ -261,7 +261,7 @@ npx_intr(dummy)
 if (p != NULL) {
 p->p_addr->u_pcb.pcb_flags |= PCB_NPXTRAP;
 mtx_lock_spin(&sched_lock);
-aston(p);
+p->p_sflag |= PS_ASTPENDING;
 mtx_unlock_spin(&sched_lock);
 }
 }


@@ -157,7 +157,7 @@ i386_extend_pcb(struct proc *p)
 p->p_addr->u_pcb.pcb_ext = ext;
 /* switch to the new TSS after syscall completes */
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 return 0;


@@ -174,7 +174,7 @@ trap(frame)
 struct trapframe frame;
 {
 struct proc *p = curproc;
-u_quad_t sticks = 0;
+u_int sticks = 0;
 int i = 0, ucode = 0, type, code;
 vm_offset_t eva;
 #ifdef POWERFAIL_NMI
@@ -225,9 +225,7 @@ trap(frame)
 ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
 /* user trap */
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = &frame;
 switch (type) {
@@ -1031,7 +1029,7 @@ syscall(frame)
 int i;
 struct sysent *callp;
 struct proc *p = curproc;
-u_quad_t sticks;
+u_int sticks;
 int error;
 int narg;
 int args[8];
@@ -1047,10 +1045,7 @@ syscall(frame)
 }
 #endif
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = &frame;
 params = (caddr_t)frame.tf_esp + sizeof(int);
 code = frame.tf_eax;


@@ -67,20 +67,6 @@
 ((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
 #define CLKF_PC(framep) ((framep)->cf_eip)
-/*
-* Arrange to handle pending profiling ticks before returning to user mode.
-*
-* XXX this is now poorly named and implemented. It used to handle only a
-* single tick and the PS_OWEUPC flag served as a counter. Now there is a
-* counter in the proc table and flag isn't really necessary.
-*/
-#define need_proftick(p) do { \
-mtx_lock_spin(&sched_lock); \
-(p)->p_sflag |= PS_OWEUPC; \
-aston(p); \
-mtx_unlock_spin(&sched_lock); \
-} while (0)
 /*
 * CTL_MACHDEP definitions.
 */


@@ -261,7 +261,7 @@ npx_intr(dummy)
 if (p != NULL) {
 p->p_addr->u_pcb.pcb_flags |= PCB_NPXTRAP;
 mtx_lock_spin(&sched_lock);
-aston(p);
+p->p_sflag |= PS_ASTPENDING;
 mtx_unlock_spin(&sched_lock);
 }
 }


@@ -180,7 +180,7 @@ trap(int vector, int imm, struct trapframe *framep)
 struct proc *p;
 int i;
 u_int64_t ucode;
-u_quad_t sticks;
+u_int sticks;
 int user;
 cnt.v_trap++;
@@ -189,9 +189,7 @@ trap(int vector, int imm, struct trapframe *framep)
 user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER);
 if (user) {
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_frame = framep;
 } else {
 sticks = 0; /* XXX bogus -Wuninitialized warning */
@@ -444,14 +442,12 @@ syscall(int code, u_int64_t *args, struct trapframe *framep)
 struct proc *p;
 int error = 0;
 u_int64_t oldip, oldri;
-u_quad_t sticks;
+u_int sticks;
 cnt.v_syscall++;
 p = curproc;
 p->p_frame = framep;
-mtx_lock_spin(&sched_lock);
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 mtx_lock(&Giant);
 /*


@@ -69,19 +69,6 @@ struct clockframe {
 #define CLKF_USERMODE(framep) TRAPF_USERMODE(&(framep)->cf_tf)
 #define CLKF_PC(framep) TRAPF_PC(&(framep)->cf_tf)
-/*
-* Give a profiling tick to the current process when the user profiling
-* buffer pages are invalid. On the hp300, request an ast to send us
-* through trap, marking the proc as needing a profiling tick.
-*/
-#define need_proftick(p) do { \
-mtx_lock_spin(&sched_lock); \
-(p)->p_sflag |= PS_OWEUPC; \
-aston((p)); \
-mtx_unlock_spin(&sched_lock); \
-} while (0)
 /*
 * CTL_MACHDEP definitions.
 */


@@ -172,15 +172,11 @@ hardclock_process(p, user)
 pstats = p->p_stats;
 if (user &&
 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
-itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
-p->p_sflag |= PS_ALRMPEND;
-aston(p);
-}
+itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
+p->p_sflag |= PS_ALRMPEND | PS_ASTPENDING;
 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
-itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
-p->p_sflag |= PS_PROFPEND;
-aston(p);
-}
+itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
+p->p_sflag |= PS_PROFPEND | PS_ASTPENDING;
 }
 /*


@@ -371,8 +371,8 @@ ithread_schedule(struct ithd *ithread, int do_switch)
 * Set it_need to tell the thread to keep running if it is already
 * running. Then, grab sched_lock and see if we actually need to
 * put this thread on the runqueue. If so and the do_switch flag is
-* true, then switch to the ithread immediately. Otherwise, use
-* need_resched() to guarantee that this ithread will run before any
+* true, then switch to the ithread immediately. Otherwise, set the
+* needresched flag to guarantee that this ithread will run before any
 * userland processes.
 */
 ithread->it_need = 1;
@@ -387,7 +387,7 @@ ithread_schedule(struct ithd *ithread, int do_switch)
 curproc->p_stats->p_ru.ru_nivcsw++;
 mi_switch();
 } else
-need_resched(curproc);
+curproc->p_sflag |= PS_NEEDRESCHED;
 } else {
 CTR3(KTR_INTR, __func__ ": pid %d: it_need %d, state %d",
 p->p_pid, ithread->it_need, p->p_stat);


@@ -109,7 +109,7 @@ maybe_resched(p)
 mtx_assert(&sched_lock, MA_OWNED);
 if (p->p_pri.pri_level < curproc->p_pri.pri_level)
-need_resched(curproc);
+curproc->p_sflag |= PS_NEEDRESCHED;
 }
 int
@@ -702,7 +702,7 @@ mi_switch()
 sched_nest = sched_lock.mtx_recurse;
 p->p_lastcpu = p->p_oncpu;
 p->p_oncpu = NOCPU;
-clear_resched(p);
+p->p_sflag &= ~PS_NEEDRESCHED;
 cpu_switch();
 p->p_oncpu = PCPU_GET(cpuid);
 sched_lock.mtx_savecrit = sched_crit;


@@ -176,7 +176,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 mtx_lock_spin(&sched_lock);
 rtp_to_pri(&rtp, &p->p_pri);
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 }
 else
@@ -198,7 +198,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 * on the scheduling code: You must leave the
 * scheduling info alone.
 */
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 }
 break;
@@ -217,7 +217,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
 int ksched_yield(register_t *ret, struct ksched *ksched)
 {
 mtx_lock_spin(&sched_lock);
-need_resched(curproc);
+curproc->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 return 0;
 }


@@ -422,9 +422,11 @@ addupc_intr(p, pc, ticks)
 addr = prof->pr_base + i;
 if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
+mtx_lock_spin(&sched_lock);
 prof->pr_addr = pc;
 prof->pr_ticks = ticks;
-need_proftick(p);
+p->p_sflag |= PS_OWEUPC | PS_ASTPENDING;
+mtx_unlock_spin(&sched_lock);
 }
 }
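
The addupc_intr() change above is the producer side of the PS_OWEUPC handshake: when the profiling buffer cannot be updated from interrupt context (fuswintr()/suswintr() fail), the tick is parked in pr_addr/pr_ticks and PS_OWEUPC and PS_ASTPENDING are set, all under sched_lock.  The consumer side lives in the new ast() shown later in this commit, which clears pr_ticks under the same lock before charging the ticks; condensed from that hunk:

/* In ast(), after snapshotting p_sflag into sflag under sched_lock: */
if (sflag & PS_OWEUPC) {
    prticks = p->p_stats->p_prof.pr_ticks;
    p->p_stats->p_prof.pr_ticks = 0;	/* cleared together with PS_OWEUPC */
    mtx_unlock_spin(&sched_lock);
    addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
} else
    mtx_unlock_spin(&sched_lock);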


@@ -150,7 +150,7 @@ forward_roundrobin(void)
 id = gd->gd_cpuid;
 if (id != PCPU_GET(cpuid) && (id & stopped_cpus) == 0 &&
 p != gd->gd_idleproc) {
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 map |= id;
 }
 }


@@ -63,7 +63,7 @@ void
 userret(p, frame, oticks)
 struct proc *p;
 struct trapframe *frame;
-u_quad_t oticks;
+u_int oticks;
 {
 int sig;
@@ -72,11 +72,11 @@ userret(p, frame, oticks)
 while ((sig = CURSIG(p)) != 0)
 postsig(sig);
 mtx_unlock(&Giant);
-PROC_UNLOCK(p);
 mtx_lock_spin(&sched_lock);
+PROC_UNLOCK_NOSWITCH(p);
 p->p_pri.pri_level = p->p_pri.pri_user;
-if (resched_wanted(p)) {
+if (p->p_sflag & PS_NEEDRESCHED) {
 /*
 * Since we are curproc, a clock interrupt could
 * change our priority without changing run queues
@@ -96,93 +96,97 @@ userret(p, frame, oticks)
while ((sig = CURSIG(p)) != 0)
postsig(sig);
mtx_unlock(&Giant);
mtx_lock_spin(&sched_lock);
PROC_UNLOCK_NOSWITCH(p);
}
PROC_UNLOCK(p);
} else
mtx_unlock_spin(&sched_lock);
/*
* Charge system time if profiling.
*/
if (p->p_sflag & PS_PROFIL) {
mtx_unlock_spin(&sched_lock);
if (p->p_sflag & PS_PROFIL)
addupc_task(p, TRAPF_PC(frame),
(u_int)(p->p_sticks - oticks) * psratio);
} else
mtx_unlock_spin(&sched_lock);
((u_int)p->p_sticks - oticks) * psratio);
}
/*
* Process an asynchronous software trap.
* This is relatively easy.
* This function will return with preemption disabled.
*/
void
ast(framep)
struct trapframe *framep;
{
struct proc *p = CURPROC;
u_quad_t sticks;
u_int prticks, sticks;
critical_t s;
int sflag;
#if defined(DEV_NPX) && !defined(SMP)
int ucode;
#endif
KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
/*
* We check for a pending AST here rather than in the assembly as
* acquiring and releasing mutexes in assembly is not fun.
*/
mtx_lock_spin(&sched_lock);
if (!(astpending(p) || resched_wanted(p))) {
mtx_unlock_spin(&sched_lock);
return;
}
sticks = p->p_sticks;
p->p_frame = framep;
astoff(p);
cnt.v_soft++;
mtx_intr_enable(&sched_lock);
if (p->p_sflag & PS_OWEUPC) {
p->p_sflag &= ~PS_OWEUPC;
mtx_unlock_spin(&sched_lock);
mtx_lock(&Giant);
addupc_task(p, p->p_stats->p_prof.pr_addr,
p->p_stats->p_prof.pr_ticks);
mtx_lock_spin(&sched_lock);
}
if (p->p_sflag & PS_ALRMPEND) {
p->p_sflag &= ~PS_ALRMPEND;
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
psignal(p, SIGVTALRM);
PROC_UNLOCK(p);
mtx_lock_spin(&sched_lock);
}
#if defined(DEV_NPX) && !defined(SMP)
if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
PCPU_GET(curpcb)->pcb_flags &= ~PCB_NPXTRAP;
mtx_unlock_spin(&sched_lock);
ucode = npxtrap();
if (ucode != -1) {
if (!mtx_owned(&Giant))
mtx_lock(&Giant);
trapsignal(p, SIGFPE, ucode);
}
mtx_lock_spin(&sched_lock);
}
#ifdef WITNESS
if (witness_list(p))
panic("Returning to user mode with mutex(s) held");
#endif
if (p->p_sflag & PS_PROFPEND) {
p->p_sflag &= ~PS_PROFPEND;
mtx_unlock_spin(&sched_lock);
PROC_LOCK(p);
psignal(p, SIGPROF);
PROC_UNLOCK(p);
} else
mtx_unlock_spin(&sched_lock);
mtx_assert(&Giant, MA_NOTOWNED);
s = critical_enter();
while ((p->p_sflag & (PS_ASTPENDING | PS_NEEDRESCHED)) != 0) {
critical_exit(s);
p->p_frame = framep;
/*
* This updates the p_sflag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
* AST's saved in sflag, the astpending flag will be set and
* we will loop again.
*/
mtx_lock_spin(&sched_lock);
sticks = p->p_sticks;
sflag = p->p_sflag;
p->p_sflag &= ~(PS_OWEUPC | PS_ALRMPEND | PS_PROFPEND |
PS_ASTPENDING);
cnt.v_soft++;
if (sflag & PS_OWEUPC) {
prticks = p->p_stats->p_prof.pr_ticks;
p->p_stats->p_prof.pr_ticks = 0;
mtx_unlock_spin(&sched_lock);
addupc_task(p, p->p_stats->p_prof.pr_addr, prticks);
} else
mtx_unlock_spin(&sched_lock);
if (sflag & PS_ALRMPEND) {
PROC_LOCK(p);
psignal(p, SIGVTALRM);
PROC_UNLOCK(p);
}
#if defined(DEV_NPX) && !defined(SMP)
if (PCPU_GET(curpcb)->pcb_flags & PCB_NPXTRAP) {
atomic_clear_char(&PCPU_GET(curpcb)->pcb_flags,
PCB_NPXTRAP);
ucode = npxtrap();
if (ucode != -1) {
mtx_lock(&Giant);
trapsignal(p, SIGFPE, ucode);
}
}
#endif
if (sflag & PS_PROFPEND) {
PROC_LOCK(p);
psignal(p, SIGPROF);
PROC_UNLOCK(p);
}
userret(p, framep, sticks);
if (mtx_owned(&Giant))
mtx_unlock(&Giant);
userret(p, framep, sticks);
if (mtx_owned(&Giant))
mtx_unlock(&Giant);
s = critical_enter();
}
mtx_assert(&Giant, MA_NOTOWNED);
/*
* We need to keep interrupts disabled so that if any further AST's
* come in, the interrupt they come in on will be delayed until we
* finish returning to userland. We assume that the return to userland
* will perform the equivalent of critical_exit().
*/
}
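
One detail of the u_quad_t to u_int switch in userret() above is the profiling charge, now computed as ((u_int)p->p_sticks - oticks) * psratio with both operands unsigned ints.  Unsigned subtraction is modulo 2^32, so the elapsed tick count comes out right even if p_sticks wraps between the snapshot and the check, as long as fewer than 2^32 ticks elapse in between.  A worked example with hypothetical values:

u_int oticks = 0xfffffff0u;	/* snapshot taken just before the counter wraps */
u_int sticks = 0x00000010u;	/* current value, after the wrap */
u_int elapsed = sticks - oticks;	/* 0x20, i.e. 32 ticks, as intended */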


@@ -279,7 +279,7 @@ npx_intr(dummy)
 if (p != NULL) {
 p->p_addr->u_pcb.pcb_flags |= PCB_NPXTRAP;
 mtx_lock_spin(&sched_lock);
-aston(p);
+p->p_sflag |= PS_ASTPENDING;
 mtx_unlock_spin(&sched_lock);
 }
 }


@@ -176,7 +176,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 mtx_lock_spin(&sched_lock);
 rtp_to_pri(&rtp, &p->p_pri);
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 }
 else
@@ -198,7 +198,7 @@ int ksched_setscheduler(register_t *ret, struct ksched *ksched,
 * on the scheduling code: You must leave the
 * scheduling info alone.
 */
-need_resched(p);
+p->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 }
 break;
@@ -217,7 +217,7 @@ int ksched_getscheduler(register_t *ret, struct ksched *ksched, struct proc *p)
 int ksched_yield(register_t *ret, struct ksched *ksched)
 {
 mtx_lock_spin(&sched_lock);
-need_resched(curproc);
+curproc->p_sflag |= PS_NEEDRESCHED;
 mtx_unlock_spin(&sched_lock);
 return 0;
 }


@@ -76,7 +76,7 @@ trap(struct trapframe *frame)
 #if 0 /* XXX: This code hasn't been reworked yet. */
 struct proc *p;
 int type;
-u_quad_t sticks;
+u_int sticks;
 p = curproc;
 type = frame->exc;

@@ -56,8 +56,6 @@ extern void delay __P((unsigned));
 extern int want_resched;
 extern int astpending;
-#define need_proftick(p) ((p)->p_flag |= PS_OWEUPC, astpending = 1)
 extern char bootpath[];
 #if defined(_KERNEL) || defined(_STANDALONE)


@@ -76,7 +76,7 @@ trap(struct trapframe *frame)
 #if 0 /* XXX: This code hasn't been reworked yet. */
 struct proc *p;
 int type;
-u_quad_t sticks;
+u_int sticks;
 p = curproc;
 type = frame->exc;

@@ -40,16 +40,6 @@
 #define cpu_getstack(p) ((p)->p_frame->tf_sp)
 #define cpu_setstack(p, sp) ((p)->p_frame->tf_sp = (sp))
-/*
-* Arrange to handle pending profiling ticks before returning to user mode.
-*/
-#define need_proftick(p) do { \
-mtx_lock_spin(&sched_lock); \
-(p)->p_sflag |= PS_OWEUPC; \
-aston(p); \
-mtx_unlock_spin(&sched_lock); \
-} while (0)
 /*
 * CTL_MACHDEP definitions.
 */


@@ -134,7 +134,7 @@ const char *trap_msg[] = {
 void
 trap(struct trapframe *tf)
 {
-u_quad_t sticks;
+u_int sticks;
 struct proc *p;
 int error;
 int ucode;
@@ -148,11 +148,8 @@ trap(struct trapframe *tf)
 type = T_TYPE(tf->tf_type);
 ucode = type; /* XXX */
-if ((type & T_KERNEL) == 0) {
-mtx_lock_spin(&sched_lock);
+if ((type & T_KERNEL) == 0)
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
-}
 switch (type) {
 case T_FP_DISABLED:


@@ -352,42 +352,14 @@ sigonstack(size_t sp)
 : 0);
 }
-/*
-* Preempt the current process if in interrupt from user mode,
-* or after the current trap/syscall if in system mode.
-*/
-#define need_resched(p) do { \
-mtx_assert(&sched_lock, MA_OWNED); \
-(p)->p_sflag |= PS_NEEDRESCHED; \
-} while (0)
-#define resched_wanted(p) ((p)->p_sflag & PS_NEEDRESCHED)
-#define clear_resched(p) do { \
-mtx_assert(&sched_lock, MA_OWNED); \
-(p)->p_sflag &= ~PS_NEEDRESCHED; \
-} while (0)
-/*
-* Schedule an Asynchronous System Trap (AST) on return to user mode.
-*/
-#define aston(p) do { \
-mtx_assert(&sched_lock, MA_OWNED); \
-(p)->p_sflag |= PS_ASTPENDING; \
-} while (0)
-#define astpending(p) ((p)->p_sflag & PS_ASTPENDING)
-#define astoff(p) do { \
-mtx_assert(&sched_lock, MA_OWNED); \
-(p)->p_sflag &= ~PS_ASTPENDING; \
-} while (0)
 /*
 * Notify the current process (p) that it has a signal pending,
 * process as soon as possible.
 */
-#define signotify(p) aston(p)
+#define signotify(p) do { \
+mtx_assert(&sched_lock, MA_OWNED); \
+(p)->p_sflag |= PS_ASTPENDING; \
+} while (0)
 /* Handy macro to determine if p1 can mangle p2. */
 #define PRISON_CHECK(p1, p2) \
@@ -530,7 +502,7 @@ void cpu_switch __P((void));
 void cpu_throw __P((void)) __dead2;
 void unsleep __P((struct proc *));
 void updatepri __P((struct proc *));
-void userret __P((struct proc *, struct trapframe *, u_quad_t));
+void userret __P((struct proc *, struct trapframe *, u_int));
 void maybe_resched __P((struct proc *));
 void cpu_exit __P((struct proc *)) __dead2;
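
With the macros deleted above, callers now write p_sflag directly while holding sched_lock, as the other hunks in this commit show; only signotify() survives, re-expressed in terms of PS_ASTPENDING.  A minimal before/after sketch of the calling convention (the sched_lock assertion that the old macros supplied is now implicitly the caller's responsibility):

/* before */
mtx_lock_spin(&sched_lock);
need_resched(p);		/* macro hid the flag write and the assert */
mtx_unlock_spin(&sched_lock);

/* after */
mtx_lock_spin(&sched_lock);
p->p_sflag |= PS_NEEDRESCHED;	/* sched_lock still protects p_sflag */
mtx_unlock_spin(&sched_lock);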


@@ -63,7 +63,7 @@ struct pstats {
 u_long pr_off; /* pc offset */
 u_long pr_scale; /* pc scaling */
 u_long pr_addr; /* temp storage for addr until AST */
-u_long pr_ticks; /* temp storage for ticks until AST */
+u_int pr_ticks; /* temp storage for ticks until AST */
 } p_prof;
 #define pstat_endcopy p_start
 struct timeval p_start; /* starting time */