- Make astpending and need_resched process attributes rather than CPU
  attributes. This is needed for ASTs to be properly posted in a preemptive
  kernel. They are backed by two new flags in p_sflag: PS_ASTPENDING and
  PS_NEEDRESCHED. They are still accessed by their old macros: aston(),
  astoff(), etc. For completeness, an astpending() macro has been added to
  check for a pending AST, and clear_resched() has been added to clear
  need_resched().
- Rename syscall2() on the x86 back to syscall() to be consistent with other
  architectures.
parent 90b99402a9
commit 142ba5f3d7
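Summary of the new interface (a condensed sketch consolidated from the sys/proc.h hunk at the end of this diff; the real definitions wrap the writes in do/while blocks and assert ownership of sched_lock via mtx_assert(&sched_lock, MA_OWNED)):

/* New p_sflag bits replacing the per-CPU gd_astpending word. */
#define PS_ASTPENDING  0x00400  /* Process has a pending ast. */
#define PS_NEEDRESCHED 0x00800  /* Process needs to yield. */

/* Accessors keep their old names; sched_lock must be held for the writes. */
#define aston()          signotify(CURPROC)
#define signotify(p)     ((p)->p_sflag |= PS_ASTPENDING)
#define astoff()         (CURPROC->p_sflag &= ~PS_ASTPENDING)
#define astpending()     (curproc->p_sflag & PS_ASTPENDING)    /* new */
#define need_resched()   (curproc->p_sflag |= PS_NEEDRESCHED)
#define resched_wanted() (curproc->p_sflag & PS_NEEDRESCHED)
#define clear_resched()  (curproc->p_sflag &= ~PS_NEEDRESCHED) /* new */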
@@ -135,14 +135,10 @@ XentSys1: LDGP(pv)
 and t1, FRAME_FLAGS_SYSCALL
 beq t1, exception_return
 
-ldl t2, GD_ASTPENDING(globalp) /* AST pending? */
-beq t2, 2f /* no: return */
-
-/* We've got an AST. Handle it. */
+/* Handle any AST's. */
 mov sp, a0 /* only arg is frame */
 CALL(ast)
 
-2:
 /* set the hae register if this process has specified a value */
 ldq t0, GD_CURPROC(globalp)
 beq t0, 3f
@@ -264,12 +260,7 @@ Ler1: LDGP(pv)
 and s1, ALPHA_PSL_USERMODE, t0 /* are we returning to user? */
 beq t0, Lrestoreregs /* no: just return */
 
-ldl t2, GD_ASTPENDING(globalp) /* AST pending? */
-beq t2, Lrestoreregs /* no: return */
-
-/* We've got an AST. Handle it. */
-ldiq a0, ALPHA_PSL_IPL_0 /* drop IPL to zero */
-call_pal PAL_OSF1_swpipl
+/* Handle any AST's or resched's. */
 mov sp, a0 /* only arg is frame */
 CALL(ast)
 
@@ -73,7 +73,6 @@ ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb));
 ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid));
 ASSYM(GD_IDLEPCBPHYS, offsetof(struct globaldata, gd_idlepcbphys));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
 
 ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock));
 ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse));
@@ -70,8 +70,6 @@
 #include <ddb/ddb.h>
 #endif
 
-u_int32_t want_resched;
-
 unsigned long Sfloat_to_reg __P((unsigned int));
 unsigned int reg_to_Sfloat __P((unsigned long));
 unsigned long Tfloat_reg_cvt __P((unsigned long));
@@ -101,7 +99,7 @@ userret(p, frame, oticks)
 struct trapframe *frame;
 u_quad_t oticks;
 {
-int sig, s;
+int sig;
 
 /* take pending signals */
 while ((sig = CURSIG(p)) != 0) {
@@ -111,7 +109,7 @@ userret(p, frame, oticks)
 }
 mtx_lock_spin(&sched_lock);
 p->p_priority = p->p_usrpri;
-if (want_resched) {
+if (resched_wanted()) {
 /*
 * Since we are curproc, a clock interrupt could
 * change our priority without changing run queues
@@ -120,14 +118,12 @@ userret(p, frame, oticks)
 * before we switch()'ed, we might not be on the queue
 * indicated by our priority.
 */
-s = splstatclock();
 DROP_GIANT_NOSWITCH();
 setrunqueue(p);
 p->p_stats->p_ru.ru_nivcsw++;
 mi_switch();
 mtx_unlock_spin(&sched_lock);
 PICKUP_GIANT();
-splx(s);
 while ((sig = CURSIG(p)) != 0) {
 if (!mtx_owned(&Giant))
 mtx_lock(&Giant);
@@ -759,22 +755,27 @@ void
 ast(framep)
 struct trapframe *framep;
 {
-register struct proc *p;
+struct proc *p = CURPROC;
 u_quad_t sticks;
 
-p = curproc;
+KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
 
+/*
+* We check for a pending AST here rather than in the assembly as
+* acquiring and releasing mutexes in assembly is not fun.
+*/
 mtx_lock_spin(&sched_lock);
+if (!(astpending() || resched_wanted())) {
+mtx_unlock_spin(&sched_lock);
+return;
+}
 
 sticks = p->p_sticks;
-mtx_unlock_spin(&sched_lock);
 p->p_md.md_tf = framep;
 
-if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0)
-panic("ast and not user");
+astoff();
 
 cnt.v_soft++;
-PCPU_SET(astpending, 0);
-mtx_lock_spin(&sched_lock);
+mtx_intr_enable(&sched_lock);
 if (p->p_sflag & PS_OWEUPC) {
 p->p_sflag &= ~PS_OWEUPC;
 mtx_unlock_spin(&sched_lock);
@@ -62,44 +62,28 @@
 struct clockframe {
 struct trapframe cf_tf;
 };
-#define CLKF_USERMODE(framep) \
-(((framep)->cf_tf.tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0)
-#define CLKF_PC(framep) ((framep)->cf_tf.tf_regs[FRAME_PC])
+#define TRAPF_USERMODE(framep) \
+(((framep)->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0)
+#define TRAPF_PC(framep) ((framep)->tf_regs[FRAME_PC])
 
+#define CLKF_USERMODE(framep) TRAPF_USERMODE(&(framep)->cf_tf)
+#define CLKF_PC(framep) TRAPF_PC(&(framep)->cf_tf)
 #define CLKF_INTR(framep) (curproc->p_intr_nesting_level >= 2)
 
 /*
-* Preempt the current process if in interrupt from user mode,
-* or after the current trap/syscall if in system mode.
-*/
-#define need_resched() do { want_resched = 1; aston(); } while (0)
-
-#define resched_wanted() want_resched
-
-/*
-* Give a profiling tick to the current process when the user profiling
-* buffer pages are invalid. On the hp300, request an ast to send us
-* through trap, marking the proc as needing a profiling tick.
+* Arrange to handle pending profiling ticks before returning to user mode.
+*
+* XXX this is now poorly named and implemented. It used to handle only a
+* single tick and the PS_OWEUPC flag served as a counter. Now there is a
+* counter in the proc table and flag isn't really necessary.
 */
 #define need_proftick(p) do { \
 mtx_lock_spin(&sched_lock); \
 (p)->p_sflag |= PS_OWEUPC; \
-mtx_unlock_spin(&sched_lock); \
 aston(); \
+mtx_unlock_spin(&sched_lock); \
 } while (0)
 
-/*
-* Notify the current process (p) that it has a signal pending,
-* process as soon as possible.
-*/
-#define signotify(p) aston()
-
-#define aston() PCPU_SET(astpending, 1)
-
-#ifdef _KERNEL
-extern u_int32_t want_resched; /* resched() was called */
-#endif
-
 /*
 * CTL_MACHDEP definitions.
 */
@@ -57,7 +57,6 @@ struct globaldata {
 u_int32_t gd_next_asn; /* next ASN to allocate */
 u_int32_t gd_current_asngen; /* ASN rollover check */
 
-u_int gd_astpending;
 SLIST_ENTRY(globaldata) gd_allcpu;
 int gd_witness_spin_check;
 #ifdef KTR_PERCPU
@@ -304,10 +304,9 @@ _Xcpuast:
 
 FAKE_MCOUNT(13*4(%esp))
 
-orl $AST_PENDING, PCPU(ASTPENDING) /* XXX */
+MTX_LOCK_SPIN(sched_lock, 0)
 movl PCPU(CURPROC),%ebx
-incl P_INTR_NESTING_LEVEL(%ebx)
-sti
+orl $PS_ASTPENDING, P_SFLAG(%ebx)
 
 movl PCPU(CPUID), %eax
 lock
@@ -315,13 +314,13 @@ _Xcpuast:
 lock
 btrl %eax, CNAME(resched_cpus)
 jnc 2f
-orl $AST_PENDING+AST_RESCHED, PCPU(ASTPENDING)
+orl $PS_NEEDRESCHED, P_SFLAG(%ebx)
 lock
 incl CNAME(want_resched_cnt)
 2:
+MTX_UNLOCK_SPIN(sched_lock)
 lock
 incl CNAME(cpuast_cnt)
-decl P_INTR_NESTING_LEVEL(%ebx)
 MEXITCOUNT
 jmp _doreti
 1:
@@ -180,9 +180,6 @@ sw1a:
 sw1b:
 movl %eax,%ecx
 
-xorl %eax,%eax
-andl $~AST_RESCHED,PCPU(ASTPENDING)
-
 #ifdef INVARIANTS
 cmpb $SRUN,P_STAT(%ecx)
 jne badsw2
@@ -230,7 +230,7 @@ calltrap:
 * temporarily altered for the pushfl - an interrupt might come in
 * and clobber the saved cs/eip.
 *
-* We do not obtain the MP lock, but the call to syscall2 might. If it
+* We do not obtain the MP lock, but the call to syscall might. If it
 * does it will release the lock prior to returning.
 */
 SUPERALIGN_TEXT
@@ -250,11 +250,8 @@ IDTVEC(syscall)
 movl %eax,TF_EFLAGS(%esp)
 movl $7,TF_ERR(%esp) /* sizeof "lcall 7,0" */
 FAKE_MCOUNT(13*4(%esp))
-call _syscall2
+call _syscall
 MEXITCOUNT
-cli /* atomic astpending access */
-cmpl $0,PCPU(ASTPENDING) /* AST pending? */
-je doreti_syscall_ret /* no, get out of here */
 jmp _doreti
 
 /*
@@ -264,7 +261,7 @@ IDTVEC(syscall)
 * rather then an IGT (interrupt gate). Thus interrupts are enabled on
 * entry just as they are for a normal syscall.
 *
-* We do not obtain the MP lock, but the call to syscall2 might. If it
+* We do not obtain the MP lock, but the call to syscall might. If it
 * does it will release the lock prior to returning.
 */
 SUPERALIGN_TEXT
@@ -281,11 +278,8 @@ IDTVEC(int0x80_syscall)
 mov %ax,%fs
 movl $2,TF_ERR(%esp) /* sizeof "int 0x80" */
 FAKE_MCOUNT(13*4(%esp))
-call _syscall2
+call _syscall
 MEXITCOUNT
-cli /* atomic astpending access */
-cmpl $0,PCPU(ASTPENDING) /* AST pending? */
-je doreti_syscall_ret /* no, get out of here */
 jmp _doreti
 
 ENTRY(fork_trampoline)
@@ -81,9 +81,13 @@ ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap));
 ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active));
 ASSYM(P_ADDR, offsetof(struct proc, p_addr));
 ASSYM(P_INTR_NESTING_LEVEL, offsetof(struct proc, p_intr_nesting_level));
+ASSYM(P_SFLAG, offsetof(struct proc, p_sflag));
 ASSYM(P_STAT, offsetof(struct proc, p_stat));
 ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
 
+ASSYM(PS_ASTPENDING, PS_ASTPENDING);
+ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED);
+
 #ifdef SMP
 ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
 ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
@@ -180,9 +184,6 @@ ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime));
 ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks));
 ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd));
 ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt));
-ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending));
-ASSYM(AST_PENDING, AST_PENDING);
-ASSYM(AST_RESCHED, AST_RESCHED);
 
 #ifdef USER_LDT
 ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt));
@@ -105,7 +105,7 @@ int (*pmath_emulate) __P((struct trapframe *));
 
 extern void trap __P((struct trapframe frame));
 extern int trapwrite __P((unsigned addr));
-extern void syscall2 __P((struct trapframe frame));
+extern void syscall __P((struct trapframe frame));
 extern void ast __P((struct trapframe frame));
 
 static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
@@ -212,7 +212,7 @@ userret(p, frame, oticks)
 if (!mtx_owned(&Giant))
 mtx_lock(&Giant);
 mtx_lock_spin(&sched_lock);
-addupc_task(p, frame->tf_eip,
+addupc_task(p, TRAPF_PC(frame),
 (u_int)(p->p_sticks - oticks) * psratio);
 }
 curpriority = p->p_priority;
@@ -1075,7 +1075,7 @@ int trapwrite(addr)
 }
 
 /*
-* syscall2 - MP aware system call request C handler
+* syscall - MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return. We are responsible for
@@ -1086,7 +1086,7 @@ int trapwrite(addr)
 * the current stack is allowed without having to hold MP lock.
 */
 void
-syscall2(frame)
+syscall(frame)
 struct trapframe frame;
 {
 caddr_t params;
@@ -1278,10 +1278,22 @@ ast(frame)
 struct proc *p = CURPROC;
 u_quad_t sticks;
 
+KASSERT(TRAPF_USERMODE(&frame), ("ast in kernel mode"));
+
+/*
+* We check for a pending AST here rather than in the assembly as
+* acquiring and releasing mutexes in assembly is not fun.
+*/
 mtx_lock_spin(&sched_lock);
+if (!(astpending() || resched_wanted())) {
+mtx_unlock_spin(&sched_lock);
+return;
+}
+
 sticks = p->p_sticks;
 
 astoff();
+mtx_intr_enable(&sched_lock);
 atomic_add_int(&cnt.v_soft, 1);
 if (p->p_sflag & PS_OWEUPC) {
 p->p_sflag &= ~PS_OWEUPC;
@@ -59,31 +59,16 @@
 #define cpu_getstack(p) ((p)->p_md.md_regs->tf_esp)
 #define cpu_setstack(p, ap) ((p)->p_md.md_regs->tf_esp = (ap))
 
+#define TRAPF_USERMODE(framep) \
+((ISPL((framep)->tf_cs) == SEL_UPL) || ((framep)->tf_eflags & PSL_VM))
+#define TRAPF_PC(framep) ((framep)->tf_eip)
+
 #define CLKF_USERMODE(framep) \
-((ISPL((framep)->cf_cs) == SEL_UPL) || (framep->cf_eflags & PSL_VM))
+((ISPL((framep)->cf_cs) == SEL_UPL) || ((framep)->cf_eflags & PSL_VM))
 
 #define CLKF_INTR(framep) (curproc->p_intr_nesting_level >= 2)
 #define CLKF_PC(framep) ((framep)->cf_eip)
 
-/*
-* astpending bits
-*/
-#define AST_PENDING 0x00000001
-#define AST_RESCHED 0x00000002
-
-/*
-* Preempt the current process if in interrupt from user mode,
-* or after the current trap/syscall if in system mode.
-*
-* XXX: if astpending is later changed to an |= here due to more flags being
-* added, we will have an atomicy problem. The type of atomicy we need is
-* a non-locked orl.
-*/
-#define need_resched() do { \
-PCPU_SET(astpending, AST_RESCHED|AST_PENDING); \
-} while (0)
-#define resched_wanted() (PCPU_GET(astpending) & AST_RESCHED)
-
 /*
 * Arrange to handle pending profiling ticks before returning to user mode.
 *
@@ -92,27 +77,12 @@
 * counter in the proc table and flag isn't really necessary.
 */
 #define need_proftick(p) do { \
 mtx_lock_spin(&sched_lock); \
 (p)->p_sflag |= PS_OWEUPC; \
-mtx_unlock_spin(&sched_lock); \
 aston(); \
+mtx_unlock_spin(&sched_lock); \
 } while (0)
 
-/*
-* Notify the current process (p) that it has a signal pending,
-* process as soon as possible.
-*
-* XXX: aston() really needs to be an atomic (not locked, but an orl),
-* in case need_resched() is set by an interrupt. But with astpending a
-* per-cpu variable this is not trivial to do efficiently. For now we blow
-* it off (asynchronous need_resched() conflicts are not critical).
-*/
-#define signotify(p) aston()
-#define aston() do { \
-PCPU_SET(astpending, PCPU_GET(astpending) | AST_PENDING); \
-} while (0)
-#define astoff()
-
 /*
 * CTL_MACHDEP definitions.
 */
@@ -63,7 +63,6 @@ struct globaldata {
 int gd_currentldt; /* only used for USER_LDT */
 u_int gd_cpuid;
 u_int gd_other_cpus;
-u_int gd_astpending;
 SLIST_ENTRY(globaldata) gd_allcpu;
 int gd_witness_spin_check;
 #ifdef KTR_PERCPU
@@ -304,7 +304,7 @@
 #define _swi_net swi_net
 #define _swi_null swi_null
 #define _swi_vm swi_vm
-#define _syscall2 syscall2
+#define _syscall syscall
 #define _szsigcode szsigcode
 #define _ticks ticks
 #define _time time
@@ -55,17 +55,20 @@
 SUPERALIGN_TEXT
 .type _doreti,@function
 _doreti:
 
 FAKE_MCOUNT(_bintr) /* init "from" _bintr -> _doreti */
 doreti_next:
 /* Check for ASTs that can be handled now. */
-testl $AST_PENDING,PCPU(ASTPENDING)
-je doreti_exit /* no AST, exit */
 testb $SEL_RPL_MASK,TF_CS(%esp) /* are we in user mode? */
 jne doreti_ast /* yes, do it now. */
 testl $PSL_VM,TF_EFLAGS(%esp) /* kernel mode */
 je doreti_exit /* and not VM86 mode, defer */
 cmpl $1,_in_vm86call /* are we in a VM86 call? */
-jne doreti_ast /* yes, we can do it */
+je doreti_exit /* no, defer */
 
+doreti_ast:
+movl $T_ASTFLT,TF_TRAPNO(%esp)
+call _ast
+
 /*
 * doreti_exit: release MP lock, pop registers, iret.
@@ -80,7 +83,6 @@ doreti_exit:
 
 .globl doreti_popl_fs
 .globl doreti_syscall_ret
-doreti_syscall_ret:
 doreti_popl_fs:
 popl %fs
 .globl doreti_popl_es
@@ -120,14 +122,6 @@ doreti_popl_fs_fault:
 movl $T_PROTFLT,TF_TRAPNO(%esp)
 jmp alltraps_with_regs_pushed
 
-ALIGN_TEXT
-doreti_ast:
-andl $~AST_PENDING,PCPU(ASTPENDING)
-sti
-movl $T_ASTFLT,TF_TRAPNO(%esp)
-call _ast
-jmp doreti_next
-
 #ifdef APIC_IO
 #include "i386/isa/apic_ipl.s"
 #else
@@ -1251,8 +1251,8 @@ psignal(p, sig)
 * It will either never be noticed, or noticed very soon.
 */
 if (p == curproc) {
-mtx_unlock_spin(&sched_lock);
 signotify(p);
+mtx_unlock_spin(&sched_lock);
 }
 #ifdef SMP
 else if (p->p_stat == SRUN) {
@@ -329,6 +329,8 @@ struct proc {
 #define PS_CVWAITQ 0x00080 /* Proces is on a cv_waitq (not slpq). */
 #define PS_SWAPINREQ 0x00100 /* Swapin request due to wakeup. */
 #define PS_SWAPPING 0x00200 /* Process is being swapped. */
+#define PS_ASTPENDING 0x00400 /* Process has a pending ast. */
+#define PS_NEEDRESCHED 0x00800 /* Process needs to yield. */
 
 #define P_MAGIC 0xbeefface
 
@@ -378,6 +380,39 @@ sigonstack(size_t sp)
 : 0);
 }
 
+/*
+* Preempt the current process if in interrupt from user mode,
+* or after the current trap/syscall if in system mode.
+*/
+#define need_resched() do { \
+mtx_assert(&sched_lock, MA_OWNED); \
+curproc->p_sflag |= PS_NEEDRESCHED; \
+} while (0)
+
+#define resched_wanted() (curproc->p_sflag & PS_NEEDRESCHED)
+
+#define clear_resched() do { \
+mtx_assert(&sched_lock, MA_OWNED); \
+curproc->p_sflag &= ~PS_NEEDRESCHED; \
+} while (0)
+
+/*
+* Notify the current process (p) that it has a signal pending,
+* process as soon as possible.
+*/
+#define aston() signotify(CURPROC)
+#define signotify(p) do { \
+mtx_assert(&sched_lock, MA_OWNED); \
+(p)->p_sflag |= PS_ASTPENDING; \
+} while (0)
+
+#define astpending() (curproc->p_sflag & PS_ASTPENDING)
+
+#define astoff() do { \
+mtx_assert(&sched_lock, MA_OWNED); \
+CURPROC->p_sflag &= ~PS_ASTPENDING; \
+} while (0)
+
 /* Handy macro to determine if p1 can mangle p2. */
 #define PRISON_CHECK(p1, p2) \
 ((p1)->p_prison == NULL || (p1)->p_prison == (p2)->p_prison)