Remove abuse of intr_disable/restore in MI code by moving the loop in ast()

back into the calling MD code.  The MD code must ensure no races between
checking the astpending flag and returning to usermode.

Submitted by:	peter (ia64 bits)
Tested on:	alpha (peter, jeff), i386, ia64 (peter), sparc64
This commit is contained in:
Jake Burkholder 2002-03-29 16:35:26 +00:00
parent 1183d01466
commit d0ce9a7e07
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=93389
9 changed files with 144 additions and 29 deletions

View File

@ -130,16 +130,26 @@
CALL(syscall)
/* Handle any AST's. */
2: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
ldq s0, PC_CURTHREAD(pcpup) /* checking for pending asts */
ldq s1, TD_KSE(s0) /* atomically with returning */
ldl s1, KE_FLAGS(s1)
ldiq s2, KEF_ASTPENDING | KEF_NEEDRESCHED
and s1, s2
beq s1, 3f
ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */
call_pal PAL_OSF1_swpipl
mov sp, a0 /* only arg is frame */
CALL(ast)
jmp zero, 2b
/* see if we need a full exception_return */
ldq t1, (FRAME_FLAGS*8)(sp)
3: ldq t1, (FRAME_FLAGS*8)(sp)
and t1, FRAME_FLAGS_SYSCALL
beq t1, exception_return
/* set the hae register if this process has specified a value */
ldq s0, PC_CURTHREAD(pcpup)
ldq t1, TD_MD_FLAGS(s0)
and t1, MDP_HAEUSED
beq t1, 3f
@ -266,8 +276,19 @@ Ler1: LDGP(pv)
beq t0, Lkernelret /* no: kernel return */
/* Handle any AST's or resched's. */
1: ldiq a0, ALPHA_PSL_IPL_HIGH /* disable all interrupts */
call_pal PAL_OSF1_swpipl
ldq s2, TD_KSE(s0) /* checking for pending asts */
ldl s2, KE_FLAGS(s2) /* atomically with returning */
ldiq s3, KEF_ASTPENDING | KEF_NEEDRESCHED
and s2, s3
beq s2, 2f
ldiq a0, ALPHA_PSL_IPL_0 /* reenable interrupts */
call_pal PAL_OSF1_swpipl
mov sp, a0 /* only arg is frame */
CALL(ast)
jmp zero, 1b
2:
#ifdef SMP
br Lrestoreregs
#endif

View File

@ -81,6 +81,11 @@ ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
ASSYM(TD_MD_PCBPADDR, offsetof(struct thread, td_md.md_pcbpaddr));
ASSYM(TD_MD_HAE, offsetof(struct thread, td_md.md_hae));

View File

@ -80,6 +80,7 @@ doreti_ast:
pushl %esp /* pass a pointer to the trapframe */
call ast
add $4,%esp
jmp doreti_ast
/*
* doreti_exit: pop registers, iret.

View File

@ -822,10 +822,38 @@ ENTRY(exception_restore, 0)
extr.u r16=rIPSR,32,2 // extract ipsr.cpl
;;
cmp.eq p1,p2=r0,r16 // test for return to kernel mode
(p1) br.cond.dpnt 2f // skip ast checking for returns to kernel
3:
add r3=PC_CURTHREAD,r13 // &curthread
;;
(p2) add out0=16,sp // trapframe argument to ast()
(p2) br.call.dptk.many rp=ast // note: p1, p2 preserved
ld8 r3=[r3] // curthread
add r2=(KEF_ASTPENDING|KEF_NEEDRESCHED),r0
;;
add r3=TD_KSE,r3 // &curthread->td_kse
mov r15=psr // save interrupt enable status
;;
ld8 r3=[r3] // curkse
;;
add r3=KE_FLAGS,r3 // &curkse->ke_flags
rsm psr.i // disable interrupts
;;
ld4 r14=[r3] // fetch curkse->ke_flags
;;
and r14=r2,r14 // flags & (KEF_ASTPENDING|KEF_NEEDRESCHED)
;;
cmp4.eq p6,p7=r0,r14 // == 0 ?
(p6) br.cond.dptk 2f
;;
mov psr.l=r15 // restore interrupts
;;
srlz.d
;;
add out0=16,sp // trapframe argument to ast()
br.call.sptk.many rp=ast // note: p1, p2 preserved
;;
br 3b
;;
2:
rsm psr.ic|psr.dt|psr.i // disable interrupt collection and vm
add r3=16,sp;
;;

View File

@ -822,10 +822,38 @@ ENTRY(exception_restore, 0)
extr.u r16=rIPSR,32,2 // extract ipsr.cpl
;;
cmp.eq p1,p2=r0,r16 // test for return to kernel mode
(p1) br.cond.dpnt 2f // skip ast checking for returns to kernel
3:
add r3=PC_CURTHREAD,r13 // &curthread
;;
(p2) add out0=16,sp // trapframe argument to ast()
(p2) br.call.dptk.many rp=ast // note: p1, p2 preserved
ld8 r3=[r3] // curthread
add r2=(KEF_ASTPENDING|KEF_NEEDRESCHED),r0
;;
add r3=TD_KSE,r3 // &curthread->td_kse
mov r15=psr // save interrupt enable status
;;
ld8 r3=[r3] // curkse
;;
add r3=KE_FLAGS,r3 // &curkse->ke_flags
rsm psr.i // disable interrupts
;;
ld4 r14=[r3] // fetch curkse->ke_flags
;;
and r14=r2,r14 // flags & (KEF_ASTPENDING|KEF_NEEDRESCHED)
;;
cmp4.eq p6,p7=r0,r14 // == 0 ?
(p6) br.cond.dptk 2f
;;
mov psr.l=r15 // restore interrupts
;;
srlz.d
;;
add out0=16,sp // trapframe argument to ast()
br.call.sptk.many rp=ast // note: p1, p2 preserved
;;
br 3b
;;
2:
rsm psr.ic|psr.dt|psr.i // disable interrupt collection and vm
add r3=16,sp;
;;

View File

@ -76,9 +76,15 @@ ASSYM(MTX_UNOWNED, MTX_UNOWNED);
ASSYM(TD_PROC, offsetof(struct thread, td_proc));
ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
ASSYM(TD_KSE, offsetof(struct thread, td_kse));
ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack));
ASSYM(TD_MD_FLAGS, offsetof(struct thread, td_md.md_flags));
ASSYM(KE_FLAGS, offsetof(struct kse, ke_flags));
ASSYM(KEF_ASTPENDING, KEF_ASTPENDING);
ASSYM(KEF_NEEDRESCHED, KEF_NEEDRESCHED);
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);
ASSYM(FRAME_SYSCALL, FRAME_SYSCALL);

View File

@ -123,7 +123,6 @@ ast(framep)
struct proc *p = td->td_proc;
struct kse *ke = td->td_kse;
u_int prticks, sticks;
register_t s;
int sflag;
int flags;
#if defined(DEV_NPX) && !defined(SMP)
@ -137,16 +136,13 @@ ast(framep)
#endif
mtx_assert(&Giant, MA_NOTOWNED);
prticks = 0; /* XXX: Quiet warning. */
s = intr_disable();
while ((ke->ke_flags & (KEF_ASTPENDING | KEF_NEEDRESCHED)) != 0) {
intr_restore(s);
td->td_frame = framep;
/*
* This updates the p_sflag's for the checks below in one
* "atomic" operation with turning off the astpending flag.
* If another AST is triggered while we are handling the
* AST's saved in sflag, the astpending flag will be set and
* we will loop again.
* ast() will be called again.
*/
mtx_lock_spin(&sched_lock);
sticks = ke->ke_sticks;
@ -190,13 +186,5 @@ ast(framep)
#ifdef DIAGNOSTIC
cred_free_thread(td);
#endif
s = intr_disable();
}
mtx_assert(&Giant, MA_NOTOWNED);
/*
* We need to keep interrupts disabled so that if any further AST's
* come in, the interrupt they come in on will be delayed until we
* finish returning to userland. We assume that the return to userland
* will perform the equivalent of intr_restore().
*/
}

View File

@ -2249,19 +2249,38 @@ ENTRY(tl0_ret)
9:
#endif
wrpr %g0, PIL_TICK, %pil
/*
* Check for pending asts atomically with returning. We must raise
* the pil before checking, and if no asts are found the pil must
* remain raised until the retry is executed, or we risk missing asts
* caused by interrupts occurring after the test. If the pil is lowered,
* as it is when we call ast, the check must be re-executed.
*/
1: wrpr %g0, PIL_TICK, %pil
ldx [PCPU(CURTHREAD)], %l0
ldx [%l0 + TD_KSE], %l1
lduw [%l1 + KE_FLAGS], %l2
and %l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
brz,pt %l2, 1f
brz,a,pt %l2, 2f
nop
wrpr %g0, 0, %pil
call ast
add %sp, CCFSZ + SPOFF, %o0
ba,a %xcc, 1b
nop
1: ldx [PCB_REG + PCB_NSAVED], %l1
/*
* Check for windows that were spilled to the pcb and need to be
* copied out. This must be the last thing that is done before the
* return to usermode. If there are still user windows in the cpu
* and we call a nested function after this, which causes them to be
* spilled to the pcb, they will not be copied out and the stack will
* be inconsistent.
*/
2: ldx [PCB_REG + PCB_NSAVED], %l1
mov T_SPILL, %o0
brnz,a,pn %l1, .Ltl0_trap_reenter
mov T_SPILL, %o0
wrpr %g0, 0, %pil
ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1

View File

@ -2249,19 +2249,38 @@ ENTRY(tl0_ret)
9:
#endif
wrpr %g0, PIL_TICK, %pil
/*
* Check for pending asts atomically with returning. We must raise
* the pil before checking, and if no asts are found the pil must
* remain raised until the retry is executed, or we risk missing asts
* caused by interrupts occurring after the test. If the pil is lowered,
* as it is when we call ast, the check must be re-executed.
*/
1: wrpr %g0, PIL_TICK, %pil
ldx [PCPU(CURTHREAD)], %l0
ldx [%l0 + TD_KSE], %l1
lduw [%l1 + KE_FLAGS], %l2
and %l2, KEF_ASTPENDING | KEF_NEEDRESCHED, %l2
brz,pt %l2, 1f
brz,a,pt %l2, 2f
nop
wrpr %g0, 0, %pil
call ast
add %sp, CCFSZ + SPOFF, %o0
ba,a %xcc, 1b
nop
1: ldx [PCB_REG + PCB_NSAVED], %l1
/*
* Check for windows that were spilled to the pcb and need to be
* copied out. This must be the last thing that is done before the
* return to usermode. If there are still user windows in the cpu
* and we call a nested function after this, which causes them to be
* spilled to the pcb, they will not be copied out and the stack will
* be inconsistent.
*/
2: ldx [PCB_REG + PCB_NSAVED], %l1
mov T_SPILL, %o0
brnz,a,pn %l1, .Ltl0_trap_reenter
mov T_SPILL, %o0
wrpr %g0, 0, %pil
ldx [%sp + SPOFF + CCFSZ + TF_O0], %i0
ldx [%sp + SPOFF + CCFSZ + TF_O1], %i1