- Since critical sections no longer raise the processor interrupt level to
  above what's used for fast interrupts, only interrupts with the level of
  the interrupt which led to calling intr_fast() (which is used with both
  fast and ithread interrupts) are blocked while in that function. Thus
  intr_fast() can be preempted by a fast interrupt (which is of a higher
  level than ithread interrupts) while servicing an ithread interrupt. This
  can lead to a stale pointer to the head of the active interrupt request
  list when back in the ithread interrupt invocation of intr_fast(), in turn
  resulting in corruption of the interrupt request lists and consequently
  in a panic. Solve this by turning off interrupts in intr_fast() before
  reading the pointer to the head of the active list rather than after
  (see the C sketch after this list). [1]
- Add a KASSERT in intr_fast() which asserts that ir_func is non-zero before
  calling it. [1]
- Increment interrupt stats after calling the handlers rather than before.
  This reduces the delay until direct and fast handlers are serviced; in my
  testing by 30% on average for the direct tick interrupt handler, in turn
  resulting in less clock drift.
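
The following is an illustrative, self-contained C sketch (not the kernel
source) of the fixed intr_fast() loop described above: interrupts are disabled
before the head of the active interrupt request list is read, the handler
pointer is asserted to be non-NULL, and the statistics are bumped only after
the handler has run. All types, helpers and counter names below are stand-ins
for the sparc64 assembly and per-CPU data in the diff, not real kernel APIs;
tail-pointer maintenance and the intr_countp indirection are omitted.

#include <assert.h>
#include <stddef.h>

struct intr_request {
	struct intr_request	*ir_next;
	void			(*ir_func)(void *);
	void			*ir_arg;
	unsigned int		ir_vec;
};

/* Stand-ins for PCPU(IRHEAD)/PCPU(IRFREE) and the interrupt counters. */
static struct intr_request	*pcpu_irhead, *pcpu_irfree;
static unsigned long		intrcnt[256], v_intr;

/* Stand-ins for the PSTATE writes that mask/unmask interrupts. */
static void disable_interrupts(void) { /* wrpr %g0, PSTATE_NORMAL, %pstate */ }
static void enable_interrupts(void)  { /* wrpr %g0, PSTATE_KERNEL, %pstate */ }

static void
intr_fast_sketch(void)
{
	struct intr_request *ir;
	void (*func)(void *);
	void *arg;
	unsigned int vec;

	for (;;) {
		/*
		 * Disable interrupts before loading the head pointer.  With
		 * the old order a fast interrupt could preempt us right after
		 * the load, requeue requests and leave this invocation with a
		 * stale head pointer, corrupting the lists.
		 */
		disable_interrupts();
		ir = pcpu_irhead;
		if (ir == NULL) {
			enable_interrupts();
			return;
		}
		/* Copy out the handler before recycling the request. */
		func = ir->ir_func;
		arg = ir->ir_arg;
		vec = ir->ir_vec;
		/* Unlink the request and return it to the free list. */
		pcpu_irhead = ir->ir_next;
		ir->ir_next = pcpu_irfree;
		pcpu_irfree = ir;
		enable_interrupts();

		/* The new KASSERT: never call through a NULL ir_func. */
		assert(func != NULL && "intr_fast: ir_func null");
		func(arg);

		/* Bump the statistics only after the handler was serviced. */
		intrcnt[vec]++;
		v_intr++;
	}
}

Note how ir_func, ir_arg and ir_vec are copied out before the request goes
back on the free list and interrupts are re-enabled, matching the register
loads in the assembly; this is what allows the handler to run and the counters
to be incremented with interrupts enabled again.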

PR:		94778 [1]
Submitted by:	Andrew Belashov [1]
MFC after:	2 weeks
Author:		Marius Strobl
Date:		2006-04-17 21:03:24 +00:00
Commit:		338199fe88 (parent efff0b01b6)
Changes:	2 changed files (46 additions, 38 deletions) in sys/sparc64/sparc64

@@ -2324,6 +2324,19 @@ ENTRY(tl0_intr)
stx %i6, [%sp + SPOFF + CCFSZ + TF_O6]
stx %i7, [%sp + SPOFF + CCFSZ + TF_O7]
call critical_enter
nop
SET(intr_handlers, %l1, %l0)
sllx %l3, IH_SHIFT, %l1
ldx [%l0 + %l1], %l1
KASSERT(%l1, "tl0_intr: ih null")
call %l1
add %sp, CCFSZ + SPOFF, %o0
call critical_exit
nop
/* %l3 contains PIL */
SET(intrcnt, %l1, %l2)
prefetcha [%l2] ASI_N, 1
@@ -2336,23 +2349,10 @@ ENTRY(tl0_intr)
inc %l1
stx %l1, [%l0]
call critical_enter
nop
lduw [PCPU(CNT) + V_INTR], %l0
inc %l0
stw %l0, [PCPU(CNT) + V_INTR]
SET(intr_handlers, %l1, %l0)
sllx %l3, IH_SHIFT, %l1
ldx [%l0 + %l1], %l1
KASSERT(%l1, "tl0_intr: ih null")
call %l1
add %sp, CCFSZ + SPOFF, %o0
call critical_exit
nop
ba,a %xcc, tl0_ret
nop
END(tl0_intr)
@@ -2841,6 +2841,19 @@ ENTRY(tl1_intr)
mov %l5, PCPU_REG
wrpr %g0, PSTATE_KERNEL, %pstate
call critical_enter
nop
SET(intr_handlers, %l5, %l4)
sllx %l7, IH_SHIFT, %l5
ldx [%l4 + %l5], %l5
KASSERT(%l5, "tl1_intr: ih null")
call %l5
add %sp, CCFSZ + SPOFF, %o0
call critical_exit
nop
/* %l7 contains PIL */
SET(intrcnt, %l5, %l4)
prefetcha [%l4] ASI_N, 1
@@ -2853,23 +2866,10 @@ ENTRY(tl1_intr)
inc %l5
stx %l5, [%l4]
call critical_enter
nop
lduw [PCPU(CNT) + V_INTR], %l4
inc %l4
stw %l4, [PCPU(CNT) + V_INTR]
SET(intr_handlers, %l5, %l4)
sllx %l7, IH_SHIFT, %l5
ldx [%l4 + %l5], %l5
KASSERT(%l5, "tl1_intr: ih null")
call %l5
add %sp, CCFSZ + SPOFF, %o0
call critical_exit
nop
ldx [%sp + SPOFF + CCFSZ + TF_Y], %l4
ldx [%sp + SPOFF + CCFSZ + TF_G1], %g1

@@ -152,16 +152,22 @@ END(intr_vector)
ENTRY(intr_fast)
save %sp, -CCFSZ, %sp
1: ldx [PCPU(IRHEAD)], %l0
/*
* Disable interrupts while we fiddle with the interrupt request lists
* as interrupts at levels higher than what got us here aren't blocked.
*/
1: wrpr %g0, PSTATE_NORMAL, %pstate
ldx [PCPU(IRHEAD)], %l0
brnz,a,pt %l0, 2f
nop
wrpr %g0, PSTATE_KERNEL, %pstate
ret
restore
2: wrpr %g0, PSTATE_NORMAL, %pstate
ldx [%l0 + IR_NEXT], %l1
2: ldx [%l0 + IR_NEXT], %l1
brnz,pt %l1, 3f
stx %l1, [PCPU(IRHEAD)]
PCPU_ADDR(IRHEAD, %l1)
@@ -171,6 +177,16 @@ ENTRY(intr_fast)
ldx [%l0 + IR_ARG], %o1
lduw [%l0 + IR_VEC], %o2
ldx [PCPU(IRFREE)], %l1
stx %l1, [%l0 + IR_NEXT]
stx %l0, [PCPU(IRFREE)]
wrpr %g0, PSTATE_KERNEL, %pstate
KASSERT(%o0, "intr_fast: ir_func null")
call %o0
mov %o1, %o0
/* intrcnt[intr_countp[%o2]]++ */
SET(intrcnt, %l7, %l2) /* %l2 = intrcnt */
prefetcha [%l2] ASI_N, 1
@@ -183,14 +199,6 @@ ENTRY(intr_fast)
inc %l2
stx %l2, [%l7]
ldx [PCPU(IRFREE)], %l1
stx %l1, [%l0 + IR_NEXT]
stx %l0, [PCPU(IRFREE)]
wrpr %g0, PSTATE_KERNEL, %pstate
call %o0
mov %o1, %o0
ba,a %xcc, 1b
nop
END(intr_fast)