- MFi386: sys/i386/i386/intr_machdep.c rev. 1.11

Don't use atomic ops to increment interrupt stats.
  On sparc64 this reduces the delay until tick interrupts are serviced by 1/10th
  on average. In turn this reduces the clock drift caused by these delays,
  leaving less drift that has to be compensated in tick_hardclock().
  This includes switching from atomically incrementing the global cnt.v_intr
  to the asm equivalent of PCPU_LAZY_INC(cnt.v_intr) in exception.S
- Correct some comments to match the registers actually used.
- Correct some format specifiers, interrupt levels passed in are u_int.
- Use FBSDID.

Ok'ed by:	jhb
This commit is contained in:
Marius Strobl 2005-04-16 15:05:56 +00:00
parent 197bb5864f
commit 2f15864c85
4 changed files with 28 additions and 20 deletions

View File

@ -2320,14 +2320,16 @@ ENTRY(tl0_intr)
lduh [%l0 + %l1], %l0
sllx %l0, 3, %l0
add %l0, %l2, %l0
ATOMIC_INC_ULONG(%l0, %l1, %l2)
ldx [%l0], %l1
inc %l1
stx %l1, [%l0]
call critical_enter
nop
SET(cnt+V_INTR, %l1, %l0)
ATOMIC_INC_INT(%l0, %l1, %l2)
lduw [PCPU(CNT) + V_INTR], %l0
inc %l0
stw %l0, [PCPU(CNT) + V_INTR]
SET(intr_handlers, %l1, %l0)
sllx %l3, IH_SHIFT, %l1
@ -2779,7 +2781,7 @@ ENTRY(tl1_intr)
#if KTR_COMPILE & KTR_INTR
CATR(KTR_INTR,
"tl1_intr: td=%p level=%#lx pil=%#lx pc=%#lx sp=%#lx"
"tl1_intr: td=%p level=%#x pil=%#lx pc=%#lx sp=%#lx"
, %g1, %g2, %g3, 7, 8, 9)
ldx [PCPU(CURTHREAD)], %g2
stx %g2, [%g1 + KTR_PARM1]
@ -2827,7 +2829,7 @@ ENTRY(tl1_intr)
mov %l5, PCPU_REG
wrpr %g0, PSTATE_KERNEL, %pstate
/* %l3 contains PIL */
/* %l7 contains PIL */
SET(intrcnt, %l5, %l4)
prefetcha [%l4] ASI_N, 1
SET(pil_countp, %l5, %l6)
@ -2835,14 +2837,16 @@ ENTRY(tl1_intr)
lduh [%l5 + %l6], %l5
sllx %l5, 3, %l5
add %l5, %l4, %l4
ATOMIC_INC_ULONG(%l4, %l5, %l6)
ldx [%l4], %l5
inc %l5
stx %l5, [%l4]
call critical_enter
nop
SET(cnt+V_INTR, %l5, %l4)
ATOMIC_INC_INT(%l4, %l5, %l6)
lduw [PCPU(CNT) + V_INTR], %l4
inc %l4
stw %l4, [PCPU(CNT) + V_INTR]
SET(intr_handlers, %l5, %l4)
sllx %l7, IH_SHIFT, %l5
@ -2880,7 +2884,7 @@ ENTRY(tl1_intr)
wrpr %g3, 0, %tnpc
#if KTR_COMPILE & KTR_INTR
CATR(KTR_INTR, "tl1_intr: td=%#lx pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
CATR(KTR_INTR, "tl1_intr: td=%#x pil=%#lx ts=%#lx pc=%#lx sp=%#lx"
, %g2, %g3, %g4, 7, 8, 9)
ldx [PCPU(CURTHREAD)], %g3
stx %g3, [%g2 + KTR_PARM1]

View File

@ -24,9 +24,11 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_kstack_pages.h"
#include <sys/param.h>
@ -197,6 +199,7 @@ ASSYM(PC_TLB_CTX, offsetof(struct pcpu, pc_tlb_ctx));
ASSYM(PC_TLB_CTX_MAX, offsetof(struct pcpu, pc_tlb_ctx_max));
ASSYM(PC_TLB_CTX_MIN, offsetof(struct pcpu, pc_tlb_ctx_min));
ASSYM(PC_PMAP, offsetof(struct pcpu, pc_pmap));
ASSYM(PC_CNT, offsetof(struct pcpu, pc_cnt));
ASSYM(PC_SIZEOF, sizeof(struct pcpu));
ASSYM(IH_SHIFT, IH_SHIFT);

View File

@ -171,16 +171,17 @@ ENTRY(intr_fast)
ldx [%l0 + IR_ARG], %o1
lduw [%l0 + IR_VEC], %o2
/* load intrcnt[intr_countp[%o2]] into %l4 */
SET(intrcnt, %l7, %l2) /* %l5 = intrcnt */
/* intrcnt[intr_countp[%o2]]++ */
SET(intrcnt, %l7, %l2) /* %l2 = intrcnt */
prefetcha [%l2] ASI_N, 1
SET(intr_countp, %l7, %l3) /* %l6 = intr_countp */
SET(intr_countp, %l7, %l3) /* %l3 = intr_countp */
sllx %o2, 1, %l4 /* %l4 = vec << 1 */
lduh [%l4 + %l3], %l5 /* %l6 = intr_countp[%o2] */
lduh [%l4 + %l3], %l5 /* %l5 = intr_countp[%o2] */
sllx %l5, 3, %l6 /* %l6 = intr_countp[%o2] << 3 */
add %l6, %l2, %l7 /* %l4 = intrcnt[intr_countp[%o2]] */
ATOMIC_INC_ULONG(%l7, %l5, %l2)
add %l6, %l2, %l7 /* %l7 = intrcnt[intr_countp[%o2]] */
ldx [%l7], %l2
inc %l2
stx %l2, [%l7]
ldx [PCPU(IRFREE)], %l1
stx %l1, [%l0 + IR_NEXT]

View File

@ -199,7 +199,7 @@ intr_stray_vector(void *cookie)
iv = cookie;
if (intr_stray_count[iv->iv_vec] < MAX_STRAY_LOG) {
printf("stray vector interrupt %d\n", iv->iv_vec);
atomic_add_long(&intr_stray_count[iv->iv_vec], 1);
intr_stray_count[iv->iv_vec]++;
if (intr_stray_count[iv->iv_vec] >= MAX_STRAY_LOG)
printf("got %d stray interrupt %d's: not logging "
"anymore\n", MAX_STRAY_LOG, iv->iv_vec);