Fix wakeup latency when sleeping with 'wait'
If we handle an interrupt just before the 'wait' instruction and the interrupt schedules some work, we need to skip the 'wait' call. The simple solution of calling sched_runnable() with interrupts disabled immediately before 'wait' still leaves a window, after the call and before 'wait', in which the same issue can occur.

The solution implemented here is to check the EPC in the interrupt handler and, if it falls in the region just before the 'wait' call, fix up the EPC so that the 'wait' is skipped.

Reported/analysed by:	adrian
Fix suggested by:	kib
Reviewed by:	jmallett, imp
parent dceed24a7c
commit 29550c285c

sys/mips
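In C terms, the EPC fixup described in the message and implemented in the MipsKernIntr hunk below amounts to the following sketch. It is illustrative only: the real check runs in assembly on k0/k1 before the trapframe is saved, and `epc` stands in for the value read from MIPS_COP_0_EXC_PC; MipsWaitStart and MipsWaitEnd are the actual symbols added by this commit.

    /* Illustrative C equivalent of the assembly fixup in MipsKernIntr. */
    uintptr_t pc = epc & ~(uintptr_t)0xf;        /* 16-byte align the EPC */
    if (pc == (uintptr_t)MipsWaitStart)
            epc = (uintptr_t)MipsWaitStart + 16; /* resume at MipsWaitEnd, past 'wait' */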
@@ -56,6 +56,7 @@ void MipsSwitchFPState(struct thread *, struct trapframe *);
 u_long kvtop(void *addr);
 int is_cacheable_mem(vm_paddr_t addr);
 void mips_generic_reset(void);
+void mips_wait(void);
 
 #define MIPS_DEBUG 0
 
@@ -557,6 +557,33 @@ NON_LEAF(MipsUserGenException, CALLFRAME_SIZ, ra)
 	.set	at
 END(MipsUserGenException)
 
+	.set push
+	.set noat
+NON_LEAF(mips_wait, CALLFRAME_SIZ, ra)
+	PTR_SUBU	sp, sp, CALLFRAME_SIZ
+	.mask	0x80000000, (CALLFRAME_RA - CALLFRAME_SIZ)
+	REG_S	ra, CALLFRAME_RA(sp)	# save RA
+	mfc0	t0, MIPS_COP_0_STATUS
+	xori	t1, t0, MIPS_SR_INT_IE
+	mtc0	t1, MIPS_COP_0_STATUS
+	COP0_SYNC
+	jal	sched_runnable
+	nop
+	REG_L	ra, CALLFRAME_RA(sp)
+	mfc0	t0, MIPS_COP_0_STATUS
+	ori	t1, t0, MIPS_SR_INT_IE
+	.align 4
+GLOBAL(MipsWaitStart)		# this is 16 byte aligned
+	mtc0	t1, MIPS_COP_0_STATUS
+	bnez	v0, MipsWaitEnd
+	nop
+	wait
+GLOBAL(MipsWaitEnd)		# MipsWaitStart + 16
+	jr	ra
+	PTR_ADDU	sp, sp, CALLFRAME_SIZ
+END(mips_wait)
+	.set pop
+
 /*----------------------------------------------------------------------------
  *
  * MipsKernIntr --
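The scheme relies on the region's size and alignment: the four instructions from MipsWaitStart up to MipsWaitEnd (mtc0, bnez, nop, wait) are 4 bytes each, and `.align 4` places MipsWaitStart on a 16-byte boundary, so masking the low four bits of any EPC taken inside the region yields exactly MipsWaitStart. A minimal, self-contained C check of that invariant (the address used is hypothetical):

    #include <assert.h>
    #include <stdint.h>

    int
    main(void)
    {
            /* Hypothetical 16-byte-aligned address for MipsWaitStart. */
            uintptr_t start = 0x80104a30;

            /*
             * The PC of each of the four 4-byte instructions in the
             * region rounds down to MipsWaitStart.
             */
            for (uintptr_t pc = start; pc < start + 16; pc += 4)
                    assert((pc & ~(uintptr_t)0xf) == start);
            return (0);
    }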
@@ -578,6 +605,19 @@ NON_LEAF(MipsKernIntr, KERN_EXC_FRAME_SIZE, ra)
 	.set	noat
 	PTR_SUBU	sp, sp, KERN_EXC_FRAME_SIZE
 	.mask	0x80000000, (CALLFRAME_RA - KERN_EXC_FRAME_SIZE)
+
+	/*
+	 * Check for getting interrupts just before wait
+	 */
+	MFC0	k0, MIPS_COP_0_EXC_PC
+	ori	k0, 0xf
+	xori	k0, 0xf		# 16 byte align
+	PTR_LA	k1, MipsWaitStart
+	bne	k0, k1, 1f
+	nop
+	PTR_ADDU	k1, 16	# skip over wait
+	MTC0	k1, MIPS_COP_0_EXC_PC
+1:
 	/*
 	 * Save CPU state, building 'frame'.
 	 */
@@ -163,6 +163,9 @@ extern char MipsTLBMiss[], MipsTLBMissEnd[];
 /* Cache error handler */
 extern char MipsCache[], MipsCacheEnd[];
 
+/* MIPS wait skip region */
+extern char MipsWaitStart[], MipsWaitEnd[];
+
 extern char edata[], end[];
 #ifdef DDB
 extern vm_offset_t ksym_start, ksym_end;
@@ -326,6 +329,12 @@ struct msgbuf *msgbufp=0;
 void
 mips_vector_init(void)
 {
+	/*
+	 * Make sure that the wait region logic has not
+	 * been changed.
+	 */
+	if (MipsWaitEnd - MipsWaitStart != 16)
+		panic("startup: MIPS wait region not correct");
 	/*
 	 * Copy down exception vector code.
 	 */
@@ -485,24 +494,9 @@ spinlock_exit(void)
 /*
  * call platform specific code to halt (until next interrupt) for the idle loop
  */
-/*
- * This is disabled because of three issues:
- *
- * + By calling critical_enter(), any interrupt which occurs after that but
- *   before the wait instruction will be handled but not serviced (in the case
- *   of a netisr) because preemption is not allowed at this point;
- * + Any fast interrupt handler which schedules an immediate or fast callout
- *   will not occur until the wait instruction is interrupted, as the clock
- *   has already been set by cpu_idleclock();
- * + There is currently no known way to atomically enable interrupts and call
- *   wait, which is how the i386/amd64 code gets around (1). Thus even if
- *   interrupts were disabled and reenabled just before the wait call, any
- *   interrupt that did occur may not interrupt wait.
- */
 void
 cpu_idle(int busy)
 {
-#if 0
 	KASSERT((mips_rd_status() & MIPS_SR_INT_IE) != 0,
 	    ("interrupts disabled in idle process."));
 	KASSERT((mips_rd_status() & MIPS_INT_MASK) != 0,
@@ -512,12 +506,11 @@ cpu_idle(int busy)
 		critical_enter();
 		cpu_idleclock();
 	}
-	__asm __volatile ("wait");
+	mips_wait();
 	if (!busy) {
 		cpu_activeclock();
 		critical_exit();
 	}
-#endif
 }
 
 int