From 56a114967b012c4cae54928ace15b2178481ad14 Mon Sep 17 00:00:00 2001 From: Jeff Roberson <jeff@FreeBSD.org> Date: Tue, 17 Jul 2007 22:34:14 +0000 Subject: [PATCH] - Add support for blocking and releasing threads to i386 cpu_switch(). This is required for per-cpu scheduler lock support. Obtained from: attilio Tested by: current@ many users Approved by: re --- sys/i386/i386/genassym.c | 1 + sys/i386/i386/swtch.s | 32 +++++++++++++++++++++++++++++--- 2 files changed, 30 insertions(+), 3 deletions(-) diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c index 791bf094ec37..2c93d53534e5 100644 --- a/sys/i386/i386/genassym.c +++ b/sys/i386/i386/genassym.c @@ -81,6 +81,7 @@ ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(P_SFLAG, offsetof(struct proc, p_sflag)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); +ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_MD, offsetof(struct thread, td_md)); diff --git a/sys/i386/i386/swtch.s b/sys/i386/i386/swtch.s index 9002a8160fe1..e47de5e6e064 100644 --- a/sys/i386/i386/swtch.s +++ b/sys/i386/i386/swtch.s @@ -33,11 +33,28 @@ */ #include "opt_npx.h" +#include "opt_sched.h" #include <machine/asmacros.h> #include "assym.s" +#if defined(SMP) && defined(SCHED_ULE) +#define SETOP xchgl +#define BLOCK_SPIN(reg) \ + movl $blocked_lock,%eax ; \ + 100: ; \ + lock ; \ + cmpxchgl %eax,TD_LOCK(reg) ; \ + jne 101f ; \ + pause ; \ + jmp 100b ; \ + 101: +#else +#define SETOP movl +#define BLOCK_SPIN(reg) +#endif + /*****************************************************************************/ /* Scheduling */ /*****************************************************************************/ @@ -91,6 +108,7 @@ ENTRY(cpu_throw) * 0(%esp) = ret * 4(%esp) = oldtd * 8(%esp) = newtd + * 12(%esp) = newlock */ ENTRY(cpu_switch) @@ -145,13 +163,14 @@ ENTRY(cpu_switch) #endif /* Save is done. Now fire up new thread. Leave old vmspace. 
*/ + movl 4(%esp),%edi movl 8(%esp),%ecx /* New thread */ + movl 12(%esp),%esi /* New lock */ #ifdef INVARIANTS testl %ecx,%ecx /* no thread? */ jz badsw3 /* no, panic */ #endif movl TD_PCB(%ecx),%edx - movl PCPU(CPUID), %esi /* switch address space */ movl PCB_CR3(%edx),%eax @@ -160,11 +179,14 @@ ENTRY(cpu_switch) #else cmpl %eax,IdlePTD /* Kernel address space? */ #endif - je sw1 + je sw0 movl %cr3,%ebx /* The same address space? */ cmpl %ebx,%eax - je sw1 + je sw0 movl %eax,%cr3 /* new address space */ + movl %esi,%eax + movl PCPU(CPUID),%esi + SETOP %eax,TD_LOCK(%edi) /* Switchout td_lock */ /* Release bit from old pmap->pm_active */ movl PCPU(CURPMAP), %ebx @@ -182,8 +204,12 @@ ENTRY(cpu_switch) lock #endif btsl %esi, PM_ACTIVE(%ebx) /* set new */ + jmp sw1 +sw0: + SETOP %esi,TD_LOCK(%edi) /* Switchout td_lock */ sw1: + BLOCK_SPIN(%ecx) /* * At this point, we've switched address spaces and are ready * to load up the rest of the next context.