From e9911cf591a610bdc5ba4843942d0f58cf8829e8 Mon Sep 17 00:00:00 2001
From: John Baldwin
Date: Tue, 12 Aug 2003 19:33:36 +0000
Subject: [PATCH] - Convert Alpha over to the new calling conventions for
 cpu_throw() and cpu_switch() where both the old and new threads are passed
 in as arguments.  Only powerpc uses the old conventions now. - Update
 comments in the Alpha swtch.s to reflect KSE changes.

Tested by: obrien, marcel
---
 sys/alpha/alpha/mp_machdep.c |   2 +-
 sys/alpha/alpha/swtch.s      | 101 +++++++++++++++--------------------
 sys/kern/kern_kse.c          |   2 +-
 sys/kern/kern_synch.c        |   4 +-
 sys/kern/kern_thr.c          |   2 +-
 sys/kern/kern_thread.c       |   2 +-
 sys/sys/proc.h               |   2 +-
 7 files changed, 50 insertions(+), 65 deletions(-)

diff --git a/sys/alpha/alpha/mp_machdep.c b/sys/alpha/alpha/mp_machdep.c
index 78f9c8668eb7..8d395592bbaf 100644
--- a/sys/alpha/alpha/mp_machdep.c
+++ b/sys/alpha/alpha/mp_machdep.c
@@ -208,7 +208,7 @@ smp_init_secondary(void)
 
 	/* ok, now grab sched_lock and enter the scheduler */
 	mtx_lock_spin(&sched_lock);
-	cpu_throw();				/* doesn't return */
+	cpu_throw(NULL, choosethread());	/* doesn't return */
 
 	panic("scheduler returned us to %s", __func__);
 }
diff --git a/sys/alpha/alpha/swtch.s b/sys/alpha/alpha/swtch.s
index 34f345330219..8523a2457973 100644
--- a/sys/alpha/alpha/swtch.s
+++ b/sys/alpha/alpha/swtch.s
@@ -49,7 +49,7 @@
 /*
  * savectx: save process context, i.e. callee-saved registers
  *
- * Note that savectx() only works for processes other than curthread,
+ * Note that savectx() only works for threads other than curthread,
  * since cpu_switch will copy over the info saved here.  (It _can_
  * sanely be used for curthread iff cpu_switch won't be called again, e.g.
  * from if called from boot().)
@@ -88,58 +88,67 @@ IMPORT(Lev1map, 8)
 
 /*
  * cpu_throw()
- * Switch to a new task discarding our current state.
+ * Switch to a new thread discarding our current state.
+ *
+ * Arguments:
+ *	a0	'struct thread *' of the old thread
+ *	a1	'struct thread *' of the new thread
  */
 LEAF(cpu_throw, 0)
 	LDGP(pv)
-	mov	zero, s0			/* ensure newproc != oldproc */
 	CALL(Lcs1)
 	END(cpu_throw)
 
 /*
  * cpu_switch()
- * Find the highest priority process and resume it.
+ * Switch to a new thread saving the current state in the old thread.
+ *
+ * Arguments:
+ *	a0	'struct thread *' of the old thread
+ *	a1	'struct thread *' of the new thread
  */
 LEAF(cpu_switch, 1)
 	LDGP(pv)
 	/* do an inline savectx(), to save old context */
-	ldq	a0, PC_CURTHREAD(pcpup)
-	ldq	a1, TD_PCB(a0)
+	ldq	a2, TD_PCB(a0)
 	/* NOTE: ksp is stored by the swpctx */
-	stq	s0, PCB_CONTEXT+(0 * 8)(a1)	/* store s0 - s6 */
-	stq	s1, PCB_CONTEXT+(1 * 8)(a1)
-	stq	s2, PCB_CONTEXT+(2 * 8)(a1)
-	stq	s3, PCB_CONTEXT+(3 * 8)(a1)
-	stq	s4, PCB_CONTEXT+(4 * 8)(a1)
-	stq	s5, PCB_CONTEXT+(5 * 8)(a1)
-	stq	s6, PCB_CONTEXT+(6 * 8)(a1)
-	stq	ra, PCB_CONTEXT+(7 * 8)(a1)	/* store ra */
+	stq	s0, PCB_CONTEXT+(0 * 8)(a2)	/* store s0 - s6 */
+	stq	s1, PCB_CONTEXT+(1 * 8)(a2)
+	stq	s2, PCB_CONTEXT+(2 * 8)(a2)
+	stq	s3, PCB_CONTEXT+(3 * 8)(a2)
+	stq	s4, PCB_CONTEXT+(4 * 8)(a2)
+	stq	s5, PCB_CONTEXT+(5 * 8)(a2)
+	stq	s6, PCB_CONTEXT+(6 * 8)(a2)
+	stq	ra, PCB_CONTEXT+(7 * 8)(a2)	/* store ra */
 	call_pal PAL_OSF1_rdps			/* NOTE: doesn't kill a0 */
-	stq	v0, PCB_CONTEXT+(8 * 8)(a1)	/* store ps, for ipl */
+	stq	v0, PCB_CONTEXT+(8 * 8)(a2)	/* store ps, for ipl */
 
 	mov	a0, s0				/* s0 = old curthread */
-	mov	a1, s1				/* s1 = old pcb */
+	mov	a2, s1				/* s1 = old pcb */
+
+	/*
+	 * Deactivate the old address space before activating the
+	 * new one.  We need to do this before activating the
+	 * new thread's address space in the event that new
+	 * thread is using the same vmspace as the old.  If we
+	 * do this after we activate, then we might end up
+	 * incorrectly marking the pmap inactive!
+	 *
+	 * We don't deactivate if we came here from switch_exit
+	 * (old pmap no longer exists; vmspace has been freed).
+	 * oldproc will be NULL in this case.  We have actually
+	 * taken care of calling pmap_deactivate() in cpu_exit(),
+	 * before the vmspace went away.
+	 */
+	beq	a0, sw1
+	CALL(pmap_deactivate)			/* pmap_deactivate(oldthread) */
 
 sw1:	br	pv, Lcs1
 Lcs1:	LDGP(pv)
-	CALL(choosethread)			/* can't return NULL */
-	mov	v0, s2				/* s2 = new thread */
+	mov	a1, s2				/* s2 = new thread */
 	ldq	s3, TD_MD_PCBPADDR(s2)		/* s3 = new pcbpaddr */
 
-	/*
-	 * Check to see if we're switching to ourself.  If we are,
-	 * don't bother loading the new context.
-	 *
-	 * Note that even if we re-enter cpu_switch() from idle(),
-	 * s0 will still contain the old curthread value because any
-	 * users of that register between then and now must have
-	 * saved it.  Also note that switch_exit() ensures that
-	 * s0 is clear before jumping here to find a new process.
-	 */
-	cmpeq	s0, s2, t0			/* oldthread == newthread? */
-	bne	t0, Lcs7			/* Yes!  Skip! */
-
 #ifdef SMP
 	/*
 	 * Save fp state if we have some.
@@ -150,27 +159,7 @@ Lcs1:	LDGP(pv)
 #endif
 
 	/*
-	 * Deactivate the old address space before activating the
-	 * new one.  We need to do this before activating the
-	 * new process's address space in the event that new
-	 * process is using the same vmspace as the old.  If we
-	 * do this after we activate, then we might end up
-	 * incorrectly marking the pmap inactive!
-	 *
-	 * We don't deactivate if we came here from switch_exit
-	 * (old pmap no longer exists; vmspace has been freed).
-	 * oldproc will be NULL in this case.  We have actually
-	 * taken care of calling pmap_deactivate() in cpu_exit(),
-	 * before the vmspace went away.
-	 */
-	beq	s0, Lcs6
-
-	mov	s0, a0				/* pmap_deactivate(oldthread) */
-	CALL(pmap_deactivate)			/* XXXKSE */
-
-Lcs6:
-	/*
-	 * Activate the new process's address space and perform
+	 * Activate the new thread's address space and perform
 	 * the actual context swap.
 	 */
 
@@ -180,18 +169,14 @@ Lcs6:
 	mov	s3, a0				/* swap the context */
 	SWITCH_CONTEXT
 
-Lcs7:
-
 	/*
 	 * Now that the switch is done, update curthread and other
-	 * globals.  We must do this even if switching to ourselves
-	 * because we might have re-entered cpu_switch() from idle(),
-	 * in which case curthread would be NULL.
+	 * globals.
 	 */
 	stq	s2, PC_CURTHREAD(pcpup)		/* curthread = p */
 
 	/*
-	 * Now running on the new u struct.
+	 * Now running on the new pcb.
 	 * Restore registers and return.
 	 */
 	ldq	t0, TD_PCB(s2)
diff --git a/sys/kern/kern_kse.c b/sys/kern/kern_kse.c
index 40a2923be8a9..a5a294da7e31 100644
--- a/sys/kern/kern_kse.c
+++ b/sys/kern/kern_kse.c
@@ -1289,7 +1289,7 @@ thread_exit(void)
 	}
 	/* XXX Shouldn't cpu_throw() here. */
 	mtx_assert(&sched_lock, MA_OWNED);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 	cpu_throw(td, choosethread());
 #else
 	cpu_throw();
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 384feea84000..7525f3a32a7c 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -459,7 +459,7 @@ mi_switch(void)
 {
 	struct bintime new_switchtime;
 	struct thread *td;
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 	struct thread *newtd;
 #endif
 	struct proc *p;
@@ -517,7 +517,7 @@ mi_switch(void)
 	thread_switchout(td);
 	sched_switchout(td);
 
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 	newtd = choosethread();
 	if (td != newtd)
 		cpu_switch(td, newtd);	/* SHAZAM!! */
diff --git a/sys/kern/kern_thr.c b/sys/kern/kern_thr.c
index 9f2405751e27..4cb4df4b25db 100644
--- a/sys/kern/kern_thr.c
+++ b/sys/kern/kern_thr.c
@@ -111,7 +111,7 @@ thr_exit1(void)
 	sched_exit_thread(TAILQ_NEXT(td, td_kglist), td);
 	thread_stash(td);
 
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 	cpu_throw(td, choosethread());
 #else
 	cpu_throw();
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 40a2923be8a9..a5a294da7e31 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -1289,7 +1289,7 @@ thread_exit(void)
 	}
 	/* XXX Shouldn't cpu_throw() here. */
 	mtx_assert(&sched_lock, MA_OWNED);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 	cpu_throw(td, choosethread());
 #else
 	cpu_throw();
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 97d0699dcc93..21b5e33f50ee 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -865,7 +865,7 @@ int	sigonstack(size_t sp);
 void	sleepinit(void);
 void	stopevent(struct proc *, u_int, u_int);
 void	cpu_idle(void);
-#if !defined(__alpha__) && !defined(__powerpc__)
+#if !defined(__powerpc__)
 void	cpu_switch(struct thread *old, struct thread *new);
 void	cpu_throw(struct thread *old, struct thread *new) __dead2;
 #else
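
For illustration only, not part of the patch above: a minimal sketch of how machine-independent code drives the new calling convention, following the smp_init_secondary() and mi_switch() hunks. The function names switch_example() and throw_example() are invented for this sketch; only cpu_switch(), cpu_throw(), and choosethread() come from the patch.

#include <sys/param.h>
#include <sys/proc.h>

/*
 * New convention (sketch, not kernel source): the caller picks the next
 * thread and passes both the outgoing and incoming threads explicitly.
 */
static void
switch_example(struct thread *td)
{
	struct thread *newtd;

	newtd = choosethread();			/* next thread to run */
	if (td != newtd)
		cpu_switch(td, newtd);		/* save old state, resume new */
}

static void
throw_example(void)
{
	/*
	 * No old context worth saving (e.g. an AP entering the scheduler
	 * for the first time), so the old thread is passed as NULL.
	 */
	cpu_throw(NULL, choosethread());	/* does not return */
}

Under the old convention, which powerpc still uses, both functions take no arguments and select the next thread internally, as the deleted CALL(choosethread) in the Alpha swtch.s hunk shows.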