- Don't call clear_resched() in userret(); instead, clear the resched flag
  in mi_switch() just before calling cpu_switch() so that the first switch
  after a resched request will satisfy the request.
- While I'm at it, move a few things out of cpu_switch() and into
  mi_switch(): specifically, set the p_oncpu and p_lastcpu members of
  struct proc in mi_switch(), and handle the sched_lock state change
  across a context switch in mi_switch().
- Since cpu_switch() no longer handles the sched_lock state change, we
  have to set up an initial state for sched_lock in fork_exit() before we
  release it (see the sketch below).
John Baldwin 2001-02-20 05:26:15 +00:00
parent d2a1864b0a
commit 5813dc03bd
25 changed files with 18 additions and 131 deletions
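
To make the new division of labor concrete, here is a condensed sketch of the switch path after this change, pieced together from the kern_synch.c hunk below. The time accounting, KTR tracing, and priority handling that surround it are omitted, and the comments are mine rather than the committed ones; treat it as an illustration, not the literal source.

/*
 * Sketch only: mi_switch() is entered with sched_lock held.  The MI
 * code now saves and restores the lock state and the per-cpu
 * bookkeeping that cpu_switch() used to handle in assembly.
 */
void
mi_switch(void)
{
	u_int sched_nest;

	/* ... spl, accounting, switchtime handling ... */

	/* Remember the recursion count; cpu_switch() no longer saves it. */
	sched_nest = sched_lock.mtx_recurse;
	curproc->p_lastcpu = curproc->p_oncpu;	/* moved here from cpu_switch() */
	curproc->p_oncpu = NOCPU;		/* we "leave" the cpu */
	clear_resched();	/* this switch satisfies any pending request */

	cpu_switch();		/* MD: swap register context only */

	/* Running again, possibly on another cpu and as another curproc. */
	curproc->p_oncpu = PCPU_GET(cpuid);
	sched_lock.mtx_recurse = sched_nest;
	sched_lock.mtx_lock = curproc;	/* hand the lock to the new owner */

	/* ... remaining accounting ... */
}

Judging from the kern_synch.c hunk, only the new sched_nest declaration and the lines bracketing cpu_switch() between the two CTR4() trace calls actually change; the rest of mi_switch() is untouched.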

View File

@@ -94,7 +94,6 @@ ASSYM(PTESIZE, PTESIZE);
ASSYM(U_PCB_ONFAULT, offsetof(struct user, u_pcb.pcb_onfault));
ASSYM(U_PCB_HWPCB_KSP, offsetof(struct user, u_pcb.pcb_hw.apcb_ksp));
ASSYM(U_PCB_CONTEXT, offsetof(struct user, u_pcb.pcb_context));
ASSYM(U_PCB_SCHEDNEST, offsetof(struct user, u_pcb.pcb_schednest));
ASSYM(PCB_HW, offsetof(struct pcb, pcb_hw));

View File

@@ -85,7 +85,6 @@ Lsavectx1: LDGP(pv)
/**************************************************************************/
IMPORT(Lev1map, 8)
IMPORT(sched_lock, 72)
/*
* cpu_switch()
@@ -96,8 +95,6 @@ LEAF(cpu_switch, 1)
/* do an inline savectx(), to save old context */
ldq a0, GD_CURPROC(globalp)
ldq a1, P_ADDR(a0)
ldl t0, sched_lock+MTX_RECURSE /* save sched_lock state */
stl t0, U_PCB_SCHEDNEST(a1)
/* NOTE: ksp is stored by the swpctx */
stq s0, U_PCB_CONTEXT+(0 * 8)(a1) /* store s0 - s6 */
stq s1, U_PCB_CONTEXT+(1 * 8)(a1)
@@ -173,7 +170,6 @@ Lcs7:
* in which case curproc would be NULL.
*/
stq s2, GD_CURPROC(globalp) /* curproc = p */
CALL (alpha_clear_resched) /* we've rescheduled */
/*
* Now running on the new u struct.
@@ -190,10 +186,6 @@ Lcs7:
ldq s5, U_PCB_CONTEXT+(5 * 8)(t0)
ldq s6, U_PCB_CONTEXT+(6 * 8)(t0)
ldq ra, U_PCB_CONTEXT+(7 * 8)(t0) /* restore ra */
ldl t1, U_PCB_SCHEDNEST(t0)
stl t1, sched_lock+MTX_RECURSE /* restore lock */
ldq t1, GD_CURPROC(globalp)
stq t1, sched_lock+MTX_LOCK
ldiq v0, 1 /* possible ret to savectx() */
RET

View File

@@ -89,14 +89,6 @@ static void printtrap __P((const unsigned long, const unsigned long,
extern char *syscallnames[];
#endif
void alpha_clear_resched(void);
void
alpha_clear_resched(void)
{
clear_resched();
}
/*
* Define the code needed before returning to user mode, for
* trap and syscall.
@@ -126,7 +118,6 @@ userret(p, frame, oticks)
* before we switch()'ed, we might not be on the queue
* indicated by our priority.
*/
clear_resched();
DROP_GIANT_NOSWITCH();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;

View File

@@ -210,13 +210,6 @@ cpu_fork(p1, p2, flags)
up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a1 */
up->u_pcb.pcb_context[7] =
(u_int64_t)switch_trampoline; /* ra: assembly magic */
/*
* Clear the saved recursion count for sched_lock
* since the child needs only one count which is
* released in switch_trampoline.
*/
up->u_pcb.pcb_schednest = 0;
}
}

View File

@@ -53,7 +53,6 @@ struct pcb {
u_int64_t pcb_fp_control; /* IEEE control word [SW] */
unsigned long pcb_onfault; /* for copy faults [SW] */
unsigned long pcb_accessaddr; /* for [fs]uswintr [SW] */
u_int32_t pcb_schednest; /* state of sched_lock [SW] */
};
/*

View File

@@ -85,10 +85,6 @@ ENTRY(cpu_switch)
testl %ecx,%ecx
jz sw1
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
movl P_VMSPACE(%ecx), %edx
movl PCPU(CPUID), %eax
btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
@@ -124,10 +120,6 @@ ENTRY(cpu_switch)
movl %eax,PCB_DR0(%edx)
1:
/* save sched_lock recursion count */
movl _sched_lock+MTX_RECURSECNT,%eax
movl %eax,PCB_SCHEDNEST(%edx)
#ifdef SMP
/* XXX FIXME: we should be saving the local APIC TPR */
#endif /* SMP */
@@ -242,9 +234,6 @@ sw1b:
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
#endif /* SMP */
movl PCPU(CPUID),%eax
movb %al, P_ONCPU(%ecx)
movl %edx, PCPU(CURPCB)
movl %ecx, PCPU(CURPROC) /* into next process */
@@ -289,17 +278,6 @@ cpu_switch_load_gs:
movl PCB_DR7(%edx),%eax
movl %eax,%dr7
1:
/*
* restore sched_lock recursion count and transfer ownership to
* new process
*/
movl PCB_SCHEDNEST(%edx),%eax
movl %eax,_sched_lock+MTX_RECURSECNT
movl PCPU(CURPROC),%eax
movl %eax,_sched_lock+MTX_LOCK
ret
CROSSJUMPTARGET(sw1a)

View File

@@ -88,9 +88,6 @@ ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
ASSYM(PS_ASTPENDING, PS_ASTPENDING);
ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED);
ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
ASSYM(SSLEEP, SSLEEP);
ASSYM(SRUN, SRUN);
ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
@@ -132,8 +129,6 @@ ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7));
ASSYM(PCB_DBREGS, PCB_DBREGS);
ASSYM(PCB_EXT, offsetof(struct pcb, pcb_ext));
ASSYM(PCB_SCHEDNEST, offsetof(struct pcb, pcb_schednest));
ASSYM(PCB_SPARE, offsetof(struct pcb, __pcb_spare));
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_savefpu));

View File

@@ -2056,7 +2056,6 @@ init386(first)
/* setup proc 0's pcb */
proc0.p_addr->u_pcb.pcb_flags = 0;
proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
proc0.p_addr->u_pcb.pcb_schednest = 0;
proc0.p_addr->u_pcb.pcb_ext = 0;
proc0.p_md.md_regs = &proc0_tf;
}

View File

@@ -85,10 +85,6 @@ ENTRY(cpu_switch)
testl %ecx,%ecx
jz sw1
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
movl P_VMSPACE(%ecx), %edx
movl PCPU(CPUID), %eax
btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
@@ -124,10 +120,6 @@ ENTRY(cpu_switch)
movl %eax,PCB_DR0(%edx)
1:
/* save sched_lock recursion count */
movl _sched_lock+MTX_RECURSECNT,%eax
movl %eax,PCB_SCHEDNEST(%edx)
#ifdef SMP
/* XXX FIXME: we should be saving the local APIC TPR */
#endif /* SMP */
@@ -242,9 +234,6 @@ sw1b:
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
#endif /* SMP */
movl PCPU(CPUID),%eax
movb %al, P_ONCPU(%ecx)
movl %edx, PCPU(CURPCB)
movl %ecx, PCPU(CURPROC) /* into next process */
@@ -289,17 +278,6 @@ cpu_switch_load_gs:
movl PCB_DR7(%edx),%eax
movl %eax,%dr7
1:
/*
* restore sched_lock recursion count and transfer ownership to
* new process
*/
movl PCB_SCHEDNEST(%edx),%eax
movl %eax,_sched_lock+MTX_RECURSECNT
movl PCPU(CURPROC),%eax
movl %eax,_sched_lock+MTX_LOCK
ret
CROSSJUMPTARGET(sw1a)

View File

@@ -189,7 +189,6 @@ userret(p, frame, oticks)
* mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
clear_resched();
DROP_GIANT_NOSWITCH();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;

View File

@@ -183,8 +183,6 @@ cpu_fork(p1, p2, flags)
* pcb2->pcb_onfault: cloned above (always NULL here?).
*/
pcb2->pcb_schednest = 0;
/*
* XXX don't copy the i/o pages. this should probably be fixed.
*/

View File

@@ -71,7 +71,6 @@ struct pcb {
#define FP_SOFTFP 0x01 /* process using software fltng pnt emulator */
#define PCB_DBREGS 0x02 /* process using debug registers */
caddr_t pcb_onfault; /* copyin/out fault recovery */
int pcb_schednest;
int pcb_gs;
struct pcb_ext *pcb_ext; /* optional pcb extension */
u_long __pcb_spare[3]; /* adjust to avoid core dump size changes */

View File

@@ -88,9 +88,6 @@ ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
ASSYM(PS_ASTPENDING, PS_ASTPENDING);
ASSYM(PS_NEEDRESCHED, PS_NEEDRESCHED);
ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
ASSYM(SSLEEP, SSLEEP);
ASSYM(SRUN, SRUN);
ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap));
@@ -132,8 +129,6 @@ ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7));
ASSYM(PCB_DBREGS, PCB_DBREGS);
ASSYM(PCB_EXT, offsetof(struct pcb, pcb_ext));
ASSYM(PCB_SCHEDNEST, offsetof(struct pcb, pcb_schednest));
ASSYM(PCB_SPARE, offsetof(struct pcb, __pcb_spare));
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_savefpu));

View File

@@ -2056,7 +2056,6 @@ init386(first)
/* setup proc 0's pcb */
proc0.p_addr->u_pcb.pcb_flags = 0;
proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
proc0.p_addr->u_pcb.pcb_schednest = 0;
proc0.p_addr->u_pcb.pcb_ext = 0;
proc0.p_md.md_regs = &proc0_tf;
}

View File

@@ -85,10 +85,6 @@ ENTRY(cpu_switch)
testl %ecx,%ecx
jz sw1
movb P_ONCPU(%ecx), %al /* save "last" cpu */
movb %al, P_LASTCPU(%ecx)
movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */
movl P_VMSPACE(%ecx), %edx
movl PCPU(CPUID), %eax
btrl %eax, VM_PMAP+PM_ACTIVE(%edx)
@@ -124,10 +120,6 @@ ENTRY(cpu_switch)
movl %eax,PCB_DR0(%edx)
1:
/* save sched_lock recursion count */
movl _sched_lock+MTX_RECURSECNT,%eax
movl %eax,PCB_SCHEDNEST(%edx)
#ifdef SMP
/* XXX FIXME: we should be saving the local APIC TPR */
#endif /* SMP */
@@ -242,9 +234,6 @@ sw1b:
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
#endif /* SMP */
movl PCPU(CPUID),%eax
movb %al, P_ONCPU(%ecx)
movl %edx, PCPU(CURPCB)
movl %ecx, PCPU(CURPROC) /* into next process */
@@ -289,17 +278,6 @@ cpu_switch_load_gs:
movl PCB_DR7(%edx),%eax
movl %eax,%dr7
1:
/*
* restore sched_lock recursion count and transfer ownership to
* new process
*/
movl PCB_SCHEDNEST(%edx),%eax
movl %eax,_sched_lock+MTX_RECURSECNT
movl PCPU(CURPROC),%eax
movl %eax,_sched_lock+MTX_LOCK
ret
CROSSJUMPTARGET(sw1a)

View File

@@ -189,7 +189,6 @@ userret(p, frame, oticks)
* mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
clear_resched();
DROP_GIANT_NOSWITCH();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;

View File

@@ -183,8 +183,6 @@ cpu_fork(p1, p2, flags)
* pcb2->pcb_onfault: cloned above (always NULL here?).
*/
pcb2->pcb_schednest = 0;
/*
* XXX don't copy the i/o pages. this should probably be fixed.
*/

View File

@@ -71,7 +71,6 @@ struct pcb {
#define FP_SOFTFP 0x01 /* process using software fltng pnt emulator */
#define PCB_DBREGS 0x02 /* process using debug registers */
caddr_t pcb_onfault; /* copyin/out fault recovery */
int pcb_schednest;
int pcb_gs;
struct pcb_ext *pcb_ext; /* optional pcb extension */
u_long __pcb_spare[3]; /* adjust to avoid core dump size changes */

View File

@@ -651,6 +651,11 @@ fork_exit(callout, arg, frame)
{
struct proc *p;
/*
* Setup the sched_lock state so that we can release it.
*/
sched_lock.mtx_lock = curproc;
sched_lock.mtx_recurse = 0;
mtx_unlock_spin(&sched_lock);
/*
* XXX: We really shouldn't have to do this.
@@ -668,6 +673,7 @@ fork_exit(callout, arg, frame)
* have this call a non-return function to stay in kernel mode.
* initproc has its own fork handler, but it does return.
*/
KASSERT(callout != NULL, ("NULL callout in fork_exit"));
callout(arg, frame);
/*

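The fork_exit() prologue above is the other half of the hand-off. A newly created child never runs the second half of mi_switch(); its first instructions are in switch_trampoline (wired up by cpu_fork(), see the cpu_fork() hunks elsewhere in this commit), which reaches fork_exit() while sched_lock is still in whatever state the previous owner left it, since cpu_switch() no longer writes mtx_lock or mtx_recurse. fork_exit() therefore has to establish a single, non-recursed acquisition by hand before the existing release. A minimal restatement, with my own comments and with the parameter types assumed rather than taken from the hunk:

void
fork_exit(callout, arg, frame)
	void (*callout)(void *, struct trapframe *);	/* assumed types */
	void *arg;
	struct trapframe *frame;
{
	/*
	 * Claim sched_lock for this process with a recursion count of
	 * zero, then drop it; cpu_switch() no longer restores either
	 * field, so this is the child's initial lock state.
	 */
	sched_lock.mtx_lock = curproc;
	sched_lock.mtx_recurse = 0;
	mtx_unlock_spin(&sched_lock);

	/* ... Giant handling, callout(arg, frame), return to user mode ... */
}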
View File

@@ -848,6 +848,7 @@ mi_switch()
register struct rlimit *rlim;
#endif
int x;
u_int sched_nest;
/*
* XXX this spl is almost unnecessary. It is partly to allow for
@@ -922,7 +923,14 @@ mi_switch()
PCPU_SET(switchtime, new_switchtime);
CTR4(KTR_PROC, "mi_switch: old proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
sched_nest = sched_lock.mtx_recurse;
curproc->p_lastcpu = curproc->p_oncpu;
curproc->p_oncpu = NOCPU;
clear_resched();
cpu_switch();
curproc->p_oncpu = PCPU_GET(cpuid);
sched_lock.mtx_recurse = sched_nest;
sched_lock.mtx_lock = curproc;
CTR4(KTR_PROC, "mi_switch: new proc %p (pid %d, %s), schedlock %p",
p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock);
if (PCPU_GET(switchtime.tv_sec) == 0)

View File

@@ -189,7 +189,6 @@ userret(p, frame, oticks)
* mi_switch()'ed, we might not be on the queue indicated by
* our priority.
*/
clear_resched();
DROP_GIANT_NOSWITCH();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;

View File

@@ -210,13 +210,6 @@ cpu_fork(p1, p2, flags)
up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a1 */
up->u_pcb.pcb_context[7] =
(u_int64_t)switch_trampoline; /* ra: assembly magic */
/*
* Clear the saved recursion count for sched_lock
* since the child needs only one count which is
* released in switch_trampoline.
*/
up->u_pcb.pcb_schednest = 0;
}
}

View File

@@ -94,7 +94,6 @@ ASSYM(PTESIZE, PTESIZE);
ASSYM(U_PCB_ONFAULT, offsetof(struct user, u_pcb.pcb_onfault));
ASSYM(U_PCB_HWPCB_KSP, offsetof(struct user, u_pcb.pcb_hw.apcb_ksp));
ASSYM(U_PCB_CONTEXT, offsetof(struct user, u_pcb.pcb_context));
ASSYM(U_PCB_SCHEDNEST, offsetof(struct user, u_pcb.pcb_schednest));
ASSYM(PCB_HW, offsetof(struct pcb, pcb_hw));

View File

@@ -210,13 +210,6 @@ cpu_fork(p1, p2, flags)
up->u_pcb.pcb_context[2] = (u_long) p2; /* s2: a1 */
up->u_pcb.pcb_context[7] =
(u_int64_t)switch_trampoline; /* ra: assembly magic */
/*
* Clear the saved recursion count for sched_lock
* since the child needs only one count which is
* released in switch_trampoline.
*/
up->u_pcb.pcb_schednest = 0;
}
}

View File

@@ -175,8 +175,7 @@ struct proc {
int p_flag; /* (c) P_* flags. */
int p_sflag; /* (j) PS_* flags. */
int p_intr_nesting_level; /* (k) Interrupt recursion. */
char p_stat; /* (j) S* process status. */
char p_pad1[3];
int p_stat; /* (j) S* process status. */
pid_t p_pid; /* (b) Process identifier. */
LIST_ENTRY(proc) p_hash; /* (d) Hash chain. */
@@ -219,8 +218,8 @@ struct proc {
struct vnode *p_textvp; /* (b) Vnode of executable. */
char p_lock; /* (c) Process lock (prevent swap) count. */
struct mtx p_mtx; /* (k) Lock for this struct. */
char p_lock; /* (c) Process lock (prevent swap) count. */
u_char p_oncpu; /* (j) Which cpu we are on. */
u_char p_lastcpu; /* (j) Last cpu we were on. */
char p_rqindex; /* (j) Run queue index. */
@@ -283,6 +282,8 @@ struct proc {
#define p_session p_pgrp->pg_session
#define p_pgid p_pgrp->pg_id
#define NOCPU 0xff /* For p_oncpu when we aren't on a CPU. */
/* Status values (p_stat). */
#define SIDL 1 /* Process being created by fork. */
#define SRUN 2 /* Currently runnable. */