Small cleanups to the SMP code:

- Axe invltlb_ok as it was completely redundant with smp_active.
- Remove references to a non-existent variable and a non-existent file
  in i386/include/smp.h.
- On i386, don't hold the AP boot lock while an AP performs
  initializations that are local to that CPU during its bootstrap.
- Reorganize the AP startup code somewhat to unify the latter half of the
  functions that bring an AP up.  Eventually this might be broken out into
  an MI function in subr_smp.c (see the sketch below).
John Baldwin 2001-12-17 23:14:35 +00:00
parent 3d09cebfce
commit 1ecf0d56c8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=88085
9 changed files with 100 additions and 214 deletions
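
The last bullet above points at a possible follow-up: the now-common tail of
AP bringup (take the boot lock, bump smp_cpus, publish other_cpus, flip
smp_started/smp_active on the last AP, then spin until everyone is up and
enter the scheduler) could eventually live in subr_smp.c.  Below is a minimal
sketch of what such an MI helper might look like, assembled from the code the
hunks below now share between alpha and i386; the name smp_ap_started() and
the exact split are assumptions, and the commit itself still keeps per-arch
copies:

/*
 * Hypothetical MI tail of AP bootstrap, as the commit message suggests
 * could eventually move to subr_smp.c.  This is a sketch, not code from
 * the commit: it simply collects the logic now shared by the alpha and
 * i386 versions in the diffs below, and assumes the usual kernel
 * context (mutex, ktr, and pcpu support already included).
 */
static void
smp_ap_started(void)
{
	/* Lock against other AP's that are waking up. */
	mtx_lock_spin(&ap_boot_mtx);

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	smp_cpus++;

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* The last AP to check in turns on the global SMP flags. */
	if (smp_cpus == mp_ncpus) {
		smp_started = 1;	/* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;		/* historic */
	}

	/* Let other AP's wake up now. */
	mtx_unlock_spin(&ap_boot_mtx);

	/* Wait until all the AP's are up. */
	while (smp_started == 0)
		;	/* nothing */

	microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/*
	 * Note: the real code enables interrupts via MD means first
	 * (enable_intr() on i386, alpha_pal_swpipl() on alpha); that
	 * step is omitted from this sketch.  Then grab sched_lock and
	 * enter the scheduler.
	 */
	mtx_lock_spin(&sched_lock);
	cpu_throw();	/* doesn't return */
	panic("scheduler returned us to %s", __func__);
}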

View File

@@ -153,14 +153,6 @@ smp_init_secondary(void)
(void)alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH);
mc_expected = 0;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*
* XXX: shouldn't this already be set for us?
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/*
* Set flags in our per-CPU slot in the HWRPB.
*/
@@ -187,8 +179,9 @@ smp_init_secondary(void)
smp_cpus++;
CTR0(KTR_SMP, "smp_init_secondary");
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
@@ -201,17 +194,16 @@ smp_init_secondary(void)
mtx_unlock_spin(&ap_boot_mtx);
while (smp_started == 0)
alpha_mb(); /* nothing */
; /* nothing */
microuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
(void)alpha_pal_swpipl(ALPHA_PSL_IPL_0);
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to " __func__);
panic("scheduler returned us to %s", __func__);
}
static int

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
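
Pieced together from the hunks above, the net effect on i386's ap_init() is
that the per-CPU work (cpu_invltlb(), the F00F idt reload, cpu_setregs(),
mem_range_AP_init()) now runs before the boot lock is taken, and ap_boot_mtx
only covers the shared bookkeeping.  A condensed sketch of the resulting flow;
this is not a verbatim copy of the file, and it reuses the hypothetical
smp_ap_started() helper sketched after the commit header:

void
ap_init(void)
{
	/* Spin until the BSP releases the AP's. */
	while (!aps_ready)
		/* spin */ ;

	/* BSP may have changed PTD while we were waiting */
	cpu_invltlb();

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* Set up CPU registers and state; no lock needed for local work. */
	cpu_setregs();

	/* Set memory range attributes for this CPU to match the BSP. */
	mem_range_AP_init();

	/*
	 * The rest -- shared bookkeeping under ap_boot_mtx, waiting for
	 * the other AP's, and entering the scheduler -- is the common
	 * tail sketched after the commit header.
	 */
	smp_ap_started();	/* hypothetical helper; doesn't return */
}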

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;

View File

@@ -142,11 +142,6 @@ void u_sleep __P((int));
u_int io_apic_read __P((int, int));
void io_apic_write __P((int, int, u_int));
/* global data in init_smp.c */
extern int invltlb_ok;
extern volatile int smp_idle_loops;
#endif /* !LOCORE */
#endif /* SMP && !APIC_IO */

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;

View File

@@ -287,9 +287,6 @@ extern pt_entry_t *SMPpt;
struct pcb stoppcbs[MAXCPU];
int invltlb_ok = 0; /* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");
/*
* Local data and functions.
*/
@@ -2191,7 +2188,7 @@ void
smp_invltlb(void)
{
#if defined(APIC_IO)
if (smp_started && invltlb_ok)
if (smp_started)
ipi_all_but_self(IPI_INVLTLB);
#endif /* APIC_IO */
}
@@ -2236,29 +2233,13 @@ ap_init(void)
while (!aps_ready)
/* spin */ ;
/*
* Set curproc to our per-cpu idleproc so that mutexes have
* something unique to lock with.
*/
PCPU_SET(curthread, PCPU_GET(idlethread));
/* lock against other AP's that are waking up */
mtx_lock_spin(&ap_boot_mtx);
/* BSP may have changed PTD while we're waiting for the lock */
/* BSP may have changed PTD while we were waiting */
cpu_invltlb();
smp_cpus++;
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
lidt(&r_idt);
#endif
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
/* set up CPU registers and state */
cpu_setregs();
@@ -2283,18 +2264,22 @@ ap_init(void)
/* Set memory range attributes for this CPU to match the BSP */
mem_range_AP_init();
/*
* Activate smp_invltlb, although strictly speaking, this isn't
* quite correct yet. We should have a bitfield for cpus willing
* to accept TLB flush IPI's or something and sync them.
*/
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
invltlb_ok = 1;
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
/* let other AP's wake up now */
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
@@ -2305,11 +2290,10 @@ ap_init(void)
PCPU_SET(switchticks, ticks);
/* ok, now grab sched_lock and enter the scheduler */
enable_intr();
mtx_lock_spin(&sched_lock);
cpu_throw(); /* doesn't return */
panic("scheduler returned us to ap_init");
panic("scheduler returned us to %s", __func__);
}
/*
@@ -2332,7 +2316,7 @@ forward_statclock(void)
CTR0(KTR_SMP, "forward_statclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;
@@ -2363,7 +2347,7 @@ forward_hardclock(void)
CTR0(KTR_SMP, "forward_hardclock");
if (!smp_started || !invltlb_ok || cold || panicstr)
if (!smp_started || cold || panicstr)
return;
map = PCPU_GET(other_cpus) & ~stopped_cpus ;

View File

@@ -142,11 +142,6 @@ void u_sleep __P((int));
u_int io_apic_read __P((int, int));
void io_apic_write __P((int, int, u_int));
/* global data in init_smp.c */
extern int invltlb_ok;
extern volatile int smp_idle_loops;
#endif /* !LOCORE */
#endif /* SMP && !APIC_IO */