Remove some, but not all, assumptions that the BSP is CPU 0 and that CPUs
are numbered densely from there to n_cpus.

MFC after:	1 month
parent 7cb00caf52
commit 3cd3799a91
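
Background for the hunks below (not part of this commit): the change leans on
the CPU-set macros from <sys/smp.h>, which treat the BSP as the first CPU
present in all_cpus rather than as CPU 0. A paraphrased sketch, close to but
not verbatim from sys/sys/smp.h:

	/* Illustrative paraphrase of sys/sys/smp.h; not part of this diff. */
	#define	CPU_ABSENT(x_cpu)	(!CPU_ISSET(x_cpu, &all_cpus))

	static __inline int
	cpu_first(void)
	{
		int i;

		/* The BSP is the lowest-numbered CPU present in all_cpus. */
		for (i = 0;; i++)
			if (!CPU_ABSENT(i))
				return (i);
	}
	#define	CPU_FIRST()	cpu_first()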
sys/kern/kern_clock.c

@@ -573,7 +573,9 @@ hardclock_cnt(int cnt, int usermode)
 void
 hardclock_sync(int cpu)
 {
-	int *t = DPCPU_ID_PTR(cpu, pcputicks);
+	int *t;
+	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
+	t = DPCPU_ID_PTR(cpu, pcputicks);
 
 	*t = ticks;
 }
sys/kern/kern_clocksource.c

@@ -822,6 +822,8 @@ cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
 	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
 	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
 	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
+
+	KASSERT(!CPU_ABSENT(cpu), ("Absent CPU %d", cpu));
 	state = DPCPU_ID_PTR(cpu, timerstate);
 	ET_HW_LOCK(state);
sys/kern/kern_shutdown.c

@@ -373,15 +373,16 @@ kern_reboot(int howto)
 
 #if defined(SMP)
 	/*
-	 * Bind us to CPU 0 so that all shutdown code runs there.  Some
+	 * Bind us to the first CPU so that all shutdown code runs there.  Some
 	 * systems don't shutdown properly (i.e., ACPI power off) if we
 	 * run on another processor.
 	 */
 	if (!SCHEDULER_STOPPED()) {
 		thread_lock(curthread);
-		sched_bind(curthread, 0);
+		sched_bind(curthread, CPU_FIRST());
 		thread_unlock(curthread);
-		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+		KASSERT(PCPU_GET(cpuid) == CPU_FIRST(),
+		    ("boot: not running on cpu 0"));
 	}
 #endif
 	/* We're in the process of rebooting. */
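
A usage sketch of why the literal 0 was unsafe, assuming a hypothetical
sparse system whose present set is all_cpus = {2, 3}:

	/* Hypothetical sparse box: all_cpus = {2, 3}, so CPU_FIRST() == 2. */
	thread_lock(curthread);
	sched_bind(curthread, CPU_FIRST());	/* binds to CPU 2 */
	thread_unlock(curthread);
	/* The old sched_bind(curthread, 0) named a CPU that doesn't exist. */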
sys/kern/kern_timeout.c

@@ -264,7 +264,7 @@ cc_cce_migrating(struct callout_cpu *cc, int direct)
 
 /*
  * Kernel low level callwheel initialization
- * called on cpu0 during kernel startup.
+ * called on the BSP during kernel startup.
  */
 static void
 callout_callwheel_init(void *dummy)
@@ -277,7 +277,7 @@ callout_callwheel_init(void *dummy)
	 * XXX: Clip callout to result of previous function of maxusers
	 * maximum 384.  This is still huge, but acceptable.
	 */
-	memset(CC_CPU(0), 0, sizeof(cc_cpu));
+	memset(CC_CPU(curcpu), 0, sizeof(cc_cpu));
 	ncallout = imin(16 + maxproc + maxfiles, 18508);
 	TUNABLE_INT_FETCH("kern.ncallout", &ncallout);
 
@@ -295,7 +295,7 @@ callout_callwheel_init(void *dummy)
 	TUNABLE_INT_FETCH("kern.pin_pcpu_swi", &pin_pcpu_swi);
 
	/*
-	 * Only cpu0 handles timeout(9) and receives a preallocation.
+	 * Only BSP handles timeout(9) and receives a preallocation.
	 *
	 * XXX: Once all timeout(9) consumers are converted this can
	 * be removed.
@@ -330,7 +330,7 @@ callout_cpu_init(struct callout_cpu *cc, int cpu)
 		cc_cce_cleanup(cc, i);
 	snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
 	    "callwheel cpu %d", cpu);
-	if (cc->cc_callout == NULL)	/* Only cpu0 handles timeout(9) */
+	if (cc->cc_callout == NULL)	/* Only BSP handles timeout(9) */
 		return;
 	for (i = 0; i < ncallout; i++) {
 		c = &cc->cc_callout[i];
@@ -400,7 +400,7 @@ start_softclock(void *dummy)
 		if (cpu == timeout_cpu)
 			continue;
 		cc = CC_CPU(cpu);
-		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(9). */
+		cc->cc_callout = NULL;	/* Only BSP handles timeout(9). */
 		callout_cpu_init(cc, cpu);
 		snprintf(name, sizeof(name), "clock (%d)", cpu);
 		ie = NULL;
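
"BSP" in these reworded comments means whichever CPU runs the early callwheel
SYSINIT, which kern_timeout.c records in timeout_cpu. A paraphrased sketch of
that context (it sits outside these hunks):

	/* Paraphrased from callout_callwheel_init(); context not in the diff. */
	timeout_cpu = PCPU_GET(cpuid);	/* boot-time curcpu == the BSP */
	callout_cpu_init(CC_CPU(timeout_cpu), timeout_cpu);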
sys/kern/sched_ule.c

@@ -1224,6 +1224,8 @@ sched_pickcpu(struct thread *td, int flags)
 
 	self = PCPU_GET(cpuid);
 	ts = td_get_sched(td);
+	KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
+	    "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
 	if (smp_started == 0)
 		return (self);
	/*
@@ -1294,6 +1296,7 @@ sched_pickcpu(struct thread *td, int flags)
 	if (cpu == -1)
 		cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu);
 	KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
+	KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
	/*
	 * Compare the lowest loaded cpu to current cpu.
	 */
@@ -1400,6 +1403,7 @@ sched_setup(void *dummy)
 
	/* Add thread0's load since it's running. */
 	TDQ_LOCK(tdq);
+	td_get_sched(&thread0)->ts_cpu = curcpu; /* Something valid to start */
 	thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
 	tdq_load_add(tdq, &thread0);
 	tdq->tdq_lowpri = thread0.td_priority;
@@ -1837,6 +1841,9 @@ sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
 {
 	struct tdq *tdn;
 
+	KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
+	    "thread %s queued on absent CPU %d.", td->td_name,
+	    td_get_sched(td)->ts_cpu));
 	tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
 #ifdef SMP
 	tdq_load_rem(tdq, td);
@@ -2444,6 +2451,7 @@ sched_add(struct thread *td, int flags)
	 * Pick the destination cpu and if it isn't ours transfer to the
	 * target cpu.
	 */
+	td_get_sched(td)->ts_cpu = curcpu; /* Pick something valid to start */
 	cpu = sched_pickcpu(td, flags);
 	tdq = sched_setcpu(td, cpu, flags);
 	tdq_add(tdq, td, flags);
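
Taken together, the sched_ule.c hunks enforce a single invariant: ts_cpu must
always name a present CPU, even before smp_started. A hypothetical helper (not
in the tree) that condenses the repeated assertion:

	/* Hypothetical helper summarizing the new KASSERTs; not in the tree. */
	static __inline void
	ts_cpu_assert_present(struct thread *td)
	{

		KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu),
		    ("thread %s references absent CPU %d", td->td_name,
		    td_get_sched(td)->ts_cpu));
	}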
sys/kern/subr_pcpu.c

@@ -279,6 +279,8 @@ pcpu_destroy(struct pcpu *pcpu)
 struct pcpu *
 pcpu_find(u_int cpuid)
 {
+	KASSERT(cpuid_to_pcpu[cpuid] != NULL,
+	    ("Getting uninitialized PCPU %d", cpuid));
 
 	return (cpuid_to_pcpu[cpuid]);
 }
@@ -409,7 +411,7 @@ DB_SHOW_ALL_COMMAND(pcpu, db_show_cpu_all)
 	int id;
 
 	db_printf("Current CPU: %d\n\n", PCPU_GET(cpuid));
-	for (id = 0; id <= mp_maxid; id++) {
+	CPU_FOREACH(id) {
 		pc = pcpu_find(id);
 		if (pc != NULL) {
 			show_pcpu(pc);
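
CPU_FOREACH() makes the absent-ID skip explicit. Approximately (again
paraphrasing sys/sys/smp.h, not this diff):

	#define	CPU_FOREACH(i)						\
		for ((i) = 0; (i) <= mp_maxid; (i)++)			\
			if (!CPU_ABSENT((i)))

With that expansion, pcpu_find() is only called for CPUs that exist; the
remaining pc != NULL check is just a final guard, backed by the new KASSERT
in pcpu_find() above.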