Remove pc_cpumask and pc_other_cpus usage from MI code.

Tested by:	pluknet
commit a38f1f263b
parent b8764e519d
Author: Attilio Rao
Date:   2011-06-13 13:28:31 +00:00

5 changed files with 46 additions and 47 deletions
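Every hunk below follows the same pattern: the precomputed per-CPU fields pc_cpumask (a cpuset_t with only the owning CPU's bit set) and pc_other_cpus (all_cpus minus that bit) are dropped in favor of single-bit CPU_ISSET()/CPU_SET()/CPU_CLR() operations keyed on pc_cpuid. A minimal before/after sketch of the idiom (illustration only, not part of the diff):

	/* Before: masks read straight out of the pcpu area. */
	me = PCPU_GET(cpumask);		/* only this CPU's bit set */
	others = PCPU_GET(other_cpus);	/* every CPU but this one */

	/* After: both derived from the CPU id and the all_cpus set. */
	cpuid = PCPU_GET(cpuid);
	CPU_SETOF(cpuid, &me);		/* zero the set, set one bit */
	others = all_cpus;
	CPU_CLR(cpuid, &others);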

sys/dev/xen/control/control.c

@@ -197,6 +197,7 @@ extern void xencons_resume(void);
 static void
 xctrl_suspend()
 {
+	u_int cpuid;
 	int i, j, k, fpp;
 	unsigned long max_pfn, start_info_mfn;
@@ -210,11 +211,11 @@ xctrl_suspend()
 	thread_lock(td);
 	sched_bind(td, 0);
 	thread_unlock(td);
-	KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
+	cpuid = PCPU_GET(cpuid);
+	KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));
 
-	sched_pin();
-	map = PCPU_GET(other_cpus);
-	sched_unpin();
+	map = all_cpus;
+	CPU_CLR(cpuid, &map);
 	CPU_NAND(&map, &stopped_cpus);
 	if (!CPU_EMPTY(&map))
 		stop_cpus(map);
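Note that the sched_pin()/sched_unpin() bracket disappears here: it existed only to keep the pc_other_cpus read coherent, while all_cpus is a stable global and the thread is already bound to CPU 0 by the sched_bind() above. The resulting "stop every other running CPU" sequence, collected into a hypothetical helper for illustration (the name stop_other_running_cpus is not part of this change):

	static void
	stop_other_running_cpus(void)
	{
		cpuset_t map;

		map = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &map);	/* exclude ourselves */
		CPU_NAND(&map, &stopped_cpus);	/* exclude already-stopped CPUs */
		if (!CPU_EMPTY(&map))
			stop_cpus(map);
	}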

sys/kern/kern_rmlock.c

@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	pc = pcpu_find(curcpu);
 
 	/* Check if we just need to do a proper critical_exit. */
-	if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
+	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
 		critical_exit();
 		return (1);
 	}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	critical_enter();
 	pc = pcpu_find(curcpu);
-	CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
+	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
 	rm_tracker_add(pc, tracker);
 	sched_pin();
 	critical_exit();
@@ -367,7 +367,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * conditional jump.
 	 */
 	if (0 == (td->td_owepreempt |
-	    CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
+	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
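Since pc_cpumask always had exactly one bit set, every mask operation against it collapses to a single-bit primitive; the rmlock changes are direct instances of two equivalences (sketched here as a comment, assuming nothing beyond sys/cpuset.h):

	/*
	 * CPU_OVERLAP(&pc->pc_cpumask, &set) != 0  <=>  CPU_ISSET(pc->pc_cpuid, &set)
	 * CPU_NAND(&set, &pc->pc_cpumask)          <=>  CPU_CLR(pc->pc_cpuid, &set)
	 */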

sys/kern/sched_4bsd.c

@@ -951,8 +951,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	if (td->td_flags & TDF_IDLETD) {
 		TD_SET_CAN_RUN(td);
 #ifdef SMP
-		/* Spinlock held here, assume no migration. */
-		CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	} else {
 		if (TD_IS_RUNNING(td)) {
@@ -1026,7 +1025,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 #ifdef SMP
 	if (td->td_flags & TDF_IDLETD)
-		CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
@@ -1055,7 +1054,8 @@ static int
 forward_wakeup(int cpunum)
 {
 	struct pcpu *pc;
-	cpuset_t dontuse, id, map, map2, me;
+	cpuset_t dontuse, map, map2;
+	u_int id, me;
 	int iscpuset;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -1073,27 +1073,24 @@ forward_wakeup(int cpunum)
 	/*
 	 * Check the idle mask we received against what we calculated
 	 * before in the old version.
-	 *
-	 * Also note that sched_lock is held now, thus no migration is
-	 * expected.
 	 */
-	me = PCPU_GET(cpumask);
+	me = PCPU_GET(cpuid);
 
 	/* Don't bother if we should be doing it ourself. */
-	if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
-	    (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
+	if (CPU_ISSET(me, &idle_cpus_mask) &&
+	    (cpunum == NOCPU || me == cpunum))
 		return (0);
 
-	dontuse = me;
+	CPU_SETOF(me, &dontuse);
 	CPU_OR(&dontuse, &stopped_cpus);
 	CPU_OR(&dontuse, &hlt_cpus_mask);
 	CPU_ZERO(&map2);
 	if (forward_wakeup_use_loop) {
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&id, &dontuse) &&
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &dontuse) &&
 			    pc->pc_curthread == pc->pc_idlethread) {
-				CPU_OR(&map2, &id);
+				CPU_SET(id, &map2);
 			}
 		}
 	}
@@ -1125,11 +1122,11 @@ forward_wakeup(int cpunum)
 	if (!CPU_EMPTY(&map)) {
 		forward_wakeups_delivered++;
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&map, &id))
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &map))
 				continue;
 			if (cpu_idle_wakeup(pc->pc_cpuid))
-				CPU_NAND(&map, &id);
+				CPU_CLR(id, &map);
 		}
 		if (!CPU_EMPTY(&map))
 			ipi_selected(map, IPI_AST);
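id also changes type from cpuset_t to u_int, so the loops index bits directly instead of copying each pcpu's one-bit mask and doing set algebra on the copy. A condensed sketch of the new candidate scan, with comments added:

	CPU_SETOF(me, &dontuse);	/* start from just this CPU */
	CPU_OR(&dontuse, &stopped_cpus);
	CPU_OR(&dontuse, &hlt_cpus_mask);
	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		id = pc->pc_cpuid;	/* an index now, not a mask */
		if (!CPU_ISSET(id, &dontuse) &&
		    pc->pc_curthread == pc->pc_idlethread)
			CPU_SET(id, &map2);	/* mark as wakeup candidate */
	}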
@@ -1147,7 +1144,7 @@ kick_other_cpu(int pri, int cpuid)
 	int cpri;
 
 	pcpu = pcpu_find(cpuid);
-	if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
+	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
 		forward_wakeups_delivered++;
 		if (!cpu_idle_wakeup(cpuid))
 			ipi_cpu(cpuid, IPI_AST);
@@ -1205,10 +1202,10 @@ void
 sched_add(struct thread *td, int flags)
 #ifdef SMP
 {
-	cpuset_t idle, me, tidlemsk;
+	cpuset_t tidlemsk;
 	struct td_sched *ts;
+	u_int cpu, cpuid;
 	int forwarded = 0;
-	int cpu;
 	int single_cpu = 0;
 
 	ts = td->td_sched;
@@ -1271,23 +1268,17 @@ sched_add(struct thread *td, int flags)
 		ts->ts_runq = &runq;
 	}
 
-	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
+	cpuid = PCPU_GET(cpuid);
+	if (single_cpu && cpu != cpuid) {
 		kick_other_cpu(td->td_priority, cpu);
 	} else {
 		if (!single_cpu) {
+			tidlemsk = idle_cpus_mask;
+			CPU_NAND(&tidlemsk, &hlt_cpus_mask);
+			CPU_CLR(cpuid, &tidlemsk);
 
-			/*
-			 * Thread spinlock is held here, assume no
-			 * migration is possible.
-			 */
-			me = PCPU_GET(cpumask);
-			idle = idle_cpus_mask;
-			tidlemsk = idle;
-			CPU_AND(&idle, &me);
-			CPU_OR(&me, &hlt_cpus_mask);
-			CPU_NAND(&tidlemsk, &me);
-
-			if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
+			    ((flags & SRQ_INTR) == 0) &&
 			    !CPU_EMPTY(&tidlemsk))
 				forwarded = forward_wakeup(cpu);
 		}
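The rewritten forwarding test is equivalent to the old mask algebra without materializing the one-bit me mask: previously idle = idle_cpus_mask & me asked "is this CPU idle?" and tidlemsk = idle_cpus_mask & ~(me | hlt_cpus_mask) collected the other non-halted idle CPUs. As a worked correspondence (comment only):

	/*
	 * old: idle     = idle_cpus_mask & me
	 *      tidlemsk = idle_cpus_mask & ~(me | hlt_cpus_mask)
	 *      forward iff idle == 0 (we are not idle), SRQ_INTR is
	 *      unset, and tidlemsk != 0.
	 * new: tidlemsk = idle_cpus_mask, then CPU_NAND() strips
	 *      hlt_cpus_mask and CPU_CLR() strips our own bit; the
	 *      "are we idle" test becomes CPU_ISSET(cpuid, &idle_cpus_mask).
	 */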

sys/kern/subr_kdb.c

@@ -211,9 +211,12 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
 void
 kdb_panic(const char *msg)
 {
-
 #ifdef SMP
-	stop_cpus_hard(PCPU_GET(other_cpus));
+	cpuset_t other_cpus;
+
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+	stop_cpus_hard(other_cpus);
 #endif
 	printf("KDB: panic\n");
 	panic("%s", msg);
@@ -414,7 +417,7 @@ kdb_thr_ctx(struct thread *thr)
 #if defined(SMP) && defined(KDB_STOPPEDPCB)
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc->pc_curthread == thr &&
-		    CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
+		    CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
 			return (KDB_STOPPEDPCB(pc));
 	}
 #endif
@@ -501,6 +504,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	struct kdb_dbbe *be;
 	register_t intr;
 #ifdef SMP
+	cpuset_t other_cpus;
 	int did_stop_cpus;
 #endif
 	int handled;
@@ -516,8 +520,11 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	intr = intr_disable();
 
 #ifdef SMP
-	if ((did_stop_cpus = kdb_stop_cpus) != 0)
-		stop_cpus_hard(PCPU_GET(other_cpus));
+	if ((did_stop_cpus = kdb_stop_cpus) != 0) {
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+		stop_cpus_hard(other_cpus);
+	}
 #endif
 
 	kdb_active++;
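kdb_panic() and kdb_trap() now compute other_cpus at the point of use instead of reading pc_other_cpus; the declaration sits under #ifdef SMP so UP kernels see no unused variable. The recurring idiom, as a self-contained sketch (the set is passed by value, so a stack local is sufficient):

#ifdef SMP
	cpuset_t other_cpus;

	other_cpus = all_cpus;
	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
	stop_cpus_hard(other_cpus);	/* stop everyone but ourselves */
#endif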

sys/kern/subr_smp.c

@@ -142,7 +142,7 @@ mp_start(void *dummy)
 	/* Probe for MP hardware. */
 	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
 		mp_ncpus = 1;
-		all_cpus = PCPU_GET(cpumask);
+		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
 		return;
 	}
@@ -708,7 +708,7 @@ mp_setvariables_for_up(void *dummy)
 {
 	mp_ncpus = 1;
 	mp_maxid = PCPU_GET(cpuid);
-	all_cpus = PCPU_GET(cpumask);
+	CPU_SETOF(mp_maxid, &all_cpus);
 	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
 }
 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
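CPU_SETOF() replaces the struct assignment from pc_cpumask: it clears the whole set and then sets the single named bit, which is exactly the value the old one-bit field carried. Equivalently (sketch):

	/* CPU_SETOF(n, &set) behaves like: */
	CPU_ZERO(&set);
	CPU_SET(n, &set);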