Remove pc_cpumask and pc_other_cpus usage from MI code.
Tested by: pluknet
commit a38f1f263b
parent b8764e519d
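
The change applies one idiom throughout: rather than fetching a per-CPU mask (pc_cpumask) or its complement (pc_other_cpus) and combining it with CPU_OVERLAP()/CPU_NAND(), the code now tests or clears the CPU id directly in a cpuset_t and derives "all other CPUs" locally from all_cpus. Below is a minimal userland sketch of that idiom, assuming FreeBSD's <sys/cpuset.h> macros and a hypothetical four-CPU set; the kernel code itself operates on the global all_cpus and PCPU_GET(cpuid).

#include <sys/param.h>
#include <sys/cpuset.h>

#include <stdio.h>

int
main(void)
{
	cpuset_t all, other;
	u_int cpuid = 0;		/* hypothetical "current" CPU */

	/* Pretend the system has four CPUs. */
	CPU_ZERO(&all);
	for (u_int i = 0; i < 4; i++)
		CPU_SET(i, &all);

	/* New idiom: derive "every CPU but me" from the full set. */
	other = all;
	CPU_CLR(cpuid, &other);

	/* Membership tests now use the CPU id, not a one-bit mask. */
	printf("cpu%u in other set: %d\n", cpuid, CPU_ISSET(cpuid, &other));
	printf("cpu1 in other set: %d\n", CPU_ISSET(1, &other));
	return (0);
}
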
@@ -197,6 +197,7 @@ extern void xencons_resume(void);
 static void
 xctrl_suspend()
 {
+	u_int cpuid;
 	int i, j, k, fpp;
 	unsigned long max_pfn, start_info_mfn;
 
@@ -210,11 +211,11 @@ xctrl_suspend()
 	thread_lock(td);
 	sched_bind(td, 0);
 	thread_unlock(td);
-	KASSERT(PCPU_GET(cpuid) == 0, ("xen_suspend: not running on cpu 0"));
+	cpuid = PCPU_GET(cpuid);
+	KASSERT(cpuid == 0, ("xen_suspend: not running on cpu 0"));
 
-	sched_pin();
-	map = PCPU_GET(other_cpus);
-	sched_unpin();
+	map = all_cpus;
+	CPU_CLR(cpuid, &map);
 	CPU_NAND(&map, &stopped_cpus);
 	if (!CPU_EMPTY(&map))
 		stop_cpus(map);
@@ -263,7 +263,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	pc = pcpu_find(curcpu);
 
 	/* Check if we just need to do a proper critical_exit. */
-	if (!CPU_OVERLAP(&pc->pc_cpumask, &rm->rm_writecpus)) {
+	if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) {
 		critical_exit();
 		return (1);
 	}
@@ -325,7 +325,7 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 
 	critical_enter();
 	pc = pcpu_find(curcpu);
-	CPU_NAND(&rm->rm_writecpus, &pc->pc_cpumask);
+	CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus);
 	rm_tracker_add(pc, tracker);
 	sched_pin();
 	critical_exit();
@@ -367,7 +367,7 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 	 * conditional jump.
 	 */
 	if (0 == (td->td_owepreempt |
-	    CPU_OVERLAP(&rm->rm_writecpus, &pc->pc_cpumask)))
+	    CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))
 		return (1);
 
 	/* We do not have a read token and need to acquire one. */
@@ -951,8 +951,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 	if (td->td_flags & TDF_IDLETD) {
 		TD_SET_CAN_RUN(td);
 #ifdef SMP
-		/* Spinlock held here, assume no migration. */
-		CPU_NAND(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_CLR(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	} else {
 		if (TD_IS_RUNNING(td)) {
@@ -1026,7 +1025,7 @@ sched_switch(struct thread *td, struct thread *newtd, int flags)
 
 #ifdef SMP
 	if (td->td_flags & TDF_IDLETD)
-		CPU_OR(&idle_cpus_mask, PCPU_PTR(cpumask));
+		CPU_SET(PCPU_GET(cpuid), &idle_cpus_mask);
 #endif
 	sched_lock.mtx_lock = (uintptr_t)td;
 	td->td_oncpu = PCPU_GET(cpuid);
@@ -1055,7 +1054,8 @@ static int
 forward_wakeup(int cpunum)
 {
 	struct pcpu *pc;
-	cpuset_t dontuse, id, map, map2, me;
+	cpuset_t dontuse, map, map2;
+	u_int id, me;
 	int iscpuset;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -1073,27 +1073,24 @@ forward_wakeup(int cpunum)
 	/*
 	 * Check the idle mask we received against what we calculated
 	 * before in the old version.
-	 *
-	 * Also note that sched_lock is held now, thus no migration is
-	 * expected.
 	 */
-	me = PCPU_GET(cpumask);
+	me = PCPU_GET(cpuid);
 
 	/* Don't bother if we should be doing it ourself. */
-	if (CPU_OVERLAP(&me, &idle_cpus_mask) &&
-	    (cpunum == NOCPU || CPU_ISSET(cpunum, &me)))
+	if (CPU_ISSET(me, &idle_cpus_mask) &&
+	    (cpunum == NOCPU || me == cpunum))
 		return (0);
 
-	dontuse = me;
+	CPU_SETOF(me, &dontuse);
 	CPU_OR(&dontuse, &stopped_cpus);
 	CPU_OR(&dontuse, &hlt_cpus_mask);
 	CPU_ZERO(&map2);
 	if (forward_wakeup_use_loop) {
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&id, &dontuse) &&
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &dontuse) &&
 			    pc->pc_curthread == pc->pc_idlethread) {
-				CPU_OR(&map2, &id);
+				CPU_SET(id, &map2);
 			}
 		}
 	}
@@ -1125,11 +1122,11 @@ forward_wakeup(int cpunum)
 	if (!CPU_EMPTY(&map)) {
 		forward_wakeups_delivered++;
 		STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
-			id = pc->pc_cpumask;
-			if (!CPU_OVERLAP(&map, &id))
+			id = pc->pc_cpuid;
+			if (!CPU_ISSET(id, &map))
 				continue;
 			if (cpu_idle_wakeup(pc->pc_cpuid))
-				CPU_NAND(&map, &id);
+				CPU_CLR(id, &map);
 		}
 		if (!CPU_EMPTY(&map))
 			ipi_selected(map, IPI_AST);
@@ -1147,7 +1144,7 @@ kick_other_cpu(int pri, int cpuid)
 	int cpri;
 
 	pcpu = pcpu_find(cpuid);
-	if (CPU_OVERLAP(&idle_cpus_mask, &pcpu->pc_cpumask)) {
+	if (CPU_ISSET(cpuid, &idle_cpus_mask)) {
 		forward_wakeups_delivered++;
 		if (!cpu_idle_wakeup(cpuid))
 			ipi_cpu(cpuid, IPI_AST);
@@ -1205,10 +1202,10 @@ void
 sched_add(struct thread *td, int flags)
 #ifdef SMP
 {
-	cpuset_t idle, me, tidlemsk;
+	cpuset_t tidlemsk;
 	struct td_sched *ts;
+	u_int cpu, cpuid;
 	int forwarded = 0;
-	int cpu;
 	int single_cpu = 0;
 
 	ts = td->td_sched;
@@ -1271,23 +1268,17 @@ sched_add(struct thread *td, int flags)
 		ts->ts_runq = &runq;
 	}
 
-	if (single_cpu && (cpu != PCPU_GET(cpuid))) {
+	cpuid = PCPU_GET(cpuid);
+	if (single_cpu && cpu != cpuid) {
 		kick_other_cpu(td->td_priority, cpu);
 	} else {
 		if (!single_cpu) {
+			tidlemsk = idle_cpus_mask;
+			CPU_NAND(&tidlemsk, &hlt_cpus_mask);
+			CPU_CLR(cpuid, &tidlemsk);
 
-			/*
-			 * Thread spinlock is held here, assume no
-			 * migration is possible.
-			 */
-			me = PCPU_GET(cpumask);
-			idle = idle_cpus_mask;
-			tidlemsk = idle;
-			CPU_AND(&idle, &me);
-			CPU_OR(&me, &hlt_cpus_mask);
-			CPU_NAND(&tidlemsk, &me);
-
-			if (CPU_EMPTY(&idle) && ((flags & SRQ_INTR) == 0) &&
+			if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
+			    ((flags & SRQ_INTR) == 0) &&
 			    !CPU_EMPTY(&tidlemsk))
 				forwarded = forward_wakeup(cpu);
 		}
@@ -211,9 +211,12 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
 void
 kdb_panic(const char *msg)
 {
-
 #ifdef SMP
-	stop_cpus_hard(PCPU_GET(other_cpus));
+	cpuset_t other_cpus;
+
+	other_cpus = all_cpus;
+	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+	stop_cpus_hard(other_cpus);
 #endif
 	printf("KDB: panic\n");
 	panic("%s", msg);
@@ -414,7 +417,7 @@ kdb_thr_ctx(struct thread *thr)
 #if defined(SMP) && defined(KDB_STOPPEDPCB)
 	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
 		if (pc->pc_curthread == thr &&
-		    CPU_OVERLAP(&stopped_cpus, &pc->pc_cpumask))
+		    CPU_ISSET(pc->pc_cpuid, &stopped_cpus))
 			return (KDB_STOPPEDPCB(pc));
 	}
 #endif
@@ -501,6 +504,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	struct kdb_dbbe *be;
 	register_t intr;
 #ifdef SMP
+	cpuset_t other_cpus;
 	int did_stop_cpus;
 #endif
 	int handled;
@@ -516,8 +520,11 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	intr = intr_disable();
 
 #ifdef SMP
-	if ((did_stop_cpus = kdb_stop_cpus) != 0)
-		stop_cpus_hard(PCPU_GET(other_cpus));
+	if ((did_stop_cpus = kdb_stop_cpus) != 0) {
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+		stop_cpus_hard(other_cpus);
+	}
 #endif
 
 	kdb_active++;
@@ -142,7 +142,7 @@ mp_start(void *dummy)
 	/* Probe for MP hardware. */
 	if (smp_disabled != 0 || cpu_mp_probe() == 0) {
 		mp_ncpus = 1;
-		all_cpus = PCPU_GET(cpumask);
+		CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
 		return;
 	}
 
@@ -708,7 +708,7 @@ mp_setvariables_for_up(void *dummy)
 {
 	mp_ncpus = 1;
 	mp_maxid = PCPU_GET(cpuid);
-	all_cpus = PCPU_GET(cpumask);
+	CPU_SETOF(mp_maxid, &all_cpus);
 	KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
 }
 SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,