Use resume_cpus() instead of restart_cpus() to resume from ACPI suspension.
restart_cpus() worked well enough by accident. Before this set of fixes, resume_cpus() used the same cpuset (started_cpus, meaning CPUs directed to restart) as restart_cpus(). resume_cpus() waited for the wrong cpuset (stopped_cpus) to become empty, but since mixtures of stopped and suspended CPUs are not close to working, stopped_cpus must be empty when resuming so the wait is null -- restart_cpus just allows the other CPUs to restart and returns without waiting. Fix resume_cpus() to wait on the correct cpuset for the ACPI case, and add further kludges to try to keep it working for the XEN case. It was only used for XEN. It waited on suspended_cpus. This works for XEN. However, for ACPI, resuming is a 2-step process. ACPI has already woken up the other CPUs and removed them from suspended_cpus. This fix records the move by putting them in a new cpuset resuming_cpus. Waiting on suspended_cpus would give the same null wait as waiting on stopped_cpus. Wait on resuming_cpus instead. Add a cpuset toresume_cpus to map the CPUs being told to resume, to keep this separate from the cpuset started_cpus mapping the CPUs being told to restart. Mixtures of stopped and suspended/resuming CPUs are still far from working. Describe new and some old cpusets in comments. Add further kludges to cpususpend_handler() to try to avoid breaking it for XEN. XEN doesn't use resumectx(), so it doesn't use the second return path for savectx(), and it goes from the suspended state directly to the restarted state, while ACPI resume goes through the resuming state. Enter the resuming state early for all cases so that resume_cpus() can test for being in this state and not have to worry about the intermediate !suspended state for ACPI only. Reviewed by: kib
This commit is contained in:
parent
fa3e727156
commit
cf8a25e82e
@ -351,13 +351,18 @@ generic_restart_cpus(cpuset_t map, u_int type)
|
||||
|
||||
#if X86
|
||||
if (type == IPI_SUSPEND)
|
||||
cpus = &suspended_cpus;
|
||||
cpus = &resuming_cpus;
|
||||
else
|
||||
#endif
|
||||
cpus = &stopped_cpus;
|
||||
|
||||
/* signal other cpus to restart */
|
||||
CPU_COPY_STORE_REL(&map, &started_cpus);
|
||||
#if X86
|
||||
if (type == IPI_SUSPEND)
|
||||
CPU_COPY_STORE_REL(&map, &toresume_cpus);
|
||||
else
|
||||
#endif
|
||||
CPU_COPY_STORE_REL(&map, &started_cpus);
|
||||
|
||||
#if X86
|
||||
if (!nmi_is_broadcast || nmi_kdb_lock == 0) {
|
||||
|
@ -155,10 +155,13 @@ struct cpu_group *smp_topo_find(struct cpu_group *top, int cpu);
|
||||
|
||||
extern void (*cpustop_restartfunc)(void);
|
||||
extern int smp_cpus;
|
||||
extern volatile cpuset_t started_cpus;
|
||||
extern volatile cpuset_t stopped_cpus;
|
||||
extern volatile cpuset_t suspended_cpus;
|
||||
extern cpuset_t hlt_cpus_mask;
|
||||
/* The suspend/resume cpusets are x86 only, but minimize ifdefs. */
|
||||
extern volatile cpuset_t resuming_cpus; /* woken up cpus in suspend pen */
|
||||
extern volatile cpuset_t started_cpus; /* cpus to let out of stop pen */
|
||||
extern volatile cpuset_t stopped_cpus; /* cpus in stop pen */
|
||||
extern volatile cpuset_t suspended_cpus; /* cpus [near] sleeping in susp pen */
|
||||
extern volatile cpuset_t toresume_cpus; /* cpus to let out of suspend pen */
|
||||
extern cpuset_t hlt_cpus_mask; /* XXX 'mask' is detail in old impl */
|
||||
extern cpuset_t logical_cpus_mask;
|
||||
#endif /* SMP */
|
||||
|
||||
|
@ -310,7 +310,7 @@ acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
|
||||
|
||||
#ifdef SMP
|
||||
if (!CPU_EMPTY(&suspcpus))
|
||||
restart_cpus(suspcpus);
|
||||
resume_cpus(suspcpus);
|
||||
#endif
|
||||
mca_resume();
|
||||
#ifdef __amd64__
|
||||
|
@ -124,6 +124,9 @@ struct cpu_ops cpu_ops;
|
||||
|
||||
static volatile cpuset_t ipi_stop_nmi_pending;
|
||||
|
||||
volatile cpuset_t resuming_cpus;
|
||||
volatile cpuset_t toresume_cpus;
|
||||
|
||||
/* used to hold the AP's until we are ready to release them */
|
||||
struct mtx ap_boot_mtx;
|
||||
|
||||
@ -1379,6 +1382,13 @@ cpususpend_handler(void)
|
||||
#endif
|
||||
wbinvd();
|
||||
CPU_SET_ATOMIC(cpu, &suspended_cpus);
|
||||
/*
|
||||
* Hack for xen, which does not use resumectx() so never
|
||||
* uses the next clause: set resuming_cpus early so that
|
||||
* resume_cpus() can wait on the same bitmap for acpi and
|
||||
* xen. resuming_cpus now means eventually_resumable_cpus.
|
||||
*/
|
||||
CPU_SET_ATOMIC(cpu, &resuming_cpus);
|
||||
} else {
|
||||
#ifdef __amd64__
|
||||
fpuresume(susppcbs[cpu]->sp_fpususpend);
|
||||
@ -1390,12 +1400,12 @@ cpususpend_handler(void)
|
||||
PCPU_SET(switchtime, 0);
|
||||
PCPU_SET(switchticks, ticks);
|
||||
|
||||
/* Indicate that we are resumed */
|
||||
/* Indicate that we are resuming */
|
||||
CPU_CLR_ATOMIC(cpu, &suspended_cpus);
|
||||
}
|
||||
|
||||
/* Wait for resume */
|
||||
while (!CPU_ISSET(cpu, &started_cpus))
|
||||
/* Wait for resume directive */
|
||||
while (!CPU_ISSET(cpu, &toresume_cpus))
|
||||
ia32_pause();
|
||||
|
||||
#ifdef __i386__
|
||||
@ -1416,8 +1426,9 @@ cpususpend_handler(void)
|
||||
lapic_setup(0);
|
||||
|
||||
/* Indicate that we are resumed */
|
||||
CPU_CLR_ATOMIC(cpu, &resuming_cpus);
|
||||
CPU_CLR_ATOMIC(cpu, &suspended_cpus);
|
||||
CPU_CLR_ATOMIC(cpu, &started_cpus);
|
||||
CPU_CLR_ATOMIC(cpu, &toresume_cpus);
|
||||
}
|
||||
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user