2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2008-11-03 21:17:02 +00:00
|
|
|
* Copyright (c) 2001, John Baldwin <jhb@FreeBSD.org>.
|
|
|
|
* All rights reserved.
|
1997-04-26 11:46:25 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
2001-04-27 19:28:25 +00:00
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
1997-04-26 11:46:25 +00:00
|
|
|
*
|
2008-11-03 21:17:02 +00:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
1997-04-26 11:46:25 +00:00
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
2008-11-03 21:17:02 +00:00
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
1997-04-26 11:46:25 +00:00
|
|
|
*/
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/*
|
|
|
|
* This module holds the global variables and machine independent functions
|
2001-05-10 17:45:49 +00:00
|
|
|
* used for the kernel SMP support.
|
2001-04-27 19:28:25 +00:00
|
|
|
*/
|
1997-12-08 23:00:24 +00:00
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1997-08-25 21:28:08 +00:00
|
|
|
#include <sys/param.h>
|
1997-04-26 11:46:25 +00:00
|
|
|
#include <sys/systm.h>
|
2001-05-10 17:45:49 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-04-27 19:28:25 +00:00
|
|
|
#include <sys/ktr.h>
|
1997-08-26 18:10:38 +00:00
|
|
|
#include <sys/proc.h>
|
2004-03-09 03:37:21 +00:00
|
|
|
#include <sys/bus.h>
|
2001-04-27 19:28:25 +00:00
|
|
|
#include <sys/lock.h>
|
2000-10-20 07:58:15 +00:00
|
|
|
#include <sys/mutex.h>
|
2001-05-10 17:45:49 +00:00
|
|
|
#include <sys/pcpu.h>
|
2012-11-15 00:51:57 +00:00
|
|
|
#include <sys/sched.h>
|
2001-04-27 19:28:25 +00:00
|
|
|
#include <sys/smp.h>
|
|
|
|
#include <sys/sysctl.h>
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2007-07-03 18:37:06 +00:00
|
|
|
#include <machine/cpu.h>
|
2002-03-07 04:43:51 +00:00
|
|
|
#include <machine/smp.h>
|
|
|
|
|
2004-09-01 06:42:02 +00:00
|
|
|
#include "opt_sched.h"
|
|
|
|
|
2003-12-03 14:55:31 +00:00
|
|
|
#ifdef SMP
/* CPUs that have acknowledged a stop or suspend IPI. */
volatile cpuset_t stopped_cpus;
/* CPUs that have been told to restart; cleared by each CPU as it resumes. */
volatile cpuset_t started_cpus;
volatile cpuset_t suspended_cpus;
cpuset_t hlt_cpus_mask;
cpuset_t logical_cpus_mask;

/* Optional MD hook run by stopped CPUs when they are restarted. */
void (*cpustop_restartfunc)(void);
#endif

static int sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS);
|
|
|
|
|
2004-09-03 07:42:31 +00:00
|
|
|
/* This is used in modules that need to work in both SMP and UP. */
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
cpuset_t all_cpus;
|
2003-12-03 14:55:31 +00:00
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
int mp_ncpus;
|
2003-12-23 13:54:16 +00:00
|
|
|
/* export this for libkvm consumers. */
|
|
|
|
int mp_maxcpus = MAXCPU;
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2001-10-31 20:43:38 +00:00
|
|
|
volatile int smp_started;
|
2002-03-05 10:01:46 +00:00
|
|
|
u_int mp_maxid;
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2011-11-07 15:43:11 +00:00
|
|
|
static SYSCTL_NODE(_kern, OID_AUTO, smp, CTLFLAG_RD|CTLFLAG_CAPRD, NULL,
|
|
|
|
"Kernel SMP");
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_INT(_kern_smp, OID_AUTO, maxid, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxid, 0,
|
2008-05-23 04:05:26 +00:00
|
|
|
"Max CPU ID.");
|
|
|
|
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_INT(_kern_smp, OID_AUTO, maxcpus, CTLFLAG_RD|CTLFLAG_CAPRD, &mp_maxcpus,
|
|
|
|
0, "Max number of CPUs that the system was compiled for.");
|
2003-12-23 13:54:16 +00:00
|
|
|
|
2014-04-26 20:27:54 +00:00
|
|
|
SYSCTL_PROC(_kern_smp, OID_AUTO, active, CTLFLAG_RD | CTLTYPE_INT, NULL, 0,
|
|
|
|
sysctl_kern_smp_active, "I", "Indicates system is running in SMP mode");
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2002-12-28 23:21:13 +00:00
|
|
|
int smp_disabled = 0; /* has smp been disabled? */
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_INT(_kern_smp, OID_AUTO, disabled, CTLFLAG_RDTUN|CTLFLAG_CAPRD,
|
|
|
|
&smp_disabled, 0, "SMP has been disabled from the loader");
|
2002-12-28 23:21:13 +00:00
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
int smp_cpus = 1; /* how many cpu's running */
|
2011-07-17 23:05:24 +00:00
|
|
|
SYSCTL_INT(_kern_smp, OID_AUTO, cpus, CTLFLAG_RD|CTLFLAG_CAPRD, &smp_cpus, 0,
|
2003-06-12 19:46:51 +00:00
|
|
|
"Number of CPUs online");
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2008-03-02 07:58:42 +00:00
|
|
|
int smp_topology = 0; /* Which topology we're using. */
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_kern_smp, OID_AUTO, topology, CTLFLAG_RDTUN, &smp_topology, 0,
|
2008-03-02 07:58:42 +00:00
|
|
|
"Topology override setting; 0 is default provided by hardware.");
|
|
|
|
|
2003-12-03 14:55:31 +00:00
|
|
|
#ifdef SMP
/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_kern_smp, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
    &forward_signal_enabled, 0,
    "Forwarding of a signal to a process on a different CPU");

/* Variables needed for SMP rendezvous. */
static volatile int smp_rv_ncpus;
static void (*volatile smp_rv_setup_func)(void *arg);
static void (*volatile smp_rv_action_func)(void *arg);
static void (*volatile smp_rv_teardown_func)(void *arg);
static void *volatile smp_rv_func_arg;
static volatile int smp_rv_waiters[4];

/*
 * Shared mutex to restrict busywaits between smp_rendezvous() and
 * smp(_targeted)_tlb_shootdown().  A deadlock occurs if both of these
 * functions trigger at once and cause multiple CPUs to busywait with
 * interrupts disabled.
 */
struct mtx smp_ipi_mtx;
|
1997-04-26 11:46:25 +00:00
|
|
|
|
|
|
|
/*
|
2003-11-21 22:23:26 +00:00
|
|
|
* Let the MD SMP code initialize mp_maxid very early if it can.
|
2002-03-05 10:01:46 +00:00
|
|
|
*/
|
|
|
|
static void
|
2003-11-21 22:23:26 +00:00
|
|
|
mp_setmaxid(void *dummy)
|
2002-03-05 10:01:46 +00:00
|
|
|
{
|
2015-11-08 14:26:50 +00:00
|
|
|
|
2003-11-21 22:23:26 +00:00
|
|
|
cpu_mp_setmaxid();
|
2015-11-08 14:26:50 +00:00
|
|
|
|
|
|
|
KASSERT(mp_ncpus >= 1, ("%s: CPU count < 1", __func__));
|
|
|
|
KASSERT(mp_ncpus > 1 || mp_maxid == 0,
|
|
|
|
("%s: one CPU but mp_maxid is not zero", __func__));
|
|
|
|
KASSERT(mp_maxid >= mp_ncpus - 1,
|
|
|
|
("%s: counters out of sync: max %d, count %d", __func__,
|
|
|
|
mp_maxid, mp_ncpus));
|
2002-03-05 10:01:46 +00:00
|
|
|
}
|
2008-03-16 10:58:09 +00:00
|
|
|
SYSINIT(cpu_mp_setmaxid, SI_SUB_TUNABLES, SI_ORDER_FIRST, mp_setmaxid, NULL);
|
2002-03-05 10:01:46 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Call the MD SMP initialization code.
|
1997-04-26 11:46:25 +00:00
|
|
|
*/
|
2001-04-27 19:28:25 +00:00
|
|
|
static void
|
|
|
|
mp_start(void *dummy)
|
1997-04-26 11:46:25 +00:00
|
|
|
{
|
1997-04-28 00:25:00 +00:00
|
|
|
|
2010-05-11 15:36:16 +00:00
|
|
|
mtx_init(&smp_ipi_mtx, "smp rendezvous", NULL, MTX_SPIN);
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/* Probe for MP hardware. */
|
2003-11-21 22:23:26 +00:00
|
|
|
if (smp_disabled != 0 || cpu_mp_probe() == 0) {
|
2003-10-30 21:44:01 +00:00
|
|
|
mp_ncpus = 1;
|
2011-07-04 12:04:52 +00:00
|
|
|
CPU_SETOF(PCPU_GET(cpuid), &all_cpus);
|
2001-04-27 19:28:25 +00:00
|
|
|
return;
|
2003-10-30 21:44:01 +00:00
|
|
|
}
|
1997-04-26 11:46:25 +00:00
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
cpu_mp_start();
|
|
|
|
printf("FreeBSD/SMP: Multiprocessor System Detected: %d CPUs\n",
|
|
|
|
mp_ncpus);
|
|
|
|
cpu_mp_announce();
|
1997-04-26 11:46:25 +00:00
|
|
|
}
|
2008-03-16 10:58:09 +00:00
|
|
|
SYSINIT(cpu_mp, SI_SUB_CPU, SI_ORDER_THIRD, mp_start, NULL);
|
1997-06-27 23:33:17 +00:00
|
|
|
|
1997-12-08 23:00:24 +00:00
|
|
|
void
|
2001-09-12 08:38:13 +00:00
|
|
|
forward_signal(struct thread *td)
|
1998-03-03 20:55:26 +00:00
|
|
|
{
|
|
|
|
int id;
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/*
|
2003-03-31 22:49:17 +00:00
|
|
|
* signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
|
|
|
|
* this thread, so all we need to do is poke it if it is currently
|
2002-04-05 10:00:37 +00:00
|
|
|
* executing so that it executes ast().
|
1998-03-03 20:55:26 +00:00
|
|
|
*/
|
Commit 14/14 of sched_lock decomposition.
- Use thread_lock() rather than sched_lock for per-thread scheduling
sychronization.
- Use the per-process spinlock rather than the sched_lock for per-process
scheduling synchronization.
Tested by: kris, current@
Tested on: i386, amd64, ULE, 4BSD, libthr, libkse, PREEMPTION, etc.
Discussed with: kris, attilio, kmacy, jhb, julian, bde (small parts each)
2007-06-05 00:00:57 +00:00
|
|
|
THREAD_LOCK_ASSERT(td, MA_OWNED);
|
2002-09-11 08:13:56 +00:00
|
|
|
KASSERT(TD_IS_RUNNING(td),
|
Part 1 of KSE-III
The ability to schedule multiple threads per process
(one one cpu) by making ALL system calls optionally asynchronous.
to come: ia64 and power-pc patches, patches for gdb, test program (in tools)
Reviewed by: Almost everyone who counts
(at various times, peter, jhb, matt, alfred, mini, bernd,
and a cast of thousands)
NOTE: this is still Beta code, and contains lots of debugging stuff.
expect slight instability in signals..
2002-06-29 17:26:22 +00:00
|
|
|
("forward_signal: thread is not TDS_RUNNING"));
|
1998-03-03 20:55:26 +00:00
|
|
|
|
2001-09-12 08:38:13 +00:00
|
|
|
CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);
|
2001-01-24 09:48:52 +00:00
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
if (!smp_started || cold || panicstr)
|
1998-03-03 20:55:26 +00:00
|
|
|
return;
|
|
|
|
if (!forward_signal_enabled)
|
|
|
|
return;
|
2001-04-27 19:28:25 +00:00
|
|
|
|
|
|
|
/* No need to IPI ourself. */
|
2001-09-12 08:38:13 +00:00
|
|
|
if (td == curthread)
|
2001-04-27 19:28:25 +00:00
|
|
|
return;
|
|
|
|
|
2003-04-10 17:35:44 +00:00
|
|
|
id = td->td_oncpu;
|
2001-04-27 19:28:25 +00:00
|
|
|
if (id == NOCPU)
|
|
|
|
return;
|
2010-08-06 15:36:59 +00:00
|
|
|
ipi_cpu(id, IPI_AST);
|
1998-03-03 20:55:26 +00:00
|
|
|
}
|
1998-03-03 22:56:30 +00:00
|
|
|
|
2001-01-24 09:48:52 +00:00
|
|
|
/*
|
|
|
|
* When called the executing CPU will send an IPI to all other CPUs
|
|
|
|
* requesting that they halt execution.
|
|
|
|
*
|
|
|
|
* Usually (but not necessarily) called with 'other_cpus' as its arg.
|
|
|
|
*
|
|
|
|
* - Signals all CPUs in map to stop.
|
|
|
|
* - Waits for each to stop.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* -1: error
|
|
|
|
* 0: NA
|
|
|
|
* 1: ok
|
|
|
|
*
|
|
|
|
*/
|
2009-08-13 17:09:45 +00:00
|
|
|
static int
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
generic_stop_cpus(cpuset_t map, u_int type)
|
2001-01-24 09:48:52 +00:00
|
|
|
{
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
#ifdef KTR
|
|
|
|
char cpusetbuf[CPUSETBUFSIZ];
|
|
|
|
#endif
|
2010-10-12 17:40:45 +00:00
|
|
|
static volatile u_int stopping_cpu = NOCPU;
|
2001-04-27 19:28:25 +00:00
|
|
|
int i;
|
2012-06-11 18:47:26 +00:00
|
|
|
volatile cpuset_t *cpus;
|
2001-01-24 09:48:52 +00:00
|
|
|
|
2010-10-12 17:40:45 +00:00
|
|
|
KASSERT(
|
Add SMP/i386 suspend/resume support.
Most part is merged from amd64.
- i386/acpica/acpi_wakecode.S
Replaced with amd64 code (from realmode to paging enabling code).
- i386/acpica/acpi_wakeup.c
Replaced with amd64 code (except for wakeup_pagetables stuff).
- i386/include/pcb.h
- i386/i386/genassym.c
Added PCB new members (CR0, CR2, CR4, DS, ED, FS, SS, GDT, IDT, LDT
and TR) needed for suspend/resume, not for context switch.
- i386/i386/swtch.s
Added suspendctx() and resumectx().
Note that savectx() was not changed and used for suspending (while
amd64 code uses it).
BSP and AP execute the same sequence, suspendctx(), acpi_wakecode()
and resumectx() for suspend/resume (in case of UP system also).
- i386/i386/apic_vector.s
Added cpususpend().
- i386/i386/mp_machdep.c
- i386/include/smp.h
Added cpususpend_handler().
- i386/include/apicvar.h
- kern/subr_smp.c
- sys/smp.h
Added IPI_SUSPEND and suspend_cpus().
- i386/i386/initcpu.c
- i386/i386/machdep.c
- i386/include/md_var.h
- pc98/pc98/machdep.c
Moved initializecpu() declarations to md_var.h.
MFC after: 3 days
2012-05-18 18:55:58 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
2010-10-12 17:40:45 +00:00
|
|
|
type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
|
|
|
|
#else
|
|
|
|
type == IPI_STOP || type == IPI_STOP_HARD,
|
|
|
|
#endif
|
2009-08-13 17:09:45 +00:00
|
|
|
("%s: invalid stop type", __func__));
|
|
|
|
|
2001-01-24 09:48:52 +00:00
|
|
|
if (!smp_started)
|
2010-10-12 17:40:45 +00:00
|
|
|
return (0);
|
2001-01-24 09:48:52 +00:00
|
|
|
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
CTR2(KTR_SMP, "stop_cpus(%s) with %u type",
|
|
|
|
cpusetobj_strprint(cpusetbuf, &map), type);
|
2001-04-27 19:28:25 +00:00
|
|
|
|
2013-09-22 02:46:13 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
2013-09-20 05:06:03 +00:00
|
|
|
/*
|
2013-09-20 22:59:22 +00:00
|
|
|
* When suspending, ensure there are are no IPIs in progress.
|
|
|
|
* IPIs that have been issued, but not yet delivered (e.g.
|
|
|
|
* not pending on a vCPU when running under virtualization)
|
|
|
|
* will be lost, violating FreeBSD's assumption of reliable
|
|
|
|
* IPI delivery.
|
2013-09-20 05:06:03 +00:00
|
|
|
*/
|
|
|
|
if (type == IPI_SUSPEND)
|
|
|
|
mtx_lock_spin(&smp_ipi_mtx);
|
2013-09-22 02:46:13 +00:00
|
|
|
#endif
|
2013-09-20 05:06:03 +00:00
|
|
|
|
2010-10-12 17:40:45 +00:00
|
|
|
if (stopping_cpu != PCPU_GET(cpuid))
|
|
|
|
while (atomic_cmpset_int(&stopping_cpu, NOCPU,
|
|
|
|
PCPU_GET(cpuid)) == 0)
|
|
|
|
while (stopping_cpu != NOCPU)
|
|
|
|
cpu_spinwait(); /* spin */
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/* send the stop IPI to all CPUs in map */
|
2009-08-13 17:09:45 +00:00
|
|
|
ipi_selected(map, type);
|
2004-03-27 18:21:24 +00:00
|
|
|
|
2012-06-11 18:47:26 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
|
|
|
if (type == IPI_SUSPEND)
|
|
|
|
cpus = &suspended_cpus;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
cpus = &stopped_cpus;
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
i = 0;
|
2012-06-11 18:47:26 +00:00
|
|
|
while (!CPU_SUBSET(cpus, &map)) {
|
2001-04-27 19:28:25 +00:00
|
|
|
/* spin */
|
2007-07-03 18:37:06 +00:00
|
|
|
cpu_spinwait();
|
2001-04-27 19:28:25 +00:00
|
|
|
i++;
|
2011-06-25 10:01:43 +00:00
|
|
|
if (i == 100000000) {
|
2001-04-27 19:28:25 +00:00
|
|
|
printf("timeout stopping cpus\n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2001-01-24 09:48:52 +00:00
|
|
|
|
2013-09-22 02:46:13 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
2013-09-20 05:06:03 +00:00
|
|
|
if (type == IPI_SUSPEND)
|
|
|
|
mtx_unlock_spin(&smp_ipi_mtx);
|
2013-09-22 02:46:13 +00:00
|
|
|
#endif
|
2013-09-20 05:06:03 +00:00
|
|
|
|
2010-10-12 17:40:45 +00:00
|
|
|
stopping_cpu = NOCPU;
|
|
|
|
return (1);
|
2001-01-24 09:48:52 +00:00
|
|
|
}
|
|
|
|
|
2009-08-13 17:09:45 +00:00
|
|
|
int
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
stop_cpus(cpuset_t map)
|
2009-08-13 17:09:45 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (generic_stop_cpus(map, IPI_STOP));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
stop_cpus_hard(cpuset_t map)
|
2009-08-13 17:09:45 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (generic_stop_cpus(map, IPI_STOP_HARD));
|
|
|
|
}
|
|
|
|
|
Add SMP/i386 suspend/resume support.
Most part is merged from amd64.
- i386/acpica/acpi_wakecode.S
Replaced with amd64 code (from realmode to paging enabling code).
- i386/acpica/acpi_wakeup.c
Replaced with amd64 code (except for wakeup_pagetables stuff).
- i386/include/pcb.h
- i386/i386/genassym.c
Added PCB new members (CR0, CR2, CR4, DS, ED, FS, SS, GDT, IDT, LDT
and TR) needed for suspend/resume, not for context switch.
- i386/i386/swtch.s
Added suspendctx() and resumectx().
Note that savectx() was not changed and used for suspending (while
amd64 code uses it).
BSP and AP execute the same sequence, suspendctx(), acpi_wakecode()
and resumectx() for suspend/resume (in case of UP system also).
- i386/i386/apic_vector.s
Added cpususpend().
- i386/i386/mp_machdep.c
- i386/include/smp.h
Added cpususpend_handler().
- i386/include/apicvar.h
- kern/subr_smp.c
- sys/smp.h
Added IPI_SUSPEND and suspend_cpus().
- i386/i386/initcpu.c
- i386/i386/machdep.c
- i386/include/md_var.h
- pc98/pc98/machdep.c
Moved initializecpu() declarations to md_var.h.
MFC after: 3 days
2012-05-18 18:55:58 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
2009-03-17 00:48:11 +00:00
|
|
|
int
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
suspend_cpus(cpuset_t map)
|
2009-03-17 00:48:11 +00:00
|
|
|
{
|
|
|
|
|
2010-10-12 17:40:45 +00:00
|
|
|
return (generic_stop_cpus(map, IPI_SUSPEND));
|
2009-03-17 00:48:11 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2001-01-24 09:48:52 +00:00
|
|
|
/*
|
|
|
|
* Called by a CPU to restart stopped CPUs.
|
|
|
|
*
|
|
|
|
* Usually (but not necessarily) called with 'stopped_cpus' as its arg.
|
|
|
|
*
|
|
|
|
* - Signals all CPUs in map to restart.
|
|
|
|
* - Waits for each to restart.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* -1: error
|
|
|
|
* 0: NA
|
|
|
|
* 1: ok
|
|
|
|
*/
|
2013-09-20 05:06:03 +00:00
|
|
|
static int
|
|
|
|
generic_restart_cpus(cpuset_t map, u_int type)
|
2001-01-24 09:48:52 +00:00
|
|
|
{
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
#ifdef KTR
|
|
|
|
char cpusetbuf[CPUSETBUFSIZ];
|
|
|
|
#endif
|
2013-09-20 05:06:03 +00:00
|
|
|
volatile cpuset_t *cpus;
|
|
|
|
|
|
|
|
KASSERT(
|
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
|
|
|
type == IPI_STOP || type == IPI_STOP_HARD || type == IPI_SUSPEND,
|
|
|
|
#else
|
|
|
|
type == IPI_STOP || type == IPI_STOP_HARD,
|
|
|
|
#endif
|
|
|
|
("%s: invalid stop type", __func__));
|
2001-01-24 09:48:52 +00:00
|
|
|
|
|
|
|
if (!smp_started)
|
|
|
|
return 0;
|
|
|
|
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architectures.
cpuset_t, on the other hand, is implemented as an array of longs, and is
easily extensible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different between kernel and userland (this is
primarily done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to come soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));
|
2001-01-24 09:48:52 +00:00
|
|
|
|
2013-09-20 05:06:03 +00:00
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
|
|
|
if (type == IPI_SUSPEND)
|
|
|
|
cpus = &suspended_cpus;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
cpus = &stopped_cpus;
|
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/* signal other cpus to restart */
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architectures.
cpuset_t, on the other hand, is implemented as an array of longs, and is
easily extensible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different between kernel and userland (this is
primarily done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to come soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
CPU_COPY_STORE_REL(&map, &started_cpus);
|
2001-01-24 09:48:52 +00:00
|
|
|
|
2001-04-27 19:28:25 +00:00
|
|
|
/* wait for each to clear its bit */
|
2013-09-20 05:06:03 +00:00
|
|
|
while (CPU_OVERLAP(cpus, &map))
|
2007-07-03 18:37:06 +00:00
|
|
|
cpu_spinwait();
|
2001-01-24 09:48:52 +00:00
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2013-09-20 05:06:03 +00:00
|
|
|
int
|
|
|
|
restart_cpus(cpuset_t map)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (generic_restart_cpus(map, IPI_STOP));
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(__amd64__) || defined(__i386__)
|
|
|
|
int
|
|
|
|
resume_cpus(cpuset_t map)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (generic_restart_cpus(map, IPI_SUSPEND));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1999-07-20 06:52:35 +00:00
|
|
|
/*
|
|
|
|
* All-CPU rendezvous. CPUs are signalled, all execute the setup function
|
|
|
|
* (if specified), rendezvous, execute the action function (if specified),
|
|
|
|
* rendezvous again, execute the teardown function (if specified), and then
|
|
|
|
* resume.
|
|
|
|
*
|
|
|
|
* Note that the supplied external functions _must_ be reentrant and aware
|
|
|
|
* that they are running in parallel and in an unknown lock context.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
smp_rendezvous_action(void)
|
|
|
|
{
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
struct thread *td;
|
2011-05-17 16:39:08 +00:00
|
|
|
void *local_func_arg;
|
|
|
|
void (*local_setup_func)(void*);
|
|
|
|
void (*local_action_func)(void*);
|
|
|
|
void (*local_teardown_func)(void*);
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
int owepreempt;
|
|
|
|
#endif
|
2008-01-02 17:09:15 +00:00
|
|
|
|
2007-07-03 18:37:06 +00:00
|
|
|
/* Ensure we have up-to-date values. */
|
|
|
|
atomic_add_acq_int(&smp_rv_waiters[0], 1);
|
2008-08-27 18:23:55 +00:00
|
|
|
while (smp_rv_waiters[0] < smp_rv_ncpus)
|
2007-07-03 18:37:06 +00:00
|
|
|
cpu_spinwait();
|
|
|
|
|
2011-05-17 16:39:08 +00:00
|
|
|
/* Fetch rendezvous parameters after acquire barrier. */
|
|
|
|
local_func_arg = smp_rv_func_arg;
|
|
|
|
local_setup_func = smp_rv_setup_func;
|
|
|
|
local_action_func = smp_rv_action_func;
|
|
|
|
local_teardown_func = smp_rv_teardown_func;
|
|
|
|
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
/*
|
|
|
|
* Use a nested critical section to prevent any preemptions
|
|
|
|
* from occurring during a rendezvous action routine.
|
|
|
|
* Specifically, if a rendezvous handler is invoked via an IPI
|
|
|
|
* and the interrupted thread was in the critical_exit()
|
|
|
|
* function after setting td_critnest to 0 but before
|
|
|
|
* performing a deferred preemption, this routine can be
|
|
|
|
* invoked with td_critnest set to 0 and td_owepreempt true.
|
|
|
|
* In that case, a critical_exit() during the rendezvous
|
|
|
|
* action would trigger a preemption which is not permitted in
|
|
|
|
* a rendezvous action. To fix this, wrap all of the
|
|
|
|
* rendezvous action handlers in a critical section. We
|
|
|
|
* cannot use a regular critical section however as having
|
|
|
|
* critical_exit() preempt from this routine would also be
|
|
|
|
* problematic (the preemption must not occur before the IPI
|
2011-05-24 19:55:57 +00:00
|
|
|
* has been acknowledged via an EOI). Instead, we
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
* intentionally ignore td_owepreempt when leaving the
|
2011-05-24 19:55:57 +00:00
|
|
|
* critical section. This should be harmless because we do
|
|
|
|
* not permit rendezvous action routines to schedule threads,
|
|
|
|
* and thus td_owepreempt should never transition from 0 to 1
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
* during this routine.
|
|
|
|
*/
|
|
|
|
td = curthread;
|
|
|
|
td->td_critnest++;
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
owepreempt = td->td_owepreempt;
|
|
|
|
#endif
|
|
|
|
|
2011-05-17 16:39:08 +00:00
|
|
|
/*
|
|
|
|
* If requested, run a setup function before the main action
|
|
|
|
* function. Ensure all CPUs have completed the setup
|
|
|
|
* function before moving on to the action function.
|
|
|
|
*/
|
2007-11-08 14:47:55 +00:00
|
|
|
if (local_setup_func != smp_no_rendevous_barrier) {
|
|
|
|
if (smp_rv_setup_func != NULL)
|
|
|
|
smp_rv_setup_func(smp_rv_func_arg);
|
|
|
|
atomic_add_int(&smp_rv_waiters[1], 1);
|
2008-08-27 18:23:55 +00:00
|
|
|
while (smp_rv_waiters[1] < smp_rv_ncpus)
|
2007-11-08 14:47:55 +00:00
|
|
|
cpu_spinwait();
|
|
|
|
}
|
2007-07-03 18:37:06 +00:00
|
|
|
|
2007-11-08 14:47:55 +00:00
|
|
|
if (local_action_func != NULL)
|
|
|
|
local_action_func(local_func_arg);
|
|
|
|
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
if (local_teardown_func != smp_no_rendevous_barrier) {
|
2011-07-30 20:29:39 +00:00
|
|
|
/*
|
|
|
|
* Signal that the main action has been completed. If a
|
|
|
|
* full exit rendezvous is requested, then all CPUs will
|
|
|
|
* wait here until all CPUs have finished the main action.
|
|
|
|
*/
|
|
|
|
atomic_add_int(&smp_rv_waiters[2], 1);
|
|
|
|
while (smp_rv_waiters[2] < smp_rv_ncpus)
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
cpu_spinwait();
|
|
|
|
|
|
|
|
if (local_teardown_func != NULL)
|
|
|
|
local_teardown_func(local_func_arg);
|
|
|
|
}
|
2008-01-02 17:09:15 +00:00
|
|
|
|
2011-07-30 20:29:39 +00:00
|
|
|
/*
|
|
|
|
* Signal that the rendezvous is fully completed by this CPU.
|
|
|
|
* This means that no member of smp_rv_* pseudo-structure will be
|
|
|
|
* accessed by this target CPU after this point; in particular,
|
|
|
|
* memory pointed by smp_rv_func_arg.
|
2015-07-21 22:56:46 +00:00
|
|
|
*
|
|
|
|
* The release semantic ensures that all accesses performed by
|
|
|
|
* the current CPU are visible when smp_rendezvous_cpus()
|
|
|
|
* returns, by synchronizing with the
|
|
|
|
* atomic_load_acq_int(&smp_rv_waiters[3]).
|
2011-07-30 20:29:39 +00:00
|
|
|
*/
|
2015-07-21 22:56:46 +00:00
|
|
|
atomic_add_rel_int(&smp_rv_waiters[3], 1);
|
2011-07-30 20:29:39 +00:00
|
|
|
|
Fix an issue with critical sections and SMP rendezvous handlers.
Specifically, a critical_exit() call that drops the nesting level to zero
has a brief window where the pending preemption flag is set and the
nesting level is set to zero. This is done purposefully to avoid races
where a preemption scheduled by an interrupt could be lost otherwise (see
revision 144777). However, this does mean that if an interrupt fires
during this window and enters and exits a critical section, it may preempt
from the interrupt context. This is generally fine as the interrupt code
is careful to arrange critical sections so that they are not exited until
it is safe to preempt (e.g. interrupts EOI'd and masked if necessary).
However, the SMP rendezvous IPI handler does not quite follow this rule,
and in general a rendezvous can never be preempted. Rendezvous handlers
are also not permitted to schedule threads to execute, so they will not
typically trigger preemptions. SMP rendezvous handlers may use
spinlocks (carefully) such as the rm_cleanIPI() handler used in rmlocks,
but using a spinlock also enters and exits a critical section. If the
interrupted top-half code is in the brief window of critical_exit() where
the nesting level is zero but a preemption is pending, then releasing the
spinlock can trigger a preemption. Because we know that SMP rendezvous
handlers can never schedule a thread, we know that a critical_exit() in
an SMP rendezvous handler will only preempt in this edge case. We also
know that the top-half thread will happily handle the deferred preemption
once the SMP rendezvous has completed, so the preemption will not be lost.
This makes it safe to employ a workaround where we use a nested critical
section in the SMP rendezvous code itself around rendezvous action
routines to prevent any preemptions during an SMP rendezvous. The
workaround intentionally avoids checking for a deferred preemption
when leaving the critical section on the assumption that if there is a
pending preemption it will be handled by the interrupted top-half code.
Submitted by: mlaier (variation specific to rm_cleanIPI())
Obtained from: Isilon
MFC after: 1 week
2011-05-24 13:36:41 +00:00
|
|
|
td->td_critnest--;
|
|
|
|
KASSERT(owepreempt == td->td_owepreempt,
|
|
|
|
("rendezvous action changed td_owepreempt"));
|
1999-07-20 06:52:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architectures.
cpuset_t, on the other hand, is implemented as an array of longs, and is
easily extensible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different between kernel and userland (this is
primarily done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to come soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
smp_rendezvous_cpus(cpuset_t map,
	void (* setup_func)(void *),
	void (* action_func)(void *),
	void (* teardown_func)(void *),
	void *arg)
{
	int curcpumap, i, ncpus = 0;

	/* Look comments in the !SMP case. */
	if (!smp_started) {
		/*
		 * Rendezvous before the system is fully up: just run the
		 * callbacks locally with interrupts/preemption blocked,
		 * matching the environment of smp_rendezvous_action().
		 */
		spinlock_enter();
		if (setup_func != NULL)
			setup_func(arg);
		if (action_func != NULL)
			action_func(arg);
		if (teardown_func != NULL)
			teardown_func(arg);
		spinlock_exit();
		return;
	}

	/* Count the CPUs actually participating in the rendezvous. */
	CPU_FOREACH(i) {
		if (CPU_ISSET(i, &map))
			ncpus++;
	}
	if (ncpus == 0)
		panic("ncpus is 0 with non-zero map");

	/* Only one rendezvous may be in flight at a time. */
	mtx_lock_spin(&smp_ipi_mtx);

	/* Pass rendezvous parameters via global variables. */
	smp_rv_ncpus = ncpus;
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[1] = 0;
	smp_rv_waiters[2] = 0;
	smp_rv_waiters[3] = 0;
	/* Release store publishes the parameters above to other CPUs. */
	atomic_store_rel_int(&smp_rv_waiters[0], 0);

	/*
	 * Signal other processors, which will enter the IPI with
	 * interrupts off.
	 */
	curcpumap = CPU_ISSET(curcpu, &map);
	CPU_CLR(curcpu, &map);
	ipi_selected(map, IPI_RENDEZVOUS);

	/* Check if the current CPU is in the map */
	if (curcpumap != 0)
		smp_rendezvous_action();

	/*
	 * Ensure that the master CPU waits for all the other
	 * CPUs to finish the rendezvous, so that smp_rv_*
	 * pseudo-structure and the arg are guaranteed to not
	 * be in use.
	 *
	 * Load acquire synchronizes with the release add in
	 * smp_rendezvous_action(), which ensures that our caller sees
	 * all memory actions done by the called functions on other
	 * CPUs.
	 */
	while (atomic_load_acq_int(&smp_rv_waiters[3]) < ncpus)
		cpu_spinwait();

	mtx_unlock_spin(&smp_ipi_mtx);
}
|
2003-12-03 14:55:31 +00:00
|
|
|
|
2008-05-23 04:05:26 +00:00
|
|
|
void
|
|
|
|
smp_rendezvous(void (* setup_func)(void *),
|
|
|
|
void (* action_func)(void *),
|
|
|
|
void (* teardown_func)(void *),
|
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
smp_rendezvous_cpus(all_cpus, setup_func, action_func, teardown_func, arg);
|
|
|
|
}
|
|
|
|
|
2008-03-02 07:58:42 +00:00
|
|
|
/* Static pool of topology nodes handed out by the smp_topo_*() builders. */
static struct cpu_group group[MAXCPU];
|
|
|
|
|
|
|
|
struct cpu_group *
|
|
|
|
smp_topo(void)
|
2003-12-03 14:55:31 +00:00
|
|
|
{
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
|
2008-03-02 07:58:42 +00:00
|
|
|
struct cpu_group *top;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for a fake topology request for debugging purposes.
|
|
|
|
*/
|
|
|
|
switch (smp_topology) {
|
|
|
|
case 1:
|
|
|
|
/* Dual core with no sharing. */
|
|
|
|
top = smp_topo_1level(CG_SHARE_NONE, 2, 0);
|
|
|
|
break;
|
2008-03-10 01:38:53 +00:00
|
|
|
case 2:
|
|
|
|
/* No topology, all cpus are equal. */
|
|
|
|
top = smp_topo_none();
|
|
|
|
break;
|
2008-03-02 07:58:42 +00:00
|
|
|
case 3:
|
|
|
|
/* Dual core with shared L2. */
|
|
|
|
top = smp_topo_1level(CG_SHARE_L2, 2, 0);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
/* quad core, shared l3 among each package, private l2. */
|
|
|
|
top = smp_topo_1level(CG_SHARE_L3, 4, 0);
|
|
|
|
break;
|
|
|
|
case 5:
|
|
|
|
/* quad core, 2 dualcore parts on each package share l2. */
|
|
|
|
top = smp_topo_2level(CG_SHARE_NONE, 2, CG_SHARE_L2, 2, 0);
|
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
/* Single-core 2xHTT */
|
|
|
|
top = smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_HTT);
|
|
|
|
break;
|
|
|
|
case 7:
|
|
|
|
/* quad core with a shared l3, 8 threads sharing L2. */
|
|
|
|
top = smp_topo_2level(CG_SHARE_L3, 4, CG_SHARE_L2, 8,
|
2009-04-29 03:15:43 +00:00
|
|
|
CG_FLAG_SMT);
|
2008-03-02 07:58:42 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Default, ask the system what it wants. */
|
|
|
|
top = cpu_topo();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Verify the returned topology.
|
|
|
|
*/
|
|
|
|
if (top->cg_count != mp_ncpus)
|
|
|
|
panic("Built bad topology at %p. CPU count %d != %d",
|
|
|
|
top, top->cg_count, mp_ncpus);
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
if (CPU_CMP(&top->cg_mask, &all_cpus))
|
|
|
|
panic("Built bad topology at %p. CPU mask (%s) != (%s)",
|
|
|
|
top, cpusetobj_strprint(cpusetbuf, &top->cg_mask),
|
|
|
|
cpusetobj_strprint(cpusetbuf2, &all_cpus));
|
2008-03-02 07:58:42 +00:00
|
|
|
return (top);
|
2003-12-03 14:55:31 +00:00
|
|
|
}
|
2008-03-02 07:58:42 +00:00
|
|
|
|
|
|
|
struct cpu_group *
|
|
|
|
smp_topo_none(void)
|
|
|
|
{
|
|
|
|
struct cpu_group *top;
|
|
|
|
|
|
|
|
top = &group[0];
|
|
|
|
top->cg_parent = NULL;
|
|
|
|
top->cg_child = NULL;
|
2011-02-11 22:43:10 +00:00
|
|
|
top->cg_mask = all_cpus;
|
2008-03-02 07:58:42 +00:00
|
|
|
top->cg_count = mp_ncpus;
|
|
|
|
top->cg_children = 0;
|
|
|
|
top->cg_level = CG_SHARE_NONE;
|
|
|
|
top->cg_flags = 0;
|
|
|
|
|
|
|
|
return (top);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
|
|
|
|
int count, int flags, int start)
|
|
|
|
{
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
char cpusetbuf[CPUSETBUFSIZ], cpusetbuf2[CPUSETBUFSIZ];
|
|
|
|
cpuset_t mask;
|
2008-03-02 07:58:42 +00:00
|
|
|
int i;
|
|
|
|
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
CPU_ZERO(&mask);
|
|
|
|
for (i = 0; i < count; i++, start++)
|
|
|
|
CPU_SET(start, &mask);
|
2008-03-02 07:58:42 +00:00
|
|
|
child->cg_parent = parent;
|
|
|
|
child->cg_child = NULL;
|
|
|
|
child->cg_children = 0;
|
|
|
|
child->cg_level = share;
|
|
|
|
child->cg_count = count;
|
|
|
|
child->cg_flags = flags;
|
|
|
|
child->cg_mask = mask;
|
|
|
|
parent->cg_children++;
|
|
|
|
for (; parent != NULL; parent = parent->cg_parent) {
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
if (CPU_OVERLAP(&parent->cg_mask, &child->cg_mask))
|
|
|
|
panic("Duplicate children in %p. mask (%s) child (%s)",
|
|
|
|
parent,
|
|
|
|
cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
|
|
|
|
cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
|
|
|
|
CPU_OR(&parent->cg_mask, &child->cg_mask);
|
2008-03-02 07:58:42 +00:00
|
|
|
parent->cg_count += child->cg_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (start);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct cpu_group *
|
|
|
|
smp_topo_1level(int share, int count, int flags)
|
|
|
|
{
|
|
|
|
struct cpu_group *child;
|
|
|
|
struct cpu_group *top;
|
|
|
|
int packages;
|
|
|
|
int cpu;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
cpu = 0;
|
|
|
|
top = &group[0];
|
|
|
|
packages = mp_ncpus / count;
|
|
|
|
top->cg_child = child = &group[1];
|
|
|
|
top->cg_level = CG_SHARE_NONE;
|
|
|
|
for (i = 0; i < packages; i++, child++)
|
|
|
|
cpu = smp_topo_addleaf(top, child, share, count, flags, cpu);
|
|
|
|
return (top);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct cpu_group *
|
|
|
|
smp_topo_2level(int l2share, int l2count, int l1share, int l1count,
|
|
|
|
int l1flags)
|
|
|
|
{
|
|
|
|
struct cpu_group *top;
|
|
|
|
struct cpu_group *l1g;
|
|
|
|
struct cpu_group *l2g;
|
|
|
|
int cpu;
|
|
|
|
int i;
|
|
|
|
int j;
|
|
|
|
|
|
|
|
cpu = 0;
|
|
|
|
top = &group[0];
|
|
|
|
l2g = &group[1];
|
|
|
|
top->cg_child = l2g;
|
|
|
|
top->cg_level = CG_SHARE_NONE;
|
|
|
|
top->cg_children = mp_ncpus / (l2count * l1count);
|
|
|
|
l1g = l2g + top->cg_children;
|
|
|
|
for (i = 0; i < top->cg_children; i++, l2g++) {
|
|
|
|
l2g->cg_parent = top;
|
|
|
|
l2g->cg_child = l1g;
|
|
|
|
l2g->cg_level = l2share;
|
|
|
|
for (j = 0; j < l2count; j++, l1g++)
|
|
|
|
cpu = smp_topo_addleaf(l2g, l1g, l1share, l1count,
|
|
|
|
l1flags, cpu);
|
|
|
|
}
|
|
|
|
return (top);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
struct cpu_group *
|
|
|
|
smp_topo_find(struct cpu_group *top, int cpu)
|
|
|
|
{
|
|
|
|
struct cpu_group *cg;
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
cpuset_t mask;
|
2008-03-02 07:58:42 +00:00
|
|
|
int children;
|
|
|
|
int i;
|
|
|
|
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
CPU_SETOF(cpu, &mask);
|
2008-03-02 07:58:42 +00:00
|
|
|
cg = top;
|
|
|
|
for (;;) {
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
if (!CPU_OVERLAP(&cg->cg_mask, &mask))
|
2008-03-02 07:58:42 +00:00
|
|
|
return (NULL);
|
|
|
|
if (cg->cg_children == 0)
|
|
|
|
return (cg);
|
|
|
|
children = cg->cg_children;
|
|
|
|
for (i = 0, cg = cg->cg_child; i < children; cg++, i++)
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
if (CPU_OVERLAP(&cg->cg_mask, &mask))
|
2008-03-02 07:58:42 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
#else /* !SMP */
|
2003-12-03 14:55:31 +00:00
|
|
|
|
2008-05-23 04:05:26 +00:00
|
|
|
void
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
smp_rendezvous_cpus(cpuset_t map,
|
2008-05-23 04:05:26 +00:00
|
|
|
void (*setup_func)(void *),
|
|
|
|
void (*action_func)(void *),
|
|
|
|
void (*teardown_func)(void *),
|
|
|
|
void *arg)
|
|
|
|
{
|
2011-11-03 14:36:56 +00:00
|
|
|
/*
|
|
|
|
* In the !SMP case we just need to ensure the same initial conditions
|
|
|
|
* as the SMP case.
|
|
|
|
*/
|
|
|
|
spinlock_enter();
|
2008-05-23 04:05:26 +00:00
|
|
|
if (setup_func != NULL)
|
|
|
|
setup_func(arg);
|
|
|
|
if (action_func != NULL)
|
|
|
|
action_func(arg);
|
|
|
|
if (teardown_func != NULL)
|
|
|
|
teardown_func(arg);
|
2011-11-03 14:36:56 +00:00
|
|
|
spinlock_exit();
|
2008-05-23 04:05:26 +00:00
|
|
|
}
|
|
|
|
|
2003-12-03 14:55:31 +00:00
|
|
|
void
|
2008-01-02 17:09:15 +00:00
|
|
|
smp_rendezvous(void (*setup_func)(void *),
|
|
|
|
void (*action_func)(void *),
|
|
|
|
void (*teardown_func)(void *),
|
2003-12-03 14:55:31 +00:00
|
|
|
void *arg)
|
|
|
|
{
|
|
|
|
|
2011-11-03 14:36:56 +00:00
|
|
|
/* Look comments in the smp_rendezvous_cpus() case. */
|
|
|
|
spinlock_enter();
|
2003-12-03 14:55:31 +00:00
|
|
|
if (setup_func != NULL)
|
|
|
|
setup_func(arg);
|
|
|
|
if (action_func != NULL)
|
|
|
|
action_func(arg);
|
|
|
|
if (teardown_func != NULL)
|
|
|
|
teardown_func(arg);
|
2011-11-03 14:36:56 +00:00
|
|
|
spinlock_exit();
|
2003-12-03 14:55:31 +00:00
|
|
|
}
|
2008-03-02 07:58:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Provide dummy SMP support for UP kernels. Modules that need to use SMP
|
|
|
|
* APIs will still work using this dummy support.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
mp_setvariables_for_up(void *dummy)
|
|
|
|
{
|
|
|
|
mp_ncpus = 1;
|
|
|
|
mp_maxid = PCPU_GET(cpuid);
|
2011-07-04 12:04:52 +00:00
|
|
|
CPU_SETOF(mp_maxid, &all_cpus);
|
2008-03-02 07:58:42 +00:00
|
|
|
KASSERT(PCPU_GET(cpuid) == 0, ("UP must have a CPU ID of zero"));
|
|
|
|
}
|
|
|
|
SYSINIT(cpu_mp_setvariables, SI_SUB_TUNABLES, SI_ORDER_FIRST,
|
2008-03-16 10:58:09 +00:00
|
|
|
mp_setvariables_for_up, NULL);
|
2003-12-03 14:55:31 +00:00
|
|
|
#endif /* SMP */
|
2008-05-23 04:05:26 +00:00
|
|
|
|
|
|
|
/*
 * A no-op rendezvous callback, usable wherever a rendezvous stage has no
 * work to do.  Only valid before SMP has started.  (The "rendevous"
 * spelling is historical and part of the public interface, so it is kept.)
 */
void
smp_no_rendevous_barrier(void *dummy)
{
#ifdef SMP
	KASSERT((!smp_started),("smp_no_rendevous called and smp is started"));
#endif
}
|
2012-11-15 00:51:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait specified idle threads to switch once. This ensures that even
|
|
|
|
* preempted threads have cycled through the switch function once,
|
|
|
|
* exiting their codepaths. This allows us to change global pointers
|
|
|
|
* with no other synchronization.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
|
|
|
|
{
|
|
|
|
struct pcpu *pcpu;
|
|
|
|
u_int gen[MAXCPU];
|
|
|
|
int error;
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
for (cpu = 0; cpu <= mp_maxid; cpu++) {
|
|
|
|
if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
|
|
|
|
continue;
|
|
|
|
pcpu = pcpu_find(cpu);
|
|
|
|
gen[cpu] = pcpu->pc_idlethread->td_generation;
|
|
|
|
}
|
|
|
|
for (cpu = 0; cpu <= mp_maxid; cpu++) {
|
|
|
|
if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
|
|
|
|
continue;
|
|
|
|
pcpu = pcpu_find(cpu);
|
|
|
|
thread_lock(curthread);
|
|
|
|
sched_bind(curthread, cpu);
|
|
|
|
thread_unlock(curthread);
|
|
|
|
while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
|
|
|
|
error = tsleep(quiesce_cpus, prio, wmesg, 1);
|
2012-12-19 20:08:06 +00:00
|
|
|
if (error != EWOULDBLOCK)
|
2012-11-15 00:51:57 +00:00
|
|
|
goto out;
|
2012-12-19 20:08:06 +00:00
|
|
|
error = 0;
|
2012-11-15 00:51:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
thread_lock(curthread);
|
|
|
|
sched_unbind(curthread);
|
|
|
|
thread_unlock(curthread);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
quiesce_all_cpus(const char *wmesg, int prio)
|
|
|
|
{
|
|
|
|
|
|
|
|
return quiesce_cpus(all_cpus, wmesg, prio);
|
|
|
|
}
|
2014-04-26 20:27:54 +00:00
|
|
|
|
|
|
|
/* Extra care is taken with this sysctl because the data type is volatile */
|
|
|
|
static int
|
|
|
|
sysctl_kern_smp_active(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int error, active;
|
|
|
|
|
|
|
|
active = smp_started;
|
|
|
|
error = SYSCTL_OUT(req, &active, sizeof(active));
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|