Fix all users of mp_maxid to use the same semantics, namely:

1) mp_maxid is a valid FreeBSD CPU ID in the range 0 .. MAXCPU - 1.
2) For all active CPUs in the system, PCPU_GET(cpuid) <= mp_maxid.

Approved by:	re (scottl)
Tested on:	i386, amd64, alpha
jhb 2003-12-03 14:57:26 +00:00
parent 907202ec1f
commit 4b61439e79
7 changed files with 15 additions and 24 deletions
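
In practice the new convention means per-CPU loops run up to and including mp_maxid and skip IDs with no CPU behind them, as in the minimal sketch below (illustrative only, not part of this commit; visit_active_cpus() is a made-up name):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/smp.h>

/*
 * Illustrative sketch only: walk every active CPU under the new
 * mp_maxid semantics.  mp_maxid is an inclusive upper bound on CPU IDs,
 * so the loop condition is <= and CPU_ABSENT() skips unused IDs.
 */
static void
visit_active_cpus(void)
{
	int i;

	for (i = 0; i <= mp_maxid; i++) {
		if (CPU_ABSENT(i))
			continue;
		/* i is the FreeBSD CPU ID of an active CPU. */
	}
}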

View File

@@ -339,7 +339,6 @@ cpu_mp_setmaxid(void)
 			continue;
 		mp_maxid = i;
 	}
-	mp_maxid++;
 }
 
 int

View File

@@ -179,8 +179,8 @@ cpu_add(u_int apic_id, char boot_cpu)
 		cpu_info[apic_id].cpu_bsp = 1;
 	}
 	mp_ncpus++;
-	if (apic_id >= mp_maxid)
-		mp_maxid = apic_id + 1;
+	if (apic_id > mp_maxid)
+		mp_maxid = apic_id;
 	if (bootverbose)
 		printf("SMP: Added CPU %d (%s)\n", apic_id, boot_cpu ? "BSP" :
 		    "AP");

View File

@@ -258,7 +258,7 @@ void
 cpu_mp_setmaxid(void)
 {
 
-	mp_maxid = MAXCPU;
+	mp_maxid = MAXCPU - 1;
 }
 
 int

View File

@@ -418,7 +418,7 @@ sched_balance(void *arg)
 	if (smp_started == 0)
 		goto out;
-	for (i = 0; i < mp_maxid; i++) {
+	for (i = 0; i <= mp_maxid; i++) {
 		if (CPU_ABSENT(i) || (i & stopped_cpus) != 0)
 			continue;
 		kseq = KSEQ_CPU(i);

View File

@@ -159,14 +159,14 @@ cpu_mp_setmaxid(void)
 		    strcmp(buf, "cpu") == 0)
 			cpus++;
 	}
-	mp_maxid = cpus;
+	mp_maxid = cpus - 1;
 }
 
 int
 cpu_mp_probe(void)
 {
 
-	return (mp_maxid > 1);
+	return (mp_maxid > 0);
 }
 
 static void

View File

@@ -81,7 +81,7 @@ extern volatile int smp_started;
  * The cpu_setmaxid() function is called very early during the boot process
  * so that the MD code may set mp_maxid to provide an upper bound on CPU IDs
  * that other subsystems may use. If a platform is not able to determine
- * the exact maximum ID that early, then it may set mp_maxid to MAXCPU.
+ * the exact maximum ID that early, then it may set mp_maxid to MAXCPU - 1.
  */
 
 struct thread;

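Since mp_maxid is now an inclusive CPU ID rather than a CPU count, storage indexed by CPU ID needs mp_maxid + 1 entries, which is what the uma_core.c hunks below do for the per-CPU caches. A minimal sketch of the sizing rule, assuming the stock malloc(9) allocator (pcpu_counter and pcpu_counter_alloc() are made-up names, not kernel symbols):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/smp.h>

/*
 * Illustrative sketch only: a per-CPU array indexed by CPU ID must have
 * mp_maxid + 1 slots so that index mp_maxid itself is valid.
 */
static uint64_t *pcpu_counter;

static void
pcpu_counter_alloc(void)
{

	pcpu_counter = malloc(sizeof(*pcpu_counter) * (mp_maxid + 1),
	    M_TEMP, M_WAITOK | M_ZERO);
}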
View File

@@ -126,14 +126,6 @@ static int uma_boot_free = 0;
 /* Is the VM done starting up? */
 static int booted = 0;
 
-/*
- * Rather than #ifdef SMP all over, just give us a bogus definition for
- * this on UP.
- */
-#ifndef SMP
-static int mp_maxid = 1;
-#endif
-
 /*
  * This is the handle used to schedule events that need to happen
  * outside of the allocation fast path.
@@ -350,7 +342,7 @@ zone_timeout(uma_zone_t zone)
 	 * far out of sync.
 	 */
 	if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL)) {
-		for (cpu = 0; cpu < mp_maxid; cpu++) {
+		for (cpu = 0; cpu <= mp_maxid; cpu++) {
 			if (CPU_ABSENT(cpu))
 				continue;
 			CPU_LOCK(cpu);
@@ -577,7 +569,7 @@ cache_drain(uma_zone_t zone)
 	/*
 	 * We have to lock each cpu cache before locking the zone
 	 */
-	for (cpu = 0; cpu < mp_maxid; cpu++) {
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (CPU_ABSENT(cpu))
 			continue;
 		CPU_LOCK(cpu);
@@ -609,7 +601,7 @@ cache_drain(uma_zone_t zone)
 		LIST_REMOVE(bucket, ub_link);
 		bucket_free(bucket);
 	}
-	for (cpu = 0; cpu < mp_maxid; cpu++) {
+	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (CPU_ABSENT(cpu))
 			continue;
 		CPU_UNLOCK(cpu);
@@ -1228,7 +1220,7 @@ uma_startup(void *bootmem)
 	/* "manually" Create the initial zone */
 	args.name = "UMA Zones";
 	args.size = sizeof(struct uma_zone) +
-	    (sizeof(struct uma_cache) * mp_maxid);
+	    (sizeof(struct uma_cache) * (mp_maxid + 1));
 	args.ctor = zone_ctor;
 	args.dtor = zone_dtor;
 	args.uminit = zero_init;
@@ -1239,7 +1231,7 @@
 	zone_ctor(zones, sizeof(struct uma_zone), &args);
 
 	/* Initialize the pcpu cache lock set once and for all */
-	for (i = 0; i < mp_maxid; i++)
+	for (i = 0; i <= mp_maxid; i++)
 		CPU_LOCK_INIT(i);
 #ifdef UMA_DEBUG
 	printf("Filling boot free list.\n");
@@ -2105,7 +2097,7 @@ uma_print_zone(uma_zone_t zone)
 	printf("Full slabs:\n");
 	LIST_FOREACH(slab, &zone->uz_full_slab, us_link)
 		slab_print(slab);
-	for (i = 0; i < mp_maxid; i++) {
+	for (i = 0; i <= mp_maxid; i++) {
 		if (CPU_ABSENT(i))
 			continue;
 		cache = &zone->uz_cpu[i];
@@ -2153,7 +2145,7 @@ sysctl_vm_zone(SYSCTL_HANDLER_ARGS)
 		if (cnt == 0) /* list may have changed size */
 			break;
 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
-			for (cpu = 0; cpu < mp_maxid; cpu++) {
+			for (cpu = 0; cpu <= mp_maxid; cpu++) {
 				if (CPU_ABSENT(cpu))
 					continue;
 				CPU_LOCK(cpu);
@@ -2162,7 +2154,7 @@
 		ZONE_LOCK(z);
 		cachefree = 0;
 		if (!(z->uz_flags & UMA_ZFLAG_INTERNAL)) {
-			for (cpu = 0; cpu < mp_maxid; cpu++) {
+			for (cpu = 0; cpu <= mp_maxid; cpu++) {
 				if (CPU_ABSENT(cpu))
 					continue;
 				cache = &z->uz_cpu[cpu];