Update several places that iterate over CPUs to use CPU_FOREACH().
This commit is contained in:
parent 4945d8d26d
commit 9b74a62d73
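Every hunk below replaces an open-coded iteration over possibly sparse CPU IDs with the CPU_FOREACH() iterator. As a reminder of what the macro does, here is a minimal sketch of its definition as it appears in sys/sys/smp.h around this revision (consult the tree at this commit for the authoritative version):

        /* Sketch of the iterator; see sys/sys/smp.h for the real definition. */
        #define CPU_FOREACH(i)                                          \
                for ((i) = 0; (i) <= mp_maxid; (i)++)                   \
                        if (!CPU_ABSENT((i)))

Because the expansion already walks IDs 0 through mp_maxid and skips absent ones, a conversion such as

        for (i = 0; i <= mp_maxid; i++) {
                if (CPU_ABSENT(i))
                        continue;
                ...
        }

into

        CPU_FOREACH(i) {
                ...
        }

is behavior-preserving. It also quietly corrects call sites that used a wrong bound, e.g. "i < mp_maxid" (which skipped the last CPU) or "i < MAXCPU" (which probed IDs above mp_maxid).
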
@@ -557,9 +557,7 @@ madt_set_ids(void *dummy)
 
         if (madt == NULL)
                 return;
-        for (i = 0; i < MAXCPU; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 pc = pcpu_find(i);
                 KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
                 la = &lapics[pc->pc_apic_id];

@@ -269,12 +269,11 @@ cpu_identify(driver_t *driver, device_t parent)
          * so that these devices are attached after the Host-PCI
          * bridges (which are added at order 100).
          */
-        for (i = 0; i <= mp_maxid; i++)
-                if (!CPU_ABSENT(i)) {
-                        child = BUS_ADD_CHILD(parent, 150, "cpu", i);
-                        if (child == NULL)
-                                panic("legacy_attach cpu");
-                }
+        CPU_FOREACH(i) {
+                child = BUS_ADD_CHILD(parent, 150, "cpu", i);
+                if (child == NULL)
+                        panic("legacy_attach cpu");
+        }
 }
 
 static device_t

@@ -10583,8 +10583,6 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
 {
 #if defined(sun)
         cpu_t *cp;
-#else
-        struct pcpu *cp;
 #endif
         dtrace_buffer_t *buf;
 

@@ -10672,10 +10670,7 @@ err:
 #endif
 
         ASSERT(MUTEX_HELD(&dtrace_lock));
-        for (i = 0; i <= mp_maxid; i++) {
-                if ((cp = pcpu_find(i)) == NULL)
-                        continue;
-
+        CPU_FOREACH(i) {
                 if (cpu != DTRACE_CPUALL && cpu != i)
                         continue;
 

@@ -10715,10 +10710,7 @@ err:
          * Error allocating memory, so free the buffers that were
          * allocated before the failed allocation.
          */
-        for (i = 0; i <= mp_maxid; i++) {
-                if ((cp = pcpu_find(i)) == NULL)
-                        continue;
-
+        CPU_FOREACH(i) {
                 if (cpu != DTRACE_CPUALL && cpu != i)
                         continue;
 

@@ -12621,10 +12613,10 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
         maxper = (limit - (uintptr_t)start) / NCPU;
         maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
 
-        for (i = 0; i < NCPU; i++) {
 #if !defined(sun)
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
+#else
+        for (i = 0; i < NCPU; i++) {
 #endif
                 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
 

@@ -1344,10 +1344,7 @@ cyclic_uninit(void)
         cpu_t *c;
         int id;
 
-        for (id = 0; id <= mp_maxid; id++) {
-                if (pcpu_find(id) == NULL)
-                        continue;
-
+        CPU_FOREACH(id) {
                 c = &solaris_cpu[id];
 
                 if (c->cpu_cyclic == NULL)

@@ -439,13 +439,10 @@ dtrace_gethrtime_init(void *arg)
         /* The current CPU is the reference one. */
         tsc_skew[curcpu] = 0;
 
-        for (i = 0; i <= mp_maxid; i++) {
+        CPU_FOREACH(i) {
                 if (i == curcpu)
                         continue;
 
-                if (pcpu_find(i) == NULL)
-                        continue;
-
                 map = 0;
                 map |= (1 << curcpu);
                 map |= (1 << i);

@@ -108,10 +108,7 @@ dtrace_debug_init(void *dummy)
         int i;
         struct dtrace_debug_data *d;
 
-        for (i = 0; i <= mp_maxid; i++) {
-                if (pcpu_find(i) == NULL)
-                        continue;
-
+        CPU_FOREACH(i) {
                 d = &dtrace_debug_data[i];
 
                 if (d->first == NULL) {

@@ -134,10 +131,7 @@ dtrace_debug_output(void)
         struct dtrace_debug_data *d;
         uintptr_t count;
 
-        for (i = 0; i <= mp_maxid; i++) {
-                if (pcpu_find(i) == NULL)
-                        continue;
-
+        CPU_FOREACH(i) {
                 dtrace_debug_lock(i);
 
                 d = &dtrace_debug_data[i];

@@ -30,8 +30,8 @@ dtrace_ap_start(void *dummy)
         mutex_enter(&cpu_lock);
 
         /* Setup the rest of the CPUs. */
-        for (i = 1; i <= mp_maxid; i++) {
-                if (pcpu_find(i) == NULL)
+        CPU_FOREACH(i) {
+                if (i == 0)
                         continue;
 
                 (void) dtrace_cpu_setup(CPU_CONFIG, i);

@@ -439,13 +439,10 @@ dtrace_gethrtime_init(void *arg)
         /* The current CPU is the reference one. */
         tsc_skew[curcpu] = 0;
 
-        for (i = 0; i <= mp_maxid; i++) {
+        CPU_FOREACH(i) {
                 if (i == curcpu)
                         continue;
 
-                if (pcpu_find(i) == NULL)
-                        continue;
-
                 map = 0;
                 map |= (1 << curcpu);
                 map |= (1 << i);

@@ -468,9 +468,7 @@ linprocfs_dostat(PFS_FILL_ARGS)
             T2J(cp_time[CP_NICE]),
             T2J(cp_time[CP_SYS] /*+ cp_time[CP_INTR]*/),
             T2J(cp_time[CP_IDLE]));
-        for (i = 0; i <= mp_maxid; ++i) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 pcpu = pcpu_find(i);
                 cp = pcpu->pc_cp_time;
                 sbuf_printf(sb, "cpu%d %ld %ld %ld %ld\n", i,

@@ -445,9 +445,7 @@ acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
 
         KASSERT(acpi_id != NULL, ("Null acpi_id"));
         KASSERT(cpu_id != NULL, ("Null cpu_id"));
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 pcpu_data = pcpu_find(i);
                 KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
                 if (idx-- == 0) {

@@ -557,9 +557,7 @@ madt_set_ids(void *dummy)
 
         if (madt == NULL)
                 return;
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 pc = pcpu_find(i);
                 KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
                 la = &lapics[pc->pc_apic_id];

@@ -290,12 +290,11 @@ cpu_identify(driver_t *driver, device_t parent)
          * so that these devices are attached after the Host-PCI
          * bridges (which are added at order 100).
          */
-        for (i = 0; i <= mp_maxid; i++)
-                if (!CPU_ABSENT(i)) {
-                        child = BUS_ADD_CHILD(parent, 150, "cpu", i);
-                        if (child == NULL)
-                                panic("legacy_attach cpu");
-                }
+        CPU_FOREACH(i) {
+                child = BUS_ADD_CHILD(parent, 150, "cpu", i);
+                if (child == NULL)
+                        panic("legacy_attach cpu");
+        }
 }
 
 static device_t

@@ -1573,9 +1573,7 @@ mp_ipi_intrcnt(void *dummy)
         char buf[64];
         int i;
 
-        for (i = 0; i < mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
                 intrcnt_add(buf, &ipi_invltlb_counts[i]);
                 snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);

@@ -318,9 +318,7 @@ read_cpu_time(long *cp_time)
 
         /* Sum up global cp_time[]. */
         bzero(cp_time, sizeof(long) * CPUSTATES);
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 pc = pcpu_find(i);
                 for (j = 0; j < CPUSTATES; j++)
                         cp_time[j] += pc->pc_cp_time[j];

@@ -133,9 +133,7 @@ sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
                 if (p == oidp || p->oid_arg1 == NULL)
                         continue;
                 counter = (uintptr_t)p->oid_arg1;
-                for (i = 0; i <= mp_maxid; i++) {
-                        if (CPU_ABSENT(i))
-                                continue;
+                CPU_FOREACH(i) {
                         *(long *)(dpcpu_off[i] + counter) = 0;
                 }
         }

@@ -228,11 +228,9 @@ start_softclock(void *dummy)
                 panic("died while creating standard software ithreads");
         cc->cc_cookie = softclock_ih;
 #ifdef SMP
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
+        CPU_FOREACH(cpu) {
                 if (cpu == timeout_cpu)
                         continue;
-                if (CPU_ABSENT(cpu))
-                        continue;
                 cc = CC_CPU(cpu);
                 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
                     INTR_MPSAFE, &cc->cc_cookie))

@@ -1190,9 +1190,7 @@ sched_pickcpu(struct thread *td)
                 best = td->td_lastcpu;
         else
                 best = NOCPU;
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 if (!THREAD_CAN_SCHED(td, cpu))
                         continue;
 

@@ -1627,9 +1625,7 @@ sched_affinity(struct thread *td)
          */
         ts = td->td_sched;
         ts->ts_flags &= ~TSF_AFFINITY;
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 if (!THREAD_CAN_SCHED(td, cpu)) {
                         ts->ts_flags |= TSF_AFFINITY;
                         break;

@@ -1254,9 +1254,7 @@ sched_setup_smp(void)
         int i;
 
         cpu_top = smp_topo();
-        for (i = 0; i < MAXCPU; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 tdq = TDQ_CPU(i);
                 tdq_setup(tdq);
                 tdq->tdq_cg = smp_topo_find(cpu_top, i);

@@ -2485,7 +2483,7 @@ sched_load(void)
         int i;
 
         total = 0;
-        for (i = 0; i <= mp_maxid; i++)
+        CPU_FOREACH(i)
                 total += TDQ_CPU(i)->tdq_sysload;
         return (total);
 #else

@@ -256,9 +256,7 @@ lock_prof_idle(void)
 
         td = curthread;
         thread_lock(td);
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 sched_bind(td, cpu);
         }
         sched_unbind(td);

@@ -317,9 +317,7 @@ DB_SHOW_COMMAND(dpcpu_off, db_show_dpcpu_off)
 {
         int id;
 
-        for (id = 0; id <= mp_maxid; id++) {
-                if (CPU_ABSENT(id))
-                        continue;
+        CPU_FOREACH(id) {
                 db_printf("dpcpu_off[%2d] = 0x%jx (+ DPCPU_START = %p)\n",
                     id, (uintmax_t)dpcpu_off[id],
                     (void *)(uintptr_t)(dpcpu_off[id] + DPCPU_START));

@@ -395,9 +395,10 @@ smp_rendezvous_cpus(cpumask_t map,
                 return;
         }
 
-        for (i = 0; i <= mp_maxid; i++)
-                if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
+        CPU_FOREACH(i) {
+                if (((1 << i) & map) != 0)
                         ncpus++;
+        }
         if (ncpus == 0)
                 panic("ncpus is 0 with map=0x%x", map);
 

@@ -328,9 +328,7 @@ flowtable_show_stats(struct sbuf *sb, struct flowtable *ft)
         if (ft->ft_flags & FL_PCPU) {
                 bzero(&fs, sizeof(fs));
                 pfs = &fs;
-                for (i = 0; i <= mp_maxid; i++) {
-                        if (CPU_ABSENT(i))
-                                continue;
+                CPU_FOREACH(i) {
                         pfs->ft_collisions += ft->ft_stats[i].ft_collisions;
                         pfs->ft_allocated += ft->ft_stats[i].ft_allocated;
                         pfs->ft_misses += ft->ft_stats[i].ft_misses;

@@ -1495,10 +1493,7 @@ flowtable_route_flush(struct flowtable *ft, struct rtentry *rt)
         int i;
 
         if (ft->ft_flags & FL_PCPU) {
-                for (i = 0; i <= mp_maxid; i++) {
-                        if (CPU_ABSENT(i))
-                                continue;
-
+                CPU_FOREACH(i) {
                         if (smp_started == 1) {
                                 thread_lock(curthread);
                                 sched_bind(curthread, i);

@@ -1527,10 +1522,7 @@ flowtable_clean_vnet(void)
         ft = V_flow_list_head;
         while (ft != NULL) {
                 if (ft->ft_flags & FL_PCPU) {
-                        for (i = 0; i <= mp_maxid; i++) {
-                                if (CPU_ABSENT(i))
-                                        continue;
-
+                        CPU_FOREACH(i) {
                                 if (smp_started == 1) {
                                         thread_lock(curthread);
                                         sched_bind(curthread, i);

@@ -1799,9 +1791,7 @@ flowtable_show_vnet(void)
         while (ft != NULL) {
                 printf("name: %s\n", ft->ft_name);
                 if (ft->ft_flags & FL_PCPU) {
-                        for (i = 0; i <= mp_maxid; i++) {
-                                if (CPU_ABSENT(i))
-                                        continue;
+                        CPU_FOREACH(i) {
                                 flowtable_show(ft, i);
                         }
                 } else {

@@ -189,10 +189,7 @@ epair_dpcpu_init(void)
         struct eid_list *s;
         u_int cpuid;
 
-        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
-
+        CPU_FOREACH(cpuid) {
                 epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
 
                 /* Initialize per-cpu lock. */

@@ -217,10 +214,7 @@ epair_dpcpu_detach(void)
         struct epair_dpcpu *epair_dpcpu;
         u_int cpuid;
 
-        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
-
+        CPU_FOREACH(cpuid) {
                 epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
 
                 /* Destroy per-cpu lock. */

@@ -330,10 +324,7 @@ epair_remove_ifp_from_draining(struct ifnet *ifp)
         struct epair_ifp_drain *elm, *tvar;
         u_int cpuid;
 
-        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
-
+        CPU_FOREACH(cpuid) {
                 epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
                 EPAIR_LOCK(epair_dpcpu);
                 STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,

@@ -339,9 +339,7 @@ netisr_register(const struct netisr_handler *nhp)
         } else
                 netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
         netisr_proto[proto].np_policy = nhp->nh_policy;
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                 bzero(npwp, sizeof(*npwp));
                 npwp->nw_qlimit = netisr_proto[proto].np_qlimit;

@@ -373,9 +371,7 @@ netisr_clearqdrops(const struct netisr_handler *nhp)
             ("%s(%u): protocol not registered for %s", __func__, proto,
             name));
 
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                 npwp->nw_qdrops = 0;
         }

@@ -408,9 +404,7 @@ netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
             ("%s(%u): protocol not registered for %s", __func__, proto,
             name));
 
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                 *qdropp += npwp->nw_qdrops;
         }

@@ -474,9 +468,7 @@ netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
             name));
 
         netisr_proto[proto].np_qlimit = qlimit;
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                 npwp->nw_qlimit = qlimit;
         }

@@ -540,9 +532,7 @@ netisr_unregister(const struct netisr_handler *nhp)
         netisr_proto[proto].np_m2cpuid = NULL;
         netisr_proto[proto].np_qlimit = 0;
         netisr_proto[proto].np_policy = 0;
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
                 netisr_drain_proto(npwp);
                 bzero(npwp, sizeof(*npwp));

@@ -1136,9 +1126,7 @@ sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
             M_ZERO | M_WAITOK);
         counter = 0;
         NETISR_RLOCK(&tracker);
-        for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
+        CPU_FOREACH(cpuid) {
                 nwsp = DPCPU_ID_PTR(cpuid, nws);
                 if (nwsp->nws_intr_event == NULL)
                         continue;

@@ -1192,9 +1180,7 @@ sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
             M_TEMP, M_ZERO | M_WAITOK);
         counter = 0;
         NETISR_RLOCK(&tracker);
-        for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
+        CPU_FOREACH(cpuid) {
                 nwsp = DPCPU_ID_PTR(cpuid, nws);
                 if (nwsp->nws_intr_event == NULL)
                         continue;

@@ -1243,9 +1229,7 @@ DB_SHOW_COMMAND(netisr, db_show_netisr)
 
         db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
             "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
-        for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-                if (CPU_ABSENT(cpuid))
-                        continue;
+        CPU_FOREACH(cpuid) {
                 nwsp = DPCPU_ID_PTR(cpuid, nws);
                 if (nwsp->nws_intr_event == NULL)
                         continue;

@@ -620,9 +620,7 @@ cache_drain(uma_zone_t zone)
          * it is used elsewhere. Should the tear-down path be made special
          * there in some form?
          */
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 cache = &zone->uz_cpu[cpu];
                 bucket_drain(zone, cache->uc_allocbucket);
                 bucket_drain(zone, cache->uc_freebucket);

@@ -3075,9 +3073,7 @@ uma_print_zone(uma_zone_t zone)
             zone->uz_name, zone, zone->uz_size, zone->uz_flags);
         LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
                 uma_print_keg(kl->kl_keg);
-        for (i = 0; i <= mp_maxid; i++) {
-                if (CPU_ABSENT(i))
-                        continue;
+        CPU_FOREACH(i) {
                 cache = &zone->uz_cpu[i];
                 printf("CPU %d Cache:\n", i);
                 cache_print(cache);

@@ -3106,9 +3102,7 @@ uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
 
         allocs = frees = 0;
         cachefree = 0;
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 cache = &z->uz_cpu[cpu];
                 if (cache->uc_allocbucket != NULL)
                         cachefree += cache->uc_allocbucket->ub_cnt;

@@ -558,7 +558,7 @@ mca_scan(enum scan_mode mode)
                  * If this is a bank this CPU monitors via CMCI,
                  * update the threshold.
                  */
-                if (PCPU_GET(cmci_mask) & (1 << i))
+                if (PCPU_GET(cmci_mask) & 1 << i)
                         cmci_update(mode, i, valid, &rec);
 #endif
         }

@@ -580,9 +580,7 @@ mca_scan_cpus(void *context, int pending)
         td = curthread;
         count = 0;
         thread_lock(td);
-        for (cpu = 0; cpu <= mp_maxid; cpu++) {
-                if (CPU_ABSENT(cpu))
-                        continue;
+        CPU_FOREACH(cpu) {
                 sched_bind(td, cpu);
                 thread_unlock(td);
                 count += mca_scan(POLLED);