diff --git a/sys/amd64/acpica/madt.c b/sys/amd64/acpica/madt.c
index a4096820bea3..90ffd640dc19 100644
--- a/sys/amd64/acpica/madt.c
+++ b/sys/amd64/acpica/madt.c
@@ -557,9 +557,7 @@ madt_set_ids(void *dummy)
 
 	if (madt == NULL)
 		return;
-	for (i = 0; i < MAXCPU; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		pc = pcpu_find(i);
 		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
 		la = &lapics[pc->pc_apic_id];
diff --git a/sys/amd64/amd64/legacy.c b/sys/amd64/amd64/legacy.c
index 9793c0246608..9aa0365d26b9 100644
--- a/sys/amd64/amd64/legacy.c
+++ b/sys/amd64/amd64/legacy.c
@@ -269,12 +269,11 @@ cpu_identify(driver_t *driver, device_t parent)
 	 * so that these devices are attached after the Host-PCI
 	 * bridges (which are added at order 100).
 	 */
-	for (i = 0; i <= mp_maxid; i++)
-		if (!CPU_ABSENT(i)) {
-			child = BUS_ADD_CHILD(parent, 150, "cpu", i);
-			if (child == NULL)
-				panic("legacy_attach cpu");
-		}
+	CPU_FOREACH(i) {
+		child = BUS_ADD_CHILD(parent, 150, "cpu", i);
+		if (child == NULL)
+			panic("legacy_attach cpu");
+	}
 }
 
 static device_t
diff --git a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
index 2e0990e5a407..70282e9a3a27 100644
--- a/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
+++ b/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c
@@ -10583,8 +10583,6 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
 {
 #if defined(sun)
 	cpu_t *cp;
-#else
-	struct pcpu *cp;
 #endif
 	dtrace_buffer_t *buf;
 
@@ -10672,10 +10670,7 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
 #endif
 	ASSERT(MUTEX_HELD(&dtrace_lock));
 
-	for (i = 0; i <= mp_maxid; i++) {
-		if ((cp = pcpu_find(i)) == NULL)
-			continue;
-
+	CPU_FOREACH(i) {
 		if (cpu != DTRACE_CPUALL && cpu != i)
 			continue;
 
@@ -10715,10 +10710,7 @@ dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
 	 * Error allocating memory, so free the buffers that were
 	 * allocated before the failed allocation.
 	 */
-	for (i = 0; i <= mp_maxid; i++) {
-		if ((cp = pcpu_find(i)) == NULL)
-			continue;
-
+	CPU_FOREACH(i) {
 		if (cpu != DTRACE_CPUALL && cpu != i)
 			continue;
 
@@ -12621,10 +12613,10 @@ dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
 	maxper = (limit - (uintptr_t)start) / NCPU;
 	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
 
-	for (i = 0; i < NCPU; i++) {
 #if !defined(sun)
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
+#else
+	for (i = 0; i < NCPU; i++) {
 #endif
 		dstate->dtds_percpu[i].dtdsc_free = dvar = start;
 
diff --git a/sys/cddl/dev/cyclic/cyclic.c b/sys/cddl/dev/cyclic/cyclic.c
index 52ab2acaf6e3..df0de6be09da 100644
--- a/sys/cddl/dev/cyclic/cyclic.c
+++ b/sys/cddl/dev/cyclic/cyclic.c
@@ -1344,10 +1344,7 @@ cyclic_uninit(void)
 	cpu_t *c;
 	int id;
 
-	for (id = 0; id <= mp_maxid; id++) {
-		if (pcpu_find(id) == NULL)
-			continue;
-
+	CPU_FOREACH(id) {
 		c = &solaris_cpu[id];
 
 		if (c->cpu_cyclic == NULL)
diff --git a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
index eb66828b4478..b4c7eafbc20e 100644
--- a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -439,13 +439,10 @@ dtrace_gethrtime_init(void *arg)
 
 	/* The current CPU is the reference one. */
 	tsc_skew[curcpu] = 0;
-	for (i = 0; i <= mp_maxid; i++) {
+	CPU_FOREACH(i) {
 		if (i == curcpu)
 			continue;
 
-		if (pcpu_find(i) == NULL)
-			continue;
-
 		map = 0;
 		map |= (1 << curcpu);
 		map |= (1 << i);
diff --git a/sys/cddl/dev/dtrace/dtrace_debug.c b/sys/cddl/dev/dtrace/dtrace_debug.c
index 24a7a09a0dfc..03af9aa0efd9 100644
--- a/sys/cddl/dev/dtrace/dtrace_debug.c
+++ b/sys/cddl/dev/dtrace/dtrace_debug.c
@@ -108,10 +108,7 @@ dtrace_debug_init(void *dummy)
 	int i;
 	struct dtrace_debug_data *d;
 
-	for (i = 0; i <= mp_maxid; i++) {
-		if (pcpu_find(i) == NULL)
-			continue;
-
+	CPU_FOREACH(i) {
 		d = &dtrace_debug_data[i];
 
 		if (d->first == NULL) {
@@ -134,10 +131,7 @@ dtrace_debug_output(void)
 	struct dtrace_debug_data *d;
 	uintptr_t count;
 
-	for (i = 0; i <= mp_maxid; i++) {
-		if (pcpu_find(i) == NULL)
-			continue;
-
+	CPU_FOREACH(i) {
 		dtrace_debug_lock(i);
 
 		d = &dtrace_debug_data[i];
diff --git a/sys/cddl/dev/dtrace/dtrace_load.c b/sys/cddl/dev/dtrace/dtrace_load.c
index 5be746971770..accee4706ac4 100644
--- a/sys/cddl/dev/dtrace/dtrace_load.c
+++ b/sys/cddl/dev/dtrace/dtrace_load.c
@@ -30,8 +30,8 @@ dtrace_ap_start(void *dummy)
 	mutex_enter(&cpu_lock);
 
 	/* Setup the rest of the CPUs. */
-	for (i = 1; i <= mp_maxid; i++) {
-		if (pcpu_find(i) == NULL)
+	CPU_FOREACH(i) {
+		if (i == 0)
 			continue;
 
 		(void) dtrace_cpu_setup(CPU_CONFIG, i);
diff --git a/sys/cddl/dev/dtrace/i386/dtrace_subr.c b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
index 2839263455fc..9d85873ffdf0 100644
--- a/sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -439,13 +439,10 @@ dtrace_gethrtime_init(void *arg)
 
 	/* The current CPU is the reference one. */
 	tsc_skew[curcpu] = 0;
-	for (i = 0; i <= mp_maxid; i++) {
+	CPU_FOREACH(i) {
 		if (i == curcpu)
 			continue;
 
-		if (pcpu_find(i) == NULL)
-			continue;
-
 		map = 0;
 		map |= (1 << curcpu);
 		map |= (1 << i);
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index 58d897e9d08e..974f825ea526 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -468,9 +468,7 @@ linprocfs_dostat(PFS_FILL_ARGS)
 	    T2J(cp_time[CP_NICE]),
 	    T2J(cp_time[CP_SYS] /*+ cp_time[CP_INTR]*/),
 	    T2J(cp_time[CP_IDLE]));
-	for (i = 0; i <= mp_maxid; ++i) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		pcpu = pcpu_find(i);
 		cp = pcpu->pc_cp_time;
 		sbuf_printf(sb, "cpu%d %ld %ld %ld %ld\n", i,
diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c
index 06c295481322..d8cc17e2791e 100644
--- a/sys/dev/acpica/acpi_cpu.c
+++ b/sys/dev/acpica/acpi_cpu.c
@@ -445,9 +445,7 @@ acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
 
 	KASSERT(acpi_id != NULL, ("Null acpi_id"));
 	KASSERT(cpu_id != NULL, ("Null cpu_id"));
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		pcpu_data = pcpu_find(i);
 		KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
 		if (idx-- == 0) {
diff --git a/sys/i386/acpica/madt.c b/sys/i386/acpica/madt.c
index 114fbc78a3e1..5013c21287a8 100644
--- a/sys/i386/acpica/madt.c
+++ b/sys/i386/acpica/madt.c
@@ -557,9 +557,7 @@ madt_set_ids(void *dummy)
 
 	if (madt == NULL)
 		return;
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		pc = pcpu_find(i);
 		KASSERT(pc != NULL, ("no pcpu data for CPU %u", i));
 		la = &lapics[pc->pc_apic_id];
diff --git a/sys/i386/i386/legacy.c b/sys/i386/i386/legacy.c
index 53004887929a..39f3e229a1aa 100644
--- a/sys/i386/i386/legacy.c
+++ b/sys/i386/i386/legacy.c
@@ -290,12 +290,11 @@ cpu_identify(driver_t *driver, device_t parent)
 	 * so that these devices are attached after the Host-PCI
 	 * bridges (which are added at order 100).
 	 */
-	for (i = 0; i <= mp_maxid; i++)
-		if (!CPU_ABSENT(i)) {
-			child = BUS_ADD_CHILD(parent, 150, "cpu", i);
-			if (child == NULL)
-				panic("legacy_attach cpu");
-		}
+	CPU_FOREACH(i) {
+		child = BUS_ADD_CHILD(parent, 150, "cpu", i);
+		if (child == NULL)
+			panic("legacy_attach cpu");
+	}
 }
 
 static device_t
diff --git a/sys/i386/i386/mp_machdep.c b/sys/i386/i386/mp_machdep.c
index 0dfb3578e26b..36d54921697a 100644
--- a/sys/i386/i386/mp_machdep.c
+++ b/sys/i386/i386/mp_machdep.c
@@ -1573,9 +1573,7 @@ mp_ipi_intrcnt(void *dummy)
 	char buf[64];
 	int i;
 
-	for (i = 0; i < mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		snprintf(buf, sizeof(buf), "cpu%d: invltlb", i);
 		intrcnt_add(buf, &ipi_invltlb_counts[i]);
 		snprintf(buf, sizeof(buf), "cpu%d: invlrng", i);
diff --git a/sys/kern/kern_clock.c b/sys/kern/kern_clock.c
index a35e43bbfba4..7e93ddd663f4 100644
--- a/sys/kern/kern_clock.c
+++ b/sys/kern/kern_clock.c
@@ -318,9 +318,7 @@ read_cpu_time(long *cp_time)
 
 	/* Sum up global cp_time[]. */
 	bzero(cp_time, sizeof(long) * CPUSTATES);
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		pc = pcpu_find(i);
 		for (j = 0; j < CPUSTATES; j++)
 			cp_time[j] += pc->pc_cp_time[j];
diff --git a/sys/kern/kern_switch.c b/sys/kern/kern_switch.c
index 035661f40228..93cbf7b54db4 100644
--- a/sys/kern/kern_switch.c
+++ b/sys/kern/kern_switch.c
@@ -133,9 +133,7 @@ sysctl_stats_reset(SYSCTL_HANDLER_ARGS)
 		if (p == oidp || p->oid_arg1 == NULL)
 			continue;
 		counter = (uintptr_t)p->oid_arg1;
-		for (i = 0; i <= mp_maxid; i++) {
-			if (CPU_ABSENT(i))
-				continue;
+		CPU_FOREACH(i) {
 			*(long *)(dpcpu_off[i] + counter) = 0;
 		}
 	}
diff --git a/sys/kern/kern_timeout.c b/sys/kern/kern_timeout.c
index c38888a5f891..78b094928b9e 100644
--- a/sys/kern/kern_timeout.c
+++ b/sys/kern/kern_timeout.c
@@ -228,11 +228,9 @@ start_softclock(void *dummy)
 		panic("died while creating standard software ithreads");
 	cc->cc_cookie = softclock_ih;
 #ifdef SMP
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
+	CPU_FOREACH(cpu) {
 		if (cpu == timeout_cpu)
 			continue;
-		if (CPU_ABSENT(cpu))
-			continue;
 		cc = CC_CPU(cpu);
 		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
 		    INTR_MPSAFE, &cc->cc_cookie))
diff --git a/sys/kern/sched_4bsd.c b/sys/kern/sched_4bsd.c
index 2cdf2c493c68..61366cd60e06 100644
--- a/sys/kern/sched_4bsd.c
+++ b/sys/kern/sched_4bsd.c
@@ -1190,9 +1190,7 @@ sched_pickcpu(struct thread *td)
 		best = td->td_lastcpu;
 	else
 		best = NOCPU;
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		if (!THREAD_CAN_SCHED(td, cpu))
 			continue;
 
@@ -1627,9 +1625,7 @@ sched_affinity(struct thread *td)
 	 */
 	ts = td->td_sched;
 	ts->ts_flags &= ~TSF_AFFINITY;
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		if (!THREAD_CAN_SCHED(td, cpu)) {
 			ts->ts_flags |= TSF_AFFINITY;
 			break;
diff --git a/sys/kern/sched_ule.c b/sys/kern/sched_ule.c
index efc4ee98c5e2..dcbac7800d5d 100644
--- a/sys/kern/sched_ule.c
+++ b/sys/kern/sched_ule.c
@@ -1254,9 +1254,7 @@ sched_setup_smp(void)
 	int i;
 
 	cpu_top = smp_topo();
-	for (i = 0; i < MAXCPU; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		tdq = TDQ_CPU(i);
 		tdq_setup(tdq);
 		tdq->tdq_cg = smp_topo_find(cpu_top, i);
@@ -2485,7 +2483,7 @@ sched_load(void)
 	int i;
 
 	total = 0;
-	for (i = 0; i <= mp_maxid; i++)
+	CPU_FOREACH(i)
 		total += TDQ_CPU(i)->tdq_sysload;
 	return (total);
 #else
diff --git a/sys/kern/subr_lock.c b/sys/kern/subr_lock.c
index 977f9e57f7e1..bc43c9c0b85c 100644
--- a/sys/kern/subr_lock.c
+++ b/sys/kern/subr_lock.c
@@ -256,9 +256,7 @@ lock_prof_idle(void)
 
 	td = curthread;
 	thread_lock(td);
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		sched_bind(td, cpu);
 	}
 	sched_unbind(td);
diff --git a/sys/kern/subr_pcpu.c b/sys/kern/subr_pcpu.c
index ef69ff6c5586..24a12ea67159 100644
--- a/sys/kern/subr_pcpu.c
+++ b/sys/kern/subr_pcpu.c
@@ -317,9 +317,7 @@ DB_SHOW_COMMAND(dpcpu_off, db_show_dpcpu_off)
 {
 	int id;
 
-	for (id = 0; id <= mp_maxid; id++) {
-		if (CPU_ABSENT(id))
-			continue;
+	CPU_FOREACH(id) {
 		db_printf("dpcpu_off[%2d] = 0x%jx (+ DPCPU_START = %p)\n",
 		    id, (uintmax_t)dpcpu_off[id],
 		    (void *)(uintptr_t)(dpcpu_off[id] + DPCPU_START));
diff --git a/sys/kern/subr_smp.c b/sys/kern/subr_smp.c
index 45a374e5a964..3e4a2abcaf4c 100644
--- a/sys/kern/subr_smp.c
+++ b/sys/kern/subr_smp.c
@@ -395,9 +395,10 @@ smp_rendezvous_cpus(cpumask_t map,
 		return;
 	}
 
-	for (i = 0; i <= mp_maxid; i++)
-		if (((1 << i) & map) != 0 && !CPU_ABSENT(i))
+	CPU_FOREACH(i) {
+		if (((1 << i) & map) != 0)
 			ncpus++;
+	}
 	if (ncpus == 0)
 		panic("ncpus is 0 with map=0x%x", map);
 
diff --git a/sys/net/flowtable.c b/sys/net/flowtable.c
index b1fd15ff0c5d..49886709efb3 100644
--- a/sys/net/flowtable.c
+++ b/sys/net/flowtable.c
@@ -328,9 +328,7 @@ flowtable_show_stats(struct sbuf *sb, struct flowtable *ft)
 	if (ft->ft_flags & FL_PCPU) {
 		bzero(&fs, sizeof(fs));
 		pfs = &fs;
-		for (i = 0; i <= mp_maxid; i++) {
-			if (CPU_ABSENT(i))
-				continue;
+		CPU_FOREACH(i) {
 			pfs->ft_collisions += ft->ft_stats[i].ft_collisions;
 			pfs->ft_allocated += ft->ft_stats[i].ft_allocated;
 			pfs->ft_misses += ft->ft_stats[i].ft_misses;
@@ -1495,10 +1493,7 @@ flowtable_route_flush(struct flowtable *ft, struct rtentry *rt)
 	int i;
 
 	if (ft->ft_flags & FL_PCPU) {
-		for (i = 0; i <= mp_maxid; i++) {
-			if (CPU_ABSENT(i))
-				continue;
-
+		CPU_FOREACH(i) {
 			if (smp_started == 1) {
 				thread_lock(curthread);
 				sched_bind(curthread, i);
@@ -1527,10 +1522,7 @@ flowtable_clean_vnet(void)
 	ft = V_flow_list_head;
 	while (ft != NULL) {
 		if (ft->ft_flags & FL_PCPU) {
-			for (i = 0; i <= mp_maxid; i++) {
-				if (CPU_ABSENT(i))
-					continue;
-
+			CPU_FOREACH(i) {
 				if (smp_started == 1) {
 					thread_lock(curthread);
 					sched_bind(curthread, i);
@@ -1799,9 +1791,7 @@ flowtable_show_vnet(void)
 	while (ft != NULL) {
 		printf("name: %s\n", ft->ft_name);
 		if (ft->ft_flags & FL_PCPU) {
-			for (i = 0; i <= mp_maxid; i++) {
-				if (CPU_ABSENT(i))
-					continue;
+			CPU_FOREACH(i) {
 				flowtable_show(ft, i);
 			}
 		} else {
diff --git a/sys/net/if_epair.c b/sys/net/if_epair.c
index 316418217dea..13907e42461e 100644
--- a/sys/net/if_epair.c
+++ b/sys/net/if_epair.c
@@ -189,10 +189,7 @@ epair_dpcpu_init(void)
 	struct eid_list *s;
 	u_int cpuid;
 
-	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
-
+	CPU_FOREACH(cpuid) {
 		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
 
 		/* Initialize per-cpu lock. */
@@ -217,10 +214,7 @@ epair_dpcpu_detach(void)
 	struct epair_dpcpu *epair_dpcpu;
 	u_int cpuid;
 
-	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
-
+	CPU_FOREACH(cpuid) {
 		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
 
 		/* Destroy per-cpu lock. */
@@ -330,10 +324,7 @@ epair_remove_ifp_from_draining(struct ifnet *ifp)
 	struct epair_ifp_drain *elm, *tvar;
 	u_int cpuid;
 
-	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
-
+	CPU_FOREACH(cpuid) {
 		epair_dpcpu = DPCPU_ID_PTR(cpuid, epair_dpcpu);
 		EPAIR_LOCK(epair_dpcpu);
 		STAILQ_FOREACH_SAFE(elm, &epair_dpcpu->epair_ifp_drain_list,
diff --git a/sys/net/netisr.c b/sys/net/netisr.c
index 4ac1fae4db72..6df544d8c5e8 100644
--- a/sys/net/netisr.c
+++ b/sys/net/netisr.c
@@ -339,9 +339,7 @@ netisr_register(const struct netisr_handler *nhp)
 	} else
 		netisr_proto[proto].np_qlimit = nhp->nh_qlimit;
 	netisr_proto[proto].np_policy = nhp->nh_policy;
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
 		bzero(npwp, sizeof(*npwp));
 		npwp->nw_qlimit = netisr_proto[proto].np_qlimit;
@@ -373,9 +371,7 @@ netisr_clearqdrops(const struct netisr_handler *nhp)
 	    ("%s(%u): protocol not registered for %s", __func__, proto,
 	    name));
 
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
 		npwp->nw_qdrops = 0;
 	}
@@ -408,9 +404,7 @@ netisr_getqdrops(const struct netisr_handler *nhp, u_int64_t *qdropp)
 	    ("%s(%u): protocol not registered for %s", __func__, proto,
 	    name));
 
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
 		*qdropp += npwp->nw_qdrops;
 	}
@@ -474,9 +468,7 @@ netisr_setqlimit(const struct netisr_handler *nhp, u_int qlimit)
 	    name));
 
 	netisr_proto[proto].np_qlimit = qlimit;
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
 		npwp->nw_qlimit = qlimit;
 	}
@@ -540,9 +532,7 @@ netisr_unregister(const struct netisr_handler *nhp)
 	netisr_proto[proto].np_m2cpuid = NULL;
 	netisr_proto[proto].np_qlimit = 0;
 	netisr_proto[proto].np_policy = 0;
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		npwp = &(DPCPU_ID_PTR(i, nws))->nws_work[proto];
 		netisr_drain_proto(npwp);
 		bzero(npwp, sizeof(*npwp));
@@ -1136,9 +1126,7 @@ sysctl_netisr_workstream(SYSCTL_HANDLER_ARGS)
 	    M_ZERO | M_WAITOK);
 	counter = 0;
 	NETISR_RLOCK(&tracker);
-	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
+	CPU_FOREACH(cpuid) {
 		nwsp = DPCPU_ID_PTR(cpuid, nws);
 		if (nwsp->nws_intr_event == NULL)
 			continue;
@@ -1192,9 +1180,7 @@ sysctl_netisr_work(SYSCTL_HANDLER_ARGS)
 	    M_TEMP, M_ZERO | M_WAITOK);
 	counter = 0;
 	NETISR_RLOCK(&tracker);
-	for (cpuid = 0; cpuid < MAXCPU; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
+	CPU_FOREACH(cpuid) {
 		nwsp = DPCPU_ID_PTR(cpuid, nws);
 		if (nwsp->nws_intr_event == NULL)
 			continue;
@@ -1243,9 +1229,7 @@ DB_SHOW_COMMAND(netisr, db_show_netisr)
 
 	db_printf("%3s %6s %5s %5s %5s %8s %8s %8s %8s\n", "CPU", "Proto",
 	    "Len", "WMark", "Max", "Disp", "HDisp", "Drop", "Queue");
-	for (cpuid = 0; cpuid <= mp_maxid; cpuid++) {
-		if (CPU_ABSENT(cpuid))
-			continue;
+	CPU_FOREACH(cpuid) {
 		nwsp = DPCPU_ID_PTR(cpuid, nws);
 		if (nwsp->nws_intr_event == NULL)
 			continue;
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index e1b9a08109d4..a3855ca62639 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -620,9 +620,7 @@ cache_drain(uma_zone_t zone)
 	 * it is used elsewhere. Should the tear-down path be made special
	 * there in some form?
 	 */
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		cache = &zone->uz_cpu[cpu];
 		bucket_drain(zone, cache->uc_allocbucket);
 		bucket_drain(zone, cache->uc_freebucket);
@@ -3075,9 +3073,7 @@ uma_print_zone(uma_zone_t zone)
 	    zone->uz_name, zone, zone->uz_size, zone->uz_flags);
 	LIST_FOREACH(kl, &zone->uz_kegs, kl_link)
 		uma_print_keg(kl->kl_keg);
-	for (i = 0; i <= mp_maxid; i++) {
-		if (CPU_ABSENT(i))
-			continue;
+	CPU_FOREACH(i) {
 		cache = &zone->uz_cpu[i];
 		printf("CPU %d Cache:\n", i);
 		cache_print(cache);
@@ -3106,9 +3102,7 @@ uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
 
 	allocs = frees = 0;
 	cachefree = 0;
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		cache = &z->uz_cpu[cpu];
 		if (cache->uc_allocbucket != NULL)
 			cachefree += cache->uc_allocbucket->ub_cnt;
diff --git a/sys/x86/x86/mca.c b/sys/x86/x86/mca.c
index eb8ab271f4f7..089c65b23c98 100644
--- a/sys/x86/x86/mca.c
+++ b/sys/x86/x86/mca.c
@@ -558,7 +558,7 @@ mca_scan(enum scan_mode mode)
 		 * If this is a bank this CPU monitors via CMCI,
 		 * update the threshold.
 		 */
-		if (PCPU_GET(cmci_mask) & (1 << i))
+		if (PCPU_GET(cmci_mask) & 1 << i)
 			cmci_update(mode, i, valid, &rec);
 #endif
 	}
@@ -580,9 +580,7 @@ mca_scan_cpus(void *context, int pending)
 	td = curthread;
 	count = 0;
 	thread_lock(td);
-	for (cpu = 0; cpu <= mp_maxid; cpu++) {
-		if (CPU_ABSENT(cpu))
-			continue;
+	CPU_FOREACH(cpu) {
 		sched_bind(td, cpu);
 		thread_unlock(td);
 		count += mca_scan(POLLED);
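A note on the iterator adopted throughout this patch (commentary, not part of the diff): CPU_FOREACH() comes from sys/sys/smp.h, and at around this revision it expands to roughly the sketch below, i.e. the same mp_maxid walk with a CPU_ABSENT() filter that the removed loops open-coded. See the header for the authoritative definition.

	/*
	 * Approximate expansion of CPU_FOREACH(); see sys/sys/smp.h for
	 * the real definition.  It iterates i over ids 0..mp_maxid
	 * (inclusive) and skips any id with no CPU present, which is
	 * exactly what the removed open-coded loops did by hand.
	 */
	#define	CPU_FOREACH(i)						\
		for ((i) = 0; (i) <= mp_maxid; (i)++)			\
			if (!CPU_ABSENT((i)))

Beyond removing duplication, the conversion also irons out two quirks visible above: the "i < mp_maxid" bound in sys/i386/i386/mp_machdep.c silently skipped the highest CPU id (mp_maxid is inclusive), and the MAXCPU-bound loops (sys/amd64/acpica/madt.c, sys/kern/sched_ule.c, the netisr sysctl handlers) probed ids beyond mp_maxid that can never be present.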