Make CPU_SET macros compliant with other implementations

The introduction of <sched.h> improved compatibility with some
third-party software, but caused the configure scripts of some ports
to assume that they were being run in a GLIBC-compatible environment.

Parts of sched.h were made conditional on -D_WITH_CPU_SET_T being
added to ports, but compatibility issues remained due to invalid
assumptions made in autoconf-generated configure scripts.

The difference between the FreeBSD versions of macros like CPU_AND,
CPU_OR, etc. and the GLIBC versions lay in the number of arguments:
FreeBSD used a 2-address scheme (one source argument also serves as
the destination of the operation), while GLIBC uses a 3-address
scheme (2 source operands and a separately passed destination).

The GLIBC scheme provides a superset of the functionality of the
FreeBSD macros, since it does not prevent passing the same variable
as both a source and the destination argument. In code that wanted
to preserve both source arguments, the FreeBSD macros required a
temporary copy of one of them.
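
As a minimal sketch of what this means for such callers (illustrative
code, not part of this change set; the helper name is made up):

#include <sys/param.h>
#include <sys/cpuset.h>

/*
 * result = a | b while leaving both inputs untouched.
 *
 * With the old 2-address macros this needed a temporary copy:
 *	CPU_COPY(a, result);
 *	CPU_OR(result, b);
 */
static void
cpuset_union(cpuset_t *result, cpuset_t *a, cpuset_t *b)
{
	CPU_OR(result, a, b);	/* new 3-address form: sources are read-only */
}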

This patch set makes it possible to unconditionally provide the
functions and macros expected by third-party software written for
GLIBC-based systems, but it breaks builds of externally maintained
sources that use any of the following macros: CPU_AND, CPU_ANDNOT,
CPU_OR, CPU_XOR.

One contributed driver (contrib/ofed/libmlx5) has been patched to
support both the old and the new CPU_OR signatures. If this commit
is merged to -STABLE, the version test will have to be extended to
cover more ranges.
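
In externally maintained code that pattern looks roughly like this
(a sketch only; the helper name is made up):

#include <sys/param.h>		/* provides __FreeBSD_version */
#include <sys/cpuset.h>

/* Merge 'src' into 'acc' regardless of which macro flavor is present. */
static void
cpu_or_into(cpuset_t *acc, cpuset_t *src)
{
#if __FreeBSD_version < 1400046
	CPU_OR(acc, src);		/* old 2-address signature */
#else
	CPU_OR(acc, acc, src);		/* new 3-address signature */
#endif
}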

Ports that added -D_WITH_CPU_SET_T to build on -CURRENT no longer
require that option.

The FreeBSD version has been bumped to 1400046 to reflect this
incompatible change.

Reviewed by:	kib
MFC after:	2 weeks
Relnotes:	yes
Differential Revision:	https://reviews.freebsd.org/D33451
Author:	Stefan Eßer
Date:	2021-12-30 12:20:32 +01:00
Commit:	e2650af157 (parent 1331805574)
28 changed files with 144 additions and 47 deletions


@ -27,6 +27,13 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 14.x IS SLOW:
world, or to merely disable the most expensive debugging functionality
at runtime, run "ln -s 'abort:false,junk:false' /etc/malloc.conf".)
20211230:
The macros provided for the manipulation of CPU sets (e.g. CPU_AND)
have been modified to take 2 source arguments instead of only 1.
Externally maintained sources that use these macros will have to
be adapted. The FreeBSD version has been bumped to 1400046 to
reflect this change.
20211214:
A number of the kernel include files are able to be included by
themselves. A test has been added to buildworld to enforce this.


@ -363,8 +363,12 @@ static int mlx5_enable_sandy_bridge_fix(struct ibv_device *ibdev)
mlx5_local_cpu_set(ibdev, &dev_local_cpus);
/* check if my cpu set is in dev cpu */
#if __FreeBSD_version < 1400046
CPU_OR(&result_set, &my_cpus);
CPU_OR(&result_set, &dev_local_cpus);
#else
CPU_OR(&result_set, &my_cpus, &dev_local_cpus);
#endif
stall_enable = CPU_EQUAL(&result_set, &dev_local_cpus) ? 0 : 1;
out:


@ -30,6 +30,8 @@ SRCS+= __getosreldate.c \
clock_getcpuclockid.c \
closedir.c \
confstr.c \
cpuset_alloc.c \
cpuset_free.c \
crypt.c \
ctermid.c \
daemon.c \


@ -442,6 +442,8 @@ FBSD_1.7 {
sched_getaffinity;
sched_setaffinity;
sched_getcpu;
__cpuset_alloc;
__cpuset_free;
};
FBSDprivate_1.0 {


@ -0,0 +1,33 @@
/*-
* Copyright (c) 2021 Stefan Esser <se@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdlib.h>
#include <sched.h>
cpuset_t *
__cpuset_alloc(size_t ncpus)
{
return (malloc(CPU_ALLOC_SIZE(ncpus)));
}


@ -0,0 +1,33 @@
/*-
* Copyright (c) 2021 Stefan Esser <se@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdlib.h>
#include <sched.h>
void
__cpuset_free(cpuset_t *ptr)
{
free(ptr);
}


@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
#define _WITH_CPU_SET_T
#include <sched.h>
int


@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
#define _WITH_CPU_SET_T
#include <sched.h>
#include "libc_private.h"


@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
#define _WITH_CPU_SET_T
#include <sched.h>
int


@ -32,7 +32,6 @@
#include <machine/sysarch.h>
#include <x86/ifunc.h>
#include <errno.h>
#define _WITH_CPU_SET_T
#include <sched.h>
#include "libc_private.h"


@ -49,6 +49,7 @@
.Nm CPU_OR ,
.Nm CPU_AND ,
.Nm CPU_ANDNOT ,
.Nm CPU_XOR ,
.Nm CPU_CLR_ATOMIC ,
.Nm CPU_SET_ATOMIC ,
.Nm CPU_SET_ATOMIC_ACQ ,
@ -86,9 +87,10 @@
.Fn CPU_OVERLAP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
.Ft bool
.Fn CPU_CMP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src"
.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src"
.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src"
.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
.Fn CPU_XOR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
.\"
.Fn CPU_CLR_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
.Fn CPU_SET_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
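
An illustrative fragment (not taken from the manual page) showing the
newly documented 3-address CPU_ANDNOT and CPU_XOR forms:

#include <sys/param.h>
#include <sys/cpuset.h>

/* Derive two masks without modifying either input. */
static void
split_masks(cpuset_t *only_a, cpuset_t *differ, cpuset_t *a, cpuset_t *b)
{
	CPU_ANDNOT(only_a, a, b);	/* CPUs set in 'a' but not in 'b' */
	CPU_XOR(differ, a, b);		/* CPUs set in exactly one of the two */
}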


@ -8290,7 +8290,7 @@ pmap_remove_pages(pmap_t pmap)
other_cpus = all_cpus;
critical_enter();
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
critical_exit();
KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
}


@ -1304,7 +1304,7 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid)
mtx_lock(&vm->rendezvous_mtx);
while (vm->rendezvous_func != NULL) {
/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
if (vcpuid != -1 &&
CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&


@ -1173,7 +1173,7 @@ acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
return (error);
if (setsize != sizeof(cpuset_t))
return (EINVAL);
CPU_AND(cpuset, &cpuset_domain[d]);
CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
return (0);
default:
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));


@ -1243,7 +1243,7 @@ pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
@ -1276,7 +1276,7 @@ pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
@ -1299,7 +1299,7 @@ pmap_invalidate_all_int(pmap_t pmap)
cpuid = PCPU_GET(cpuid);
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
CPU_AND(&other_cpus, &pmap->pm_active);
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
mask = &other_cpus;
}
smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);


@ -604,9 +604,9 @@ sf_buf_shootdown(struct sf_buf *sf, int flags)
if ((flags & SFB_CPUPRIVATE) == 0) {
other_cpus = all_cpus;
CPU_CLR(cpuid, &other_cpus);
CPU_ANDNOT(&other_cpus, &sf->cpumask);
CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
if (!CPU_EMPTY(&other_cpus)) {
CPU_OR(&sf->cpumask, &other_cpus);
CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
sf_buf_shootdown_curcpu_cb);
}


@ -326,7 +326,7 @@ cpuset_init(struct cpuset *set, struct cpuset *parent,
set->cs_flags = 0;
mtx_lock_spin(&cpuset_lock);
set->cs_domain = domain;
CPU_AND(&set->cs_mask, &parent->cs_mask);
CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
set->cs_id = id;
set->cs_parent = cpuset_ref(parent);
LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
@ -645,8 +645,7 @@ cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
if (set->cs_flags & CPU_SET_RDONLY)
return (EPERM);
if (augment_mask) {
CPU_COPY(&set->cs_mask, &newmask);
CPU_AND(&newmask, mask);
CPU_AND(&newmask, &set->cs_mask, mask);
} else
CPU_COPY(mask, &newmask);
@ -668,7 +667,7 @@ cpuset_update(struct cpuset *set, cpuset_t *mask)
struct cpuset *nset;
mtx_assert(&cpuset_lock, MA_OWNED);
CPU_AND(&set->cs_mask, mask);
CPU_AND(&set->cs_mask, &set->cs_mask, mask);
LIST_FOREACH(nset, &set->cs_children, cs_siblings)
cpuset_update(nset, &set->cs_mask);
@ -1083,8 +1082,7 @@ cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
* restriction to the new set, otherwise take it wholesale.
*/
if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
CPU_COPY(&tdset->cs_mask, mask);
CPU_AND(mask, &set->cs_mask);
CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
} else
CPU_COPY(&set->cs_mask, mask);
@ -1153,8 +1151,7 @@ cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
pbase = cpuset_getbase(td->td_cpuset);
/* Copy process mask, then further apply the new root mask. */
CPU_COPY(&pbase->cs_mask, &nmask);
CPU_AND(&nmask, &nroot->cs_mask);
CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
domainset_copy(pbase->cs_domain, &ndomain);
DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
@ -1946,7 +1943,7 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
case CPU_WHICH_PID:
FOREACH_THREAD_IN_PROC(p, ttd) {
thread_lock(ttd);
CPU_OR(mask, &ttd->td_cpuset->cs_mask);
CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
thread_unlock(ttd);
}
break;


@ -548,8 +548,7 @@ _rm_wlock(struct rmlock *rm)
if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
/* Get all read tokens back */
readcpus = all_cpus;
CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
CPU_ANDNOT(&readcpus, &all_cpus, &rm->rm_writecpus);
rm->rm_writecpus = all_cpus;
/*


@ -1165,8 +1165,8 @@ forward_wakeup(int cpunum)
return (0);
CPU_SETOF(me, &dontuse);
CPU_OR(&dontuse, &stopped_cpus);
CPU_OR(&dontuse, &hlt_cpus_mask);
CPU_OR(&dontuse, &dontuse, &stopped_cpus);
CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
CPU_ZERO(&map2);
if (forward_wakeup_use_loop) {
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
@ -1179,8 +1179,7 @@ forward_wakeup(int cpunum)
}
if (forward_wakeup_use_mask) {
map = idle_cpus_mask;
CPU_ANDNOT(&map, &dontuse);
CPU_ANDNOT(&map, &idle_cpus_mask, &dontuse);
/* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
@ -1366,8 +1365,7 @@ sched_add(struct thread *td, int flags)
kick_other_cpu(td->td_priority, cpu);
} else {
if (!single_cpu) {
tidlemsk = idle_cpus_mask;
CPU_ANDNOT(&tidlemsk, &hlt_cpus_mask);
CPU_ANDNOT(&tidlemsk, &idle_cpus_mask, &hlt_cpus_mask);
CPU_CLR(cpuid, &tidlemsk);
if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&


@ -707,8 +707,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
if (!SCHEDULER_STOPPED()) {
#ifdef SMP
other_cpus = all_cpus;
CPU_ANDNOT(&other_cpus, &stopped_cpus);
CPU_ANDNOT(&other_cpus, &all_cpus, &stopped_cpus);
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
stop_cpus_hard(other_cpus);
#endif
@ -746,7 +745,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
if (did_stop_cpus) {
curthread->td_stopsched = 0;
#ifdef SMP
CPU_AND(&other_cpus, &stopped_cpus);
CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
restart_cpus(other_cpus);
#endif
}


@ -761,7 +761,7 @@ smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
parent,
cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
CPU_OR(&parent->cg_mask, &child->cg_mask);
CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
parent->cg_count += child->cg_count;
}


@ -4970,7 +4970,7 @@ get_ctx_core_offset(if_ctx_t ctx)
for (i = 0; i < scctx->isc_nrxqsets; i++)
CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
&assigned_cpus);
CPU_AND(&assigned_cpus, &ctx->ifc_cpus);
CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
cores_consumed = CPU_COUNT(&assigned_cpus);
mtx_lock(&cpu_offset_mtx);


@ -405,7 +405,7 @@ ofw_pcibus_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsi
return (error);
if (setsize != sizeof(cpuset_t))
return (EINVAL);
CPU_AND(cpuset, &cpuset_domain[d]);
CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
return (0);
default:
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));


@ -1,4 +1,4 @@
/*-
#/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
@ -49,4 +49,11 @@
__BITSET_DEFINE(_cpuset, CPU_SETSIZE);
typedef struct _cpuset cpuset_t;
#ifndef _KERNEL
__BEGIN_DECLS
cpuset_t *__cpuset_alloc(size_t set_size);
void __cpuset_free(cpuset_t *ptr);
__END_DECLS
#endif
#endif /* !_SYS__CPUSET_H_ */


@ -312,8 +312,6 @@
/*
* Dynamically allocate a bitset.
*/
#define __BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
#define BIT_AND(_s, d, s) __BIT_AND(_s, d, s)
#define BIT_AND2(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
#define BIT_ANDNOT(_s, d, s) __BIT_ANDNOT(_s, d, s)
@ -351,7 +349,11 @@
#define BIT_XOR2(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
#define BIT_ZERO(_s, p) __BIT_ZERO(_s, p)
#define BITSET_ALLOC(_s, mt, mf) __BITSET_ALLOC(_s, mt, mf)
#if defined(_KERNEL)
#define BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
#define BITSET_FREE(p, mt) free(p, mt)
#endif /* _KERNEL */
#define BITSET_FSET(n) __BITSET_FSET(n)
#define BITSET_SIZE(_s) __BITSET_SIZE(_s)
#define BITSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)


@ -36,6 +36,7 @@
#include <sys/_cpuset.h>
#include <sys/_bitset.h>
#include <sys/bitset.h>
#define _NCPUBITS _BITSET_BITS
@ -56,9 +57,10 @@
#define CPU_SUBSET(p, c) __BIT_SUBSET(CPU_SETSIZE, p, c)
#define CPU_OVERLAP(p, c) __BIT_OVERLAP(CPU_SETSIZE, p, c)
#define CPU_CMP(p, c) __BIT_CMP(CPU_SETSIZE, p, c)
#define CPU_OR(d, s) __BIT_OR(CPU_SETSIZE, d, s)
#define CPU_AND(d, s) __BIT_AND(CPU_SETSIZE, d, s)
#define CPU_ANDNOT(d, s) __BIT_ANDNOT(CPU_SETSIZE, d, s)
#define CPU_OR(d, s1, s2) __BIT_OR2(CPU_SETSIZE, d, s1, s2)
#define CPU_AND(d, s1, s2) __BIT_AND2(CPU_SETSIZE, d, s1, s2)
#define CPU_ANDNOT(d, s1, s2) __BIT_ANDNOT2(CPU_SETSIZE, d, s1, s2)
#define CPU_XOR(d, s1, s2) __BIT_XOR2(CPU_SETSIZE, d, s1, s2)
#define CPU_CLR_ATOMIC(n, p) __BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
#define CPU_SET_ATOMIC(n, p) __BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
#define CPU_SET_ATOMIC_ACQ(n, p) __BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
@ -73,6 +75,20 @@
#define CPUSET_FSET __BITSET_FSET(_NCPUWORDS)
#define CPUSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
#if !defined(_KERNEL)
#define CPU_ALLOC_SIZE(_s) __BITSET_SIZE(_s)
#define CPU_ALLOC(_s) __cpuset_alloc(_s)
#define CPU_FREE(p) __cpuset_free(p)
#define CPU_ISSET_S(n, _s, p) __BIT_ISSET(_s, n, p)
#define CPU_SET_S(n, _s, p) __BIT_SET(_s, n, p)
#define CPU_ZERO_S(_s, p) __BIT_ZERO(_s, p)
#define CPU_OR_S(_s, d, s1, s2) __BIT_OR2(_s, d, s1, s2)
#define CPU_AND_S(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
#define CPU_XOR_S(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
#endif
/*
* Valid cpulevel_t values.
*/


@ -76,7 +76,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1400045
#define __FreeBSD_version 1400046
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,


@ -475,7 +475,7 @@ cpu_reset(void)
if (smp_started) {
map = all_cpus;
CPU_CLR(PCPU_GET(cpuid), &map);
CPU_ANDNOT(&map, &stopped_cpus);
CPU_ANDNOT(&map, &map, &stopped_cpus);
if (!CPU_EMPTY(&map)) {
printf("cpu_reset: Stopping other CPUs\n");
stop_cpus(map);