Make CPU_SET macros compliant with other implementations
(cherry picked from commit e2650af157)
This commit is contained in:
parent
fddbba918d
commit
dc4114875e
7
UPDATING
7
UPDATING
@ -12,6 +12,13 @@ Items affecting the ports and packages system can be found in
|
||||
/usr/ports/UPDATING. Please read that file before updating system packages
|
||||
and/or ports.
|
||||
|
||||
20220114:
|
||||
The macros provided for the manipulation of CPU sets (e.g. CPU_AND)
|
||||
have been modified to take 2 source arguments instead of only 1.
|
||||
Externally maintained sources that use these macros will have to
|
||||
be adapted. The FreeBSD version has been bumped to 1300524 to
|
||||
reflect this change.
|
||||
|
||||
20211218:
|
||||
Commit 18f5b477ee66 adds two arguments to VOP_ALLOCATE().
|
||||
Normally changes to VOP calls are not MFC'd, but a
|
||||
|
@ -363,8 +363,7 @@ static int mlx5_enable_sandy_bridge_fix(struct ibv_device *ibdev)
|
||||
mlx5_local_cpu_set(ibdev, &dev_local_cpus);
|
||||
|
||||
/* check if my cpu set is in dev cpu */
|
||||
CPU_OR(&result_set, &my_cpus);
|
||||
CPU_OR(&result_set, &dev_local_cpus);
|
||||
CPU_OR(&result_set, &my_cpus, &dev_local_cpus);
|
||||
stall_enable = CPU_EQUAL(&result_set, &dev_local_cpus) ? 0 : 1;
|
||||
|
||||
out:
|
||||
|
@ -30,6 +30,8 @@ SRCS+= __getosreldate.c \
|
||||
clock_getcpuclockid.c \
|
||||
closedir.c \
|
||||
confstr.c \
|
||||
cpuset_alloc.c \
|
||||
cpuset_free.c \
|
||||
crypt.c \
|
||||
ctermid.c \
|
||||
daemon.c \
|
||||
|
@ -442,6 +442,8 @@ FBSD_1.7 {
|
||||
sched_getaffinity;
|
||||
sched_setaffinity;
|
||||
sched_getcpu;
|
||||
__cpuset_alloc;
|
||||
__cpuset_free;
|
||||
};
|
||||
|
||||
FBSDprivate_1.0 {
|
||||
|
33
lib/libc/gen/cpuset_alloc.c
Normal file
33
lib/libc/gen/cpuset_alloc.c
Normal file
@ -0,0 +1,33 @@
|
||||
/*-
|
||||
* Copyright (c) 2021 Stefan Esser <se@FreeBSD.org>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <sched.h>
|
||||
|
||||
/*
 * Allocate storage large enough for a cpuset_t covering at least
 * ncpus CPUs, as computed by CPU_ALLOC_SIZE().
 * Returns NULL on allocation failure; the caller releases the set
 * with __cpuset_free().
 */
cpuset_t *
__cpuset_alloc(size_t ncpus)
{
	cpuset_t *set;

	set = malloc(CPU_ALLOC_SIZE(ncpus));
	return (set);
}
|
33
lib/libc/gen/cpuset_free.c
Normal file
33
lib/libc/gen/cpuset_free.c
Normal file
@ -0,0 +1,33 @@
|
||||
/*-
|
||||
* Copyright (c) 2021 Stefan Esser <se@FreeBSD.org>
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <sched.h>
|
||||
|
||||
void
|
||||
__cpuset_free(cpuset_t *ptr)
|
||||
{
|
||||
free(ptr);
|
||||
}
|
@ -26,7 +26,6 @@
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#define _WITH_CPU_SET_T
|
||||
#include <sched.h>
|
||||
|
||||
int
|
||||
|
@ -26,7 +26,6 @@
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#define _WITH_CPU_SET_T
|
||||
#include <sched.h>
|
||||
#include "libc_private.h"
|
||||
|
||||
|
@ -26,7 +26,6 @@
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#define _WITH_CPU_SET_T
|
||||
#include <sched.h>
|
||||
|
||||
int
|
||||
|
@ -32,7 +32,6 @@
|
||||
#include <machine/sysarch.h>
|
||||
#include <x86/ifunc.h>
|
||||
#include <errno.h>
|
||||
#define _WITH_CPU_SET_T
|
||||
#include <sched.h>
|
||||
#include "libc_private.h"
|
||||
|
||||
|
@ -49,6 +49,7 @@
|
||||
.Nm CPU_OR ,
|
||||
.Nm CPU_AND ,
|
||||
.Nm CPU_ANDNOT ,
|
||||
.Nm CPU_XOR ,
|
||||
.Nm CPU_CLR_ATOMIC ,
|
||||
.Nm CPU_SET_ATOMIC ,
|
||||
.Nm CPU_SET_ATOMIC_ACQ ,
|
||||
@ -86,9 +87,10 @@
|
||||
.Fn CPU_OVERLAP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
|
||||
.Ft bool
|
||||
.Fn CPU_CMP "cpuset_t *cpuset1" "cpuset_t *cpuset2"
|
||||
.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src"
|
||||
.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src"
|
||||
.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src"
|
||||
.Fn CPU_OR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
|
||||
.Fn CPU_AND "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
|
||||
.Fn CPU_ANDNOT "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
|
||||
.Fn CPU_XOR "cpuset_t *dst" "cpuset_t *src1" "cpuset_t *src2"
|
||||
.\"
|
||||
.Fn CPU_CLR_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
|
||||
.Fn CPU_SET_ATOMIC "size_t cpu_idx" "cpuset_t *cpuset"
|
||||
|
@ -8172,7 +8172,7 @@ pmap_remove_pages(pmap_t pmap)
|
||||
other_cpus = all_cpus;
|
||||
critical_enter();
|
||||
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
|
||||
CPU_AND(&other_cpus, &pmap->pm_active);
|
||||
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
|
||||
critical_exit();
|
||||
KASSERT(CPU_EMPTY(&other_cpus), ("pmap active %p", pmap));
|
||||
}
|
||||
|
@ -1304,7 +1304,7 @@ vm_handle_rendezvous(struct vm *vm, int vcpuid)
|
||||
mtx_lock(&vm->rendezvous_mtx);
|
||||
while (vm->rendezvous_func != NULL) {
|
||||
/* 'rendezvous_req_cpus' must be a subset of 'active_cpus' */
|
||||
CPU_AND(&vm->rendezvous_req_cpus, &vm->active_cpus);
|
||||
CPU_AND(&vm->rendezvous_req_cpus, &vm->rendezvous_req_cpus, &vm->active_cpus);
|
||||
|
||||
if (vcpuid != -1 &&
|
||||
CPU_ISSET(vcpuid, &vm->rendezvous_req_cpus) &&
|
||||
|
@ -1162,7 +1162,7 @@ acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
|
||||
return (error);
|
||||
if (setsize != sizeof(cpuset_t))
|
||||
return (EINVAL);
|
||||
CPU_AND(cpuset, &cpuset_domain[d]);
|
||||
CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
|
||||
return (0);
|
||||
default:
|
||||
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
|
||||
|
@ -1243,7 +1243,7 @@ pmap_invalidate_page_int(pmap_t pmap, vm_offset_t va)
|
||||
cpuid = PCPU_GET(cpuid);
|
||||
other_cpus = all_cpus;
|
||||
CPU_CLR(cpuid, &other_cpus);
|
||||
CPU_AND(&other_cpus, &pmap->pm_active);
|
||||
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
|
||||
mask = &other_cpus;
|
||||
}
|
||||
smp_masked_invlpg(*mask, va, pmap, pmap_curcpu_cb_dummy);
|
||||
@ -1276,7 +1276,7 @@ pmap_invalidate_range_int(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
||||
cpuid = PCPU_GET(cpuid);
|
||||
other_cpus = all_cpus;
|
||||
CPU_CLR(cpuid, &other_cpus);
|
||||
CPU_AND(&other_cpus, &pmap->pm_active);
|
||||
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
|
||||
mask = &other_cpus;
|
||||
}
|
||||
smp_masked_invlpg_range(*mask, sva, eva, pmap, pmap_curcpu_cb_dummy);
|
||||
@ -1299,7 +1299,7 @@ pmap_invalidate_all_int(pmap_t pmap)
|
||||
cpuid = PCPU_GET(cpuid);
|
||||
other_cpus = all_cpus;
|
||||
CPU_CLR(cpuid, &other_cpus);
|
||||
CPU_AND(&other_cpus, &pmap->pm_active);
|
||||
CPU_AND(&other_cpus, &other_cpus, &pmap->pm_active);
|
||||
mask = &other_cpus;
|
||||
}
|
||||
smp_masked_invltlb(*mask, pmap, pmap_curcpu_cb_dummy);
|
||||
|
@ -595,9 +595,9 @@ sf_buf_shootdown(struct sf_buf *sf, int flags)
|
||||
if ((flags & SFB_CPUPRIVATE) == 0) {
|
||||
other_cpus = all_cpus;
|
||||
CPU_CLR(cpuid, &other_cpus);
|
||||
CPU_ANDNOT(&other_cpus, &sf->cpumask);
|
||||
CPU_ANDNOT(&other_cpus, &other_cpus, &sf->cpumask);
|
||||
if (!CPU_EMPTY(&other_cpus)) {
|
||||
CPU_OR(&sf->cpumask, &other_cpus);
|
||||
CPU_OR(&sf->cpumask, &sf->cpumask, &other_cpus);
|
||||
smp_masked_invlpg(other_cpus, sf->kva, kernel_pmap,
|
||||
sf_buf_shootdown_curcpu_cb);
|
||||
}
|
||||
|
@ -326,7 +326,7 @@ cpuset_init(struct cpuset *set, struct cpuset *parent,
|
||||
set->cs_flags = 0;
|
||||
mtx_lock_spin(&cpuset_lock);
|
||||
set->cs_domain = domain;
|
||||
CPU_AND(&set->cs_mask, &parent->cs_mask);
|
||||
CPU_AND(&set->cs_mask, &set->cs_mask, &parent->cs_mask);
|
||||
set->cs_id = id;
|
||||
set->cs_parent = cpuset_ref(parent);
|
||||
LIST_INSERT_HEAD(&parent->cs_children, set, cs_siblings);
|
||||
@ -645,8 +645,7 @@ cpuset_testupdate(struct cpuset *set, cpuset_t *mask, int augment_mask)
|
||||
if (set->cs_flags & CPU_SET_RDONLY)
|
||||
return (EPERM);
|
||||
if (augment_mask) {
|
||||
CPU_COPY(&set->cs_mask, &newmask);
|
||||
CPU_AND(&newmask, mask);
|
||||
CPU_AND(&newmask, &set->cs_mask, mask);
|
||||
} else
|
||||
CPU_COPY(mask, &newmask);
|
||||
|
||||
@ -668,7 +667,7 @@ cpuset_update(struct cpuset *set, cpuset_t *mask)
|
||||
struct cpuset *nset;
|
||||
|
||||
mtx_assert(&cpuset_lock, MA_OWNED);
|
||||
CPU_AND(&set->cs_mask, mask);
|
||||
CPU_AND(&set->cs_mask, &set->cs_mask, mask);
|
||||
LIST_FOREACH(nset, &set->cs_children, cs_siblings)
|
||||
cpuset_update(nset, &set->cs_mask);
|
||||
|
||||
@ -1083,8 +1082,7 @@ cpuset_setproc_setthread_mask(struct cpuset *tdset, struct cpuset *set,
|
||||
* restriction to the new set, otherwise take it wholesale.
|
||||
*/
|
||||
if (CPU_CMP(&tdset->cs_mask, &parent->cs_mask) != 0) {
|
||||
CPU_COPY(&tdset->cs_mask, mask);
|
||||
CPU_AND(mask, &set->cs_mask);
|
||||
CPU_AND(mask, &tdset->cs_mask, &set->cs_mask);
|
||||
} else
|
||||
CPU_COPY(&set->cs_mask, mask);
|
||||
|
||||
@ -1153,8 +1151,7 @@ cpuset_setproc_newbase(struct thread *td, struct cpuset *set,
|
||||
pbase = cpuset_getbase(td->td_cpuset);
|
||||
|
||||
/* Copy process mask, then further apply the new root mask. */
|
||||
CPU_COPY(&pbase->cs_mask, &nmask);
|
||||
CPU_AND(&nmask, &nroot->cs_mask);
|
||||
CPU_AND(&nmask, &pbase->cs_mask, &nroot->cs_mask);
|
||||
|
||||
domainset_copy(pbase->cs_domain, &ndomain);
|
||||
DOMAINSET_AND(&ndomain.ds_mask, &set->cs_domain->ds_mask);
|
||||
@ -1946,7 +1943,7 @@ kern_cpuset_getaffinity(struct thread *td, cpulevel_t level, cpuwhich_t which,
|
||||
case CPU_WHICH_PID:
|
||||
FOREACH_THREAD_IN_PROC(p, ttd) {
|
||||
thread_lock(ttd);
|
||||
CPU_OR(mask, &ttd->td_cpuset->cs_mask);
|
||||
CPU_OR(mask, mask, &ttd->td_cpuset->cs_mask);
|
||||
thread_unlock(ttd);
|
||||
}
|
||||
break;
|
||||
|
@ -548,8 +548,7 @@ _rm_wlock(struct rmlock *rm)
|
||||
|
||||
if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
|
||||
/* Get all read tokens back */
|
||||
readcpus = all_cpus;
|
||||
CPU_ANDNOT(&readcpus, &rm->rm_writecpus);
|
||||
CPU_ANDNOT(&readcpus, &all_cpus, &rm->rm_writecpus);
|
||||
rm->rm_writecpus = all_cpus;
|
||||
|
||||
/*
|
||||
|
@ -1158,8 +1158,8 @@ forward_wakeup(int cpunum)
|
||||
return (0);
|
||||
|
||||
CPU_SETOF(me, &dontuse);
|
||||
CPU_OR(&dontuse, &stopped_cpus);
|
||||
CPU_OR(&dontuse, &hlt_cpus_mask);
|
||||
CPU_OR(&dontuse, &dontuse, &stopped_cpus);
|
||||
CPU_OR(&dontuse, &dontuse, &hlt_cpus_mask);
|
||||
CPU_ZERO(&map2);
|
||||
if (forward_wakeup_use_loop) {
|
||||
STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
|
||||
@ -1172,8 +1172,7 @@ forward_wakeup(int cpunum)
|
||||
}
|
||||
|
||||
if (forward_wakeup_use_mask) {
|
||||
map = idle_cpus_mask;
|
||||
CPU_ANDNOT(&map, &dontuse);
|
||||
CPU_ANDNOT(&map, &idle_cpus_mask, &dontuse);
|
||||
|
||||
/* If they are both on, compare and use loop if different. */
|
||||
if (forward_wakeup_use_loop) {
|
||||
@ -1359,8 +1358,7 @@ sched_add(struct thread *td, int flags)
|
||||
kick_other_cpu(td->td_priority, cpu);
|
||||
} else {
|
||||
if (!single_cpu) {
|
||||
tidlemsk = idle_cpus_mask;
|
||||
CPU_ANDNOT(&tidlemsk, &hlt_cpus_mask);
|
||||
CPU_ANDNOT(&tidlemsk, &idle_cpus_mask, &hlt_cpus_mask);
|
||||
CPU_CLR(cpuid, &tidlemsk);
|
||||
|
||||
if (!CPU_ISSET(cpuid, &idle_cpus_mask) &&
|
||||
|
@ -707,8 +707,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
|
||||
|
||||
if (!SCHEDULER_STOPPED()) {
|
||||
#ifdef SMP
|
||||
other_cpus = all_cpus;
|
||||
CPU_ANDNOT(&other_cpus, &stopped_cpus);
|
||||
CPU_ANDNOT(&other_cpus, &all_cpus, &stopped_cpus);
|
||||
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
|
||||
stop_cpus_hard(other_cpus);
|
||||
#endif
|
||||
@ -746,7 +745,7 @@ kdb_trap(int type, int code, struct trapframe *tf)
|
||||
if (did_stop_cpus) {
|
||||
curthread->td_stopsched = 0;
|
||||
#ifdef SMP
|
||||
CPU_AND(&other_cpus, &stopped_cpus);
|
||||
CPU_AND(&other_cpus, &other_cpus, &stopped_cpus);
|
||||
restart_cpus(other_cpus);
|
||||
#endif
|
||||
}
|
||||
|
@ -761,7 +761,7 @@ smp_topo_addleaf(struct cpu_group *parent, struct cpu_group *child, int share,
|
||||
parent,
|
||||
cpusetobj_strprint(cpusetbuf, &parent->cg_mask),
|
||||
cpusetobj_strprint(cpusetbuf2, &child->cg_mask));
|
||||
CPU_OR(&parent->cg_mask, &child->cg_mask);
|
||||
CPU_OR(&parent->cg_mask, &parent->cg_mask, &child->cg_mask);
|
||||
parent->cg_count += child->cg_count;
|
||||
}
|
||||
|
||||
|
@ -4944,7 +4944,7 @@ get_ctx_core_offset(if_ctx_t ctx)
|
||||
for (i = 0; i < scctx->isc_nrxqsets; i++)
|
||||
CPU_SET(get_cpuid_for_queue(ctx, first_valid, i, false),
|
||||
&assigned_cpus);
|
||||
CPU_AND(&assigned_cpus, &ctx->ifc_cpus);
|
||||
CPU_AND(&assigned_cpus, &assigned_cpus, &ctx->ifc_cpus);
|
||||
cores_consumed = CPU_COUNT(&assigned_cpus);
|
||||
|
||||
mtx_lock(&cpu_offset_mtx);
|
||||
|
@ -405,7 +405,7 @@ ofw_pcibus_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsi
|
||||
return (error);
|
||||
if (setsize != sizeof(cpuset_t))
|
||||
return (EINVAL);
|
||||
CPU_AND(cpuset, &cpuset_domain[d]);
|
||||
CPU_AND(cpuset, cpuset, &cpuset_domain[d]);
|
||||
return (0);
|
||||
default:
|
||||
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
|
||||
|
@ -1,4 +1,4 @@
|
||||
/*-
|
||||
#/*-
|
||||
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
|
||||
*
|
||||
* Copyright (c) 2008, Jeffrey Roberson <jeff@freebsd.org>
|
||||
@ -49,4 +49,11 @@
|
||||
__BITSET_DEFINE(_cpuset, CPU_SETSIZE);
|
||||
typedef struct _cpuset cpuset_t;
|
||||
|
||||
#ifndef _KERNEL
|
||||
__BEGIN_DECLS
|
||||
cpuset_t *__cpuset_alloc(size_t set_size);
|
||||
void __cpuset_free(cpuset_t *ptr);
|
||||
__END_DECLS
|
||||
#endif
|
||||
|
||||
#endif /* !_SYS__CPUSET_H_ */
|
||||
|
@ -312,8 +312,6 @@
|
||||
/*
|
||||
* Dynamically allocate a bitset.
|
||||
*/
|
||||
#define __BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
|
||||
|
||||
#define BIT_AND(_s, d, s) __BIT_AND(_s, d, s)
|
||||
#define BIT_AND2(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
|
||||
#define BIT_ANDNOT(_s, d, s) __BIT_ANDNOT(_s, d, s)
|
||||
@ -351,7 +349,11 @@
|
||||
#define BIT_XOR2(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
|
||||
#define BIT_ZERO(_s, p) __BIT_ZERO(_s, p)
|
||||
|
||||
#define BITSET_ALLOC(_s, mt, mf) __BITSET_ALLOC(_s, mt, mf)
|
||||
#if defined(_KERNEL)
|
||||
#define BITSET_ALLOC(_s, mt, mf) malloc(__BITSET_SIZE((_s)), mt, (mf))
|
||||
#define BITSET_FREE(p, mt) free(p, mt)
|
||||
#endif /* _KERNEL */
|
||||
|
||||
#define BITSET_FSET(n) __BITSET_FSET(n)
|
||||
#define BITSET_SIZE(_s) __BITSET_SIZE(_s)
|
||||
#define BITSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
|
||||
|
@ -36,6 +36,7 @@
|
||||
|
||||
#include <sys/_cpuset.h>
|
||||
|
||||
#include <sys/_bitset.h>
|
||||
#include <sys/bitset.h>
|
||||
|
||||
#define _NCPUBITS _BITSET_BITS
|
||||
@ -56,9 +57,10 @@
|
||||
#define CPU_SUBSET(p, c) __BIT_SUBSET(CPU_SETSIZE, p, c)
|
||||
#define CPU_OVERLAP(p, c) __BIT_OVERLAP(CPU_SETSIZE, p, c)
|
||||
#define CPU_CMP(p, c) __BIT_CMP(CPU_SETSIZE, p, c)
|
||||
#define CPU_OR(d, s) __BIT_OR(CPU_SETSIZE, d, s)
|
||||
#define CPU_AND(d, s) __BIT_AND(CPU_SETSIZE, d, s)
|
||||
#define CPU_ANDNOT(d, s) __BIT_ANDNOT(CPU_SETSIZE, d, s)
|
||||
#define CPU_OR(d, s1, s2) __BIT_OR2(CPU_SETSIZE, d, s1, s2)
|
||||
#define CPU_AND(d, s1, s2) __BIT_AND2(CPU_SETSIZE, d, s1, s2)
|
||||
#define CPU_ANDNOT(d, s1, s2) __BIT_ANDNOT2(CPU_SETSIZE, d, s1, s2)
|
||||
#define CPU_XOR(d, s1, s2) __BIT_XOR2(CPU_SETSIZE, d, s1, s2)
|
||||
#define CPU_CLR_ATOMIC(n, p) __BIT_CLR_ATOMIC(CPU_SETSIZE, n, p)
|
||||
#define CPU_SET_ATOMIC(n, p) __BIT_SET_ATOMIC(CPU_SETSIZE, n, p)
|
||||
#define CPU_SET_ATOMIC_ACQ(n, p) __BIT_SET_ATOMIC_ACQ(CPU_SETSIZE, n, p)
|
||||
@ -73,6 +75,20 @@
|
||||
#define CPUSET_FSET __BITSET_FSET(_NCPUWORDS)
|
||||
#define CPUSET_T_INITIALIZER(x) __BITSET_T_INITIALIZER(x)
|
||||
|
||||
#if !defined(_KERNEL)
|
||||
#define CPU_ALLOC_SIZE(_s) __BITSET_SIZE(_s)
|
||||
#define CPU_ALLOC(_s) __cpuset_alloc(_s)
|
||||
#define CPU_FREE(p) __cpuset_free(p)
|
||||
|
||||
#define CPU_ISSET_S(n, _s, p) __BIT_ISSET(_s, n, p)
|
||||
#define CPU_SET_S(n, _s, p) __BIT_SET(_s, n, p)
|
||||
#define CPU_ZERO_S(_s, p) __BIT_ZERO(_s, p)
|
||||
|
||||
#define CPU_OR_S(_s, d, s1, s2) __BIT_OR2(_s, d, s1, s2)
|
||||
#define CPU_AND_S(_s, d, s1, s2) __BIT_AND2(_s, d, s1, s2)
|
||||
#define CPU_XOR_S(_s, d, s1, s2) __BIT_XOR2(_s, d, s1, s2)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Valid cpulevel_t values.
|
||||
*/
|
||||
|
@ -60,7 +60,7 @@
|
||||
* in the range 5 to 9.
|
||||
*/
|
||||
#undef __FreeBSD_version
|
||||
#define __FreeBSD_version 1300523 /* Master, propagated to newvers */
|
||||
#define __FreeBSD_version 1300524 /* Master, propagated to newvers */
|
||||
|
||||
/*
|
||||
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
|
||||
|
@ -471,7 +471,7 @@ cpu_reset(void)
|
||||
if (smp_started) {
|
||||
map = all_cpus;
|
||||
CPU_CLR(PCPU_GET(cpuid), &map);
|
||||
CPU_ANDNOT(&map, &stopped_cpus);
|
||||
CPU_ANDNOT(&map, &map, &stopped_cpus);
|
||||
if (!CPU_EMPTY(&map)) {
|
||||
printf("cpu_reset: Stopping other CPUs\n");
|
||||
stop_cpus(map);
|
||||
|
Loading…
Reference in New Issue
Block a user