Merge ^/head r327886 through r327930.

Dimitry Andric 2018-01-13 17:52:55 +00:00
commit 72bfb31a82
117 changed files with 3596 additions and 1887 deletions

View File

@ -537,6 +537,7 @@ BSARGS= DESTDIR= \
-DNO_CPU_CFLAGS MK_WARNS=no MK_CTF=no \
MK_CLANG_EXTRAS=no MK_CLANG_FULL=no \
MK_LLDB=no MK_TESTS=no \
MK_LLD=${MK_LLD_BOOTSTRAP} \
MK_INCLUDES=yes
BMAKE= \

View File

@ -0,0 +1,44 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright 2018 Domagoj Stolfa <domagoj.stolfa@cl.cam.ac.uk>.
*
* This software was developed by BAE Systems, the University of Cambridge
* Computer Laboratory, and Memorial University under DARPA/AFRL contract
* FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
* (TC) research program.
*
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* ASSERTION:
* collect jailname at every fbt probe and at every firing of a
* high-frequency profile probe
*/
fbt:::
{
@a[jailname] = count();
}
profile-4999hz
{
@a[jailname] = count();
}
tick-1sec
/n++ == 10/
{
exit(0);
}

View File

@ -0,0 +1,44 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright 2018 Domagoj Stolfa <domagoj.stolfa@cl.cam.ac.uk>.
*
* This software was developed by BAE Systems, the University of Cambridge
* Computer Laboratory, and Memorial University under DARPA/AFRL contract
* FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent Computing
* (TC) research program.
*
*/
#pragma ident "%Z%%M% %I% %E% SMI"
/*
* ASSERTION:
* collect jid at every fbt probe and at every firing of a
* high-frequency profile probe
*/
fbt:::
{
@a[jid] = count();
}
profile-4999hz
{
@a[jid] = count();
}
tick-1sec
/n++ == 10/
{
exit(0);
}

View File

@ -313,6 +313,12 @@ static const dt_ident_t _dtrace_globals[] = {
DT_VERS_1_5, &dt_idops_func, "string(int, void *)" },
{ "ipl", DT_IDENT_SCALAR, 0, DIF_VAR_IPL, DT_ATTR_STABCMN, DT_VERS_1_0,
&dt_idops_type, "uint_t" },
#ifdef __FreeBSD__
{ "jailname", DT_IDENT_SCALAR, 0, DIF_VAR_JAILNAME,
DT_ATTR_STABCMN, DT_VERS_1_13, &dt_idops_type, "string" },
{ "jid", DT_IDENT_SCALAR, 0, DIF_VAR_JID, DT_ATTR_STABCMN, DT_VERS_1_13,
&dt_idops_type, "int" },
#endif
{ "json", DT_IDENT_FUNC, 0, DIF_SUBR_JSON, DT_ATTR_STABCMN, DT_VERS_1_11,
&dt_idops_func, "string(const char *, const char *)" },
{ "jstack", DT_IDENT_ACTFUNC, 0, DT_ACT_JSTACK, DT_ATTR_STABCMN, DT_VERS_1_0,
@ -528,10 +534,8 @@ static const dt_ident_t _dtrace_globals[] = {
{ "walltimestamp", DT_IDENT_SCALAR, 0, DIF_VAR_WALLTIMESTAMP,
DT_ATTR_STABCMN, DT_VERS_1_0,
&dt_idops_type, "int64_t" },
#ifdef illumos
{ "zonename", DT_IDENT_SCALAR, 0, DIF_VAR_ZONENAME,
DT_ATTR_STABCMN, DT_VERS_1_0, &dt_idops_type, "string" },
#endif
#ifndef illumos
{ "cpu", DT_IDENT_SCALAR, 0, DIF_VAR_CPU,

View File

@ -19,6 +19,8 @@ ${PACKAGE}FILES= \
tst.gid.d \
tst.hton.d \
tst.index.d \
tst.jailname.d \
tst.jid.d \
tst.msgdsize.d \
tst.msgsize.d \
tst.null.d \

View File

@ -1393,16 +1393,7 @@ uw_advance_context (struct _Unwind_Context *context, _Unwind_FrameState *fs)
static inline void
init_dwarf_reg_size_table (void)
{
/*
* ARM64TODO: http://llvm.org/pr22997
* llvm 3.6 doesn't support __builtin_init_dwarf_reg_size_table on AArch64.
*/
#ifdef __aarch64__
printf("Unimplemented: init_dwarf_reg_size_table\n");
abort();
#else
__builtin_init_dwarf_reg_size_table (dwarf_reg_size_table);
#endif
}
static void

View File

@ -398,6 +398,8 @@ FBSD_1.5 {
mknodat;
stat;
statfs;
cpuset_getdomain;
cpuset_setdomain;
};
FBSDprivate_1.0 {
@ -1022,4 +1024,8 @@ FBSDprivate_1.0 {
gssd_syscall;
__libc_interposing_slot;
__libc_sigwait;
_cpuset_getdomain;
__sys_cpuset_getdomain;
_cpuset_setdomain;
__sys_cpuset_setdomain;
};
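
The cpuset_getdomain and cpuset_setdomain symbols exported above are the userland entry points for the NUMA domain-policy syscalls introduced elsewhere in this merge; the prototypes can be read off the syscalls.master hunk further down. A minimal userland sketch of querying the calling process's domain mask and policy — hedged, not taken from this commit: CPU_LEVEL_WHICH and CPU_WHICH_PID are the usual cpuset(2) constants, and an id of -1 names the calling process.

#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	domainset_t mask;
	int policy;

	/* Ask the kernel for this process's memory-domain mask and policy. */
	if (cpuset_getdomain(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1,
	    sizeof(mask), &mask, &policy) != 0)
		err(1, "cpuset_getdomain");
	printf("domain policy: %d\n", policy);
	return (0);
}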

View File

@ -55,6 +55,8 @@ static struct nlist namelist[] = {
{ .n_name = "_mp_maxid" },
#define X_ALL_CPUS 2
{ .n_name = "_all_cpus" },
#define X_VM_NDOMAINS 3
{ .n_name = "_vm_ndomains" },
{ .n_name = "" },
};
@ -297,11 +299,12 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
{
LIST_HEAD(, uma_keg) uma_kegs;
struct memory_type *mtp;
struct uma_zone_domain uzd;
struct uma_bucket *ubp, ub;
struct uma_cache *ucp, *ucp_array;
struct uma_zone *uzp, uz;
struct uma_keg *kzp, kz;
int hint_dontsearch, i, mp_maxid, ret;
int hint_dontsearch, i, mp_maxid, ndomains, ret;
char name[MEMTYPE_MAXNAME];
cpuset_t all_cpus;
long cpusetsize;
@ -323,6 +326,12 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
list->mtl_error = ret;
return (-1);
}
ret = kread_symbol(kvm, X_VM_NDOMAINS, &ndomains,
sizeof(ndomains), 0);
if (ret != 0) {
list->mtl_error = ret;
return (-1);
}
ret = kread_symbol(kvm, X_UMA_KEGS, &uma_kegs, sizeof(uma_kegs), 0);
if (ret != 0) {
list->mtl_error = ret;
@ -447,10 +456,17 @@ memstat_kvm_uma(struct memory_type_list *list, void *kvm_handle)
kz.uk_ipers;
mtp->mt_byteslimit = mtp->mt_countlimit * mtp->mt_size;
mtp->mt_count = mtp->mt_numallocs - mtp->mt_numfrees;
for (ubp = LIST_FIRST(&uz.uz_buckets); ubp !=
NULL; ubp = LIST_NEXT(&ub, ub_link)) {
ret = kread(kvm, ubp, &ub, sizeof(ub), 0);
mtp->mt_zonefree += ub.ub_cnt;
for (i = 0; i < ndomains; i++) {
ret = kread(kvm, &uz.uz_domain[i], &uzd,
sizeof(uzd), 0);
for (ubp =
LIST_FIRST(&uzd.uzd_buckets);
ubp != NULL;
ubp = LIST_NEXT(&ub, ub_link)) {
ret = kread(kvm, ubp, &ub,
sizeof(ub), 0);
mtp->mt_zonefree += ub.ub_cnt;
}
}
if (!((kz.uk_flags & UMA_ZONE_SECONDARY) &&
LIST_FIRST(&kz.uk_zones) != uzp)) {

View File

@ -3270,7 +3270,8 @@ pmc_init(void)
cpu_info.pm_npmc = op_cpu_info.pm_npmc;
cpu_info.pm_nclass = op_cpu_info.pm_nclass;
for (n = 0; n < cpu_info.pm_nclass; n++)
cpu_info.pm_classes[n] = op_cpu_info.pm_classes[n];
memcpy(&cpu_info.pm_classes[n], &op_cpu_info.pm_classes[n],
sizeof(cpu_info.pm_classes[n]));
pmc_class_table = malloc(PMC_CLASS_TABLE_SIZE *
sizeof(struct pmc_class_descr *));

View File

@ -1,6 +1,6 @@
.\" DO NOT EDIT-- this file is generated by tools/build/options/makeman.
.\" $FreeBSD$
.Dd January 11, 2018
.Dd January 12, 2018
.Dt SRC.CONF 5
.Os
.Sh NAME
@ -970,12 +970,12 @@ To be able to build the system, either Binutils or LLD bootstrap must be
enabled unless an alternate linker is provided via XLD.
.Pp
This is a default setting on
arm/arm, arm/armeb, arm/armv6, arm/armv7, mips/mipsel, mips/mips, mips/mips64el, mips/mips64, mips/mipsn32, mips/mipselhf, mips/mipshf, mips/mips64elhf, mips/mips64hf, powerpc/powerpc, powerpc/powerpc64, powerpc/powerpcspe, riscv/riscv64, riscv/riscv64sf and sparc64/sparc64.
arm/arm, arm/armeb, arm/armv6, arm/armv7, i386/i386, mips/mipsel, mips/mips, mips/mips64el, mips/mips64, mips/mipsn32, mips/mipselhf, mips/mipshf, mips/mips64elhf, mips/mips64hf, powerpc/powerpc, powerpc/powerpc64, powerpc/powerpcspe, riscv/riscv64, riscv/riscv64sf and sparc64/sparc64.
.It Va WITH_LLD_BOOTSTRAP
Set to build the LLD linker during the bootstrap phase of the build.
.Pp
This is a default setting on
amd64/amd64, arm64/aarch64 and i386/i386.
amd64/amd64 and arm64/aarch64.
.It Va WITHOUT_LLD_IS_LD
Set to use GNU binutils ld as the system linker, instead of LLVM's LLD.
.Pp

View File

@ -253,7 +253,7 @@ __DEFAULT_NO_OPTIONS+=LLVM_LIBUNWIND
.endif
.if ${__T} == "aarch64"
__DEFAULT_YES_OPTIONS+=LLD_BOOTSTRAP LLD_IS_LD
.elif ${__T} == "amd64" || ${__T} == "i386"
.elif ${__T} == "amd64"
__DEFAULT_YES_OPTIONS+=LLD_BOOTSTRAP
__DEFAULT_NO_OPTIONS+=LLD_IS_LD
.else

View File

@ -44,14 +44,15 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
vm_page_t m;
vm_paddr_t pa;
void *va;
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc(NULL, 0,
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);

View File

@ -71,24 +71,24 @@ static const struct allwinner_pins a83t_pins[] = {
{ "PC17", 2, 17, { "gpio_in", "gpio_out", "nand" } },
{ "PC18", 2, 18, { "gpio_in", "gpio_out", "nand" } },
{ "PD2", 3, 2, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD3", 3, 3, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD4", 3, 4, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD5", 3, 5, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD6", 3, 6, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD7", 3, 7, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD10", 3, 10, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD11", 3, 11, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD12", 3, 12, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD13", 3, 13, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD14", 3, 14, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD15", 3, 15, { "gpio_in", "gpio_out", "lcd", NULL, "emac" } },
{ "PD18", 3, 18, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD19", 3, 19, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD20", 3, 20, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD21", 3, 21, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD22", 3, 22, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD23", 3, 23, { "gpio_in", "gpio_out", "lcd", "lvds", "emac" } },
{ "PD2", 3, 2, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD3", 3, 3, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD4", 3, 4, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD5", 3, 5, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD6", 3, 6, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD7", 3, 7, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD10", 3, 10, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD11", 3, 11, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD12", 3, 12, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD13", 3, 13, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD14", 3, 14, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD15", 3, 15, { "gpio_in", "gpio_out", "lcd", NULL, "gmac" } },
{ "PD18", 3, 18, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD19", 3, 19, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD20", 3, 20, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD21", 3, 21, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD22", 3, 22, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD23", 3, 23, { "gpio_in", "gpio_out", "lcd", "lvds", "gmac" } },
{ "PD24", 3, 24, { "gpio_in", "gpio_out", "lcd", "lvds" } },
{ "PD25", 3, 25, { "gpio_in", "gpio_out", "lcd", "lvds" } },
{ "PD26", 3, 26, { "gpio_in", "gpio_out", "lcd", "lvds" } },

View File

@ -499,6 +499,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{

View File

@ -562,6 +562,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{

View File

@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/ptrace.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/machdep.h>

View File

@ -223,3 +223,9 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
return (tc->impl->tag_destroy(dmat));
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}

View File

@ -42,14 +42,15 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
vm_page_t m;
vm_paddr_t pa;
void *va;
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc(NULL, 0,
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);

View File

@ -3654,6 +3654,24 @@ dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
return (dtrace_dif_varstr(
(uintptr_t)curthread->t_procp->p_zone->zone_name,
state, mstate));
#elif defined(__FreeBSD__)
/*
* On FreeBSD, we provide compatibility with zonename by falling through
* into jailname.
*/
case DIF_VAR_JAILNAME:
if (!dtrace_priv_kernel(state))
return (0);
return (dtrace_dif_varstr(
(uintptr_t)curthread->t_procp->p_ucred->cr_prison->pr_name,
state, mstate));
case DIF_VAR_JID:
if (!dtrace_priv_kernel(state))
return (0);
return ((uint64_t)curthread->t_procp->p_ucred->cr_prison->pr_id);
#else
return (0);
#endif

View File

@ -254,6 +254,8 @@ typedef enum dtrace_probespec {
#define DIF_VAR_GID 0x011f /* process group ID */
#define DIF_VAR_ERRNO 0x0120 /* thread errno */
#define DIF_VAR_EXECARGS 0x0121 /* process arguments */
#define DIF_VAR_JID 0x0122 /* process jail id */
#define DIF_VAR_JAILNAME 0x0123 /* process jail name */
#ifndef illumos
#define DIF_VAR_CPU 0x0200

View File

@ -3016,6 +3016,24 @@ freebsd32_cpuset_setaffinity(struct thread *td,
PAIR32TO64(id_t,uap->id), uap->cpusetsize, uap->mask));
}
int
freebsd32_cpuset_getdomain(struct thread *td,
struct freebsd32_cpuset_getdomain_args *uap)
{
return (kern_cpuset_getdomain(td, uap->level, uap->which,
PAIR32TO64(id_t,uap->id), uap->domainsetsize, uap->mask, uap->policy));
}
int
freebsd32_cpuset_setdomain(struct thread *td,
struct freebsd32_cpuset_setdomain_args *uap)
{
return (kern_cpuset_setdomain(td, uap->level, uap->which,
PAIR32TO64(id_t,uap->id), uap->domainsetsize, uap->mask, uap->policy));
}
int
freebsd32_nmount(struct thread *td,
struct freebsd32_nmount_args /* {

View File

@ -11,6 +11,7 @@
#include <sys/signal.h>
#include <sys/acl.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/_ffcounter.h>
#include <sys/_semaphore.h>
#include <sys/ucontext.h>
@ -693,6 +694,24 @@ struct freebsd32_kevent_args {
char nevents_l_[PADL_(int)]; int nevents; char nevents_r_[PADR_(int)];
char timeout_l_[PADL_(const struct timespec32 *)]; const struct timespec32 * timeout; char timeout_r_[PADR_(const struct timespec32 *)];
};
struct freebsd32_cpuset_getdomain_args {
char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)];
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id1_l_[PADL_(uint32_t)]; uint32_t id1; char id1_r_[PADR_(uint32_t)];
char id2_l_[PADL_(uint32_t)]; uint32_t id2; char id2_r_[PADR_(uint32_t)];
char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)];
char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)];
char policy_l_[PADL_(int *)]; int * policy; char policy_r_[PADR_(int *)];
};
struct freebsd32_cpuset_setdomain_args {
char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)];
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id1_l_[PADL_(uint32_t)]; uint32_t id1; char id1_r_[PADR_(uint32_t)];
char id2_l_[PADL_(uint32_t)]; uint32_t id2; char id2_r_[PADR_(uint32_t)];
char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)];
char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)];
char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)];
};
#if !defined(PAD64_REQUIRED) && (defined(__powerpc__) || defined(__mips__))
#define PAD64_REQUIRED
#endif
@ -823,6 +842,8 @@ int freebsd32_fstatat(struct thread *, struct freebsd32_fstatat_args *);
int freebsd32_fhstat(struct thread *, struct freebsd32_fhstat_args *);
int freebsd32_getdirentries(struct thread *, struct freebsd32_getdirentries_args *);
int freebsd32_kevent(struct thread *, struct freebsd32_kevent_args *);
int freebsd32_cpuset_getdomain(struct thread *, struct freebsd32_cpuset_getdomain_args *);
int freebsd32_cpuset_setdomain(struct thread *, struct freebsd32_cpuset_setdomain_args *);
#ifdef COMPAT_43
@ -1370,6 +1391,8 @@ int freebsd11_freebsd32_mknodat(struct thread *, struct freebsd11_freebsd32_mkno
#define FREEBSD32_SYS_AUE_freebsd32_fhstat AUE_FHSTAT
#define FREEBSD32_SYS_AUE_freebsd32_getdirentries AUE_GETDIRENTRIES
#define FREEBSD32_SYS_AUE_freebsd32_kevent AUE_KEVENT
#define FREEBSD32_SYS_AUE_freebsd32_cpuset_getdomain AUE_NULL
#define FREEBSD32_SYS_AUE_freebsd32_cpuset_setdomain AUE_NULL
#undef PAD_
#undef PADL_

View File

@ -455,8 +455,6 @@
#define FREEBSD32_SYS_freebsd32_ppoll 545
#define FREEBSD32_SYS_freebsd32_futimens 546
#define FREEBSD32_SYS_freebsd32_utimensat 547
#define FREEBSD32_SYS_numa_getaffinity 548
#define FREEBSD32_SYS_numa_setaffinity 549
#define FREEBSD32_SYS_fdatasync 550
#define FREEBSD32_SYS_freebsd32_fstat 551
#define FREEBSD32_SYS_freebsd32_fstatat 552
@ -468,4 +466,6 @@
#define FREEBSD32_SYS_fhstatfs 558
#define FREEBSD32_SYS_mknodat 559
#define FREEBSD32_SYS_freebsd32_kevent 560
#define FREEBSD32_SYS_MAXSYSCALL 561
#define FREEBSD32_SYS_freebsd32_cpuset_getdomain 561
#define FREEBSD32_SYS_freebsd32_cpuset_setdomain 562
#define FREEBSD32_SYS_MAXSYSCALL 563

View File

@ -580,8 +580,8 @@ const char *freebsd32_syscallnames[] = {
"freebsd32_ppoll", /* 545 = freebsd32_ppoll */
"freebsd32_futimens", /* 546 = freebsd32_futimens */
"freebsd32_utimensat", /* 547 = freebsd32_utimensat */
"numa_getaffinity", /* 548 = numa_getaffinity */
"numa_setaffinity", /* 549 = numa_setaffinity */
"#548", /* 548 = numa_getaffinity */
"#549", /* 549 = numa_setaffinity */
"fdatasync", /* 550 = fdatasync */
"freebsd32_fstat", /* 551 = freebsd32_fstat */
"freebsd32_fstatat", /* 552 = freebsd32_fstatat */
@ -593,4 +593,6 @@ const char *freebsd32_syscallnames[] = {
"fhstatfs", /* 558 = fhstatfs */
"mknodat", /* 559 = mknodat */
"freebsd32_kevent", /* 560 = freebsd32_kevent */
"freebsd32_cpuset_getdomain", /* 561 = freebsd32_cpuset_getdomain */
"freebsd32_cpuset_setdomain", /* 562 = freebsd32_cpuset_setdomain */
};

View File

@ -629,8 +629,8 @@ struct sysent freebsd32_sysent[] = {
{ AS(freebsd32_ppoll_args), (sy_call_t *)freebsd32_ppoll, AUE_POLL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 545 = freebsd32_ppoll */
{ AS(freebsd32_futimens_args), (sy_call_t *)freebsd32_futimens, AUE_FUTIMES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 546 = freebsd32_futimens */
{ AS(freebsd32_utimensat_args), (sy_call_t *)freebsd32_utimensat, AUE_FUTIMESAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 547 = freebsd32_utimensat */
{ AS(numa_getaffinity_args), (sy_call_t *)sys_numa_getaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 548 = numa_getaffinity */
{ AS(numa_setaffinity_args), (sy_call_t *)sys_numa_setaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 549 = numa_setaffinity */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 548 = numa_getaffinity */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 549 = numa_setaffinity */
{ AS(fdatasync_args), (sy_call_t *)sys_fdatasync, AUE_FSYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 550 = fdatasync */
{ AS(freebsd32_fstat_args), (sy_call_t *)freebsd32_fstat, AUE_FSTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 551 = freebsd32_fstat */
{ AS(freebsd32_fstatat_args), (sy_call_t *)freebsd32_fstatat, AUE_FSTATAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 552 = freebsd32_fstatat */
@ -642,4 +642,6 @@ struct sysent freebsd32_sysent[] = {
{ AS(fhstatfs_args), (sy_call_t *)sys_fhstatfs, AUE_FHSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 558 = fhstatfs */
{ AS(mknodat_args), (sy_call_t *)sys_mknodat, AUE_MKNODAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 559 = mknodat */
{ AS(freebsd32_kevent_args), (sy_call_t *)freebsd32_kevent, AUE_KEVENT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 560 = freebsd32_kevent */
{ AS(freebsd32_cpuset_getdomain_args), (sy_call_t *)freebsd32_cpuset_getdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 561 = freebsd32_cpuset_getdomain */
{ AS(freebsd32_cpuset_setdomain_args), (sy_call_t *)freebsd32_cpuset_setdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 562 = freebsd32_cpuset_setdomain */
};

View File

@ -3150,24 +3150,6 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 4;
break;
}
/* numa_getaffinity */
case 548: {
struct numa_getaffinity_args *p = params;
iarg[0] = p->which; /* cpuwhich_t */
iarg[1] = p->id; /* id_t */
uarg[2] = (intptr_t) p->policy; /* struct vm_domain_policy * */
*n_args = 3;
break;
}
/* numa_setaffinity */
case 549: {
struct numa_setaffinity_args *p = params;
iarg[0] = p->which; /* cpuwhich_t */
iarg[1] = p->id; /* id_t */
uarg[2] = (intptr_t) p->policy; /* const struct vm_domain_policy * */
*n_args = 3;
break;
}
/* fdatasync */
case 550: {
struct fdatasync_args *p = params;
@ -3266,6 +3248,32 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 6;
break;
}
/* freebsd32_cpuset_getdomain */
case 561: {
struct freebsd32_cpuset_getdomain_args *p = params;
iarg[0] = p->level; /* cpulevel_t */
iarg[1] = p->which; /* cpuwhich_t */
uarg[2] = p->id1; /* uint32_t */
uarg[3] = p->id2; /* uint32_t */
uarg[4] = p->domainsetsize; /* size_t */
uarg[5] = (intptr_t) p->mask; /* domainset_t * */
uarg[6] = (intptr_t) p->policy; /* int * */
*n_args = 7;
break;
}
/* freebsd32_cpuset_setdomain */
case 562: {
struct freebsd32_cpuset_setdomain_args *p = params;
iarg[0] = p->level; /* cpulevel_t */
iarg[1] = p->which; /* cpuwhich_t */
uarg[2] = p->id1; /* uint32_t */
uarg[3] = p->id2; /* uint32_t */
uarg[4] = p->domainsetsize; /* size_t */
uarg[5] = (intptr_t) p->mask; /* domainset_t * */
iarg[6] = p->policy; /* int */
*n_args = 7;
break;
}
default:
*n_args = 0;
break;
@ -8563,38 +8571,6 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
/* numa_getaffinity */
case 548:
switch(ndx) {
case 0:
p = "cpuwhich_t";
break;
case 1:
p = "id_t";
break;
case 2:
p = "userland struct vm_domain_policy *";
break;
default:
break;
};
break;
/* numa_setaffinity */
case 549:
switch(ndx) {
case 0:
p = "cpuwhich_t";
break;
case 1:
p = "id_t";
break;
case 2:
p = "userland const struct vm_domain_policy *";
break;
default:
break;
};
break;
/* fdatasync */
case 550:
switch(ndx) {
@ -8768,6 +8744,62 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
/* freebsd32_cpuset_getdomain */
case 561:
switch(ndx) {
case 0:
p = "cpulevel_t";
break;
case 1:
p = "cpuwhich_t";
break;
case 2:
p = "uint32_t";
break;
case 3:
p = "uint32_t";
break;
case 4:
p = "size_t";
break;
case 5:
p = "userland domainset_t *";
break;
case 6:
p = "userland int *";
break;
default:
break;
};
break;
/* freebsd32_cpuset_setdomain */
case 562:
switch(ndx) {
case 0:
p = "cpulevel_t";
break;
case 1:
p = "cpuwhich_t";
break;
case 2:
p = "uint32_t";
break;
case 3:
p = "uint32_t";
break;
case 4:
p = "size_t";
break;
case 5:
p = "userland domainset_t *";
break;
case 6:
p = "int";
break;
default:
break;
};
break;
default:
break;
};
@ -10554,16 +10586,6 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* numa_getaffinity */
case 548:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* numa_setaffinity */
case 549:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* fdatasync */
case 550:
if (ndx == 0 || ndx == 1)
@ -10619,6 +10641,16 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* freebsd32_cpuset_getdomain */
case 561:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* freebsd32_cpuset_setdomain */
case 562:
if (ndx == 0 || ndx == 1)
p = "int";
break;
default:
break;
};

View File

@ -1086,12 +1086,8 @@
547 AUE_FUTIMESAT STD { int freebsd32_utimensat(int fd, \
char *path, \
struct timespec *times, int flag); }
548 AUE_NULL NOPROTO { int numa_getaffinity(cpuwhich_t which, \
id_t id, \
struct vm_domain_policy *policy); }
549 AUE_NULL NOPROTO { int numa_setaffinity(cpuwhich_t which, \
id_t id, \
const struct vm_domain_policy *policy); }
548 AUE_NULL UNIMPL numa_getaffinity
549 AUE_NULL UNIMPL numa_setaffinity
550 AUE_FSYNC NOPROTO { int fdatasync(int fd); }
551 AUE_FSTAT STD { int freebsd32_fstat(int fd, \
struct stat32 *ub); }
@ -1119,4 +1115,13 @@
struct kevent32 *eventlist, \
int nevents, \
const struct timespec32 *timeout); }
561 AUE_NULL STD { int freebsd32_cpuset_getdomain(cpulevel_t level, \
cpuwhich_t which, uint32_t id1, uint32_t id2, \
size_t domainsetsize, domainset_t *mask, \
int *policy); }
562 AUE_NULL STD { int freebsd32_cpuset_setdomain(cpulevel_t level, \
cpuwhich_t which, uint32_t id1, uint32_t id2, \
size_t domainsetsize, domainset_t *mask, \
int policy); }
; vim: syntax=off

View File

@ -3787,7 +3787,6 @@ kern/kern_module.c standard
kern/kern_mtxpool.c standard
kern/kern_mutex.c standard
kern/kern_ntptime.c standard
kern/kern_numa.c standard
kern/kern_osd.c standard
kern/kern_physio.c standard
kern/kern_pmc.c standard
@ -4837,7 +4836,7 @@ vm/swap_pager.c standard
vm/uma_core.c standard
vm/uma_dbg.c standard
vm/memguard.c optional DEBUG_MEMGUARD
vm/vm_domain.c standard
vm/vm_domainset.c standard
vm/vm_fault.c standard
vm/vm_glue.c standard
vm/vm_init.c standard

View File

@ -31,7 +31,6 @@ dev/adb/adb_if.m optional adb
dev/adb/adb_buttons.c optional adb
dev/agp/agp_apple.c optional agp powermac
dev/fb/fb.c optional sc
dev/fdt/fdt_powerpc.c optional fdt
# ofwbus depends on simplebus.
dev/fdt/simplebus.c optional aim | fdt
dev/hwpmc/hwpmc_e500.c optional hwpmc
@ -146,6 +145,7 @@ powerpc/mpc85xx/i2c.c optional iicbus fdt
powerpc/mpc85xx/isa.c optional mpc85xx isa
powerpc/mpc85xx/lbc.c optional mpc85xx
powerpc/mpc85xx/mpc85xx.c optional mpc85xx
powerpc/mpc85xx/mpc85xx_cache.c optional mpc85xx
powerpc/mpc85xx/mpc85xx_gpio.c optional mpc85xx gpio
powerpc/mpc85xx/platform_mpc85xx.c optional mpc85xx
powerpc/mpc85xx/pci_mpc85xx.c optional pci mpc85xx

View File

@ -2136,7 +2136,7 @@ ipf_p_ftp_eprt6(softf, fin, ip, nat, ftp, dlen)
SNPRINTF(s, left, "%x:%x", a >> 16, a & 0xffff);
left -= strlen(s);
s += strlen(s);
sprintf(s, "|%d|\r\n", port);
SNPRINTF(s, left, "|%d|\r\n", port);
#else
(void) sprintf(s, "EPRT %c2%c", delim, delim);
s += strlen(s);

View File

@ -103,7 +103,6 @@ buildnodes(addr, mask, nodes)
ipf_rdx_node_t nodes[2];
{
u_32_t maskbits;
u_32_t lastbits;
u_32_t lastmask;
u_32_t *last;
int masklen;
@ -117,7 +116,6 @@ buildnodes(addr, mask, nodes)
masklen = last - (u_32_t *)mask;
lastmask = *last;
}
lastbits = maskbits & 0x1f;
bzero(&nodes[0], sizeof(ipf_rdx_node_t) * 2);
nodes[0].maskbitcount = maskbits;

View File

@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <machine/kdb.h>
#include <machine/pcb.h>

View File

@ -60,6 +60,7 @@ ACPI_MODULE_NAME("PCI_ACPI")
struct acpi_hpcib_softc {
device_t ap_dev;
ACPI_HANDLE ap_handle;
bus_dma_tag_t ap_dma_tag;
int ap_flags;
uint32_t ap_osc_ctl;
@ -108,6 +109,7 @@ static int acpi_pcib_acpi_release_resource(device_t dev,
#endif
static int acpi_pcib_request_feature(device_t pcib, device_t dev,
enum pci_feature feature);
static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child);
static device_method_t acpi_pcib_acpi_methods[] = {
/* Device interface */
@ -136,6 +138,7 @@ static device_method_t acpi_pcib_acpi_methods[] = {
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus),
DEVMETHOD(bus_get_dma_tag, acpi_pcib_get_dma_tag),
/* pcib interface */
DEVMETHOD(pcib_maxslots, pcib_maxslots),
@ -366,6 +369,7 @@ acpi_pcib_acpi_attach(device_t dev)
rman_res_t start;
int rid;
#endif
int error, domain;
uint8_t busno;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@ -537,15 +541,33 @@ acpi_pcib_acpi_attach(device_t dev)
acpi_pcib_fetch_prt(dev, &sc->ap_prt);
error = bus_dma_tag_create(bus_get_dma_tag(dev), 1,
0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
NULL, NULL, BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->ap_dma_tag);
if (error != 0)
goto errout;
error = bus_get_domain(dev, &domain);
if (error == 0)
error = bus_dma_tag_set_domain(sc->ap_dma_tag, domain);
/* Don't fail to attach if the domain can't be queried or set. */
error = 0;
bus_generic_probe(dev);
if (device_add_child(dev, "pci", -1) == NULL) {
device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pcib_host_res_free(dev, &sc->ap_host_res);
#endif
return (ENXIO);
bus_dma_tag_destroy(sc->ap_dma_tag);
sc->ap_dma_tag = NULL;
error = ENXIO;
goto errout;
}
return (bus_generic_attach(dev));
errout:
device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pcib_host_res_free(dev, &sc->ap_host_res);
#endif
return (error);
}
/*
@ -753,3 +775,13 @@ acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature)
return (acpi_pcib_osc(sc, osc_ctl));
}
static bus_dma_tag_t
acpi_pcib_get_dma_tag(device_t bus, device_t child)
{
struct acpi_hpcib_softc *sc;
sc = device_get_softc(bus);
return (sc->ap_dma_tag);
}
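
The attach path above creates a DMA tag for the host bridge and then, where the platform reports one, pins the tag to the bridge's NUMA domain; per the comment in the hunk, failing to query or set the domain is deliberately non-fatal. The same pattern in isolation, as a sketch (example_dma_tag_init is a hypothetical helper; the bus_dma_tag_create argument list mirrors the call above):

static int
example_dma_tag_init(device_t dev, bus_dma_tag_t *tagp)
{
	int domain, error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED,
	    BUS_SPACE_MAXSIZE, 0, NULL, NULL, tagp);
	if (error != 0)
		return (error);
	/* Best effort: ignore domain failures, as the driver above does. */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(*tagp, domain);
	return (0);
}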

View File

@ -899,8 +899,6 @@ typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct mbuf *m);
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
@ -986,7 +984,6 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
unsigned int *pbar2_qid, u64 *pbar2_pa);
extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
#if defined(__i386__) || defined(__amd64__)
#define L1_CACHE_BYTES 128

View File

@ -1,144 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2009-2010 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Semihalf under sponsorship from
* the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <machine/intr_machdep.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/openfirm.h>
#include "ofw_bus_if.h"
#include "fdt_common.h"
static void
fdt_fixup_busfreq(phandle_t root, uint32_t div)
{
phandle_t sb, cpus, child;
pcell_t freq;
/*
* Do a strict check so as to skip non-SOC nodes, which also claim
* simple-bus compatibility such as eLBC etc.
*/
if ((sb = fdt_find_compatible(root, "simple-bus", 1)) == 0)
return;
/*
* This fixup uses /cpus/ bus-frequency prop value to set simple-bus
* bus-frequency property.
*/
if ((cpus = OF_finddevice("/cpus")) == -1)
return;
if ((child = OF_child(cpus)) == 0)
return;
if (OF_getprop(child, "bus-frequency", (void *)&freq,
sizeof(freq)) <= 0)
return;
if (div == 0)
return;
freq /= div;
OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq));
}
static void
fdt_fixup_busfreq_mpc85xx(phandle_t root)
{
fdt_fixup_busfreq(root, 1);
}
static void
fdt_fixup_busfreq_dpaa(phandle_t root)
{
fdt_fixup_busfreq(root, 2);
}
static void
fdt_fixup_fman(phandle_t root)
{
phandle_t node;
pcell_t freq;
if ((node = fdt_find_compatible(root, "simple-bus", 1)) == 0)
return;
if (OF_getprop(node, "bus-frequency", (void *)&freq,
sizeof(freq)) <= 0)
return;
/*
* Set clock-frequency for FMan nodes (only on QorIQ DPAA targets).
* That frequency is equal to /soc node bus-frequency.
*/
for (node = OF_child(node); node != 0; node = OF_peer(node)) {
if (ofw_bus_node_is_compatible(node, "fsl,fman") == 0)
continue;
if (OF_setprop(node, "clock-frequency", (void *)&freq,
sizeof(freq)) == -1) {
/*
* XXX Shall we take some actions if no clock-frequency
* property was found?
*/
}
}
}
struct fdt_fixup_entry fdt_fixup_table[] = {
{ "fsl,MPC8572DS", &fdt_fixup_busfreq_mpc85xx },
{ "MPC8555CDS", &fdt_fixup_busfreq_mpc85xx },
{ "fsl,P2020", &fdt_fixup_busfreq_mpc85xx },
{ "fsl,P2041RDB", &fdt_fixup_busfreq_dpaa },
{ "fsl,P2041RDB", &fdt_fixup_fman },
{ "fsl,P3041DS", &fdt_fixup_busfreq_dpaa },
{ "fsl,P3041DS", &fdt_fixup_fman },
{ "fsl,P5020DS", &fdt_fixup_busfreq_dpaa },
{ "fsl,P5020DS", &fdt_fixup_fman },
{ "varisys,CYRUS", &fdt_fixup_busfreq_dpaa },
{ "varisys,CYRUS", &fdt_fixup_fman },
{ NULL, NULL }
};

View File

@ -41,6 +41,8 @@ __FBSDID("$FreeBSD$");
#include <dev/mmc/host/dwmmc_var.h>
#include "opt_mmccam.h"
static device_probe_t hisi_dwmmc_probe;
static device_attach_t hisi_dwmmc_attach;

View File

@ -1868,7 +1868,7 @@ mmc_discover_cards(struct mmc_softc *sc)
* units of 10 ms), defaulting to 500 ms.
*/
ivar->cmd6_time = 500 * 1000;
if (ivar->csd.spec_vers >= 6)
if (ivar->raw_ext_csd[EXT_CSD_REV] >= 6)
ivar->cmd6_time = 10 *
ivar->raw_ext_csd[EXT_CSD_GEN_CMD6_TIME];
/* Handle HC erase sector size. */

View File

@ -920,7 +920,7 @@ mmcsd_ioctl_cmd(struct mmcsd_part *part, struct mmc_ioc_cmd *mic, int fflag)
*/
if (cmd.opcode == MMC_SWITCH_FUNC && dp != NULL &&
(((uint8_t *)dp)[EXT_CSD_PART_CONFIG] &
EXT_CSD_PART_CONFIG_ACC_MASK) != sc->part_curr) {
EXT_CSD_PART_CONFIG_ACC_MASK) != part->type) {
err = EINVAL;
goto out;
}

View File

@ -430,7 +430,7 @@ ofw_fdt_package_to_path(ofw_t ofw, phandle_t package, char *buf, size_t len)
return (-1);
}
#if defined(FDT_MARVELL) || defined(__powerpc__)
#if defined(FDT_MARVELL)
static int
ofw_fdt_fixup(ofw_t ofw)
{
@ -477,7 +477,7 @@ ofw_fdt_fixup(ofw_t ofw)
static int
ofw_fdt_interpret(ofw_t ofw, const char *cmd, int nret, cell_t *retvals)
{
#if defined(FDT_MARVELL) || defined(__powerpc__)
#if defined(FDT_MARVELL)
int rv;
/*

View File

@ -273,7 +273,7 @@ sdhci_tuning_intmask(struct sdhci_slot *slot)
uint32_t intmask;
intmask = 0;
if (slot->opt & SDHCI_TUNING_SUPPORTED) {
if (slot->opt & SDHCI_TUNING_ENABLED) {
intmask |= SDHCI_INT_TUNEERR;
if (slot->retune_mode == SDHCI_RETUNE_MODE_2 ||
slot->retune_mode == SDHCI_RETUNE_MODE_3)
@ -1439,9 +1439,17 @@ sdhci_exec_tuning(struct sdhci_slot *slot, bool reset)
DELAY(1000);
}
/*
* Restore DMA usage and interrupts.
* Note that the interrupt aggregation code might have cleared
* SDHCI_INT_DMA_END and/or SDHCI_INT_RESPONSE in slot->intmask
* and SDHCI_SIGNAL_ENABLE respectively so ensure SDHCI_INT_ENABLE
* doesn't lose these.
*/
slot->opt = opt;
slot->intmask = intmask;
WR4(slot, SDHCI_INT_ENABLE, intmask);
WR4(slot, SDHCI_INT_ENABLE, intmask | SDHCI_INT_DMA_END |
SDHCI_INT_RESPONSE);
WR4(slot, SDHCI_SIGNAL_ENABLE, intmask);
if ((hostctrl2 & (SDHCI_CTRL2_EXEC_TUNING |

View File

@ -97,7 +97,7 @@ __FBSDID("$FreeBSD$");
enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg,
w83627dhg_p, w83667hg_b, nct6775, nct6776, nct6779, nct6791,
nct6792, nct6102 };
nct6792, nct6793, nct6795, nct6102 };
struct wb_softc {
device_t dev;
@ -252,6 +252,16 @@ struct winbond_vendor_device_id {
.chip = nct6792,
.descr = "Nuvoton NCT6792",
},
{
.device_id = 0xd1,
.chip = nct6793,
.descr = "Nuvoton NCT6793",
},
{
.device_id = 0xd3,
.chip = nct6795,
.descr = "Nuvoton NCT6795",
},
};
static void
@ -810,6 +820,8 @@ wb_attach(device_t dev)
case nct6779:
case nct6791:
case nct6792:
case nct6793:
case nct6795:
case nct6102:
/*
* These chips have a fixed WDTO# output pin (W83627UHG),

View File

@ -338,8 +338,8 @@ static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
static int pmap_unuse_pt(pmap_t, vm_offset_t, struct spglist *);
#if defined(PAE) || defined(PAE_TABLES)
static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
int wait);
static void *pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain,
uint8_t *flags, int wait);
#endif
static void pmap_set_pg(void);
@ -697,12 +697,13 @@ pmap_page_init(vm_page_t m)
#if defined(PAE) || defined(PAE_TABLES)
static void *
pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
pmap_pdpt_allocf(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
int wait)
{
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait, 0x0ULL,
return ((void *)kmem_alloc_contig_domain(domain, bytes, wait, 0x0ULL,
0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
}
#endif

View File

@ -89,7 +89,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_domain.h>
#include <sys/copyright.h>
#include <ddb/ddb.h>
@ -497,10 +496,7 @@ proc0_init(void *dummy __unused)
td->td_flags = TDF_INMEM;
td->td_pflags = TDP_KTHREAD;
td->td_cpuset = cpuset_thread0();
vm_domain_policy_init(&td->td_vm_dom_policy);
vm_domain_policy_set(&td->td_vm_dom_policy, VM_POLICY_NONE, -1);
vm_domain_policy_init(&p->p_vm_dom_policy);
vm_domain_policy_set(&p->p_vm_dom_policy, VM_POLICY_NONE, -1);
td->td_domain.dr_policy = td->td_cpuset->cs_domain;
prison0_init();
p->p_peers = 0;
p->p_leader = p;

View File

@ -599,8 +599,8 @@ struct sysent sysent[] = {
{ AS(ppoll_args), (sy_call_t *)sys_ppoll, AUE_POLL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 545 = ppoll */
{ AS(futimens_args), (sy_call_t *)sys_futimens, AUE_FUTIMES, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 546 = futimens */
{ AS(utimensat_args), (sy_call_t *)sys_utimensat, AUE_FUTIMESAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 547 = utimensat */
{ AS(numa_getaffinity_args), (sy_call_t *)sys_numa_getaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 548 = numa_getaffinity */
{ AS(numa_setaffinity_args), (sy_call_t *)sys_numa_setaffinity, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 549 = numa_setaffinity */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 548 = numa_getaffinity */
{ 0, (sy_call_t *)nosys, AUE_NULL, NULL, 0, 0, 0, SY_THR_ABSENT }, /* 549 = numa_setaffinity */
{ AS(fdatasync_args), (sy_call_t *)sys_fdatasync, AUE_FSYNC, NULL, 0, 0, 0, SY_THR_STATIC }, /* 550 = fdatasync */
{ AS(fstat_args), (sy_call_t *)sys_fstat, AUE_FSTAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 551 = fstat */
{ AS(fstatat_args), (sy_call_t *)sys_fstatat, AUE_FSTATAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 552 = fstatat */
@ -612,4 +612,6 @@ struct sysent sysent[] = {
{ AS(fhstatfs_args), (sy_call_t *)sys_fhstatfs, AUE_FHSTATFS, NULL, 0, 0, 0, SY_THR_STATIC }, /* 558 = fhstatfs */
{ AS(mknodat_args), (sy_call_t *)sys_mknodat, AUE_MKNODAT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 559 = mknodat */
{ AS(kevent_args), (sy_call_t *)sys_kevent, AUE_KEVENT, NULL, 0, 0, SYF_CAPENABLED, SY_THR_STATIC }, /* 560 = kevent */
{ AS(cpuset_getdomain_args), (sy_call_t *)sys_cpuset_getdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 561 = cpuset_getdomain */
{ AS(cpuset_setdomain_args), (sy_call_t *)sys_cpuset_setdomain, AUE_NULL, NULL, 0, 0, 0, SY_THR_STATIC }, /* 562 = cpuset_setdomain */
};

File diff suppressed because it is too large

View File

@ -88,7 +88,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
@ -931,10 +930,6 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
#ifdef MAC
mac_proc_destroy(p);
#endif
/*
* Free any domain policy that's still hiding around.
*/
vm_domain_policy_cleanup(&p->p_vm_dom_policy);
KASSERT(FIRST_THREAD_IN_PROC(p),
("proc_reap: no residual thread!"));

View File

@ -83,7 +83,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
@ -512,14 +511,6 @@ do_fork(struct thread *td, struct fork_req *fr, struct proc *p2, struct thread *
if (p1->p_flag & P_PROFIL)
startprofclock(p2);
/*
* Whilst the proc lock is held, copy the VM domain data out
* using the VM domain method.
*/
vm_domain_policy_init(&p2->p_vm_dom_policy);
vm_domain_policy_localcopy(&p2->p_vm_dom_policy,
&p1->p_vm_dom_policy);
if (fr->fr_flags & RFSIGSHARE) {
p2->p_sigacts = sigacts_hold(p1->p_sigacts);
} else {

View File

@ -96,6 +96,11 @@ __FBSDID("$FreeBSD$");
dtrace_malloc_probe_func_t dtrace_malloc_probe;
#endif
#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define MALLOC_DEBUG 1
#endif
/*
* When realloc() is called, if the new size is sufficiently smaller than
* the old size, realloc() will allocate a new, smaller block to avoid
@ -417,6 +422,20 @@ contigmalloc(unsigned long size, struct malloc_type *type, int flags,
return (ret);
}
void *
contigmalloc_domain(unsigned long size, struct malloc_type *type,
int domain, int flags, vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, vm_paddr_t boundary)
{
void *ret;
ret = (void *)kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, VM_MEMATTR_DEFAULT);
if (ret != NULL)
malloc_type_allocated(type, round_page(size));
return (ret);
}
/*
* contigfree:
*
@ -432,26 +451,14 @@ contigfree(void *addr, unsigned long size, struct malloc_type *type)
malloc_type_freed(type, round_page(size));
}
/*
* malloc:
*
* Allocate a block of memory.
*
* If M_NOWAIT is set, this routine will not block and return NULL if
* the allocation fails.
*/
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, unsigned long *sizep, struct malloc_type *mtp,
int flags)
{
int indx;
struct malloc_type_internal *mtip;
caddr_t va;
uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
unsigned long osize = size;
#endif
#ifdef INVARIANTS
int indx;
KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
/*
* Check that exactly one of M_WAITOK or M_NOWAIT is specified.
@ -474,7 +481,8 @@ malloc(unsigned long size, struct malloc_type *mtp, int flags)
if ((malloc_nowait_count % malloc_failure_rate) == 0) {
atomic_add_int(&malloc_failure_count, 1);
t_malloc_fail = time_uptime;
return (NULL);
*vap = NULL;
return (EJUSTRETURN);
}
}
#endif
@ -485,16 +493,44 @@ malloc(unsigned long size, struct malloc_type *mtp, int flags)
("malloc: called with spinlock or critical section held"));
#ifdef DEBUG_MEMGUARD
if (memguard_cmp_mtp(mtp, size)) {
va = memguard_alloc(size, flags);
if (va != NULL)
return (va);
if (memguard_cmp_mtp(mtp, *sizep)) {
*vap = memguard_alloc(*sizep, flags);
if (*vap != NULL)
return (EJUSTRETURN);
/* This is unfortunate but should not be fatal. */
}
#endif
#ifdef DEBUG_REDZONE
size = redzone_size_ntor(size);
*sizep = redzone_size_ntor(*sizep);
#endif
return (0);
}
#endif
/*
* malloc:
*
* Allocate a block of memory.
*
* If M_NOWAIT is set, this routine will not block and return NULL if
* the allocation fails.
*/
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
int indx;
struct malloc_type_internal *mtip;
caddr_t va;
uma_zone_t zone;
#if defined(DEBUG_REDZONE)
unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
if (malloc_dbg(&va, &size, mtp, flags) != 0)
return (va);
#endif
if (size <= kmem_zmax) {
@ -523,11 +559,55 @@ malloc(unsigned long size, struct malloc_type *mtp, int flags)
KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
else if (va == NULL)
t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
if (va != NULL && !(flags & M_ZERO)) {
memset(va, 0x70, osize);
}
#ifdef DEBUG_REDZONE
if (va != NULL)
va = redzone_setup(va, osize);
#endif
return ((void *) va);
}
void *
malloc_domain(unsigned long size, struct malloc_type *mtp, int domain,
int flags)
{
int indx;
struct malloc_type_internal *mtip;
caddr_t va;
uma_zone_t zone;
#if defined(DEBUG_REDZONE)
unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
if (malloc_dbg(&va, &size, mtp, flags) != 0)
return (va);
#endif
if (size <= kmem_zmax) {
mtip = mtp->ks_handle;
if (size & KMEM_ZMASK)
size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
indx = kmemsize[size >> KMEM_ZSHIFT];
KASSERT(mtip->mti_zone < numzones,
("mti_zone %u out of range %d",
mtip->mti_zone, numzones));
zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
krequests[size >> KMEM_ZSHIFT]++;
#endif
va = uma_zalloc_domain(zone, NULL, domain, flags);
if (va != NULL)
size = zone->uz_size;
malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
} else {
size = roundup(size, PAGE_SIZE);
zone = NULL;
va = uma_large_malloc_domain(size, domain, flags);
malloc_type_allocated(mtp, va == NULL ? 0 : size);
}
if (flags & M_WAITOK)
KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
else if (va == NULL)
t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
if (va != NULL)
va = redzone_setup(va, osize);
@ -545,6 +625,58 @@ mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
return (malloc(size * nmemb, type, flags));
}
#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
struct malloc_type **mtpp = addr;
/*
* Cache a pointer to the malloc_type that most recently freed
* this memory here. This way we know who is most likely to
* have stepped on it later.
*
* This code assumes that size is a multiple of 8 bytes for
* 64 bit machines
*/
mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
mtpp += (size - sizeof(struct malloc_type *)) /
sizeof(struct malloc_type *);
*mtpp = mtp;
}
#endif
#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
void *addr;
addr = *addrp;
KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
("free: called with spinlock or critical section held"));
/* free(NULL, ...) does nothing */
if (addr == NULL)
return (EJUSTRETURN);
#ifdef DEBUG_MEMGUARD
if (is_memguard_addr(addr)) {
memguard_free(addr);
return (EJUSTRETURN);
}
#endif
#ifdef DEBUG_REDZONE
redzone_check(addr);
*addrp = redzone_addr_ntor(addr);
#endif
return (0);
}
#endif
/*
* free:
*
@ -558,51 +690,23 @@ free(void *addr, struct malloc_type *mtp)
uma_slab_t slab;
u_long size;
KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
("free: called with spinlock or critical section held"));
#ifdef MALLOC_DEBUG
if (free_dbg(&addr, mtp) != 0)
return;
#endif
/* free(NULL, ...) does nothing */
if (addr == NULL)
return;
#ifdef DEBUG_MEMGUARD
if (is_memguard_addr(addr)) {
memguard_free(addr);
return;
}
#endif
#ifdef DEBUG_REDZONE
redzone_check(addr);
addr = redzone_addr_ntor(addr);
#endif
slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
if (slab == NULL)
panic("free: address %p(%p) has not been allocated.\n",
addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
struct malloc_type **mtpp = addr;
#endif
size = slab->us_keg->uk_size;
#ifdef INVARIANTS
/*
* Cache a pointer to the malloc_type that most recently freed
* this memory here. This way we know who is most likely to
* have stepped on it later.
*
* This code assumes that size is a multiple of 8 bytes for
* 64 bit machines
*/
mtpp = (struct malloc_type **)
((unsigned long)mtpp & ~UMA_ALIGN_PTR);
mtpp += (size - sizeof(struct malloc_type *)) /
sizeof(struct malloc_type *);
*mtpp = mtp;
free_save_type(addr, mtp, size);
#endif
uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
} else {
@ -612,6 +716,40 @@ free(void *addr, struct malloc_type *mtp)
malloc_type_freed(mtp, size);
}
void
free_domain(void *addr, struct malloc_type *mtp)
{
uma_slab_t slab;
u_long size;
#ifdef MALLOC_DEBUG
if (free_dbg(&addr, mtp) != 0)
return;
#endif
/* free(NULL, ...) does nothing */
if (addr == NULL)
return;
slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
if (slab == NULL)
panic("free_domain: address %p(%p) has not been allocated.\n",
addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
size = slab->us_keg->uk_size;
#ifdef INVARIANTS
free_save_type(addr, mtp, size);
#endif
uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
addr, slab);
} else {
size = slab->us_size;
uma_large_free(slab);
}
malloc_type_freed(mtp, size);
}
/*
* realloc: change the size of a memory block
*/
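
malloc_domain, free_domain, and contigmalloc_domain above give kernel consumers a way to request memory from a specific NUMA domain while keeping the usual malloc(9) type accounting. A brief sketch against the signatures shown in this hunk (M_EXAMPLE and both helpers are hypothetical, not part of this commit):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_EXAMPLE, "example", "per-domain example buffers");

/* Allocate a zeroed buffer backed by memory from the given domain. */
static void *
example_alloc(int domain, unsigned long size)
{
	return (malloc_domain(size, M_EXAMPLE, domain, M_WAITOK | M_ZERO));
}

/* Release it with the matching domain-aware free. */
static void
example_free(void *buf)
{
	free_domain(buf, M_EXAMPLE);
}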

View File

@ -283,7 +283,7 @@ static void mb_dtor_pack(void *, int, void *);
static int mb_zinit_pack(void *, int, int);
static void mb_zfini_pack(void *, int);
static void mb_reclaim(uma_zone_t, int);
static void *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, uint8_t *, int);
static void *mbuf_jumbo_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
/* Ensure that MSIZE is a power of 2. */
CTASSERT((((MSIZE - 1) ^ MSIZE) + 1) >> 1 == MSIZE);
@ -386,12 +386,13 @@ SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbuf_init, NULL);
* pages.
*/
static void *
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags, int wait)
mbuf_jumbo_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
int wait)
{
/* Inform UMA that this allocator uses kernel_map/object. */
*flags = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_contig(kernel_arena, bytes, wait,
return ((void *)kmem_alloc_contig_domain(domain, bytes, wait,
(vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT));
}

View File

@ -1,169 +0,0 @@
/*-
* Copyright (c) 2015, Adrian Chadd <adrian@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/cpuset.h>
#include <sys/sx.h>
#include <sys/queue.h>
#include <sys/libkern.h>
#include <sys/limits.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <vm/vm_domain.h>
int
sys_numa_setaffinity(struct thread *td, struct numa_setaffinity_args *uap)
{
int error;
struct vm_domain_policy vp;
struct thread *ttd;
struct proc *p;
struct cpuset *set;
set = NULL;
p = NULL;
/*
* Copy in just the policy information into the policy
* struct. Userland only supplies vm_domain_policy_entry.
*/
error = copyin(uap->policy, &vp.p, sizeof(vp.p));
if (error)
goto out;
/*
* Ensure the seq number is zero - otherwise seq.h
* may get very confused.
*/
vp.seq = 0;
/*
* Validate policy.
*/
if (vm_domain_policy_validate(&vp) != 0) {
error = EINVAL;
goto out;
}
/*
* Go find the desired proc/tid for this operation.
*/
error = cpuset_which(uap->which, uap->id, &p,
&ttd, &set);
if (error)
goto out;
/* Only handle CPU_WHICH_TID and CPU_WHICH_PID */
/*
* XXX if cpuset_which is called with WHICH_CPUSET and NULL cpuset,
* it'll return ESRCH. We should just return EINVAL.
*/
switch (uap->which) {
case CPU_WHICH_TID:
vm_domain_policy_copy(&ttd->td_vm_dom_policy, &vp);
break;
case CPU_WHICH_PID:
vm_domain_policy_copy(&p->p_vm_dom_policy, &vp);
break;
default:
error = EINVAL;
break;
}
PROC_UNLOCK(p);
out:
if (set)
cpuset_rel(set);
return (error);
}
int
sys_numa_getaffinity(struct thread *td, struct numa_getaffinity_args *uap)
{
int error;
struct vm_domain_policy vp;
struct thread *ttd;
struct proc *p;
struct cpuset *set;
set = NULL;
p = NULL;
error = cpuset_which(uap->which, uap->id, &p,
&ttd, &set);
if (error)
goto out;
/* Only handle CPU_WHICH_TID and CPU_WHICH_PID */
/*
* XXX if cpuset_which is called with WHICH_CPUSET and NULL cpuset,
* it'll return ESRCH. We should just return EINVAL.
*/
switch (uap->which) {
case CPU_WHICH_TID:
vm_domain_policy_localcopy(&vp, &ttd->td_vm_dom_policy);
break;
case CPU_WHICH_PID:
vm_domain_policy_localcopy(&vp, &p->p_vm_dom_policy);
break;
default:
error = EINVAL;
break;
}
if (p)
PROC_UNLOCK(p);
/*
* Copy out only the vm_domain_policy_entry part.
*/
if (error == 0)
error = copyout(&vp.p, uap->policy, sizeof(vp.p));
out:
if (set)
cpuset_rel(set);
return (error);
}

View File

@ -771,9 +771,9 @@ __rw_runlock_hard(struct rwlock *rw, struct thread *td, uintptr_t v
turnstile_chain_lock(&rw->lock_object);
v = RW_READ_VALUE(rw);
retry_ts:
if (__predict_false(RW_READERS(v) > 1)) {
if (__rw_runlock_try(rw, td, &v)) {
turnstile_chain_unlock(&rw->lock_object);
continue;
break;
}
v &= (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);

View File

@ -1193,7 +1193,7 @@ _sx_sunlock_try(struct sx *sx, uintptr_t *xp)
static void __noinline
_sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
{
int wakeup_swapper;
int wakeup_swapper = 0;
uintptr_t setx;
if (SCHEDULER_STOPPED())
@ -1213,6 +1213,9 @@ _sx_sunlock_hard(struct sx *sx, uintptr_t x LOCK_FILE_LINE_ARG_DEF)
for (;;) {
MPASS(x & SX_LOCK_EXCLUSIVE_WAITERS);
MPASS(!(x & SX_LOCK_SHARED_WAITERS));
if (_sx_sunlock_try(sx, &x))
break;
/*
* Wake up semantic here is quite simple:
* Just wake up all the exclusive waiters.

View File

@ -57,8 +57,6 @@ __FBSDID("$FreeBSD$");
#include <sys/umtx.h>
#include <sys/limits.h>
#include <vm/vm_domain.h>
#include <machine/frame.h>
#include <security/audit/audit.h>
@ -260,12 +258,6 @@ thread_create(struct thread *td, struct rtprio *rtp,
if (p->p_ptevents & PTRACE_LWP)
newtd->td_dbgflags |= TDB_BORN;
/*
* Copy the existing thread VM policy into the new thread.
*/
vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
&td->td_vm_dom_policy);
PROC_UNLOCK(p);
tidhash_add(newtd);

View File

@ -64,7 +64,6 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/vm_domain.h>
#include <sys/eventhandler.h>
/*
@ -78,13 +77,13 @@ __FBSDID("$FreeBSD$");
* structures.
*/
#ifdef __amd64__
_Static_assert(offsetof(struct thread, td_flags) == 0xf4,
_Static_assert(offsetof(struct thread, td_flags) == 0xfc,
"struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xfc,
_Static_assert(offsetof(struct thread, td_pflags) == 0x104,
"struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x460,
_Static_assert(offsetof(struct thread, td_frame) == 0x468,
"struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x508,
_Static_assert(offsetof(struct thread, td_emuldata) == 0x510,
"struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0xb0,
"struct proc KBI p_flag");
@@ -98,13 +97,13 @@ _Static_assert(offsetof(struct proc, p_emuldata) == 0x4b8,
"struct proc KBI p_emuldata");
#endif
#ifdef __i386__
_Static_assert(offsetof(struct thread, td_flags) == 0x9c,
_Static_assert(offsetof(struct thread, td_flags) == 0x98,
"struct thread KBI td_flags");
_Static_assert(offsetof(struct thread, td_pflags) == 0xa4,
_Static_assert(offsetof(struct thread, td_pflags) == 0xa0,
"struct thread KBI td_pflags");
_Static_assert(offsetof(struct thread, td_frame) == 0x2ec,
_Static_assert(offsetof(struct thread, td_frame) == 0x2e4,
"struct thread KBI td_frame");
_Static_assert(offsetof(struct thread, td_emuldata) == 0x338,
_Static_assert(offsetof(struct thread, td_emuldata) == 0x330,
"struct thread KBI td_emuldata");
_Static_assert(offsetof(struct proc, p_flag) == 0x68,
"struct proc KBI p_flag");
@@ -413,7 +412,6 @@ thread_alloc(int pages)
return (NULL);
}
cpu_thread_alloc(td);
vm_domain_policy_init(&td->td_vm_dom_policy);
return (td);
}
@@ -443,7 +441,6 @@ thread_free(struct thread *td)
cpu_thread_free(td);
if (td->td_kstack != 0)
vm_thread_dispose(td);
vm_domain_policy_cleanup(&td->td_vm_dom_policy);
callout_drain(&td->td_slpcallout);
uma_zfree(thread_zone, td);
}

View File

@@ -139,6 +139,7 @@ sed -e '
printf "#include <sys/signal.h>\n" > sysarg
printf "#include <sys/acl.h>\n" > sysarg
printf "#include <sys/cpuset.h>\n" > sysarg
printf "#include <sys/domainset.h>\n" > sysarg
printf "#include <sys/_ffcounter.h>\n" > sysarg
printf "#include <sys/_semaphore.h>\n" > sysarg
printf "#include <sys/ucontext.h>\n" > sysarg

View File

@@ -781,6 +781,7 @@ sched_fork_thread(struct thread *td, struct thread *childtd)
childtd->td_lastcpu = NOCPU;
childtd->td_lock = &sched_lock;
childtd->td_cpuset = cpuset_ref(td->td_cpuset);
childtd->td_domain.dr_policy = td->td_cpuset->cs_domain;
childtd->td_priority = childtd->td_base_pri;
ts = td_get_sched(childtd);
bzero(ts, sizeof(*ts));

View File

@@ -2131,6 +2131,7 @@ sched_fork_thread(struct thread *td, struct thread *child)
child->td_lastcpu = NOCPU;
child->td_lock = TDQ_LOCKPTR(tdq);
child->td_cpuset = cpuset_ref(td->td_cpuset);
child->td_domain.dr_policy = td->td_cpuset->cs_domain;
ts2->ts_cpu = ts->ts_cpu;
ts2->ts_flags = 0;
/*

View File

@@ -149,7 +149,7 @@ busdma_bufalloc_findzone(busdma_bufalloc_t ba, bus_size_t size)
}
void *
busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size, int domain,
uint8_t *pflag, int wait)
{
#ifdef VM_MEMATTR_UNCACHEABLE
@@ -157,7 +157,7 @@ busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
/* Inform UMA that this allocator uses kernel_arena/object. */
*pflag = UMA_SLAB_KERNEL;
return ((void *)kmem_alloc_attr(kernel_arena, size, wait, 0,
return ((void *)kmem_alloc_attr_domain(domain, size, wait, 0,
BUS_SPACE_MAXADDR, VM_MEMATTR_UNCACHEABLE));
#else

View File

@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
@@ -84,6 +85,7 @@ static int kdb_sysctl_enter(SYSCTL_HANDLER_ARGS);
static int kdb_sysctl_panic(SYSCTL_HANDLER_ARGS);
static int kdb_sysctl_trap(SYSCTL_HANDLER_ARGS);
static int kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS);
static int kdb_sysctl_stack_overflow(SYSCTL_HANDLER_ARGS);
static SYSCTL_NODE(_debug, OID_AUTO, kdb, CTLFLAG_RW, NULL, "KDB nodes");
@@ -109,6 +111,10 @@ SYSCTL_PROC(_debug_kdb, OID_AUTO, trap_code,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
kdb_sysctl_trap_code, "I", "set to cause a page fault via code access");
SYSCTL_PROC(_debug_kdb, OID_AUTO, stack_overflow,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
kdb_sysctl_stack_overflow, "I", "set to cause a stack overflow");
SYSCTL_INT(_debug_kdb, OID_AUTO, break_to_debugger,
CTLFLAG_RWTUN | CTLFLAG_SECURE,
&kdb_break_to_debugger, 0, "Enable break to debugger");
@@ -224,6 +230,36 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
return (0);
}
static void kdb_stack_overflow(volatile int *x) __noinline;
static void
kdb_stack_overflow(volatile int *x)
{
if (*x > 10000000)
return;
kdb_stack_overflow(x);
*x += PCPU_GET(cpuid) / 1000000;
}
static int
kdb_sysctl_stack_overflow(SYSCTL_HANDLER_ARGS)
{
int error, i;
volatile int x;
error = sysctl_wire_old_buffer(req, sizeof(int));
if (error == 0) {
i = 0;
error = sysctl_handle_int(oidp, &i, 0, req);
}
if (error != 0 || req->newptr == NULL)
return (error);
x = 0;
kdb_stack_overflow(&x);
return (0);
}
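Writing any value to the new OID runs the recursion above until the thread overruns its kernel stack, which is useful for exercising stack guard pages. A usage sketch, assuming the stock sysctl(8) utility and a securelevel low enough to pass the CTLFLAG_SECURE check:

	sysctl debug.kdb.stack_overflow=1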
void
kdb_panic(const char *msg)
{

View File

@@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#define VMEM_OPTORDER 5
@@ -186,6 +187,7 @@ static struct task vmem_periodic_wk;
static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
static uma_zone_t vmem_zone;
/* ---- misc */
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
@@ -255,11 +257,11 @@ bt_fill(vmem_t *vm, int flags)
VMEM_ASSERT_LOCKED(vm);
/*
* Only allow the kernel arena to dip into reserve tags. It is the
* vmem where new tags come from.
* Only allow the kernel arena and arenas derived from kernel arena to
* dip into reserve tags. They are where new tags come from.
*/
flags &= BT_FLAGS;
if (vm != kernel_arena)
if (vm != kernel_arena && vm->vm_arg != kernel_arena)
flags &= ~M_USE_RESERVE;
/*
@@ -498,7 +500,7 @@ bt_insfree(vmem_t *vm, bt_t *bt)
* Import from the arena into the quantum cache in UMA.
*/
static int
qc_import(void *arg, void **store, int cnt, int flags)
qc_import(void *arg, void **store, int cnt, int domain, int flags)
{
qcache_t *qc;
vmem_addr_t addr;
@@ -612,7 +614,8 @@ static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
* we are really out of KVA.
*/
static void *
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
int wait)
{
vmem_addr_t addr;
@@ -623,15 +626,15 @@ vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag, int wait)
* and memory are added in one atomic operation.
*/
mtx_lock(&vmem_bt_lock);
if (vmem_xalloc(kernel_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
&addr) == 0) {
if (kmem_back(kernel_object, addr, bytes,
if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
VMEM_ADDR_MIN, VMEM_ADDR_MAX,
M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
if (kmem_back_domain(domain, kernel_object, addr, bytes,
M_NOWAIT | M_USE_RESERVE) == 0) {
mtx_unlock(&vmem_bt_lock);
return ((void *)addr);
}
vmem_xfree(kernel_arena, addr, bytes);
vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
mtx_unlock(&vmem_bt_lock);
/*
* Out of memory, not address space. This may not even be
@@ -657,9 +660,12 @@ vmem_startup(void)
{
mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
vmem_zone = uma_zcreate("vmem",
sizeof(struct vmem), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
vmem_bt_zone = uma_zcreate("vmem btag",
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_VM);
UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
#ifndef UMA_MD_SMALL_ALLOC
mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
@@ -826,7 +832,7 @@ vmem_destroy1(vmem_t *vm)
VMEM_CONDVAR_DESTROY(vm);
VMEM_LOCK_DESTROY(vm);
free(vm, M_VMEM);
uma_zfree(vmem_zone, vm);
}
static int
@@ -1058,7 +1064,7 @@ vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
vmem_t *vm;
vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
if (vm == NULL)
return (NULL);
if (vmem_init(vm, name, base, size, quantum, qcache_max,

View File

@@ -554,8 +554,8 @@ const char *syscallnames[] = {
"ppoll", /* 545 = ppoll */
"futimens", /* 546 = futimens */
"utimensat", /* 547 = utimensat */
"numa_getaffinity", /* 548 = numa_getaffinity */
"numa_setaffinity", /* 549 = numa_setaffinity */
"#548", /* 548 = numa_getaffinity */
"#549", /* 549 = numa_setaffinity */
"fdatasync", /* 550 = fdatasync */
"fstat", /* 551 = fstat */
"fstatat", /* 552 = fstatat */
@@ -567,4 +567,6 @@ const char *syscallnames[] = {
"fhstatfs", /* 558 = fhstatfs */
"mknodat", /* 559 = mknodat */
"kevent", /* 560 = kevent */
"cpuset_getdomain", /* 561 = cpuset_getdomain */
"cpuset_setdomain", /* 562 = cpuset_setdomain */
};

View File

@@ -997,12 +997,8 @@
547 AUE_FUTIMESAT STD { int utimensat(int fd, \
char *path, \
struct timespec *times, int flag); }
548 AUE_NULL STD { int numa_getaffinity(cpuwhich_t which, \
id_t id, \
struct vm_domain_policy_entry *policy); }
549 AUE_NULL STD { int numa_setaffinity(cpuwhich_t which, \
id_t id, const struct \
vm_domain_policy_entry *policy); }
548 AUE_NULL UNIMPL numa_getaffinity
549 AUE_NULL UNIMPL numa_setaffinity
550 AUE_FSYNC STD { int fdatasync(int fd); }
551 AUE_FSTAT STD { int fstat(int fd, struct stat *sb); }
552 AUE_FSTATAT STD { int fstatat(int fd, char *path, \
@@ -1023,6 +1019,14 @@
struct kevent *changelist, int nchanges, \
struct kevent *eventlist, int nevents, \
const struct timespec *timeout); }
561 AUE_NULL STD { int cpuset_getdomain(cpulevel_t level, \
cpuwhich_t which, id_t id, \
size_t domainsetsize, domainset_t *mask, \
int *policy); }
562 AUE_NULL STD { int cpuset_setdomain(cpulevel_t level, \
cpuwhich_t which, id_t id, \
size_t domainsetsize, domainset_t *mask, \
int policy); }
; Please copy any additions and changes to the following compatibility tables:
; sys/compat/freebsd32/syscalls.master

View File

@@ -3160,24 +3160,6 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 4;
break;
}
/* numa_getaffinity */
case 548: {
struct numa_getaffinity_args *p = params;
iarg[0] = p->which; /* cpuwhich_t */
iarg[1] = p->id; /* id_t */
uarg[2] = (intptr_t) p->policy; /* struct vm_domain_policy_entry * */
*n_args = 3;
break;
}
/* numa_setaffinity */
case 549: {
struct numa_setaffinity_args *p = params;
iarg[0] = p->which; /* cpuwhich_t */
iarg[1] = p->id; /* id_t */
uarg[2] = (intptr_t) p->policy; /* const struct vm_domain_policy_entry * */
*n_args = 3;
break;
}
/* fdatasync */
case 550: {
struct fdatasync_args *p = params;
@@ -3276,6 +3258,30 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
*n_args = 6;
break;
}
/* cpuset_getdomain */
case 561: {
struct cpuset_getdomain_args *p = params;
iarg[0] = p->level; /* cpulevel_t */
iarg[1] = p->which; /* cpuwhich_t */
iarg[2] = p->id; /* id_t */
uarg[3] = p->domainsetsize; /* size_t */
uarg[4] = (intptr_t) p->mask; /* domainset_t * */
uarg[5] = (intptr_t) p->policy; /* int * */
*n_args = 6;
break;
}
/* cpuset_setdomain */
case 562: {
struct cpuset_setdomain_args *p = params;
iarg[0] = p->level; /* cpulevel_t */
iarg[1] = p->which; /* cpuwhich_t */
iarg[2] = p->id; /* id_t */
uarg[3] = p->domainsetsize; /* size_t */
uarg[4] = (intptr_t) p->mask; /* domainset_t * */
iarg[5] = p->policy; /* int */
*n_args = 6;
break;
}
default:
*n_args = 0;
break;
@@ -8523,38 +8529,6 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
/* numa_getaffinity */
case 548:
switch(ndx) {
case 0:
p = "cpuwhich_t";
break;
case 1:
p = "id_t";
break;
case 2:
p = "userland struct vm_domain_policy_entry *";
break;
default:
break;
};
break;
/* numa_setaffinity */
case 549:
switch(ndx) {
case 0:
p = "cpuwhich_t";
break;
case 1:
p = "id_t";
break;
case 2:
p = "userland const struct vm_domain_policy_entry *";
break;
default:
break;
};
break;
/* fdatasync */
case 550:
switch(ndx) {
@@ -8728,6 +8702,56 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
break;
};
break;
/* cpuset_getdomain */
case 561:
switch(ndx) {
case 0:
p = "cpulevel_t";
break;
case 1:
p = "cpuwhich_t";
break;
case 2:
p = "id_t";
break;
case 3:
p = "size_t";
break;
case 4:
p = "userland domainset_t *";
break;
case 5:
p = "userland int *";
break;
default:
break;
};
break;
/* cpuset_setdomain */
case 562:
switch(ndx) {
case 0:
p = "cpulevel_t";
break;
case 1:
p = "cpuwhich_t";
break;
case 2:
p = "id_t";
break;
case 3:
p = "size_t";
break;
case 4:
p = "userland domainset_t *";
break;
case 5:
p = "int";
break;
default:
break;
};
break;
default:
break;
};
@@ -10549,16 +10573,6 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* numa_getaffinity */
case 548:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* numa_setaffinity */
case 549:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* fdatasync */
case 550:
if (ndx == 0 || ndx == 1)
@@ -10614,6 +10628,16 @@ systrace_return_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* cpuset_getdomain */
case 561:
if (ndx == 0 || ndx == 1)
p = "int";
break;
/* cpuset_setdomain */
case 562:
if (ndx == 0 || ndx == 1)
p = "int";
break;
default:
break;
};

View File

@@ -133,7 +133,7 @@ static __inline void bd_wakeup(void);
static int sysctl_runningspace(SYSCTL_HANDLER_ARGS);
static void bufkva_reclaim(vmem_t *, int);
static void bufkva_free(struct buf *);
static int buf_import(void *, void **, int, int);
static int buf_import(void *, void **, int, int, int);
static void buf_release(void *, void **, int);
static void maxbcachebuf_adjust(void);
@@ -1419,7 +1419,7 @@ buf_free(struct buf *bp)
* only as a per-cpu cache of bufs still maintained on a global list.
*/
static int
buf_import(void *arg, void **store, int cnt, int flags)
buf_import(void *arg, void **store, int cnt, int domain, int flags)
{
struct buf *bp;
int i;

View File

@@ -480,6 +480,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{

View File

@@ -44,7 +44,8 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
vm_paddr_t pa;
vm_page_t m;
@@ -59,7 +60,8 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
#endif
for (;;) {
m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
m = vm_page_alloc_freelist_domain(domain, VM_FREELIST_DIRECT,
pflags);
#ifndef __mips_n64
if (m == NULL && vm_page_reclaim_contig(pflags, 1,
0, MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0))

View File

@@ -42,7 +42,7 @@ MALLOC_DECLARE(M_ALIAS);
/* Use kernel allocator. */
#if defined(_SYS_MALLOC_H_)
#define malloc(x) malloc(x, M_ALIAS, M_NOWAIT|M_ZERO)
#define calloc(x, n) malloc(x*n)
#define calloc(n, x) mallocarray((n), (x), M_ALIAS, M_NOWAIT|M_ZERO)
#define free(x) free(x, M_ALIAS)
#endif
#endif
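The calloc() change above (and the matching sn_calloc() change in the next file) swaps an unchecked multiply for mallocarray(9), whose prototype appears as context in the sys/malloc.h hunk later in this merge. A minimal sketch of the difference, with hypothetical counts n and x:

	/* Old: n * x can wrap, silently allocating a short buffer. */
	p = malloc(n * x, M_ALIAS, M_NOWAIT | M_ZERO);

	/* New: the n * x multiply is overflow-checked by mallocarray(9),
	 * so a wrapped size can no longer yield an undersized buffer. */
	p = mallocarray(n, x, M_ALIAS, M_NOWAIT | M_ZERO);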

View File

@@ -187,7 +187,7 @@ static MALLOC_DEFINE(M_SCTPNAT, "sctpnat", "sctp nat dbs");
/* Use kernel allocator. */
#ifdef _SYS_MALLOC_H_
#define sn_malloc(x) malloc(x, M_SCTPNAT, M_NOWAIT|M_ZERO)
#define sn_calloc(n,x) sn_malloc((x) * (n))
#define sn_calloc(n,x) mallocarray((n), (x), M_SCTPNAT, M_NOWAIT|M_ZERO)
#define sn_free(x) free(x, M_SCTPNAT)
#endif // #ifdef _SYS_MALLOC_H_
@@ -1104,7 +1104,7 @@ sctp_PktParser(struct libalias *la, int direction, struct ip *pip,
if (*passoc == NULL) {/* out of resources */
return (SN_PARSE_ERROR_AS_MALLOC);
}
/* Initialise association - malloc initialises memory to zeros */
/* Initialize association - sn_malloc initializes memory to zeros */
(*passoc)->state = SN_ID;
LIST_INIT(&((*passoc)->Gaddr)); /* always initialise to avoid memory problems */
(*passoc)->TableRegister = SN_NULL_TBL;
@@ -1168,7 +1168,7 @@ sctp_PktParser(struct libalias *la, int direction, struct ip *pip,
if (*passoc == NULL) {/* out of resources */
return (SN_PARSE_ERROR_AS_MALLOC);
}
/* Initialise association - malloc initialises memory to zeros */
/* Initialize association - sn_malloc initializes memory to zeros */
(*passoc)->state = SN_ID;
LIST_INIT(&((*passoc)->Gaddr)); /* always initialise to avoid memory problems */
(*passoc)->TableRegister = SN_NULL_TBL;

View File

@@ -44,6 +44,7 @@
#include <netinet/ip_fw.h> /* flow_id */
#include <netinet/ip_dummynet.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>

View File

@@ -1504,8 +1504,8 @@ moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
static mmu_t installed_mmu;
static void *
moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
int wait)
moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
uint8_t *flags, int wait)
{
struct pvo_entry *pvo;
vm_offset_t va;
@@ -1522,7 +1522,7 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
*flags = UMA_SLAB_PRIV;
needed_lock = !PMAP_LOCKED(kernel_pmap);
m = vm_page_alloc(NULL, 0,
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
if (m == NULL)
return (NULL);

View File

@@ -480,7 +480,8 @@ slb_insert_user(pmap_t pm, struct slb *slb)
}
static void *
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
u_int8_t *flags, int wait)
{
static vm_offset_t realmax = 0;
void *va;
@@ -490,7 +491,7 @@ slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
realmax = platform_real_maxaddr();
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc_contig(NULL, 0,
m = vm_page_alloc_contig_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
if (m == NULL)

View File

@@ -0,0 +1,131 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2018 Justin Hibbits
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
/*
* From the P1022 manual, sequence for writing to L2CTL is:
* - mbar
* - isync
* - write
* - read
* - mbar
*/
#define L2_CTL 0x0
#define L2CTL_L2E 0x80000000
#define L2CTL_L2I 0x40000000
struct mpc85xx_cache_softc {
struct resource *sc_mem;
};
static int
mpc85xx_cache_probe(device_t dev)
{
if (!ofw_bus_is_compatible(dev, "cache"))
return (ENXIO);
device_set_desc(dev, "MPC85xx L2 cache");
return (0);
}
static int
mpc85xx_cache_attach(device_t dev)
{
struct mpc85xx_cache_softc *sc = device_get_softc(dev);
int rid;
int cache_line_size, cache_size;
/* Map registers. */
rid = 0;
sc->sc_mem = bus_alloc_resource_any(dev,
SYS_RES_MEMORY, &rid, RF_ACTIVE);
if (sc->sc_mem == NULL)
return (ENOMEM);
/* Enable cache and flash invalidate. */
__asm __volatile ("mbar; isync" ::: "memory");
bus_write_4(sc->sc_mem, L2_CTL, L2CTL_L2E | L2CTL_L2I);
bus_read_4(sc->sc_mem, L2_CTL);
__asm __volatile ("mbar" ::: "memory");
cache_line_size = 0;
cache_size = 0;
OF_getencprop(ofw_bus_get_node(dev), "cache-size", &cache_size,
sizeof(cache_size));
OF_getencprop(ofw_bus_get_node(dev), "cache-line-size",
&cache_line_size, sizeof(cache_line_size));
if (cache_line_size != 0 && cache_size != 0)
device_printf(dev,
"L2 cache size: %dKB, cache line size: %d bytes\n",
cache_size / 1024, cache_line_size);
return (0);
}
static device_method_t mpc85xx_cache_methods[] = {
/* device methods */
DEVMETHOD(device_probe, mpc85xx_cache_probe),
DEVMETHOD(device_attach, mpc85xx_cache_attach),
DEVMETHOD_END
};
static driver_t mpc85xx_cache_driver = {
"cache",
mpc85xx_cache_methods,
sizeof(struct mpc85xx_cache_softc),
};
static devclass_t mpc85xx_cache_devclass;
EARLY_DRIVER_MODULE(mpc85xx_cache, simplebus, mpc85xx_cache_driver,
mpc85xx_cache_devclass, NULL, NULL,
BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);

View File

@@ -420,8 +420,6 @@ OF_bootstrap()
return status;
err = OF_init(fdt);
if (err == 0)
OF_interpret("perform-fixup", 0);
}
if (err != 0) {

View File

@@ -340,6 +340,13 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
return (error);
}
int
bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain)
{
return (0);
}
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{

View File

@@ -51,7 +51,8 @@ SYSCTL_INT(_hw, OID_AUTO, uma_mdpages, CTLFLAG_RD, &hw_uma_mdpages, 0,
"UMA MD pages in use");
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
void *va;
vm_paddr_t pa;
@@ -59,7 +60,7 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc(NULL, 0,
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
if (m == NULL)
return (NULL);

View File

@@ -41,7 +41,8 @@ __FBSDID("$FreeBSD$");
#include <machine/vmparam.h>
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
panic("uma_small_alloc");

View File

@@ -392,7 +392,8 @@ swi_vm(void *v)
}
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
int wait)
{
vm_paddr_t pa;
vm_page_t m;
@@ -402,7 +403,7 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
*flags = UMA_SLAB_PRIV;
m = vm_page_alloc(NULL, 0,
m = vm_page_alloc_domain(NULL, 0, domain,
malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
if (m == NULL)
return (NULL);

sys/sys/_domainset.h (new file, 60 lines)
View File

@@ -0,0 +1,60 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS__DOMAINSET_H_
#define _SYS__DOMAINSET_H_
#include <sys/_bitset.h>
#ifdef _KERNEL
#define DOMAINSET_SETSIZE MAXMEMDOM
#endif
#define DOMAINSET_MAXSIZE 256
#ifndef DOMAINSET_SETSIZE
#define DOMAINSET_SETSIZE DOMAINSET_MAXSIZE
#endif
BITSET_DEFINE(_domainset, DOMAINSET_SETSIZE);
typedef struct _domainset domainset_t;
/*
* This structure is intended to be embedded in objects which have policy
* attributes. Each object keeps its own iterator so round-robin is
* synchronized and accurate.
*/
struct domainset;
struct domainset_ref {
struct domainset * volatile dr_policy;
int dr_iterator;
};
#endif /* !_SYS__DOMAINSET_H_ */
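The split between the shared policy and the per-object iterator is the point of this header: many objects can point at one immutable struct domainset while keeping round-robin state private. A simplified sketch of the intended embedding, using a hypothetical consumer; a real iterator would also restrict the result to the domains in the policy's mask:

	struct widget {
		struct domainset_ref w_domain;	/* hypothetical embedder */
	};

	/* Round-robin pick: the cursor is per-widget, the policy shared. */
	static int
	widget_next_domain(struct widget *w)
	{
		return (w->w_domain.dr_iterator++ % vm_ndomains);
	}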

View File

@@ -1,63 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef __SYS_VM_DOMAIN_H__
#define __SYS_VM_DOMAIN_H__
#include <sys/seq.h>
typedef enum {
VM_POLICY_NONE,
VM_POLICY_ROUND_ROBIN,
VM_POLICY_FIXED_DOMAIN,
VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN,
VM_POLICY_FIRST_TOUCH,
VM_POLICY_FIRST_TOUCH_ROUND_ROBIN,
VM_POLICY_MAX
} vm_domain_policy_type_t;
struct vm_domain_policy_entry {
vm_domain_policy_type_t policy;
int domain;
};
struct vm_domain_policy {
seq_t seq;
struct vm_domain_policy_entry p;
};
#define VM_DOMAIN_POLICY_STATIC_INITIALISER(vt, vd) \
{ .seq = 0, \
.p.policy = vt, \
.p.domain = vd }
#endif /* __SYS_VM_DOMAIN_H__ */

View File

@@ -176,6 +176,14 @@ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
void *lockfuncarg, bus_dma_tag_t *dmat);
/*
* Set the memory domain to be used for allocations.
*
* Automatic for PCI devices. Must be set prior to creating maps or
* allocating memory.
*/
int bus_dma_tag_set_domain(bus_dma_tag_t dmat, int domain);
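A hedged sketch of the ordering the comment above implies, in a hypothetical driver attach path (softc layout and sizes illustrative): create the tag, pin it to a domain, and only then create maps or allocate memory:

	error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    PAGE_SIZE, 1, PAGE_SIZE, 0, NULL, NULL, &sc->sc_dmat);
	if (error == 0)
		error = bus_dma_tag_set_domain(sc->sc_dmat, domain);
	/* bus_dmamem_alloc()/bus_dmamap_create() may follow. */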
int bus_dma_tag_destroy(bus_dma_tag_t dmat);
/*

View File

@@ -113,7 +113,7 @@ struct busdma_bufzone * busdma_bufalloc_findzone(busdma_bufalloc_t ba,
* you can probably use these when you need uncacheable buffers.
*/
void * busdma_bufalloc_alloc_uncacheable(uma_zone_t zone, vm_size_t size,
uint8_t *pflag, int wait);
int domain, uint8_t *pflag, int wait);
void busdma_bufalloc_free_uncacheable(void *item, vm_size_t size,
uint8_t pflag);

View File

@@ -112,6 +112,7 @@ LIST_HEAD(setlist, cpuset);
*/
struct cpuset {
cpuset_t cs_mask; /* bitmask of valid cpus. */
struct domainset *cs_domain; /* (c) NUMA policy. */
volatile u_int cs_ref; /* (a) Reference count. */
int cs_flags; /* (s) Flags from below. */
cpusetid_t cs_id; /* (s) Id or INVALID. */

sys/sys/domainset.h (new file, 102 lines)
View File

@@ -0,0 +1,102 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_DOMAINSETSET_H_
#define _SYS_DOMAINSETSET_H_
#include <sys/_domainset.h>
#include <sys/bitset.h>
#define _NDOMAINSETBITS _BITSET_BITS
#define _NDOMAINSETWORDS __bitset_words(DOMAINSET_SETSIZE)
#define DOMAINSETSETBUFSIZ ((2 + sizeof(long) * 2) * _NDOMAINSETWORDS)
#define DOMAINSET_CLR(n, p) BIT_CLR(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_COPY(f, t) BIT_COPY(DOMAINSET_SETSIZE, f, t)
#define DOMAINSET_ISSET(n, p) BIT_ISSET(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_SET(n, p) BIT_SET(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_ZERO(p) BIT_ZERO(DOMAINSET_SETSIZE, p)
#define DOMAINSET_FILL(p) BIT_FILL(DOMAINSET_SETSIZE, p)
#define DOMAINSET_SETOF(n, p) BIT_SETOF(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_EMPTY(p) BIT_EMPTY(DOMAINSET_SETSIZE, p)
#define DOMAINSET_ISFULLSET(p) BIT_ISFULLSET(DOMAINSET_SETSIZE, p)
#define DOMAINSET_SUBSET(p, c) BIT_SUBSET(DOMAINSET_SETSIZE, p, c)
#define DOMAINSET_OVERLAP(p, c) BIT_OVERLAP(DOMAINSET_SETSIZE, p, c)
#define DOMAINSET_CMP(p, c) BIT_CMP(DOMAINSET_SETSIZE, p, c)
#define DOMAINSET_OR(d, s) BIT_OR(DOMAINSET_SETSIZE, d, s)
#define DOMAINSET_AND(d, s) BIT_AND(DOMAINSET_SETSIZE, d, s)
#define DOMAINSET_NAND(d, s) BIT_NAND(DOMAINSET_SETSIZE, d, s)
#define DOMAINSET_CLR_ATOMIC(n, p) BIT_CLR_ATOMIC(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_SET_ATOMIC(n, p) BIT_SET_ATOMIC(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_SET_ATOMIC_ACQ(n, p) \
BIT_SET_ATOMIC_ACQ(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_AND_ATOMIC(n, p) BIT_AND_ATOMIC(DOMAINSET_SETSIZE, n, p)
#define DOMAINSET_OR_ATOMIC(d, s) BIT_OR_ATOMIC(DOMAINSET_SETSIZE, d, s)
#define DOMAINSET_COPY_STORE_REL(f, t) \
BIT_COPY_STORE_REL(DOMAINSET_SETSIZE, f, t)
#define DOMAINSET_FFS(p) BIT_FFS(DOMAINSET_SETSIZE, p)
#define DOMAINSET_FLS(p) BIT_FLS(DOMAINSET_SETSIZE, p)
#define DOMAINSET_COUNT(p) BIT_COUNT(DOMAINSET_SETSIZE, p)
#define DOMAINSET_FSET BITSET_FSET(_NDOMAINSETWORDS)
#define DOMAINSET_T_INITIALIZER BITSET_T_INITIALIZER
#define DOMAINSET_POLICY_INVALID 0
#define DOMAINSET_POLICY_ROUNDROBIN 1
#define DOMAINSET_POLICY_FIRSTTOUCH 2
#define DOMAINSET_POLICY_PREFER 3
#define DOMAINSET_POLICY_MAX DOMAINSET_POLICY_PREFER
#ifdef _KERNEL
#include <sys/queue.h>
LIST_HEAD(domainlist, domainset);
struct domainset {
LIST_ENTRY(domainset) ds_link;
domainset_t ds_mask; /* allowed domains. */
uint16_t ds_policy; /* Policy type. */
int16_t ds_prefer; /* Preferred domain or -1. */
uint16_t ds_cnt; /* popcnt from above. */
uint16_t ds_max; /* Maximum domain in set. */
};
void domainset_zero(void);
#else
__BEGIN_DECLS
int cpuset_getdomain(cpulevel_t, cpuwhich_t, id_t, size_t, domainset_t *,
int *);
int cpuset_setdomain(cpulevel_t, cpuwhich_t, id_t, size_t,
const domainset_t *, int);
__END_DECLS
#endif
#endif /* !_SYS_DOMAINSETSET_H_ */
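The userland half is compact enough to show whole. A hedged example that asks for round-robin allocation across domains 0 and 1 for the calling thread (id -1 selects the caller; assumes a machine with at least two memory domains):

	#include <sys/param.h>
	#include <sys/cpuset.h>
	#include <sys/domainset.h>
	#include <err.h>

	domainset_t mask;

	DOMAINSET_ZERO(&mask);
	DOMAINSET_SET(0, &mask);
	DOMAINSET_SET(1, &mask);
	if (cpuset_setdomain(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1,
	    sizeof(mask), &mask, DOMAINSET_POLICY_ROUNDROBIN) != 0)
		err(1, "cpuset_setdomain");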

View File

@@ -175,9 +175,17 @@ void *contigmalloc(unsigned long size, struct malloc_type *type, int flags,
vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
vm_paddr_t boundary) __malloc_like __result_use_check
__alloc_size(1) __alloc_align(6);
void *contigmalloc_domain(unsigned long size, struct malloc_type *type,
int domain, int flags, vm_paddr_t low, vm_paddr_t high,
unsigned long alignment, vm_paddr_t boundary)
__malloc_like __result_use_check __alloc_size(1) __alloc_align(6);
void free(void *addr, struct malloc_type *type);
void free_domain(void *addr, struct malloc_type *type);
void *malloc(unsigned long size, struct malloc_type *type, int flags)
__malloc_like __result_use_check __alloc_size(1);
void *malloc_domain(unsigned long size, struct malloc_type *type,
int domain, int flags)
__malloc_like __result_use_check __alloc_size(1);
void *mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
int flags) __malloc_like __result_use_check
__alloc_size(1) __alloc_size(2);
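A hedged kernel sketch of the new pairing (malloc type and size illustrative); memory obtained with malloc_domain() should be returned through its counterpart free_domain() above:

	void *buf;

	buf = malloc_domain(PAGE_SIZE, M_DEVBUF, domain, M_WAITOK | M_ZERO);
	/* ... touch buf from CPUs local to 'domain' ... */
	free_domain(buf, M_DEVBUF);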

View File

@@ -62,11 +62,18 @@
#include <sys/time.h> /* For structs itimerval, timeval. */
#else
#include <sys/pcpu.h>
#include <sys/systm.h>
#endif
#include <sys/ucontext.h>
#include <sys/ucred.h>
#include <sys/_vm_domain.h>
#include <sys/types.h>
#include <sys/domainset.h>
#include <machine/proc.h> /* Machine-dependent proc substruct. */
#ifdef _KERNEL
#include <machine/cpu.h>
#endif
/*
* One structure allocated per session.
@@ -179,6 +186,7 @@ struct procdesc;
struct racct;
struct sbuf;
struct sleepqueue;
struct socket;
struct syscall_args;
struct td_sched;
struct thread;
@@ -222,12 +230,12 @@ struct thread {
TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */
LIST_ENTRY(thread) td_hash; /* (d) Hash chain. */
struct cpuset *td_cpuset; /* (t) CPU affinity mask. */
struct domainset_ref td_domain; /* (a) NUMA policy */
struct seltd *td_sel; /* Select queue/channel. */
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
struct turnstile *td_turnstile; /* (k) Associated turnstile. */
struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
struct vm_domain_policy td_vm_dom_policy; /* (c) current numa domain policy */
lwpid_t td_tid; /* (b) Thread ID. */
sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
#define td_siglist td_sigqueue.sq_signals
@@ -286,7 +294,6 @@ struct thread {
pid_t td_dbg_forked; /* (c) Child pid for debugger. */
u_int td_vp_reserv; /* (k) Count of reserved vnodes. */
int td_no_sleeping; /* (k) Sleeping disabled count. */
int td_dom_rr_idx; /* (k) RR Numa domain selection. */
void *td_su; /* (k) FFS SU private */
sbintime_t td_sleeptimo; /* (t) Sleep timeout. */
int td_rtcgen; /* (s) rtc_generation of abs. sleep */
@@ -655,7 +662,6 @@ struct proc {
uint64_t p_prev_runtime; /* (c) Resource usage accounting. */
struct racct *p_racct; /* (b) Resource accounting. */
int p_throttled; /* (c) Flag for racct pcpu throttling */
struct vm_domain_policy p_vm_dom_policy; /* (c) process default VM domain, or -1 */
/*
* An orphan is the child that has beed re-parented to the
* debugger as a result of attaching to it. Need to keep

View File

@@ -465,8 +465,6 @@
#define SYS_ppoll 545
#define SYS_futimens 546
#define SYS_utimensat 547
#define SYS_numa_getaffinity 548
#define SYS_numa_setaffinity 549
#define SYS_fdatasync 550
#define SYS_fstat 551
#define SYS_fstatat 552
@@ -478,4 +476,6 @@
#define SYS_fhstatfs 558
#define SYS_mknodat 559
#define SYS_kevent 560
#define SYS_MAXSYSCALL 561
#define SYS_cpuset_getdomain 561
#define SYS_cpuset_setdomain 562
#define SYS_MAXSYSCALL 563

View File

@@ -393,8 +393,6 @@ MIASM = \
ppoll.o \
futimens.o \
utimensat.o \
numa_getaffinity.o \
numa_setaffinity.o \
fdatasync.o \
fstat.o \
fstatat.o \
@@ -405,4 +403,6 @@ MIASM = \
getfsstat.o \
fhstatfs.o \
mknodat.o \
kevent.o
kevent.o \
cpuset_getdomain.o \
cpuset_setdomain.o

View File

@@ -36,6 +36,7 @@
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/_cpuset.h>
#include <sys/_domainset.h>
struct file;
struct filecaps;
@@ -96,6 +97,12 @@ int kern_cpuset_getaffinity(struct thread *td, cpulevel_t level,
int kern_cpuset_setaffinity(struct thread *td, cpulevel_t level,
cpuwhich_t which, id_t id, size_t cpusetsize,
const cpuset_t *maskp);
int kern_cpuset_getdomain(struct thread *td, cpulevel_t level,
cpuwhich_t which, id_t id, size_t domainsetsize,
domainset_t *maskp, int *policyp);
int kern_cpuset_setdomain(struct thread *td, cpulevel_t level,
cpuwhich_t which, id_t id, size_t domainsetsize,
const domainset_t *maskp, int policy);
int kern_cpuset_getid(struct thread *td, cpulevel_t level,
cpuwhich_t which, id_t id, cpusetid_t *setid);
int kern_cpuset_setid(struct thread *td, cpuwhich_t which,

View File

@@ -11,6 +11,7 @@
#include <sys/signal.h>
#include <sys/acl.h>
#include <sys/cpuset.h>
#include <sys/domainset.h>
#include <sys/_ffcounter.h>
#include <sys/_semaphore.h>
#include <sys/ucontext.h>
@@ -1697,16 +1698,6 @@ struct utimensat_args {
char times_l_[PADL_(struct timespec *)]; struct timespec * times; char times_r_[PADR_(struct timespec *)];
char flag_l_[PADL_(int)]; int flag; char flag_r_[PADR_(int)];
};
struct numa_getaffinity_args {
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)];
char policy_l_[PADL_(struct vm_domain_policy_entry *)]; struct vm_domain_policy_entry * policy; char policy_r_[PADR_(struct vm_domain_policy_entry *)];
};
struct numa_setaffinity_args {
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)];
char policy_l_[PADL_(const struct vm_domain_policy_entry *)]; const struct vm_domain_policy_entry * policy; char policy_r_[PADR_(const struct vm_domain_policy_entry *)];
};
struct fdatasync_args {
char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
};
@@ -1761,6 +1752,22 @@ struct kevent_args {
char nevents_l_[PADL_(int)]; int nevents; char nevents_r_[PADR_(int)];
char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)];
};
struct cpuset_getdomain_args {
char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)];
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)];
char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)];
char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)];
char policy_l_[PADL_(int *)]; int * policy; char policy_r_[PADR_(int *)];
};
struct cpuset_setdomain_args {
char level_l_[PADL_(cpulevel_t)]; cpulevel_t level; char level_r_[PADR_(cpulevel_t)];
char which_l_[PADL_(cpuwhich_t)]; cpuwhich_t which; char which_r_[PADR_(cpuwhich_t)];
char id_l_[PADL_(id_t)]; id_t id; char id_r_[PADR_(id_t)];
char domainsetsize_l_[PADL_(size_t)]; size_t domainsetsize; char domainsetsize_r_[PADR_(size_t)];
char mask_l_[PADL_(domainset_t *)]; domainset_t * mask; char mask_r_[PADR_(domainset_t *)];
char policy_l_[PADL_(int)]; int policy; char policy_r_[PADR_(int)];
};
int nosys(struct thread *, struct nosys_args *);
void sys_sys_exit(struct thread *, struct sys_exit_args *);
int sys_fork(struct thread *, struct fork_args *);
@@ -2128,8 +2135,6 @@ int sys_procctl(struct thread *, struct procctl_args *);
int sys_ppoll(struct thread *, struct ppoll_args *);
int sys_futimens(struct thread *, struct futimens_args *);
int sys_utimensat(struct thread *, struct utimensat_args *);
int sys_numa_getaffinity(struct thread *, struct numa_getaffinity_args *);
int sys_numa_setaffinity(struct thread *, struct numa_setaffinity_args *);
int sys_fdatasync(struct thread *, struct fdatasync_args *);
int sys_fstat(struct thread *, struct fstat_args *);
int sys_fstatat(struct thread *, struct fstatat_args *);
@@ -2141,6 +2146,8 @@ int sys_getfsstat(struct thread *, struct getfsstat_args *);
int sys_fhstatfs(struct thread *, struct fhstatfs_args *);
int sys_mknodat(struct thread *, struct mknodat_args *);
int sys_kevent(struct thread *, struct kevent_args *);
int sys_cpuset_getdomain(struct thread *, struct cpuset_getdomain_args *);
int sys_cpuset_setdomain(struct thread *, struct cpuset_setdomain_args *);
#ifdef COMPAT_43
@@ -3020,8 +3027,6 @@ int freebsd11_mknodat(struct thread *, struct freebsd11_mknodat_args *);
#define SYS_AUE_ppoll AUE_POLL
#define SYS_AUE_futimens AUE_FUTIMES
#define SYS_AUE_utimensat AUE_FUTIMESAT
#define SYS_AUE_numa_getaffinity AUE_NULL
#define SYS_AUE_numa_setaffinity AUE_NULL
#define SYS_AUE_fdatasync AUE_FSYNC
#define SYS_AUE_fstat AUE_FSTAT
#define SYS_AUE_fstatat AUE_FSTATAT
@@ -3033,6 +3038,8 @@ int freebsd11_mknodat(struct thread *, struct freebsd11_mknodat_args *);
#define SYS_AUE_fhstatfs AUE_FHSTATFS
#define SYS_AUE_mknodat AUE_MKNODAT
#define SYS_AUE_kevent AUE_KEVENT
#define SYS_AUE_cpuset_getdomain AUE_NULL
#define SYS_AUE_cpuset_setdomain AUE_NULL
#undef PAD_
#undef PADL_

View File

@@ -128,7 +128,8 @@ typedef void (*uma_fini)(void *mem, int size);
/*
* Import new memory into a cache zone.
*/
typedef int (*uma_import)(void *arg, void **store, int count, int flags);
typedef int (*uma_import)(void *arg, void **store, int count, int domain,
int flags);
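buf_import() earlier in this merge is one adopter of the widened signature. A minimal hypothetical callback that threads the domain through to its backend; a domain-oblivious cache may simply ignore the new argument, as buf_import() does:

	static int
	widget_import(void *arg, void **store, int cnt, int domain, int flags)
	{
		int i;

		for (i = 0; i < cnt; i++) {
			/* widget_backend_alloc() is hypothetical. */
			store[i] = widget_backend_alloc(arg, domain, flags);
			if (store[i] == NULL)
				break;
		}
		return (i);
	}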
/*
* Free memory from a cache zone.
@@ -281,6 +282,10 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
* Allocates mp_maxid + 1 slabs sized to
* sizeof(struct pcpu).
*/
#define UMA_ZONE_NUMA 0x10000 /*
* NUMA aware Zone. Implements a best
* effort first-touch policy.
*/
/*
* These flags are shared between the keg and zone. In zones wishing to add
@@ -325,6 +330,19 @@ void uma_zdestroy(uma_zone_t zone);
void *uma_zalloc_arg(uma_zone_t zone, void *arg, int flags);
/*
* Allocate an item from a specific NUMA domain. This uses a slow path in
* the allocator but is guaranteed to allocate memory from the requested
* domain if M_WAITOK is set.
*
* Arguments:
* zone The zone we are allocating from
* arg This data is passed to the ctor function
* domain The domain to allocate from.
* flags See sys/malloc.h for available flags.
*/
void *uma_zalloc_domain(uma_zone_t zone, void *arg, int domain, int flags);
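A hedged usage sketch (zone and item hypothetical); the slow path is the cost of the placement guarantee described above, and the item goes back through uma_zfree_domain(), declared just below:

	item = uma_zalloc_domain(widget_zone, NULL, domain, M_WAITOK);
	/* ... use item; M_WAITOK guarantees placement in 'domain' ... */
	uma_zfree_domain(widget_zone, item, NULL);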
/*
* Allocates an item out of a zone without supplying an argument
*
@@ -353,6 +371,16 @@ uma_zalloc(uma_zone_t zone, int flags)
void uma_zfree_arg(uma_zone_t zone, void *item, void *arg);
/*
* Frees an item back to the specified zone's domain specific pool.
*
* Arguments:
* zone The zone the item was originally allocated out of.
* item The memory to be freed.
* arg Argument passed to the destructor
*/
void uma_zfree_domain(uma_zone_t zone, void *item, void *arg);
/*
* Frees an item back to a zone without supplying an argument
*
@@ -372,11 +400,6 @@ uma_zfree(uma_zone_t zone, void *item)
*/
void uma_zwait(uma_zone_t zone);
/*
* XXX The rest of the prototypes in this header are h0h0 magic for the VM.
* If you think you need to use it for a normal zone you're probably incorrect.
*/
/*
* Backend page supplier routines
*
@@ -384,14 +407,15 @@ void uma_zwait(uma_zone_t zone);
* zone The zone that is requesting pages.
* size The number of bytes being requested.
* pflag Flags for these memory pages, see below.
* domain The NUMA domain that we prefer for this allocation.
* wait Indicates our willingness to block.
*
* Returns:
* A pointer to the allocated memory or NULL on failure.
*/
typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
int wait);
typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, int domain,
uint8_t *pflag, int wait);
/*
* Backend page free routines
@@ -406,8 +430,6 @@ typedef void *(*uma_alloc)(uma_zone_t zone, vm_size_t size, uint8_t *pflag,
*/
typedef void (*uma_free)(void *item, vm_size_t size, uint8_t pflag);
/*
* Sets up the uma allocator. (Called by vm_mem_init)
*

File diff suppressed because it is too large

View File

@@ -39,7 +39,22 @@
*/
/*
* Here's a quick description of the relationship between the objects:
* The brief summary: Zones describe unique allocation types. Zones are
* organized into per-CPU caches which are filled by buckets. Buckets are
* organized according to memory domains. Buckets are filled from kegs which
* are also organized according to memory domains. Kegs describe a unique
* allocation type, backend memory provider, and layout. Kegs are associated
* with one or more zones and zones reference one or more kegs. Kegs provide
* slabs which are virtually contiguous collections of pages. Each slab is
* broken down into one or more items that will satisfy an individual allocation.
*
* Allocation is satisfied in the following order:
* 1) Per-CPU cache
* 2) Per-domain cache of buckets
* 3) Slab from any of N kegs
* 4) Backend page provider
*
* More detail on individual objects is contained below:
*
* Kegs contain lists of slabs which are stored in either the full bin, empty
* bin, or partially allocated bin, to reduce fragmentation. They also contain
@@ -47,6 +62,13 @@
* and rsize is the result of that. The Keg also stores information for
* managing a hash of page addresses that maps pages to uma_slab_t structures
* for pages that don't have embedded uma_slab_t's.
*
* Keg slab lists are organized by memory domain to support NUMA allocation
* policies. By default allocations are spread across domains to reduce the
* potential for hotspots. Special keg creation flags may be specified to
* prefer local allocation. However, there is no strict enforcement, as frees
* may happen on any CPU and these are returned to the CPU-local cache
* regardless of the originating domain.
*
* The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
* be allocated off the page from a special slab zone. The free list within a
@@ -181,6 +203,17 @@ struct uma_cache {
typedef struct uma_cache * uma_cache_t;
/*
* Per-domain memory list. Embedded in the kegs.
*/
struct uma_domain {
LIST_HEAD(,uma_slab) ud_part_slab; /* partially allocated slabs */
LIST_HEAD(,uma_slab) ud_free_slab; /* empty slab list */
LIST_HEAD(,uma_slab) ud_full_slab; /* full slabs */
};
typedef struct uma_domain * uma_domain_t;
/*
* Keg management structure
*
@@ -192,10 +225,8 @@ struct uma_keg {
struct uma_hash uk_hash;
LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
LIST_HEAD(,uma_slab) uk_part_slab; /* partially allocated slabs */
LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */
uint32_t uk_cursor; /* Domain alloc cursor. */
uint32_t uk_align; /* Alignment mask */
uint32_t uk_pages; /* Total page count */
uint32_t uk_free; /* Count of items free in slabs */
@@ -221,6 +252,9 @@ struct uma_keg {
/* Least used fields go to the last cache line. */
const char *uk_name; /* Name of creating zone. */
LIST_ENTRY(uma_keg) uk_link; /* List of all kegs */
/* Must be last, variable sized. */
struct uma_domain uk_domain[]; /* Keg's slab lists. */
};
typedef struct uma_keg * uma_keg_t;
@@ -248,14 +282,18 @@ struct uma_slab {
#endif
uint16_t us_freecount; /* How many are free? */
uint8_t us_flags; /* Page flags see uma.h */
uint8_t us_pad; /* Pad to 32bits, unused. */
uint8_t us_domain; /* Backing NUMA domain. */
};
#define us_link us_type._us_link
#define us_size us_type._us_size
#if MAXMEMDOM >= 255
#error "Slab domain type insufficient"
#endif
typedef struct uma_slab * uma_slab_t;
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int);
typedef uma_slab_t (*uma_slaballoc)(uma_zone_t, uma_keg_t, int, int);
struct uma_klink {
LIST_ENTRY(uma_klink) kl_link;
@@ -263,6 +301,12 @@ struct uma_klink {
};
typedef struct uma_klink *uma_klink_t;
struct uma_zone_domain {
LIST_HEAD(,uma_bucket) uzd_buckets; /* full buckets */
};
typedef struct uma_zone_domain * uma_zone_domain_t;
/*
* Zone management structure
*
@@ -275,7 +319,7 @@ struct uma_zone {
const char *uz_name; /* Text name of the zone */
LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
LIST_HEAD(,uma_bucket) uz_buckets; /* full buckets */
struct uma_zone_domain *uz_domain; /* per-domain buckets */
LIST_HEAD(,uma_klink) uz_kegs; /* List of kegs. */
struct uma_klink uz_klink; /* klink for first keg. */
@@ -309,7 +353,9 @@ struct uma_zone {
* This HAS to be the last item because we adjust the zone size
* based on NCPU and then allocate the space for the zones.
*/
struct uma_cache uz_cpu[1]; /* Per cpu caches */
struct uma_cache uz_cpu[]; /* Per cpu caches */
/* uz_domain follows here. */
};
/*
@@ -340,6 +386,7 @@ zone_first_keg(uma_zone_t zone)
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(vm_size_t size, int wait);
void *uma_large_malloc_domain(vm_size_t size, int domain, int wait);
void uma_large_free(uma_slab_t slab);
/* Lock Macros */
@@ -422,8 +469,8 @@ vsetslab(vm_offset_t va, uma_slab_t slab)
* if they can provide more efficient allocation functions. This is useful
* for using direct mapped addresses.
*/
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *pflag,
int wait);
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);
/* Set a global soft limit on UMA managed memory. */

View File

@@ -1,514 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#ifdef VM_NUMA_ALLOC
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/tree.h>
#include <sys/vmmeter.h>
#include <sys/seq.h>
#include <ddb/ddb.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_domain.h>
/*
* Default to first-touch + round-robin.
*/
static struct mtx vm_default_policy_mtx;
MTX_SYSINIT(vm_default_policy, &vm_default_policy_mtx, "default policy mutex",
MTX_DEF);
#ifdef VM_NUMA_ALLOC
static struct vm_domain_policy vm_default_policy =
VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
#else
/* Use round-robin so the domain policy code will only try once per allocation */
static struct vm_domain_policy vm_default_policy =
VM_DOMAIN_POLICY_STATIC_INITIALISER(VM_POLICY_ROUND_ROBIN, 0);
#endif
static int
sysctl_vm_default_policy(SYSCTL_HANDLER_ARGS)
{
char policy_name[32];
int error;
mtx_lock(&vm_default_policy_mtx);
/* Map policy to output string */
switch (vm_default_policy.p.policy) {
case VM_POLICY_FIRST_TOUCH:
strcpy(policy_name, "first-touch");
break;
case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
strcpy(policy_name, "first-touch-rr");
break;
case VM_POLICY_ROUND_ROBIN:
default:
strcpy(policy_name, "rr");
break;
}
mtx_unlock(&vm_default_policy_mtx);
error = sysctl_handle_string(oidp, &policy_name[0],
sizeof(policy_name), req);
if (error != 0 || req->newptr == NULL)
return (error);
mtx_lock(&vm_default_policy_mtx);
/* Set: match on the subset of policies that make sense as a default */
if (strcmp("first-touch-rr", policy_name) == 0) {
vm_domain_policy_set(&vm_default_policy,
VM_POLICY_FIRST_TOUCH_ROUND_ROBIN, 0);
} else if (strcmp("first-touch", policy_name) == 0) {
vm_domain_policy_set(&vm_default_policy,
VM_POLICY_FIRST_TOUCH, 0);
} else if (strcmp("rr", policy_name) == 0) {
vm_domain_policy_set(&vm_default_policy,
VM_POLICY_ROUND_ROBIN, 0);
} else {
error = EINVAL;
goto finish;
}
error = 0;
finish:
mtx_unlock(&vm_default_policy_mtx);
return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, default_policy, CTLTYPE_STRING | CTLFLAG_RW,
0, 0, sysctl_vm_default_policy, "A",
"Default policy (rr, first-touch, first-touch-rr");
/*
* Initialise a VM domain iterator.
*
* Check the thread policy, then the proc policy,
* then default to the system policy.
*/
void
vm_policy_iterator_init(struct vm_domain_iterator *vi)
{
#ifdef VM_NUMA_ALLOC
struct vm_domain_policy lcl;
#endif
vm_domain_iterator_init(vi);
#ifdef VM_NUMA_ALLOC
/* Copy out the thread policy */
vm_domain_policy_localcopy(&lcl, &curthread->td_vm_dom_policy);
if (lcl.p.policy != VM_POLICY_NONE) {
/* Thread policy is present; use it */
vm_domain_iterator_set_policy(vi, &lcl);
return;
}
vm_domain_policy_localcopy(&lcl,
&curthread->td_proc->p_vm_dom_policy);
if (lcl.p.policy != VM_POLICY_NONE) {
/* Process policy is present; use it */
vm_domain_iterator_set_policy(vi, &lcl);
return;
}
#endif
/* Use system default policy */
vm_domain_iterator_set_policy(vi, &vm_default_policy);
}
void
vm_policy_iterator_finish(struct vm_domain_iterator *vi)
{
vm_domain_iterator_cleanup(vi);
}
#ifdef VM_NUMA_ALLOC
static __inline int
vm_domain_rr_selectdomain(int skip_domain)
{
struct thread *td;
td = curthread;
td->td_dom_rr_idx++;
td->td_dom_rr_idx %= vm_ndomains;
/*
* If skip_domain is provided then skip over that
* domain. This is intended for round robin variants
* which first try a fixed domain.
*/
if ((skip_domain > -1) && (td->td_dom_rr_idx == skip_domain)) {
td->td_dom_rr_idx++;
td->td_dom_rr_idx %= vm_ndomains;
}
return (td->td_dom_rr_idx);
}
#endif
/*
* This implements a very simple set of VM domain memory allocation
* policies and iterators.
*/
/*
* A VM domain policy represents a desired VM domain policy.
* Iterators implement searching through VM domains in a specific
* order.
*/
/*
* When setting a policy, the caller must establish their own
* exclusive write protection for the contents of the domain
* policy.
*/
int
vm_domain_policy_init(struct vm_domain_policy *vp)
{
bzero(vp, sizeof(*vp));
vp->p.policy = VM_POLICY_NONE;
vp->p.domain = -1;
return (0);
}
int
vm_domain_policy_set(struct vm_domain_policy *vp,
vm_domain_policy_type_t vt, int domain)
{
seq_write_begin(&vp->seq);
vp->p.policy = vt;
vp->p.domain = domain;
seq_write_end(&vp->seq);
return (0);
}
/*
* Take a local copy of a policy.
*
* The destination policy isn't write-barriered; this is used
* for doing local copies into something that isn't shared.
*/
void
vm_domain_policy_localcopy(struct vm_domain_policy *dst,
const struct vm_domain_policy *src)
{
seq_t seq;
for (;;) {
seq = seq_read(&src->seq);
*dst = *src;
if (seq_consistent(&src->seq, seq))
return;
}
}
/*
* Take a write-barrier copy of a policy.
*
 * The destination policy is write-barriered; this is used
* for doing copies into policies that may be read by other
* threads.
*/
void
vm_domain_policy_copy(struct vm_domain_policy *dst,
const struct vm_domain_policy *src)
{
seq_t seq;
struct vm_domain_policy d;
for (;;) {
seq = seq_read(&src->seq);
d = *src;
if (seq_consistent(&src->seq, seq)) {
seq_write_begin(&dst->seq);
dst->p.domain = d.p.domain;
dst->p.policy = d.p.policy;
seq_write_end(&dst->seq);
return;
}
}
}
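/*
 * Both copy routines follow the same lock-free snapshot protocol used by
 * vm_domain_policy_set() above: writers bracket updates with
 * seq_write_begin()/seq_write_end(), and readers spin on
 * seq_read()/seq_consistent() until they observe a tear-free copy.
 */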
int
vm_domain_policy_validate(const struct vm_domain_policy *vp)
{
switch (vp->p.policy) {
case VM_POLICY_NONE:
case VM_POLICY_ROUND_ROBIN:
case VM_POLICY_FIRST_TOUCH:
case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
if (vp->p.domain == -1)
return (0);
return (-1);
case VM_POLICY_FIXED_DOMAIN:
case VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN:
#ifdef VM_NUMA_ALLOC
if (vp->p.domain >= 0 && vp->p.domain < vm_ndomains)
return (0);
#else
if (vp->p.domain == 0)
return (0);
#endif
return (-1);
default:
return (-1);
}
return (-1);
}
int
vm_domain_policy_cleanup(struct vm_domain_policy *vp)
{
/* For now, empty */
return (0);
}
int
vm_domain_iterator_init(struct vm_domain_iterator *vi)
{
/* Nothing to do for now */
return (0);
}
/*
 * Manually set up an iterator with the given details.
*/
int
vm_domain_iterator_set(struct vm_domain_iterator *vi,
vm_domain_policy_type_t vt, int domain)
{
#ifdef VM_NUMA_ALLOC
switch (vt) {
case VM_POLICY_FIXED_DOMAIN:
vi->policy = VM_POLICY_FIXED_DOMAIN;
vi->domain = domain;
vi->n = 1;
break;
case VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN:
vi->policy = VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN;
vi->domain = domain;
vi->n = vm_ndomains;
break;
case VM_POLICY_FIRST_TOUCH:
vi->policy = VM_POLICY_FIRST_TOUCH;
vi->domain = PCPU_GET(domain);
vi->n = 1;
break;
case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
vi->policy = VM_POLICY_FIRST_TOUCH_ROUND_ROBIN;
vi->domain = PCPU_GET(domain);
vi->n = vm_ndomains;
break;
case VM_POLICY_ROUND_ROBIN:
default:
vi->policy = VM_POLICY_ROUND_ROBIN;
vi->domain = -1;
vi->n = vm_ndomains;
break;
}
#else
vi->domain = 0;
vi->n = 1;
#endif
return (0);
}
/*
* Setup an iterator based on the given policy.
*/
static inline void
_vm_domain_iterator_set_policy(struct vm_domain_iterator *vi,
const struct vm_domain_policy *vt)
{
#ifdef VM_NUMA_ALLOC
/*
* Initialise the iterator.
*
* For first-touch, the initial domain is set
* via the current thread CPU domain.
*
* For fixed-domain, it's assumed that the
* caller has initialised the specific domain
* it is after.
*/
switch (vt->p.policy) {
case VM_POLICY_FIXED_DOMAIN:
vi->policy = vt->p.policy;
vi->domain = vt->p.domain;
vi->n = 1;
break;
case VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN:
vi->policy = vt->p.policy;
vi->domain = vt->p.domain;
vi->n = vm_ndomains;
break;
case VM_POLICY_FIRST_TOUCH:
vi->policy = vt->p.policy;
vi->domain = PCPU_GET(domain);
vi->n = 1;
break;
case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
vi->policy = vt->p.policy;
vi->domain = PCPU_GET(domain);
vi->n = vm_ndomains;
break;
case VM_POLICY_ROUND_ROBIN:
default:
/*
* Default to round-robin policy.
*/
vi->policy = VM_POLICY_ROUND_ROBIN;
vi->domain = -1;
vi->n = vm_ndomains;
break;
}
#else
vi->domain = 0;
vi->n = 1;
#endif
}
void
vm_domain_iterator_set_policy(struct vm_domain_iterator *vi,
const struct vm_domain_policy *vt)
{
seq_t seq;
struct vm_domain_policy vt_lcl;
for (;;) {
seq = seq_read(&vt->seq);
vt_lcl = *vt;
if (seq_consistent(&vt->seq, seq)) {
_vm_domain_iterator_set_policy(vi, &vt_lcl);
return;
}
}
}
/*
* Return the next VM domain to use.
*
 * Returns 0 with *domain set to the next domain to use, or
 * -1 to indicate that no more domains are available.
*/
int
vm_domain_iterator_run(struct vm_domain_iterator *vi, int *domain)
{
/* General catch-all */
if (vi->n <= 0)
return (-1);
#ifdef VM_NUMA_ALLOC
switch (vi->policy) {
case VM_POLICY_FIXED_DOMAIN:
case VM_POLICY_FIRST_TOUCH:
*domain = vi->domain;
vi->n--;
break;
case VM_POLICY_FIXED_DOMAIN_ROUND_ROBIN:
case VM_POLICY_FIRST_TOUCH_ROUND_ROBIN:
/*
* XXX TODO: skip over the rr'ed domain
* if it equals the one we started with.
*/
if (vi->n == vm_ndomains)
*domain = vi->domain;
else
*domain = vm_domain_rr_selectdomain(vi->domain);
vi->n--;
break;
case VM_POLICY_ROUND_ROBIN:
default:
*domain = vm_domain_rr_selectdomain(-1);
vi->n--;
break;
}
#else
*domain = 0;
vi->n--;
#endif
return (0);
}
/*
 * Returns 1 if the iteration is done, or 0 if it is not.
 * This can only be called after at least one pass through
 * the iterator, i.e. it is designed to be used as the tail
 * check of a loop, not the head check.
*/
int
vm_domain_iterator_isdone(struct vm_domain_iterator *vi)
{
return (vi->n <= 0);
}
int
vm_domain_iterator_cleanup(struct vm_domain_iterator *vi)
{
return (0);
}
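A minimal sketch of the consumption pattern these routines are designed for,
with vm_domain_iterator_isdone() as the tail check described above;
alloc_from_domain() is a hypothetical placeholder for a per-domain allocation
attempt, not a real kernel function.

static vm_page_t
example_alloc_across_domains(void)
{
	struct vm_domain_iterator vi;
	vm_page_t m;
	int domain;

	m = NULL;
	vm_policy_iterator_init(&vi);	/* thread -> proc -> system default */
	do {
		if (vm_domain_iterator_run(&vi, &domain) != 0)
			break;		/* no more domains to try */
		m = alloc_from_domain(domain);	/* hypothetical helper */
	} while (m == NULL && !vm_domain_iterator_isdone(&vi));
	vm_policy_iterator_finish(&vi);
	return (m);
}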


@ -1,71 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
* $FreeBSD$
*/
#ifndef __VM_DOMAIN_H__
#define __VM_DOMAIN_H__
#include <sys/_vm_domain.h>
struct vm_domain_iterator {
vm_domain_policy_type_t policy;
int domain;
int n;
};
/*
* TODO: check to see if these should just become inline functions
* at some point.
*/
extern int vm_domain_policy_init(struct vm_domain_policy *vp);
extern int vm_domain_policy_set(struct vm_domain_policy *vp,
vm_domain_policy_type_t vt, int domain);
extern int vm_domain_policy_cleanup(struct vm_domain_policy *vp);
extern void vm_domain_policy_localcopy(struct vm_domain_policy *dst,
const struct vm_domain_policy *src);
extern void vm_domain_policy_copy(struct vm_domain_policy *dst,
const struct vm_domain_policy *src);
extern int vm_domain_policy_validate(const struct vm_domain_policy *vp);
extern int vm_domain_iterator_init(struct vm_domain_iterator *vi);
extern int vm_domain_iterator_set(struct vm_domain_iterator *vi,
vm_domain_policy_type_t vt, int domain);
extern void vm_domain_iterator_set_policy(struct vm_domain_iterator *vi,
const struct vm_domain_policy *vt);
extern int vm_domain_iterator_run(struct vm_domain_iterator *vi,
int *domain);
extern int vm_domain_iterator_isdone(struct vm_domain_iterator *vi);
extern int vm_domain_iterator_cleanup(struct vm_domain_iterator *vi);
extern void vm_policy_iterator_init(struct vm_domain_iterator *vi);
extern void vm_policy_iterator_finish(struct vm_domain_iterator *vi);
#endif /* __VM_DOMAIN_H__ */

sys/vm/vm_domainset.c (new file, 243 lines)

@ -0,0 +1,243 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
/*
* Iterators are written such that the first nowait pass has as short a
* codepath as possible to eliminate bloat from the allocator. It is
* assumed that most allocations are successful.
*/
/*
* Determine which policy is to be used for this allocation.
*/
static void
vm_domainset_iter_domain(struct vm_domainset_iter *di, struct vm_object *obj)
{
struct domainset *domain;
/*
 * Object policy takes precedence over thread policy. The policies
* are immutable and unsynchronized. Updates can race but pointer
* loads are assumed to be atomic.
*/
if (obj != NULL && (domain = obj->domain.dr_policy) != NULL) {
di->di_domain = domain;
di->di_iter = &obj->domain.dr_iterator;
} else {
di->di_domain = curthread->td_domain.dr_policy;
di->di_iter = &curthread->td_domain.dr_iterator;
}
}
static void
vm_domainset_iter_rr(struct vm_domainset_iter *di, int *domain)
{
int d;
d = *di->di_iter;
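	/* Advance to the next domain present in the set, wrapping at ds_max. */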
do {
d = (d + 1) % di->di_domain->ds_max;
} while (!DOMAINSET_ISSET(d, &di->di_domain->ds_mask));
*di->di_iter = *domain = d;
}
static void
vm_domainset_iter_prefer(struct vm_domainset_iter *di, int *domain)
{
int d;
d = *di->di_iter;
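	/*
	 * Round-robin over the set, skipping the preferred domain,
	 * which vm_domainset_iter_first() hands out first.
	 */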
do {
d = (d + 1) % di->di_domain->ds_max;
} while (!DOMAINSET_ISSET(d, &di->di_domain->ds_mask) ||
d == di->di_domain->ds_prefer);
*di->di_iter = *domain = d;
}
static void
vm_domainset_iter_next(struct vm_domainset_iter *di, int *domain)
{
KASSERT(di->di_n > 0,
("vm_domainset_iter_first: Invalid n %d", di->di_n));
switch (di->di_domain->ds_policy) {
case DOMAINSET_POLICY_FIRSTTOUCH:
/*
* To prevent impossible allocations we convert an invalid
* first-touch to round-robin.
*/
/* FALLTHROUGH */
case DOMAINSET_POLICY_ROUNDROBIN:
vm_domainset_iter_rr(di, domain);
break;
case DOMAINSET_POLICY_PREFER:
vm_domainset_iter_prefer(di, domain);
break;
default:
panic("vm_domainset_iter_first: Unknown policy %d",
di->di_domain->ds_policy);
}
KASSERT(*domain < vm_ndomains,
("vm_domainset_iter_next: Invalid domain %d", *domain));
}
static void
vm_domainset_iter_first(struct vm_domainset_iter *di, int *domain)
{
switch (di->di_domain->ds_policy) {
case DOMAINSET_POLICY_FIRSTTOUCH:
*domain = PCPU_GET(domain);
if (DOMAINSET_ISSET(*domain, &di->di_domain->ds_mask)) {
di->di_n = 1;
break;
}
/*
* To prevent impossible allocations we convert an invalid
* first-touch to round-robin.
*/
/* FALLTHROUGH */
case DOMAINSET_POLICY_ROUNDROBIN:
di->di_n = di->di_domain->ds_cnt;
vm_domainset_iter_rr(di, domain);
break;
case DOMAINSET_POLICY_PREFER:
*domain = di->di_domain->ds_prefer;
di->di_n = di->di_domain->ds_cnt;
break;
default:
panic("vm_domainset_iter_first: Unknown policy %d",
di->di_domain->ds_policy);
}
KASSERT(di->di_n > 0,
("vm_domainset_iter_first: Invalid n %d", di->di_n));
KASSERT(*domain < vm_ndomains,
("vm_domainset_iter_first: Invalid domain %d", *domain));
}
void
vm_domainset_iter_page_init(struct vm_domainset_iter *di, struct vm_object *obj,
int *domain, int *req)
{
vm_domainset_iter_domain(di, obj);
di->di_flags = *req;
*req = (di->di_flags & ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) |
VM_ALLOC_NOWAIT;
vm_domainset_iter_first(di, domain);
}
int
vm_domainset_iter_page(struct vm_domainset_iter *di, int *domain, int *req)
{
/*
	 * If we exhausted all options with NOWAIT and did a WAITFAIL, it
* is time to return an error to the caller.
*/
if ((*req & VM_ALLOC_WAITFAIL) != 0)
return (ENOMEM);
	/* If there are more domains to visit, run the iterator. */
if (--di->di_n != 0) {
vm_domainset_iter_next(di, domain);
return (0);
}
	/* If we visited all domains and this was a NOWAIT request, return an error. */
if ((di->di_flags & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) == 0)
return (ENOMEM);
/*
	 * We have visited all domains with non-blocking allocations; try
* from the beginning with a blocking allocation.
*/
vm_domainset_iter_first(di, domain);
*req = di->di_flags;
return (0);
}
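A sketch of the loop callers are expected to build around the two routines
above: the init call downgrades the request to VM_ALLOC_NOWAIT for the first
sweep, and vm_domainset_iter_page() restores the blocking flags for the last
attempt. vm_page_alloc_domain() is assumed here as the per-domain page
allocator; treat its name and signature as illustrative.

static vm_page_t
example_page_alloc(vm_object_t obj, vm_pindex_t pindex, int req)
{
	struct vm_domainset_iter di;
	vm_page_t m;
	int domain;

	vm_domainset_iter_page_init(&di, obj, &domain, &req);
	do {
		/* First sweep is non-blocking; the final retry may wait. */
		m = vm_page_alloc_domain(obj, pindex, domain, req);
		if (m != NULL)
			return (m);
	} while (vm_domainset_iter_page(&di, &domain, &req) == 0);
	return (NULL);
}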
void
vm_domainset_iter_malloc_init(struct vm_domainset_iter *di,
struct vm_object *obj, int *domain, int *flags)
{
vm_domainset_iter_domain(di, obj);
di->di_flags = *flags;
*flags = (di->di_flags & ~M_WAITOK) | M_NOWAIT;
vm_domainset_iter_first(di, domain);
}
int
vm_domainset_iter_malloc(struct vm_domainset_iter *di, int *domain, int *flags)
{
	/* If there are more domains to visit, run the iterator. */
if (--di->di_n != 0) {
vm_domainset_iter_next(di, domain);
return (0);
}
	/* If we visited all domains and this was a NOWAIT request, return an error. */
if ((di->di_flags & M_WAITOK) == 0)
return (ENOMEM);
/*
	 * We have visited all domains with non-blocking allocations; try
* from the beginning with a blocking allocation.
*/
vm_domainset_iter_first(di, domain);
*flags = di->di_flags;
return (0);
}

sys/vm/vm_domainset.h (new file, 47 lines)

@ -0,0 +1,47 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2017, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __VM_DOMAINSET_H__
#define __VM_DOMAINSET_H__
struct vm_domainset_iter {
struct domainset *di_domain;
int *di_iter;
int di_flags;
int di_n;
};
int vm_domainset_iter_page(struct vm_domainset_iter *, int *, int *);
void vm_domainset_iter_page_init(struct vm_domainset_iter *,
struct vm_object *, int *, int *);
int vm_domainset_iter_malloc(struct vm_domainset_iter *, int *, int *);
void vm_domainset_iter_malloc_init(struct vm_domainset_iter *,
struct vm_object *, int *, int *);
#endif /* __VM_DOMAINSET_H__ */


@ -56,14 +56,21 @@ void kmap_free_wakeup(vm_map_t, vm_offset_t, vm_size_t);
/* These operate on virtual addresses backed by memory. */
vm_offset_t kmem_alloc_attr(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_attr_domain(int domain, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig(struct vmem *, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_alloc_contig_domain(int domain, vm_size_t size, int flags,
vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr);
vm_offset_t kmem_malloc(struct vmem *, vm_size_t size, int flags);
vm_offset_t kmem_malloc_domain(int domain, vm_size_t size, int flags);
void kmem_free(struct vmem *, vm_offset_t, vm_size_t);
/* This provides memory for previously allocated address space. */
int kmem_back(vm_object_t, vm_offset_t, vm_size_t, int);
int kmem_back_domain(int, vm_object_t, vm_offset_t, vm_size_t, int);
void kmem_unback(vm_object_t, vm_offset_t, vm_size_t);
/* Bootstrapping. */


@ -1589,6 +1589,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
KASSERT(upgrade || dst_entry->object.vm_object == NULL,
("vm_fault_copy_entry: vm_object not NULL"));
if (src_object != dst_object) {
dst_object->domain = src_object->domain;
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
dst_object->charge = dst_entry->end - dst_entry->start;


@ -81,16 +81,25 @@ __FBSDID("$FreeBSD$");
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#if VM_NRESERVLEVEL > 0
#define KVA_QUANTUM (1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT))
#else
/* On non-superpage architectures we want large import sizes. */
#define KVA_QUANTUM (PAGE_SIZE * 1024)
#endif
long physmem;
/*
@ -107,7 +116,10 @@ kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
vm_offset_t addr;
int result;
KASSERT((size % KVA_QUANTUM) == 0,
("kva_import: Size %jd is not a multiple of %d",
(intmax_t)size, (int)KVA_QUANTUM));
addr = vm_map_min(kernel_map);
result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
VMFS_SUPER_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
@ -130,6 +142,7 @@ static void
vm_mem_init(dummy)
void *dummy;
{
int domain;
/*
* Initializes resident memory structures. From here on, all physical
@ -150,13 +163,15 @@ vm_mem_init(dummy)
* Initialize the kernel_arena. This can grow on demand.
*/
vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
vmem_set_import(kernel_arena, kva_import, NULL, NULL,
#if VM_NRESERVLEVEL > 0
1 << (VM_LEVEL_0_ORDER + PAGE_SHIFT));
#else
/* On non-superpage architectures want large import sizes. */
PAGE_SIZE * 1024);
#endif
vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);
for (domain = 0; domain < vm_ndomains; domain++) {
vm_dom[domain].vmd_kernel_arena = vmem_create(
"kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
vmem_set_import(vm_dom[domain].vmd_kernel_arena,
(vmem_import_t *)vmem_alloc, NULL, kernel_arena,
KVA_QUANTUM);
}
kmem_init_zero_region();
pmap_init();


@ -67,9 +67,12 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_vm.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h> /* for ticks and hz */
#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/proc.h>
@ -77,15 +80,18 @@ __FBSDID("$FreeBSD$");
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_domainset.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
@ -161,17 +167,17 @@ kva_free(vm_offset_t addr, vm_size_t size)
* given flags, then the pages are zeroed before they are mapped.
*/
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
vmem_t *vmem;
vm_object_t object = kernel_object;
vm_offset_t addr, i, offset;
vm_page_t m;
int pflags, tries;
KASSERT(vmem == kernel_arena,
("kmem_alloc_attr: Only kernel_arena is supported."));
size = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
@ -182,13 +188,13 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
for (i = 0; i < size; i += PAGE_SIZE) {
tries = 0;
retry:
m = vm_page_alloc_contig(object, atop(offset + i),
pflags, 1, low, high, PAGE_SIZE, 0, memattr);
m = vm_page_alloc_contig_domain(object, atop(offset + i),
domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
if (!vm_page_reclaim_contig(pflags, 1,
low, high, PAGE_SIZE, 0) &&
if (!vm_page_reclaim_contig_domain(domain,
pflags, 1, low, high, PAGE_SIZE, 0) &&
(flags & M_WAITOK) != 0)
VM_WAIT;
VM_OBJECT_WLOCK(object);
@ -199,6 +205,9 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vmem_free(vmem, addr, size);
return (0);
}
KASSERT(vm_phys_domidx(m) == domain,
("kmem_alloc_attr_domain: Domain mismatch %d != %d",
vm_phys_domidx(m), domain));
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
@ -209,6 +218,28 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
return (addr);
}
vm_offset_t
kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
KASSERT(vmem == kernel_arena,
("kmem_alloc_attr: Only kernel_arena is supported."));
vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
do {
addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
memattr);
if (addr != 0)
break;
} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
return (addr);
}
/*
* Allocates a region from the kernel address map and physically
* contiguous pages within the specified address range to the kernel
@ -218,19 +249,19 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
* mapped.
*/
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
vmem_t *vmem;
vm_object_t object = kernel_object;
vm_offset_t addr, offset, tmp;
vm_page_t end_m, m;
u_long npages;
int pflags, tries;
KASSERT(vmem == kernel_arena,
("kmem_alloc_contig: Only kernel_arena is supported."));
size = round_page(size);
vmem = vm_dom[domain].vmd_kernel_arena;
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
offset = addr - VM_MIN_KERNEL_ADDRESS;
@ -241,13 +272,14 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
VM_OBJECT_WLOCK(object);
tries = 0;
retry:
m = vm_page_alloc_contig(object, atop(offset), pflags,
m = vm_page_alloc_contig_domain(object, atop(offset), domain, pflags,
npages, low, high, alignment, boundary, memattr);
if (m == NULL) {
VM_OBJECT_WUNLOCK(object);
if (tries < ((flags & M_NOWAIT) != 0 ? 1 : 3)) {
if (!vm_page_reclaim_contig(pflags, npages, low, high,
alignment, boundary) && (flags & M_WAITOK) != 0)
if (!vm_page_reclaim_contig_domain(domain, pflags,
npages, low, high, alignment, boundary) &&
(flags & M_WAITOK) != 0)
VM_WAIT;
VM_OBJECT_WLOCK(object);
tries++;
@ -256,6 +288,9 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
vmem_free(vmem, addr, size);
return (0);
}
KASSERT(vm_phys_domidx(m) == domain,
("kmem_alloc_contig_domain: Domain mismatch %d != %d",
vm_phys_domidx(m), domain));
end_m = m + npages;
tmp = addr;
for (; m < end_m; m++) {
@ -270,6 +305,29 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
return (addr);
}
vm_offset_t
kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
vm_memattr_t memattr)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
KASSERT(vmem == kernel_arena,
("kmem_alloc_contig: Only kernel_arena is supported."));
vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
do {
addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
alignment, boundary, memattr);
if (addr != 0)
break;
} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
return (addr);
}
/*
* kmem_suballoc:
*
@ -313,18 +371,18 @@ kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
* Allocate wired-down pages in the kernel's address space.
*/
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
vmem_t *vmem;
vm_offset_t addr;
int rv;
KASSERT(vmem == kernel_arena,
("kmem_malloc: Only kernel_arena is supported."));
vmem = vm_dom[domain].vmd_kernel_arena;
size = round_page(size);
if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
return (0);
rv = kmem_back(kernel_object, addr, size, flags);
rv = kmem_back_domain(domain, kernel_object, addr, size, flags);
if (rv != KERN_SUCCESS) {
vmem_free(vmem, addr, size);
return (0);
@ -332,20 +390,41 @@ kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
return (addr);
}
vm_offset_t
kmem_malloc(struct vmem *vmem, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
vm_offset_t addr;
int domain;
KASSERT(vmem == kernel_arena,
("kmem_malloc: Only kernel_arena is supported."));
vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
do {
addr = kmem_malloc_domain(domain, size, flags);
if (addr != 0)
break;
} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
return (addr);
}
/*
* kmem_back:
*
* Allocate physical pages for the specified virtual address range.
*/
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
vm_size_t size, int flags)
{
vm_offset_t offset, i;
vm_page_t m, mpred;
int pflags;
KASSERT(object == kernel_object,
("kmem_back: only supports kernel object."));
("kmem_back_domain: only supports kernel object."));
offset = addr - VM_MIN_KERNEL_ADDRESS;
pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
@ -358,8 +437,8 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
retry:
mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
for (; i < size; i += PAGE_SIZE, mpred = m) {
m = vm_page_alloc_after(object, atop(offset + i), pflags,
mpred);
m = vm_page_alloc_domain_after(object, atop(offset + i),
domain, pflags, mpred);
/*
* Ran out of space, free everything up and return. Don't need
@ -373,6 +452,9 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
kmem_unback(object, addr, i);
return (KERN_NO_SPACE);
}
KASSERT(vm_phys_domidx(m) == domain,
("kmem_back_domain: Domain mismatch %d != %d",
vm_phys_domidx(m), domain));
if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
@ -386,6 +468,26 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
return (KERN_SUCCESS);
}
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
struct vm_domainset_iter di;
int domain;
int ret;
KASSERT(object == kernel_object,
("kmem_back: only supports kernel object."));
vm_domainset_iter_malloc_init(&di, kernel_object, &domain, &flags);
do {
ret = kmem_back_domain(domain, object, addr, size, flags);
if (ret == KERN_SUCCESS)
break;
} while (vm_domainset_iter_malloc(&di, &domain, &flags) == 0);
return (ret);
}
/*
* kmem_unback:
*
@ -395,26 +497,39 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
* A physical page must exist within the specified object at each index
* that is being unmapped.
*/
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
static int
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
vm_page_t m, next;
vm_offset_t end, offset;
int domain;
KASSERT(object == kernel_object,
("kmem_unback: only supports kernel object."));
if (size == 0)
return (0);
pmap_remove(kernel_pmap, addr, addr + size);
offset = addr - VM_MIN_KERNEL_ADDRESS;
end = offset + size;
VM_OBJECT_WLOCK(object);
for (m = vm_page_lookup(object, atop(offset)); offset < end;
offset += PAGE_SIZE, m = next) {
m = vm_page_lookup(object, atop(offset));
domain = vm_phys_domidx(m);
for (; offset < end; offset += PAGE_SIZE, m = next) {
next = vm_page_next(m);
vm_page_unwire(m, PQ_NONE);
vm_page_free(m);
}
VM_OBJECT_WUNLOCK(object);
return (domain);
}
void
kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
_kmem_unback(object, addr, size);
}
/*
@ -426,12 +541,13 @@ kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
void
kmem_free(struct vmem *vmem, vm_offset_t addr, vm_size_t size)
{
int domain;
KASSERT(vmem == kernel_arena,
("kmem_free: Only kernel_arena is supported."));
size = round_page(size);
kmem_unback(kernel_object, addr, size);
vmem_free(vmem, addr, size);
domain = _kmem_unback(kernel_object, addr, size);
vmem_free(vm_dom[domain].vmd_kernel_arena, addr, size);
}
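/*
 * Design note: _kmem_unback() reports the domain of the first page in the
 * run so that kmem_free() can return the KVA to the matching per-domain
 * arena.  This assumes the range was backed from a single domain, which
 * holds for the kmem_back_domain()-based allocation paths above.
 */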
/*
