Merge ^/head r357855 through r357920.

Dimitry Andric 2020-02-14 19:32:58 +00:00
commit 74dc6beb30
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang1000-import/; revision=357930
89 changed files with 1336 additions and 702 deletions


@ -244,7 +244,7 @@ translator tcpsinfo_t < struct tcpcb *p > {
tcps_cwnd_ssthresh = p == NULL ? -1 : p->snd_ssthresh;
tcps_srecover = p == NULL ? -1 : p->snd_recover;
tcps_sack_fack = p == NULL ? 0 : p->snd_fack;
tcps_sack_snxt = p == NULL ? 0 : p->sack_newdata;
tcps_sack_snxt = p == NULL ? 0 : p->snd_recover;
tcps_rto = p == NULL ? -1 : (p->t_rxtcur * 1000) / `hz;
tcps_mss = p == NULL ? -1 : p->t_maxseg;
tcps_retransmit = p == NULL ? -1 : p->t_rxtshift > 0 ? 1 : 0;


@ -36,6 +36,7 @@
#include <libdwarf.h>
#include <libelftc.h>
#include <libgen.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>


@ -73,7 +73,7 @@ USE="$3"
# determine if we are using gcc, and if so, what version because the proposed
# solution uses a nonstandard option.
PRG=`echo "$1" | $AWK '{ sub(/^[[:space:]]*/,""); sub(/[[:space:]].*$/, ""); print; }' || exit 0`
FSF=`"$PRG" --version 2>/dev/null || exit 0 | fgrep "Free Software Foundation" | head -n 1`
FSF=`("$PRG" --version 2>/dev/null || exit 0) | fgrep "Free Software Foundation" | head -n 1`
ALL=`"$PRG" -dumpversion 2>/dev/null || exit 0`
ONE=`echo "$ALL" | sed -e 's/\..*$//'`
if test -n "$FSF" && test -n "$ALL" && test -n "$ONE" ; then


@ -71,7 +71,7 @@ _thr_cancel(pthread_t pthread)
* _thr_find_thread and THR_THREAD_UNLOCK will enter and leave critical
* region automatically.
*/
if ((ret = _thr_find_thread(curthread, pthread, 0)) == 0) {
if ((ret = _thr_find_thread(curthread, pthread, 1)) == 0) {
if (!pthread->cancel_pending) {
pthread->cancel_pending = 1;
if (pthread->state != PS_DEAD)


@ -258,27 +258,52 @@ reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
return (0);
}
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
RtldLockState *lockstate)
{
Elf_Addr *where, target, *ptr;
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
}
int
reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
Elf_Addr *where, target, *ptr;
if (!obj->irelative)
return (0);
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE) {
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
}
}
obj->irelative = false;
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
reloc_iresolve_one(obj, rela, lockstate);
}
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
if (!obj->irelative_nonplt)
return (0);
obj->irelative_nonplt = false;
relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_AARCH64_IRELATIVE)
reloc_iresolve_one(obj, rela, lockstate);
}
return (0);
}
@ -498,6 +523,9 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
break;
case R_AARCH64_NONE:
break;
case R_AARCH64_IRELATIVE:
obj->irelative_nonplt = true;
break;
default:
rtld_printf("%s: Unhandled relocation %lu\n",
obj->path, ELF_R_TYPE(rela->r_info));


@ -303,6 +303,10 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
case R_X86_64_RELATIVE:
*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
break;
case R_X86_64_IRELATIVE:
obj->irelative_nonplt = true;
break;
/*
* missing:
* R_X86_64_GOTPCREL, R_X86_64_32, R_X86_64_32S, R_X86_64_16,
@ -410,34 +414,53 @@ reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
return (target);
}
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
RtldLockState *lockstate)
{
Elf_Addr *where, target, *ptr;
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
}
int
reloc_iresolve(Obj_Entry *obj, RtldLockState *lockstate)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Rela *relalim;
const Elf_Rela *rela;
if (!obj->irelative)
return (0);
relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where, target, *ptr;
switch (ELF_R_TYPE(rela->r_info)) {
case R_X86_64_JMP_SLOT:
break;
case R_X86_64_IRELATIVE:
ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(ptr);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
break;
if (!obj->irelative)
return (0);
obj->irelative = false;
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_X86_64_IRELATIVE)
reloc_iresolve_one(obj, rela, lockstate);
}
}
obj->irelative = false;
return (0);
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj, RtldLockState *lockstate)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
if (!obj->irelative_nonplt)
return (0);
obj->irelative_nonplt = false;
relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) == R_X86_64_IRELATIVE)
reloc_iresolve_one(obj, rela, lockstate);
}
return (0);
}
int


@ -451,6 +451,15 @@ reloc_iresolve(Obj_Entry *obj __unused,
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
/* XXX not implemented */
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)


@ -263,6 +263,9 @@ reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
case R_386_TLS_DTPOFF32:
*where += (Elf_Addr) def->st_value;
break;
case R_386_IRELATIVE:
obj->irelative_nonplt = true;
break;
default:
_rtld_error("%s: Unsupported relocation type %d"
" in non-PLT relocations\n", obj->path,
@ -365,29 +368,51 @@ reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
return (target);
}
static void
reloc_iresolve_one(Obj_Entry *obj, const Elf_Rel *rel,
RtldLockState *lockstate)
{
Elf_Addr *where, target;
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(obj->relocbase + *where);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
}
int
reloc_iresolve(Obj_Entry *obj, RtldLockState *lockstate)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
Elf_Addr *where, target;
const Elf_Rel *rellim;
const Elf_Rel *rel;
if (!obj->irelative)
return (0);
rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
switch (ELF_R_TYPE(rel->r_info)) {
case R_386_IRELATIVE:
where = (Elf_Addr *)(obj->relocbase + rel->r_offset);
lock_release(rtld_bind_lock, lockstate);
target = call_ifunc_resolver(obj->relocbase + *where);
wlock_acquire(rtld_bind_lock, lockstate);
*where = target;
break;
if (!obj->irelative)
return (0);
obj->irelative = false;
rellim = (const Elf_Rel *)((const char *)obj->pltrel + obj->pltrelsize);
for (rel = obj->pltrel; rel < rellim; rel++) {
if (ELF_R_TYPE(rel->r_info) == R_386_IRELATIVE)
reloc_iresolve_one(obj, rel, lockstate);
}
}
obj->irelative = false;
return (0);
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj, RtldLockState *lockstate)
{
const Elf_Rel *rellim;
const Elf_Rel *rel;
if (!obj->irelative_nonplt)
return (0);
obj->irelative_nonplt = false;
rellim = (const Elf_Rel *)((const char *)obj->rel + obj->relsize);
for (rel = obj->rel; rel < rellim; rel++) {
if (ELF_R_TYPE(rel->r_info) == R_386_IRELATIVE)
reloc_iresolve_one(obj, rel, lockstate);
}
return (0);
}
int


@ -722,6 +722,15 @@ reloc_iresolve(Obj_Entry *obj __unused,
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
/* XXX not implemented */
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)


@ -652,6 +652,13 @@ reloc_iresolve(Obj_Entry *obj,
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)


@ -652,6 +652,13 @@ reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
#endif
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
return (0);
}
void
init_pltgot(Obj_Entry *obj)
{


@ -211,6 +211,15 @@ reloc_iresolve(Obj_Entry *obj __unused,
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
/* XXX not implemented */
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)


@ -3034,10 +3034,13 @@ resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
if (obj->ifuncs_resolved)
return (0);
obj->ifuncs_resolved = true;
if (!obj->irelative && !((obj->bind_now || bind_now) && obj->gnu_ifunc))
if (!obj->irelative && !obj->irelative_nonplt &&
!((obj->bind_now || bind_now) && obj->gnu_ifunc))
return (0);
if (obj_disable_relro(obj) == -1 ||
(obj->irelative && reloc_iresolve(obj, lockstate) == -1) ||
(obj->irelative_nonplt && reloc_iresolve_nonplt(obj,
lockstate) == -1) ||
((obj->bind_now || bind_now) && obj->gnu_ifunc &&
reloc_gnu_ifunc(obj, flags, lockstate) == -1) ||
obj_enforce_relro(obj) == -1)


@ -264,6 +264,7 @@ typedef struct Struct_Obj_Entry {
bool dag_inited : 1; /* Object has its DAG initialized. */
bool filtees_loaded : 1; /* Filtees loaded */
bool irelative : 1; /* Object has R_MACHDEP_IRELATIVE relocs */
bool irelative_nonplt : 1; /* Object has R_MACHDEP_IRELATIVE non-plt relocs */
bool gnu_ifunc : 1; /* Object has references to STT_GNU_IFUNC */
bool non_plt_gnu_ifunc : 1; /* Object has non-plt IFUNC references */
bool ifuncs_resolved : 1; /* Object ifuncs were already resolved */
@ -406,6 +407,7 @@ int reloc_non_plt(Obj_Entry *, Obj_Entry *, int flags,
int reloc_plt(Obj_Entry *, int flags, struct Struct_RtldLockState *);
int reloc_jmpslots(Obj_Entry *, int flags, struct Struct_RtldLockState *);
int reloc_iresolve(Obj_Entry *, struct Struct_RtldLockState *);
int reloc_iresolve_nonplt(Obj_Entry *, struct Struct_RtldLockState *);
int reloc_gnu_ifunc(Obj_Entry *, int flags, struct Struct_RtldLockState *);
void ifunc_init(Elf_Auxinfo[__min_size(AT_COUNT)]);
void pre_init(void);


@ -569,6 +569,15 @@ reloc_iresolve(Obj_Entry *obj __unused,
return (0);
}
int
reloc_iresolve_nonplt(Obj_Entry *obj __unused,
struct Struct_RtldLockState *lockstate __unused)
{
/* XXX not implemented */
return (0);
}
int
reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
struct Struct_RtldLockState *lockstate __unused)


@ -159,6 +159,7 @@ kevlo [label="Kevin Lo\nkevlo@FreeBSD.org\n2003/02/21"]
kmoore [label="Kris Moore\nkmoore@FreeBSD.org\n2009/04/14"]
knu [label="Akinori Musha\nknu@FreeBSD.org\n2000/03/22"]
koitsu [label="Jeremy Chadwick\nkoitsu@FreeBSD.org\n2006/11/10"]
koobs [label="Kubilay Kocak\nkoobs@FreeBSD.org\n2012/12/24"]
krion [label="Kirill Ponomarew\nkrion@FreeBSD.org\n2003/07/20"]
kwm [label="Koop Mast\nkwm@FreeBSD.org\n2004/09/14"]
laszlof [label="Frank Laszlo\nlaszlof@FreeBSD.org\n2006/11/07"]


@ -1068,25 +1068,32 @@ flush_l1d_hw(void)
wrmsr(MSR_IA32_FLUSH_CMD, IA32_FLUSH_CMD_L1D);
}
static void __inline
amd64_syscall_ret_flush_l1d_inline(int error)
static void __noinline
amd64_syscall_ret_flush_l1d_check(int error)
{
void (*p)(void);
if (error != 0 && error != EEXIST && error != EAGAIN &&
error != EXDEV && error != ENOENT && error != ENOTCONN &&
error != EINPROGRESS) {
p = syscall_ret_l1d_flush;
if (error != EEXIST && error != EAGAIN && error != EXDEV &&
error != ENOENT && error != ENOTCONN && error != EINPROGRESS) {
p = (void *)atomic_load_ptr(&syscall_ret_l1d_flush);
if (p != NULL)
p();
}
}
static void __inline
amd64_syscall_ret_flush_l1d_check_inline(int error)
{
if (__predict_false(error != 0))
amd64_syscall_ret_flush_l1d_check(error);
}
void
amd64_syscall_ret_flush_l1d(int error)
{
amd64_syscall_ret_flush_l1d_inline(error);
amd64_syscall_ret_flush_l1d_check_inline(error);
}
void
@ -1190,5 +1197,5 @@ amd64_syscall(struct thread *td, int traced)
if (__predict_false(td->td_frame->tf_rip >= VM_MAXUSER_ADDRESS))
set_pcb_flags(td->td_pcb, PCB_FULL_IRET);
amd64_syscall_ret_flush_l1d_inline(td->td_errno);
amd64_syscall_ret_flush_l1d_check_inline(td->td_errno);
}


@ -184,7 +184,7 @@ iommu_init(void)
if (vmm_is_intel())
ops = &iommu_ops_intel;
else if (vmm_is_amd())
else if (vmm_is_svm())
ops = &iommu_ops_amd;
else
ops = NULL;


@ -347,7 +347,7 @@ vmm_init(void)
if (vmm_is_intel())
ops = &vmm_ops_intel;
else if (vmm_is_amd())
else if (vmm_is_svm())
ops = &vmm_ops_amd;
else
return (ENXIO);


@ -67,7 +67,7 @@ vmm_stat_register(void *arg)
if (vst->scope == VMM_STAT_SCOPE_INTEL && !vmm_is_intel())
return;
if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_amd())
if (vst->scope == VMM_STAT_SCOPE_AMD && !vmm_is_svm())
return;
if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {


@ -46,9 +46,10 @@ vmm_is_intel(void)
}
bool
vmm_is_amd(void)
vmm_is_svm(void)
{
return (strcmp(cpu_vendor, "AuthenticAMD") == 0);
return (strcmp(cpu_vendor, "AuthenticAMD") == 0 ||
strcmp(cpu_vendor, "HygonGenuine") == 0);
}
bool


@ -34,7 +34,7 @@
struct trapframe;
bool vmm_is_intel(void);
bool vmm_is_amd(void);
bool vmm_is_svm(void);
bool vmm_supports_1G_pages(void);
void dump_trapframe(struct trapframe *tf);


@ -135,7 +135,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
break;
case CPUID_8000_0008:
cpuid_count(*eax, *ecx, regs);
if (vmm_is_amd()) {
if (vmm_is_svm()) {
/*
* As on Intel (0000_0007:0, EDX), mask out
* unsupported or unsafe AMD extended features
@ -234,7 +234,7 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
case CPUID_8000_001D:
/* AMD Cache topology, like 0000_0004 for Intel. */
if (!vmm_is_amd())
if (!vmm_is_svm())
goto default_leaf;
/*
@ -276,8 +276,11 @@ x86_emulate_cpuid(struct vm *vm, int vcpu_id,
break;
case CPUID_8000_001E:
/* AMD Family 16h+ additional identifiers */
if (!vmm_is_amd() || CPUID_TO_FAMILY(cpu_id) < 0x16)
/*
* AMD Family 16h+ and Hygon Family 18h additional
* identifiers.
*/
if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
goto default_leaf;
vm_get_topology(vm, &sockets, &cores, &threads,


@ -57,6 +57,7 @@ __FBSDID("$FreeBSD$");
#include <cam/cam_periph.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_internal.h> /* For KASSERTs only */
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
@ -681,6 +682,10 @@ camperiphfree(struct cam_periph *periph)
cam_periph_assert(periph, MA_OWNED);
KASSERT(periph->periph_allocating == 0, ("%s%d: freed while allocating",
periph->periph_name, periph->unit_number));
KASSERT(periph->path->device->ccbq.dev_active == 0,
("%s%d: freed with %d active CCBs\n",
periph->periph_name, periph->unit_number,
periph->path->device->ccbq.dev_active));
for (p_drv = periph_drivers; *p_drv != NULL; p_drv++) {
if (strcmp((*p_drv)->driver_name, periph->periph_name) == 0)
break;


@ -248,7 +248,6 @@ static void xpt_run_allocq(struct cam_periph *periph, int sleep);
static void xpt_run_allocq_task(void *context, int pending);
static void xpt_run_devq(struct cam_devq *devq);
static callout_func_t xpt_release_devq_timeout;
static void xpt_release_simq_timeout(void *arg) __unused;
static void xpt_acquire_bus(struct cam_eb *bus);
static void xpt_release_bus(struct cam_eb *bus);
static uint32_t xpt_freeze_devq_device(struct cam_ed *dev, u_int count);
@ -4621,18 +4620,6 @@ xpt_release_simq(struct cam_sim *sim, int run_queue)
mtx_unlock(&devq->send_mtx);
}
/*
* XXX Appears to be unused.
*/
static void
xpt_release_simq_timeout(void *arg)
{
struct cam_sim *sim;
sim = (struct cam_sim *)arg;
xpt_release_simq(sim, /* run_queue */ TRUE);
}
void
xpt_done(union ccb *done_ccb)
{


@ -90,6 +90,7 @@ static const struct {
{0x06221b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX},
{0x06241b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX},
{0x06251b21, 0x00, "ASMedia ASM106x", AHCI_Q_NOCCS|AHCI_Q_NOAUX},
{0x79011d94, 0x00, "Hygon KERNCZ", 0},
{0x26528086, 0x00, "Intel ICH6", AHCI_Q_NOFORCE},
{0x26538086, 0x00, "Intel ICH6M", AHCI_Q_NOFORCE},
{0x26818086, 0x00, "Intel ESB2", 0},


@ -203,7 +203,8 @@ ecc_ei_load(void)
{
uint32_t val;
if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) {
if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
cpu_vendor_id != CPU_VENDOR_HYGON) {
printf("DRAM ECC error injection is not supported\n");
return (ENXIO);
}


@ -67,6 +67,8 @@ static int amdpm_debug = 0;
#define AMDPM_DEVICEID_AMD768PM 0x7443
#define AMDPM_DEVICEID_AMD8111PM 0x746B
#define AMDPM_VENDORID_HYGON 0x1d94
/* nVidia nForce chipset */
#define AMDPM_VENDORID_NVIDIA 0x10de
#define AMDPM_DEVICEID_NF_SMB 0x01b4
@ -199,7 +201,8 @@ amdpm_attach(device_t dev)
pci_write_config(dev, AMDPCI_GEN_CONFIG_PM, val_b | AMDPCI_PMIOEN, 1);
/* Allocate I/O space */
if (pci_get_vendor(dev) == AMDPM_VENDORID_AMD)
if (pci_get_vendor(dev) == AMDPM_VENDORID_AMD ||
pci_get_vendor(dev) == AMDPM_VENDORID_HYGON)
amdpm_sc->rid = AMDPCI_PMBASE;
else
amdpm_sc->rid = NFPCI_PMBASE;


@ -146,3 +146,5 @@
#define AMDCZ_SMBUS_DEVID 0x790b1022
#define AMDCZ49_SMBUS_REVID 0x49
#define HYGONCZ_SMBUS_DEVID 0x790b1d94


@ -60,6 +60,8 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/resource.h>
#include <sys/watchdog.h>
@ -269,7 +271,8 @@ amdsbwd_identify(driver_t *driver, device_t parent)
return;
if (pci_get_devid(smb_dev) != AMDSB_SMBUS_DEVID &&
pci_get_devid(smb_dev) != AMDFCH_SMBUS_DEVID &&
pci_get_devid(smb_dev) != AMDCZ_SMBUS_DEVID)
pci_get_devid(smb_dev) != AMDCZ_SMBUS_DEVID &&
pci_get_devid(smb_dev) != HYGONCZ_SMBUS_DEVID)
return;
child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", -1);
@ -378,6 +381,7 @@ static void
amdsbwd_probe_fch41(device_t dev, struct resource *pmres, uint32_t *addr)
{
uint8_t val;
char buf[36];
val = pmio_read(pmres, AMDFCH41_PM_ISA_CTRL);
if ((val & AMDFCH41_MMIO_EN) != 0) {
@ -416,7 +420,9 @@ amdsbwd_probe_fch41(device_t dev, struct resource *pmres, uint32_t *addr)
amdsbwd_verbose_printf(dev, "AMDFCH41_PM_DECODE_EN3 value = %#04x\n",
val);
#endif
device_set_desc(dev, "AMD FCH Rev 41h+ Watchdog Timer");
snprintf(buf, sizeof(buf), "%s FCH Rev 41h+ Watchdog Timer",
cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD");
device_set_desc_copy(dev, buf);
}
static int


@ -574,7 +574,6 @@ struct sge_txq {
uint64_t txpkts0_pkts; /* # of frames in type0 coalesced tx WRs */
uint64_t txpkts1_pkts; /* # of frames in type1 coalesced tx WRs */
uint64_t raw_wrs; /* # of raw work requests (alloc_wr_mbuf) */
uint64_t tls_wrs; /* # of TLS work requests */
uint64_t kern_tls_records;
uint64_t kern_tls_short;


@ -2082,7 +2082,6 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
ndesc += howmany(wr_len, EQ_ESIZE);
MPASS(ndesc <= available);
txq->tls_wrs++;
txq->kern_tls_records++;
txq->kern_tls_octets += tlen - mtod(m_tls, vm_offset_t);


@ -10359,7 +10359,6 @@ clear_stats(struct adapter *sc, u_int port_id)
txq->txpkts0_pkts = 0;
txq->txpkts1_pkts = 0;
txq->raw_wrs = 0;
txq->tls_wrs = 0;
txq->kern_tls_records = 0;
txq->kern_tls_short = 0;
txq->kern_tls_partial = 0;


@ -4204,8 +4204,6 @@ alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
"# of frames tx'd using type1 txpkts work requests");
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD,
&txq->raw_wrs, "# of raw work requests (non-packets)");
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tls_wrs", CTLFLAG_RD,
&txq->tls_wrs, "# of TLS work requests (TLS records)");
#ifdef KERN_TLS
if (sc->flags & KERN_TLS_OK) {


@ -188,6 +188,7 @@ hyperv_tsc_tcinit(void *dummy __unused)
switch (cpu_vendor_id) {
case CPU_VENDOR_AMD:
case CPU_VENDOR_HYGON:
hyperv_tsc_timecounter.tc_get_timecount =
hyperv_tsc_timecount_mfence;
tc64 = hyperv_tc64_tsc_mfence;


@ -102,6 +102,7 @@ const struct intsmb_device {
{ AMDSB_SMBUS_DEVID, "AMD SB600/7xx/8xx/9xx SMBus Controller" },
{ AMDFCH_SMBUS_DEVID, "AMD FCH SMBus Controller" },
{ AMDCZ_SMBUS_DEVID, "AMD FCH SMBus Controller" },
{ HYGONCZ_SMBUS_DEVID, "Hygon FCH SMBus Controller" },
};
static int
@ -243,6 +244,7 @@ intsmb_attach(device_t dev)
break;
case AMDFCH_SMBUS_DEVID:
case AMDCZ_SMBUS_DEVID:
case HYGONCZ_SMBUS_DEVID:
sc->sb8xx = 1;
break;
}


@ -339,10 +339,6 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
case CRYPTO_AES_NIST_GCM_16:
switch (en->cipher_key_len) {
case 128 / 8:
if (en->auth_algorithm != CRYPTO_AES_128_NIST_GMAC) {
error = EINVAL;
goto failure;
}
if (en->tls_vminor == TLS_MINOR_VER_TWO) {
if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
error = EPROTONOSUPPORT;
@ -360,10 +356,6 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
break;
case 256 / 8:
if (en->auth_algorithm != CRYPTO_AES_256_NIST_GMAC) {
error = EINVAL;
goto failure;
}
if (en->tls_vminor == TLS_MINOR_VER_TWO) {
if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
error = EPROTONOSUPPORT;


@ -101,6 +101,16 @@ static const struct amd_ntb_hw_info amd_ntb_hw_info_list[] = {
.msix_vector_count = 24,
.quirks = 0,
.desc = "AMD Non-Transparent Bridge"},
{ .vendor_id = NTB_HW_HYGON_VENDOR_ID,
.device_id = NTB_HW_HYGON_DEVICE_ID1,
.mw_count = 3,
.bar_start_idx = 1,
.spad_count = 16,
.db_count = 16,
.msix_vector_count = 24,
.quirks = QUIRK_MW0_32BIT,
.desc = "Hygon Non-Transparent Bridge"},
};
static const struct pci_device_table amd_ntb_devs[] = {
@ -109,7 +119,10 @@ static const struct pci_device_table amd_ntb_devs[] = {
PCI_DESCR("AMD Non-Transparent Bridge") },
{ PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID2),
.driver_data = (uintptr_t)&amd_ntb_hw_info_list[1],
PCI_DESCR("AMD Non-Transparent Bridge") }
PCI_DESCR("AMD Non-Transparent Bridge") },
{ PCI_DEV(NTB_HW_HYGON_VENDOR_ID, NTB_HW_HYGON_DEVICE_ID1),
.driver_data = (uintptr_t)&amd_ntb_hw_info_list[0],
PCI_DESCR("Hygon Non-Transparent Bridge") }
};
static unsigned g_amd_ntb_hw_debug_level;


@ -51,6 +51,9 @@
#define NTB_HW_AMD_DEVICE_ID1 0x145B
#define NTB_HW_AMD_DEVICE_ID2 0x148B
#define NTB_HW_HYGON_VENDOR_ID 0x19D4
#define NTB_HW_HYGON_DEVICE_ID1 0x145B
#define NTB_DEF_PEER_CNT 1
#define NTB_DEF_PEER_IDX 0


@ -109,27 +109,21 @@ SYSCTL_INT(_hw_usb_ukbd, OID_AUTO, pollrate, CTLFLAG_RWTUN,
#define UKBD_EMULATE_ATSCANCODE 1
#define UKBD_DRIVER_NAME "ukbd"
#define UKBD_NMOD 8 /* units */
#define UKBD_NKEYCODE 6 /* units */
#define UKBD_IN_BUF_SIZE (2*(UKBD_NMOD + (2*UKBD_NKEYCODE))) /* bytes */
#define UKBD_IN_BUF_FULL ((UKBD_IN_BUF_SIZE / 2) - 1) /* bytes */
#define UKBD_NKEYCODE 256 /* units */
#define UKBD_IN_BUF_SIZE (4 * UKBD_NKEYCODE) /* scancodes */
#define UKBD_IN_BUF_FULL ((UKBD_IN_BUF_SIZE / 2) - 1) /* scancodes */
#define UKBD_NFKEY (sizeof(fkey_tab)/sizeof(fkey_tab[0])) /* units */
#define UKBD_BUFFER_SIZE 64 /* bytes */
#define UKBD_KEY_PRESSED(map, key) ({ \
CTASSERT((key) >= 0 && (key) < UKBD_NKEYCODE); \
((map)[(key) / 64] & (1ULL << ((key) % 64))); \
})
#define MOD_EJECT 0x01
#define MOD_FN 0x02
struct ukbd_data {
uint16_t modifiers;
#define MOD_CONTROL_L 0x01
#define MOD_CONTROL_R 0x10
#define MOD_SHIFT_L 0x02
#define MOD_SHIFT_R 0x20
#define MOD_ALT_L 0x04
#define MOD_ALT_R 0x40
#define MOD_WIN_L 0x08
#define MOD_WIN_R 0x80
/* internal */
#define MOD_EJECT 0x0100
#define MOD_FN 0x0200
uint8_t keycode[UKBD_NKEYCODE];
uint64_t bitmap[howmany(UKBD_NKEYCODE, 64)];
};
enum {
@ -144,17 +138,10 @@ struct ukbd_softc {
keymap_t sc_keymap;
accentmap_t sc_accmap;
fkeytab_t sc_fkeymap[UKBD_NFKEY];
uint64_t sc_loc_key_valid[howmany(UKBD_NKEYCODE, 64)];
struct hid_location sc_loc_apple_eject;
struct hid_location sc_loc_apple_fn;
struct hid_location sc_loc_ctrl_l;
struct hid_location sc_loc_ctrl_r;
struct hid_location sc_loc_shift_l;
struct hid_location sc_loc_shift_r;
struct hid_location sc_loc_alt_l;
struct hid_location sc_loc_alt_r;
struct hid_location sc_loc_win_l;
struct hid_location sc_loc_win_r;
struct hid_location sc_loc_events;
struct hid_location sc_loc_key[UKBD_NKEYCODE];
struct hid_location sc_loc_numlock;
struct hid_location sc_loc_capslock;
struct hid_location sc_loc_scrolllock;
@ -172,8 +159,7 @@ struct ukbd_softc {
sbintime_t sc_co_basetime;
int sc_delay;
uint32_t sc_ntime[UKBD_NKEYCODE];
uint32_t sc_otime[UKBD_NKEYCODE];
uint32_t sc_repeat_time;
uint32_t sc_input[UKBD_IN_BUF_SIZE]; /* input buffer */
uint32_t sc_time_ms;
uint32_t sc_composed_char; /* composed char code, if non-zero */
@ -191,15 +177,6 @@ struct ukbd_softc {
#define UKBD_FLAG_APPLE_EJECT 0x00000040
#define UKBD_FLAG_APPLE_FN 0x00000080
#define UKBD_FLAG_APPLE_SWAP 0x00000100
#define UKBD_FLAG_CTRL_L 0x00000400
#define UKBD_FLAG_CTRL_R 0x00000800
#define UKBD_FLAG_SHIFT_L 0x00001000
#define UKBD_FLAG_SHIFT_R 0x00002000
#define UKBD_FLAG_ALT_L 0x00004000
#define UKBD_FLAG_ALT_R 0x00008000
#define UKBD_FLAG_WIN_L 0x00010000
#define UKBD_FLAG_WIN_R 0x00020000
#define UKBD_FLAG_EVENTS 0x00040000
#define UKBD_FLAG_NUMLOCK 0x00080000
#define UKBD_FLAG_CAPSLOCK 0x00100000
#define UKBD_FLAG_SCROLLLOCK 0x00200000
@ -214,31 +191,23 @@ struct ukbd_softc {
uint16_t sc_inputs;
uint16_t sc_inputhead;
uint16_t sc_inputtail;
uint16_t sc_modifiers;
uint8_t sc_leds; /* store for async led requests */
uint8_t sc_iface_index;
uint8_t sc_iface_no;
uint8_t sc_id_apple_eject;
uint8_t sc_id_apple_fn;
uint8_t sc_id_ctrl_l;
uint8_t sc_id_ctrl_r;
uint8_t sc_id_shift_l;
uint8_t sc_id_shift_r;
uint8_t sc_id_alt_l;
uint8_t sc_id_alt_r;
uint8_t sc_id_win_l;
uint8_t sc_id_win_r;
uint8_t sc_id_event;
uint8_t sc_id_loc_key[UKBD_NKEYCODE];
uint8_t sc_id_numlock;
uint8_t sc_id_capslock;
uint8_t sc_id_scrolllock;
uint8_t sc_id_events;
uint8_t sc_kbd_id;
uint8_t sc_repeat_key;
uint8_t sc_buffer[UKBD_BUFFER_SIZE];
};
#define KEY_NONE 0x00
#define KEY_ERROR 0x01
#define KEY_PRESS 0
@ -259,21 +228,6 @@ struct ukbd_softc {
#define UKBD_UNLOCK() USB_MTX_UNLOCK(&Giant)
#define UKBD_LOCK_ASSERT() USB_MTX_ASSERT(&Giant, MA_OWNED)
struct ukbd_mods {
uint32_t mask, key;
};
static const struct ukbd_mods ukbd_mods[UKBD_NMOD] = {
{MOD_CONTROL_L, 0xe0},
{MOD_CONTROL_R, 0xe4},
{MOD_SHIFT_L, 0xe1},
{MOD_SHIFT_R, 0xe5},
{MOD_ALT_L, 0xe2},
{MOD_ALT_R, 0xe6},
{MOD_WIN_L, 0xe3},
{MOD_WIN_R, 0xe7},
};
#define NN 0 /* no translation */
/*
* Translate USB keycodes to AT keyboard scancodes.
@ -347,8 +301,8 @@ static void ukbd_timeout(void *);
static void ukbd_set_leds(struct ukbd_softc *, uint8_t);
static int ukbd_set_typematic(keyboard_t *, int);
#ifdef UKBD_EMULATE_ATSCANCODE
static uint32_t ukbd_atkeycode(int, int);
static int ukbd_key2scan(struct ukbd_softc *, int, int, int);
static uint32_t ukbd_atkeycode(int, const uint64_t *);
static int ukbd_key2scan(struct ukbd_softc *, int, const uint64_t *, int);
#endif
static uint32_t ukbd_read_char(keyboard_t *, int);
static void ukbd_clear_state(keyboard_t *);
@ -371,16 +325,33 @@ static const struct evdev_methods ukbd_evdev_methods = {
};
#endif
static uint8_t
static bool
ukbd_any_key_pressed(struct ukbd_softc *sc)
{
uint8_t i;
uint8_t j;
bool ret = false;
unsigned i;
for (j = i = 0; i < UKBD_NKEYCODE; i++)
j |= sc->sc_odata.keycode[i];
for (i = 0; i != howmany(UKBD_NKEYCODE, 64); i++)
ret |= (sc->sc_odata.bitmap[i] != 0);
return (ret);
}
return (j ? 1 : 0);
static bool
ukbd_any_key_valid(struct ukbd_softc *sc)
{
bool ret = false;
unsigned i;
for (i = 0; i != howmany(UKBD_NKEYCODE, 64); i++)
ret |= (sc->sc_loc_key_valid[i] != 0);
return (ret);
}
static bool
ukbd_is_modifier_key(uint32_t key)
{
return (key >= 0xe0 && key <= 0xe7);
}
static void
@ -522,99 +493,66 @@ ukbd_get_key(struct ukbd_softc *sc, uint8_t wait)
static void
ukbd_interrupt(struct ukbd_softc *sc)
{
uint32_t n_mod;
uint32_t o_mod;
uint32_t now = sc->sc_time_ms;
int32_t dtime;
uint8_t key;
uint8_t i;
uint8_t j;
const uint32_t now = sc->sc_time_ms;
unsigned key;
UKBD_LOCK_ASSERT();
if (sc->sc_ndata.keycode[0] == KEY_ERROR)
return;
/* Check for key changes */
for (key = 0; key != UKBD_NKEYCODE; key++) {
const uint64_t mask = 1ULL << (key % 64);
const uint64_t delta =
sc->sc_odata.bitmap[key / 64] ^
sc->sc_ndata.bitmap[key / 64];
n_mod = sc->sc_ndata.modifiers;
o_mod = sc->sc_odata.modifiers;
if (n_mod != o_mod) {
for (i = 0; i < UKBD_NMOD; i++) {
if ((n_mod & ukbd_mods[i].mask) !=
(o_mod & ukbd_mods[i].mask)) {
ukbd_put_key(sc, ukbd_mods[i].key |
((n_mod & ukbd_mods[i].mask) ?
KEY_PRESS : KEY_RELEASE));
}
}
}
/* Check for released keys. */
for (i = 0; i < UKBD_NKEYCODE; i++) {
key = sc->sc_odata.keycode[i];
if (key == 0) {
continue;
}
for (j = 0; j < UKBD_NKEYCODE; j++) {
if (sc->sc_ndata.keycode[j] == 0) {
continue;
}
if (key == sc->sc_ndata.keycode[j]) {
goto rfound;
}
}
ukbd_put_key(sc, key | KEY_RELEASE);
rfound: ;
}
if (mask == 1 && delta == 0) {
key += 63;
continue; /* skip empty areas */
} else if (delta & mask) {
if (sc->sc_odata.bitmap[key / 64] & mask) {
ukbd_put_key(sc, key | KEY_RELEASE);
/* Check for pressed keys. */
for (i = 0; i < UKBD_NKEYCODE; i++) {
key = sc->sc_ndata.keycode[i];
if (key == 0) {
continue;
}
sc->sc_ntime[i] = now + sc->sc_kbd.kb_delay1;
for (j = 0; j < UKBD_NKEYCODE; j++) {
if (sc->sc_odata.keycode[j] == 0) {
continue;
}
if (key == sc->sc_odata.keycode[j]) {
/* clear repeating key, if any */
if (sc->sc_repeat_key == key)
sc->sc_repeat_key = 0;
} else {
ukbd_put_key(sc, key | KEY_PRESS);
/* key is still pressed */
if (ukbd_is_modifier_key(key))
continue;
sc->sc_ntime[i] = sc->sc_otime[j];
dtime = (sc->sc_otime[j] - now);
if (dtime > 0) {
/* time has not elapsed */
goto pfound;
/*
* Check for first new key and set
* initial delay and [re]start timer:
*/
if (sc->sc_repeat_key == 0) {
sc->sc_co_basetime = sbinuptime();
sc->sc_delay = sc->sc_kbd.kb_delay1;
ukbd_start_timer(sc);
}
sc->sc_ntime[i] = now + sc->sc_kbd.kb_delay2;
break;
/* set repeat time for last key */
sc->sc_repeat_time = now + sc->sc_kbd.kb_delay1;
sc->sc_repeat_key = key;
}
}
if (j == UKBD_NKEYCODE) {
/* New key - set initial delay and [re]start timer */
sc->sc_co_basetime = sbinuptime();
sc->sc_delay = sc->sc_kbd.kb_delay1;
ukbd_start_timer(sc);
}
ukbd_put_key(sc, key | KEY_PRESS);
/*
* If any other key is presently down, force its repeat to be
* well in the future (100s). This makes the last key to be
* pressed do the autorepeat.
*/
for (j = 0; j != UKBD_NKEYCODE; j++) {
if (j != i)
sc->sc_ntime[j] = now + (100 * 1000);
}
pfound: ;
}
/* synchronize old data with new data */
sc->sc_odata = sc->sc_ndata;
/* check if last key is still pressed */
if (sc->sc_repeat_key != 0) {
const int32_t dtime = (sc->sc_repeat_time - now);
memcpy(sc->sc_otime, sc->sc_ntime, sizeof(sc->sc_otime));
/* check if time has elapsed */
if (dtime <= 0) {
ukbd_put_key(sc, sc->sc_repeat_key | KEY_PRESS);
sc->sc_repeat_time = now + sc->sc_kbd.kb_delay2;
}
}
/* wakeup keyboard system */
ukbd_event_keyinput(sc);
}
@ -664,8 +602,9 @@ ukbd_timeout(void *arg)
}
}
static uint8_t
ukbd_apple_fn(uint8_t keycode) {
static uint32_t
ukbd_apple_fn(uint32_t keycode)
{
switch (keycode) {
case 0x28: return 0x49; /* RETURN -> INSERT */
case 0x2a: return 0x4c; /* BACKSPACE -> DEL */
@ -677,8 +616,9 @@ ukbd_apple_fn(uint8_t keycode) {
}
}
static uint8_t
ukbd_apple_swap(uint8_t keycode) {
static uint32_t
ukbd_apple_swap(uint32_t keycode)
{
switch (keycode) {
case 0x35: return 0x64;
case 0x64: return 0x35;
@ -691,9 +631,10 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct ukbd_softc *sc = usbd_xfer_softc(xfer);
struct usb_page_cache *pc;
uint8_t i;
uint8_t offset;
uint32_t i;
uint8_t id;
uint8_t modifiers;
int offset;
int len;
UKBD_LOCK_ASSERT();
@ -733,117 +674,72 @@ ukbd_intr_callback(struct usb_xfer *xfer, usb_error_t error)
/* clear temporary storage */
memset(&sc->sc_ndata, 0, sizeof(sc->sc_ndata));
/* clear modifiers */
modifiers = 0;
/* scan through HID data */
if ((sc->sc_flags & UKBD_FLAG_APPLE_EJECT) &&
(id == sc->sc_id_apple_eject)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_eject))
sc->sc_modifiers |= MOD_EJECT;
else
sc->sc_modifiers &= ~MOD_EJECT;
modifiers |= MOD_EJECT;
}
if ((sc->sc_flags & UKBD_FLAG_APPLE_FN) &&
(id == sc->sc_id_apple_fn)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_apple_fn))
sc->sc_modifiers |= MOD_FN;
else
sc->sc_modifiers &= ~MOD_FN;
}
if ((sc->sc_flags & UKBD_FLAG_CTRL_L) &&
(id == sc->sc_id_ctrl_l)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_ctrl_l))
sc-> sc_modifiers |= MOD_CONTROL_L;
else
sc-> sc_modifiers &= ~MOD_CONTROL_L;
}
if ((sc->sc_flags & UKBD_FLAG_CTRL_R) &&
(id == sc->sc_id_ctrl_r)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_ctrl_r))
sc->sc_modifiers |= MOD_CONTROL_R;
else
sc->sc_modifiers &= ~MOD_CONTROL_R;
}
if ((sc->sc_flags & UKBD_FLAG_SHIFT_L) &&
(id == sc->sc_id_shift_l)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_shift_l))
sc->sc_modifiers |= MOD_SHIFT_L;
else
sc->sc_modifiers &= ~MOD_SHIFT_L;
}
if ((sc->sc_flags & UKBD_FLAG_SHIFT_R) &&
(id == sc->sc_id_shift_r)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_shift_r))
sc->sc_modifiers |= MOD_SHIFT_R;
else
sc->sc_modifiers &= ~MOD_SHIFT_R;
}
if ((sc->sc_flags & UKBD_FLAG_ALT_L) &&
(id == sc->sc_id_alt_l)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_alt_l))
sc->sc_modifiers |= MOD_ALT_L;
else
sc->sc_modifiers &= ~MOD_ALT_L;
}
if ((sc->sc_flags & UKBD_FLAG_ALT_R) &&
(id == sc->sc_id_alt_r)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_alt_r))
sc->sc_modifiers |= MOD_ALT_R;
else
sc->sc_modifiers &= ~MOD_ALT_R;
}
if ((sc->sc_flags & UKBD_FLAG_WIN_L) &&
(id == sc->sc_id_win_l)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_win_l))
sc->sc_modifiers |= MOD_WIN_L;
else
sc->sc_modifiers &= ~MOD_WIN_L;
}
if ((sc->sc_flags & UKBD_FLAG_WIN_R) &&
(id == sc->sc_id_win_r)) {
if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_win_r))
sc->sc_modifiers |= MOD_WIN_R;
else
sc->sc_modifiers &= ~MOD_WIN_R;
modifiers |= MOD_FN;
}
sc->sc_ndata.modifiers = sc->sc_modifiers;
for (i = 0; i != UKBD_NKEYCODE; i++) {
const uint64_t valid = sc->sc_loc_key_valid[i / 64];
const uint64_t mask = 1ULL << (i % 64);
if ((sc->sc_flags & UKBD_FLAG_EVENTS) &&
(id == sc->sc_id_events)) {
i = sc->sc_loc_events.count;
if (i > UKBD_NKEYCODE)
i = UKBD_NKEYCODE;
if (i > len)
i = len;
while (i--) {
sc->sc_ndata.keycode[i] =
hid_get_data(sc->sc_buffer + i, len - i,
&sc->sc_loc_events);
if (mask == 1 && valid == 0) {
i += 63;
continue; /* skip empty areas */
} else if (~valid & mask) {
continue; /* location is not valid */
} else if (id != sc->sc_id_loc_key[i]) {
continue; /* invalid HID ID */
} else if (i == 0) {
offset = sc->sc_loc_key[0].count;
if (offset < 0 || offset > len)
offset = len;
while (offset--) {
uint32_t key =
hid_get_data(sc->sc_buffer + offset, len - offset,
&sc->sc_loc_key[i]);
if (modifiers & MOD_FN)
key = ukbd_apple_fn(key);
if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP)
key = ukbd_apple_swap(key);
if (key == KEY_NONE || key == KEY_ERROR || key >= UKBD_NKEYCODE)
continue;
/* set key in bitmap */
sc->sc_ndata.bitmap[key / 64] |= 1ULL << (key % 64);
}
} else if (hid_get_data(sc->sc_buffer, len, &sc->sc_loc_key[i])) {
uint32_t key = i;
if (modifiers & MOD_FN)
key = ukbd_apple_fn(key);
if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP)
key = ukbd_apple_swap(key);
if (key == KEY_NONE || key == KEY_ERROR || key >= UKBD_NKEYCODE)
continue;
/* set key in bitmap */
sc->sc_ndata.bitmap[key / 64] |= 1ULL << (key % 64);
}
}
#ifdef USB_DEBUG
DPRINTF("modifiers = 0x%04x\n", (int)sc->sc_modifiers);
for (i = 0; i < UKBD_NKEYCODE; i++) {
if (sc->sc_ndata.keycode[i]) {
DPRINTF("[%d] = 0x%02x\n",
(int)i, (int)sc->sc_ndata.keycode[i]);
}
DPRINTF("modifiers = 0x%04x\n", modifiers);
for (i = 0; i != UKBD_NKEYCODE; i++) {
const uint64_t valid = sc->sc_ndata.bitmap[i / 64];
const uint64_t mask = 1ULL << (i % 64);
if (valid & mask)
DPRINTF("Key 0x%02x pressed\n", i);
}
#endif
if (sc->sc_modifiers & MOD_FN) {
for (i = 0; i < UKBD_NKEYCODE; i++) {
sc->sc_ndata.keycode[i] =
ukbd_apple_fn(sc->sc_ndata.keycode[i]);
}
}
if (sc->sc_flags & UKBD_FLAG_APPLE_SWAP) {
for (i = 0; i < UKBD_NKEYCODE; i++) {
sc->sc_ndata.keycode[i] =
ukbd_apple_swap(sc->sc_ndata.keycode[i]);
}
}
ukbd_interrupt(sc);
case USB_ST_SETUP:
@ -1072,10 +968,14 @@ static void
ukbd_parse_hid(struct ukbd_softc *sc, const uint8_t *ptr, uint32_t len)
{
uint32_t flags;
uint32_t key;
/* reset detected bits */
sc->sc_flags &= ~UKBD_FLAG_HID_MASK;
/* reset detected keys */
memset(sc->sc_loc_key_valid, 0, sizeof(sc->sc_loc_key_valid));
/* check if there is an ID byte */
sc->sc_kbd_size = hid_report_size(ptr, len,
hid_input, &sc->sc_kbd_id);
@ -1098,84 +998,34 @@ ukbd_parse_hid(struct ukbd_softc *sc, const uint8_t *ptr, uint32_t len)
sc->sc_flags |= UKBD_FLAG_APPLE_FN;
DPRINTFN(1, "Found Apple FN-key\n");
}
/* figure out some keys */
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE0),
hid_input, 0, &sc->sc_loc_ctrl_l, &flags,
&sc->sc_id_ctrl_l)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_CTRL_L;
DPRINTFN(1, "Found left control\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE4),
hid_input, 0, &sc->sc_loc_ctrl_r, &flags,
&sc->sc_id_ctrl_r)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_CTRL_R;
DPRINTFN(1, "Found right control\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE1),
hid_input, 0, &sc->sc_loc_shift_l, &flags,
&sc->sc_id_shift_l)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_SHIFT_L;
DPRINTFN(1, "Found left shift\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE5),
hid_input, 0, &sc->sc_loc_shift_r, &flags,
&sc->sc_id_shift_r)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_SHIFT_R;
DPRINTFN(1, "Found right shift\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE2),
hid_input, 0, &sc->sc_loc_alt_l, &flags,
&sc->sc_id_alt_l)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_ALT_L;
DPRINTFN(1, "Found left alt\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE6),
hid_input, 0, &sc->sc_loc_alt_r, &flags,
&sc->sc_id_alt_r)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_ALT_R;
DPRINTFN(1, "Found right alt\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE3),
hid_input, 0, &sc->sc_loc_win_l, &flags,
&sc->sc_id_win_l)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_WIN_L;
DPRINTFN(1, "Found left GUI\n");
}
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0xE7),
hid_input, 0, &sc->sc_loc_win_r, &flags,
&sc->sc_id_win_r)) {
if (flags & HIO_VARIABLE)
sc->sc_flags |= UKBD_FLAG_WIN_R;
DPRINTFN(1, "Found right GUI\n");
}
/* figure out event buffer */
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, 0x00),
hid_input, 0, &sc->sc_loc_events, &flags,
&sc->sc_id_events)) {
hid_input, 0, &sc->sc_loc_key[0], &flags,
&sc->sc_id_loc_key[0])) {
if (flags & HIO_VARIABLE) {
DPRINTFN(1, "Ignoring keyboard event control\n");
} else {
sc->sc_flags |= UKBD_FLAG_EVENTS;
sc->sc_loc_key_valid[0] |= 1;
DPRINTFN(1, "Found keyboard event array\n");
}
}
/* figure out the keys */
for (key = 1; key != UKBD_NKEYCODE; key++) {
if (hid_locate(ptr, len,
HID_USAGE2(HUP_KEYBOARD, key),
hid_input, 0, &sc->sc_loc_key[key], &flags,
&sc->sc_id_loc_key[key])) {
if (flags & HIO_VARIABLE) {
sc->sc_loc_key_valid[key / 64] |=
1ULL << (key % 64);
DPRINTFN(1, "Found key 0x%02x\n", key);
}
}
}
/* figure out leds on keyboard */
sc->sc_led_size = hid_report_size(ptr, len,
hid_output, NULL);
@ -1301,7 +1151,7 @@ ukbd_attach(device_t dev)
/* check if we should use the boot protocol */
if (usb_test_quirk(uaa, UQ_KBD_BOOTPROTO) ||
(err != 0) || (!(sc->sc_flags & UKBD_FLAG_EVENTS))) {
(err != 0) || ukbd_any_key_valid(sc) == false) {
DPRINTF("Forcing boot protocol\n");
@ -1660,11 +1510,11 @@ ukbd_read(keyboard_t *kbd, int wait)
++(kbd->kb_count);
#ifdef UKBD_EMULATE_ATSCANCODE
keycode = ukbd_atkeycode(usbcode, sc->sc_ndata.modifiers);
keycode = ukbd_atkeycode(usbcode, sc->sc_ndata.bitmap);
if (keycode == NN) {
return -1;
}
return (ukbd_key2scan(sc, keycode, sc->sc_ndata.modifiers,
return (ukbd_key2scan(sc, keycode, sc->sc_ndata.bitmap,
(usbcode & KEY_RELEASE)));
#else /* !UKBD_EMULATE_ATSCANCODE */
return (usbcode);
@ -1731,13 +1581,13 @@ ukbd_read_char_locked(keyboard_t *kbd, int wait)
#ifdef UKBD_EMULATE_ATSCANCODE
/* USB key index -> key code -> AT scan code */
keycode = ukbd_atkeycode(usbcode, sc->sc_ndata.modifiers);
keycode = ukbd_atkeycode(usbcode, sc->sc_ndata.bitmap);
if (keycode == NN) {
return (NOKEY);
}
/* return an AT scan code for the K_RAW mode */
if (sc->sc_mode == K_RAW) {
return (ukbd_key2scan(sc, keycode, sc->sc_ndata.modifiers,
return (ukbd_key2scan(sc, keycode, sc->sc_ndata.bitmap,
(usbcode & KEY_RELEASE)));
}
#else /* !UKBD_EMULATE_ATSCANCODE */
@ -2061,8 +1911,8 @@ ukbd_clear_state(keyboard_t *kbd)
#endif
memset(&sc->sc_ndata, 0, sizeof(sc->sc_ndata));
memset(&sc->sc_odata, 0, sizeof(sc->sc_odata));
memset(&sc->sc_ntime, 0, sizeof(sc->sc_ntime));
memset(&sc->sc_otime, 0, sizeof(sc->sc_otime));
sc->sc_repeat_time = 0;
sc->sc_repeat_key = 0;
}
/* save the internal state, not used */
@ -2149,11 +1999,12 @@ ukbd_set_typematic(keyboard_t *kbd, int code)
#ifdef UKBD_EMULATE_ATSCANCODE
static uint32_t
ukbd_atkeycode(int usbcode, int shift)
ukbd_atkeycode(int usbcode, const uint64_t *bitmap)
{
uint32_t keycode;
keycode = ukbd_trtab[KEY_INDEX(usbcode)];
/*
* Translate Alt-PrintScreen to SysRq.
*
@ -2168,13 +2019,14 @@ ukbd_atkeycode(int usbcode, int shift)
* is routine.
*/
if ((keycode == 0x5c || keycode == 0x7e) &&
shift & (MOD_ALT_L | MOD_ALT_R))
(UKBD_KEY_PRESSED(bitmap, 0xe2 /* ALT-L */) ||
UKBD_KEY_PRESSED(bitmap, 0xe6 /* ALT-R */)))
return (0x54);
return (keycode);
}
static int
ukbd_key2scan(struct ukbd_softc *sc, int code, int shift, int up)
ukbd_key2scan(struct ukbd_softc *sc, int code, const uint64_t *bitmap, int up)
{
static const int scan[] = {
/* 89 */
@ -2234,12 +2086,17 @@ ukbd_key2scan(struct ukbd_softc *sc, int code, int shift, int up)
code = scan[code - 89];
}
/* PrintScreen */
if (code == 0x137 && (!(shift & (MOD_CONTROL_L | MOD_CONTROL_R |
MOD_SHIFT_L | MOD_SHIFT_R)))) {
if (code == 0x137 && (!(
UKBD_KEY_PRESSED(bitmap, 0xe0 /* CTRL-L */) ||
UKBD_KEY_PRESSED(bitmap, 0xe4 /* CTRL-R */) ||
UKBD_KEY_PRESSED(bitmap, 0xe1 /* SHIFT-L */) ||
UKBD_KEY_PRESSED(bitmap, 0xe5 /* SHIFT-R */)))) {
code |= SCAN_PREFIX_SHIFT;
}
/* Pause/Break */
if ((code == 0x146) && (!(shift & (MOD_CONTROL_L | MOD_CONTROL_R)))) {
if ((code == 0x146) && (!(
UKBD_KEY_PRESSED(bitmap, 0xe0 /* CTRL-L */) ||
UKBD_KEY_PRESSED(bitmap, 0xe4 /* CTRL-R */)))) {
code = (0x45 | SCAN_PREFIX_E1 | SCAN_PREFIX_CTL);
}
code |= (up ? SCAN_RELEASE : SCAN_PRESS);


@ -1978,7 +1978,8 @@ finstall(struct thread *td, struct file *fp, int *fd, int flags,
if (!fhold(fp))
return (EBADF);
FILEDESC_XLOCK(fdp);
if ((error = fdalloc(td, 0, fd))) {
error = fdalloc(td, 0, fd);
if (__predict_false(error != 0)) {
FILEDESC_XUNLOCK(fdp);
fdrop(fp, td);
return (error);
@ -2898,7 +2899,7 @@ fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp)
}
int
fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, u_char *maxprotp,
fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp, vm_prot_t *maxprotp,
struct file **fpp)
{
int error;


@ -2820,16 +2820,6 @@ prison_ischild(struct prison *pr1, struct prison *pr2)
return (0);
}
/*
* Return 1 if the passed credential is in a jail, otherwise 0.
*/
int
jailed(struct ucred *cred)
{
return (cred->cr_prison != &prison0);
}
/*
* Return 1 if the passed credential is in a jail and that jail does not
* have its own virtual network stack, otherwise 0.
@ -3008,6 +2998,16 @@ int
prison_priv_check(struct ucred *cred, int priv)
{
/*
* Some policies have custom handlers. This routine should not be
* called for them. See priv_check_cred().
*/
switch (priv) {
case PRIV_VFS_GENERATION:
KASSERT(0, ("prison_priv_check instead of a custom handler "
"called for %d\n", priv));
}
if (!jailed(cred))
return (0);


@ -934,7 +934,7 @@ lockmgr_upgrade(struct lock *lk, u_int flags, struct lock_object *ilk,
}
int
lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
lockmgr_lock_flags(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *file, int line)
{
struct lock_class *class;
@ -1114,46 +1114,6 @@ lockmgr_xunlock_hard(struct lock *lk, uintptr_t x, u_int flags, struct lock_obje
return (0);
}
int
lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
{
struct lock_class *class;
uintptr_t x, tid;
const char *file;
int line;
if (KERNEL_PANICKED())
return (0);
file = __FILE__;
line = __LINE__;
_lockmgr_assert(lk, KA_LOCKED, file, line);
x = lk->lk_lock;
if (__predict_true(x & LK_SHARE) != 0) {
lockmgr_note_shared_release(lk, file, line);
if (lockmgr_sunlock_try(lk, &x)) {
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_READER);
} else {
return (lockmgr_sunlock_hard(lk, x, flags, ilk, file, line));
}
} else {
tid = (uintptr_t)curthread;
lockmgr_note_exclusive_release(lk, file, line);
if (!lockmgr_recursed(lk) &&
atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) {
LOCKSTAT_PROFILE_RELEASE_RWLOCK(lockmgr__release, lk, LOCKSTAT_WRITER);
} else {
return (lockmgr_xunlock_hard(lk, x, flags, ilk, file, line));
}
}
if (__predict_false(flags & LK_INTERLOCK)) {
class = LOCK_CLASS(ilk);
class->lc_unlock(ilk);
}
return (0);
}
/*
* Lightweight entry points for common operations.
*
@ -1163,7 +1123,7 @@ lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk)
* 2. returning with an error after sleep
* 3. unlocking the interlock
*
* If in doubt, use lockmgr_*_fast_path.
* If in doubt, use lockmgr_lock_flags.
*/
int
lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line)


@ -54,7 +54,7 @@ __FBSDID("$FreeBSD$");
* userland programs, and should not be done without careful consideration of
* the consequences.
*/
static int suser_enabled = 1;
static int __read_mostly suser_enabled = 1;
SYSCTL_INT(_security_bsd, OID_AUTO, suser_enabled, CTLFLAG_RWTUN,
&suser_enabled, 0, "processes with uid 0 have privilege");
@ -71,6 +71,51 @@ SDT_PROVIDER_DEFINE(priv);
SDT_PROBE_DEFINE1(priv, kernel, priv_check, priv__ok, "int");
SDT_PROBE_DEFINE1(priv, kernel, priv_check, priv__err, "int");
static __always_inline int
priv_check_cred_pre(struct ucred *cred, int priv)
{
int error;
#ifdef MAC
error = mac_priv_check(cred, priv);
#else
error = 0;
#endif
return (error);
}
static __always_inline int
priv_check_cred_post(struct ucred *cred, int priv, int error, bool handled)
{
if (__predict_true(handled))
goto out;
/*
* Now check with MAC, if enabled, to see if a policy module grants
* privilege.
*/
#ifdef MAC
if (mac_priv_grant(cred, priv) == 0) {
error = 0;
goto out;
}
#endif
/*
* The default is deny, so if no policies have granted it, reject
* with a privilege error here.
*/
error = EPERM;
out:
if (SDT_PROBES_ENABLED()) {
if (error)
SDT_PROBE1(priv, kernel, priv_check, priv__err, priv);
else
SDT_PROBE1(priv, kernel, priv_check, priv__ok, priv);
}
return (error);
}
/*
* Check a credential for privilege. Lots of good reasons to deny privilege;
* only a few to grant it.
@ -83,15 +128,18 @@ priv_check_cred(struct ucred *cred, int priv)
KASSERT(PRIV_VALID(priv), ("priv_check_cred: invalid privilege %d",
priv));
switch (priv) {
case PRIV_VFS_GENERATION:
return (priv_check_cred_vfs_generation(cred));
}
/*
* We first evaluate policies that may deny the granting of
* privilege unilaterally.
*/
#ifdef MAC
error = mac_priv_check(cred, priv);
error = priv_check_cred_pre(cred, priv);
if (error)
goto out;
#endif
/*
* Jail policy will restrict certain privileges that may otherwise be
@ -177,30 +225,9 @@ priv_check_cred(struct ucred *cred, int priv)
}
}
/*
* Now check with MAC, if enabled, to see if a policy module grants
* privilege.
*/
#ifdef MAC
if (mac_priv_grant(cred, priv) == 0) {
error = 0;
goto out;
}
#endif
/*
* The default is deny, so if no policies have granted it, reject
* with a privilege error here.
*/
error = EPERM;
return (priv_check_cred_post(cred, priv, error, false));
out:
if (SDT_PROBES_ENABLED()) {
if (error)
SDT_PROBE1(priv, kernel, priv_check, priv__err, priv);
else
SDT_PROBE1(priv, kernel, priv_check, priv__ok, priv);
}
return (error);
return (priv_check_cred_post(cred, priv, error, true));
}
int
@ -211,3 +238,28 @@ priv_check(struct thread *td, int priv)
return (priv_check_cred(td->td_ucred, priv));
}
int
priv_check_cred_vfs_generation(struct ucred *cred)
{
int error;
error = priv_check_cred_pre(cred, PRIV_VFS_GENERATION);
if (error)
goto out;
if (jailed(cred)) {
error = EPERM;
goto out;
}
if (cred->cr_uid == 0 && suser_enabled) {
error = 0;
goto out;
}
return (priv_check_cred_post(cred, PRIV_VFS_GENERATION, error, false));
out:
return (priv_check_cred_post(cred, PRIV_VFS_GENERATION, error, true));
}


@ -193,8 +193,7 @@ smr_advance(smr_t smr)
/*
* It is illegal to enter while in an smr section.
*/
KASSERT(curthread->td_critnest == 0,
("smr_advance: Not allowed in a critical section."));
SMR_ASSERT_NOT_ENTERED(smr);
/*
* Modifications not done in a smr section need to be visible
@ -237,6 +236,8 @@ smr_advance_deferred(smr_t smr, int limit)
smr_seq_t goal;
smr_t csmr;
SMR_ASSERT_NOT_ENTERED(smr);
critical_enter();
csmr = zpcpu_get(smr);
if (++csmr->c_deferred >= limit) {
@ -275,8 +276,8 @@ smr_poll(smr_t smr, smr_seq_t goal, bool wait)
/*
* It is illegal to enter while in an smr section.
*/
KASSERT(!wait || curthread->td_critnest == 0,
("smr_poll: Blocking not allowed in a critical section."));
KASSERT(!wait || !SMR_ENTERED(smr),
("smr_poll: Blocking not allowed in a SMR section."));
/*
* Use a critical section so that we can avoid ABA races
@ -413,7 +414,7 @@ smr_create(const char *name)
int i;
s = uma_zalloc(smr_shared_zone, M_WAITOK);
smr = uma_zalloc(smr_zone, M_WAITOK);
smr = uma_zalloc_pcpu(smr_zone, M_WAITOK);
s->s_name = name;
s->s_rd_seq = s->s_wr_seq = SMR_SEQ_INIT;
@ -435,7 +436,7 @@ smr_destroy(smr_t smr)
smr_synchronize(smr);
uma_zfree(smr_shared_zone, smr->c_shared);
uma_zfree(smr_zone, smr);
uma_zfree_pcpu(smr_zone, smr);
}
/*


@ -69,7 +69,7 @@ syscallenter(struct thread *td)
if (__predict_false(td->td_cowgen != p->p_cowgen))
thread_cow_update(td);
traced = (p->p_flag & P_TRACED) != 0;
if (traced || td->td_dbgflags & TDB_USERWR) {
if (__predict_false(traced || td->td_dbgflags & TDB_USERWR)) {
PROC_LOCK(p);
td->td_dbgflags &= ~TDB_USERWR;
if (traced)
@ -85,19 +85,19 @@ syscallenter(struct thread *td)
(uintptr_t)td, "pid:%d", td->td_proc->p_pid, "arg0:%p", sa->args[0],
"arg1:%p", sa->args[1], "arg2:%p", sa->args[2]);
if (error != 0) {
if (__predict_false(error != 0)) {
td->td_errno = error;
goto retval;
}
STOPEVENT(p, S_SCE, sa->narg);
if ((p->p_flag & P_TRACED) != 0) {
if (__predict_false((p->p_flag & P_TRACED) != 0)) {
PROC_LOCK(p);
if (p->p_ptevents & PTRACE_SCE)
ptracestop((td), SIGTRAP, NULL);
PROC_UNLOCK(p);
}
if ((td->td_dbgflags & TDB_USERWR) != 0) {
if (__predict_false((td->td_dbgflags & TDB_USERWR) != 0)) {
/*
* Reread syscall number and arguments if debugger
* modified registers or memory.
@ -118,8 +118,8 @@ syscallenter(struct thread *td)
* In capability mode, we only allow access to system calls
* flagged with SYF_CAPENABLED.
*/
if (IN_CAPABILITY_MODE(td) &&
!(sa->callp->sy_flags & SYF_CAPENABLED)) {
if (__predict_false(IN_CAPABILITY_MODE(td) &&
!(sa->callp->sy_flags & SYF_CAPENABLED))) {
td->td_errno = error = ECAPMODE;
goto retval;
}
@ -131,15 +131,6 @@ syscallenter(struct thread *td)
goto retval;
}
#ifdef KDTRACE_HOOKS
/* Give the syscall:::entry DTrace probe a chance to fire. */
if (__predict_false(systrace_enabled && sa->callp->sy_entry != 0))
(*systrace_probe_func)(sa, SYSTRACE_ENTRY, 0);
#endif
/* Let system calls set td_errno directly. */
td->td_pflags &= ~TDP_NERRNO;
/*
* Fetch fast sigblock value at the time of syscall
* entry because sleepqueue primitives might call
@ -147,20 +138,32 @@ syscallenter(struct thread *td)
*/
fetch_sigfastblock(td);
AUDIT_SYSCALL_ENTER(sa->code, td);
error = (sa->callp->sy_call)(td, sa->args);
AUDIT_SYSCALL_EXIT(error, td);
/* Save the latest error return value. */
if ((td->td_pflags & TDP_NERRNO) == 0)
td->td_errno = error;
/* Let system calls set td_errno directly. */
td->td_pflags &= ~TDP_NERRNO;
if (__predict_false(systrace_enabled || AUDIT_SYSCALL_ENTER(sa->code, td))) {
#ifdef KDTRACE_HOOKS
/* Give the syscall:::return DTrace probe a chance to fire. */
if (__predict_false(systrace_enabled && sa->callp->sy_return != 0))
(*systrace_probe_func)(sa, SYSTRACE_RETURN,
error ? -1 : td->td_retval[0]);
/* Give the syscall:::entry DTrace probe a chance to fire. */
if (__predict_false(sa->callp->sy_entry != 0))
(*systrace_probe_func)(sa, SYSTRACE_ENTRY, 0);
#endif
error = (sa->callp->sy_call)(td, sa->args);
/* Save the latest error return value. */
if (__predict_false((td->td_pflags & TDP_NERRNO) == 0))
td->td_errno = error;
AUDIT_SYSCALL_EXIT(error, td);
#ifdef KDTRACE_HOOKS
/* Give the syscall:::return DTrace probe a chance to fire. */
if (__predict_false(sa->callp->sy_return != 0))
(*systrace_probe_func)(sa, SYSTRACE_RETURN,
error ? -1 : td->td_retval[0]);
#endif
} else {
error = (sa->callp->sy_call)(td, sa->args);
/* Save the latest error return value. */
if (__predict_false((td->td_pflags & TDP_NERRNO) == 0))
td->td_errno = error;
}
syscall_thread_exit(td, sa->callp);
retval:
@ -168,7 +171,7 @@ syscallenter(struct thread *td)
(uintptr_t)td, "pid:%d", td->td_proc->p_pid, "error:%d", error,
"retval0:%#lx", td->td_retval[0], "retval1:%#lx",
td->td_retval[1]);
if (traced) {
if (__predict_false(traced)) {
PROC_LOCK(p);
td->td_dbgflags &= ~TDB_SCE;
PROC_UNLOCK(p);
@ -189,9 +192,10 @@ syscallret(struct thread *td)
p = td->td_proc;
sa = &td->td_sa;
if ((trap_enotcap || (p->p_flag2 & P2_TRAPCAP) != 0) &&
IN_CAPABILITY_MODE(td)) {
if (td->td_errno == ENOTCAPABLE || td->td_errno == ECAPMODE) {
if (__predict_false(td->td_errno == ENOTCAPABLE ||
td->td_errno == ECAPMODE)) {
if ((trap_enotcap ||
(p->p_flag2 & P2_TRAPCAP) != 0) && IN_CAPABILITY_MODE(td)) {
ksiginfo_init_trap(&ksi);
ksi.ksi_signo = SIGTRAP;
ksi.ksi_errno = td->td_errno;
@ -211,20 +215,21 @@ syscallret(struct thread *td)
}
#endif
if (p->p_flag & P_TRACED) {
traced = 0;
if (__predict_false(p->p_flag & P_TRACED)) {
traced = 1;
PROC_LOCK(p);
td->td_dbgflags |= TDB_SCX;
PROC_UNLOCK(p);
} else
traced = 0;
}
/*
* This works because errno is findable through the
* register set. If we ever support an emulation where this
* is not the case, this code will need to be revisited.
*/
STOPEVENT(p, S_SCX, sa->code);
if (traced || (td->td_dbgflags & (TDB_EXEC | TDB_FORK)) != 0) {
if (__predict_false(traced ||
(td->td_dbgflags & (TDB_EXEC | TDB_FORK)) != 0)) {
PROC_LOCK(p);
/*
* If tracing the execed process, trap to the debugger


@ -141,13 +141,13 @@ userret(struct thread *td, struct trapframe *frame)
* If this thread tickled GEOM, we need to wait for the giggling to
* stop before we return to userland
*/
if (td->td_pflags & TDP_GEOM)
if (__predict_false(td->td_pflags & TDP_GEOM))
g_waitidle();
/*
* Charge system time if profiling.
*/
if (p->p_flag & P_PROFIL)
if (__predict_false(p->p_flag & P_PROFIL))
addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);
#ifdef HWPMC_HOOKS


@ -193,10 +193,10 @@ cap_check_failed_notcapable(const cap_rights_t *havep, const cap_rights_t *needp
/*
* Convert capability rights into VM access flags.
*/
u_char
vm_prot_t
cap_rights_to_vmprot(const cap_rights_t *havep)
{
u_char maxprot;
vm_prot_t maxprot;
maxprot = VM_PROT_NONE;
if (cap_rights_is_set(havep, CAP_MMAP_R))

View File

@ -513,7 +513,7 @@ vop_stdlock(ap)
struct mtx *ilk;
ilk = VI_MTX(vp);
return (lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
&ilk->lock_object, ap->a_file, ap->a_line));
}
@ -573,7 +573,7 @@ vop_lock(ap)
}
other:
ilk = VI_MTX(vp);
return (lockmgr_lock_fast_path(&vp->v_lock, flags,
return (lockmgr_lock_flags(&vp->v_lock, flags,
&ilk->lock_object, ap->a_file, ap->a_line));
}

View File

@ -932,12 +932,9 @@ lookup(struct nameidata *ndp)
*/
unionlookup:
#ifdef MAC
if ((cnp->cn_flags & NOMACCHECK) == 0) {
error = mac_vnode_check_lookup(cnp->cn_thread->td_ucred, dp,
cnp);
if (error)
goto bad;
}
error = mac_vnode_check_lookup(cnp->cn_thread->td_ucred, dp, cnp);
if (error)
goto bad;
#endif
ndp->ni_dvp = dp;
ndp->ni_vp = NULL;

View File

@ -273,7 +273,7 @@ kern_do_statfs(struct thread *td, struct mount *mp, struct statfs *buf)
error = VFS_STATFS(mp, buf);
if (error != 0)
goto out;
if (priv_check(td, PRIV_VFS_GENERATION)) {
if (priv_check_cred_vfs_generation(td->td_ucred)) {
buf->f_fsid.val[0] = buf->f_fsid.val[1] = 0;
prison_enforce_statfs(td->td_ucred, mp, buf);
}
@ -488,7 +488,7 @@ kern_getfsstat(struct thread *td, struct statfs **buf, size_t bufsize,
continue;
}
}
if (priv_check(td, PRIV_VFS_GENERATION)) {
if (priv_check_cred_vfs_generation(td->td_ucred)) {
sptmp = malloc(sizeof(struct statfs), M_STATFS,
M_WAITOK);
*sptmp = *sp;

View File

@ -1477,7 +1477,7 @@ vn_stat(struct vnode *vp, struct stat *sb, struct ucred *active_cred,
sb->st_blksize = max(PAGE_SIZE, vap->va_blocksize);
sb->st_flags = vap->va_flags;
if (priv_check(td, PRIV_VFS_GENERATION))
if (priv_check_cred_vfs_generation(td->td_ucred))
sb->st_gen = 0;
else
sb->st_gen = vap->va_gen;

View File

@ -2564,7 +2564,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (tp->t_flags & TF_SACK_PERMIT) {
TCPSTAT_INC(
tcps_sack_recovery_episode);
tp->sack_newdata = tp->snd_nxt;
tp->snd_recover = tp->snd_nxt;
tp->snd_cwnd = maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
goto drop;

View File

@ -1686,7 +1686,6 @@ tcp_log_event_(struct tcpcb *tp, struct tcphdr *th, struct sockbuf *rxbuf,
COPY_STAT(rcv_up);
COPY_STAT(rcv_adv);
COPY_STAT(rcv_nxt);
COPY_STAT(sack_newdata);
COPY_STAT(rcv_wnd);
COPY_STAT_T(dupacks);
COPY_STAT_T(segqlen);

View File

@ -32,7 +32,7 @@
#define TCP_LOG_REASON_LEN 32
#define TCP_LOG_TAG_LEN 32
#define TCP_LOG_BUF_VER (7)
#define TCP_LOG_BUF_VER (8)
/*
* Because the (struct tcp_log_buffer) includes 8-byte uint64_t's, it requires
@ -130,7 +130,7 @@ struct tcp_log_buffer
int tlb_state; /* TCPCB t_state */
uint32_t tlb_starttime; /* TCPCB t_starttime */
uint32_t tlb_iss; /* TCPCB iss */
uint32_t tlb_iss; /* TCPCB iss */
uint32_t tlb_flags; /* TCPCB flags */
uint32_t tlb_snd_una; /* TCPCB snd_una */
uint32_t tlb_snd_max; /* TCPCB snd_max */
@ -144,13 +144,12 @@ struct tcp_log_buffer
uint32_t tlb_rcv_up; /* TCPCB rcv_up */
uint32_t tlb_rcv_adv; /* TCPCB rcv_adv */
uint32_t tlb_rcv_nxt; /* TCPCB rcv_nxt */
tcp_seq tlb_sack_newdata; /* TCPCB sack_newdata */
uint32_t tlb_rcv_wnd; /* TCPCB rcv_wnd */
uint32_t tlb_rcv_wnd; /* TCPCB rcv_wnd */
uint32_t tlb_dupacks; /* TCPCB t_dupacks */
int tlb_segqlen; /* TCPCB segqlen */
int tlb_snd_numholes; /* TCPCB snd_numholes */
uint32_t tlb_flex1; /* Event specific information */
uint32_t tlb_flex2; /* Event specific information */
uint32_t tlb_flex1; /* Event specific information */
uint32_t tlb_flex2; /* Event specific information */
uint8_t tlb_snd_scale:4, /* TCPCB snd_scale */
tlb_rcv_scale:4; /* TCPCB rcv_scale */
uint8_t _pad[3]; /* Padding */
@ -169,15 +168,15 @@ struct tcp_log_buffer
} ALIGN_TCP_LOG;
enum tcp_log_events {
TCP_LOG_IN = 1, /* Incoming packet 1 */
TCP_LOG_OUT, /* Transmit (without other event) 2 */
TCP_LOG_RTO, /* Retransmit timeout 3 */
TCP_LOG_TF_ACK, /* Transmit due to TF_ACK 4 */
TCP_LOG_BAD_RETRAN, /* Detected bad retransmission 5 */
TCP_LOG_PRR, /* Doing PRR 6 */
TCP_LOG_REORDER,/* Detected reorder 7 */
TCP_LOG_HPTS, /* Hpts sending a packet 8 */
BBR_LOG_BBRUPD, /* We updated BBR info 9 */
TCP_LOG_IN = 1, /* Incoming packet 1 */
TCP_LOG_OUT, /* Transmit (without other event) 2 */
TCP_LOG_RTO, /* Retransmit timeout 3 */
TCP_LOG_TF_ACK, /* Transmit due to TF_ACK 4 */
TCP_LOG_BAD_RETRAN, /* Detected bad retransmission 5 */
TCP_LOG_PRR, /* Doing PRR 6 */
TCP_LOG_REORDER, /* Detected reorder 7 */
TCP_LOG_HPTS, /* Hpts sending a packet 8 */
BBR_LOG_BBRUPD, /* We updated BBR info 9 */
BBR_LOG_BBRSND, /* We did a slot calculation and sending is done 10 */
BBR_LOG_ACKCLEAR, /* A ack clears all outstanding 11 */
BBR_LOG_INQUEUE, /* The tcb had a packet input to it 12 */
@ -195,8 +194,8 @@ enum tcp_log_events {
BBR_LOG_PERSIST, /* BBR changed to/from a persists 24 */
TCP_LOG_FLOWEND, /* End of a flow 25 */
BBR_LOG_RTO, /* BBR's timeout includes BBR info 26 */
BBR_LOG_DOSEG_DONE, /* hpts do_segment completes 27 */
BBR_LOG_EXIT_GAIN, /* hpts do_segment completes 28 */
BBR_LOG_DOSEG_DONE, /* hpts do_segment completes 27 */
BBR_LOG_EXIT_GAIN, /* hpts do_segment completes 28 */
BBR_LOG_THRESH_CALC, /* Doing threshold calculation 29 */
BBR_LOG_EXTRACWNDGAIN, /* Removed 30 */
TCP_LOG_USERSEND, /* User level sends data 31 */
@ -204,29 +203,29 @@ enum tcp_log_events {
BBR_LOG_STATE_TARGET, /* Log of target at state 33 */
BBR_LOG_TIME_EPOCH, /* A time-based epoch occurred 34 */
BBR_LOG_TO_PROCESS, /* A to was processed 35 */
BBR_LOG_BBRTSO, /* TSO update 36 */
BBR_LOG_HPTSDIAG, /* Hpts diag insert 37 */
BBR_LOG_BBRTSO, /* TSO update 36 */
BBR_LOG_HPTSDIAG, /* Hpts diag insert 37 */
BBR_LOG_LOWGAIN, /* Low gain accounting 38 */
BBR_LOG_PROGRESS, /* Progress timer event 39 */
TCP_LOG_SOCKET_OPT, /* A socket option is set 40 */
TCP_LOG_SOCKET_OPT, /* A socket option is set 40 */
BBR_LOG_TIMERPREP, /* A BBR var to debug out TLP issues 41 */
BBR_LOG_ENOBUF_JMP, /* We had a enobuf jump 42 */
BBR_LOG_HPTSI_CALC, /* calc the hptsi time 43 */
BBR_LOG_ENOBUF_JMP, /* We had a enobuf jump 42 */
BBR_LOG_HPTSI_CALC, /* calc the hptsi time 43 */
BBR_LOG_RTT_SHRINKS, /* We had a log reduction of rttProp 44 */
BBR_LOG_BW_RED_EV, /* B/W reduction events 45 */
BBR_LOG_BW_RED_EV, /* B/W reduction events 45 */
BBR_LOG_REDUCE, /* old bbr log reduce for 4.1 and earlier 46*/
TCP_LOG_RTT, /* A rtt (in useconds) is being sampled and applied to the srtt algo 47 */
BBR_LOG_SETTINGS_CHG, /* Settings changed for loss response 48 */
BBR_LOG_SRTT_GAIN_EVENT, /* SRTT gaining -- now not used 49 */
TCP_LOG_REASS, /* Reassembly buffer logging 50 */
TCP_HDWR_TLS, /* TCP Hardware TLS logs 51 */
BBR_LOG_HDWR_PACE, /* TCP Hardware pacing log 52 */
BBR_LOG_TSTMP_VAL, /* Temp debug timestamp validation 53 */
TCP_LOG_CONNEND, /* End of connection 54 */
TCP_LOG_LRO, /* LRO entry 55 */
TCP_SACK_FILTER_RES, /* Results of SACK Filter 56 */
TCP_SAD_DETECTION, /* Sack Attack Detection 57 */
TCP_LOG_END /* End (keep at end) 58 */
BBR_LOG_SETTINGS_CHG, /* Settings changed for loss response 48 */
BBR_LOG_SRTT_GAIN_EVENT, /* SRTT gaining -- now not used 49 */
TCP_LOG_REASS, /* Reassembly buffer logging 50 */
TCP_HDWR_TLS, /* TCP Hardware TLS logs 51 */
BBR_LOG_HDWR_PACE, /* TCP Hardware pacing log 52 */
BBR_LOG_TSTMP_VAL, /* Temp debug timestamp validation 53 */
TCP_LOG_CONNEND, /* End of connection 54 */
TCP_LOG_LRO, /* LRO entry 55 */
TCP_SACK_FILTER_RES, /* Results of SACK Filter 56 */
TCP_SAD_DETECTION, /* Sack Attack Detection 57 */
TCP_LOG_END /* End (keep at end) 58 */
};
enum tcp_log_states {
@ -296,8 +295,8 @@ struct tcp_log_dev_log_queue {
do { \
if (tp->t_logstate != TCP_LOG_STATE_OFF) \
tcp_log_event_(tp, th, rxbuf, txbuf, eventid, \
errornum, len, stackinfo, th_hostorder, \
tp->t_output_caller, __func__, __LINE__, tv); \
errornum, len, stackinfo, th_hostorder, \
tp->t_output_caller, __func__, __LINE__, tv);\
} while (0)
/*
@ -326,11 +325,11 @@ struct tcp_log_dev_log_queue {
if (tcp_log_verbose) \
TCP_LOG_EVENT_VERBOSE(tp, th, rxbuf, txbuf, \
eventid, errornum, len, stackinfo, \
th_hostorder, NULL); \
th_hostorder, NULL); \
else if (tp->t_logstate != TCP_LOG_STATE_OFF) \
tcp_log_event_(tp, th, rxbuf, txbuf, eventid, \
errornum, len, stackinfo, th_hostorder, \
NULL, NULL, 0, NULL); \
NULL, NULL, 0, NULL); \
} while (0)
#endif /* TCP_LOG_FORCEVERBOSE */
#define TCP_LOG_EVENTP(tp, th, rxbuf, txbuf, eventid, errornum, len, stackinfo, th_hostorder, tv) \

View File

@ -420,7 +420,7 @@ tcp_output(struct tcpcb *tp)
*/
if (len > 0) {
cwin = tp->snd_cwnd -
(tp->snd_nxt - tp->sack_newdata) -
(tp->snd_nxt - tp->snd_recover) -
sack_bytes_rxmt;
if (cwin < 0)
cwin = 0;
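The hunk above recomputes the amount of new data permitted during SACK recovery from snd_recover instead of the removed sack_newdata field. A minimal userland sketch of that arithmetic, with invented example values:

/* Illustration only; the sequence numbers below are made up. */
#include <stdio.h>

int
main(void)
{
	long snd_cwnd = 20000;		/* congestion window */
	long snd_nxt = 115000;		/* next new sequence number to send */
	long snd_recover = 100000;	/* recovery point (formerly sack_newdata) */
	long sack_bytes_rxmt = 3000;	/* bytes already retransmitted */
	long cwin;

	cwin = snd_cwnd - (snd_nxt - snd_recover) - sack_bytes_rxmt;
	if (cwin < 0)
		cwin = 0;
	printf("room for %ld new bytes\n", cwin);	/* prints 2000 */
	return (0);
}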

View File

@ -780,7 +780,7 @@ tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
if ((BYTES_THIS_ACK(tp, th) / tp->t_maxseg) >= 2)
num_segs = 2;
tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
(tp->snd_nxt - tp->sack_newdata) + num_segs * tp->t_maxseg);
(tp->snd_nxt - tp->snd_recover) + num_segs * tp->t_maxseg);
if (tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
tp->t_flags |= TF_ACKNOW;

View File

@ -2806,8 +2806,8 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
db_print_indent(indent);
db_printf("snd_fack: 0x%08x rcv_numsacks: %d sack_newdata: "
"0x%08x\n", tp->snd_fack, tp->rcv_numsacks, tp->sack_newdata);
db_printf("snd_fack: 0x%08x rcv_numsacks: %d\n",
tp->snd_fack, tp->rcv_numsacks);
/* Skip sackblks, sackhint. */

View File

@ -186,8 +186,6 @@ struct tcpcb {
TAILQ_HEAD(sackhole_head, sackhole) snd_holes;
/* SACK scoreboard (sorted) */
tcp_seq snd_fack; /* last seq number(+1) sack'd by rcv'r*/
tcp_seq sack_newdata; /* New data xmitted in this recovery
episode starts at this seq number */
struct sackblk sackblks[MAX_SACK_BLKS]; /* seq nos. of sack blocks */
struct sackhint sackhint; /* SACK scoreboard hint */
int t_rttlow; /* smallest observed RTT */

View File

@ -377,11 +377,14 @@ void audit_thread_free(struct thread *td);
audit_arg_vnode2((vp)); \
} while (0)
#define AUDIT_SYSCALL_ENTER(code, td) do { \
if (audit_syscalls_enabled) { \
#define AUDIT_SYSCALL_ENTER(code, td) ({ \
bool _audit_entered = false; \
if (__predict_false(audit_syscalls_enabled)) { \
audit_syscall_enter(code, td); \
_audit_entered = true; \
} \
} while (0)
_audit_entered; \
})
/*
* Wrap the audit_syscall_exit() function so that it is called only when
@ -449,7 +452,7 @@ void audit_thread_free(struct thread *td);
#define AUDIT_ARG_VNODE1(vp)
#define AUDIT_ARG_VNODE2(vp)
#define AUDIT_SYSCALL_ENTER(code, td)
#define AUDIT_SYSCALL_ENTER(code, td) 0
#define AUDIT_SYSCALL_EXIT(error, td)
#define AUDIT_SYSCLOSE(p, fd)

View File

@ -117,6 +117,23 @@ static unsigned int mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
"");
/*
* Flags for inlined checks.
*/
#define FPFLAG(f) \
bool __read_frequently mac_##f##_fp_flag
FPFLAG(priv_check);
FPFLAG(priv_grant);
FPFLAG(vnode_check_lookup);
FPFLAG(vnode_check_open);
FPFLAG(vnode_check_stat);
FPFLAG(vnode_check_read);
FPFLAG(vnode_check_write);
FPFLAG(vnode_check_mmap);
#undef FPFLAG
/*
* Labels consist of a indexed set of "slots", which are allocated policies
* as required. The MAC Framework maintains a bitmask of slots allocated so
@ -376,6 +393,96 @@ mac_policy_update(void)
}
}
/*
* There are frequently used code paths which check for rarely installed
* policies. Gross hack below enables doing it in a cheap manner.
*/
#define FPO(f) (offsetof(struct mac_policy_ops, mpo_##f) / sizeof(uintptr_t))
struct mac_policy_fastpath_elem {
int count;
bool *flag;
size_t offset;
};
struct mac_policy_fastpath_elem mac_policy_fastpath_array[] = {
{ .offset = FPO(priv_check), .flag = &mac_priv_check_fp_flag },
{ .offset = FPO(priv_grant), .flag = &mac_priv_grant_fp_flag },
{ .offset = FPO(vnode_check_lookup),
.flag = &mac_vnode_check_lookup_fp_flag },
{ .offset = FPO(vnode_check_open),
.flag = &mac_vnode_check_open_fp_flag },
{ .offset = FPO(vnode_check_stat),
.flag = &mac_vnode_check_stat_fp_flag },
{ .offset = FPO(vnode_check_read),
.flag = &mac_vnode_check_read_fp_flag },
{ .offset = FPO(vnode_check_write),
.flag = &mac_vnode_check_write_fp_flag },
{ .offset = FPO(vnode_check_mmap),
.flag = &mac_vnode_check_mmap_fp_flag },
};
static void
mac_policy_fastpath_enable(struct mac_policy_fastpath_elem *mpfe)
{
MPASS(mpfe->count >= 0);
mpfe->count++;
if (mpfe->count == 1) {
MPASS(*mpfe->flag == false);
*mpfe->flag = true;
}
}
static void
mac_policy_fastpath_disable(struct mac_policy_fastpath_elem *mpfe)
{
MPASS(mpfe->count >= 1);
mpfe->count--;
if (mpfe->count == 0) {
MPASS(*mpfe->flag == true);
*mpfe->flag = false;
}
}
static void
mac_policy_fastpath_register(struct mac_policy_conf *mpc)
{
struct mac_policy_fastpath_elem *mpfe;
uintptr_t **ops;
int i;
mac_policy_xlock_assert();
ops = (uintptr_t **)mpc->mpc_ops;
for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
mpfe = &mac_policy_fastpath_array[i];
if (ops[mpfe->offset] != NULL)
mac_policy_fastpath_enable(mpfe);
}
}
static void
mac_policy_fastpath_unregister(struct mac_policy_conf *mpc)
{
struct mac_policy_fastpath_elem *mpfe;
uintptr_t **ops;
int i;
mac_policy_xlock_assert();
ops = (uintptr_t **)mpc->mpc_ops;
for (i = 0; i < nitems(mac_policy_fastpath_array); i++) {
mpfe = &mac_policy_fastpath_array[i];
if (ops[mpfe->offset] != NULL)
mac_policy_fastpath_disable(mpfe);
}
}
#undef FPO
static int
mac_policy_register(struct mac_policy_conf *mpc)
{
@ -446,6 +553,9 @@ mac_policy_register(struct mac_policy_conf *mpc)
*/
if (mpc->mpc_ops->mpo_init != NULL)
(*(mpc->mpc_ops->mpo_init))(mpc);
mac_policy_fastpath_register(mpc);
mac_policy_update();
SDT_PROBE1(mac, , policy, register, mpc);
@ -487,6 +597,9 @@ mac_policy_unregister(struct mac_policy_conf *mpc)
mac_policy_xunlock();
return (EBUSY);
}
mac_policy_fastpath_unregister(mpc);
if (mpc->mpc_ops->mpo_destroy != NULL)
(*(mpc->mpc_ops->mpo_destroy))(mpc);
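The registration hooks above flip per-hook __read_frequently booleans so that, as the mac_framework.h changes show, hot paths call a static inline wrapper and only drop into the out-of-line MAC check when at least one loaded policy implements that hook. A self-contained sketch of the pattern with invented names (hook_check, hook_check_flag):

#include <stdbool.h>
#include <stdio.h>

static bool hook_check_flag;		/* true while >= 1 provider is loaded */

static int
hook_check_impl(int arg)
{
	printf("slow path: consulting providers for %d\n", arg);
	return (0);
}

static inline int
hook_check(int arg)
{
	if (hook_check_flag)		/* __predict_false() in the kernel */
		return (hook_check_impl(arg));
	return (0);			/* no policy registered: no call made */
}

int
main(void)
{
	(void)hook_check(1);		/* fast path, nothing printed */
	hook_check_flag = true;		/* a policy registered the hook */
	(void)hook_check(2);		/* now takes the slow path */
	return (0);
}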

View File

@ -258,8 +258,27 @@ void mac_posixshm_create(struct ucred *cred, struct shmfd *shmfd);
void mac_posixshm_destroy(struct shmfd *);
void mac_posixshm_init(struct shmfd *);
int mac_priv_check(struct ucred *cred, int priv);
int mac_priv_grant(struct ucred *cred, int priv);
int mac_priv_check_impl(struct ucred *cred, int priv);
extern bool mac_priv_check_fp_flag;
static inline int
mac_priv_check(struct ucred *cred, int priv)
{
if (__predict_false(mac_priv_check_fp_flag))
return (mac_priv_check_impl(cred, priv));
return (0);
}
int mac_priv_grant_impl(struct ucred *cred, int priv);
extern bool mac_priv_grant_fp_flag;
static inline int
mac_priv_grant(struct ucred *cred, int priv)
{
if (__predict_false(mac_priv_grant_fp_flag))
return (mac_priv_grant_impl(cred, priv));
return (EPERM);
}
int mac_proc_check_debug(struct ucred *cred, struct proc *p);
int mac_proc_check_sched(struct ucred *cred, struct proc *p);
@ -371,6 +390,12 @@ void mac_sysvshm_init(struct shmid_kernel *);
void mac_thread_userret(struct thread *td);
#ifdef DEBUG_VFS_LOCKS
void mac_vnode_assert_locked(struct vnode *vp, const char *func);
#else
#define mac_vnode_assert_locked(vp, func) do { } while (0)
#endif
int mac_vnode_associate_extattr(struct mount *mp, struct vnode *vp);
void mac_vnode_associate_singlelabel(struct mount *mp, struct vnode *vp);
int mac_vnode_check_access(struct ucred *cred, struct vnode *vp,
@ -393,18 +418,53 @@ int mac_vnode_check_link(struct ucred *cred, struct vnode *dvp,
struct vnode *vp, struct componentname *cnp);
int mac_vnode_check_listextattr(struct ucred *cred, struct vnode *vp,
int attrnamespace);
int mac_vnode_check_lookup(struct ucred *cred, struct vnode *dvp,
int mac_vnode_check_lookup_impl(struct ucred *cred, struct vnode *dvp,
struct componentname *cnp);
int mac_vnode_check_mmap(struct ucred *cred, struct vnode *vp, int prot,
extern bool mac_vnode_check_lookup_fp_flag;
static inline int
mac_vnode_check_lookup(struct ucred *cred, struct vnode *dvp,
struct componentname *cnp)
{
mac_vnode_assert_locked(dvp, "mac_vnode_check_lookup");
if (__predict_false(mac_vnode_check_lookup_fp_flag))
return (mac_vnode_check_lookup_impl(cred, dvp, cnp));
return (0);
}
int mac_vnode_check_mmap_impl(struct ucred *cred, struct vnode *vp, int prot,
int flags);
extern bool mac_vnode_check_mmap_fp_flag;
static inline int
mac_vnode_check_mmap(struct ucred *cred, struct vnode *vp, int prot,
int flags)
{
mac_vnode_assert_locked(vp, "mac_vnode_check_mmap");
if (__predict_false(mac_vnode_check_mmap_fp_flag))
return (mac_vnode_check_mmap_impl(cred, vp, prot, flags));
return (0);
}
int mac_vnode_check_open_impl(struct ucred *cred, struct vnode *vp,
accmode_t accmode);
extern bool mac_vnode_check_open_fp_flag;
static inline int
mac_vnode_check_open(struct ucred *cred, struct vnode *vp,
accmode_t accmode)
{
mac_vnode_assert_locked(vp, "mac_vnode_check_open");
if (__predict_false(mac_vnode_check_open_fp_flag))
return (mac_vnode_check_open_impl(cred, vp, accmode));
return (0);
}
int mac_vnode_check_mprotect(struct ucred *cred, struct vnode *vp,
int prot);
int mac_vnode_check_open(struct ucred *cred, struct vnode *vp,
accmode_t accmode);
int mac_vnode_check_poll(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
int mac_vnode_check_read(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
int mac_vnode_check_readdir(struct ucred *cred, struct vnode *vp);
int mac_vnode_check_readlink(struct ucred *cred, struct vnode *vp);
int mac_vnode_check_rename_from(struct ucred *cred, struct vnode *dvp,
@ -424,12 +484,51 @@ int mac_vnode_check_setowner(struct ucred *cred, struct vnode *vp,
uid_t uid, gid_t gid);
int mac_vnode_check_setutimes(struct ucred *cred, struct vnode *vp,
struct timespec atime, struct timespec mtime);
int mac_vnode_check_stat(struct ucred *active_cred,
int mac_vnode_check_stat_impl(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
extern bool mac_vnode_check_stat_fp_flag;
static inline int
mac_vnode_check_stat(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
mac_vnode_assert_locked(vp, "mac_vnode_check_stat");
if (__predict_false(mac_vnode_check_stat_fp_flag))
return (mac_vnode_check_stat_impl(active_cred, file_cred, vp));
return (0);
}
int mac_vnode_check_read_impl(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
extern bool mac_vnode_check_read_fp_flag;
static inline int
mac_vnode_check_read(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
mac_vnode_assert_locked(vp, "mac_vnode_check_read");
if (__predict_false(mac_vnode_check_read_fp_flag))
return (mac_vnode_check_read_impl(active_cred, file_cred, vp));
return (0);
}
int mac_vnode_check_write_impl(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
extern bool mac_vnode_check_write_fp_flag;
static inline int
mac_vnode_check_write(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
mac_vnode_assert_locked(vp, "mac_vnode_check_write");
if (__predict_false(mac_vnode_check_write_fp_flag))
return (mac_vnode_check_write_impl(active_cred, file_cred, vp));
return (0);
}
int mac_vnode_check_unlink(struct ucred *cred, struct vnode *dvp,
struct vnode *vp, struct componentname *cnp);
int mac_vnode_check_write(struct ucred *active_cred,
struct ucred *file_cred, struct vnode *vp);
void mac_vnode_copy_label(struct label *, struct label *);
void mac_vnode_init(struct vnode *);
int mac_vnode_create_extattr(struct ucred *cred, struct mount *mp,

View File

@ -67,7 +67,7 @@ MAC_CHECK_PROBE_DEFINE2(priv_check, "struct ucred *", "int");
* policy denies access.
*/
int
mac_priv_check(struct ucred *cred, int priv)
mac_priv_check_impl(struct ucred *cred, int priv)
{
int error;
@ -84,7 +84,7 @@ MAC_GRANT_PROBE_DEFINE2(priv_grant, "struct ucred *", "int");
* policy grants access.
*/
int
mac_priv_grant(struct ucred *cred, int priv)
mac_priv_grant_impl(struct ucred *cred, int priv)
{
int error;

View File

@ -565,13 +565,15 @@ MAC_CHECK_PROBE_DEFINE3(vnode_check_lookup, "struct ucred *",
"struct vnode *", "struct componentname *");
int
mac_vnode_check_lookup(struct ucred *cred, struct vnode *dvp,
mac_vnode_check_lookup_impl(struct ucred *cred, struct vnode *dvp,
struct componentname *cnp)
{
int error;
ASSERT_VOP_LOCKED(dvp, "mac_vnode_check_lookup");
if ((cnp->cn_flags & NOMACCHECK) != 0)
return (0);
MAC_POLICY_CHECK(vnode_check_lookup, cred, dvp, dvp->v_label, cnp);
MAC_CHECK_PROBE3(vnode_check_lookup, error, cred, dvp, cnp);
@ -582,7 +584,7 @@ MAC_CHECK_PROBE_DEFINE4(vnode_check_mmap, "struct ucred *", "struct vnode *",
"int", "int");
int
mac_vnode_check_mmap(struct ucred *cred, struct vnode *vp, int prot,
mac_vnode_check_mmap_impl(struct ucred *cred, struct vnode *vp, int prot,
int flags)
{
int error;
@ -629,7 +631,7 @@ MAC_CHECK_PROBE_DEFINE3(vnode_check_open, "struct ucred *", "struct vnode *",
"accmode_t");
int
mac_vnode_check_open(struct ucred *cred, struct vnode *vp, accmode_t accmode)
mac_vnode_check_open_impl(struct ucred *cred, struct vnode *vp, accmode_t accmode)
{
int error;
@ -664,7 +666,7 @@ MAC_CHECK_PROBE_DEFINE3(vnode_check_read, "struct ucred *", "struct ucred *",
"struct vnode *");
int
mac_vnode_check_read(struct ucred *active_cred, struct ucred *file_cred,
mac_vnode_check_read_impl(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
int error;
@ -889,7 +891,7 @@ MAC_CHECK_PROBE_DEFINE3(vnode_check_stat, "struct ucred *", "struct ucred *",
"struct vnode *");
int
mac_vnode_check_stat(struct ucred *active_cred, struct ucred *file_cred,
mac_vnode_check_stat_impl(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
int error;
@ -927,7 +929,7 @@ MAC_CHECK_PROBE_DEFINE3(vnode_check_write, "struct ucred *",
"struct ucred *", "struct vnode *");
int
mac_vnode_check_write(struct ucred *active_cred, struct ucred *file_cred,
mac_vnode_check_write_impl(struct ucred *active_cred, struct ucred *file_cred,
struct vnode *vp)
{
int error;
@ -1068,3 +1070,12 @@ vn_setlabel(struct vnode *vp, struct label *intlabel, struct ucred *cred)
return (0);
}
#ifdef DEBUG_VFS_LOCKS
void
mac_vnode_assert_locked(struct vnode *vp, const char *func)
{
ASSERT_VOP_LOCKED(vp, func);
}
#endif

View File

@ -514,7 +514,7 @@ int cap_check(const cap_rights_t *havep, const cap_rights_t *needp);
/*
* Convert capability rights into VM access flags.
*/
u_char cap_rights_to_vmprot(const cap_rights_t *havep);
vm_prot_t cap_rights_to_vmprot(const cap_rights_t *havep);
/*
* For the purposes of procstat(1) and similar tools, allow kern_descrip.c to

View File

@ -247,7 +247,7 @@ extern int maxfilesperproc; /* per process limit on number of open files */
int fget(struct thread *td, int fd, cap_rights_t *rightsp, struct file **fpp);
int fget_mmap(struct thread *td, int fd, cap_rights_t *rightsp,
u_char *maxprotp, struct file **fpp);
vm_prot_t *maxprotp, struct file **fpp);
int fget_read(struct thread *td, int fd, cap_rights_t *rightsp,
struct file **fpp);
int fget_write(struct thread *td, int fd, cap_rights_t *rightsp,

View File

@ -369,7 +369,12 @@ struct mount;
struct sockaddr;
struct statfs;
struct vfsconf;
int jailed(struct ucred *cred);
/*
* Return 1 if the passed credential is in a jail, otherwise 0.
*/
#define jailed(cred) (cred->cr_prison != &prison0)
int jailed_without_vnet(struct ucred *);
void getcredhostname(struct ucred *, char *, size_t);
void getcreddomainname(struct ucred *, char *, size_t);
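jailed() becomes a macro so frequent credential checks avoid a function call: a credential is in a jail exactly when its prison is not the host's prison0. A toy sketch of that predicate with invented structures:

#include <stdio.h>

struct prison { int pr_id; };
static struct prison prison0 = { 0 };		/* host (not-jailed) sentinel */
struct ucred { struct prison *cr_prison; };

#define cred_jailed(cred)	((cred)->cr_prison != &prison0)

int
main(void)
{
	struct prison j = { 7 };
	struct ucred host = { &prison0 };
	struct ucred inmate = { &j };

	printf("host jailed: %d, inmate jailed: %d\n",
	    cred_jailed(&host), cred_jailed(&inmate));
	return (0);
}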

View File

@ -73,7 +73,7 @@ struct ktr_header {
#define KTRPOINT(td, type) (__predict_false(KTRCHECK((td), (type))))
#define KTRCHECKDRAIN(td) (!(STAILQ_EMPTY(&(td)->td_proc->p_ktr)))
#define KTRUSERRET(td) do { \
if (KTRCHECKDRAIN(td)) \
if (__predict_false(KTRCHECKDRAIN(td))) \
ktruserret(td); \
} while (0)

View File

@ -70,10 +70,8 @@ struct thread;
*/
int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
const char *wmesg, int prio, int timo, const char *file, int line);
int lockmgr_lock_fast_path(struct lock *lk, u_int flags,
int lockmgr_lock_flags(struct lock *lk, u_int flags,
struct lock_object *ilk, const char *file, int line);
int lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
struct lock_object *ilk);
int lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line);
int lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line);
int lockmgr_unlock(struct lock *lk);

View File

@ -533,6 +533,7 @@ struct thread;
struct ucred;
int priv_check(struct thread *td, int priv);
int priv_check_cred(struct ucred *cred, int priv);
int priv_check_cred_vfs_generation(struct ucred *cred);
#endif
#endif /* !_SYS_PRIV_H_ */

View File

@ -68,6 +68,15 @@ struct smr {
int c_deferred; /* Deferred advance counter. */
};
#define SMR_ENTERED(smr) \
(curthread->td_critnest != 0 && zpcpu_get((smr))->c_seq != SMR_SEQ_INVALID)
#define SMR_ASSERT_ENTERED(smr) \
KASSERT(SMR_ENTERED(smr), ("Not in smr section"))
#define SMR_ASSERT_NOT_ENTERED(smr) \
KASSERT(!SMR_ENTERED(smr), ("In smr section."));
/*
* Return the current write sequence number.
*/

View File

@ -54,7 +54,11 @@ typedef void (*systrace_probe_func_t)(struct syscall_args *,
typedef void (*systrace_args_func_t)(int, void *, uint64_t *, int *);
#ifdef _KERNEL
#ifdef KDTRACE_HOOKS
extern bool systrace_enabled;
#else
#define systrace_enabled 0
#endif
#endif
extern systrace_probe_func_t systrace_probe_func;
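Defining systrace_enabled to a literal 0 when KDTRACE_HOOKS is not configured lets the compiler discard the DTrace branches in syscallenter() entirely. A small sketch of the same compile-out idiom, with an invented WITH_TRACING option:

#include <stdio.h>

/* #define WITH_TRACING */
#ifdef WITH_TRACING
extern _Bool tracing_enabled;		/* real flag when the option is on */
#else
#define tracing_enabled 0		/* constant: the branch below folds away */
#endif

int
main(void)
{
	if (tracing_enabled)
		printf("trace hook fired\n");
	printf("normal path\n");
	return (0);
}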

View File

@ -553,12 +553,13 @@ zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
if ((bucket = STAILQ_FIRST(&zdom->uzd_buckets)) == NULL)
return (NULL);
/* SMR Buckets can not be re-used until readers expire. */
if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
bucket->ub_seq != SMR_SEQ_INVALID) {
if (!smr_poll(zone->uz_smr, bucket->ub_seq, false))
return (NULL);
bucket->ub_seq = SMR_SEQ_INVALID;
dtor = (zone->uz_dtor != NULL) | UMA_ALWAYS_CTORDTOR;
dtor = (zone->uz_dtor != NULL) || UMA_ALWAYS_CTORDTOR;
}
MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
@ -678,6 +679,7 @@ cache_bucket_load(uma_cache_bucket_t bucket, uma_bucket_t b)
CRITICAL_ASSERT(curthread);
MPASS(bucket->ucb_bucket == NULL);
MPASS(b->ub_seq == SMR_SEQ_INVALID);
bucket->ucb_bucket = b;
bucket->ucb_cnt = b->ub_cnt;
@ -979,10 +981,10 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
if ((zone->uz_flags & UMA_ZONE_SMR) != 0 &&
bucket->ub_seq != SMR_SEQ_INVALID) {
smr_wait(zone->uz_smr, bucket->ub_seq);
bucket->ub_seq = SMR_SEQ_INVALID;
for (i = 0; i < bucket->ub_cnt; i++)
item_dtor(zone, bucket->ub_bucket[i],
zone->uz_size, NULL, SKIP_NONE);
bucket->ub_seq = SMR_SEQ_INVALID;
}
if (zone->uz_fini)
for (i = 0; i < bucket->ub_cnt; i++)
@ -1014,6 +1016,7 @@ cache_drain(uma_zone_t zone)
{
uma_cache_t cache;
uma_bucket_t bucket;
smr_seq_t seq;
int cpu;
/*
@ -1024,6 +1027,9 @@ cache_drain(uma_zone_t zone)
* XXX: It would good to be able to assert that the zone is being
* torn down to prevent improper use of cache_drain().
*/
seq = SMR_SEQ_INVALID;
if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
seq = smr_current(zone->uz_smr);
CPU_FOREACH(cpu) {
cache = &zone->uz_cpu[cpu];
bucket = cache_bucket_unload_alloc(cache);
@ -1033,11 +1039,13 @@ cache_drain(uma_zone_t zone)
}
bucket = cache_bucket_unload_free(cache);
if (bucket != NULL) {
bucket->ub_seq = seq;
bucket_drain(zone, bucket);
bucket_free(zone, bucket, NULL);
}
bucket = cache_bucket_unload_cross(cache);
if (bucket != NULL) {
bucket->ub_seq = seq;
bucket_drain(zone, bucket);
bucket_free(zone, bucket, NULL);
}
@ -1069,7 +1077,6 @@ cache_drain_safe_cpu(uma_zone_t zone, void *unused)
return;
b1 = b2 = b3 = NULL;
ZONE_LOCK(zone);
critical_enter();
if (zone->uz_flags & UMA_ZONE_FIRSTTOUCH)
domain = PCPU_GET(domain);
@ -1077,32 +1084,33 @@ cache_drain_safe_cpu(uma_zone_t zone, void *unused)
domain = 0;
cache = &zone->uz_cpu[curcpu];
b1 = cache_bucket_unload_alloc(cache);
if (b1 != NULL && b1->ub_cnt != 0) {
zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
b1 = NULL;
}
/*
* Don't flush SMR zone buckets. This leaves the zone without a
* bucket and forces every free to synchronize().
*/
if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
goto out;
b2 = cache_bucket_unload_free(cache);
if ((zone->uz_flags & UMA_ZONE_SMR) == 0) {
b2 = cache_bucket_unload_free(cache);
b3 = cache_bucket_unload_cross(cache);
}
critical_exit();
ZONE_LOCK(zone);
if (b1 != NULL && b1->ub_cnt != 0) {
zone_put_bucket(zone, &zone->uz_domain[domain], b1, false);
b1 = NULL;
}
if (b2 != NULL && b2->ub_cnt != 0) {
zone_put_bucket(zone, &zone->uz_domain[domain], b2, false);
b2 = NULL;
}
b3 = cache_bucket_unload_cross(cache);
out:
critical_exit();
ZONE_UNLOCK(zone);
if (b1)
if (b1 != NULL)
bucket_free(zone, b1, NULL);
if (b2)
if (b2 != NULL)
bucket_free(zone, b2, NULL);
if (b3) {
if (b3 != NULL) {
bucket_drain(zone, b3);
bucket_free(zone, b3, NULL);
}
@ -1196,6 +1204,7 @@ bucket_cache_reclaim(uma_zone_t zone, bool drain)
tofree = bucket->ub_cnt;
STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
zdom->uzd_nitems -= tofree;
zone->uz_bkt_count -= tofree;
/*
* Shift the bounds of the current WSS interval to avoid
@ -4004,6 +4013,7 @@ zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
struct uma_bucketlist fullbuckets;
uma_zone_domain_t zdom;
uma_bucket_t b;
smr_seq_t seq;
void *item;
int domain;
@ -4019,6 +4029,14 @@ zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
* per-domain locking could be used if necessary.
*/
ZONE_CROSS_LOCK(zone);
/*
* It is possible for buckets to arrive here out of order so we fetch
* the current smr seq rather than accepting the bucket's.
*/
seq = SMR_SEQ_INVALID;
if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
seq = smr_current(zone->uz_smr);
while (bucket->ub_cnt > 0) {
item = bucket->ub_bucket[bucket->ub_cnt - 1];
domain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
@ -4028,10 +4046,11 @@ zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
if (zdom->uzd_cross == NULL)
break;
}
zdom->uzd_cross->ub_bucket[zdom->uzd_cross->ub_cnt++] = item;
if (zdom->uzd_cross->ub_cnt == zdom->uzd_cross->ub_entries) {
STAILQ_INSERT_HEAD(&fullbuckets, zdom->uzd_cross,
ub_link);
b = zdom->uzd_cross;
b->ub_bucket[b->ub_cnt++] = item;
b->ub_seq = seq;
if (b->ub_cnt == b->ub_entries) {
STAILQ_INSERT_HEAD(&fullbuckets, b, ub_link);
zdom->uzd_cross = NULL;
}
bucket->ub_cnt--;
@ -4040,8 +4059,6 @@ zone_free_cross(uma_zone_t zone, uma_bucket_t bucket, void *udata)
if (!STAILQ_EMPTY(&fullbuckets)) {
ZONE_LOCK(zone);
while ((b = STAILQ_FIRST(&fullbuckets)) != NULL) {
if ((zone->uz_flags & UMA_ZONE_SMR) != 0)
bucket->ub_seq = smr_current(zone->uz_smr);
STAILQ_REMOVE_HEAD(&fullbuckets, ub_link);
if (zone->uz_bkt_count >= zone->uz_bkt_max) {
ZONE_UNLOCK(zone);

View File

@ -4258,7 +4258,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
return (NULL);
m = vm_page_alloc(object, pindex, pflags);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
if ((allocflags & (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL)) != 0)
return (NULL);
goto retrylookup;
}
@ -4466,7 +4466,8 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
m = vm_page_alloc_after(object, pindex + i,
pflags | VM_ALLOC_COUNT(count - i), mpred);
if (m == NULL) {
if ((allocflags & VM_ALLOC_NOWAIT) != 0)
if ((allocflags & (VM_ALLOC_NOWAIT |
VM_ALLOC_WAITFAIL)) != 0)
break;
goto retrylookup;
}

View File

@ -19,6 +19,7 @@ TESTS_SUBDIRS+= kern
TESTS_SUBDIRS+= kqueue
TESTS_SUBDIRS+= mac
TESTS_SUBDIRS+= mqueue
TESTS_SUBDIRS+= net
TESTS_SUBDIRS+= netinet
TESTS_SUBDIRS+= netinet6
TESTS_SUBDIRS+= netipsec

View File

@ -3,5 +3,8 @@
PACKAGE= tests
TESTSDIR= ${TESTSBASE}/sys/common
${PACKAGE}FILES+= vnet.subr
${PACKAGE}FILES+= divert.py
${PACKAGE}FILESMODE_divert.py=0555
.include <bsd.test.mk>

83 tests/sys/common/divert.py Executable file
View File

@ -0,0 +1,83 @@
#!/usr/bin/env python
# -
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Alexander V. Chernikov
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
import socket
import scapy.all as sc
import argparse
IPPROTO_DIVERT = 258
def parse_args():
parser = argparse.ArgumentParser(description='divert socket tester')
parser.add_argument('--dip', type=str, help='destination packet IP')
parser.add_argument('--divert_port', type=int, default=6668,
help='divert port to use')
parser.add_argument('--test_name', type=str, required=True,
help='test name to run')
return parser.parse_args()
def ipdivert_ip_output_remote_success(args):
packet = sc.IP(dst=args.dip) / sc.ICMP(type='echo-request')
with socket.socket(socket.AF_INET, socket.SOCK_RAW, IPPROTO_DIVERT) as s:
s.bind(('0.0.0.0', args.divert_port))
s.sendto(bytes(packet), ('0.0.0.0', 0))
def ipdivert_ip6_output_remote_success(args):
packet = sc.IPv6(dst=args.dip) / sc.ICMPv6EchoRequest()
with socket.socket(socket.AF_INET, socket.SOCK_RAW, IPPROTO_DIVERT) as s:
s.bind(('0.0.0.0', args.divert_port))
s.sendto(bytes(packet), ('0.0.0.0', 0))
def ipdivert_ip_input_local_success(args):
"""Sends IPv4 packet to OS stack as inbound local packet."""
packet = sc.IP(dst=args.dip) / sc.ICMP(type='echo-request')
with socket.socket(socket.AF_INET, socket.SOCK_RAW, IPPROTO_DIVERT) as s:
s.bind(('0.0.0.0', args.divert_port))
s.sendto(bytes(packet), (args.dip, 0))
# XXX: IPv6 local divert is currently not supported
# TODO: add IPv4 ifname output verification
def main():
args = parse_args()
test_ptr = globals()[args.test_name]
test_ptr(args)
if __name__ == '__main__':
main()

View File

@ -37,7 +37,7 @@
#include <stdlib.h>
#include <unistd.h>
#define RANDOM_MAX ((1<<31) - 1)
#define RANDOM_MAX ((1U<<31) - 1)
int main(int argc, char** argv){
useconds_t max_usecs, usecs;
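The constant changes because, with a 32-bit int, 1<<31 shifts a set bit into the sign position, which is undefined behaviour; the unsigned literal keeps the arithmetic well defined. A minimal illustration, assuming a 32-bit unsigned int:

#include <stdio.h>

int
main(void)
{
	unsigned int max = (1U << 31) - 1;	/* 2147483647, well defined */

	/* (1 << 31) - 1 would overflow a 32-bit signed int instead. */
	printf("%u\n", max);
	return (0);
}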

View File

@ -9,7 +9,7 @@ ATF_TESTS_C= ip_reass_test \
so_reuseport_lb_test \
socket_afinet
ATF_TESTS_SH= fibs_test redirect
ATF_TESTS_SH= fibs_test redirect divert
PROGS= udp_dontroute tcp_user_cookie

148 tests/sys/netinet/divert.sh Executable file
View File

@ -0,0 +1,148 @@
#!/usr/bin/env atf-sh
#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Alexander V. Chernikov
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
. $(atf_get_srcdir)/../common/vnet.subr
load_divert_module() {
kldstat -q -n ipdivert
if [ $? -ne 0 ]; then
atf_skip "ipdivert module is not loaded"
fi
}
atf_test_case "ipdivert_ip_output_remote_success" "cleanup"
ipdivert_ip_output_remote_success_head() {
atf_set descr 'Test diverting IPv4 packet to remote destination'
atf_set require.user root
atf_set require.progs scapy
}
ipdivert_ip_output_remote_success_body() {
ids=65530
id=`printf "%x" ${ids}`
if [ $$ -gt 65535 ]; then
xl=`printf "%x" $(($$ - 65535))`
yl="1"
else
xl=`printf "%x" $$`
yl=""
fi
vnet_init
load_divert_module
ip4a="192.0.2.5"
ip4b="192.0.2.6"
script_name="../common/divert.py"
epair=$(vnet_mkepair)
ifconfig ${epair}a up
ifconfig ${epair}a inet ${ip4a}/30
jname="v4t-${id}-${yl}-${xl}"
vnet_mkjail ${jname} ${epair}b
jexec ${jname} ifconfig ${epair}b up
jexec ${jname} ifconfig ${epair}b inet ${ip4b}/30
atf_check -s exit:0 $(atf_get_srcdir)/${script_name} \
--dip ${ip4b} --test_name ipdivert_ip_output_remote_success
count=`jexec ${jname} netstat -s -p icmp | grep 'Input histogram:' -A8 | grep -c 'echo: '`
# Verify the diverted echo request was received
atf_check_equal "1" "${count}"
}
ipdivert_ip_output_remote_success_cleanup() {
vnet_cleanup
}
atf_test_case "ipdivert_ip_input_local_success" "cleanup"
ipdivert_ip_input_local_success_head() {
atf_set descr 'Test diverting IPv4 packet to local destination'
atf_set require.user root
atf_set require.progs scapy
}
ipdivert_ip_input_local_success_body() {
ids=65529
id=`printf "%x" ${ids}`
if [ $$ -gt 65535 ]; then
xl=`printf "%x" $(($$ - 65535))`
yl="1"
else
xl=`printf "%x" $$`
yl=""
fi
vnet_init
load_divert_module
ip4a="192.0.2.5"
ip4b="192.0.2.6"
script_name="../common/divert.py"
epair=$(vnet_mkepair)
ifconfig ${epair}a up
ifconfig ${epair}a inet ${ip4a}/30
jname="v4t-${id}-${yl}-${xl}"
vnet_mkjail ${jname} ${epair}b
jexec ${jname} ifconfig ${epair}b up
jexec ${jname} ifconfig ${epair}b inet ${ip4b}/30
atf_check -s exit:0 jexec ${jname} $(atf_get_srcdir)/${script_name} \
--dip ${ip4b} --test_name ipdivert_ip_input_local_success
count=`jexec ${jname} netstat -s -p icmp | grep 'Input histogram:' -A8 | grep -c 'echo: '`
# Verify the diverted echo request was received
atf_check_equal "1" "${count}"
}
ipdivert_ip_input_local_success_cleanup() {
vnet_cleanup
}
atf_init_test_cases()
{
atf_add_test_case "ipdivert_ip_output_remote_success"
atf_add_test_case "ipdivert_ip_input_local_success"
}
# end

View File

@ -9,7 +9,8 @@ ATF_TESTS_SH= \
exthdr \
mld \
scapyi386 \
redirect
redirect \
divert
${PACKAGE}FILES+= exthdr.py
${PACKAGE}FILES+= mld.py

103 tests/sys/netinet6/divert.sh Executable file
View File

@ -0,0 +1,103 @@
#!/usr/bin/env atf-sh
#-
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2020 Alexander V. Chernikov
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $FreeBSD$
#
. $(atf_get_srcdir)/../common/vnet.subr
load_divert_module() {
kldstat -q -n ipdivert
if [ $? -ne 0 ]; then
atf_skip "ipdivert module is not loaded"
fi
}
atf_test_case "ipdivert_ip6_output_remote_success" "cleanup"
ipdivert_ip6_output_remote_success_head() {
atf_set descr 'Test diverting IPv6 packet to remote destination'
atf_set require.user root
atf_set require.progs scapy
}
ipdivert_ip6_output_remote_success_body() {
ids=65530
id=`printf "%x" ${ids}`
if [ $$ -gt 65535 ]; then
xl=`printf "%x" $(($$ - 65535))`
yl="1"
else
xl=`printf "%x" $$`
yl=""
fi
vnet_init
load_divert_module
ip6a="2001:db8:6666:0000:${yl}:${id}:1:${xl}"
ip6b="2001:db8:6666:0000:${yl}:${id}:2:${xl}"
script_name="../common/divert.py"
epair=$(vnet_mkepair)
ifconfig ${epair}a up
ifconfig ${epair}a inet6 ${ip6a}/64
jname="v6t-${id}-${yl}-${xl}"
vnet_mkjail ${jname} ${epair}b
jexec ${jname} ifconfig ${epair}b up
jexec ${jname} ifconfig ${epair}b inet6 ${ip6b}/64
# wait for DAD to complete
sleep 2
# echo "LOCAL: ${local_ll_ip} ${local_ll_mac}"
# echo "REMOTE: ${remote_rtr_ll_ip} ${remote_rtr_mac}"
atf_check -s exit:0 $(atf_get_srcdir)/${script_name} \
--dip ${ip6b} --test_name ipdivert_ip6_output_remote_success
count=`jexec ${jname} netstat -s -p icmp6 | grep 'Input histogram:' -A1 | grep -c 'echo:'`
# Verify the diverted echo request was received
atf_check_equal "1" "${count}"
}
ipdivert_ip6_output_remote_success_cleanup() {
vnet_cleanup
}
atf_init_test_cases()
{
atf_add_test_case "ipdivert_ip6_output_remote_success"
}
# end

View File

@ -30,7 +30,7 @@
.\" @(#)diff.1 8.1 (Berkeley) 6/30/93
.\" $FreeBSD$
.\"
.Dd February 12, 2020
.Dd February 13, 2020
.Dt DIFF 1
.Os
.Sh NAME
@ -60,7 +60,7 @@
.Op Fl -starting-file
.Op Fl -speed-large-files
.Op Fl -strip-trailing-cr
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Op Fl -unified
.Op Fl I Ar pattern | Fl -ignore-matching-lines Ar pattern
@ -88,7 +88,7 @@
.Op Fl -speed-large-files
.Op Fl -starting-file
.Op Fl -strip-trailing-cr
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Fl C Ar number | -context Ar number
.Ar file1 file2
@ -113,7 +113,7 @@
.Op Fl -speed-large-files
.Op Fl -starting-file
.Op Fl -strip-trailing-cr
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Fl D Ar string | Fl -ifdef Ar string
.Ar file1 file2
@ -139,7 +139,7 @@
.Op Fl -speed-large-files
.Op Fl -starting-file
.Op Fl -strip-trailing-cr
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Fl U Ar number | Fl -unified Ar number
.Ar file1 file2
@ -170,7 +170,7 @@
.Op Fl -show-c-function
.Op Fl -speed-large-files
.Op Fl -strip-trailing-cr
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Op Fl -unidirectional-new-file
.Op Fl -unified
@ -192,7 +192,7 @@
.Op Fl -no-ignore-file-name-case
.Op Fl -strip-trailing-cr
.Op Fl -suppress-common-lines
.Op Fl -tabsize
.Op Fl -tabsize Ar number
.Op Fl -text
.Op Fl -width
.Fl y | Fl -side-by-side

View File

@ -93,7 +93,7 @@ static struct option longopts[] = {
{ "no-ignore-file-name-case", no_argument, NULL, OPT_NO_IGN_FN_CASE },
{ "normal", no_argument, NULL, OPT_NORMAL },
{ "strip-trailing-cr", no_argument, NULL, OPT_STRIPCR },
{ "tabsize", optional_argument, NULL, OPT_TSIZE },
{ "tabsize", required_argument, NULL, OPT_TSIZE },
{ "changed-group-format", required_argument, NULL, OPT_CHANGED_GROUP_FORMAT},
{ "suppress-common-lines", no_argument, NULL, OPT_SUPPRESS_COMMON },
{ NULL, 0, 0, '\0'}

View File

@ -10,6 +10,7 @@ atf_test_case side_by_side
atf_test_case brief_format
atf_test_case b230049
atf_test_case Bflag
atf_test_case tabsize
simple_body()
{
@ -164,6 +165,16 @@ Bflag_body()
atf_check -s exit:1 -o file:"$(atf_get_srcdir)/Bflag_F.out" diff -B E F
}
tabsize_body()
{
printf "\tA\n" > A
printf "\tB\n" > B
atf_check -s exit:1 \
-o inline:"1c1\n< A\n---\n> B\n" \
diff -t --tabsize 1 A B
}
atf_init_test_cases()
{
atf_add_test_case simple
@ -176,4 +187,5 @@ atf_init_test_cases()
atf_add_test_case brief_format
atf_add_test_case b230049
atf_add_test_case Bflag
atf_add_test_case tabsize
}

View File

@ -46,7 +46,7 @@ __FBSDID("$FreeBSD$");
#include "debug.h"
#include "xmsr.h"
static int cpu_vendor_intel, cpu_vendor_amd;
static int cpu_vendor_intel, cpu_vendor_amd, cpu_vendor_hygon;
int
emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t val)
@ -64,7 +64,7 @@ emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t val)
default:
break;
}
} else if (cpu_vendor_amd) {
} else if (cpu_vendor_amd || cpu_vendor_hygon) {
switch (num) {
case MSR_HWCR:
/*
@ -128,7 +128,7 @@ emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t num, uint64_t *val)
error = -1;
break;
}
} else if (cpu_vendor_amd) {
} else if (cpu_vendor_amd || cpu_vendor_hygon) {
switch (num) {
case MSR_BIOS_SIGN:
*val = 0;
@ -225,6 +225,8 @@ init_msr(void)
error = 0;
if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
cpu_vendor_amd = 1;
} else if (strcmp(cpu_vendor, "HygonGenuine") == 0) {
cpu_vendor_hygon = 1;
} else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
cpu_vendor_intel = 1;
} else {

View File

@ -657,6 +657,8 @@ cpu_vendor_intel(void)
if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
return (false);
} else if (strcmp(cpu_vendor, "HygonGenuine") == 0) {
return (false);
} else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
return (true);
} else {

View File

@ -2494,14 +2494,6 @@ wnn6_DS 26208/tcp #Wnn6 (Dserver)
sgsap 29118/sctp #SGsAP in 3GPP
sbcap 29168/sctp #SBcAP in 3GPP
iuhsctpassoc 29169/sctp #HNBAP and RUA Common Association
profinet-rt 34962/tcp #PROFInet RT Unicast
profinet-rt 34962/udp #PROFInet RT Unicast
profinet-rtm 34963/tcp #PROFInet RT Multicast
profinet-rtm 34963/udp #PROFInet RT Multicast
profinet-cm 34964/tcp #PROFInet Context Manager
profinet-cm 34964/udp #PROFInet Context Manager
ethercat 34980/tcp #EtherCAT Port
ethercat 34980/udp #EhterCAT Port
s1-control 36412/sctp #S1-Control Plane (3GPP)
x2-control 36422/sctp #X2-Control Plane (3GPP)
dbbrowse 47557/tcp #Databeam Corporation

View File

@ -570,17 +570,17 @@ fetch_device_details(char *devnames, char **model, char **serial, off_t *size)
{
char ident[DISK_IDENT_SIZE];
struct diocgattr_arg arg;
char *device, *tmp;
char *tmp;
off_t mediasize;
int comma;
int fd;
tmp = strdup(devnames);
comma = (int)strcspn(devnames, ",");
asprintf(&tmp, "/dev/%.*s", comma, devnames);
if (tmp == NULL)
err(1, "strdup");
device = strsep(&tmp, ",");
asprintf(&tmp, "/dev/%s", device);
err(1, "asprintf");
fd = open(tmp, O_RDONLY);
free(tmp);
if (fd < 0) {
/*
* This can happen with a disk so broken it cannot
@ -589,6 +589,7 @@ fetch_device_details(char *devnames, char **model, char **serial, off_t *size)
*model = strdup("?");
*serial = strdup("?");
*size = -1;
close(fd);
return;
}
@ -608,6 +609,7 @@ fetch_device_details(char *devnames, char **model, char **serial, off_t *size)
*size = mediasize;
else
*size = -1;
close(fd);
}
static void