MFhead@r344270

Enji Cooper 2019-02-19 03:46:32 +00:00
commit 30e009fc3a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/import-googletest-1.8.1/; revision=344271
125 changed files with 3825 additions and 1008 deletions

View File

@ -38,6 +38,12 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 13.x IS SLOW:
modules on kernels not having 'device iflib', the iflib.ko module
is loaded automatically.
20190125:
The IEEE80211_AMPDU_AGE and AH_SUPPORT_AR5416 kernel configuration
options no longer exist as of r343219 and r343427 respectively;
nothing uses them, so they should simply be removed from custom
kernel config files.
20181230:
r342635 changes the way efibootmgr(8) works by requiring users to add
the -b (bootnum) parameter for commands where the bootnum was previously
@ -231,7 +237,7 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 13.x IS SLOW:
20180719:
ARM64 now has efifb support; if you want to have a serial console
on your arm64 board when a screen is connected and the bootloader
setup a frambuffer for us to use, just add :
setup a framebuffer for us to use, just add :
boot_serial=YES
boot_multicons=YES
in /boot/loader.conf

View File

@ -860,6 +860,7 @@ next_entry(struct archive_read_disk *a, struct tree *t,
struct archive_string delayed_str;
delayed = ARCHIVE_OK;
delayed_errno = 0;
archive_string_init(&delayed_str);
st = NULL;

View File

@ -733,12 +733,6 @@ _LIBCPP_INLINE_VAR _LIBCPP_CONSTEXPR bool is_integral_v
// is_floating_point
template <class _Tp> struct __libcpp_is_floating_point : public false_type {};
#ifdef __clang__
template <> struct __libcpp_is_floating_point<__fp16> : public true_type {};
#endif
#ifdef __FLT16_MANT_DIG__
template <> struct __libcpp_is_floating_point<_Float16> : public true_type {};
#endif
template <> struct __libcpp_is_floating_point<float> : public true_type {};
template <> struct __libcpp_is_floating_point<double> : public true_type {};
template <> struct __libcpp_is_floating_point<long double> : public true_type {};

View File

@ -1258,14 +1258,20 @@ void ELFObjectWriter::executePostLayoutBinding(MCAssembler &Asm,
if (!Symbol.isUndefined() && !Rest.startswith("@@@"))
continue;
// FIXME: produce a better error message.
// FIXME: Get source locations for these errors or diagnose them earlier.
if (Symbol.isUndefined() && Rest.startswith("@@") &&
!Rest.startswith("@@@"))
report_fatal_error("A @@ version cannot be undefined");
!Rest.startswith("@@@")) {
Asm.getContext().reportError(SMLoc(), "versioned symbol " + AliasName +
" must be defined");
continue;
}
if (Renames.count(&Symbol) && Renames[&Symbol] != Alias)
report_fatal_error(llvm::Twine("Multiple symbol versions defined for ") +
Symbol.getName());
if (Renames.count(&Symbol) && Renames[&Symbol] != Alias) {
Asm.getContext().reportError(
SMLoc(), llvm::Twine("multiple symbol versions defined for ") +
Symbol.getName());
continue;
}
Renames.insert(std::make_pair(&Symbol, Alias));
}
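For context, a minimal sketch (not part of this commit; the symbol and version names are made up) of the kind of input these diagnostics are about: a .symver default-version alias ("@@") must be backed by a definition, and with the change above a violation now produces an ordinary reportError() diagnostic instead of aborting the whole assembly via report_fatal_error().

/*
 * Hypothetical translation unit using GNU-style symbol versioning.
 * "foo@@VERS_1.0" marks the default version of foo, so foo must be
 * defined in this object; leaving it undefined is the case that now
 * reports "versioned symbol ... must be defined".
 */
int foo(void) { return (42); }            /* definition backing the alias */
__asm__(".symver foo, foo@@VERS_1.0");    /* default-version alias */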

View File

@ -26,6 +26,8 @@
..
modules
..
uboot
..
zfs
..
..

View File

@ -6,6 +6,7 @@ LIBROKEN_A= ${.OBJDIR:H:H}/lib/libroken/libroken.a
LIBADD= vers
LDADD= ${LIBROKEN_A}
DPADD= ${LIBROKEN_A}
MK_PIE:= no
SRCS= \
asn1parse.y \

View File

@ -6,6 +6,7 @@ LIBADD= vers
LDADD= ${LIBROKEN_A}
DPADD= ${LIBROKEN_A}
MAN=
MK_PIE:= no
SRCS= roken.h \
slc-gram.y \

View File

@ -2,6 +2,8 @@
.include <bsd.compiler.mk>
MK_PIE:= no # Explicit libXXX.a references
.if ${COMPILER_TYPE} == "clang"
DEBUG_FILES_CFLAGS= -gline-tables-only
.else

View File

@ -18,6 +18,7 @@ SRCS+= Support/Errno.cpp
SRCS+= Support/Error.cpp
SRCS+= Support/ErrorHandling.cpp
SRCS+= Support/FoldingSet.cpp
SRCS+= Support/FormatVariadic.cpp
SRCS+= Support/FormattedStream.cpp
SRCS+= Support/Hashing.cpp
SRCS+= Support/Host.cpp

View File

@ -45,6 +45,11 @@ __FBSDID("$FreeBSD$");
#include "be.h"
#include "be_impl.h"
struct be_destroy_data {
libbe_handle_t *lbh;
char *snapname;
};
#if SOON
static int be_create_child_noent(libbe_handle_t *lbh, const char *active,
const char *child_path);
@ -186,12 +191,38 @@ be_nicenum(uint64_t num, char *buf, size_t buflen)
static int
be_destroy_cb(zfs_handle_t *zfs_hdl, void *data)
{
char path[BE_MAXPATHLEN];
struct be_destroy_data *bdd;
zfs_handle_t *snap;
int err;
if ((err = zfs_iter_children(zfs_hdl, be_destroy_cb, data)) != 0)
return (err);
if ((err = zfs_destroy(zfs_hdl, false)) != 0)
bdd = (struct be_destroy_data *)data;
if (bdd->snapname == NULL) {
err = zfs_iter_children(zfs_hdl, be_destroy_cb, data);
if (err != 0)
return (err);
return (zfs_destroy(zfs_hdl, false));
}
/* If we're dealing with snapshots instead, delete that one alone */
err = zfs_iter_filesystems(zfs_hdl, be_destroy_cb, data);
if (err != 0)
return (err);
/*
* This part is intentionally glossing over any potential errors,
* because there's a lot less potential for errors when we're cleaning
* up snapshots rather than a full deep BE. The primary error case
* here is the snapshot not existing in the first place, which the
* caller will likely deem insignificant as long as it does not
* exist after the call. Thus, such a missing snapshot shouldn't jam
* up the destruction.
*/
snprintf(path, sizeof(path), "%s@%s", zfs_get_name(zfs_hdl),
bdd->snapname);
if (!zfs_dataset_exists(bdd->lbh->lzh, path, ZFS_TYPE_SNAPSHOT))
return (0);
snap = zfs_open(bdd->lbh->lzh, path, ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
zfs_destroy(snap, false);
return (0);
}
@ -199,22 +230,26 @@ be_destroy_cb(zfs_handle_t *zfs_hdl, void *data)
* Destroy the boot environment or snapshot specified by the name
* parameter. Options are or'd together with the possible values:
* BE_DESTROY_FORCE : forces operation on mounted datasets
* BE_DESTROY_ORIGIN: destroy the origin snapshot as well
*/
int
be_destroy(libbe_handle_t *lbh, const char *name, int options)
{
struct be_destroy_data bdd;
char origin[BE_MAXPATHLEN], path[BE_MAXPATHLEN];
zfs_handle_t *fs;
char *p;
char *snapdelim;
int err, force, mounted;
size_t rootlen;
p = path;
bdd.lbh = lbh;
bdd.snapname = NULL;
force = options & BE_DESTROY_FORCE;
*origin = '\0';
be_root_concat(lbh, name, path);
if (strchr(name, '@') == NULL) {
if ((snapdelim = strchr(path, '@')) == NULL) {
if (!zfs_dataset_exists(lbh->lzh, path, ZFS_TYPE_FILESYSTEM))
return (set_error(lbh, BE_ERR_NOENT));
@ -222,9 +257,10 @@ be_destroy(libbe_handle_t *lbh, const char *name, int options)
strcmp(path, lbh->bootfs) == 0)
return (set_error(lbh, BE_ERR_DESTROYACT));
fs = zfs_open(lbh->lzh, p, ZFS_TYPE_FILESYSTEM);
fs = zfs_open(lbh->lzh, path, ZFS_TYPE_FILESYSTEM);
if (fs == NULL)
return (set_error(lbh, BE_ERR_ZFSOPEN));
if ((options & BE_DESTROY_ORIGIN) != 0 &&
zfs_prop_get(fs, ZFS_PROP_ORIGIN, origin, sizeof(origin),
NULL, NULL, 0, 1) != 0)
@ -233,41 +269,57 @@ be_destroy(libbe_handle_t *lbh, const char *name, int options)
if (!zfs_dataset_exists(lbh->lzh, path, ZFS_TYPE_SNAPSHOT))
return (set_error(lbh, BE_ERR_NOENT));
fs = zfs_open(lbh->lzh, p, ZFS_TYPE_SNAPSHOT);
if (fs == NULL)
bdd.snapname = strdup(snapdelim + 1);
if (bdd.snapname == NULL)
return (set_error(lbh, BE_ERR_NOMEM));
*snapdelim = '\0';
fs = zfs_open(lbh->lzh, path, ZFS_TYPE_DATASET);
if (fs == NULL) {
free(bdd.snapname);
return (set_error(lbh, BE_ERR_ZFSOPEN));
}
}
/* Check if mounted, unmount if force is specified */
if ((mounted = zfs_is_mounted(fs, NULL)) != 0) {
if (force)
if (force) {
zfs_unmount(fs, NULL, 0);
else
} else {
free(bdd.snapname);
return (set_error(lbh, BE_ERR_DESTROYMNT));
}
}
if ((err = be_destroy_cb(fs, NULL)) != 0) {
err = be_destroy_cb(fs, &bdd);
zfs_close(fs);
free(bdd.snapname);
if (err != 0) {
/* Children are still present or the mount is referenced */
if (err == EBUSY)
return (set_error(lbh, BE_ERR_DESTROYMNT));
return (set_error(lbh, BE_ERR_UNKNOWN));
}
if (*origin != '\0') {
fs = zfs_open(lbh->lzh, origin, ZFS_TYPE_SNAPSHOT);
if (fs == NULL)
return (set_error(lbh, BE_ERR_ZFSOPEN));
err = zfs_destroy(fs, false);
if (err == EBUSY)
return (set_error(lbh, BE_ERR_DESTROYMNT));
else if (err != 0)
return (set_error(lbh, BE_ERR_UNKNOWN));
}
if ((options & BE_DESTROY_ORIGIN) == 0)
return (0);
return (0);
/* The origin can't possibly be shorter than the BE root */
rootlen = strlen(lbh->root);
if (*origin == '\0' || strlen(origin) <= rootlen + 1)
return (set_error(lbh, BE_ERR_INVORIGIN));
/*
* We'll be chopping off the BE root and running this back through
* be_destroy, so that we properly handle the origin snapshot whether
* it be that of a deep BE or not.
*/
if (strncmp(origin, lbh->root, rootlen) != 0 || origin[rootlen] != '/')
return (0);
return (be_destroy(lbh, origin + rootlen + 1,
options & ~BE_DESTROY_ORIGIN));
}
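As a usage illustration only (a hypothetical libbe(3) consumer, not part of this commit; the libbe_init()/libbe_close() calls are assumptions about the contemporaneous API), the BE_DESTROY_ORIGIN flag added above might be exercised roughly like this:

/*
 * Hypothetical libbe(3) consumer: destroy a BE and, via the new flag,
 * its origin snapshot as well.  Illustrative only.
 */
#include <stdio.h>
#include <be.h>

int
main(int argc, char *argv[])
{
        libbe_handle_t *lbh;
        int err;

        if (argc != 2) {
                fprintf(stderr, "usage: %s bename\n", argv[0]);
                return (1);
        }
        if ((lbh = libbe_init()) == NULL)       /* assumed no-argument init */
                return (1);
        err = be_destroy(lbh, argv[1], BE_DESTROY_FORCE | BE_DESTROY_ORIGIN);
        if (err != 0)
                fprintf(stderr, "be_destroy: %s\n",
                    libbe_error_description(lbh));
        libbe_close(lbh);
        return (err == 0 ? 0 : 1);
}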
int
be_snapshot(libbe_handle_t *lbh, const char *source, const char *snap_name,
bool recursive, char *result)

View File

@ -59,6 +59,7 @@ typedef enum be_error {
BE_ERR_NOPOOL, /* operation not supported on this pool */
BE_ERR_NOMEM, /* insufficient memory */
BE_ERR_UNKNOWN, /* unknown error */
BE_ERR_INVORIGIN, /* invalid origin */
} be_error_t;

View File

@ -105,6 +105,9 @@ libbe_error_description(libbe_handle_t *lbh)
case BE_ERR_UNKNOWN:
return ("unknown error");
case BE_ERR_INVORIGIN:
return ("invalid origin");
default:
assert(lbh->error == BE_ERR_SUCCESS);
return ("no error");

View File

@ -28,7 +28,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd February 11, 2019
.Dd February 12, 2019
.Dt LIBBE 3
.Os
.Sh NAME
@ -489,6 +489,8 @@ BE_ERR_NOPOOL
BE_ERR_NOMEM
.It
BE_ERR_UNKNOWN
.It
BE_ERR_INVORIGIN
.El
.Sh SEE ALSO
.Xr bectl 8

View File

@ -122,6 +122,7 @@ NOASM=
.endif
.if ${LIBC_ARCH} == "i386" || ${LIBC_ARCH} == "amd64"
.include "${LIBC_SRCTOP}/x86/sys/Makefile.inc"
.include "${LIBC_SRCTOP}/x86/gen/Makefile.inc"
.endif
.if ${MK_NIS} != "no"
CFLAGS+= -DYP

View File

@ -2,7 +2,7 @@
# $FreeBSD$
SRCS+= _setjmp.S _set_tp.c rfork_thread.S setjmp.S sigsetjmp.S \
fabs.S getcontextx.c \
fabs.S \
infinity.c ldexp.c makecontext.c signalcontext.c \
flt_rounds.c fpgetmask.c fpsetmask.c fpgetprec.c fpsetprec.c \
fpgetround.c fpsetround.c fpgetsticky.c

View File

@ -178,4 +178,6 @@ extension and should not be used if portability is desired.
The
.Fn readpassphrase
function first appeared in
.Fx 4.6
and
.Ox 2.9 .

View File

@ -2,5 +2,5 @@
# $FreeBSD$
SRCS+= _ctx_start.S _setjmp.S _set_tp.c fabs.S \
flt_rounds.c getcontextx.c infinity.c ldexp.c makecontext.c \
flt_rounds.c infinity.c ldexp.c makecontext.c \
rfork_thread.S setjmp.S signalcontext.c sigsetjmp.S

View File

@ -1,145 +0,0 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
* Copyright (c) 2011 Konstantin Belousov <kib@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/ucontext.h>
#include <errno.h>
#include <stdarg.h>
#include <stdlib.h>
#include <machine/npx.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
static int xstate_sz = -1;
int
__getcontextx_size(void)
{
u_int p[4];
int cpuid_supported;
if (xstate_sz == -1) {
__asm __volatile(
" pushfl\n"
" popl %%eax\n"
" movl %%eax,%%ecx\n"
" xorl $0x200000,%%eax\n"
" pushl %%eax\n"
" popfl\n"
" pushfl\n"
" popl %%eax\n"
" xorl %%eax,%%ecx\n"
" je 1f\n"
" movl $1,%0\n"
" jmp 2f\n"
"1: movl $0,%0\n"
"2:\n"
: "=r" (cpuid_supported) : : "eax", "ecx");
if (cpuid_supported) {
__asm __volatile(
" pushl %%ebx\n"
" cpuid\n"
" movl %%ebx,%1\n"
" popl %%ebx\n"
: "=a" (p[0]), "=r" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (0x1));
if ((p[2] & CPUID2_OSXSAVE) != 0) {
__asm __volatile(
" pushl %%ebx\n"
" cpuid\n"
" movl %%ebx,%1\n"
" popl %%ebx\n"
: "=a" (p[0]), "=r" (p[1]), "=c" (p[2]),
"=d" (p[3])
: "0" (0xd), "2" (0x0));
xstate_sz = p[1] - sizeof(struct savexmm);
} else
xstate_sz = 0;
} else
xstate_sz = 0;
}
return (sizeof(ucontext_t) + xstate_sz);
}
int
__fillcontextx2(char *ctx)
{
struct i386_get_xfpustate xfpu;
ucontext_t *ucp;
ucp = (ucontext_t *)ctx;
if (xstate_sz != 0) {
xfpu.addr = (char *)(ucp + 1);
xfpu.len = xstate_sz;
if (sysarch(I386_GET_XFPUSTATE, &xfpu) == -1)
return (-1);
ucp->uc_mcontext.mc_xfpustate = (__register_t)xfpu.addr;
ucp->uc_mcontext.mc_xfpustate_len = xstate_sz;
ucp->uc_mcontext.mc_flags |= _MC_HASFPXSTATE;
} else {
ucp->uc_mcontext.mc_xfpustate = 0;
ucp->uc_mcontext.mc_xfpustate_len = 0;
}
return (0);
}
int
__fillcontextx(char *ctx)
{
ucontext_t *ucp;
ucp = (ucontext_t *)ctx;
if (getcontext(ucp) == -1)
return (-1);
__fillcontextx2(ctx);
return (0);
}
__weak_reference(__getcontextx, getcontextx);
ucontext_t *
__getcontextx(void)
{
char *ctx;
int error;
ctx = malloc(__getcontextx_size());
if (ctx == NULL)
return (NULL);
if (__fillcontextx(ctx) == -1) {
error = errno;
free(ctx);
errno = error;
return (NULL);
}
return ((ucontext_t *)ctx);
}

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 25, 2019
.Dd February 15, 2019
.Dt SENDFILE 2
.Os
.Sh NAME
@ -224,6 +224,19 @@ implementation of
.Fn sendfile
is "zero-copy", meaning that it has been optimized so that copying of the file data is avoided.
.Sh TUNING
.Ss physical paging buffers
.Fn sendfile
uses the vnode pager to read file pages into memory.
The pager uses a pool of physical buffers to run its I/O operations.
When the system runs out of pbufs,
.Fn sendfile
will block and report the state
.Dq Li zonelimit .
The size of the pool can be tuned with the
.Va vm.vnode_pbufs
.Xr loader.conf 5
tunable, and can be checked at runtime with the
.Xr sysctl 8
OID of the same name.
.Ss sendfile(2) buffers
On some architectures, this system call internally uses a special
.Fn sendfile
buffer
@ -279,9 +292,11 @@ buffers usage respectively.
These values may also be viewed through
.Nm netstat Fl m .
.Pp
If a value of zero is reported for
.Va kern.ipc.nsfbufs ,
your architecture does not need to use
If the
.Xr sysctl 8
OID
.Va kern.ipc.nsfbufs
does not exist, your architecture does not need to use
.Fn sendfile
buffers because their task can be efficiently performed
by the generic virtual memory structures.
@ -363,11 +378,13 @@ does not support
The socket peer has closed the connection.
.El
.Sh SEE ALSO
.Xr loader.conf 5 ,
.Xr netstat 1 ,
.Xr open 2 ,
.Xr send 2 ,
.Xr socket 2 ,
.Xr writev 2 ,
.Xr sysctl 8 ,
.Xr tuning 7
.Rs
.%A K. Elmeleegy
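A standalone sketch (not part of the manual page) showing how the OIDs mentioned in the new TUNING text can be probed from C; it assumes both OIDs are plain integers and, as the text explains, that kern.ipc.nsfbufs may simply be absent on some architectures:

/* Probe the sysctl OIDs discussed above; absence of kern.ipc.nsfbufs is */
/* normal on architectures that do not use sendfile buffers. */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

static void
show(const char *oid)
{
        int val;
        size_t len = sizeof(val);

        if (sysctlbyname(oid, &val, &len, NULL, 0) == 0)
                printf("%s = %d\n", oid, val);
        else
                printf("%s does not exist here\n", oid);
}

int
main(void)
{
        show("vm.vnode_pbufs");
        show("kern.ipc.nsfbufs");
        return (0);
}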

View File

@ -0,0 +1,6 @@
# $FreeBSD$
.PATH: ${LIBC_SRCTOP}/x86/gen
SRCS+= \
getcontextx.c

View File

@ -35,51 +35,78 @@ __FBSDID("$FreeBSD$");
#include <stdarg.h>
#include <stdlib.h>
#include <machine/cpufunc.h>
#include <machine/fpu.h>
#include <machine/specialreg.h>
#include <machine/sysarch.h>
#include <x86/ifunc.h>
#include <x86/fpu.h>
static int xstate_sz = -1;
#if defined __i386__
#define X86_GET_XFPUSTATE I386_GET_XFPUSTATE
typedef struct savexmm savex86_t;
typedef struct i386_get_xfpustate x86_get_xfpustate_t;
#elif defined __amd64__
#define X86_GET_XFPUSTATE AMD64_GET_XFPUSTATE
typedef struct savefpu savex86_t;
typedef struct amd64_get_xfpustate x86_get_xfpustate_t;
#else
#error "Wrong arch"
#endif
int
__getcontextx_size(void)
static int xstate_sz = 0;
static int
__getcontextx_size_xfpu(void)
{
u_int p[4];
if (xstate_sz == -1) {
do_cpuid(1, p);
if ((p[2] & CPUID2_OSXSAVE) != 0) {
cpuid_count(0xd, 0x0, p);
xstate_sz = p[1] - sizeof(struct savefpu);
} else
xstate_sz = 0;
}
return (sizeof(ucontext_t) + xstate_sz);
}
int
__fillcontextx2(char *ctx)
DEFINE_UIFUNC(, int, __getcontextx_size, (void), static)
{
struct amd64_get_xfpustate xfpu;
u_int p[4];
if ((cpu_feature2 & CPUID2_OSXSAVE) != 0) {
cpuid_count(0xd, 0x0, p);
xstate_sz = p[1] - sizeof(savex86_t);
}
return (__getcontextx_size_xfpu);
}
static int
__fillcontextx2_xfpu(char *ctx)
{
x86_get_xfpustate_t xfpu;
ucontext_t *ucp;
ucp = (ucontext_t *)ctx;
if (xstate_sz != 0) {
xfpu.addr = (char *)(ucp + 1);
xfpu.len = xstate_sz;
if (sysarch(AMD64_GET_XFPUSTATE, &xfpu) == -1)
return (-1);
ucp->uc_mcontext.mc_xfpustate = (__register_t)xfpu.addr;
ucp->uc_mcontext.mc_xfpustate_len = xstate_sz;
ucp->uc_mcontext.mc_flags |= _MC_HASFPXSTATE;
} else {
ucp->uc_mcontext.mc_xfpustate = 0;
ucp->uc_mcontext.mc_xfpustate_len = 0;
}
xfpu.addr = (char *)(ucp + 1);
xfpu.len = xstate_sz;
if (sysarch(X86_GET_XFPUSTATE, &xfpu) == -1)
return (-1);
ucp->uc_mcontext.mc_xfpustate = (__register_t)xfpu.addr;
ucp->uc_mcontext.mc_xfpustate_len = xstate_sz;
ucp->uc_mcontext.mc_flags |= _MC_HASFPXSTATE;
return (0);
}
static int
__fillcontextx2_noxfpu(char *ctx)
{
ucontext_t *ucp;
ucp = (ucontext_t *)ctx;
ucp->uc_mcontext.mc_xfpustate = 0;
ucp->uc_mcontext.mc_xfpustate_len = 0;
return (0);
}
DEFINE_UIFUNC(, int, __fillcontextx2, (char *), static)
{
return ((cpu_feature2 & CPUID2_OSXSAVE) != 0 ? __fillcontextx2_xfpu :
__fillcontextx2_noxfpu);
}
int
__fillcontextx(char *ctx)
{

View File

@ -53,31 +53,6 @@ __FBSDID("$FreeBSD$");
#include <x86/ifunc.h>
#include "libc_private.h"
static void
cpuidp(u_int leaf, u_int p[4])
{
__asm __volatile(
#if defined(__i386__)
" pushl %%ebx\n"
#endif
" cpuid\n"
#if defined(__i386__)
" movl %%ebx,%1\n"
" popl %%ebx"
#endif
: "=a" (p[0]),
#if defined(__i386__)
"=r" (p[1]),
#elif defined(__amd64__)
"=b" (p[1]),
#else
#error "Arch"
#endif
"=c" (p[2]), "=d" (p[3])
: "0" (leaf));
}
static void
rdtsc_mb_lfence(void)
{
@ -100,12 +75,12 @@ rdtsc_mb_none(void)
DEFINE_UIFUNC(static, void, rdtsc_mb, (void), static)
{
u_int p[4];
/* Not a typo, string matches our cpuidp() registers use. */
/* Not a typo, string matches our do_cpuid() registers use. */
static const char intel_id[] = "GenuntelineI";
if ((cpu_feature & CPUID_SSE2) == 0)
return (rdtsc_mb_none);
cpuidp(0, p);
do_cpuid(0, p);
return (memcmp(p + 1, intel_id, sizeof(intel_id) - 1) == 0 ?
rdtsc_mb_lfence : rdtsc_mb_mfence);
}

View File

@ -213,6 +213,15 @@ memstat_sysctl_uma(struct memory_type_list *list, int flags)
mtp->mt_numfrees += upsp->ups_frees;
}
/*
* Values for uth_allocs and uth_frees are snapshots.
* It may happen that the kernel reports that the number of frees
* is greater than the number of allocs. See counter(9) for
* details.
*/
if (mtp->mt_numallocs < mtp->mt_numfrees)
mtp->mt_numallocs = mtp->mt_numfrees;
mtp->mt_size = uthp->uth_size;
mtp->mt_rsize = uthp->uth_rsize;
mtp->mt_memalloced = mtp->mt_numallocs * uthp->uth_size;

View File

@ -72,14 +72,15 @@ _tcb_set(struct tcb *tcb)
static __inline struct tcb *
_tcb_get(void)
{
register uint8_t *_tp;
register struct tcb *tcb;
#ifdef __powerpc64__
__asm __volatile("mr %0,13" : "=r"(_tp));
__asm __volatile("addi %0,13,%1" : "=r"(tcb) : "i"(-TP_OFFSET));
#else
__asm __volatile("mr %0,2" : "=r"(_tp));
__asm __volatile("addi %0,2,%1" : "=r"(tcb) : "i"(-TP_OFFSET));
#endif
return ((struct tcb *)(_tp - TP_OFFSET));
return (tcb);
}
static __inline struct pthread *

View File

@ -13,6 +13,7 @@ name="nfsd"
desc="Remote NFS server"
rcvar="nfs_server_enable"
command="/usr/sbin/${name}"
nfs_server_vhost=""
load_rc_config $name
start_precmd="nfsd_precmd"
@ -20,6 +21,7 @@ sig_stop="USR1"
nfsd_precmd()
{
local _vhost
rc_flags="${nfs_server_flags}"
# Load the modules now, so that the vfs.nfsd sysctl
@ -46,6 +48,9 @@ nfsd_precmd()
force_depend rpcbind || return 1
force_depend mountd || return 1
if [ -n "${nfs_server_vhost}" ]; then
command_args="-V \"${nfs_server_vhost}\""
fi
}
run_rc_command "$1"

View File

@ -7,6 +7,7 @@
.include <src.opts.mk>
PACKAGE= clibs
MK_BIND_NOW= no
MK_PIE= no # Always position independent using local rules
MK_SSP= no
CONFS= libmap.conf

View File

@ -196,6 +196,7 @@ main(int argc, char **argv)
usage();
mdtype = MD_MALLOC;
have_mdtype = true;
argappend(&mdconfig_arg, "-o reserve");
break;
case 'm':
argappend(&newfs_arg, "-m %s", optarg);

View File

@ -177,7 +177,7 @@ static void
firmware(const struct nvme_function *nf, int argc, char *argv[])
{
int fd = -1, slot = 0;
int a_flag, s_flag, f_flag;
int a_flag, f_flag;
int activate_action, reboot_required;
int opt;
char *p, *image = NULL;
@ -188,7 +188,7 @@ firmware(const struct nvme_function *nf, int argc, char *argv[])
uint8_t fw_slot1_ro, fw_num_slots;
struct nvme_controller_data cdata;
a_flag = s_flag = f_flag = false;
a_flag = f_flag = false;
while ((opt = getopt(argc, argv, "af:s:")) != -1) {
switch (opt) {
@ -214,7 +214,6 @@ firmware(const struct nvme_function *nf, int argc, char *argv[])
"7.\n", optarg);
usage(nf);
}
s_flag = true;
break;
case 'f':
image = optarg;

View File

@ -1,6 +1,6 @@
.\" DO NOT EDIT-- this file is @generated by tools/build/options/makeman.
.\" $FreeBSD$
.Dd January 31, 2019
.Dd February 15, 2019
.Dt SRC.CONF 5
.Os
.Sh NAME
@ -406,7 +406,8 @@ Set to build the Clang C/C++ compiler during the bootstrap phase of the build.
This is a default setting on
amd64/amd64, arm/arm, arm/armv6, arm/armv7, arm64/aarch64 and i386/i386.
.It Va WITH_CLANG_EXTRAS
Set to build additional clang and llvm tools, such as bugpoint.
Set to build additional clang and llvm tools, such as bugpoint and
clang-format.
.It Va WITHOUT_CLANG_FULL
Set to avoid building the ARCMigrate, Rewriter and StaticAnalyzer components of
the Clang C/C++ compiler.
@ -1542,6 +1543,9 @@ When set, it enforces these options:
.It
.Va WITHOUT_AUTHPF
.El
.It Va WITH_PIE
Build dynamically linked binaries as
Position-Independent Executables (PIE).
.It Va WITHOUT_PKGBOOTSTRAP
Set to not build
.Xr pkg 7

View File

@ -91,13 +91,16 @@ CTFFLAGS+= -g
# prefer .s to a .c, add .po, remove stuff not used in the BSD libraries
# .pico used for PIC object files
# .nossppico used for NOSSP PIC object files
.SUFFIXES: .out .o .bc .ll .po .pico .nossppico .S .asm .s .c .cc .cpp .cxx .C .f .y .l .ln
# .pieo used for PIE object files
.SUFFIXES: .out .o .bc .ll .po .pico .nossppico .pieo .S .asm .s .c .cc .cpp .cxx .C .f .y .l .ln
.if !defined(PICFLAG)
.if ${MACHINE_CPUARCH} == "sparc64"
PICFLAG=-fPIC
PIEFLAG=-fPIE
.else
PICFLAG=-fpic
PIEFLAG=-fpie
.endif
.endif
@ -115,6 +118,10 @@ PO_FLAG=-pg
${CC} ${PICFLAG} -DPIC ${SHARED_CFLAGS:C/^-fstack-protector.*$//} ${CFLAGS:C/^-fstack-protector.*$//} -c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
.c.pieo:
${CC} ${PIEFLAG} -DPIC ${SHARED_CFLAGS} ${CFLAGS} -c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
.cc.po .C.po .cpp.po .cxx.po:
${CXX} ${PO_FLAG} ${STATIC_CXXFLAGS} ${PO_CXXFLAGS} -c ${.IMPSRC} -o ${.TARGET}
@ -124,6 +131,9 @@ PO_FLAG=-pg
.cc.nossppico .C.nossppico .cpp.nossppico .cxx.nossppico:
${CXX} ${PICFLAG} -DPIC ${SHARED_CXXFLAGS:C/^-fstack-protector.*$//} ${CXXFLAGS:C/^-fstack-protector.*$//} -c ${.IMPSRC} -o ${.TARGET}
.cc.pieo .C.pieo .cpp.pieo .cxx.pieo:
${CXX} ${PIEFLAG} ${SHARED_CXXFLAGS} ${CXXFLAGS} -c ${.IMPSRC} -o ${.TARGET}
.f.po:
${FC} -pg ${FFLAGS} -o ${.TARGET} -c ${.IMPSRC}
${CTFCONVERT_CMD}
@ -136,7 +146,7 @@ PO_FLAG=-pg
${FC} ${PICFLAG} -DPIC ${FFLAGS:C/^-fstack-protector.*$//} -o ${.TARGET} -c ${.IMPSRC}
${CTFCONVERT_CMD}
.s.po .s.pico .s.nossppico:
.s.po .s.pico .s.nossppico .s.pieo:
${AS} ${AFLAGS} -o ${.TARGET} ${.IMPSRC}
${CTFCONVERT_CMD}
@ -155,6 +165,11 @@ PO_FLAG=-pg
${CFLAGS:C/^-fstack-protector.*$//} ${ACFLAGS} -c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
.asm.pieo:
${CC:N${CCACHE_BIN}} -x assembler-with-cpp ${PIEFLAG} -DPIC \
${CFLAGS} ${ACFLAGS} -c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
.S.po:
${CC:N${CCACHE_BIN}} -DPROF ${PO_CFLAGS} ${ACFLAGS} -c ${.IMPSRC} \
-o ${.TARGET}
@ -170,6 +185,11 @@ PO_FLAG=-pg
-c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
.S.pieo:
${CC:N${CCACHE_BIN}} ${PIEFLAG} -DPIC ${CFLAGS} ${ACFLAGS} \
-c ${.IMPSRC} -o ${.TARGET}
${CTFCONVERT_CMD}
_LIBDIR:=${LIBDIR}
_SHLIBDIR:=${SHLIBDIR}
@ -334,6 +354,20 @@ lib${LIB_PRIVATE}${LIB}_nossp_pic.a: ${NOSSPSOBJS}
.endif # !defined(INTERNALLIB)
.if defined(INTERNALLIB) && ${MK_PIE} != "no"
PIEOBJS+= ${OBJS:.o=.pieo}
DEPENDOBJS+= ${PIEOBJS}
CLEANFILES+= ${PIEOBJS}
_LIBS+= lib${LIB_PRIVATE}${LIB}_pie.a
lib${LIB_PRIVATE}${LIB}_pie.a: ${PIEOBJS}
@${ECHO} building pie ${LIB} library
@rm -f ${.TARGET}
${AR} ${ARFLAGS} ${.TARGET} ${PIEOBJS} ${ARADD}
${RANLIB} ${RANLIBFLAGS} ${.TARGET}
.endif
.if defined(_SKIP_BUILD)
all:
.else

View File

@ -73,6 +73,7 @@ __DEFAULT_NO_OPTIONS = \
CCACHE_BUILD \
CTF \
INSTALL_AS_USER \
PIE \
RETPOLINE \
STALE_STAGED

View File

@ -38,11 +38,16 @@ MK_DEBUG_FILES= no
.if ${MK_BIND_NOW} != "no"
LDFLAGS+= -Wl,-znow
.endif
.if ${MK_PIE} != "no" && (!defined(NO_SHARED) || ${NO_SHARED:tl} == "no")
CFLAGS+= -fPIE
CXXFLAGS+= -fPIE
LDFLAGS+= -pie
.endif
.if ${MK_RETPOLINE} != "no"
CFLAGS+= -mretpoline
CXXFLAGS+= -mretpoline
# retpolineplt is broken with static linking (PR 233336)
.if !defined(NO_SHARED) || ${NO_SHARED} == "no" || ${NO_SHARED} == "NO"
.if !defined(NO_SHARED) || ${NO_SHARED:tl} == "no"
LDFLAGS+= -Wl,-zretpolineplt
.endif
.endif
@ -68,7 +73,7 @@ TAGS+= package=${PACKAGE:Uruntime}
TAG_ARGS= -T ${TAGS:[*]:S/ /,/g}
.endif
.if defined(NO_SHARED) && (${NO_SHARED} != "no" && ${NO_SHARED} != "NO")
.if defined(NO_SHARED) && ${NO_SHARED:tl} != "no"
LDFLAGS+= -static
.endif

View File

@ -385,6 +385,10 @@ LDADD_gtest_main= -lprivategtest_main
LIB${_l:tu}?= ${LIBDESTDIR}${LIBDIR_BASE}/libprivate${_l}.a
.endfor
.if ${MK_PIE} != "no"
PIE_SUFFIX= _pie
.endif
.for _l in ${_LIBRARIES}
.if ${_INTERNALLIBS:M${_l}} || !defined(SYSROOT)
LDADD_${_l}_L+= -L${LIB${_l:tu}DIR}
@ -392,12 +396,14 @@ LDADD_${_l}_L+= -L${LIB${_l:tu}DIR}
DPADD_${_l}?= ${LIB${_l:tu}}
.if ${_PRIVATELIBS:M${_l}}
LDADD_${_l}?= -lprivate${_l}
.elif ${_INTERNALLIBS:M${_l}}
LDADD_${_l}?= ${LDADD_${_l}_L} -l${_l:S/${PIE_SUFFIX}//}${PIE_SUFFIX}
.else
LDADD_${_l}?= ${LDADD_${_l}_L} -l${_l}
.endif
# Add in all dependencies for static linkage.
.if defined(_DP_${_l}) && (${_INTERNALLIBS:M${_l}} || \
(defined(NO_SHARED) && (${NO_SHARED} != "no" && ${NO_SHARED} != "NO")))
(defined(NO_SHARED) && ${NO_SHARED:tl} != "no"))
.for _d in ${_DP_${_l}}
DPADD_${_l}+= ${DPADD_${_d}}
LDADD_${_l}+= ${LDADD_${_d}}
@ -444,69 +450,69 @@ LDADD+= ${LDADD_${_l}}
# INTERNALLIB definitions.
LIBELFTCDIR= ${OBJTOP}/lib/libelftc
LIBELFTC?= ${LIBELFTCDIR}/libelftc.a
LIBELFTC?= ${LIBELFTCDIR}/libelftc${PIE_SUFFIX}.a
LIBPEDIR= ${OBJTOP}/lib/libpe
LIBPE?= ${LIBPEDIR}/libpe.a
LIBPE?= ${LIBPEDIR}/libpe${PIE_SUFFIX}.a
LIBOPENBSDDIR= ${OBJTOP}/lib/libopenbsd
LIBOPENBSD?= ${LIBOPENBSDDIR}/libopenbsd.a
LIBOPENBSD?= ${LIBOPENBSDDIR}/libopenbsd${PIE_SUFFIX}.a
LIBSMDIR= ${OBJTOP}/lib/libsm
LIBSM?= ${LIBSMDIR}/libsm.a
LIBSM?= ${LIBSMDIR}/libsm${PIE_SUFFIX}.a
LIBSMDBDIR= ${OBJTOP}/lib/libsmdb
LIBSMDB?= ${LIBSMDBDIR}/libsmdb.a
LIBSMDB?= ${LIBSMDBDIR}/libsmdb${PIE_SUFFIX}.a
LIBSMUTILDIR= ${OBJTOP}/lib/libsmutil
LIBSMUTIL?= ${LIBSMUTILDIR}/libsmutil.a
LIBSMUTIL?= ${LIBSMUTILDIR}/libsmutil${PIE_SUFFIX}.a
LIBNETBSDDIR?= ${OBJTOP}/lib/libnetbsd
LIBNETBSD?= ${LIBNETBSDDIR}/libnetbsd.a
LIBNETBSD?= ${LIBNETBSDDIR}/libnetbsd${PIE_SUFFIX}.a
LIBVERSDIR?= ${OBJTOP}/kerberos5/lib/libvers
LIBVERS?= ${LIBVERSDIR}/libvers.a
LIBVERS?= ${LIBVERSDIR}/libvers${PIE_SUFFIX}.a
LIBSLDIR= ${OBJTOP}/kerberos5/lib/libsl
LIBSL?= ${LIBSLDIR}/libsl.a
LIBSL?= ${LIBSLDIR}/libsl${PIE_SUFFIX}.a
LIBIPFDIR= ${OBJTOP}/sbin/ipf/libipf
LIBIPF?= ${LIBIPFDIR}/libipf.a
LIBIPF?= ${LIBIPFDIR}/libipf${PIE_SUFFIX}.a
LIBTELNETDIR= ${OBJTOP}/lib/libtelnet
LIBTELNET?= ${LIBTELNETDIR}/libtelnet.a
LIBTELNET?= ${LIBTELNETDIR}/libtelnet${PIE_SUFFIX}.a
LIBCRONDIR= ${OBJTOP}/usr.sbin/cron/lib
LIBCRON?= ${LIBCRONDIR}/libcron.a
LIBCRON?= ${LIBCRONDIR}/libcron${PIE_SUFFIX}.a
LIBNTPDIR= ${OBJTOP}/usr.sbin/ntp/libntp
LIBNTP?= ${LIBNTPDIR}/libntp.a
LIBNTP?= ${LIBNTPDIR}/libntp${PIE_SUFFIX}.a
LIBNTPEVENTDIR= ${OBJTOP}/usr.sbin/ntp/libntpevent
LIBNTPEVENT?= ${LIBNTPEVENTDIR}/libntpevent.a
LIBNTPEVENT?= ${LIBNTPEVENTDIR}/libntpevent${PIE_SUFFIX}.a
LIBOPTSDIR= ${OBJTOP}/usr.sbin/ntp/libopts
LIBOPTS?= ${LIBOPTSDIR}/libopts.a
LIBOPTS?= ${LIBOPTSDIR}/libopts${PIE_SUFFIX}.a
LIBPARSEDIR= ${OBJTOP}/usr.sbin/ntp/libparse
LIBPARSE?= ${LIBPARSEDIR}/libparse.a
LIBPARSE?= ${LIBPARSEDIR}/libparse${PIE_SUFFIX}.a
LIBLPRDIR= ${OBJTOP}/usr.sbin/lpr/common_source
LIBLPR?= ${LIBLPRDIR}/liblpr.a
LIBLPR?= ${LIBLPRDIR}/liblpr${PIE_SUFFIX}.a
LIBFIFOLOGDIR= ${OBJTOP}/usr.sbin/fifolog/lib
LIBFIFOLOG?= ${LIBFIFOLOGDIR}/libfifolog.a
LIBFIFOLOG?= ${LIBFIFOLOGDIR}/libfifolog${PIE_SUFFIX}.a
LIBBSNMPTOOLSDIR= ${OBJTOP}/usr.sbin/bsnmpd/tools/libbsnmptools
LIBBSNMPTOOLS?= ${LIBBSNMPTOOLSDIR}/libbsnmptools.a
LIBBSNMPTOOLS?= ${LIBBSNMPTOOLSDIR}/libbsnmptools${PIE_SUFFIX}.a
LIBAMUDIR= ${OBJTOP}/usr.sbin/amd/libamu
LIBAMU?= ${LIBAMUDIR}/libamu.a
LIBAMU?= ${LIBAMUDIR}/libamu${PIE_SUFFIX}.a
LIBBE?= ${LIBBEDIR}/libbe.a
LIBBE?= ${LIBBEDIR}/libbe${PIE_SUFFIX}.a
LIBPMCSTATDIR= ${OBJTOP}/lib/libpmcstat
LIBPMCSTAT?= ${LIBPMCSTATDIR}/libpmcstat.a
LIBPMCSTAT?= ${LIBPMCSTATDIR}/libpmcstat${PIE_SUFFIX}.a
LIBC_NOSSP_PICDIR= ${OBJTOP}/lib/libc
LIBC_NOSSP_PIC?= ${LIBC_NOSSP_PICDIR}/libc_nossp_pic.a

View File

@ -122,13 +122,15 @@ net_open(struct open_file *f, ...)
{
struct iodesc *d;
va_list args;
char *devname; /* Device part of file name (or NULL). */
struct devdesc *dev;
const char *devname; /* Device part of file name (or NULL). */
int error = 0;
va_start(args, f);
devname = va_arg(args, char*);
dev = va_arg(args, struct devdesc *);
va_end(args);
devname = dev->d_dev->dv_name;
/* Before opening another interface, close the previous one first. */
if (netdev_sock >= 0 && strcmp(devname, netdev_name) != 0)
net_cleanup();
@ -137,7 +139,7 @@ net_open(struct open_file *f, ...)
if (netdev_opens == 0) {
/* Find network interface. */
if (netdev_sock < 0) {
netdev_sock = netif_open(devname);
netdev_sock = netif_open(dev);
if (netdev_sock < 0) {
printf("net_open: netif_open() failed\n");
return (ENXIO);

View File

@ -75,7 +75,7 @@ display_size(uint64_t size, u_int sectorsize)
size /= 1024;
unit = 'M';
}
sprintf(buf, "%ld%cB", (long)size, unit);
sprintf(buf, "%4ld%cB", (long)size, unit);
return (buf);
}
@ -102,7 +102,6 @@ ptblread(void *d, void *buf, size_t blocks, uint64_t offset)
blocks * od->sectorsize, (char *)buf, NULL));
}
#define PWIDTH 35
static int
ptable_print(void *arg, const char *pname, const struct ptable_entry *part)
{
@ -112,16 +111,16 @@ ptable_print(void *arg, const char *pname, const struct ptable_entry *part)
struct ptable *table;
char line[80];
int res;
u_int sectsize;
uint64_t partsize;
pa = (struct print_args *)arg;
od = (struct open_disk *)pa->dev->dd.d_opendata;
sprintf(line, " %s%s: %s", pa->prefix, pname,
parttype2str(part->type));
if (pa->verbose)
sprintf(line, "%-*s%s", PWIDTH, line,
display_size(part->end - part->start + 1,
od->sectorsize));
strcat(line, "\n");
sectsize = od->sectorsize;
partsize = part->end - part->start + 1;
sprintf(line, " %s%s: %s\t%s\n", pa->prefix, pname,
parttype2str(part->type),
pa->verbose ? display_size(partsize, sectsize) : "");
if (pager_output(line))
return 1;
res = 0;
@ -131,10 +130,15 @@ ptable_print(void *arg, const char *pname, const struct ptable_entry *part)
dev.dd.d_unit = pa->dev->dd.d_unit;
dev.d_slice = part->index;
dev.d_partition = -1;
if (disk_open(&dev, part->end - part->start + 1,
od->sectorsize) == 0) {
table = ptable_open(&dev, part->end - part->start + 1,
od->sectorsize, ptblread);
if (disk_open(&dev, partsize, sectsize) == 0) {
/*
* disk_open() for partition -1 on a bsd slice assumes
* you want the first bsd partition. Reset things so
* that we're looking at the start of the raw slice.
*/
dev.d_partition = -1;
dev.d_offset = part->start;
table = ptable_open(&dev, partsize, sectsize, ptblread);
if (table != NULL) {
sprintf(line, " %s%s", pa->prefix, pname);
bsd.dev = pa->dev;
@ -149,7 +153,6 @@ ptable_print(void *arg, const char *pname, const struct ptable_entry *part)
return (res);
}
#undef PWIDTH
int
disk_print(struct disk_devdesc *dev, char *prefix, int verbose)

View File

@ -788,6 +788,9 @@ ptable_close(struct ptable *table)
{
struct pentry *entry;
if (table == NULL)
return;
while (!STAILQ_EMPTY(&table->entries)) {
entry = STAILQ_FIRST(&table->entries);
STAILQ_REMOVE_HEAD(&table->entries, entry);

View File

@ -7,6 +7,7 @@
LOADER_ADDRESS?=0x200000
LDFLAGS+= -nostdlib
LDFLAGS.lld+= -Wl,--no-rosegment
MK_PIE:= no
# BTX components
BTXDIR= ${BOOTOBJ}/i386/btx

View File

@ -545,32 +545,19 @@ probe_drive(struct zfsdsk *zdsk)
char *sec;
unsigned i;
/*
* If we find a vdev on the whole disk, stop here.
*/
if (vdev_probe(vdev_read2, zdsk, NULL) == 0)
return;
#ifdef LOADER_GELI_SUPPORT
/*
* Taste the disk, if it is GELI encrypted, decrypt it and check to see if
* it is a usable vdev then. Otherwise dig
* out the partition table and probe each slice/partition
* in turn for a vdev or GELI encrypted vdev.
* Taste the disk; if it is GELI encrypted, decrypt it, then dig out the
* partition table and probe each slice/partition in turn for a vdev or
* GELI-encrypted vdev.
*/
elba = drvsize_ext(zdsk);
if (elba > 0) {
elba--;
}
zdsk->gdev = geli_taste(vdev_read, zdsk, elba, "disk%u:0:");
if (zdsk->gdev != NULL) {
if (geli_havekey(zdsk->gdev) == 0 ||
geli_passphrase(zdsk->gdev, gelipw) == 0) {
if (vdev_probe(vdev_read2, zdsk, NULL) == 0) {
return;
}
}
}
if ((zdsk->gdev != NULL) && (geli_havekey(zdsk->gdev) == 0))
geli_passphrase(zdsk->gdev, gelipw);
#endif /* LOADER_GELI_SUPPORT */
sec = dmadat->secbuf;

View File

@ -241,6 +241,10 @@ dirmatch(struct open_file *f, const char *path, struct iso_directory_record *dp,
icase = 1;
} else
icase = 0;
if (strlen(path) != len)
return (0);
for (i = len; --i >= 0; path++, cp++) {
if (!*path || *path == '/')
break;

View File

@ -363,51 +363,100 @@ static int
vdev_read(vdev_t *vdev, void *priv, off_t offset, void *buf, size_t bytes)
{
int fd, ret;
size_t res, size, remainder, rb_size, blksz;
unsigned secsz;
off_t off;
char *bouncebuf, *rb_buf;
size_t res, head, tail, total_size, full_sec_size;
unsigned secsz, do_tail_read;
off_t start_sec;
char *outbuf, *bouncebuf;
fd = (uintptr_t) priv;
outbuf = (char *) buf;
bouncebuf = NULL;
ret = ioctl(fd, DIOCGSECTORSIZE, &secsz);
if (ret != 0)
return (ret);
off = offset / secsz;
remainder = offset % secsz;
if (lseek(fd, off * secsz, SEEK_SET) == -1)
return (errno);
/*
* Handling reads of arbitrary offset and size - multi-sector case
* and single-sector case.
*
* Multi-sector Case
* (do_tail_read = true if tail > 0)
*
* |<----------------------total_size--------------------->|
* | |
* |<--head-->|<--------------bytes------------>|<--tail-->|
* | | | |
* | | |<~full_sec_size~>| | |
* +------------------+ +------------------+
* | |0101010| . . . |0101011| |
* +------------------+ +------------------+
* start_sec start_sec + n
*
*
* Single-sector Case
* (do_tail_read = false)
*
* |<------total_size = secsz----->|
* | |
* |<-head->|<---bytes--->|<-tail->|
* +-------------------------------+
* | |0101010101010| |
* +-------------------------------+
* start_sec
*/
start_sec = offset / secsz;
head = offset % secsz;
total_size = roundup2(head + bytes, secsz);
tail = total_size - (head + bytes);
do_tail_read = ((tail > 0) && (head + bytes > secsz));
full_sec_size = total_size;
if (head > 0)
full_sec_size -= secsz;
if (do_tail_read)
full_sec_size -= secsz;
rb_buf = buf;
rb_size = bytes;
size = roundup2(bytes + remainder, secsz);
blksz = size;
if (remainder != 0 || size != bytes) {
/* Return of partial sector data requires a bounce buffer. */
if ((head > 0) || do_tail_read) {
bouncebuf = zfs_alloc(secsz);
if (bouncebuf == NULL) {
printf("vdev_read: out of memory\n");
return (ENOMEM);
}
rb_buf = bouncebuf;
blksz = rb_size - remainder;
}
while (bytes > 0) {
res = read(fd, rb_buf, rb_size);
if (res != rb_size) {
if (lseek(fd, start_sec * secsz, SEEK_SET) == -1)
return (errno);
/* Partial data return from first sector */
if (head > 0) {
res = read(fd, bouncebuf, secsz);
if (res != secsz) {
ret = EIO;
goto error;
}
if (bytes < blksz)
blksz = bytes;
if (bouncebuf != NULL)
memcpy(buf, rb_buf + remainder, blksz);
buf = (void *)((uintptr_t)buf + blksz);
bytes -= blksz;
remainder = 0;
blksz = rb_size;
memcpy(outbuf, bouncebuf + head, min(secsz - head, bytes));
outbuf += min(secsz - head, bytes);
}
/* Full data return from read sectors */
if (full_sec_size > 0) {
res = read(fd, outbuf, full_sec_size);
if (res != full_sec_size) {
ret = EIO;
goto error;
}
outbuf += full_sec_size;
}
/* Partial data return from last sector */
if (do_tail_read) {
res = read(fd, bouncebuf, secsz);
if (res != secsz) {
ret = EIO;
goto error;
}
memcpy(outbuf, bouncebuf, secsz - tail);
}
ret = 0;
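To make the head/full/tail split described in the new comment concrete, a small standalone sketch (not loader code) reproducing the arithmetic for one sample request, a 2000-byte read at byte offset 700 with 512-byte sectors:

/* Standalone illustration of the sector-split arithmetic used above. */
#include <stdio.h>
#include <stddef.h>

/* Mirrors roundup2(); y must be a power of two. */
#define RUP2(x, y)      (((x) + ((y) - 1)) & ~((size_t)(y) - 1))

int
main(void)
{
        size_t offset = 700, bytes = 2000, secsz = 512;
        size_t start_sec = offset / secsz;
        size_t head = offset % secsz;
        size_t total_size = RUP2(head + bytes, secsz);
        size_t tail = total_size - (head + bytes);
        int do_tail_read = (tail > 0) && (head + bytes > secsz);
        size_t full_sec_size = total_size;

        if (head > 0)
                full_sec_size -= secsz;
        if (do_tail_read)
                full_sec_size -= secsz;

        /* Prints: start_sec=1 head=188 full=1536 tail=372 tail_read=1 */
        printf("start_sec=%zu head=%zu full=%zu tail=%zu tail_read=%d\n",
            start_sec, head, full_sec_size, tail, do_tail_read);
        return (0);
}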

View File

@ -38,6 +38,7 @@ local INCORRECT_PASSWORD = "loader: incorrect password"
-- Asterisks as a password mask
local show_password_mask = false
local twiddle_chars = {"/", "-", "\\", "|"}
local screen_setup = false
-- Module exports
function password.read(prompt_length)
@ -80,8 +81,6 @@ function password.read(prompt_length)
end
function password.check()
screen.clear()
screen.defcursor()
-- pwd is optionally supplied if we want to check it
local function doPrompt(prompt, pwd)
local attempts = 1
@ -90,6 +89,12 @@ function password.check()
printc("\r" .. string.rep(" ", #INCORRECT_PASSWORD))
end
if not screen_setup then
screen.clear()
screen.defcursor()
screen_setup = true
end
while true do
if attempts > 1 then
clear_incorrect_text_prompt()

View File

@ -11,6 +11,7 @@ LOADER_BZIP2_SUPPORT?= no
.include <bsd.init.mk>
BINDIR= /boot/uboot
PROG= ubldr
NEWVERSWHAT= "U-Boot loader" ${MACHINE_ARCH}
INSTALLFLAGS= -b

View File

@ -156,7 +156,7 @@ get_device_type(const char *devstr, int *devtype)
printf("Unknown device type '%s'\n", devstr);
}
*devtype = -1;
*devtype = DEV_TYP_NONE;
return (NULL);
}
@ -182,6 +182,14 @@ device_typename(int type)
* The returned values for slice and partition are interpreted by
* disk_open().
*
* The device string can be a standard loader(8) disk specifier:
*
* disk<unit>s<slice> disk0s1
* disk<unit>s<slice><partition> disk1s2a
* disk<unit>p<partition> disk0p4
*
* or one of the following formats:
*
* Valid device strings: For device types:
*
* <type_name> DEV_TYP_STOR, DEV_TYP_NET
@ -198,11 +206,12 @@ device_typename(int type)
static void
get_load_device(int *type, int *unit, int *slice, int *partition)
{
struct disk_devdesc dev;
char *devstr;
const char *p;
char *endp;
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
*slice = 0;
*partition = -1;
@ -216,18 +225,31 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
p = get_device_type(devstr, type);
/*
* If type is DEV_TYP_STOR we have a disk-like device. If we can parse
* the remainder of the string as a standard unit+slice+partition (e.g.,
* 0s2a or 1p12), return those results. Otherwise we'll fall through to
* the code that parses the legacy format.
*/
if ((*type & DEV_TYP_STOR) && disk_parsedev(&dev, p, NULL) == 0) {
*unit = dev.dd.d_unit;
*slice = dev.d_slice;
*partition = dev.d_partition;
return;
}
/* Ignore optional spaces after the device name. */
while (*p == ' ')
p++;
/* Unknown device name, or a known name without unit number. */
if ((*type == -1) || (*p == '\0')) {
if ((*type == DEV_TYP_NONE) || (*p == '\0')) {
return;
}
/* Malformed unit number. */
if (!isdigit(*p)) {
*type = -1;
*type = DEV_TYP_NONE;
return;
}
@ -242,7 +264,7 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
/* Device string is malformed beyond unit number. */
if (*p != ':') {
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
return;
}
@ -255,7 +277,7 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
/* Only DEV_TYP_STOR devices can have a slice specification. */
if (!(*type & DEV_TYP_STOR)) {
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
return;
}
@ -264,7 +286,7 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
/* Malformed slice number. */
if (p == endp) {
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
*slice = 0;
return;
@ -278,7 +300,7 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
/* Device string is malformed beyond slice number. */
if (*p != '.') {
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
*slice = 0;
return;
@ -298,7 +320,7 @@ get_load_device(int *type, int *unit, int *slice, int *partition)
return;
/* Junk beyond partition number. */
*type = -1;
*type = DEV_TYP_NONE;
*unit = -1;
*slice = 0;
*partition = -1;
@ -310,13 +332,13 @@ print_disk_probe_info()
char slice[32];
char partition[32];
if (currdev.d_disk.slice > 0)
sprintf(slice, "%d", currdev.d_disk.slice);
if (currdev.d_disk.d_slice > 0)
sprintf(slice, "%d", currdev.d_disk.d_slice);
else
strcpy(slice, "<auto>");
if (currdev.d_disk.partition >= 0)
sprintf(partition, "%d", currdev.d_disk.partition);
if (currdev.d_disk.d_partition >= 0)
sprintf(partition, "%d", currdev.d_disk.d_partition);
else
strcpy(partition, "<auto>");
@ -332,8 +354,8 @@ probe_disks(int devidx, int load_type, int load_unit, int load_slice,
int open_result, unit;
struct open_file f;
currdev.d_disk.slice = load_slice;
currdev.d_disk.partition = load_partition;
currdev.d_disk.d_slice = load_slice;
currdev.d_disk.d_partition = load_partition;
f.f_devdata = &currdev;
open_result = -1;
@ -467,14 +489,14 @@ main(int argc, char **argv)
currdev.dd.d_dev = devsw[i];
currdev.dd.d_unit = 0;
if ((load_type == -1 || (load_type & DEV_TYP_STOR)) &&
if ((load_type == DEV_TYP_NONE || (load_type & DEV_TYP_STOR)) &&
strcmp(devsw[i]->dv_name, "disk") == 0) {
if (probe_disks(i, load_type, load_unit, load_slice,
load_partition) == 0)
break;
}
if ((load_type == -1 || (load_type & DEV_TYP_NET)) &&
if ((load_type == DEV_TYP_NONE || (load_type & DEV_TYP_NET)) &&
strcmp(devsw[i]->dv_name, "net") == 0)
break;
}

View File

@ -27,18 +27,14 @@
* $FreeBSD$
*/
struct uboot_devdesc {
struct devdesc dd; /* Must be first. */
union {
struct {
int slice;
int partition;
off_t offset;
} disk;
} d_kind;
};
#include <disk.h>
#define d_disk d_kind.disk
struct uboot_devdesc {
union {
struct devdesc dd;
struct disk_devdesc d_disk;
};
};
/*
* Default network packet alignment in memory. On arm arches packets must be

View File

@ -353,10 +353,6 @@ pt_entry_t pg_nx;
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int pat_works = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pat_works, CTLFLAG_RD, &pat_works, 1,
"Is page attribute table fully functional?");
static int pg_ps_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
&pg_ps_enabled, 0, "Are large page mappings enabled?");
@ -1222,7 +1218,6 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
void
pmap_init_pat(void)
{
int pat_table[PAT_INDEX_SIZE];
uint64_t pat_msr;
u_long cr0, cr4;
int i;
@ -1233,45 +1228,32 @@ pmap_init_pat(void)
/* Set default PAT index table. */
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_table[i] = -1;
pat_table[PAT_WRITE_BACK] = 0;
pat_table[PAT_WRITE_THROUGH] = 1;
pat_table[PAT_UNCACHEABLE] = 3;
pat_table[PAT_WRITE_COMBINING] = 3;
pat_table[PAT_WRITE_PROTECTED] = 3;
pat_table[PAT_UNCACHED] = 3;
pat_index[i] = -1;
pat_index[PAT_WRITE_BACK] = 0;
pat_index[PAT_WRITE_THROUGH] = 1;
pat_index[PAT_UNCACHEABLE] = 3;
pat_index[PAT_WRITE_COMBINING] = 6;
pat_index[PAT_WRITE_PROTECTED] = 5;
pat_index[PAT_UNCACHED] = 2;
/* Initialize default PAT entries. */
/*
* Initialize default PAT entries.
* Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
* Program 5 and 6 as WP and WC.
*
* Leave 4 and 7 as WB and UC. Note that a recursive page table
* mapping for a 2M page uses a PAT value with the bit 3 set due
* to its overload with PG_PS.
*/
pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
PAT_VALUE(1, PAT_WRITE_THROUGH) |
PAT_VALUE(2, PAT_UNCACHED) |
PAT_VALUE(3, PAT_UNCACHEABLE) |
PAT_VALUE(4, PAT_WRITE_BACK) |
PAT_VALUE(5, PAT_WRITE_THROUGH) |
PAT_VALUE(6, PAT_UNCACHED) |
PAT_VALUE(5, PAT_WRITE_PROTECTED) |
PAT_VALUE(6, PAT_WRITE_COMBINING) |
PAT_VALUE(7, PAT_UNCACHEABLE);
if (pat_works) {
/*
* Leave the indices 0-3 at the default of WB, WT, UC-, and UC.
* Program 5 and 6 as WP and WC.
* Leave 4 and 7 as WB and UC.
*/
pat_msr &= ~(PAT_MASK(5) | PAT_MASK(6));
pat_msr |= PAT_VALUE(5, PAT_WRITE_PROTECTED) |
PAT_VALUE(6, PAT_WRITE_COMBINING);
pat_table[PAT_UNCACHED] = 2;
pat_table[PAT_WRITE_PROTECTED] = 5;
pat_table[PAT_WRITE_COMBINING] = 6;
} else {
/*
* Just replace PAT Index 2 with WC instead of UC-.
*/
pat_msr &= ~PAT_MASK(2);
pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
pat_table[PAT_WRITE_COMBINING] = 2;
}
/* Disable PGE. */
cr4 = rcr4();
load_cr4(cr4 & ~CR4_PGE);
@ -1286,8 +1268,6 @@ pmap_init_pat(void)
/* Update PAT and index table. */
wrmsr(MSR_PAT, pat_msr);
for (i = 0; i < PAT_INDEX_SIZE; i++)
pat_index[i] = pat_table[i];
/* Flush caches and TLBs again. */
wbinvd();
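As a worked illustration of the layout described in the comment above (a standalone sketch, not kernel code; the PAT_* encodings used are the architectural values from the SDM), the MSR image the new code programs can be reproduced like this:

/* Compute the PAT MSR image: WB, WT, UC-, UC in slots 0-3 (the reset */
/* defaults) and WB, WP, WC, UC in slots 4-7, as programmed above. */
#include <stdio.h>
#include <stdint.h>

#define PAT_UNCACHEABLE         0x00
#define PAT_WRITE_COMBINING     0x01
#define PAT_WRITE_THROUGH       0x04
#define PAT_WRITE_PROTECTED     0x05
#define PAT_WRITE_BACK          0x06
#define PAT_UNCACHED            0x07
#define PAT_VALUE(i, m)         ((uint64_t)(m) << ((i) * 8))

int
main(void)
{
        uint64_t pat_msr =
            PAT_VALUE(0, PAT_WRITE_BACK) |
            PAT_VALUE(1, PAT_WRITE_THROUGH) |
            PAT_VALUE(2, PAT_UNCACHED) |
            PAT_VALUE(3, PAT_UNCACHEABLE) |
            PAT_VALUE(4, PAT_WRITE_BACK) |
            PAT_VALUE(5, PAT_WRITE_PROTECTED) |
            PAT_VALUE(6, PAT_WRITE_COMBINING) |
            PAT_VALUE(7, PAT_UNCACHEABLE);

        /* Prints 0x0001050600070406 */
        printf("PAT MSR = 0x%016jx\n", (uintmax_t)pat_msr);
        return (0);
}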

View File

@ -70,30 +70,26 @@ sgx_linux_ioctl(struct thread *td, struct linux_ioctl_args *args)
cmd = args->cmd;
args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
if (cmd & LINUX_IOC_IN)
if ((cmd & LINUX_IOC_IN) != 0)
args->cmd |= IOC_IN;
if (cmd & LINUX_IOC_OUT)
if ((cmd & LINUX_IOC_OUT) != 0)
args->cmd |= IOC_OUT;
len = IOCPARM_LEN(cmd);
if (len > SGX_IOCTL_MAX_DATA_LEN) {
printf("%s: Can't copy data: cmd len is too big %d\n",
__func__, len);
return (EINVAL);
error = EINVAL;
goto out;
}
if (cmd & LINUX_IOC_IN) {
if ((cmd & LINUX_IOC_IN) != 0) {
error = copyin((void *)args->arg, data, len);
if (error) {
printf("%s: Can't copy data, error %d\n",
__func__, error);
return (EINVAL);
}
if (error != 0)
goto out;
}
error = (fo_ioctl(fp, args->cmd, (caddr_t)data, td->td_ucred, td));
error = fo_ioctl(fp, args->cmd, (caddr_t)data, td->td_ucred, td);
out:
fdrop(fp, td);
return (error);
}

View File

@ -120,6 +120,10 @@ MALLOC_DEFINE(M_AXP8XX_REG, "AXP8xx regulator", "AXP8xx power regulator");
#define AXP_VOLTCTL_MASK 0x7f
#define AXP_POWERBAT 0x32
#define AXP_POWERBAT_SHUTDOWN (1 << 7)
#define AXP_CHARGERCTL1 0x33
#define AXP_CHARGERCTL1_MIN 0
#define AXP_CHARGERCTL1_MAX 13
#define AXP_CHARGERCTL1_CMASK 0xf
#define AXP_IRQEN1 0x40
#define AXP_IRQEN1_ACIN_HI (1 << 6)
#define AXP_IRQEN1_ACIN_LO (1 << 5)
@ -614,13 +618,13 @@ static const struct axp8xx_sensors axp8xx_common_sensors[] = {
.id = AXP_SENSOR_BATT_CHARGE_CURRENT,
.name = "batchargecurrent",
.format = "I",
.desc = "Battery Charging Current",
.desc = "Average Battery Charging Current",
},
{
.id = AXP_SENSOR_BATT_DISCHARGE_CURRENT,
.name = "batdischargecurrent",
.format = "I",
.desc = "Battery Discharging Current",
.desc = "Average Battery Discharging Current",
},
{
.id = AXP_SENSOR_BATT_CAPACITY_PERCENT,
@ -889,6 +893,33 @@ axp8xx_shutdown(void *devp, int howto)
axp8xx_write(dev, AXP_POWERBAT, AXP_POWERBAT_SHUTDOWN);
}
static int
axp8xx_sysctl_chargecurrent(SYSCTL_HANDLER_ARGS)
{
device_t dev = arg1;
uint8_t data;
int val, error;
error = axp8xx_read(dev, AXP_CHARGERCTL1, &data, 1);
if (error != 0)
return (error);
if (bootverbose)
device_printf(dev, "Raw CHARGECTL1 val: 0x%0x\n", data);
val = (data & AXP_CHARGERCTL1_CMASK);
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr) /* error || read request */
return (error);
if ((val < AXP_CHARGERCTL1_MIN) || (val > AXP_CHARGERCTL1_MAX))
return (EINVAL);
val |= (data & (AXP_CHARGERCTL1_CMASK << 4));
axp8xx_write(dev, AXP_CHARGERCTL1, val);
return (0);
}
static int
axp8xx_sysctl(SYSCTL_HANDLER_ARGS)
{
@ -1482,6 +1513,16 @@ axp8xx_attach(device_t dev)
sc->sensors[i].format,
sc->sensors[i].desc);
}
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "batchargecurrentstep",
CTLTYPE_INT | CTLFLAG_RW,
dev, 0, axp8xx_sysctl_chargecurrent,
"I", "Battery Charging Current Step, "
"0: 200mA, 1: 400mA, 2: 600mA, 3: 800mA, "
"4: 1000mA, 5: 1200mA, 6: 1400mA, 7: 1600mA, "
"8: 1800mA, 9: 2000mA, 10: 2200mA, 11: 2400mA, "
"12: 2600mA, 13: 2800mA");
/* Get thresholds */
if (axp8xx_read(dev, AXP_BAT_CAP_WARN, &val, 1) == 0) {
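A standalone sketch (not driver code) of the step-to-current mapping spelled out in the sysctl description above, assuming the linear 200 mA spacing the listed values imply:

/* Map an AXP8xx charge-current step (0..13) to milliamps per the table */
/* in the sysctl description: step 0 = 200 mA, each step adds 200 mA. */
#include <stdio.h>

#define AXP_CHARGERCTL1_MIN     0
#define AXP_CHARGERCTL1_MAX     13

static int
step_to_ma(int step)
{
        if (step < AXP_CHARGERCTL1_MIN || step > AXP_CHARGERCTL1_MAX)
                return (-1);    /* out of range, as the handler rejects */
        return (200 + step * 200);
}

int
main(void)
{
        int step;

        for (step = AXP_CHARGERCTL1_MIN; step <= AXP_CHARGERCTL1_MAX; step++)
                printf("step %2d -> %4d mA\n", step, step_to_ma(step));
        return (0);
}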

View File

@ -84,7 +84,7 @@ struct sysentvec elf32_freebsd_sysvec = {
#if __ARM_ARCH >= 6
SV_ASLR | SV_SHP | SV_TIMEKEEP |
#endif
SV_ABI_FREEBSD | SV_ILP32,
SV_ABI_FREEBSD | SV_ILP32 | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,

View File

@ -73,6 +73,7 @@ struct snvs_softc {
};
static struct ofw_compat_data compat_data[] = {
{"fsl,sec-v4.0-mon-rtc-lp", true},
{"fsl,sec-v4.0-mon", true},
{NULL, false}
};

View File

@ -79,7 +79,8 @@ static struct sysentvec elf64_freebsd_sysvec = {
.sv_setregs = exec_setregs,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_SHP | SV_TIMEKEEP | SV_ABI_FREEBSD | SV_LP64,
.sv_flags = SV_SHP | SV_TIMEKEEP | SV_ABI_FREEBSD | SV_LP64 |
SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,

View File

@ -1759,14 +1759,6 @@ hint.ata.1.at="isa"
hint.ata.1.port="0x170"
hint.ata.1.irq="15"
#
# The following options are valid on the ATA driver:
#
# ATA_REQUEST_TIMEOUT: the number of seconds to wait for an ATA request
# before timing out.
#options ATA_REQUEST_TIMEOUT=10
#
# Standard floppy disk controllers and floppy tapes, supports
# the Y-E DATA External FDD (PC Card)

View File

@ -4847,6 +4847,8 @@ crypto/libsodium/randombytes.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
crypto/libsodium/utils.c optional crypto \
compile-with "${NORMAL_C} -I$S/contrib/libsodium/src/libsodium/include -I$S/crypto/libsodium"
opencrypto/cbc_mac.c optional crypto
opencrypto/xform_cbc_mac.c optional crypto
rpc/auth_none.c optional krpc | nfslockd | nfscl | nfsd
rpc/auth_unix.c optional krpc | nfslockd | nfscl | nfsd
rpc/authunix_prot.c optional krpc | nfslockd | nfscl | nfsd

View File

@ -87,6 +87,10 @@ SECTIONS
.sdata : { *(.sdata) }
_edata = .;
PROVIDE (edata = .);
/* Ensure __bss_start is associated with the next section in case orphan
sections are placed directly after .sdata, as has been seen to happen with
LLD. */
. = .;
__bss_start = .;
.sbss : { *(.sbss) *(.scommon) }
.bss :

View File

@ -229,16 +229,8 @@ nvpair_remove_nvlist_array(nvpair_t *nvp)
nvlarray = __DECONST(nvlist_t **,
nvpair_get_nvlist_array(nvp, &count));
for (i = 0; i < count; i++) {
nvlist_t *nvl;
nvpair_t *nnvp;
nvl = nvlarray[i];
nnvp = nvlist_get_array_next_nvpair(nvl);
if (nnvp != NULL) {
nvpair_free_structure(nnvp);
}
nvlist_set_array_next(nvl, NULL);
nvlist_set_parent(nvl, NULL);
nvlist_set_array_next(nvlarray[i], NULL);
nvlist_set_parent(nvlarray[i], NULL);
}
}

View File

@ -203,10 +203,6 @@
#define ATA_OP_FINISHED 1
#define ATA_MAX_28BIT_LBA 268435455UL
#ifndef ATA_REQUEST_TIMEOUT
#define ATA_REQUEST_TIMEOUT 10
#endif
/* structure used for composite atomic operations */
#define MAX_COMPOSITES 32 /* u_int32_t bits */
struct ata_composite {

View File

@ -1046,10 +1046,6 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
"RX buffer - next to use: %d", next_to_use);
req_id = rx_ring->free_rx_ids[next_to_use];
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc != 0))
break;
rx_info = &rx_ring->rx_buffer_info[req_id];
rc = ena_alloc_rx_mbuf(adapter, rx_ring, rx_info);
@ -1472,21 +1468,24 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
struct ena_rx_buffer *rx_info;
struct ena_adapter *adapter;
unsigned int descs = ena_rx_ctx->descs;
int rc;
uint16_t ntc, len, req_id, buf = 0;
ntc = *next_to_clean;
adapter = rx_ring->adapter;
rx_info = &rx_ring->rx_buffer_info[ntc];
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc != 0))
return (NULL);
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(rx_info->mbuf == NULL)) {
device_printf(adapter->pdev, "NULL mbuf in rx_info");
return (NULL);
}
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
ena_trace(ENA_DBG | ENA_RXPTH, "rx_info %p, mbuf %p, paddr %jx",
rx_info, rx_info->mbuf, (uintmax_t)rx_info->ena_buf.paddr);
@ -1517,6 +1516,16 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
++buf;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc != 0)) {
/*
* If the req_id is invalid, then the device will be
* reset. In that case we must free all mbufs that
* were already gathered.
*/
m_freem(mbuf);
return (NULL);
}
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(rx_info->mbuf == NULL)) {

View File

@ -41,7 +41,7 @@
#define DRV_MODULE_VER_MAJOR 0
#define DRV_MODULE_VER_MINOR 8
#define DRV_MODULE_VER_SUBMINOR 2
#define DRV_MODULE_VER_SUBMINOR 3
#define DRV_MODULE_NAME "ena"

View File

@ -932,7 +932,7 @@ ixl_if_msix_intr_assign(if_ctx_t ctx, int msix)
return (err);
}
/* Create soft IRQ for handling VFLRs */
iflib_softirq_alloc_generic(ctx, &pf->iov_irq, IFLIB_INTR_IOV, pf, 0, "iov");
iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, pf, 0, "iov");
/* Now set up the stations */
for (i = 0, vector = 1; i < vsi->shared->isc_nrxqsets; i++, vector++, rx_que++) {

View File

@ -138,7 +138,6 @@ struct ixl_pf {
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
struct if_irq iov_irq;
};
/*

View File

@ -105,6 +105,7 @@ int nm_os_selinfo_init(NM_SELINFO_T *si, const char *name) {
snprintf(si->mtxname, sizeof(si->mtxname), "nmkl%s", name);
mtx_init(&si->m, si->mtxname, NULL, MTX_DEF);
knlist_init_mtx(&si->si.si_note, &si->m);
si->kqueue_users = 0;
return (0);
}
@ -1351,7 +1352,9 @@ void
nm_os_selwakeup(struct nm_selinfo *si)
{
selwakeuppri(&si->si, PI_NET);
taskqueue_enqueue(si->ntfytq, &si->ntfytask);
if (si->kqueue_users > 0) {
taskqueue_enqueue(si->ntfytq, &si->ntfytask);
}
}
void
@ -1364,20 +1367,28 @@ static void
netmap_knrdetach(struct knote *kn)
{
struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
struct selinfo *si = &priv->np_si[NR_RX]->si;
struct nm_selinfo *si = priv->np_si[NR_RX];
nm_prinf("remove selinfo %p", si);
knlist_remove(&si->si_note, kn, /*islocked=*/0);
knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
NMG_LOCK();
KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
si->mtxname));
si->kqueue_users--;
nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
NMG_UNLOCK();
}
static void
netmap_knwdetach(struct knote *kn)
{
struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
struct selinfo *si = &priv->np_si[NR_TX]->si;
struct nm_selinfo *si = priv->np_si[NR_TX];
nm_prinf("remove selinfo %p", si);
knlist_remove(&si->si_note, kn, /*islocked=*/0);
knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
NMG_LOCK();
si->kqueue_users--;
nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
NMG_UNLOCK();
}
/*
@ -1465,6 +1476,10 @@ netmap_kqfilter(struct cdev *dev, struct knote *kn)
kn->kn_fop = (ev == EVFILT_WRITE) ?
&netmap_wfiltops : &netmap_rfiltops;
kn->kn_hook = priv;
NMG_LOCK();
si->kqueue_users++;
nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
NMG_UNLOCK();
knlist_add(&si->si.si_note, kn, /*islocked=*/0);
return 0;

View File

@ -132,11 +132,14 @@ struct netmap_adapter *netmap_getna(if_t ifp);
#define MBUF_QUEUED(m) 1
struct nm_selinfo {
/* Support for select(2) and poll(2). */
struct selinfo si;
/* Support for kqueue(9). See comments in netmap_freebsd.c */
struct taskqueue *ntfytq;
struct task ntfytask;
struct mtx m;
char mtxname[32];
int kqueue_users;
};

View File

@ -373,7 +373,6 @@ fuse_internal_readdir_processdata(struct uio *uio,
/* remove */
#define INVALIDATE_CACHED_VATTRS_UPON_UNLINK 1
int
fuse_internal_remove(struct vnode *dvp,
struct vnode *vp,
@ -381,15 +380,11 @@ fuse_internal_remove(struct vnode *dvp,
enum fuse_opcode op)
{
struct fuse_dispatcher fdi;
struct fuse_vnode_data *fvdat;
int err;
struct vattr *vap = VTOVA(vp);
#if INVALIDATE_CACHED_VATTRS_UPON_UNLINK
int need_invalidate = 0;
uint64_t target_nlink = 0;
#endif
int err = 0;
err = 0;
fvdat = VTOFUD(vp);
debug_printf("dvp=%p, cnp=%p, op=%d\n", vp, cnp, op);
@ -399,13 +394,6 @@ fuse_internal_remove(struct vnode *dvp,
memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
((char *)fdi.indata)[cnp->cn_namelen] = '\0';
#if INVALIDATE_CACHED_VATTRS_UPON_UNLINK
if (vap->va_nlink > 1) {
need_invalidate = 1;
target_nlink = vap->va_nlink;
}
#endif
err = fdisp_wait_answ(&fdi);
fdisp_destroy(&fdi);
return err;
@ -483,13 +471,13 @@ fuse_internal_newentry_core(struct vnode *dvp,
if ((err = fuse_internal_checkentry(feo, vtyp))) {
return err;
}
err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, vtyp);
err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, vtyp);
if (err) {
fuse_internal_forget_send(mp, cnp->cn_thread, cnp->cn_cred,
feo->nodeid, 1);
return err;
}
cache_attrs(*vpp, feo);
cache_attrs(*vpp, feo, NULL);
return err;
}
@ -563,6 +551,7 @@ fuse_internal_vnode_disappear(struct vnode *vp)
ASSERT_VOP_ELOCKED(vp, "fuse_internal_vnode_disappear");
fvdat->flag |= FN_REVOKED;
fvdat->valid_attr_cache = false;
cache_purge(vp);
}


@ -200,15 +200,47 @@ fuse_internal_access(struct vnode *vp,
/* attributes */
/*
* Cache FUSE attributes 'fat', with nominal expiration
* 'attr_valid'.'attr_valid_nsec', in attr cache associated with vnode 'vp'.
* Optionally, if argument 'vap' is not NULL, store a copy of the converted
* attributes there as well.
*
* If the nominal attribute cache TTL is zero, do not cache on the 'vp' (but do
* return the result to the caller).
*/
static __inline
void
fuse_internal_attr_fat2vat(struct mount *mp,
fuse_internal_attr_fat2vat(struct vnode *vp,
struct fuse_attr *fat,
uint64_t attr_valid,
uint32_t attr_valid_nsec,
struct vattr *vap)
{
struct mount *mp;
struct fuse_vnode_data *fvdat;
struct vattr *vp_cache_at;
mp = vnode_mount(vp);
fvdat = VTOFUD(vp);
DEBUGX(FUSE_DEBUG_INTERNAL,
"node #%ju, mode 0%o\n", (uintmax_t)fat->ino, fat->mode);
/* Honor explicit do-not-cache requests from user filesystems. */
if (attr_valid == 0 && attr_valid_nsec == 0)
fvdat->valid_attr_cache = false;
else
fvdat->valid_attr_cache = true;
vp_cache_at = VTOVA(vp);
if (vap == NULL && vp_cache_at == NULL)
return;
if (vap == NULL)
vap = vp_cache_at;
vattr_null(vap);
vap->va_fsid = mp->mnt_stat.f_fsid.val[0];
@ -227,21 +259,17 @@ fuse_internal_attr_fat2vat(struct mount *mp,
vap->va_ctime.tv_nsec = fat->ctimensec;
vap->va_blocksize = PAGE_SIZE;
vap->va_type = IFTOVT(fat->mode);
#if (S_BLKSIZE == 512)
/* Optimize this case */
vap->va_bytes = fat->blocks << 9;
#else
vap->va_bytes = fat->blocks * S_BLKSIZE;
#endif
vap->va_flags = 0;
if (vap != vp_cache_at && vp_cache_at != NULL)
memcpy(vp_cache_at, vap, sizeof(*vap));
}
#define cache_attrs(vp, fuse_out) \
fuse_internal_attr_fat2vat(vnode_mount(vp), &(fuse_out)->attr, \
VTOVA(vp));
#define cache_attrs(vp, fuse_out, vap_out) \
fuse_internal_attr_fat2vat((vp), &(fuse_out)->attr, \
(fuse_out)->attr_valid, (fuse_out)->attr_valid_nsec, (vap_out))
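The attr_valid/attr_valid_nsec pair comes straight from the server's reply, so a userspace filesystem opts out of kernel attribute caching simply by answering with a zero timeout. A minimal libfuse low-level sketch of that case (hypothetical handler, not part of this change):

#define FUSE_USE_VERSION 26
#include <fuse_lowlevel.h>
#include <sys/stat.h>
#include <string.h>

static void
my_getattr(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info *fi)
{
	struct stat st;

	memset(&st, 0, sizeof(st));
	st.st_ino = ino;
	st.st_mode = S_IFREG | 0644;
	/*
	 * attr_timeout 0.0 -> attr_valid == attr_valid_nsec == 0,
	 * so the kernel sets valid_attr_cache = false.
	 */
	fuse_reply_attr(req, &st, 0.0);
}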
/* fsync */


@ -155,7 +155,13 @@ fuse_io_dispatch(struct vnode *vp, struct uio *uio, int ioflag,
}
break;
case UIO_WRITE:
if (directio) {
/*
* Kludge: simulate write-through caching via write-around
* caching. Same effect, as far as never caching dirty data,
* but slightly pessimal in that newly written data is not
* cached.
*/
if (directio || fuse_data_cache_mode == FUSE_CACHE_WT) {
FS_DEBUG("direct write of vnode %ju via file handle %ju\n",
(uintmax_t)VTOILLU(vp), (uintmax_t)fufh->fh_id);
err = fuse_write_directbackend(vp, uio, cred, fufh, ioflag);
@ -362,8 +368,11 @@ fuse_write_directbackend(struct vnode *vp, struct uio *uio,
}
uio->uio_resid += diff;
uio->uio_offset -= diff;
if (uio->uio_offset > fvdat->filesize)
if (uio->uio_offset > fvdat->filesize &&
fuse_data_cache_mode != FUSE_CACHE_UC) {
fuse_vnode_setsize(vp, cred, uio->uio_offset);
fvdat->flag &= ~FN_SIZECHANGE;
}
}
fdisp_destroy(&fdi);
@ -655,6 +664,7 @@ fuse_io_strategy(struct vnode *vp, struct buf *bp)
uiop->uio_offset = ((off_t)bp->b_blkno) * biosize;
error = fuse_read_directbackend(vp, uiop, cred, fufh);
/* XXXCEM: Potentially invalid access to cached_attrs here */
if ((!error && uiop->uio_resid) ||
(fsess_opt_brokenio(vnode_mount(vp)) && error == EIO &&
uiop->uio_offset < fvdat->filesize && fvdat->filesize > 0 &&


@ -214,7 +214,13 @@ struct fuse_data {
#define FSESS_NO_MMAP 0x0800 /* disable mmap */
#define FSESS_BROKENIO 0x1000 /* fix broken io */
extern int fuse_data_cache_enable;
enum fuse_data_cache_mode {
FUSE_CACHE_UC,
FUSE_CACHE_WT,
FUSE_CACHE_WB,
};
extern int fuse_data_cache_mode;
extern int fuse_data_cache_invalidate;
extern int fuse_mmap_enable;
extern int fuse_sync_resize;
@ -248,7 +254,7 @@ fsess_opt_datacache(struct mount *mp)
{
struct fuse_data *data = fuse_get_mpdata(mp);
return (fuse_data_cache_enable ||
return (fuse_data_cache_mode != FUSE_CACHE_UC &&
(data->dataflags & FSESS_NO_DATACACHE) == 0);
}
@ -257,7 +263,7 @@ fsess_opt_mmap(struct mount *mp)
{
struct fuse_data *data = fuse_get_mpdata(mp);
if (!(fuse_mmap_enable && fuse_data_cache_enable))
if (!fuse_mmap_enable || fuse_data_cache_mode == FUSE_CACHE_UC)
return 0;
return ((data->dataflags & (FSESS_NO_DATACACHE | FSESS_NO_MMAP)) == 0);
}


@ -94,16 +94,19 @@ __FBSDID("$FreeBSD$");
MALLOC_DEFINE(M_FUSEVN, "fuse_vnode", "fuse vnode private data");
static int sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS);
static int fuse_node_count = 0;
SYSCTL_INT(_vfs_fuse, OID_AUTO, node_count, CTLFLAG_RD,
&fuse_node_count, 0, "Count of FUSE vnodes");
int fuse_data_cache_enable = 1;
int fuse_data_cache_mode = FUSE_CACHE_WT;
SYSCTL_INT(_vfs_fuse, OID_AUTO, data_cache_enable, CTLFLAG_RW,
&fuse_data_cache_enable, 0,
"enable caching of FUSE file data (including dirty data)");
SYSCTL_PROC(_vfs_fuse, OID_AUTO, data_cache_mode, CTLTYPE_INT|CTLFLAG_RW,
&fuse_data_cache_mode, 0, sysctl_fuse_cache_mode, "I",
"Zero: disable caching of FUSE file data; One: write-through caching "
"(default); Two: write-back caching (generally unsafe)");
int fuse_data_cache_invalidate = 0;
@ -116,7 +119,7 @@ int fuse_mmap_enable = 1;
SYSCTL_INT(_vfs_fuse, OID_AUTO, mmap_enable, CTLFLAG_RW,
&fuse_mmap_enable, 0,
"If non-zero, and data_cache_enable is also non-zero, enable mmap(2) of "
"If non-zero, and data_cache_mode is also non-zero, enable mmap(2) of "
"FUSE files");
int fuse_refresh_size = 0;
@ -140,6 +143,28 @@ SYSCTL_INT(_vfs_fuse, OID_AUTO, fix_broken_io, CTLFLAG_RW,
"If non-zero, print a diagnostic warning if a userspace filesystem returns"
" EIO on reads of recently extended portions of files");
static int
sysctl_fuse_cache_mode(SYSCTL_HANDLER_ARGS)
{
int val, error;
val = *(int *)arg1;
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
return (error);
switch (val) {
case FUSE_CACHE_UC:
case FUSE_CACHE_WT:
case FUSE_CACHE_WB:
*(int *)arg1 = val;
break;
default:
return (EDOM);
}
return (0);
}
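For completeness, a small userland sketch that flips the new knob at run time (assuming the OID is rooted at vfs.fuse like the other sysctls in this file; sysctl vfs.fuse.data_cache_mode=2 from the shell does the same thing):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	int mode;
	size_t len = sizeof(mode);

	/* 0 = FUSE_CACHE_UC, 1 = FUSE_CACHE_WT (default), 2 = FUSE_CACHE_WB */
	if (sysctlbyname("vfs.fuse.data_cache_mode", &mode, &len, NULL, 0) == -1) {
		perror("sysctlbyname get");
		return (1);
	}
	printf("data_cache_mode = %d\n", mode);

	mode = 2;	/* anything outside 0..2 is rejected with EDOM */
	if (sysctlbyname("vfs.fuse.data_cache_mode", NULL, NULL, &mode, sizeof(mode)) == -1)
		perror("sysctlbyname set");
	return (0);
}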
static void
fuse_vnode_init(struct vnode *vp, struct fuse_vnode_data *fvdat,
uint64_t nodeid, enum vtype vtyp)
@ -147,6 +172,7 @@ fuse_vnode_init(struct vnode *vp, struct fuse_vnode_data *fvdat,
int i;
fvdat->nid = nodeid;
vattr_null(&fvdat->cached_attrs);
if (nodeid == FUSE_ROOT_ID) {
vp->v_vflag |= VV_ROOT;
}
@ -240,6 +266,7 @@ fuse_vnode_alloc(struct mount *mp,
int
fuse_vnode_get(struct mount *mp,
struct fuse_entry_out *feo,
uint64_t nodeid,
struct vnode *dvp,
struct vnode **vpp,
@ -260,7 +287,9 @@ fuse_vnode_get(struct mount *mp,
MPASS(!(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'));
fuse_vnode_setparent(*vpp, dvp);
}
if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0) {
if (dvp != NULL && cnp != NULL && (cnp->cn_flags & MAKEENTRY) != 0 &&
feo != NULL &&
(feo->entry_valid != 0 || feo->entry_valid_nsec != 0)) {
ASSERT_VOP_LOCKED(*vpp, "fuse_vnode_get");
ASSERT_VOP_LOCKED(dvp, "fuse_vnode_get");
cache_enter(dvp, *vpp, cnp);
@ -371,6 +400,7 @@ fuse_vnode_refreshsize(struct vnode *vp, struct ucred *cred)
struct vattr va;
if ((fvdat->flag & FN_SIZECHANGE) != 0 ||
fuse_data_cache_mode == FUSE_CACHE_UC ||
(fuse_refresh_size == 0 && fvdat->filesize != 0))
return;


@ -86,6 +86,7 @@ struct fuse_vnode_data {
uint32_t flag;
/** meta **/
bool valid_attr_cache;
struct vattr cached_attrs;
off_t filesize;
uint64_t nlookup;
@ -95,7 +96,9 @@ struct fuse_vnode_data {
#define VTOFUD(vp) \
((struct fuse_vnode_data *)((vp)->v_data))
#define VTOI(vp) (VTOFUD(vp)->nid)
#define VTOVA(vp) (&(VTOFUD(vp)->cached_attrs))
#define VTOVA(vp) \
(VTOFUD(vp)->valid_attr_cache ? \
&(VTOFUD(vp)->cached_attrs) : NULL)
#define VTOILLU(vp) ((uint64_t)(VTOFUD(vp) ? VTOI(vp) : 0))
#define FUSE_NULL_ID 0
@ -114,6 +117,7 @@ fuse_vnode_setparent(struct vnode *vp, struct vnode *dvp)
void fuse_vnode_destroy(struct vnode *vp);
int fuse_vnode_get(struct mount *mp,
struct fuse_entry_out *feo,
uint64_t nodeid,
struct vnode *dvp,
struct vnode **vpp,


@ -444,7 +444,8 @@ fuse_vfsop_root(struct mount *mp, int lkflags, struct vnode **vpp)
if (err == 0)
*vpp = data->vroot;
} else {
err = fuse_vnode_get(mp, FUSE_ROOT_ID, NULL, vpp, NULL, VDIR);
err = fuse_vnode_get(mp, NULL, FUSE_ROOT_ID, NULL, vpp, NULL,
VDIR);
if (err == 0) {
FUSE_LOCK();
MPASS(data->vroot == NULL || data->vroot == *vpp);


@ -384,7 +384,7 @@ fuse_vnop_create(struct vop_create_args *ap)
if ((err = fuse_internal_checkentry(feo, VREG))) {
goto out;
}
err = fuse_vnode_get(mp, feo->nodeid, dvp, vpp, cnp, VREG);
err = fuse_vnode_get(mp, feo, feo->nodeid, dvp, vpp, cnp, VREG);
if (err) {
struct fuse_release_in *fri;
uint64_t nodeid = feo->nodeid;
@ -518,10 +518,8 @@ fuse_vnop_getattr(struct vop_getattr_args *ap)
}
goto out;
}
cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
if (vap != VTOVA(vp)) {
memcpy(vap, VTOVA(vp), sizeof(*vap));
}
cache_attrs(vp, (struct fuse_attr_out *)fdi.answ, vap);
if (vap->va_type != vnode_vtype(vp)) {
fuse_internal_vnode_disappear(vp);
err = ENOENT;
@ -540,6 +538,7 @@ fuse_vnop_getattr(struct vop_getattr_args *ap)
if (fvdat->filesize != new_filesize) {
fuse_vnode_setsize(vp, cred, new_filesize);
fvdat->flag &= ~FN_SIZECHANGE;
}
}
debug_printf("fuse_getattr e: returning 0\n");
@ -628,9 +627,15 @@ fuse_vnop_link(struct vop_link_args *ap)
if (vnode_mount(tdvp) != vnode_mount(vp)) {
return EXDEV;
}
if (vap->va_nlink >= FUSE_LINK_MAX) {
/*
* This is a seatbelt check to protect naive userspace filesystems from
* themselves and the limitations of the FUSE IPC protocol. If a
* filesystem does not allow attribute caching, assume it is capable of
* validating that nlink does not overflow.
*/
if (vap != NULL && vap->va_nlink >= FUSE_LINK_MAX)
return EMLINK;
}
fli.oldnodeid = VTOI(vp);
fdisp_init(&fdi, 0);
@ -853,8 +858,8 @@ fuse_vnop_lookup(struct vop_lookup_args *ap)
vref(dvp);
*vpp = dvp;
} else {
err = fuse_vnode_get(dvp->v_mount, nid, dvp,
&vp, cnp, IFTOVT(fattr->mode));
err = fuse_vnode_get(dvp->v_mount, feo, nid,
dvp, &vp, cnp, IFTOVT(fattr->mode));
if (err)
goto out;
*vpp = vp;
@ -889,12 +894,8 @@ fuse_vnop_lookup(struct vop_lookup_args *ap)
err = EISDIR;
goto out;
}
err = fuse_vnode_get(vnode_mount(dvp),
nid,
dvp,
&vp,
cnp,
IFTOVT(fattr->mode));
err = fuse_vnode_get(vnode_mount(dvp), feo, nid, dvp,
&vp, cnp, IFTOVT(fattr->mode));
if (err) {
goto out;
}
@ -932,12 +933,8 @@ fuse_vnop_lookup(struct vop_lookup_args *ap)
}
}
VOP_UNLOCK(dvp, 0);
err = fuse_vnode_get(vnode_mount(dvp),
nid,
NULL,
&vp,
cnp,
IFTOVT(fattr->mode));
err = fuse_vnode_get(vnode_mount(dvp), feo, nid, NULL,
&vp, cnp, IFTOVT(fattr->mode));
vfs_unbusy(mp);
vn_lock(dvp, ltype | LK_RETRY);
if ((dvp->v_iflag & VI_DOOMED) != 0) {
@ -952,23 +949,54 @@ fuse_vnop_lookup(struct vop_lookup_args *ap)
vref(dvp);
*vpp = dvp;
} else {
err = fuse_vnode_get(vnode_mount(dvp),
nid,
dvp,
&vp,
cnp,
IFTOVT(fattr->mode));
struct fuse_vnode_data *fvdat;
err = fuse_vnode_get(vnode_mount(dvp), feo, nid, dvp,
&vp, cnp, IFTOVT(fattr->mode));
if (err) {
goto out;
}
fuse_vnode_setparent(vp, dvp);
/*
* In the case where we are looking up a FUSE node
* represented by an existing cached vnode, and the
* true size reported by FUSE_LOOKUP doesn't match
* the vnode's cached size, fix the vnode cache to
* match the real object size.
*
* This can occur via FUSE distributed filesystems,
* irregular files, etc.
*/
fvdat = VTOFUD(vp);
if (vnode_isreg(vp) &&
fattr->size != fvdat->filesize) {
/*
* The FN_SIZECHANGE flag reflects a dirty
* append. If userspace lets us know our cache
* is invalid, that write was lost. (Dirty
* writes that do not cause append are also
* lost, but we don't detect them here.)
*
* XXX: Maybe disable WB caching on this mount.
*/
if (fvdat->flag & FN_SIZECHANGE)
printf("%s: WB cache incoherent on "
"%s!\n", __func__,
vnode_mount(vp)->mnt_stat.f_mntonname);
(void)fuse_vnode_setsize(vp, cred, fattr->size);
fvdat->flag &= ~FN_SIZECHANGE;
}
*vpp = vp;
}
if (op == FUSE_GETATTR) {
cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ,
NULL);
} else {
cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ,
NULL);
}
/* Insert name into cache if appropriate. */
@ -1643,9 +1671,9 @@ fuse_vnop_setattr(struct vop_setattr_args *ap)
err = EAGAIN;
}
}
if (!err && !sizechanged) {
cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
}
if (err == 0)
cache_attrs(vp, (struct fuse_attr_out *)fdi.answ, NULL);
out:
fdisp_destroy(&fdi);
if (!err && sizechanged) {


@ -108,21 +108,47 @@ disable_intr(void)
__asm __volatile("cli" : : : "memory");
}
#ifdef _KERNEL
static __inline void
do_cpuid(u_int ax, u_int *p)
{
__asm __volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
__asm __volatile("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
}
#else
static __inline void
do_cpuid(u_int ax, u_int *p)
{
__asm __volatile(
"pushl\t%%ebx\n\t"
"cpuid\n\t"
"movl\t%%ebx,%1\n\t"
"popl\t%%ebx"
: "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
static __inline void
cpuid_count(u_int ax, u_int cx, u_int *p)
{
__asm __volatile(
"pushl\t%%ebx\n\t"
"cpuid\n\t"
"movl\t%%ebx,%1\n\t"
"popl\t%%ebx"
: "=a" (p[0]), "=DS" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax), "c" (cx));
}
#endif
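The second pair of inlines exists because userland i386 code may be built position-independent, where %ebx holds the GOT pointer and must not be clobbered by cpuid; the push/mov/pop sequence preserves it. A quick userland sketch using the helper (assumes an x86 FreeBSD host):

#include <sys/types.h>
#include <machine/cpufunc.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	u_int regs[4];
	char vendor[13];

	do_cpuid(0, regs);			/* leaf 0: max leaf + vendor string */
	memcpy(vendor + 0, &regs[1], 4);	/* %ebx */
	memcpy(vendor + 4, &regs[3], 4);	/* %edx */
	memcpy(vendor + 8, &regs[2], 4);	/* %ecx */
	vendor[12] = '\0';
	printf("%s\n", vendor);			/* e.g. "GenuineIntel" */
	return (0);
}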
static __inline void
enable_intr(void)


@ -863,13 +863,88 @@ rufetchtd(struct thread *td, struct rusage *ru)
calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}
/* XXX: the MI version is too slow to use: */
#ifndef __HAVE_INLINE_FLSLL
#define flsll(x) (fls((x) >> 32) != 0 ? fls((x) >> 32) + 32 : fls(x))
#endif
static uint64_t
mul64_by_fraction(uint64_t a, uint64_t b, uint64_t c)
{
uint64_t acc, bh, bl;
int i, s, sa, sb;
/*
* Compute floor(a * (b / c)) without overflowing, (b / c) <= 1.0.
* Calculate (a * b) / c accurately enough without overflowing. c
* must be nonzero, and its top bit must be 0. a or b must be
* <= c, and the implementation is tuned for b <= c.
*
* The comments about times are for use in calcru1() with units of
* microseconds for 'a' and stathz ticks at 128 Hz for b and c.
*
* Let n be the number of top zero bits in c. Each iteration
* either returns, or reduces b by right shifting it by at least n.
* The number of iterations is at most 1 + 64 / n, and the error is
* at most the number of iterations.
*
* It is very unusual to need even 2 iterations. Previous
* implementations overflowed essentially by returning early in the
* first iteration, with n = 38 giving overflow at 105+ hours and
* n = 32 giving overflow at 388+ days despite a more careful
* calculation. 388 days is a reasonable uptime, and the calculation
* needs to work for the uptime times the number of CPUs since 'a'
* is per-process.
*/
return ((a / c) * b + (a % c) * (b / c) + (a % c) * (b % c) / c);
if (a >= (uint64_t)1 << 63)
return (0); /* Unsupported arg -- can't happen. */
acc = 0;
for (i = 0; i < 128; i++) {
sa = flsll(a);
sb = flsll(b);
if (sa + sb <= 64)
/* Up to 105 hours on first iteration. */
return (acc + (a * b) / c);
if (a >= c) {
/*
* This reduction is based on a = q * c + r, with the
* remainder r < c. 'a' may be large to start, and
* moving bits from b into 'a' at the end of the loop
* sets the top bit of 'a', so the reduction makes
* significant progress.
*/
acc += (a / c) * b;
a %= c;
sa = flsll(a);
if (sa + sb <= 64)
/* Up to 388 days on first iteration. */
return (acc + (a * b) / c);
}
/*
* This step writes a * b as a * ((bh << s) + bl) =
* a * (bh << s) + a * bl = (a << s) * bh + a * bl. The 2
* additive terms are handled separately. Splitting in
* this way is linear except for rounding errors.
*
* s = 64 - sa is the maximum such that a << s fits in 64
* bits. Since a < c and c has at least 1 zero top bit,
* sa < 64 and s > 0. Thus this step makes progress by
* reducing b (it increases 'a', but taking remainders on
* the next iteration completes the reduction).
*
* Finally, the choice for s is just what is needed to keep
* a * bl from overflowing, so we don't need complications
* like a recursive call mul64_by_fraction(a, bl, c) to
* handle the second additive term.
*/
s = 64 - sa;
bh = b >> s;
bl = b - (bh << s);
acc += (a * bl) / c;
a <<= s;
b = bh;
}
return (0); /* Algorithm failure -- can't happen. */
}
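To make the overflow concrete, a small standalone demo with hypothetical but realistic inputs (microseconds of runtime, stathz = 128 ticks), showing why the plain (tu * ut) / tt product cannot be used once uptimes reach hundreds of days:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t tu = 400ULL * 24 * 3600 * 1000000;	/* ~400 days of runtime, in usec */
	uint64_t ut = 300ULL * 24 * 3600 * 128;		/* user ticks (3/4 of the total) */
	uint64_t tt = 400ULL * 24 * 3600 * 128;		/* total ticks */

	/* tu * ut is ~1.1e23, far beyond 2^64, so this silently wraps. */
	printf("naive (tu*ut)/tt = %ju usec\n", (uintmax_t)(tu * ut / tt));
	/*
	 * The true answer is 3/4 of tu, roughly 2.6e13 usec, which is what
	 * mul64_by_fraction(tu, ut, tt) is built to return without wrapping.
	 */
	printf("expected         = %ju usec\n", (uintmax_t)(tu / 4 * 3));
	return (0);
}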
static void
@ -896,15 +971,23 @@ calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
tu = ruxp->rux_tu;
}
/* Subdivide tu. Avoid overflow in the multiplications. */
if (__predict_true(tu <= ((uint64_t)1 << 38) && tt <= (1 << 26))) {
/* Up to 76 hours when stathz is 128. */
uu = (tu * ut) / tt;
su = (tu * st) / tt;
} else {
uu = mul64_by_fraction(tu, ut, tt);
su = mul64_by_fraction(tu, ut, st);
}
if (tu >= ruxp->rux_tu) {
/*
* The normal case, time increased.
* Enforce monotonicity of bucketed numbers.
*/
uu = mul64_by_fraction(tu, ut, tt);
if (uu < ruxp->rux_uu)
uu = ruxp->rux_uu;
su = mul64_by_fraction(tu, st, tt);
if (su < ruxp->rux_su)
su = ruxp->rux_su;
} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
@ -933,8 +1016,6 @@ calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
"to %ju usec for pid %d (%s)\n",
(uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
p->p_pid, p->p_comm);
uu = mul64_by_fraction(tu, ut, tt);
su = mul64_by_fraction(tu, st, tt);
}
ruxp->rux_uu = uu;


@ -379,9 +379,7 @@ void
pipe_dtor(struct pipe *dpipe)
{
struct pipe *peer;
ino_t ino;
ino = dpipe->pipe_ino;
peer = (dpipe->pipe_state & PIPE_NAMED) != 0 ? dpipe->pipe_peer : NULL;
funsetown(&dpipe->pipe_sigio);
pipeclose(dpipe);


@ -76,7 +76,7 @@ struct sysentvec elf64_freebsd_sysvec = {
.sv_setregs = exec_setregs,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_LP64,
.sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,
@ -131,7 +131,7 @@ struct sysentvec elf32_freebsd_sysvec = {
.sv_setregs = exec_setregs,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_ILP32,
.sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,


@ -68,5 +68,7 @@ CFLAGS.utils.c += -I${LIBSODIUM_INC} -I${LIBSODIUM_COMPAT}
SRCS += opt_param.h cryptodev_if.h bus_if.h device_if.h
SRCS += opt_ddb.h
SRCS += cbc_mac.c
SRCS += xform_cbc_mac.c
.include <bsd.kmod.mk>


@ -133,6 +133,7 @@ static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
static int lagg_snd_tag_alloc(struct ifnet *,
union if_snd_tag_alloc_params *,
struct m_snd_tag **);
static void lagg_snd_tag_free(struct m_snd_tag *);
#endif
static int lagg_setmulti(struct lagg_port *);
static int lagg_clrmulti(struct lagg_port *);
@ -514,6 +515,7 @@ lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#ifdef RATELIMIT
ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
ifp->if_snd_tag_free = lagg_snd_tag_free;
#endif
ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
@ -1568,6 +1570,13 @@ lagg_snd_tag_alloc(struct ifnet *ifp,
/* forward allocation request */
return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
static void
lagg_snd_tag_free(struct m_snd_tag *tag)
{
tag->ifp->if_snd_tag_free(tag);
}
#endif
static int


@ -267,6 +267,7 @@ static int vlan_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr);
#ifdef RATELIMIT
static int vlan_snd_tag_alloc(struct ifnet *,
union if_snd_tag_alloc_params *, struct m_snd_tag **);
static void vlan_snd_tag_free(struct m_snd_tag *);
#endif
static void vlan_qflush(struct ifnet *ifp);
static int vlan_setflag(struct ifnet *ifp, int flag, int status,
@ -1047,6 +1048,7 @@ vlan_clone_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
ifp->if_ioctl = vlan_ioctl;
#ifdef RATELIMIT
ifp->if_snd_tag_alloc = vlan_snd_tag_alloc;
ifp->if_snd_tag_free = vlan_snd_tag_free;
#endif
ifp->if_flags = VLAN_IFFLAGS;
ether_ifattach(ifp, eaddr);
@ -1934,4 +1936,10 @@ vlan_snd_tag_alloc(struct ifnet *ifp,
/* forward allocation request */
return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
}
static void
vlan_snd_tag_free(struct m_snd_tag *tag)
{
tag->ifp->if_snd_tag_free(tag);
}
#endif


@ -1468,12 +1468,17 @@ iflib_fast_intr(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
int result;
if (!iflib_started)
return (FILTER_HANDLED);
return (FILTER_STRAY);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
if (info->ifi_filter != NULL) {
result = info->ifi_filter(info->ifi_filter_arg);
if ((result & FILTER_SCHEDULE_THREAD) == 0)
return (result);
}
GROUPTASK_ENQUEUE(gtask);
return (FILTER_HANDLED);
@ -1488,15 +1493,18 @@ iflib_fast_intr_rxtx(void *arg)
iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
iflib_txq_t txq;
void *sc;
int i, cidx;
int i, cidx, result;
qidx_t txqid;
if (!iflib_started)
return (FILTER_HANDLED);
return (FILTER_STRAY);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
if (info->ifi_filter != NULL) {
result = info->ifi_filter(info->ifi_filter_arg);
if ((result & FILTER_SCHEDULE_THREAD) == 0)
return (result);
}
ctx = rxq->ifr_ctx;
sc = ctx->ifc_softc;
@ -1531,13 +1539,17 @@ iflib_fast_intr_ctx(void *arg)
{
iflib_filter_info_t info = arg;
struct grouptask *gtask = info->ifi_task;
int result;
if (!iflib_started)
return (FILTER_HANDLED);
return (FILTER_STRAY);
DBG_COUNTER_INC(fast_intrs);
if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
return (FILTER_HANDLED);
if (info->ifi_filter != NULL) {
result = info->ifi_filter(info->ifi_filter_arg);
if ((result & FILTER_SCHEDULE_THREAD) == 0)
return (result);
}
GROUPTASK_ENQUEUE(gtask);
return (FILTER_HANDLED);
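With this change a driver's interrupt filter can hand back FILTER_SCHEDULE_THREAD (or FILTER_STRAY) and have iflib honor it instead of unconditionally enqueueing the group task. A hypothetical per-queue filter written against the new contract (all mydrv_* names are made up for illustration):

/* Kernel-context sketch; the FILTER_* constants come from <sys/bus.h>. */
static int
mydrv_rxq_intr(void *arg)
{
	struct mydrv_rx_queue *rxq = arg;	/* hypothetical per-queue state */

	if (!mydrv_intr_asserted(rxq))		/* hypothetical register check */
		return (FILTER_STRAY);
	mydrv_disable_queue_intr(rxq);		/* hypothetical; re-armed by the task */
	return (FILTER_SCHEDULE_THREAD);	/* iflib_fast_intr() enqueues the gtask */
}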


@ -1565,6 +1565,7 @@ in_pcbfree_deferred(epoch_context_t ctx)
inp = __containerof(ctx, struct inpcb, inp_epoch_ctx);
INP_WLOCK(inp);
CURVNET_SET(inp->inp_vnet);
#ifdef INET
struct ip_moptions *imo = inp->inp_moptions;
inp->inp_moptions = NULL;
@ -1597,6 +1598,7 @@ in_pcbfree_deferred(epoch_context_t ctx)
#ifdef INET
inp_freemoptions(imo);
#endif
CURVNET_RESTORE();
}
/*


@ -302,7 +302,7 @@ tcp_twstart(struct tcpcb *tp)
if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt) &&
recwin < (tp->rcv_adv - tp->rcv_nxt))
recwin = (tp->rcv_adv - tp->rcv_nxt);
tw->last_win = htons((u_short)(recwin >> tp->rcv_scale));
tw->last_win = (u_short)(recwin >> tp->rcv_scale);
/*
* Set t_recent if timestamps are used on the connection.

sys/opencrypto/cbc_mac.c (new file, 252 lines)

@ -0,0 +1,252 @@
/*
* Copyright (c) 2018-2019 iXsystems Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/param.h>
#include <sys/endian.h>
#include <opencrypto/cbc_mac.h>
#include <opencrypto/xform_auth.h>
/*
* Given two CCM_CBC_BLOCK_LEN blocks, xor
* them into dst, and then encrypt dst.
*/
static void
xor_and_encrypt(struct aes_cbc_mac_ctx *ctx,
const uint8_t *src, uint8_t *dst)
{
const uint64_t *b1;
uint64_t *b2;
uint64_t temp_block[CCM_CBC_BLOCK_LEN/sizeof(uint64_t)];
b1 = (const uint64_t*)src;
b2 = (uint64_t*)dst;
for (size_t count = 0;
count < CCM_CBC_BLOCK_LEN/sizeof(uint64_t);
count++) {
temp_block[count] = b1[count] ^ b2[count];
}
rijndaelEncrypt(ctx->keysched, ctx->rounds, (void*)temp_block, dst);
}
void
AES_CBC_MAC_Init(struct aes_cbc_mac_ctx *ctx)
{
bzero(ctx, sizeof(*ctx));
}
void
AES_CBC_MAC_Setkey(struct aes_cbc_mac_ctx *ctx, const uint8_t *key, uint16_t klen)
{
ctx->rounds = rijndaelKeySetupEnc(ctx->keysched, key, klen * 8);
}
/*
* This is called to set the nonce, aka IV.
* Before this call, the authDataLength and cryptDataLength fields
* MUST have been set. Sadly, there's no way to return an error.
*
* The CBC-MAC algorithm requires that the first block contain the
* nonce, as well as information about the sizes and lengths involved.
*/
void
AES_CBC_MAC_Reinit(struct aes_cbc_mac_ctx *ctx, const uint8_t *nonce, uint16_t nonceLen)
{
uint8_t b0[CCM_CBC_BLOCK_LEN];
uint8_t *bp = b0, flags = 0;
uint8_t L = 0;
uint64_t dataLength = ctx->cryptDataLength;
KASSERT(ctx->authDataLength != 0 || ctx->cryptDataLength != 0,
("Auth Data and Data lengths cannot both be 0"));
KASSERT(nonceLen >= 7 && nonceLen <= 13,
("nonceLen must be between 7 and 13 bytes"));
ctx->nonce = nonce;
ctx->nonceLength = nonceLen;
ctx->authDataCount = 0;
ctx->blockIndex = 0;
explicit_bzero(ctx->staging_block, sizeof(ctx->staging_block));
/*
* Need to determine the L field value. This is the number of
* bytes needed to specify the length of the message; the length
* is whatever is left in the 16 bytes after specifying flags and
* the nonce.
*/
L = 15 - nonceLen;
flags = ((ctx->authDataLength > 0) << 6) +
(((AES_CBC_MAC_HASH_LEN - 2) / 2) << 3) +
L - 1;
/*
* Now we need to set up the first block, which has flags, nonce,
* and the message length.
*/
b0[0] = flags;
bcopy(nonce, b0 + 1, nonceLen);
bp = b0 + 1 + nonceLen;
/* Need to copy L' [aka L-1] bytes of cryptDataLength */
for (uint8_t *dst = b0 + sizeof(b0) - 1; dst >= bp; dst--) {
*dst = dataLength;
dataLength >>= 8;
}
/* Now need to encrypt b0 */
rijndaelEncrypt(ctx->keysched, ctx->rounds, b0, ctx->block);
/* If there is auth data, we need to set up the staging block */
if (ctx->authDataLength) {
if (ctx->authDataLength < ((1<<16) - (1<<8))) {
uint16_t sizeVal = htobe16(ctx->authDataLength);
bcopy(&sizeVal, ctx->staging_block, sizeof(sizeVal));
ctx->blockIndex = sizeof(sizeVal);
} else if (ctx->authDataLength < (1ULL<<32)) {
uint32_t sizeVal = htobe32(ctx->authDataLength);
ctx->staging_block[0] = 0xff;
ctx->staging_block[1] = 0xfe;
bcopy(&sizeVal, ctx->staging_block+2, sizeof(sizeVal));
ctx->blockIndex = 2 + sizeof(sizeVal);
} else {
uint64_t sizeVal = htobe64(ctx->authDataLength);
ctx->staging_block[0] = 0xff;
ctx->staging_block[1] = 0xff;
bcopy(&sizeVal, ctx->staging_block+2, sizeof(sizeVal));
ctx->blockIndex = 2 + sizeof(sizeVal);
}
}
}
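Put concretely, with the 12-byte nonce and 16-byte tag used elsewhere in this commit, AAD present, and a 1024-byte payload, the B_0 block built here would be (illustrative values only):

/*
 * flags = (AAD present << 6) | (((16 - 2) / 2) << 3) | (L - 1)
 *       = 0x40 | 0x38 | 0x02 = 0x7a, with L = 15 - 12 = 3.
 */
uint8_t b0[CCM_CBC_BLOCK_LEN] = {
	0x7a,					/* flags */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* bytes 1..12: the nonce */
	0x00, 0x04, 0x00,			/* L = 3 length bytes: 1024-byte payload */
};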
int
AES_CBC_MAC_Update(struct aes_cbc_mac_ctx *ctx, const uint8_t *data,
uint16_t length)
{
size_t copy_amt;
/*
* This will be called in one of two phases:
* (1) Applying authentication data, or
* (2) Applying the payload data.
*
* Because CBC-MAC puts the authentication data size before the
* data, subsequent calls won't be block-size-aligned, which
* complicates things a fair bit.
*
* The payload data doesn't have that problem.
*/
if (ctx->authDataCount < ctx->authDataLength) {
/*
* We need to process data as authentication data.
* Since we may be out of sync, we may also need
* to pad out the staging block.
*/
const uint8_t *ptr = data;
while (length > 0) {
copy_amt = MIN(length,
sizeof(ctx->staging_block) - ctx->blockIndex);
bcopy(ptr, ctx->staging_block + ctx->blockIndex,
copy_amt);
ptr += copy_amt;
length -= copy_amt;
ctx->authDataCount += copy_amt;
ctx->blockIndex += copy_amt;
ctx->blockIndex %= sizeof(ctx->staging_block);
if (ctx->authDataCount == ctx->authDataLength)
length = 0;
if (ctx->blockIndex == 0 ||
ctx->authDataCount >= ctx->authDataLength) {
/*
* We're done with this block, so we
* xor staging_block with block, and then
* encrypt it.
*/
xor_and_encrypt(ctx, ctx->staging_block, ctx->block);
bzero(ctx->staging_block, sizeof(ctx->staging_block));
ctx->blockIndex = 0;
}
}
return (0);
}
/*
* If we're here, then we're encoding payload data.
* This is marginally easier, except that _Update can
* be called with non-aligned update lengths. As a result,
* we still need to use the staging block.
*/
KASSERT((length + ctx->cryptDataCount) <= ctx->cryptDataLength,
("More encryption data than allowed"));
while (length) {
uint8_t *ptr;
copy_amt = MIN(sizeof(ctx->staging_block) - ctx->blockIndex,
length);
ptr = ctx->staging_block + ctx->blockIndex;
bcopy(data, ptr, copy_amt);
data += copy_amt;
ctx->blockIndex += copy_amt;
ctx->cryptDataCount += copy_amt;
length -= copy_amt;
if (ctx->blockIndex == sizeof(ctx->staging_block)) {
/* We've got a full block */
xor_and_encrypt(ctx, ctx->staging_block, ctx->block);
ctx->blockIndex = 0;
bzero(ctx->staging_block, sizeof(ctx->staging_block));
}
}
return (0);
}
void
AES_CBC_MAC_Final(uint8_t *buf, struct aes_cbc_mac_ctx *ctx)
{
uint8_t s0[CCM_CBC_BLOCK_LEN];
/*
* We first need to check to see if we've got any data
* left over to encrypt.
*/
if (ctx->blockIndex != 0) {
xor_and_encrypt(ctx, ctx->staging_block, ctx->block);
ctx->cryptDataCount += ctx->blockIndex;
ctx->blockIndex = 0;
explicit_bzero(ctx->staging_block, sizeof(ctx->staging_block));
}
bzero(s0, sizeof(s0));
s0[0] = (15 - ctx->nonceLength) - 1;
bcopy(ctx->nonce, s0 + 1, ctx->nonceLength);
rijndaelEncrypt(ctx->keysched, ctx->rounds, s0, s0);
for (size_t indx = 0; indx < AES_CBC_MAC_HASH_LEN; indx++)
buf[indx] = ctx->block[indx] ^ s0[indx];
explicit_bzero(s0, sizeof(s0));
}

sys/opencrypto/cbc_mac.h (new file, 67 lines)

@ -0,0 +1,67 @@
/*
* Copyright (c) 2014 The FreeBSD Foundation
* Copyright (c) 2018, iXsystems Inc.
* All rights reserved.
*
* This software was developed by Sean Eric Fagan, with lots of references
* to existing AES-CCM (gmac) code.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#ifndef _CBC_CCM_H
# define _CBC_CCM_H
# include <sys/types.h>
# include <crypto/rijndael/rijndael.h>
# define CCM_CBC_BLOCK_LEN 16 /* 128 bits */
# define CCM_CBC_MAX_DIGEST_LEN 16
# define CCM_CBC_MIN_DIGEST_LEN 4
/*
* This is the authentication context structure;
* the encryption one is similar.
*/
struct aes_cbc_mac_ctx {
uint64_t authDataLength, authDataCount;
uint64_t cryptDataLength, cryptDataCount;
int blockIndex;
uint8_t staging_block[CCM_CBC_BLOCK_LEN];
uint8_t block[CCM_CBC_BLOCK_LEN];
const uint8_t *nonce;
int nonceLength; /* This one is in bytes, not bits! */
/* AES state data */
int rounds;
uint32_t keysched[4*(RIJNDAEL_MAXNR+1)];
};
void AES_CBC_MAC_Init(struct aes_cbc_mac_ctx *);
void AES_CBC_MAC_Setkey(struct aes_cbc_mac_ctx *, const uint8_t *, uint16_t);
void AES_CBC_MAC_Reinit(struct aes_cbc_mac_ctx *, const uint8_t *, uint16_t);
int AES_CBC_MAC_Update(struct aes_cbc_mac_ctx *, const uint8_t *, uint16_t);
void AES_CBC_MAC_Final(uint8_t *, struct aes_cbc_mac_ctx *);
#endif /* _CBC_CCM_H */
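A kernel-context sketch of driving this API end to end (key, nonce, aad, and payload buffers are assumed to exist; AES_CBC_MAC_HASH_LEN comes from cryptodev.h; the length fields must be set before Reinit, as the comment in cbc_mac.c requires):

struct aes_cbc_mac_ctx ctx;
uint8_t tag[AES_CBC_MAC_HASH_LEN];

AES_CBC_MAC_Init(&ctx);
AES_CBC_MAC_Setkey(&ctx, key, 16);		/* AES-128 */
ctx.authDataLength = aadlen;			/* set both lengths before Reinit */
ctx.cryptDataLength = datalen;
AES_CBC_MAC_Reinit(&ctx, nonce, 12);		/* 12-byte nonce */
AES_CBC_MAC_Update(&ctx, aad, aadlen);
AES_CBC_MAC_Update(&ctx, payload, datalen);
AES_CBC_MAC_Final(tag, &ctx);			/* 16-byte tag */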


@ -444,6 +444,9 @@ cryptof_ioctl(
case CRYPTO_CHACHA20:
txform = &enc_xform_chacha20;
break;
case CRYPTO_AES_CCM_16:
txform = &enc_xform_ccm;
break;
default:
CRYPTDEB("invalid cipher");
@ -488,6 +491,25 @@ cryptof_ioctl(
thash = &auth_hash_nist_gmac_aes_256;
break;
case CRYPTO_AES_CCM_CBC_MAC:
switch (sop->keylen) {
case 16:
thash = &auth_hash_ccm_cbc_mac_128;
break;
case 24:
thash = &auth_hash_ccm_cbc_mac_192;
break;
case 32:
thash = &auth_hash_ccm_cbc_mac_256;
break;
default:
CRYPTDEB("Invalid CBC MAC key size %d",
sop->keylen);
SDT_PROBE1(opencrypto, dev, ioctl,
error, __LINE__);
return (EINVAL);
}
break;
#ifdef notdef
case CRYPTO_MD5:
thash = &auth_hash_md5;
@ -1003,12 +1025,13 @@ cryptodev_aead(
}
/*
* For GCM, crd_len covers only the AAD. For other ciphers
* For GCM/CCM, crd_len covers only the AAD. For other ciphers
* chained with an HMAC, crd_len covers both the AAD and the
* cipher text.
*/
crda->crd_skip = 0;
if (cse->cipher == CRYPTO_AES_NIST_GCM_16)
if (cse->cipher == CRYPTO_AES_NIST_GCM_16 ||
cse->cipher == CRYPTO_AES_CCM_16)
crda->crd_len = caead->aadlen;
else
crda->crd_len = caead->aadlen + caead->len;


@ -86,6 +86,7 @@
#define SHA1_KPDK_HASH_LEN 20
#define AES_GMAC_HASH_LEN 16
#define POLY1305_HASH_LEN 16
#define AES_CBC_MAC_HASH_LEN 16
/* Maximum hash algorithm result length */
#define HASH_MAX_LEN SHA2_512_HASH_LEN /* Keep this updated */
@ -107,6 +108,9 @@
#define AES_128_GMAC_KEY_LEN 16
#define AES_192_GMAC_KEY_LEN 24
#define AES_256_GMAC_KEY_LEN 32
#define AES_128_CBC_MAC_KEY_LEN 16
#define AES_192_CBC_MAC_KEY_LEN 24
#define AES_256_CBC_MAC_KEY_LEN 32
#define POLY1305_KEY_LEN 32
@ -129,6 +133,7 @@
#define ARC4_IV_LEN 1
#define AES_GCM_IV_LEN 12
#define AES_CCM_IV_LEN 12
#define AES_XTS_IV_LEN 8
#define AES_XTS_ALPHA 0x87 /* GF(2^128) generator polynomial */
@ -199,7 +204,9 @@
#define CRYPTO_SHA2_384 36
#define CRYPTO_SHA2_512 37
#define CRYPTO_POLY1305 38
#define CRYPTO_ALGORITHM_MAX 38 /* Keep updated - see below */
#define CRYPTO_AES_CCM_CBC_MAC 39 /* auth side */
#define CRYPTO_AES_CCM_16 40 /* cipher side */
#define CRYPTO_ALGORITHM_MAX 40 /* Keep updated - see below */
#define CRYPTO_ALGO_VALID(x) ((x) >= CRYPTO_ALGORITHM_MIN && \
(x) <= CRYPTO_ALGORITHM_MAX)


@ -62,6 +62,9 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include "cryptodev_if.h"
_Static_assert(AES_CCM_IV_LEN == AES_GCM_IV_LEN,
"AES_GCM_IV_LEN must currently be the same as AES_CCM_IV_LEN");
static int32_t swcr_id;
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
@ -506,6 +509,7 @@ swcr_authenc(struct cryptop *crp)
caddr_t buf = (caddr_t)crp->crp_buf;
uint32_t *blkp;
int aadlen, blksz, i, ivlen, len, iskip, oskip, r;
int isccm = 0;
ivlen = blksz = iskip = oskip = 0;
@ -520,13 +524,18 @@ swcr_authenc(struct cryptop *crp)
sw = &ses->swcr_algorithms[i];
switch (sw->sw_alg) {
case CRYPTO_AES_CCM_16:
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_NIST_GMAC:
swe = sw;
crde = crd;
exf = swe->sw_exf;
ivlen = 12;
/* AES_CCM_IV_LEN and AES_GCM_IV_LEN are both 12 */
ivlen = AES_CCM_IV_LEN;
break;
case CRYPTO_AES_CCM_CBC_MAC:
isccm = 1;
/* FALLTHROUGH */
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
@ -544,8 +553,26 @@ swcr_authenc(struct cryptop *crp)
}
if (crde == NULL || crda == NULL)
return (EINVAL);
/*
* We need to make sure that the auth algorithm matches the
* encr algorithm. Specifically, AES-GCM must go with
* AES NIST GMAC, and AES-CCM must go with CBC-MAC.
*/
if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16) {
switch (crda->crd_alg) {
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
break; /* Good! */
default:
return (EINVAL); /* Not good! */
}
} else if (crde->crd_alg == CRYPTO_AES_CCM_16 &&
crda->crd_alg != CRYPTO_AES_CCM_CBC_MAC)
return (EINVAL);
if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
if ((crde->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
crde->crd_alg == CRYPTO_AES_CCM_16) &&
(crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
return (EINVAL);
@ -576,6 +603,15 @@ swcr_authenc(struct cryptop *crp)
}
}
if (swa->sw_alg == CRYPTO_AES_CCM_CBC_MAC) {
/*
* AES CCM-CBC needs to know the length of
* both the auth data and the payload data before
* doing the auth computation.
*/
ctx.aes_cbc_mac_ctx.authDataLength = crda->crd_len;
ctx.aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
}
/* Supply MAC with IV */
if (axf->Reinit)
axf->Reinit(&ctx, iv, ivlen);
@ -610,16 +646,30 @@ swcr_authenc(struct cryptop *crp)
bzero(blk, blksz);
crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
blk);
/*
* One of the problems with CCM+CBC is that the authentication
* is done on the unencrypted data. As a result, we have
* to do the authentication update at different times,
* depending on whether it's CCM or not.
*/
if (crde->crd_flags & CRD_F_ENCRYPT) {
if (isccm)
axf->Update(&ctx, blk, len);
if (exf->encrypt_multi != NULL)
exf->encrypt_multi(swe->sw_kschedule, blk,
len);
else
exf->encrypt(swe->sw_kschedule, blk);
axf->Update(&ctx, blk, len);
if (!isccm)
axf->Update(&ctx, blk, len);
crypto_copyback(crp->crp_flags, buf,
crde->crd_skip + i, len, blk);
} else {
if (isccm) {
KASSERT(exf->encrypt_multi == NULL,
("assume CCM is single-block only"));
exf->decrypt(swe->sw_kschedule, blk);
}
axf->Update(&ctx, blk, len);
}
}
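The branches above boil down to the following per-block order on the tag-computing pass (sketch of the logic, not literal code):

/*
 * GCM encrypt: E(P), then Update(C)      GCM decrypt: Update(C) only
 * CCM encrypt: Update(P), then E(P)      CCM decrypt: D(C), then Update(P)
 */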
@ -650,6 +700,11 @@ swcr_authenc(struct cryptop *crp)
r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
if (r == 0) {
/* tag matches, decrypt data */
if (isccm) {
KASSERT(exf->reinit != NULL,
("AES-CCM reinit function must be set"));
exf->reinit(swe->sw_kschedule, iv);
}
for (i = 0; i < crde->crd_len; i += blksz) {
len = MIN(crde->crd_len - i, blksz);
if (len < blksz)
@ -799,6 +854,9 @@ swcr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
case CRYPTO_AES_NIST_GCM_16:
txf = &enc_xform_aes_nist_gcm;
goto enccommon;
case CRYPTO_AES_CCM_16:
txf = &enc_xform_ccm;
goto enccommon;
case CRYPTO_AES_NIST_GMAC:
txf = &enc_xform_aes_nist_gmac;
swd->sw_exf = txf;
@ -943,6 +1001,22 @@ swcr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
swd->sw_axf = axf;
break;
case CRYPTO_AES_CCM_CBC_MAC:
switch (cri->cri_klen) {
case 128:
axf = &auth_hash_ccm_cbc_mac_128;
break;
case 192:
axf = &auth_hash_ccm_cbc_mac_192;
break;
case 256:
axf = &auth_hash_ccm_cbc_mac_256;
break;
default:
swcr_freesession(dev, cses);
return EINVAL;
}
goto auth4common;
case CRYPTO_AES_128_NIST_GMAC:
axf = &auth_hash_nist_gmac_aes_128;
goto auth4common;
@ -1042,6 +1116,7 @@ swcr_freesession(device_t dev, crypto_session_t cses)
case CRYPTO_CAMELLIA_CBC:
case CRYPTO_NULL_CBC:
case CRYPTO_CHACHA20:
case CRYPTO_AES_CCM_16:
txf = swd->sw_exf;
if (swd->sw_kschedule)
@ -1056,6 +1131,7 @@ swcr_freesession(device_t dev, crypto_session_t cses)
case CRYPTO_SHA2_512_HMAC:
case CRYPTO_RIPEMD160_HMAC:
case CRYPTO_NULL_HMAC:
case CRYPTO_AES_CCM_CBC_MAC:
axf = swd->sw_axf;
if (swd->sw_ictx) {
@ -1201,6 +1277,8 @@ swcr_process(device_t dev, struct cryptop *crp, int hint)
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
case CRYPTO_AES_CCM_16:
case CRYPTO_AES_CCM_CBC_MAC:
crp->crp_etype = swcr_authenc(crp);
goto done;
@ -1291,6 +1369,8 @@ swcr_attach(device_t dev)
REGISTER(CRYPTO_BLAKE2B);
REGISTER(CRYPTO_BLAKE2S);
REGISTER(CRYPTO_CHACHA20);
REGISTER(CRYPTO_AES_CCM_16);
REGISTER(CRYPTO_AES_CCM_CBC_MAC);
REGISTER(CRYPTO_POLY1305);
#undef REGISTER


@ -57,6 +57,7 @@ static void aes_icm_crypt(caddr_t, u_int8_t *);
static void aes_icm_zerokey(u_int8_t **);
static void aes_icm_reinit(caddr_t, u_int8_t *);
static void aes_gcm_reinit(caddr_t, u_int8_t *);
static void aes_ccm_reinit(caddr_t, u_int8_t *);
/* Encryption instances */
struct enc_xform enc_xform_aes_icm = {
@ -79,6 +80,18 @@ struct enc_xform enc_xform_aes_nist_gcm = {
aes_gcm_reinit,
};
struct enc_xform enc_xform_ccm = {
.type = CRYPTO_AES_CCM_16,
.name = "AES-CCM",
.blocksize = AES_ICM_BLOCK_LEN, .ivsize = AES_CCM_IV_LEN,
.minkey = AES_MIN_KEY, .maxkey = AES_MAX_KEY,
.encrypt = aes_icm_crypt,
.decrypt = aes_icm_crypt,
.setkey = aes_icm_setkey,
.zerokey = aes_icm_zerokey,
.reinit = aes_ccm_reinit,
};
/*
* Encryption wrapper routines.
*/
@ -104,6 +117,21 @@ aes_gcm_reinit(caddr_t key, u_int8_t *iv)
ctx->ac_block[AESICM_BLOCKSIZE - 1] = 2;
}
static void
aes_ccm_reinit(caddr_t key, u_int8_t *iv)
{
struct aes_icm_ctx *ctx;
ctx = (struct aes_icm_ctx*)key;
/* CCM has flags, then the IV, then the counter, which starts at 1 */
bzero(ctx->ac_block, sizeof(ctx->ac_block));
/* 3 bytes for length field; this gives a nonce of 12 bytes */
ctx->ac_block[0] = (15 - AES_CCM_IV_LEN) - 1;
bcopy(iv, ctx->ac_block+1, AES_CCM_IV_LEN);
ctx->ac_block[AESICM_BLOCKSIZE - 1] = 1;
}
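For contrast with the CBC-MAC B_0 block shown earlier, the first counter block built here for the same 12-byte nonce carries only L - 1 in its flags byte, and counting starts at 1 (counter value 0 is used separately for the S_0 block that masks the tag). Illustrative layout:

uint8_t a1[AESICM_BLOCKSIZE] = {
	0x02,					/* flags: (15 - AES_CCM_IV_LEN) - 1 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,	/* bytes 1..12: the nonce */
	0x00, 0x00, 0x01,			/* 3-byte counter, starts at 1 */
};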
static void
aes_icm_crypt(caddr_t key, u_int8_t *data)
{


@ -42,6 +42,7 @@
#include <crypto/sha2/sha512.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/gmac.h>
#include <opencrypto/cbc_mac.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_userland.h>
@ -85,6 +86,9 @@ extern struct auth_hash auth_hash_nist_gmac_aes_256;
extern struct auth_hash auth_hash_blake2b;
extern struct auth_hash auth_hash_blake2s;
extern struct auth_hash auth_hash_poly1305;
extern struct auth_hash auth_hash_ccm_cbc_mac_128;
extern struct auth_hash auth_hash_ccm_cbc_mac_192;
extern struct auth_hash auth_hash_ccm_cbc_mac_256;
union authctx {
MD5_CTX md5ctx;
@ -95,6 +99,7 @@ union authctx {
SHA384_CTX sha384ctx;
SHA512_CTX sha512ctx;
struct aes_gmac_ctx aes_gmac_ctx;
struct aes_cbc_mac_ctx aes_cbc_mac_ctx;
};
#endif /* _CRYPTO_XFORM_AUTH_H_ */


@ -0,0 +1,55 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <opencrypto/cbc_mac.h>
#include <opencrypto/xform_auth.h>
/* Authentication instances */
struct auth_hash auth_hash_ccm_cbc_mac_128 = {
.type = CRYPTO_AES_CCM_CBC_MAC,
.name = "CBC-CCM-AES-128",
.keysize = AES_128_CBC_MAC_KEY_LEN,
.hashsize = AES_CBC_MAC_HASH_LEN,
.ctxsize = sizeof(struct aes_cbc_mac_ctx),
.blocksize = CCM_CBC_BLOCK_LEN,
.Init = (void (*)(void *)) AES_CBC_MAC_Init,
.Setkey =
(void (*)(void *, const u_int8_t *, u_int16_t))AES_CBC_MAC_Setkey,
.Reinit =
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Reinit,
.Update =
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Update,
.Final = (void (*)(u_int8_t *, void *)) AES_CBC_MAC_Final,
};
struct auth_hash auth_hash_ccm_cbc_mac_192 = {
.type = CRYPTO_AES_CCM_CBC_MAC,
.name = "CBC-CCM-AES-192",
.keysize = AES_192_CBC_MAC_KEY_LEN,
.hashsize = AES_CBC_MAC_HASH_LEN,
.ctxsize = sizeof(struct aes_cbc_mac_ctx),
.blocksize = CCM_CBC_BLOCK_LEN,
.Init = (void (*)(void *)) AES_CBC_MAC_Init,
.Setkey =
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Setkey,
.Reinit =
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Reinit,
.Update =
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Update,
.Final = (void (*)(u_int8_t *, void *)) AES_CBC_MAC_Final,
};
struct auth_hash auth_hash_ccm_cbc_mac_256 = {
.type = CRYPTO_AES_CCM_CBC_MAC,
.name = "CBC-CCM-AES-256",
.keysize = AES_256_CBC_MAC_KEY_LEN,
.hashsize = AES_CBC_MAC_HASH_LEN,
.ctxsize = sizeof(struct aes_cbc_mac_ctx),
.blocksize = CCM_CBC_BLOCK_LEN,
.Init = (void (*)(void *)) AES_CBC_MAC_Init,
.Setkey =
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Setkey,
.Reinit =
(void (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Reinit,
.Update =
(int (*)(void *, const u_int8_t *, u_int16_t)) AES_CBC_MAC_Update,
.Final = (void (*)(u_int8_t *, void *)) AES_CBC_MAC_Final,
};


@ -84,6 +84,7 @@ extern struct enc_xform enc_xform_aes_xts;
extern struct enc_xform enc_xform_arc4;
extern struct enc_xform enc_xform_camellia;
extern struct enc_xform enc_xform_chacha20;
extern struct enc_xform enc_xform_ccm;
struct aes_icm_ctx {
u_int32_t ac_ek[4*(RIJNDAEL_MAXNR + 1)];


@ -2973,14 +2973,19 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
/* XXX KASSERT off and size are within a single page? */
mtx_lock(&zero_page_mutex);
va = zero_page_va;
if (hw_direct_map) {
va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
bzero((caddr_t)va + off, size);
} else {
mtx_lock(&zero_page_mutex);
va = zero_page_va;
mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
bzero((caddr_t)va + off, size);
mmu_booke_kremove(mmu, va);
mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
bzero((caddr_t)va + off, size);
mmu_booke_kremove(mmu, va);
mtx_unlock(&zero_page_mutex);
mtx_unlock(&zero_page_mutex);
}
}
/*
@ -2991,15 +2996,23 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
vm_offset_t off, va;
mtx_lock(&zero_page_mutex);
va = zero_page_va;
if (hw_direct_map) {
va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
} else {
va = zero_page_va;
mtx_lock(&zero_page_mutex);
mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
}
mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
for (off = 0; off < PAGE_SIZE; off += cacheline_size)
__asm __volatile("dcbz 0,%0" :: "r"(va + off));
mmu_booke_kremove(mmu, va);
mtx_unlock(&zero_page_mutex);
if (!hw_direct_map) {
mmu_booke_kremove(mmu, va);
mtx_unlock(&zero_page_mutex);
}
}
/*
@ -3015,13 +3028,20 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
sva = copy_page_src_va;
dva = copy_page_dst_va;
mtx_lock(&copy_page_mutex);
mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
if (hw_direct_map) {
sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
} else {
mtx_lock(&copy_page_mutex);
mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
}
memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
mmu_booke_kremove(mmu, dva);
mmu_booke_kremove(mmu, sva);
mtx_unlock(&copy_page_mutex);
if (!hw_direct_map) {
mmu_booke_kremove(mmu, dva);
mmu_booke_kremove(mmu, sva);
mtx_unlock(&copy_page_mutex);
}
}
static inline void
@ -3032,26 +3052,34 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
vm_offset_t a_pg_offset, b_pg_offset;
int cnt;
mtx_lock(&copy_page_mutex);
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
mmu_booke_kenter(mmu, copy_page_src_va,
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
a_cp = (char *)copy_page_src_va + a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
mmu_booke_kenter(mmu, copy_page_dst_va,
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
b_cp = (char *)copy_page_dst_va + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
mmu_booke_kremove(mmu, copy_page_dst_va);
mmu_booke_kremove(mmu, copy_page_src_va);
a_offset += cnt;
b_offset += cnt;
xfersize -= cnt;
if (hw_direct_map) {
a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ma)) +
a_offset);
b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*mb)) +
b_offset);
bcopy(a_cp, b_cp, xfersize);
} else {
mtx_lock(&copy_page_mutex);
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
mmu_booke_kenter(mmu, copy_page_src_va,
VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
a_cp = (char *)copy_page_src_va + a_pg_offset;
b_pg_offset = b_offset & PAGE_MASK;
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
mmu_booke_kenter(mmu, copy_page_dst_va,
VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
b_cp = (char *)copy_page_dst_va + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
mmu_booke_kremove(mmu, copy_page_dst_va);
mmu_booke_kremove(mmu, copy_page_src_va);
a_offset += cnt;
b_offset += cnt;
xfersize -= cnt;
}
mtx_unlock(&copy_page_mutex);
}
mtx_unlock(&copy_page_mutex);
}
static vm_offset_t
@ -3064,6 +3092,9 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
paddr = VM_PAGE_TO_PHYS(m);
if (hw_direct_map)
return (PHYS_TO_DMAP(paddr));
flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
flags |= PTE_PS_4KB;
@ -3097,6 +3128,9 @@ mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
pte_t *pte;
if (hw_direct_map)
return;
pte = pte_find(mmu, kernel_pmap, addr);
KASSERT(PCPU_GET(qmap_addr) == addr,
@ -3880,29 +3914,23 @@ tlb1_write_entry_int(void *arg)
mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(args->idx);
mtspr(SPR_MAS0, mas0);
__asm __volatile("isync");
mtspr(SPR_MAS1, args->e->mas1);
__asm __volatile("isync");
mtspr(SPR_MAS2, args->e->mas2);
__asm __volatile("isync");
mtspr(SPR_MAS3, args->e->mas3);
__asm __volatile("isync");
switch ((mfpvr() >> 16) & 0xFFFF) {
case FSL_E500mc:
case FSL_E5500:
case FSL_E6500:
mtspr(SPR_MAS8, 0);
__asm __volatile("isync");
/* FALLTHROUGH */
case FSL_E500v2:
mtspr(SPR_MAS7, args->e->mas7);
__asm __volatile("isync");
break;
default:
break;
}
__asm __volatile("tlbwe; isync; msync");
__asm __volatile("isync; tlbwe; isync; msync");
}
@ -4325,12 +4353,26 @@ tid_flush(tlbtid_t tid)
msr = mfmsr();
__asm __volatile("wrteei 0");
/*
* Newer (e500mc and later) have tlbilx, which doesn't broadcast, so use
* it for PID invalidation.
*/
switch ((mfpvr() >> 16) & 0xffff) {
case FSL_E500mc:
case FSL_E5500:
case FSL_E6500:
mtspr(SPR_MAS6, tid << MAS6_SPID0_SHIFT);
/* tlbilxpid */
__asm __volatile("isync; .long 0x7c000024; isync; msync");
mtmsr(msr);
return;
}
for (way = 0; way < TLB0_WAYS; way++)
for (entry = 0; entry < TLB0_ENTRIES_PER_WAY; entry++) {
mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
mtspr(SPR_MAS0, mas0);
__asm __volatile("isync");
mas2 = entry << MAS2_TLB0_ENTRY_IDX_SHIFT;
mtspr(SPR_MAS2, mas2);
@ -4407,7 +4449,6 @@ DB_SHOW_COMMAND(tlb0, tlb0_print_tlbentries)
mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
mtspr(SPR_MAS0, mas0);
__asm __volatile("isync");
mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
mtspr(SPR_MAS2, mas2);


@ -115,7 +115,7 @@ struct sysentvec elf32_freebsd_sysvec = {
.sv_fixlimit = NULL,
#endif
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP,
.sv_flags = SV_ABI_FREEBSD | SV_ILP32 | SV_SHP | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_shared_page_base = FREEBSD32_SHAREDPAGE,


@ -79,7 +79,7 @@ struct sysentvec elf64_freebsd_sysvec_v1 = {
.sv_setregs = exec_setregs_funcdesc,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP,
.sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,


@ -474,6 +474,10 @@ set_mcontext(struct thread *td, mcontext_t *mcp)
else
tf->fixreg[2] = tls;
/* Disable FPU */
tf->srr1 &= ~PSL_FP;
pcb->pcb_flags &= ~PCB_FPU;
if (mcp->mc_flags & _MC_FP_VALID) {
/* enable_fpu() will happen lazily on a fault */
pcb->pcb_flags |= PCB_FPREGS;


@ -82,7 +82,7 @@
#define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
#define PAGE_MASK (PAGE_SIZE - 1)
#define MAXPAGESIZES 1 /* maximum number of supported page sizes */
#define MAXPAGESIZES 3 /* maximum number of supported page sizes */
#ifndef KSTACK_PAGES
#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */


@ -55,7 +55,6 @@ struct pcb {
#define PCB_FP_STARTED 0x1
#define PCB_FP_USERMASK 0x1
uint64_t pcb_sepc; /* Supervisor exception pc */
vm_offset_t pcb_l1addr; /* L1 page tables base address */
vm_offset_t pcb_onfault; /* Copyinout fault handler */
};


@ -45,6 +45,7 @@
#define ALT_STACK_SIZE 128
#define PCPU_MD_FIELDS \
struct pmap *pc_curpmap; /* Currently active pmap */ \
uint32_t pc_pending_ipis; /* IPIs pending to this CPU */ \
char __pad[61]


@ -41,9 +41,12 @@
#ifndef LOCORE
#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
#include <vm/_vm_radix.h>
#ifdef _KERNEL
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
@ -78,8 +81,11 @@ struct pmap {
struct mtx pm_mtx;
struct pmap_statistics pm_stats; /* pmap statictics */
pd_entry_t *pm_l1;
u_long pm_satp; /* value for SATP register */
cpuset_t pm_active; /* active on cpus */
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
LIST_ENTRY(pmap) pm_list; /* List of all pmaps */
struct vm_radix pm_root;
};
typedef struct pv_entry {
@ -134,11 +140,16 @@ extern vm_offset_t virtual_end;
#define L1_MAPPABLE_P(va, pa, size) \
((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)
struct thread;
void pmap_activate_boot(pmap_t);
void pmap_activate_sw(struct thread *);
void pmap_bootstrap(vm_offset_t, vm_paddr_t, vm_size_t);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
vm_paddr_t pmap_kextract(vm_offset_t va);
void pmap_kremove(vm_offset_t);
void pmap_kremove_device(vm_offset_t, vm_size_t);
bool pmap_ps_enabled(pmap_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void *pmap_mapbios(vm_paddr_t, vm_size_t);


@ -62,7 +62,8 @@ typedef uint64_t pn_t; /* page number */
#define L3_SIZE (1 << L3_SHIFT)
#define L3_OFFSET (L3_SIZE - 1)
#define Ln_ENTRIES (1 << 9)
#define Ln_ENTRIES_SHIFT 9
#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT)
#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
/* Bits 9:8 are reserved for software */
@ -79,6 +80,8 @@ typedef uint64_t pn_t; /* page number */
#define PTE_RWX (PTE_R | PTE_W | PTE_X)
#define PTE_RX (PTE_R | PTE_X)
#define PTE_KERN (PTE_V | PTE_R | PTE_W | PTE_A | PTE_D)
#define PTE_PROMOTE (PTE_V | PTE_RWX | PTE_D | PTE_A | PTE_G | PTE_U | \
PTE_SW_MANAGED | PTE_SW_WIRED)
#define PTE_PPN0_S 10
#define PTE_PPN1_S 19


@ -99,10 +99,10 @@
#define VM_NFREEORDER 12
/*
* Disable superpage reservations.
* Enable superpage reservations: 1 level.
*/
#ifndef VM_NRESERVLEVEL
#define VM_NRESERVLEVEL 0
#define VM_NRESERVLEVEL 1
#endif
/*


@ -82,7 +82,7 @@ struct sysentvec elf64_freebsd_sysvec = {
.sv_setregs = exec_setregs,
.sv_fixlimit = NULL,
.sv_maxssiz = NULL,
.sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP,
.sv_flags = SV_ABI_FREEBSD | SV_LP64 | SV_SHP | SV_ASLR,
.sv_set_syscall_retval = cpu_set_syscall_retval,
.sv_fetch_syscall_args = cpu_fetch_syscall_args,
.sv_syscallnames = syscallnames,


@ -63,7 +63,6 @@ ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr));
ASSYM(PCB_SIZE, sizeof(struct pcb));
ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra));
ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));

Some files were not shown because too many files have changed in this diff.