MFhead @ r304038

Enji Cooper 2016-08-13 06:16:38 +00:00
commit 569e901835
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/netbsd-tests-update-12/; revision=304039
165 changed files with 4751 additions and 3443 deletions

View File

@ -209,7 +209,8 @@ SUB_MAKE= `test -x ${MYMAKE} && echo ${MYMAKE} || echo ${MAKE}` \
SUB_MAKE= ${MAKE} -m ${.CURDIR}/share/mk
.endif
_MAKE= PATH=${PATH} ${SUB_MAKE} -f Makefile.inc1 TARGET=${_TARGET} TARGET_ARCH=${_TARGET_ARCH}
_MAKE= PATH=${PATH} MAKE_CMD=${MAKE} ${SUB_MAKE} -f Makefile.inc1 \
TARGET=${_TARGET} TARGET_ARCH=${_TARGET_ARCH}
# Only allow meta mode for the whitelisted targets. See META_TGT_WHITELIST
# above.

View File

@ -1013,7 +1013,7 @@ distributeworld installworld stageworld: _installcheck_world .PHONY
${IMAKEENV} rm -rf ${INSTALLTMP}
.if make(distributeworld)
.for dist in ${EXTRA_DISTRIBUTIONS}
find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -empty -delete
find ${DESTDIR}/${DISTDIR}/${dist} -mindepth 1 -type d -empty -delete
.endfor
.if defined(NO_ROOT)
.for dist in base ${EXTRA_DISTRIBUTIONS}
@ -2378,11 +2378,11 @@ check-old-dirs: .PHONY
done
delete-old: delete-old-files delete-old-dirs .PHONY
@echo "To remove old libraries run '${MAKE} delete-old-libs'."
@echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'."
check-old: check-old-files check-old-libs check-old-dirs .PHONY
@echo "To remove old files and directories run '${MAKE} delete-old'."
@echo "To remove old libraries run '${MAKE} delete-old-libs'."
@echo "To remove old files and directories run '${MAKE_CMD} delete-old'."
@echo "To remove old libraries run '${MAKE_CMD} delete-old-libs'."
.endif

View File

@ -29,7 +29,7 @@
.\" @(#)ps.1 8.3 (Berkeley) 4/18/94
.\" $FreeBSD$
.\"
.Dd July 28, 2016
.Dd August 12, 2016
.Dt PS 1
.Os
.Sh NAME
@ -319,7 +319,6 @@ the include file
.It Dv "P_ADVLOCK" Ta No "0x00001" Ta "Process may hold a POSIX advisory lock"
.It Dv "P_CONTROLT" Ta No "0x00002" Ta "Has a controlling terminal"
.It Dv "P_KPROC" Ta No "0x00004" Ta "Kernel process"
.It Dv "P_FOLLOWFORK" Ta No "0x00008" Ta "Attach debugger to new children"
.It Dv "P_PPWAIT" Ta No "0x00010" Ta "Parent is waiting for child to exec/exit"
.It Dv "P_PROFIL" Ta No "0x00020" Ta "Has started profiling"
.It Dv "P_STOPPROF" Ta No "0x00040" Ta "Has thread in requesting to stop prof"
@ -768,7 +767,8 @@ operating systems.
The
.Nm
command appeared in
.At v4 .
.At v3
in section 8 of the manual.
.Sh BUGS
Since
.Nm

View File

@ -20,4 +20,6 @@ CFILES= \
tst.raise3.c \
TEST_METADATA.t_dtrace_contrib+= required_memory="4g"
.include "../../dtrace.test.mk"

View File

@ -53,4 +53,6 @@ CFILES= \
TEST_METADATA.t_dtrace_contrib+= required_memory="4g"
.include "../../dtrace.test.mk"

View File

@ -34,15 +34,28 @@ genmakefile()
# One-off variable definitions.
local special
if [ "$basedir" = proc ]; then
case "$basedir" in
proc)
special="
LIBADD.tst.sigwait.exe+= rt
"
elif [ "$basedir" = uctf ]; then
;;
raise)
special="
TEST_METADATA.t_dtrace_contrib+= required_memory=\"4g\"
"
;;
safety)
special="
TEST_METADATA.t_dtrace_contrib+= required_memory=\"4g\"
"
;;
uctf)
special="
WITH_CTF=YES
"
fi
;;
esac
local makefile=$(mktemp)
cat <<__EOF__ > $makefile

View File

@ -260,6 +260,7 @@ parse_options()
extract_var $_jv set_hostname_allow allow.set_hostname YN NO
extract_var $_jv sysvipc_allow allow.sysvipc YN NO
extract_var $_jv enforce_statfs enforce_statfs - 2
extract_var $_jv osreldate osreldate
extract_var $_jv osrelease osrelease
for _p in $_parameters; do

View File

@ -484,11 +484,18 @@ pid_t vfork(void) __returns_twice;
#if __BSD_VISIBLE
struct timeval; /* select(2) */
struct crypt_data {
int initialized; /* For compatibility with glibc. */
char __buf[256]; /* Buffer returned by crypt_r(). */
};
int acct(const char *);
int async_daemon(void);
int check_utility_compat(const char *);
const char *
crypt_get_format(void);
char *crypt_r(const char *, const char *, struct crypt_data *);
int crypt_set_format(const char *);
int des_cipher(const char *, char *, long, int);
int des_setkey(const char *key);

View File

@ -29,6 +29,7 @@ SRCS+= __getosreldate.c \
devname.c \
dirfd.c \
dirname.c \
dirname_compat.c \
disklabel.c \
dlfcn.c \
drand48.c \

View File

@ -82,7 +82,6 @@ FBSD_1.0 {
daemon;
devname;
devname_r;
dirname;
getdiskbyname;
dladdr;
dlclose;
@ -418,6 +417,10 @@ FBSD_1.4 {
stravis;
};
FBSD_1.5 {
dirname;
};
FBSDprivate_1.0 {
/* needed by thread libraries */
__thr_jtable;

View File

@ -16,7 +16,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 29, 2016
.Dd August 12, 2016
.Dt DIRNAME 3
.Os
.Sh NAME
@ -37,6 +37,7 @@ Any trailing
.Sq \&/
characters are not counted as part of the directory
name.
.Sh RETURN VALUES
If
.Fa path
is a null pointer, the empty string, or contains no
@ -46,40 +47,24 @@ characters,
returns a pointer to the string
.Qq \&. ,
signifying the current directory.
.Sh IMPLEMENTATION NOTES
The
.Fn dirname
function
returns a pointer to internal storage space allocated on the first call
that will be overwritten
by subsequent calls.
.Pp
Other vendor implementations of
.Fn dirname
may store their result in the input buffer,
making it safe to use in multithreaded applications.
Future versions of
.Fx
will follow this approach as well.
.Sh RETURN VALUES
On successful completion,
.Fn dirname
returns a pointer to the parent directory of
Otherwise,
it returns a pointer to the parent directory of
.Fa path .
.Pp
If
.Sh IMPLEMENTATION NOTES
This implementation of
.Fn dirname
fails, a null pointer is returned and the global variable
.Va errno
is set to indicate the error.
.Sh ERRORS
The following error codes may be set in
.Va errno :
.Bl -tag -width Er
.It Bq Er ENAMETOOLONG
The path component to be returned was larger than
.Dv MAXPATHLEN .
.El
uses the buffer provided by the caller to store the resulting parent
directory.
Other vendor implementations may return a pointer to internal storage
space instead.
The advantage of the former approach is that it ensures thread-safety,
while also placing no upper limit on the supported length of the
pathname.
.Pp
The algorithm used by this implementation also discards redundant
slashes and
.Qq \&.
pathname components from the pathname string.
.Sh SEE ALSO
.Xr basename 1 ,
.Xr dirname 1 ,
@ -96,5 +81,10 @@ function first appeared in
.Ox 2.2
and
.Fx 4.2 .
.Pp
In
.Fx 12.0 ,
this function was reimplemented to store its result in the provided
input buffer.
.Sh AUTHORS
.An "Todd C. Miller"
.An Nuxi, the Netherlands
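
Editor's note: the implementation notes above describe behaviour that differs from the historical libc dirname(3): the result is written into the caller's buffer, and redundant slashes and "." components are dropped. A minimal usage sketch, assuming the post-commit libc; the expected output follows from the documented algorithm.

#include <libgen.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * dirname(3) now writes its result into the caller's buffer,
	 * so a writable array is used; a string literal must not be
	 * passed.
	 */
	char path[] = "/usr/./local//bin/";
	char *dir = dirname(path);

	printf("%s\n", dir);	/* prints "/usr/local" */
	printf("%s\n", dir == path ? "stored in place" : "copied");
	return (0);
}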

View File

@ -1,77 +1,90 @@
/* $OpenBSD: dirname.c,v 1.13 2005/08/08 08:05:33 espie Exp $ */
/*
* Copyright (c) 1997, 2004 Todd C. Miller <Todd.Miller@courtesan.com>
/*-
* Copyright (c) 2015-2016 Nuxi, https://nuxi.nl/
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <errno.h>
#include <libgen.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <sys/param.h>
char *
dirname(char *path)
{
static char *dname = NULL;
size_t len;
const char *endp;
const char *in, *prev, *begin, *end;
char *out;
size_t prevlen;
bool skipslash;
if (dname == NULL) {
dname = (char *)malloc(MAXPATHLEN);
if (dname == NULL)
return(NULL);
/*
* If path is a null pointer or points to an empty string,
* dirname() shall return a pointer to the string ".".
*/
if (path == NULL || *path == '\0')
return ((char *)".");
/* Retain at least one leading slash character. */
in = out = *path == '/' ? path + 1 : path;
skipslash = true;
prev = ".";
prevlen = 1;
for (;;) {
/* Extract the next pathname component. */
while (*in == '/')
++in;
begin = in;
while (*in != '/' && *in != '\0')
++in;
end = in;
if (begin == end)
break;
/*
* Copy over the previous pathname component, except if
* it's dot. There is no point in retaining those.
*/
if (prevlen != 1 || *prev != '.') {
if (!skipslash)
*out++ = '/';
skipslash = false;
memmove(out, prev, prevlen);
out += prevlen;
}
/* Preserve the pathname component for the next iteration. */
prev = begin;
prevlen = end - begin;
}
/* Empty or NULL string gets treated as "." */
if (path == NULL || *path == '\0') {
dname[0] = '.';
dname[1] = '\0';
return (dname);
}
/* Strip any trailing slashes */
endp = path + strlen(path) - 1;
while (endp > path && *endp == '/')
endp--;
/* Find the start of the dir */
while (endp > path && *endp != '/')
endp--;
/* Either the dir is "/" or there are no slashes */
if (endp == path) {
dname[0] = *endp == '/' ? '/' : '.';
dname[1] = '\0';
return (dname);
} else {
/* Move forward past the separating slashes */
do {
endp--;
} while (endp > path && *endp == '/');
}
len = endp - path + 1;
if (len >= MAXPATHLEN) {
errno = ENAMETOOLONG;
return (NULL);
}
memcpy(dname, path, len);
dname[len] = '\0';
return (dname);
/*
* If path does not contain a '/', then dirname() shall return a
* pointer to the string ".".
*/
if (out == path)
*out++ = '.';
*out = '\0';
return (path);
}

View File

@ -0,0 +1,79 @@
/* $OpenBSD: dirname.c,v 1.13 2005/08/08 08:05:33 espie Exp $ */
/*
* Copyright (c) 1997, 2004 Todd C. Miller <Todd.Miller@courtesan.com>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <errno.h>
#include <libgen.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
char *
__freebsd11_dirname(char *path)
{
static char *dname = NULL;
size_t len;
const char *endp;
if (dname == NULL) {
dname = (char *)malloc(MAXPATHLEN);
if (dname == NULL)
return(NULL);
}
/* Empty or NULL string gets treated as "." */
if (path == NULL || *path == '\0') {
dname[0] = '.';
dname[1] = '\0';
return (dname);
}
/* Strip any trailing slashes */
endp = path + strlen(path) - 1;
while (endp > path && *endp == '/')
endp--;
/* Find the start of the dir */
while (endp > path && *endp != '/')
endp--;
/* Either the dir is "/" or there are no slashes */
if (endp == path) {
dname[0] = *endp == '/' ? '/' : '.';
dname[1] = '\0';
return (dname);
} else {
/* Move forward past the separating slashes */
do {
endp--;
} while (endp > path && *endp == '/');
}
len = endp - path + 1;
if (len >= MAXPATHLEN) {
errno = ENAMETOOLONG;
return (NULL);
}
memcpy(dname, path, len);
dname[len] = '\0';
return (dname);
}
__sym_compat(dirname, __freebsd11_dirname, FBSD_1.0);
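
Editor's note: together with the Symbol.map change above, this compat shim keeps the historical static-buffer dirname() reachable as dirname@FBSD_1.0 while newly built programs bind to the new dirname@FBSD_1.5. A minimal sketch of the same pattern with a hypothetical frob(), using the __sym_compat macro from <sys/cdefs.h>; assigning the default version to the new symbol still requires a version-script entry (as the FBSD_1.5 block above does for dirname), which this sketch omits.

#include <sys/cdefs.h>

/* New behaviour; exported as the default version via the version script. */
int
frob(int x)
{
	return (x * 2);
}

/* Historical behaviour, kept for binaries linked against the old version. */
int
__freebsd11_frob(int x)
{
	return (x + 1);
}
__sym_compat(frob, __freebsd11_frob, FBSD_1.0);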

View File

@ -2249,6 +2249,8 @@ _dns_getaddrinfo(void *rv, void *cb_data, va_list ap)
struct res_target q, q2;
res_state res;
ai = NULL;
hostname = va_arg(ap, char *);
pai = va_arg(ap, const struct addrinfo *);
@ -2327,16 +2329,16 @@ _dns_getaddrinfo(void *rv, void *cb_data, va_list ap)
/* prefer IPv6 */
if (q.next) {
ai = getanswer(buf2, q2.n, q2.name, q2.qtype, pai, res);
if (ai) {
if (ai != NULL) {
cur->ai_next = ai;
while (cur && cur->ai_next)
cur = cur->ai_next;
}
}
if (!ai || pai->ai_family != AF_UNSPEC ||
if (ai == NULL || pai->ai_family != AF_UNSPEC ||
(pai->ai_flags & (AI_ALL | AI_V4MAPPED)) != AI_V4MAPPED) {
ai = getanswer(buf, q.n, q.name, q.qtype, pai, res);
if (ai)
if (ai != NULL)
cur->ai_next = ai;
}
free(buf);

View File

@ -291,7 +291,7 @@ do { \
ATF_TC(getaddrinfo_test);
ATF_TC_HEAD(getaddrinfo_test, tc) {
atf_tc_set_md_var(tc, "timeout", "450");
atf_tc_set_md_var(tc, "timeout", "1200");
}
ATF_TC_BODY(getaddrinfo_test, tc)
{
@ -301,7 +301,7 @@ ATF_TC_BODY(getaddrinfo_test, tc)
ATF_TC(gethostby_test);
ATF_TC_HEAD(gethostby_test, tc) {
atf_tc_set_md_var(tc, "timeout", "450");
atf_tc_set_md_var(tc, "timeout", "1200");
}
ATF_TC_BODY(gethostby_test, tc)
{
@ -312,7 +312,7 @@ ATF_TC_BODY(gethostby_test, tc)
ATF_TC(getipnodeby_test);
ATF_TC_HEAD(getipnodeby_test, tc) {
atf_tc_set_md_var(tc, "timeout", "450");
atf_tc_set_md_var(tc, "timeout", "1200");
}
ATF_TC_BODY(getipnodeby_test, tc)
{

View File

@ -17,7 +17,8 @@ SRCS= crypt.c misc.c \
crypt-sha256.c sha256c.c \
crypt-sha512.c sha512c.c
MAN= crypt.3
MLINKS= crypt.3 crypt_get_format.3 crypt.3 crypt_set_format.3
MLINKS= crypt.3 crypt_get_format.3 crypt.3 crypt_r.3 \
crypt.3 crypt_set_format.3
CFLAGS+= -I${.CURDIR}/../libmd -I${.CURDIR}/../libutil \
-I${.CURDIR}/../../sys/crypto/sha2

View File

@ -41,31 +41,27 @@ __FBSDID("$FreeBSD$");
* UNIX password
*/
char *
crypt_md5(const char *pw, const char *salt)
int
crypt_md5(const char *pw, const char *salt, char *buffer)
{
MD5_CTX ctx,ctx1;
unsigned long l;
int sl, pl;
u_int i;
u_char final[MD5_SIZE];
static const char *sp, *ep;
static char passwd[120], *p;
const char *ep;
static const char *magic = "$1$";
/* Refine the Salt first */
sp = salt;
/* If it starts with the magic string, then skip that */
if(!strncmp(sp, magic, strlen(magic)))
sp += strlen(magic);
/* If the salt starts with the magic string, skip that. */
if (!strncmp(salt, magic, strlen(magic)))
salt += strlen(magic);
/* It stops at the first '$', max 8 chars */
for(ep = sp; *ep && *ep != '$' && ep < (sp + 8); ep++)
for (ep = salt; *ep && *ep != '$' && ep < salt + 8; ep++)
continue;
/* get the length of the true salt */
sl = ep - sp;
sl = ep - salt;
MD5Init(&ctx);
@ -76,12 +72,12 @@ crypt_md5(const char *pw, const char *salt)
MD5Update(&ctx, (const u_char *)magic, strlen(magic));
/* Then the raw salt */
MD5Update(&ctx, (const u_char *)sp, (u_int)sl);
MD5Update(&ctx, (const u_char *)salt, (u_int)sl);
/* Then just as many characters of the MD5(pw,salt,pw) */
MD5Init(&ctx1);
MD5Update(&ctx1, (const u_char *)pw, strlen(pw));
MD5Update(&ctx1, (const u_char *)sp, (u_int)sl);
MD5Update(&ctx1, (const u_char *)salt, (u_int)sl);
MD5Update(&ctx1, (const u_char *)pw, strlen(pw));
MD5Final(final, &ctx1);
for(pl = (int)strlen(pw); pl > 0; pl -= MD5_SIZE)
@ -99,9 +95,9 @@ crypt_md5(const char *pw, const char *salt)
MD5Update(&ctx, (const u_char *)pw, 1);
/* Now make the output string */
strcpy(passwd, magic);
strncat(passwd, sp, (u_int)sl);
strcat(passwd, "$");
buffer = stpcpy(buffer, magic);
buffer = stpncpy(buffer, salt, (u_int)sl);
*buffer++ = '$';
MD5Final(final, &ctx);
@ -118,7 +114,7 @@ crypt_md5(const char *pw, const char *salt)
MD5Update(&ctx1, (const u_char *)final, MD5_SIZE);
if(i % 3)
MD5Update(&ctx1, (const u_char *)sp, (u_int)sl);
MD5Update(&ctx1, (const u_char *)salt, (u_int)sl);
if(i % 7)
MD5Update(&ctx1, (const u_char *)pw, strlen(pw));
@ -130,24 +126,22 @@ crypt_md5(const char *pw, const char *salt)
MD5Final(final, &ctx1);
}
p = passwd + strlen(passwd);
l = (final[ 0]<<16) | (final[ 6]<<8) | final[12];
_crypt_to64(p, l, 4); p += 4;
_crypt_to64(buffer, l, 4); buffer += 4;
l = (final[ 1]<<16) | (final[ 7]<<8) | final[13];
_crypt_to64(p, l, 4); p += 4;
_crypt_to64(buffer, l, 4); buffer += 4;
l = (final[ 2]<<16) | (final[ 8]<<8) | final[14];
_crypt_to64(p, l, 4); p += 4;
_crypt_to64(buffer, l, 4); buffer += 4;
l = (final[ 3]<<16) | (final[ 9]<<8) | final[15];
_crypt_to64(p, l, 4); p += 4;
_crypt_to64(buffer, l, 4); buffer += 4;
l = (final[ 4]<<16) | (final[10]<<8) | final[ 5];
_crypt_to64(p, l, 4); p += 4;
_crypt_to64(buffer, l, 4); buffer += 4;
l = final[11];
_crypt_to64(p, l, 2); p += 2;
*p = '\0';
_crypt_to64(buffer, l, 2); buffer += 2;
*buffer = '\0';
/* Don't leave anything around in vm they could use. */
memset(final, 0, sizeof(final));
return (passwd);
return (0);
}

View File

@ -46,16 +46,14 @@ __FBSDID("$FreeBSD$");
*/
/* ARGSUSED */
char *
crypt_nthash(const char *pw, const char *salt __unused)
int
crypt_nthash(const char *pw, const char *salt __unused, char *buffer)
{
size_t unipwLen;
int i, j;
static char hexconvtab[] = "0123456789abcdef";
int i;
static const char hexconvtab[] = "0123456789abcdef";
static const char *magic = "$3$";
static char passwd[120];
u_int16_t unipw[128];
char final[MD4_SIZE*2 + 1];
u_char hash[MD4_SIZE];
const char *s;
MD4_CTX ctx;
@ -70,19 +68,14 @@ crypt_nthash(const char *pw, const char *salt __unused)
MD4Init(&ctx);
MD4Update(&ctx, (u_char *)unipw, unipwLen*sizeof(u_int16_t));
MD4Final(hash, &ctx);
for (i = j = 0; i < MD4_SIZE; i++) {
final[j++] = hexconvtab[hash[i] >> 4];
final[j++] = hexconvtab[hash[i] & 15];
buffer = stpcpy(buffer, magic);
*buffer++ = '$';
for (i = 0; i < MD4_SIZE; i++) {
*buffer++ = hexconvtab[hash[i] >> 4];
*buffer++ = hexconvtab[hash[i] & 15];
}
final[j] = '\0';
*buffer = '\0';
strcpy(passwd, magic);
strcat(passwd, "$");
strncat(passwd, final, MD4_SIZE*2);
/* Don't leave anything around in vm they could use. */
memset(final, 0, sizeof(final));
return (passwd);
return (0);
}

View File

@ -59,11 +59,10 @@ static const char sha256_rounds_prefix[] = "rounds=";
/* Maximum number of rounds. */
#define ROUNDS_MAX 999999999
static char *
crypt_sha256_r(const char *key, const char *salt, char *buffer, int buflen)
int
crypt_sha256(const char *key, const char *salt, char *buffer)
{
u_long srounds;
int n;
uint8_t alt_result[32], temp_result[32];
SHA256_CTX ctx, alt_ctx;
size_t salt_len, key_len, cnt, rounds;
@ -210,42 +209,27 @@ crypt_sha256_r(const char *key, const char *salt, char *buffer, int buflen)
/* Now we can construct the result string. It consists of three
* parts. */
cp = stpncpy(buffer, sha256_salt_prefix, MAX(0, buflen));
buflen -= sizeof(sha256_salt_prefix) - 1;
cp = stpcpy(buffer, sha256_salt_prefix);
if (rounds_custom) {
n = snprintf(cp, MAX(0, buflen), "%s%zu$",
sha256_rounds_prefix, rounds);
if (rounds_custom)
cp += sprintf(cp, "%s%zu$", sha256_rounds_prefix, rounds);
cp += n;
buflen -= n;
}
cp = stpncpy(cp, salt, salt_len);
cp = stpncpy(cp, salt, MIN((size_t)MAX(0, buflen), salt_len));
buflen -= MIN((size_t)MAX(0, buflen), salt_len);
*cp++ = '$';
if (buflen > 0) {
*cp++ = '$';
--buflen;
}
b64_from_24bit(alt_result[0], alt_result[10], alt_result[20], 4, &buflen, &cp);
b64_from_24bit(alt_result[21], alt_result[1], alt_result[11], 4, &buflen, &cp);
b64_from_24bit(alt_result[12], alt_result[22], alt_result[2], 4, &buflen, &cp);
b64_from_24bit(alt_result[3], alt_result[13], alt_result[23], 4, &buflen, &cp);
b64_from_24bit(alt_result[24], alt_result[4], alt_result[14], 4, &buflen, &cp);
b64_from_24bit(alt_result[15], alt_result[25], alt_result[5], 4, &buflen, &cp);
b64_from_24bit(alt_result[6], alt_result[16], alt_result[26], 4, &buflen, &cp);
b64_from_24bit(alt_result[27], alt_result[7], alt_result[17], 4, &buflen, &cp);
b64_from_24bit(alt_result[18], alt_result[28], alt_result[8], 4, &buflen, &cp);
b64_from_24bit(alt_result[9], alt_result[19], alt_result[29], 4, &buflen, &cp);
b64_from_24bit(0, alt_result[31], alt_result[30], 3, &buflen, &cp);
if (buflen <= 0) {
errno = ERANGE;
buffer = NULL;
}
else
*cp = '\0'; /* Terminate the string. */
b64_from_24bit(alt_result[0], alt_result[10], alt_result[20], 4, &cp);
b64_from_24bit(alt_result[21], alt_result[1], alt_result[11], 4, &cp);
b64_from_24bit(alt_result[12], alt_result[22], alt_result[2], 4, &cp);
b64_from_24bit(alt_result[3], alt_result[13], alt_result[23], 4, &cp);
b64_from_24bit(alt_result[24], alt_result[4], alt_result[14], 4, &cp);
b64_from_24bit(alt_result[15], alt_result[25], alt_result[5], 4, &cp);
b64_from_24bit(alt_result[6], alt_result[16], alt_result[26], 4, &cp);
b64_from_24bit(alt_result[27], alt_result[7], alt_result[17], 4, &cp);
b64_from_24bit(alt_result[18], alt_result[28], alt_result[8], 4, &cp);
b64_from_24bit(alt_result[9], alt_result[19], alt_result[29], 4, &cp);
b64_from_24bit(0, alt_result[31], alt_result[30], 3, &cp);
*cp = '\0'; /* Terminate the string. */
/* Clear the buffer for the intermediate result so that people
* attaching to processes or reading core dumps cannot get any
@ -263,37 +247,7 @@ crypt_sha256_r(const char *key, const char *salt, char *buffer, int buflen)
if (copied_salt != NULL)
memset(copied_salt, '\0', salt_len);
return buffer;
}
/* This entry point is equivalent to crypt(3). */
char *
crypt_sha256(const char *key, const char *salt)
{
/* We don't want to have an arbitrary limit in the size of the
* password. We can compute an upper bound for the size of the
* result in advance and so we can prepare the buffer we pass to
* `crypt_sha256_r'. */
static char *buffer;
static int buflen;
int needed;
char *new_buffer;
needed = (sizeof(sha256_salt_prefix) - 1
+ sizeof(sha256_rounds_prefix) + 9 + 1
+ strlen(salt) + 1 + 43 + 1);
if (buflen < needed) {
new_buffer = (char *)realloc(buffer, needed);
if (new_buffer == NULL)
return NULL;
buffer = new_buffer;
buflen = needed;
}
return crypt_sha256_r(key, salt, buffer, buflen);
return (0);
}
#ifdef TEST

View File

@ -59,11 +59,10 @@ static const char sha512_rounds_prefix[] = "rounds=";
/* Maximum number of rounds. */
#define ROUNDS_MAX 999999999
static char *
crypt_sha512_r(const char *key, const char *salt, char *buffer, int buflen)
int
crypt_sha512(const char *key, const char *salt, char *buffer)
{
u_long srounds;
int n;
uint8_t alt_result[64], temp_result[64];
SHA512_CTX ctx, alt_ctx;
size_t salt_len, key_len, cnt, rounds;
@ -210,54 +209,39 @@ crypt_sha512_r(const char *key, const char *salt, char *buffer, int buflen)
/* Now we can construct the result string. It consists of three
* parts. */
cp = stpncpy(buffer, sha512_salt_prefix, MAX(0, buflen));
buflen -= sizeof(sha512_salt_prefix) - 1;
cp = stpcpy(buffer, sha512_salt_prefix);
if (rounds_custom) {
n = snprintf(cp, MAX(0, buflen), "%s%zu$",
sha512_rounds_prefix, rounds);
if (rounds_custom)
cp += sprintf(cp, "%s%zu$", sha512_rounds_prefix, rounds);
cp += n;
buflen -= n;
}
cp = stpncpy(cp, salt, salt_len);
cp = stpncpy(cp, salt, MIN((size_t)MAX(0, buflen), salt_len));
buflen -= MIN((size_t)MAX(0, buflen), salt_len);
*cp++ = '$';
if (buflen > 0) {
*cp++ = '$';
--buflen;
}
b64_from_24bit(alt_result[0], alt_result[21], alt_result[42], 4, &cp);
b64_from_24bit(alt_result[22], alt_result[43], alt_result[1], 4, &cp);
b64_from_24bit(alt_result[44], alt_result[2], alt_result[23], 4, &cp);
b64_from_24bit(alt_result[3], alt_result[24], alt_result[45], 4, &cp);
b64_from_24bit(alt_result[25], alt_result[46], alt_result[4], 4, &cp);
b64_from_24bit(alt_result[47], alt_result[5], alt_result[26], 4, &cp);
b64_from_24bit(alt_result[6], alt_result[27], alt_result[48], 4, &cp);
b64_from_24bit(alt_result[28], alt_result[49], alt_result[7], 4, &cp);
b64_from_24bit(alt_result[50], alt_result[8], alt_result[29], 4, &cp);
b64_from_24bit(alt_result[9], alt_result[30], alt_result[51], 4, &cp);
b64_from_24bit(alt_result[31], alt_result[52], alt_result[10], 4, &cp);
b64_from_24bit(alt_result[53], alt_result[11], alt_result[32], 4, &cp);
b64_from_24bit(alt_result[12], alt_result[33], alt_result[54], 4, &cp);
b64_from_24bit(alt_result[34], alt_result[55], alt_result[13], 4, &cp);
b64_from_24bit(alt_result[56], alt_result[14], alt_result[35], 4, &cp);
b64_from_24bit(alt_result[15], alt_result[36], alt_result[57], 4, &cp);
b64_from_24bit(alt_result[37], alt_result[58], alt_result[16], 4, &cp);
b64_from_24bit(alt_result[59], alt_result[17], alt_result[38], 4, &cp);
b64_from_24bit(alt_result[18], alt_result[39], alt_result[60], 4, &cp);
b64_from_24bit(alt_result[40], alt_result[61], alt_result[19], 4, &cp);
b64_from_24bit(alt_result[62], alt_result[20], alt_result[41], 4, &cp);
b64_from_24bit(0, 0, alt_result[63], 2, &cp);
b64_from_24bit(alt_result[0], alt_result[21], alt_result[42], 4, &buflen, &cp);
b64_from_24bit(alt_result[22], alt_result[43], alt_result[1], 4, &buflen, &cp);
b64_from_24bit(alt_result[44], alt_result[2], alt_result[23], 4, &buflen, &cp);
b64_from_24bit(alt_result[3], alt_result[24], alt_result[45], 4, &buflen, &cp);
b64_from_24bit(alt_result[25], alt_result[46], alt_result[4], 4, &buflen, &cp);
b64_from_24bit(alt_result[47], alt_result[5], alt_result[26], 4, &buflen, &cp);
b64_from_24bit(alt_result[6], alt_result[27], alt_result[48], 4, &buflen, &cp);
b64_from_24bit(alt_result[28], alt_result[49], alt_result[7], 4, &buflen, &cp);
b64_from_24bit(alt_result[50], alt_result[8], alt_result[29], 4, &buflen, &cp);
b64_from_24bit(alt_result[9], alt_result[30], alt_result[51], 4, &buflen, &cp);
b64_from_24bit(alt_result[31], alt_result[52], alt_result[10], 4, &buflen, &cp);
b64_from_24bit(alt_result[53], alt_result[11], alt_result[32], 4, &buflen, &cp);
b64_from_24bit(alt_result[12], alt_result[33], alt_result[54], 4, &buflen, &cp);
b64_from_24bit(alt_result[34], alt_result[55], alt_result[13], 4, &buflen, &cp);
b64_from_24bit(alt_result[56], alt_result[14], alt_result[35], 4, &buflen, &cp);
b64_from_24bit(alt_result[15], alt_result[36], alt_result[57], 4, &buflen, &cp);
b64_from_24bit(alt_result[37], alt_result[58], alt_result[16], 4, &buflen, &cp);
b64_from_24bit(alt_result[59], alt_result[17], alt_result[38], 4, &buflen, &cp);
b64_from_24bit(alt_result[18], alt_result[39], alt_result[60], 4, &buflen, &cp);
b64_from_24bit(alt_result[40], alt_result[61], alt_result[19], 4, &buflen, &cp);
b64_from_24bit(alt_result[62], alt_result[20], alt_result[41], 4, &buflen, &cp);
b64_from_24bit(0, 0, alt_result[63], 2, &buflen, &cp);
if (buflen <= 0) {
errno = ERANGE;
buffer = NULL;
}
else
*cp = '\0'; /* Terminate the string. */
*cp = '\0'; /* Terminate the string. */
/* Clear the buffer for the intermediate result so that people
* attaching to processes or reading core dumps cannot get any
@ -275,37 +259,7 @@ crypt_sha512_r(const char *key, const char *salt, char *buffer, int buflen)
if (copied_salt != NULL)
memset(copied_salt, '\0', salt_len);
return buffer;
}
/* This entry point is equivalent to crypt(3). */
char *
crypt_sha512(const char *key, const char *salt)
{
/* We don't want to have an arbitrary limit in the size of the
* password. We can compute an upper bound for the size of the
* result in advance and so we can prepare the buffer we pass to
* `crypt_sha512_r'. */
static char *buffer;
static int buflen;
int needed;
char *new_buffer;
needed = (sizeof(sha512_salt_prefix) - 1
+ sizeof(sha512_rounds_prefix) + 9 + 1
+ strlen(salt) + 1 + 86 + 1);
if (buflen < needed) {
new_buffer = (char *)realloc(buffer, needed);
if (new_buffer == NULL)
return NULL;
buffer = new_buffer;
buflen = needed;
}
return crypt_sha512_r(key, salt, buffer, buflen);
return (0);
}
#ifdef TEST

View File

@ -29,7 +29,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 9, 2014
.Dd August 10, 2016
.Dt CRYPT 3
.Os
.Sh NAME
@ -41,6 +41,8 @@
.In unistd.h
.Ft char *
.Fn crypt "const char *key" "const char *salt"
.Ft char *
.Fn crypt_r "const char *key" "const char *salt" "struct crypt_data *data"
.Ft const char *
.Fn crypt_get_format "void"
.Ft int
@ -246,10 +248,20 @@ The
.Fn crypt_set_format
function sets the default encoding format according to the supplied
.Fa string .
.Pp
The
.Fn crypt_r
function behaves identically to
.Fn crypt ,
except that the resulting string is stored in
.Fa data ,
making it thread-safe.
.Sh RETURN VALUES
The
.Fn crypt
function returns a pointer to the encrypted value on success, and NULL on
and
.Fn crypt_r
functions return a pointer to the encrypted value on success, and NULL on
failure.
Note: this is not a standard behaviour, AT&T
.Fn crypt
@ -280,6 +292,11 @@ section of the code (FreeSec 1.0) was developed outside the United
States of America as an unencumbered replacement for the U.S.-only
.Nx
libcrypt encryption library.
.Pp
The
.Fn crypt_r
function was added in
.Fx 12.0 .
.Sh AUTHORS
.An -nosplit
Originally written by
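
Editor's note: as documented above, crypt_r() stores its result in the caller-supplied struct crypt_data rather than in static storage, so concurrent threads can hash independently. A minimal usage sketch against the declarations added to unistd.h earlier in this commit (link with -lcrypt; the salt and password are illustrative).

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct crypt_data data;
	char *hash;

	/*
	 * Zeroing follows the glibc convention for the 'initialized'
	 * field; the implementation in this commit only uses __buf.
	 */
	memset(&data, 0, sizeof(data));
	hash = crypt_r("hunter2", "$6$mysalt$", &data);
	if (hash == NULL)
		return (1);
	/* The result lives in data.__buf, not in a shared static buffer. */
	printf("%s\n", hash);
	return (0);
}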

View File

@ -46,9 +46,9 @@ __FBSDID("$FreeBSD$");
* and it needs to be the default for backward compatibility.
*/
static const struct crypt_format {
const char *const name;
char *(*const func)(const char *, const char *);
const char *const magic;
const char *name;
int (*func)(const char *, const char *, char *);
const char *magic;
} crypt_formats[] = {
{ "md5", crypt_md5, "$1$" },
#ifdef HAS_BLOWFISH
@ -104,20 +104,37 @@ crypt_set_format(const char *format)
* otherwise, the currently selected format is used.
*/
char *
crypt(const char *passwd, const char *salt)
crypt_r(const char *passwd, const char *salt, struct crypt_data *data)
{
const struct crypt_format *cf;
int (*func)(const char *, const char *, char *);
#ifdef HAS_DES
int len;
#endif
for (cf = crypt_formats; cf->name != NULL; ++cf)
if (cf->magic != NULL && strstr(salt, cf->magic) == salt)
return (cf->func(passwd, salt));
if (cf->magic != NULL && strstr(salt, cf->magic) == salt) {
func = cf->func;
goto match;
}
#ifdef HAS_DES
len = strlen(salt);
if ((len == 13 || len == 2) && strspn(salt, DES_SALT_ALPHABET) == len)
return (crypt_des(passwd, salt));
if ((len == 13 || len == 2) && strspn(salt, DES_SALT_ALPHABET) == len) {
func = crypt_des;
goto match;
}
#endif
return (crypt_format->func(passwd, salt));
func = crypt_format->func;
match:
if (func(passwd, salt, data->__buf) != 0)
return (NULL);
return (data->__buf);
}
char *
crypt(const char *passwd, const char *salt)
{
static struct crypt_data data;
return (crypt_r(passwd, salt, &data));
}
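
Editor's note: the dispatch in crypt_r() above selects a hash by matching the salt's magic prefix, falling back to a DES-style salt check and finally to the per-process default chosen with crypt_set_format(3). A minimal sketch exercising that selection (link with -lcrypt; the salts and password are illustrative, and the set of available formats depends on how libcrypt was built).

#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* The "$1$" magic prefix selects the MD5 format directly. */
	printf("md5 hash: %s\n", crypt("password", "$1$deadbeef$"));

	/* Report the compiled-in default, then switch it. */
	printf("default format: %s\n", crypt_get_format());
	if (crypt_set_format("sha256")) {
		/*
		 * A salt with no magic prefix (and not a DES-style 2- or
		 * 13-character salt) falls through to the configured
		 * default format.
		 */
		printf("sha256 hash: %s\n", crypt("password", "somesalt"));
	}
	return (0);
}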

View File

@ -32,12 +32,12 @@
#define MD4_SIZE 16
#define MD5_SIZE 16
char *crypt_des(const char *pw, const char *salt);
char *crypt_md5(const char *pw, const char *salt);
char *crypt_nthash(const char *pw, const char *salt);
char *crypt_blowfish(const char *pw, const char *salt);
char *crypt_sha256 (const char *pw, const char *salt);
char *crypt_sha512 (const char *pw, const char *salt);
int crypt_des(const char *pw, const char *salt, char *buf);
int crypt_md5(const char *pw, const char *salt, char *buf);
int crypt_nthash(const char *pw, const char *salt, char *buf);
int crypt_blowfish(const char *pw, const char *salt, char *buf);
int crypt_sha256 (const char *pw, const char *salt, char *buf);
int crypt_sha512 (const char *pw, const char *salt, char *buf);
extern void _crypt_to64(char *s, u_long v, int n);
extern void b64_from_24bit(uint8_t B2, uint8_t B1, uint8_t B0, int n, int *buflen, char **cp);
extern void b64_from_24bit(uint8_t B2, uint8_t B1, uint8_t B0, int n, char **cp);

View File

@ -47,7 +47,7 @@ _crypt_to64(char *s, u_long v, int n)
}
void
b64_from_24bit(uint8_t B2, uint8_t B1, uint8_t B0, int n, int *buflen, char **cp)
b64_from_24bit(uint8_t B2, uint8_t B1, uint8_t B0, int n, char **cp)
{
uint32_t w;
int i;
@ -56,8 +56,6 @@ b64_from_24bit(uint8_t B2, uint8_t B1, uint8_t B0, int n, int *buflen, char **cp
for (i = 0; i < n; i++) {
**cp = itoa64[w&0x3f];
(*cp)++;
if ((*buflen)-- < 0)
break;
w >>= 6;
}
}

View File

@ -9,6 +9,7 @@ SRCS= errno.c ioctl.c syscallnames.c utrace.c
INCS= sysdecode.h
CFLAGS+= -I${.CURDIR}/../../sys
CFLAGS+= -I${.CURDIR}/../../libexec/rtld-elf
MAN+= sysdecode.3 \
sysdecode_abi_to_freebsd_errno.3 \

View File

@ -33,31 +33,21 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <dlfcn.h>
#include <stdio.h>
#include <strings.h>
#include <string.h>
#include <sysdecode.h>
#include "rtld_utrace.h"
#define UTRACE_DLOPEN_START 1
#define UTRACE_DLOPEN_STOP 2
#define UTRACE_DLCLOSE_START 3
#define UTRACE_DLCLOSE_STOP 4
#define UTRACE_LOAD_OBJECT 5
#define UTRACE_UNLOAD_OBJECT 6
#define UTRACE_ADD_RUNDEP 7
#define UTRACE_PRELOAD_FINISHED 8
#define UTRACE_INIT_CALL 9
#define UTRACE_FINI_CALL 10
#define UTRACE_DLSYM_START 11
#define UTRACE_DLSYM_STOP 12
struct utrace_rtld {
char sig[4]; /* 'RTLD' */
#ifdef __LP64__
struct utrace_rtld32 {
char sig[4];
int event;
void *handle;
void *mapbase;
size_t mapsize;
uint32_t handle;
uint32_t mapbase;
uint32_t mapsize;
int refcnt;
char name[MAXPATHLEN];
};
#endif
static int
print_utrace_rtld(FILE *fp, void *p)
@ -145,6 +135,14 @@ struct utrace_malloc {
void *r;
};
#ifdef __LP64__
struct utrace_malloc32 {
uint32_t p;
uint32_t s;
uint32_t r;
};
#endif
static void
print_utrace_malloc(FILE *fp, void *p)
{
@ -163,15 +161,49 @@ print_utrace_malloc(FILE *fp, void *p)
int
sysdecode_utrace(FILE *fp, void *p, size_t len)
{
#ifdef __LP64__
struct utrace_rtld ur;
struct utrace_rtld32 *pr;
struct utrace_malloc um;
struct utrace_malloc32 *pm;
#endif
static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
if (len == sizeof(struct utrace_rtld) && bcmp(p, "RTLD", 4) == 0) {
if (len == sizeof(struct utrace_rtld) && bcmp(p, rtld_utrace_sig,
sizeof(rtld_utrace_sig)) == 0)
return (print_utrace_rtld(fp, p));
}
if (len == sizeof(struct utrace_malloc)) {
print_utrace_malloc(fp, p);
return (1);
}
#ifdef __LP64__
if (len == sizeof(struct utrace_rtld32) && bcmp(p, rtld_utrace_sig,
sizeof(rtld_utrace_sig)) == 0) {
pr = p;
memset(&ur, 0, sizeof(ur));
memcpy(ur.sig, pr->sig, sizeof(ur.sig));
ur.event = pr->event;
ur.handle = (void *)(uintptr_t)pr->handle;
ur.mapbase = (void *)(uintptr_t)pr->mapbase;
ur.mapsize = pr->mapsize;
ur.refcnt = pr->refcnt;
memcpy(ur.name, pr->name, sizeof(ur.name));
return (print_utrace_rtld(fp, &ur));
}
if (len == sizeof(struct utrace_malloc32)) {
pm = p;
memset(&um, 0, sizeof(um));
um.p = pm->p == (uint32_t)-1 ? (void *)(intptr_t)-1 :
(void *)(uintptr_t)pm->p;
um.s = pm->s;
um.r = (void *)(uintptr_t)pm->r;
print_utrace_malloc(fp, &um);
return (1);
}
#endif
return (0);
}

View File

@ -59,6 +59,7 @@
#include "paths.h"
#include "rtld_tls.h"
#include "rtld_printf.h"
#include "rtld_utrace.h"
#include "notes.h"
/* Types. */
@ -273,29 +274,6 @@ char *ld_env_prefix = LD_;
(dlp)->num_alloc = obj_count, \
(dlp)->num_used = 0)
#define UTRACE_DLOPEN_START 1
#define UTRACE_DLOPEN_STOP 2
#define UTRACE_DLCLOSE_START 3
#define UTRACE_DLCLOSE_STOP 4
#define UTRACE_LOAD_OBJECT 5
#define UTRACE_UNLOAD_OBJECT 6
#define UTRACE_ADD_RUNDEP 7
#define UTRACE_PRELOAD_FINISHED 8
#define UTRACE_INIT_CALL 9
#define UTRACE_FINI_CALL 10
#define UTRACE_DLSYM_START 11
#define UTRACE_DLSYM_STOP 12
struct utrace_rtld {
char sig[4]; /* 'RTLD' */
int event;
void *handle;
void *mapbase; /* Used for 'parent' and 'init/fini' */
size_t mapsize;
int refcnt; /* Used for 'mode' */
char name[MAXPATHLEN];
};
#define LD_UTRACE(e, h, mb, ms, r, n) do { \
if (ld_utrace != NULL) \
ld_utrace_log(e, h, mb, ms, r, n); \
@ -306,11 +284,9 @@ ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
int refcnt, const char *name)
{
struct utrace_rtld ut;
static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;
ut.sig[0] = 'R';
ut.sig[1] = 'T';
ut.sig[2] = 'L';
ut.sig[3] = 'D';
memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
ut.event = event;
ut.handle = handle;
ut.mapbase = mapbase;
@ -1916,6 +1892,7 @@ static void
init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
{
Obj_Entry objtmp; /* Temporary rtld object */
const Elf_Ehdr *ehdr;
const Elf_Dyn *dyn_rpath;
const Elf_Dyn *dyn_soname;
const Elf_Dyn *dyn_runpath;
@ -1954,6 +1931,9 @@ init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
relocate_objects(&objtmp, true, &objtmp, 0, NULL);
}
ehdr = (Elf_Ehdr *)mapbase;
objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
/* Initialize the object list. */
TAILQ_INIT(&obj_list);
@ -2164,8 +2144,7 @@ load_needed_objects(Obj_Entry *first, int flags)
{
Obj_Entry *obj;
obj = first;
TAILQ_FOREACH_FROM(obj, &obj_list, next) {
for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
if (obj->marker)
continue;
if (process_needed(obj, obj->needed, flags) == -1)
@ -2769,9 +2748,8 @@ relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
Obj_Entry *obj;
int error;
error = 0;
obj = first;
TAILQ_FOREACH_FROM(obj, &obj_list, next) {
for (error = 0, obj = first; obj != NULL;
obj = TAILQ_NEXT(obj, next)) {
if (obj->marker)
continue;
error = relocate_object(obj, bind_now, rtldobj, flags,
@ -2811,8 +2789,7 @@ resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags,
{
Obj_Entry *obj;
obj = first;
TAILQ_FOREACH_FROM(obj, &obj_list, next) {
for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
if (obj->marker)
continue;
if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1)
@ -4316,7 +4293,7 @@ trace_loaded_objects(Obj_Entry *obj)
list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));
TAILQ_FOREACH_FROM(obj, &obj_list, next) {
for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
Needed_Entry *needed;
char *name, *path;
bool is_lib;
@ -4661,8 +4638,7 @@ allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
*/
free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
} else {
obj = objs;
TAILQ_FOREACH_FROM(obj, &obj_list, next) {
for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
if (obj->marker || obj->tlsoffset == 0)
continue;
addr = segbase - obj->tlsoffset;

View File

@ -0,0 +1,62 @@
/*-
* Copyright (c) 2007 John Baldwin
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef RTLD_UTRACE_H
#define RTLD_UTRACE_H
#include <sys/param.h>
#define UTRACE_DLOPEN_START 1
#define UTRACE_DLOPEN_STOP 2
#define UTRACE_DLCLOSE_START 3
#define UTRACE_DLCLOSE_STOP 4
#define UTRACE_LOAD_OBJECT 5
#define UTRACE_UNLOAD_OBJECT 6
#define UTRACE_ADD_RUNDEP 7
#define UTRACE_PRELOAD_FINISHED 8
#define UTRACE_INIT_CALL 9
#define UTRACE_FINI_CALL 10
#define UTRACE_DLSYM_START 11
#define UTRACE_DLSYM_STOP 12
#define RTLD_UTRACE_SIG_SZ 4
#define RTLD_UTRACE_SIG "RTLD"
struct utrace_rtld {
char sig[RTLD_UTRACE_SIG_SZ];
int event;
void *handle;
void *mapbase; /* Used for 'parent' and 'init/fini' */
size_t mapsize;
int refcnt; /* Used for 'mode' */
char name[MAXPATHLEN];
};
#endif

View File

@ -1583,7 +1583,7 @@ show_static_rule(struct cmdline_opts *co, struct format_opts *fo,
break;
case O_NAT:
if (cmd->arg1 != 0)
if (cmd->arg1 != IP_FW_NAT44_GLOBAL)
bprint_uint_arg(bp, "nat ", cmd->arg1);
else
bprintf(bp, "nat global");
@ -3776,7 +3776,7 @@ compile_rule(char *av[], uint32_t *rbuf, int *rbufsize, struct tidx *tstate)
action->len = F_INSN_SIZE(ipfw_insn_nat);
CHECK_ACTLEN;
if (*av != NULL && _substrcmp(*av, "global") == 0) {
action->arg1 = 0;
action->arg1 = IP_FW_NAT44_GLOBAL;
av++;
break;
} else

View File

@ -75,8 +75,6 @@ __FBSDID("$FreeBSD$");
static void encode_base64(u_int8_t *, u_int8_t *, u_int16_t);
static void decode_base64(u_int8_t *, u_int16_t, const u_int8_t *);
static char encrypted[_PASSWORD_LEN];
const static u_int8_t Base64Code[] =
"./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
@ -135,8 +133,8 @@ decode_base64(u_int8_t *buffer, u_int16_t len, const u_int8_t *data)
/* We handle $Vers$log2(NumRounds)$salt+passwd$
i.e. $2$04$iwouldntknowwhattosayetKdJ6iFtacBqJdKe6aW7ou */
char *
crypt_blowfish(const char *key, const char *salt)
int
crypt_blowfish(const char *key, const char *salt, char *buffer)
{
blf_ctx state;
u_int32_t rounds, i, k;
@ -157,10 +155,8 @@ crypt_blowfish(const char *key, const char *salt)
/* Discard "$" identifier */
salt++;
if (*salt > BCRYPT_VERSION) {
/* How do I handle errors ? Return NULL */
return NULL;
}
if (*salt > BCRYPT_VERSION)
return (-1);
/* Check for minor versions */
if (salt[1] != '$') {
@ -174,7 +170,7 @@ crypt_blowfish(const char *key, const char *salt)
salt++;
break;
default:
return NULL;
return (-1);
}
} else
minr = 0;
@ -184,15 +180,15 @@ crypt_blowfish(const char *key, const char *salt)
if (salt[2] != '$')
/* Out of sync with passwd entry */
return NULL;
return (-1);
memcpy(arounds, salt, sizeof(arounds));
if (arounds[sizeof(arounds) - 1] != '$')
return NULL;
return (-1);
arounds[sizeof(arounds) - 1] = 0;
logr = strtonum(arounds, BCRYPT_MINLOGROUNDS, 31, NULL);
if (logr == 0)
return NULL;
return (-1);
/* Computer power doesn't increase linearly, 2^x should be fine */
rounds = 1U << logr;
@ -201,7 +197,7 @@ crypt_blowfish(const char *key, const char *salt)
}
if (strlen(salt) * 3 / 4 < BCRYPT_MAXSALT)
return NULL;
return (-1);
/* We dont want the base64 salt but the raw data */
decode_base64(csalt, BCRYPT_MAXSALT, (const u_int8_t *) salt);
@ -248,23 +244,23 @@ crypt_blowfish(const char *key, const char *salt)
}
i = 0;
encrypted[i++] = '$';
encrypted[i++] = BCRYPT_VERSION;
*buffer++ = '$';
*buffer++ = BCRYPT_VERSION;
if (minr)
encrypted[i++] = minr;
encrypted[i++] = '$';
*buffer++ = minr;
*buffer++ = '$';
snprintf(encrypted + i, 4, "%2.2u$", logr);
snprintf(buffer, 4, "%2.2u$", logr);
buffer += 3;
encode_base64((u_int8_t *) encrypted + i + 3, csalt, BCRYPT_MAXSALT);
encode_base64((u_int8_t *) encrypted + strlen(encrypted), ciphertext,
4 * BCRYPT_BLOCKS - 1);
encode_base64((u_int8_t *)buffer, csalt, BCRYPT_MAXSALT);
buffer += strlen(buffer);
encode_base64((u_int8_t *)buffer, ciphertext, 4 * BCRYPT_BLOCKS - 1);
memset(&state, 0, sizeof(state));
memset(ciphertext, 0, sizeof(ciphertext));
memset(csalt, 0, sizeof(csalt));
memset(cdata, 0, sizeof(cdata));
return encrypted;
return (0);
}
static void

View File

@ -588,13 +588,12 @@ des_cipher(const char *in, char *out, u_long salt, int count)
return(retval);
}
char *
crypt_des(const char *key, const char *setting)
int
crypt_des(const char *key, const char *setting, char *buffer)
{
int i;
u_int32_t count, salt, l, r0, r1, keybuf[2];
u_char *p, *q;
static char output[21];
u_char *q;
if (!des_initialised)
des_init();
@ -610,7 +609,7 @@ crypt_des(const char *key, const char *setting)
key++;
}
if (des_setkey((char *)keybuf))
return(NULL);
return (-1);
if (*setting == _PASSWORD_EFMT1) {
/*
@ -629,7 +628,7 @@ crypt_des(const char *key, const char *setting)
* Encrypt the key with itself.
*/
if (des_cipher((char *)keybuf, (char *)keybuf, 0L, 1))
return(NULL);
return (-1);
/*
* And XOR with the next 8 characters of the key.
*/
@ -638,19 +637,9 @@ crypt_des(const char *key, const char *setting)
*q++ ^= *key++ << 1;
if (des_setkey((char *)keybuf))
return(NULL);
return (-1);
}
strncpy(output, setting, 9);
/*
* Double check that we weren't given a short setting.
* If we were, the above code will probably have created
* wierd values for count and salt, but we don't really care.
* Just make sure the output string doesn't have an extra
* NUL in it.
*/
output[9] = '\0';
p = (u_char *)output + strlen(output);
buffer = stpncpy(buffer, setting, 9);
} else {
/*
* "old"-style:
@ -662,43 +651,41 @@ crypt_des(const char *key, const char *setting)
salt = (ascii_to_bin(setting[1]) << 6)
| ascii_to_bin(setting[0]);
output[0] = setting[0];
*buffer++ = setting[0];
/*
* If the encrypted password that the salt was extracted from
* is only 1 character long, the salt will be corrupted. We
* need to ensure that the output string doesn't have an extra
* NUL in it!
*/
output[1] = setting[1] ? setting[1] : output[0];
p = (u_char *)output + 2;
*buffer++ = setting[1] ? setting[1] : setting[0];
}
setup_salt(salt);
/*
* Do it.
*/
if (do_des(0L, 0L, &r0, &r1, (int)count))
return(NULL);
return (-1);
/*
* Now encode the result...
*/
l = (r0 >> 8);
*p++ = ascii64[(l >> 18) & 0x3f];
*p++ = ascii64[(l >> 12) & 0x3f];
*p++ = ascii64[(l >> 6) & 0x3f];
*p++ = ascii64[l & 0x3f];
*buffer++ = ascii64[(l >> 18) & 0x3f];
*buffer++ = ascii64[(l >> 12) & 0x3f];
*buffer++ = ascii64[(l >> 6) & 0x3f];
*buffer++ = ascii64[l & 0x3f];
l = (r0 << 16) | ((r1 >> 16) & 0xffff);
*p++ = ascii64[(l >> 18) & 0x3f];
*p++ = ascii64[(l >> 12) & 0x3f];
*p++ = ascii64[(l >> 6) & 0x3f];
*p++ = ascii64[l & 0x3f];
*buffer++ = ascii64[(l >> 18) & 0x3f];
*buffer++ = ascii64[(l >> 12) & 0x3f];
*buffer++ = ascii64[(l >> 6) & 0x3f];
*buffer++ = ascii64[l & 0x3f];
l = r1 << 2;
*p++ = ascii64[(l >> 12) & 0x3f];
*p++ = ascii64[(l >> 6) & 0x3f];
*p++ = ascii64[l & 0x3f];
*p = 0;
*buffer++ = ascii64[(l >> 12) & 0x3f];
*buffer++ = ascii64[(l >> 6) & 0x3f];
*buffer++ = ascii64[l & 0x3f];
*buffer = '\0';
return(output);
return (0);
}

View File

@ -312,6 +312,7 @@ theraven [label="David Chisnall\ntheraven@FreeBSD.org\n2011/11/11"]
thompsa [label="Andrew Thompson\nthompsa@FreeBSD.org\n2005/05/25"]
ticso [label="Bernd Walter\nticso@FreeBSD.org\n2002/01/31"]
tijl [label="Tijl Coosemans\ntijl@FreeBSD.org\n2010/07/16"]
tsoome [label="Toomas Soome\ntsoome@FreeBSD.org\n2016/08/10"]
trasz [label="Edward Tomasz Napierala\ntrasz@FreeBSD.org\n2008/08/22"]
trhodes [label="Tom Rhodes\ntrhodes@FreeBSD.org\n2002/05/28"]
trociny [label="Mikolaj Golub\ntrociny@FreeBSD.org\n2011/03/10"]
@ -363,6 +364,8 @@ adrian -> sgalabov
ae -> melifaro
allanjude -> tsoome
alc -> davide
andre -> qingli
@ -520,6 +523,7 @@ imp -> sanpei
imp -> shiba
imp -> takawata
imp -> toshi
imp -> tsoome
imp -> uch
jake -> bms

View File

@ -331,6 +331,7 @@ PROGS_CXX PROG and PROGS_CXX in one Makefile. To define
- DEBUG_FLAGS
- DPADD
- DPSRCS
- INTERNALPROG (no installation)
- LDADD
- LDFLAGS
- LIBADD

View File

@ -24,8 +24,8 @@ PROGS += ${PROGS_CXX}
# just one of many
PROG_OVERRIDE_VARS += BINDIR BINGRP BINOWN BINMODE DPSRCS MAN NO_WERROR \
PROGNAME SRCS STRIP WARNS
PROG_VARS += CFLAGS CXXFLAGS DEBUG_FLAGS DPADD LDADD LIBADD LINKS \
LDFLAGS MLINKS ${PROG_OVERRIDE_VARS}
PROG_VARS += CFLAGS CXXFLAGS DEBUG_FLAGS DPADD INTERNALPROG LDADD LIBADD \
LINKS LDFLAGS MLINKS ${PROG_OVERRIDE_VARS}
.for v in ${PROG_VARS:O:u}
.if empty(${PROG_OVERRIDE_VARS:M$v})
.if defined(${v}.${PROG})

View File

@ -561,9 +561,9 @@ pmap_delayed_invl_wait(vm_page_t m)
* block to complete before proceeding.
*
* The function works by setting the DI generation number for m's PV
* list to at least * the number for the current thread. This forces
* a caller to pmap_delayed_invl_wait() to spin until current thread
* calls pmap_delayed_invl_finished().
* list to at least the DI generation number of the current thread.
* This forces a caller of pmap_delayed_invl_wait() to block until
* current thread calls pmap_delayed_invl_finished().
*/
static void
pmap_delayed_invl_page(vm_page_t m)

View File

@ -443,8 +443,8 @@ trap(struct trapframe *frame)
goto out;
case T_DNA:
KASSERT(!PCB_USER_FPU(td->td_pcb),
("Unregistered use of FPU in kernel"));
if (PCB_USER_FPU(td->td_pcb))
panic("Unregistered use of FPU in kernel");
fpudna();
goto out;

View File

@ -196,7 +196,6 @@ static struct sysentvec cloudabi64_elf_sysvec = {
.sv_pagesize = PAGE_SIZE,
.sv_minuser = VM_MIN_ADDRESS,
.sv_maxuser = VM_MAXUSER_ADDRESS,
.sv_usrstack = USRSTACK,
.sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
.sv_copyout_strings = cloudabi64_copyout_strings,
.sv_setregs = cloudabi64_proc_setregs,

View File

@ -229,6 +229,13 @@ CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
extern pt_entry_t pagetable_dmap[];
static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
static int superpages_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
"Are large page mappings enabled?");
/*
* Data for the pv entry allocation mechanism
*/
@ -243,6 +250,13 @@ static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
vm_offset_t va);
static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
vm_offset_t va, struct rwlock **lockp);
static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
@ -422,6 +436,13 @@ pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
return (l3);
}
static inline bool
pmap_superpages_enabled(void)
{
return (superpages_enabled != 0);
}
bool
pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
pd_entry_t **l2, pt_entry_t **l3)
@ -836,6 +857,11 @@ pmap_init(void)
{
int i;
/*
* Are large page mappings enabled?
*/
TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
/*
* Initialize the pv chunk list mutex.
*/
@ -1574,7 +1600,6 @@ pmap_release(pmap_t pmap)
vm_page_free_zero(m);
}
#if 0
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
@ -1594,7 +1619,6 @@ kvm_free(SYSCTL_HANDLER_ARGS)
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
0, 0, kvm_free, "LU", "Amount of KVM free");
#endif /* 0 */
/*
* grow the number of kernel page table entries, if needed
@ -2002,6 +2026,15 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
l3_paddr = pmap_load(l2);
if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
KASSERT((l3_paddr & ATTR_SW_MANAGED) == 0,
("%s: TODO: Demote managed pages", __func__));
if (pmap_demote_l2_locked(pmap, l2, sva & ~L2_OFFSET,
&lock) == NULL)
continue;
l3_paddr = pmap_load(l2);
}
/*
* Weed out invalid mappings.
*/
@ -2195,6 +2228,99 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pmap_invalidate_all(pmap);
}
/*
* Performs a break-before-make update of a pmap entry. This is needed when
* either promoting or demoting pages to ensure the TLB doesn't get into an
* inconsistent state.
*/
static void
pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
vm_offset_t va)
{
register_t intr;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
/*
* Ensure we don't get switched out with the page table in an
* inconsistent state. We also need to ensure no interrupts fire
* as they may make use of an address we are about to invalidate.
*/
intr = intr_disable();
critical_enter();
/* Clear the old mapping */
pmap_load_clear(pte);
PTE_SYNC(pte);
pmap_invalidate_page(pmap, va);
/* Create the new mapping */
pmap_load_store(pte, newpte);
PTE_SYNC(pte);
critical_exit();
intr_restore(intr);
}
/*
* Tries to promote the 512, contiguous 4KB page mappings that are within a
* single level 2 table entry to a single 2MB page mapping. For promotion
* to occur, two conditions must be met: (1) the 4KB page mappings must map
* aligned, contiguous physical memory and (2) the 4KB page mappings must have
* identical characteristics.
*/
static void
pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
struct rwlock **lockp)
{
pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
register_t intr;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
firstl3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
newl2 = pmap_load(firstl3);
/* Ignore managed pages for now */
if ((newl2 & ATTR_SW_MANAGED) != 0)
return;
/* Check the alingment is valid */
if (((newl2 & ~ATTR_MASK) & L2_OFFSET) != 0)
return;
pa = newl2 + L2_SIZE - PAGE_SIZE;
for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
oldl3 = pmap_load(l3);
if (oldl3 != pa)
return;
pa -= PAGE_SIZE;
}
newl2 &= ~ATTR_DESCR_MASK;
newl2 |= L2_BLOCK;
/*
* Ensure we don't get switched out with the page table in an
* inconsistent state. We also need to ensure no interrupts fire
* as they may make use of an address we are about to invalidate.
*/
intr = intr_disable();
critical_enter();
/* Clear the old mapping */
pmap_load_clear(l2);
PTE_SYNC(l2);
pmap_invalidate_range(pmap, rounddown2(va, L2_SIZE),
roundup2(va, L2_SIZE));
/* Create the new mapping */
pmap_load_store(l2, newl2);
PTE_SYNC(l2);
critical_exit();
intr_restore(intr);
}
/*
* Insert the given physical page (p) at
* the specified virtual address (v) in the
@ -2214,7 +2340,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
struct rwlock *lock;
pd_entry_t *pde;
pt_entry_t new_l3, orig_l3;
pt_entry_t *l3;
pt_entry_t *l2, *l3;
pv_entry_t pv;
vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa;
vm_page_t mpte, om, l1_m, l2_m, l3_m;
@ -2241,6 +2367,20 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
lock = NULL;
PMAP_LOCK(pmap);
pde = pmap_pde(pmap, va, &lvl);
if (pde != NULL && lvl == 1) {
l2 = pmap_l1_to_l2(pde, va);
if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
(l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
if (va < VM_MAXUSER_ADDRESS) {
mpte = PHYS_TO_VM_PAGE(
pmap_load(l2) & ~ATTR_MASK);
mpte->wire_count++;
}
goto havel3;
}
}
if (va < VM_MAXUSER_ADDRESS) {
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
@ -2322,6 +2462,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
l3 = pmap_l2_to_l3(pde, va);
pmap_invalidate_page(pmap, va);
}
havel3:
om = NULL;
orig_l3 = pmap_load(l3);
@ -2402,7 +2543,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (orig_l3 != 0) {
validate:
orig_l3 = pmap_load_store(l3, new_l3);
PTE_SYNC(l3);
opa = orig_l3 & ~ATTR_MASK;
if (opa != pa) {
@ -2421,12 +2561,24 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
} else {
pmap_load_store(l3, new_l3);
PTE_SYNC(l3);
}
PTE_SYNC(l3);
pmap_invalidate_page(pmap, va);
if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
cpu_icache_sync_range(va, PAGE_SIZE);
/* XXX: Not yet, not all demotions are handled */
#if 0
if ((mpte == NULL || mpte->wire_count == NL3PG) &&
pmap_superpages_enabled() && (m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0) {
KASSERT(lvl == 2, ("Invalid pde level %d", lvl));
pmap_promote_l2(pmap, pde, va, &lock);
}
#endif
if (lock != NULL)
rw_wunlock(lock);
PMAP_UNLOCK(pmap);
@ -3342,14 +3494,271 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
m->md.pv_memattr = ma;
/*
* ARM64TODO: Implement the below (from the amd64 pmap)
* If "m" is a normal page, update its direct mapping. This update
* can be relied upon to perform any cache operations that are
* required for data coherence.
*/
if ((m->flags & PG_FICTITIOUS) == 0 &&
PHYS_IN_DMAP(VM_PAGE_TO_PHYS(m)))
panic("ARM64TODO: pmap_page_set_memattr");
pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
m->md.pv_memattr) != 0)
panic("memory attribute change on the direct map failed");
}
/*
* Changes the specified virtual address range's memory type to that given by
* the parameter "mode". The specified virtual address range must be
* completely contained within either the direct map or the kernel map. If
* the virtual address range is contained within the kernel map, then the
* memory type for each of the corresponding ranges of the direct map is also
* changed. (The corresponding ranges of the direct map are those ranges that
* map the same physical pages as the specified virtual address range.) These
* changes to the direct map are necessary because Intel describes the
* behavior of their processors as "undefined" if two or more mappings to the
* same physical page have different memory types.
*
* Returns zero if the change completed successfully, and either EINVAL or
* ENOMEM if the change failed. Specifically, EINVAL is returned if some part
* of the virtual address range was not mapped, and ENOMEM is returned if
* there was insufficient memory available to complete the change. In the
* latter case, the memory type may have been changed on some part of the
* virtual address range or the direct map.
*/
static int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
int error;
PMAP_LOCK(kernel_pmap);
error = pmap_change_attr_locked(va, size, mode);
PMAP_UNLOCK(kernel_pmap);
return (error);
}
static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
vm_offset_t base, offset, tmpva;
pt_entry_t l3, *pte, *newpte;
int lvl;
PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
if (!VIRT_IN_DMAP(base))
return (EINVAL);
for (tmpva = base; tmpva < base + size; ) {
pte = pmap_pte(kernel_pmap, va, &lvl);
if (pte == NULL)
return (EINVAL);
if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
/*
* We already have the correct attribute,
* ignore this entry.
*/
switch (lvl) {
default:
panic("Invalid DMAP table level: %d\n", lvl);
case 1:
tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
break;
case 2:
tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
break;
case 3:
tmpva += PAGE_SIZE;
break;
}
} else {
/*
* Split the entry to a level 3 table, then
* set the new attribute.
*/
switch (lvl) {
default:
panic("Invalid DMAP table level: %d\n", lvl);
case 1:
newpte = pmap_demote_l1(kernel_pmap, pte,
tmpva & ~L1_OFFSET);
if (newpte == NULL)
return (EINVAL);
pte = pmap_l1_to_l2(pte, tmpva);
case 2:
newpte = pmap_demote_l2(kernel_pmap, pte,
tmpva & ~L2_OFFSET);
if (newpte == NULL)
return (EINVAL);
pte = pmap_l2_to_l3(pte, tmpva);
case 3:
/* Update the entry */
l3 = pmap_load(pte);
l3 &= ~ATTR_IDX_MASK;
l3 |= ATTR_IDX(mode);
pmap_update_entry(kernel_pmap, pte, l3, tmpva);
/*
* If moving to a non-cacheable entry flush
* the cache.
*/
if (mode == VM_MEMATTR_UNCACHEABLE)
cpu_dcache_wbinv_range(tmpva, L3_SIZE);
break;
}
tmpva += PAGE_SIZE;
}
}
return (0);
}
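In the level 3 case above only the memory-attribute index field of the descriptor changes. A standalone sketch of that masking step, assuming the AttrIndx field occupies bits [4:2] of an ARMv8 block/page descriptor; the macro values here are assumptions for illustration, not the kernel's pte.h definitions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed layout: AttrIndx occupies bits [4:2] of the descriptor. */
#define ATTR_IDX_SHIFT	2
#define ATTR_IDX_MASK	(UINT64_C(0x7) << ATTR_IDX_SHIFT)
#define ATTR_IDX(x)	((uint64_t)(x) << ATTR_IDX_SHIFT)

/* Rewrite only the memory-attribute index; address and other bits stay. */
static uint64_t
set_memattr_index(uint64_t pte, unsigned mode)
{
	pte &= ~ATTR_IDX_MASK;
	pte |= ATTR_IDX(mode);
	return (pte);
}

int
main(void)
{
	uint64_t pte = UINT64_C(0x0000000040000703);	/* arbitrary descriptor */

	printf("before: %#018" PRIx64 "\n", pte);
	printf("after:  %#018" PRIx64 "\n", set_memattr_index(pte, 2));
	return (0);
}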
/*
* Create an L2 table to map all addresses within an L1 mapping.
*/
static pt_entry_t *
pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
{
pt_entry_t *l2, newl2, oldl1;
vm_offset_t tmpl1;
vm_paddr_t l2phys, phys;
vm_page_t ml2;
int i;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldl1 = pmap_load(l1);
KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
("pmap_demote_l1: Demoting a non-block entry"));
KASSERT((va & L1_OFFSET) == 0,
("pmap_demote_l1: Invalid virtual address %#lx", va));
tmpl1 = 0;
if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
tmpl1 = kva_alloc(PAGE_SIZE);
if (tmpl1 == 0)
return (NULL);
}
if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
" in pmap %p", va, pmap);
return (NULL);
}
l2phys = VM_PAGE_TO_PHYS(ml2);
l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
/* The address the range points at */
phys = oldl1 & ~ATTR_MASK;
/* The attributes from the old l1 table to be copied */
newl2 = oldl1 & ATTR_MASK;
/* Create the new entries */
for (i = 0; i < Ln_ENTRIES; i++) {
l2[i] = newl2 | phys;
phys += L2_SIZE;
}
cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
if (tmpl1 != 0) {
pmap_kenter(tmpl1, PAGE_SIZE,
DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
}
pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va);
if (tmpl1 != 0) {
pmap_kremove(tmpl1);
kva_free(tmpl1, PAGE_SIZE);
}
return (l2);
}
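pmap_demote_l1() replaces a single 1 GB block entry with a freshly built table of 512 L2 entries that inherit the old attributes and step through the block's physical range. The fill loop in isolation, as a runnable userland sketch with illustrative constants (512 entries, a 2 MB step, attributes assumed to sit in the low 12 bits):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define Ln_ENTRIES	512
#define L2_SIZE		(UINT64_C(2) * 1024 * 1024)
#define ATTR_MASK	UINT64_C(0xfff)		/* attribute field, illustrative */

int
main(void)
{
	static uint64_t l2[Ln_ENTRIES];
	uint64_t oldl1 = UINT64_C(0x80000000) | 0x70d;	/* old 1 GB block entry */
	uint64_t phys = oldl1 & ~ATTR_MASK;	/* start of the physical range */
	uint64_t newl2 = oldl1 & ATTR_MASK;	/* attributes carried over */
	int i;

	for (i = 0; i < Ln_ENTRIES; i++) {
		l2[i] = newl2 | phys;
		phys += L2_SIZE;
	}
	printf("l2[0]   = %#" PRIx64 "\n", l2[0]);
	printf("l2[511] = %#" PRIx64 "\n", l2[511]);
	return (0);
}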
/*
* Create an L3 table to map all addresses within an L2 mapping.
*/
static pt_entry_t *
pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
struct rwlock **lockp)
{
pt_entry_t *l3, newl3, oldl2;
vm_offset_t tmpl2;
vm_paddr_t l3phys, phys;
vm_page_t ml3;
int i;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
oldl2 = pmap_load(l2);
KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
("pmap_demote_l2: Demoting a non-block entry"));
KASSERT((va & L2_OFFSET) == 0,
("pmap_demote_l2: Invalid virtual address %#lx", va));
KASSERT((oldl2 & ATTR_SW_MANAGED) == 0,
("pmap_demote_l2: TODO: Demote managed pages"));
tmpl2 = 0;
if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
tmpl2 = kva_alloc(PAGE_SIZE);
if (tmpl2 == 0)
return (NULL);
}
if ((ml3 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
" in pmap %p", va, pmap);
return (NULL);
}
l3phys = VM_PAGE_TO_PHYS(ml3);
l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
/* The address the range points at */
phys = oldl2 & ~ATTR_MASK;
/* The attributes from the old l2 table to be copied */
newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
/* Create the new entries */
for (i = 0; i < Ln_ENTRIES; i++) {
l3[i] = newl3 | phys;
phys += L3_SIZE;
}
cpu_dcache_wb_range((vm_offset_t)l3, PAGE_SIZE);
if (tmpl2 != 0) {
pmap_kenter(tmpl2, PAGE_SIZE,
DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
}
pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va);
if (tmpl2 != 0) {
pmap_kremove(tmpl2);
kva_free(tmpl2, PAGE_SIZE);
}
return (l3);
}
static pt_entry_t *
pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
{
struct rwlock *lock;
pt_entry_t *l3;
lock = NULL;
l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
if (lock != NULL)
rw_wunlock(lock);
return (l3);
}
/*
@ -3482,6 +3891,53 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
}
}
int
pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
{
#ifdef SMP
uint64_t par;
#endif
switch (ESR_ELx_EXCEPTION(esr)) {
case EXCP_DATA_ABORT_L:
case EXCP_DATA_ABORT:
break;
default:
return (KERN_FAILURE);
}
#ifdef SMP
PMAP_LOCK(pmap);
switch (esr & ISS_DATA_DFSC_MASK) {
case ISS_DATA_DFSC_TF_L0:
case ISS_DATA_DFSC_TF_L1:
case ISS_DATA_DFSC_TF_L2:
case ISS_DATA_DFSC_TF_L3:
/* Ask the MMU to check the address */
if (pmap == kernel_pmap)
par = arm64_address_translate_s1e1r(far);
else
par = arm64_address_translate_s1e0r(far);
/*
* If the translation was successful the address was invalid
* due to a break-before-make sequence. We can unlock and
* return success to the trap handler.
*/
if (PAR_SUCCESS(par)) {
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
break;
default:
break;
}
PMAP_UNLOCK(pmap);
#endif
return (KERN_FAILURE);
}
/*
* Increase the starting virtual address of the given mapping if a
* different alignment might result in more superpage mappings.
@ -3490,6 +3946,20 @@ void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
vm_offset_t *addr, vm_size_t size)
{
vm_offset_t superpage_offset;
if (size < L2_SIZE)
return;
if (object != NULL && (object->flags & OBJ_COLORED) != 0)
offset += ptoa(object->pg_color);
superpage_offset = offset & L2_OFFSET;
if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
(*addr & L2_OFFSET) == superpage_offset)
return;
if ((*addr & L2_OFFSET) < superpage_offset)
*addr = (*addr & ~L2_OFFSET) + superpage_offset;
else
*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
}
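pmap_align_superpage() adjusts the requested virtual address so that it lands at the same offset within a 2 MB block as the backing object offset, which is what makes later promotion possible. The same arithmetic as a standalone sketch, assuming L2_SIZE is 2 MB; the constants are redefined here purely for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define L2_SIZE		(UINT64_C(2) * 1024 * 1024)	/* 2 MB block */
#define L2_OFFSET	(L2_SIZE - 1)

/*
 * Align "addr" so that it shares the object offset's position within a
 * 2 MB block; only worth doing when at least one full block would fit.
 */
static uint64_t
align_superpage(uint64_t offset, uint64_t addr, uint64_t size)
{
	uint64_t superpage_offset;

	if (size < L2_SIZE)
		return (addr);
	superpage_offset = offset & L2_OFFSET;
	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
	    (addr & L2_OFFSET) == superpage_offset)
		return (addr);
	if ((addr & L2_OFFSET) < superpage_offset)
		return ((addr & ~L2_OFFSET) + superpage_offset);
	return (((addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset);
}

int
main(void)
{
	uint64_t addr;

	addr = align_superpage(0x123000, 0x40010000, 8 * L2_SIZE);
	/* 0x40010000 % 2MB (0x10000) < 0x123000, so expect 0x40123000. */
	printf("aligned address: %#" PRIx64 "\n", addr);
	return (0);
}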
/**

View File

@ -179,16 +179,6 @@ data_abort(struct trapframe *frame, uint64_t esr, uint64_t far, int lower)
return;
}
KASSERT(td->td_md.md_spinlock_count == 0,
("data abort with spinlock held"));
if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
print_registers(frame);
printf(" far: %16lx\n", far);
printf(" esr: %.8lx\n", esr);
panic("data abort in critical section or under mutex");
}
p = td->td_proc;
if (lower)
map = &p->p_vmspace->vm_map;
@ -200,6 +190,19 @@ data_abort(struct trapframe *frame, uint64_t esr, uint64_t far, int lower)
map = &p->p_vmspace->vm_map;
}
if (pmap_fault(map->pmap, esr, far) == KERN_SUCCESS)
return;
KASSERT(td->td_md.md_spinlock_count == 0,
("data abort with spinlock held"));
if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK |
WARN_GIANTOK, NULL, "Kernel page fault") != 0) {
print_registers(frame);
printf(" far: %16lx\n", far);
printf(" esr: %.8lx\n", esr);
panic("data abort in critical section or under mutex");
}
va = trunc_page(far);
ftype = ((esr >> 6) & 1) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;

View File

@ -201,6 +201,8 @@ cpu_set_user_tls(struct thread *td, void *tls_base)
pcb = td->td_pcb;
pcb->pcb_tpidr_el0 = (register_t)tls_base;
if (td == curthread)
WRITE_SPECIALREG(tpidr_el0, tls_base);
return (0);
}

View File

@ -165,7 +165,6 @@ static struct sysentvec cloudabi64_elf_sysvec = {
.sv_pagesize = PAGE_SIZE,
.sv_minuser = VM_MIN_ADDRESS,
.sv_maxuser = VM_MAXUSER_ADDRESS,
.sv_usrstack = USRSTACK,
.sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
.sv_copyout_strings = cloudabi64_copyout_strings,
.sv_setregs = cloudabi64_proc_setregs,

View File

@ -151,6 +151,8 @@ void pmap_unmap_io_transient(vm_page_t *, vm_offset_t *, int, boolean_t);
bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
pd_entry_t **, pt_entry_t **);
int pmap_fault(pmap_t, uint64_t, uint64_t);
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#endif /* _KERNEL */

View File

@ -63,6 +63,18 @@ CFLAGS+= -DNO_PCI -DEFI
LIBSTAND= ${.OBJDIR}/../../../../lib/libstand/libstand.a
.endif
.if !defined(BOOT_HIDE_SERIAL_NUMBERS)
# Export serial numbers, UUID, and asset tag from loader.
CFLAGS+= -DSMBIOS_SERIAL_NUMBERS
.if defined(BOOT_LITTLE_ENDIAN_UUID)
# Use little-endian UUID format as defined in SMBIOS 2.6.
CFLAGS+= -DSMBIOS_LITTLE_ENDIAN_UUID
.elif defined(BOOT_NETWORK_ENDIAN_UUID)
# Use network-endian UUID format for backward compatibility.
CFLAGS+= -DSMBIOS_NETWORK_ENDIAN_UUID
.endif
.endif
.if ${MK_FORTH} != "no"
BOOT_FORTH= yes
CFLAGS+= -DBOOT_FORTH

View File

@ -72,15 +72,11 @@
clock-frequency = < 400000000 >;
};
htif0: htif@0 {
compatible = "riscv,htif";
interrupts = < 0 >;
console0: console@0 {
compatible = "riscv,console";
status = "okay";
interrupts = < 1 >;
interrupt-parent = < &pic0 >;
console0: console@0 {
compatible = "htif,console";
status = "okay";
};
};
};

View File

@ -83,15 +83,11 @@
clock-frequency = < 1000000 >;
};
htif0: htif@0 {
compatible = "riscv,htif";
interrupts = < 0 >;
console0: console@0 {
compatible = "riscv,console";
status = "okay";
interrupts = < 1 >;
interrupt-parent = < &pic0 >;
console0: console@0 {
compatible = "htif,console";
status = "okay";
};
};
};

View File

@ -65,6 +65,10 @@
};
memory {
/*
* This is not used currently.
* We take information from sbi_query_memory.
*/
device_type = "memory";
reg = <0x80000000 0x40000000>; /* 1GB at 0x80000000 */
};
@ -90,15 +94,11 @@
clock-frequency = < 1000000 >;
};
htif0: htif@0 {
compatible = "riscv,htif";
console0: console@0 {
compatible = "riscv,console";
status = "okay";
interrupts = < 1 >;
interrupt-parent = < &pic0 >;
console0: console@0 {
compatible = "htif,console";
status = "okay";
};
};
};

View File

@ -382,12 +382,12 @@ e_fmt: .asciz "Error: Client format not supported\n"
#ifdef BTXLDR_VERBOSE
m_mem: .asciz "Starting in protected mode (base mem=\0)\n"
m_esp: .asciz "Arguments passed (esp=\0):\n"
m_args: .asciz"<howto="
.asciz" bootdev="
.asciz" junk="
.asciz" "
.asciz" "
.asciz" bootinfo=\0>\n"
m_args: .asciz "<howto="
.asciz " bootdev="
.asciz " junk="
.asciz " "
.asciz " "
.asciz " bootinfo=\0>\n"
m_rel_bi: .asciz "Relocated bootinfo (size=48) to \0\n"
m_rel_args: .asciz "Relocated arguments (size=18) to \0\n"
m_rel_btx: .asciz "Relocated kernel (size=\0) to \0\n"

View File

@ -238,6 +238,10 @@ smbios_parse_table(const caddr_t addr)
smbios_setenv("smbios.system.serial", addr, 0x07);
smbios_setuuid("smbios.system.uuid", addr + 0x08, smbios.ver);
#endif
if (smbios.major >= 2 && smbios.minor >= 4) {
smbios_setenv("smbios.system.sku", addr, 0x19);
smbios_setenv("smbios.system.family", addr, 0x1a);
}
break;
case 2: /* 3.3.3 Base Board (or Module) Information (Type 2) */
@ -246,7 +250,9 @@ smbios_parse_table(const caddr_t addr)
smbios_setenv("smbios.planar.version", addr, 0x06);
#ifdef SMBIOS_SERIAL_NUMBERS
smbios_setenv("smbios.planar.serial", addr, 0x07);
smbios_setenv("smbios.planar.tag", addr, 0x08);
#endif
smbios_setenv("smbios.planar.location", addr, 0x0a);
break;
case 3: /* 3.3.4 System Enclosure or Chassis (Type 3) */

View File

@ -814,6 +814,14 @@ static struct da_quirk_entry da_quirk_table[] =
{T_DIRECT, SIP_MEDIA_REMOVABLE, "JetFlash", "Transcend*",
"*"}, /*quirks*/ DA_Q_NO_RC16
},
{
/*
* I-O Data USB Flash Disk
* PR: usb/211716
*/
{T_DIRECT, SIP_MEDIA_REMOVABLE, "I-O DATA", "USB Flash Disk*",
"*"}, /*quirks*/ DA_Q_NO_RC16
},
/* ATA/SATA devices over SAS/USB/... */
{
/* Hitachi Advanced Format (4k) drives */

View File

@ -33,6 +33,7 @@
#include <contrib/cloudabi/cloudabi_types_common.h>
struct file;
struct sysentvec;
struct thread;
struct timespec;
@ -76,4 +77,8 @@ int cloudabi_futex_lock_wrlock(struct thread *, cloudabi_lock_t *,
cloudabi_scope_t, cloudabi_clockid_t, cloudabi_timestamp_t,
cloudabi_timestamp_t);
/* vDSO setup and teardown. */
void cloudabi_vdso_init(struct sysentvec *, char *, char *);
void cloudabi_vdso_destroy(struct sysentvec *);
#endif

View File

@ -0,0 +1,88 @@
/*-
* Copyright (c) 2016 Nuxi, https://nuxi.nl/
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/sysent.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <compat/cloudabi/cloudabi_util.h>
void
cloudabi_vdso_init(struct sysentvec *sv, char *begin, char *end)
{
vm_page_t m;
vm_object_t obj;
vm_offset_t addr;
size_t i, pages, pages_length, vdso_length;
/* Determine the number of pages needed to store the vDSO. */
vdso_length = end - begin;
pages = howmany(vdso_length, PAGE_SIZE);
pages_length = pages * PAGE_SIZE;
/* Allocate a VM object and fill it with the vDSO. */
obj = vm_pager_allocate(OBJT_PHYS, 0, pages_length,
VM_PROT_DEFAULT, 0, NULL);
addr = kva_alloc(PAGE_SIZE);
for (i = 0; i < pages; ++i) {
VM_OBJECT_WLOCK(obj);
m = vm_page_grab(obj, i, VM_ALLOC_NOBUSY | VM_ALLOC_ZERO);
m->valid = VM_PAGE_BITS_ALL;
VM_OBJECT_WUNLOCK(obj);
pmap_qenter(addr, &m, 1);
memcpy((void *)addr, begin + i * PAGE_SIZE,
MIN(vdso_length - i * PAGE_SIZE, PAGE_SIZE));
pmap_qremove(addr, 1);
}
kva_free(addr, PAGE_SIZE);
/*
* Place the vDSO at the top of the address space. The user
* stack can start right below it.
*/
sv->sv_shared_page_base = sv->sv_maxuser - pages_length;
sv->sv_shared_page_len = pages_length;
sv->sv_shared_page_obj = obj;
sv->sv_usrstack = sv->sv_shared_page_base;
}
void
cloudabi_vdso_destroy(struct sysentvec *sv)
{
vm_object_deallocate(sv->sv_shared_page_obj);
}
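cloudabi_vdso_init() rounds the vDSO blob up to whole pages and parks it at the very top of the user address space, with the stack starting directly below it. A small sketch of that layout computation; the vDSO size and the sv_maxuser value used here are hypothetical.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	UINT64_C(4096)
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

int
main(void)
{
	uint64_t vdso_length = 5 * 1024;		/* hypothetical blob size */
	uint64_t maxuser = UINT64_C(0x0000800000000000); /* hypothetical sv_maxuser */
	uint64_t pages, pages_length, shared_page_base, usrstack;

	pages = howmany(vdso_length, PAGE_SIZE);	/* 2 pages for 5 KB */
	pages_length = pages * PAGE_SIZE;
	shared_page_base = maxuser - pages_length;	/* vDSO sits at the top */
	usrstack = shared_page_base;			/* stack starts right below */

	printf("vDSO pages:        %" PRIu64 "\n", pages);
	printf("shared page base:  %#" PRIx64 "\n", shared_page_base);
	printf("user stack top:    %#" PRIx64 "\n", usrstack);
	return (0);
}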

View File

@ -38,8 +38,13 @@ __FBSDID("$FreeBSD$");
#include <contrib/cloudabi/cloudabi64_types.h>
#include <compat/cloudabi/cloudabi_util.h>
#include <compat/cloudabi64/cloudabi64_util.h>
extern char _binary_cloudabi64_vdso_o_start[];
extern char _binary_cloudabi64_vdso_o_end[];
register_t *
cloudabi64_copyout_strings(struct image_params *imgp)
{
@ -107,6 +112,8 @@ cloudabi64_fixup(register_t **stack_base, struct image_params *imgp)
PTR(CLOUDABI_AT_PHDR, args->phdr),
VAL(CLOUDABI_AT_PHNUM, args->phnum),
VAL(CLOUDABI_AT_TID, td->td_tid),
PTR(CLOUDABI_AT_SYSINFO_EHDR,
imgp->proc->p_sysent->sv_shared_page_base),
#undef VAL
#undef PTR
{ .a_type = CLOUDABI_AT_NULL },
@ -127,6 +134,9 @@ cloudabi64_modevent(module_t mod, int type, void *data)
switch (type) {
case MOD_LOAD:
cloudabi_vdso_init(cloudabi64_brand.sysvec,
_binary_cloudabi64_vdso_o_start,
_binary_cloudabi64_vdso_o_end);
if (elf64_insert_brand_entry(&cloudabi64_brand) < 0) {
printf("Failed to add CloudABI ELF brand handler\n");
return (EINVAL);
@ -139,6 +149,7 @@ cloudabi64_modevent(module_t mod, int type, void *data)
printf("Failed to remove CloudABI ELF brand handler\n");
return (EINVAL);
}
cloudabi_vdso_destroy(cloudabi64_brand.sysvec);
return (0);
default:
return (EOPNOTSUPP);

View File

@ -0,0 +1,51 @@
/*
* Linker script for 64-bit vDSO for CloudABI.
* Based on sys/amd64/linux/linux_vdso.lds.s
*
* $FreeBSD$
*/
SECTIONS
{
. = . + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.note : { *(.note.*) } :text :note
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
.dynamic : { *(.dynamic) } :text :dynamic
.rodata : { *(.rodata*) } :text
.data : {
*(.data*)
*(.sdata*)
*(.got.plt) *(.got)
*(.gnu.linkonce.d.*)
*(.bss*)
*(.dynbss*)
*(.gnu.linkonce.b.*)
}
.altinstructions : { *(.altinstructions) }
.altinstr_replacement : { *(.altinstr_replacement) }
. = ALIGN(0x100);
.text : { *(.test .text*) } :text =0x90909090
}
PHDRS
{
text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
note PT_NOTE FLAGS(4); /* PF_R */
eh_frame_hdr PT_GNU_EH_FRAME;
}

View File

@ -284,6 +284,7 @@ compat/cloudabi/cloudabi_proc.c optional compat_cloudabi64
compat/cloudabi/cloudabi_random.c optional compat_cloudabi64
compat/cloudabi/cloudabi_sock.c optional compat_cloudabi64
compat/cloudabi/cloudabi_thread.c optional compat_cloudabi64
compat/cloudabi/cloudabi_vdso.c optional compat_cloudabi64
compat/cloudabi64/cloudabi64_fd.c optional compat_cloudabi64
compat/cloudabi64/cloudabi64_module.c optional compat_cloudabi64
compat/cloudabi64/cloudabi64_poll.c optional compat_cloudabi64
@ -3348,6 +3349,7 @@ kern/subr_disk.c standard
kern/subr_eventhandler.c standard
kern/subr_fattime.c standard
kern/subr_firmware.c optional firmware
kern/subr_gtaskqueue.c standard
kern/subr_hash.c standard
kern/subr_hints.c standard
kern/subr_kdb.c standard

View File

@ -8,6 +8,18 @@
# dependency lines other than the first are silently ignored.
#
#
cloudabi64_vdso.o optional compat_cloudabi64 \
dependency "$S/contrib/cloudabi/cloudabi_vdso_x86_64.c" \
compile-with "${CC} -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi64/cloudabi64_vdso.lds.s -D_KERNEL -I. -I$S -I$S/contrib/cloudabi -O2 -fomit-frame-pointer $S/contrib/cloudabi/cloudabi_vdso_x86_64.c -o ${.TARGET}" \
no-obj no-implicit-rule \
clean "cloudabi64_vdso.o"
#
cloudabi64_vdso_blob.o optional compat_cloudabi64 \
dependency "cloudabi64_vdso.o" \
compile-with "${OBJCOPY} --input-target binary --output-target elf64-x86-64-freebsd --binary-architecture i386 cloudabi64_vdso.o ${.TARGET}" \
no-implicit-rule \
clean "cloudabi64_vdso_blob.o"
#
linux32_genassym.o optional compat_linux32 \
dependency "$S/amd64/linux32/linux32_genassym.c" \
compile-with "${CC} ${CFLAGS:N-fno-common} -c ${.IMPSRC}" \
@ -282,6 +294,7 @@ dev/hyperv/vmbus/vmbus_br.c optional hyperv
dev/hyperv/vmbus/vmbus_chan.c optional hyperv
dev/hyperv/vmbus/vmbus_et.c optional hyperv
dev/hyperv/vmbus/vmbus_if.m optional hyperv
dev/hyperv/vmbus/vmbus_xact.c optional hyperv
dev/hyperv/vmbus/amd64/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/amd64/vmbus_vector.S optional hyperv
dev/nfe/if_nfe.c optional nfe pci

View File

@ -1,4 +1,16 @@
# $FreeBSD$
cloudabi64_vdso.o optional compat_cloudabi64 \
dependency "$S/contrib/cloudabi/cloudabi_vdso_aarch64.c" \
compile-with "${CC} -shared -nostdinc -nostdlib -Wl,-T$S/compat/cloudabi64/cloudabi64_vdso.lds.s -D_KERNEL -I. -I$S -I$S/contrib/cloudabi -O2 -fomit-frame-pointer $S/contrib/cloudabi/cloudabi_vdso_aarch64.c -o ${.TARGET}" \
no-obj no-implicit-rule \
clean "cloudabi64_vdso.o"
#
cloudabi64_vdso_blob.o optional compat_cloudabi64 \
dependency "cloudabi64_vdso.o" \
compile-with "${OBJCOPY} --input-target binary --output-target elf64-littleaarch64 --binary-architecture aarch64 cloudabi64_vdso.o ${.TARGET}" \
no-implicit-rule \
clean "cloudabi64_vdso_blob.o"
#
arm/arm/generic_timer.c standard
arm/arm/gic.c standard
arm/arm/gic_fdt.c optional fdt

View File

@ -253,6 +253,7 @@ dev/hyperv/vmbus/vmbus_br.c optional hyperv
dev/hyperv/vmbus/vmbus_chan.c optional hyperv
dev/hyperv/vmbus/vmbus_et.c optional hyperv
dev/hyperv/vmbus/vmbus_if.m optional hyperv
dev/hyperv/vmbus/vmbus_xact.c optional hyperv
dev/hyperv/vmbus/i386/hyperv_machdep.c optional hyperv
dev/hyperv/vmbus/i386/vmbus_vector.S optional hyperv
dev/ichwd/ichwd.c optional ichwd

View File

@ -19,9 +19,6 @@ libkern/flsl.c standard
libkern/flsll.c standard
libkern/memmove.c standard
libkern/memset.c standard
riscv/htif/htif.c optional htif
riscv/htif/htif_block.c optional htif
riscv/htif/htif_console.c optional htif
riscv/riscv/autoconf.c standard
riscv/riscv/bcopy.c standard
riscv/riscv/bus_machdep.c standard
@ -36,6 +33,7 @@ riscv/riscv/db_interface.c optional ddb
riscv/riscv/db_trace.c optional ddb
riscv/riscv/dump_machdep.c standard
riscv/riscv/elf_machdep.c standard
riscv/riscv/exception.S standard
riscv/riscv/intr_machdep.c standard
riscv/riscv/in_cksum.c optional inet | inet6
riscv/riscv/identcpu.c standard
@ -47,6 +45,8 @@ riscv/riscv/mem.c standard
riscv/riscv/nexus.c standard
riscv/riscv/ofw_machdep.c optional fdt
riscv/riscv/pmap.c standard
riscv/riscv/riscv_console.c optional rcons
riscv/riscv/sbi.S standard
riscv/riscv/stack_machdep.c optional ddb | stack
riscv/riscv/support.S standard
riscv/riscv/swtch.S standard

View File

@ -65,6 +65,10 @@ OSRELDATE!= awk '/^\#define[[:space:]]*__FreeBSD_version/ { print $$3 }' \
# Keep the related ports builds in the obj directory so that they are only rebuilt once per kernel build
WRKDIRPREFIX?= ${MAKEOBJDIRPREFIX}${SRC_BASE}/sys/${KERNCONF}
PORTSMODULESENV=\
env \
-u CC \
-u CXX \
-u CPP \
PATH=${PATH}:${LOCALBASE}/bin:${LOCALBASE}/sbin \
SRC_BASE=${SRC_BASE} \
OSVERSION=${OSRELDATE} \

View File

@ -6,7 +6,7 @@ SEARCH_DIR(/usr/lib);
SECTIONS
{
/* Read-only sections, merged into text segment: */
. = kernbase + 0x80000000 /* KERNENTRY */;
. = kernbase;
.text : AT(ADDR(.text) - kernbase)
{
*(.text)

View File

@ -137,6 +137,8 @@ filemon_proc_get(struct proc *p)
{
struct filemon *filemon;
if (p->p_filemon == NULL)
return (NULL);
PROC_LOCK(p);
filemon = filemon_acquire(p->p_filemon);
PROC_UNLOCK(p);

View File

@ -89,6 +89,11 @@ struct vmbus_chanpkt_hdr {
(const void *)((const uint8_t *)(pkt) + \
VMBUS_CHANPKT_GETLEN((pkt)->cph_hlen))
/* Include padding */
#define VMBUS_CHANPKT_DATALEN(pkt) \
(VMBUS_CHANPKT_GETLEN((pkt)->cph_tlen) -\
VMBUS_CHANPKT_GETLEN((pkt)->cph_hlen))
struct vmbus_rxbuf_desc {
uint32_t rb_len;
uint32_t rb_ofs;

View File

@ -0,0 +1,59 @@
/*-
* Copyright (c) 2016 Microsoft Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _VMBUS_XACT_H_
#define _VMBUS_XACT_H_
#include <sys/param.h>
#include <sys/bus.h>
struct vmbus_xact;
struct vmbus_xact_ctx;
struct vmbus_xact_ctx *vmbus_xact_ctx_create(bus_dma_tag_t dtag,
size_t req_size, size_t resp_size,
size_t priv_size);
void vmbus_xact_ctx_destroy(struct vmbus_xact_ctx *ctx);
struct vmbus_xact *vmbus_xact_get(struct vmbus_xact_ctx *ctx,
size_t req_len);
void vmbus_xact_put(struct vmbus_xact *xact);
void *vmbus_xact_req_data(const struct vmbus_xact *xact);
bus_addr_t vmbus_xact_req_paddr(const struct vmbus_xact *xact);
void *vmbus_xact_priv(const struct vmbus_xact *xact,
size_t priv_len);
void vmbus_xact_activate(struct vmbus_xact *xact);
void vmbus_xact_deactivate(struct vmbus_xact *xact);
const void *vmbus_xact_wait(struct vmbus_xact *xact,
size_t *resp_len);
void vmbus_xact_wakeup(struct vmbus_xact *xact,
const void *data, size_t dlen);
void vmbus_xact_ctx_wakeup(struct vmbus_xact_ctx *ctx,
const void *data, size_t dlen);
#endif /* !_VMBUS_XACT_H_ */
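The declarations above are used as a get / fill / activate / send / wait / put sequence; the netvsc conversion later in this commit follows exactly that shape. A condensed sketch of one round trip is shown below; the request/response structures and the my_chan_send() helper are placeholders invented for the example, and only the vmbus_xact_*() calls come from this header.

#include <sys/param.h>
#include <sys/errno.h>

#include <dev/hyperv/include/vmbus_xact.h>

/* Placeholder request/response layouts and send helper, not real NVS types. */
struct my_req { uint32_t type; };
struct my_resp { uint32_t type; uint32_t status; };
int my_chan_send(const void *req, size_t reqlen, void *cbarg);

static int
send_one_xact(struct vmbus_xact_ctx *ctx)
{
	struct vmbus_xact *xact;
	struct my_req *req;
	const struct my_resp *resp;
	size_t resp_len;
	int error;

	xact = vmbus_xact_get(ctx, sizeof(*req));
	if (xact == NULL)
		return (ENXIO);
	req = vmbus_xact_req_data(xact);
	req->type = 1;

	vmbus_xact_activate(xact);
	error = my_chan_send(req, sizeof(*req), xact);
	if (error != 0) {
		/* The request never went out; undo the activation. */
		vmbus_xact_deactivate(xact);
		vmbus_xact_put(xact);
		return (error);
	}

	/* Block until the reply is posted via vmbus_xact_wakeup(). */
	resp = vmbus_xact_wait(xact, &resp_len);
	error = (resp_len >= sizeof(*resp) && resp->status == 0) ? 0 : EINVAL;
	vmbus_xact_put(xact);
	return (error);
}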

View File

@ -45,9 +45,11 @@
#include <machine/atomic.h>
#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/netvsc/hv_net_vsc.h>
#include <dev/hyperv/netvsc/hv_rndis.h>
#include <dev/hyperv/netvsc/hv_rndis_filter.h>
#include <dev/hyperv/netvsc/if_hnreg.h>
MALLOC_DEFINE(M_NETVSC, "netvsc", "Hyper-V netvsc driver");
@ -68,6 +70,15 @@ static void hv_nv_on_receive_completion(struct vmbus_channel *chan,
static void hv_nv_on_receive(netvsc_dev *net_dev,
struct hn_rx_ring *rxr, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt);
static void hn_nvs_sent_none(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
const struct nvsp_msg_ *msg, int);
static void hn_nvs_sent_xact(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
const struct nvsp_msg_ *msg, int dlen);
static struct hn_send_ctx hn_send_ctx_none =
HN_SEND_CTX_INITIALIZER(hn_nvs_sent_none, NULL);
/*
*
@ -141,9 +152,14 @@ hv_nv_get_next_send_section(netvsc_dev *net_dev)
static int
hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
{
struct vmbus_xact *xact;
struct hn_nvs_rxbuf_conn *conn;
const struct hn_nvs_rxbuf_connresp *resp;
size_t resp_len;
struct hn_send_ctx sndc;
netvsc_dev *net_dev;
nvsp_msg *init_pkt;
int ret = 0;
uint32_t status;
int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev) {
@ -155,7 +171,7 @@ hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (net_dev->rx_buf == NULL) {
device_printf(sc->hn_dev, "allocate rxbuf failed\n");
return ENOMEM;
return (ENOMEM);
}
/*
@ -165,73 +181,76 @@ hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
* Only primary channel has RXBUF connected to it. Sub-channels
* just share this RXBUF.
*/
ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
error = vmbus_chan_gpadl_connect(sc->hn_prichan,
net_dev->rxbuf_dma.hv_paddr, net_dev->rx_buf_size,
&net_dev->rx_buf_gpadl_handle);
if (ret != 0) {
device_printf(sc->hn_dev, "rxbuf gpadl connect failed: %d\n",
ret);
if (error) {
if_printf(sc->hn_ifp, "rxbuf gpadl connect failed: %d\n",
error);
goto cleanup;
}
/* sema_wait(&ext->channel_init_sema); KYS CHECK */
/* Notify the NetVsp of the gpadl handle */
init_pkt = &net_dev->channel_init_packet;
memset(init_pkt, 0, sizeof(nvsp_msg));
init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
net_dev->rx_buf_gpadl_handle;
init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
NETVSC_RECEIVE_BUFFER_ID;
/* Send the gpadl notification request */
ret = vmbus_chan_send(sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
if (ret != 0) {
goto cleanup;
}
sema_wait(&net_dev->channel_init_sema);
/* Check the response */
if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
!= nvsp_status_success) {
ret = EINVAL;
goto cleanup;
}
net_dev->rx_section_count =
init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;
net_dev->rx_sections = malloc(net_dev->rx_section_count *
sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
memcpy(net_dev->rx_sections,
init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));
/*
* For first release, there should only be 1 section that represents
* the entire receive buffer
* Connect RXBUF to NVS.
*/
if (net_dev->rx_section_count != 1
|| net_dev->rx_sections->offset != 0) {
ret = EINVAL;
xact = vmbus_xact_get(sc->hn_xact, sizeof(*conn));
if (xact == NULL) {
if_printf(sc->hn_ifp, "no xact for nvs rxbuf conn\n");
error = ENXIO;
goto cleanup;
}
goto exit;
conn = vmbus_xact_req_data(xact);
conn->nvs_type = HN_NVS_TYPE_RXBUF_CONN;
conn->nvs_gpadl = net_dev->rx_buf_gpadl_handle;
conn->nvs_sig = HN_NVS_RXBUF_SIG;
hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
vmbus_xact_activate(xact);
error = vmbus_chan_send(sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
conn, sizeof(*conn), (uint64_t)(uintptr_t)&sndc);
if (error != 0) {
if_printf(sc->hn_ifp, "send nvs rxbuf conn failed: %d\n",
error);
vmbus_xact_deactivate(xact);
vmbus_xact_put(xact);
goto cleanup;
}
resp = vmbus_xact_wait(xact, &resp_len);
if (resp_len < sizeof(*resp)) {
if_printf(sc->hn_ifp, "invalid rxbuf conn resp length %zu\n",
resp_len);
vmbus_xact_put(xact);
error = EINVAL;
goto cleanup;
}
if (resp->nvs_type != HN_NVS_TYPE_RXBUF_CONNRESP) {
if_printf(sc->hn_ifp, "not rxbuf conn resp, type %u\n",
resp->nvs_type);
vmbus_xact_put(xact);
error = EINVAL;
goto cleanup;
}
status = resp->nvs_status;
vmbus_xact_put(xact);
if (status != HN_NVS_STATUS_OK) {
if_printf(sc->hn_ifp, "rxbuf conn failed: %x\n", status);
error = EIO;
goto cleanup;
}
net_dev->rx_section_count = 1;
return (0);
cleanup:
hv_nv_destroy_rx_buffer(net_dev);
exit:
return (ret);
return (error);
}
/*
@ -240,9 +259,14 @@ hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
static int
hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
{
struct hn_send_ctx sndc;
struct vmbus_xact *xact;
struct hn_nvs_chim_conn *chim;
const struct hn_nvs_chim_connresp *resp;
size_t resp_len;
uint32_t status, sectsz;
netvsc_dev *net_dev;
nvsp_msg *init_pkt;
int ret = 0;
int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev) {
@ -254,7 +278,7 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (net_dev->send_buf == NULL) {
device_printf(sc->hn_dev, "allocate chimney txbuf failed\n");
return ENOMEM;
return (ENOMEM);
}
/*
@ -264,47 +288,77 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
* Only primary channel has chimney sending buffer connected to it.
* Sub-channels just share this chimney sending buffer.
*/
ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
error = vmbus_chan_gpadl_connect(sc->hn_prichan,
net_dev->txbuf_dma.hv_paddr, net_dev->send_buf_size,
&net_dev->send_buf_gpadl_handle);
if (ret != 0) {
device_printf(sc->hn_dev, "chimney sending buffer gpadl "
"connect failed: %d\n", ret);
if (error) {
if_printf(sc->hn_ifp, "chimney sending buffer gpadl "
"connect failed: %d\n", error);
goto cleanup;
}
/* Notify the NetVsp of the gpadl handle */
/*
* Connect chimney sending buffer to NVS
*/
init_pkt = &net_dev->channel_init_packet;
xact = vmbus_xact_get(sc->hn_xact, sizeof(*chim));
if (xact == NULL) {
if_printf(sc->hn_ifp, "no xact for nvs chim conn\n");
error = ENXIO;
goto cleanup;
}
memset(init_pkt, 0, sizeof(nvsp_msg));
chim = vmbus_xact_req_data(xact);
chim->nvs_type = HN_NVS_TYPE_CHIM_CONN;
chim->nvs_gpadl = net_dev->send_buf_gpadl_handle;
chim->nvs_sig = HN_NVS_CHIM_SIG;
init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
net_dev->send_buf_gpadl_handle;
init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
NETVSC_SEND_BUFFER_ID;
hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
vmbus_xact_activate(xact);
/* Send the gpadl notification request */
ret = vmbus_chan_send(sc->hn_prichan,
error = vmbus_chan_send(sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
init_pkt, sizeof(nvsp_msg), (uint64_t)init_pkt);
if (ret != 0) {
chim, sizeof(*chim), (uint64_t)(uintptr_t)&sndc);
if (error) {
if_printf(sc->hn_ifp, "send nvs chim conn failed: %d\n",
error);
vmbus_xact_deactivate(xact);
vmbus_xact_put(xact);
goto cleanup;
}
sema_wait(&net_dev->channel_init_sema);
/* Check the response */
if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
!= nvsp_status_success) {
ret = EINVAL;
resp = vmbus_xact_wait(xact, &resp_len);
if (resp_len < sizeof(*resp)) {
if_printf(sc->hn_ifp, "invalid chim conn resp length %zu\n",
resp_len);
vmbus_xact_put(xact);
error = EINVAL;
goto cleanup;
}
if (resp->nvs_type != HN_NVS_TYPE_CHIM_CONNRESP) {
if_printf(sc->hn_ifp, "not chim conn resp, type %u\n",
resp->nvs_type);
vmbus_xact_put(xact);
error = EINVAL;
goto cleanup;
}
net_dev->send_section_size =
init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
status = resp->nvs_status;
sectsz = resp->nvs_sectsz;
vmbus_xact_put(xact);
if (status != HN_NVS_STATUS_OK) {
if_printf(sc->hn_ifp, "chim conn failed: %x\n", status);
error = EIO;
goto cleanup;
}
if (sectsz == 0) {
if_printf(sc->hn_ifp, "zero chimney sending buffer "
"section size\n");
return 0;
}
net_dev->send_section_size = sectsz;
net_dev->send_section_count =
net_dev->send_buf_size / net_dev->send_section_size;
net_dev->bitsmap_words = howmany(net_dev->send_section_count,
@ -313,13 +367,15 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
M_WAITOK | M_ZERO);
goto exit;
if (bootverbose) {
if_printf(sc->hn_ifp, "chimney sending buffer %u/%u\n",
net_dev->send_section_size, net_dev->send_section_count);
}
return 0;
cleanup:
hv_nv_destroy_send_buffer(net_dev);
exit:
return (ret);
return (error);
}
/*
@ -328,35 +384,27 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
static int
hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
{
nvsp_msg *revoke_pkt;
int ret = 0;
/*
* If we got a section count, it means we received a
* send_rx_buf_complete msg
* (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
* we need to send a revoke msg here
*/
if (net_dev->rx_section_count) {
/* Send the revoke receive buffer */
revoke_pkt = &net_dev->revoke_packet;
memset(revoke_pkt, 0, sizeof(nvsp_msg));
revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_rx_buf;
revoke_pkt->msgs.vers_1_msgs.revoke_rx_buf.id =
NETVSC_RECEIVE_BUFFER_ID;
ret = vmbus_chan_send(net_dev->sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, 0, revoke_pkt, sizeof(nvsp_msg),
(uint64_t)(uintptr_t)revoke_pkt);
struct hn_nvs_rxbuf_disconn disconn;
/*
* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
* Disconnect RXBUF from NVS.
*/
memset(&disconn, 0, sizeof(disconn));
disconn.nvs_type = HN_NVS_TYPE_RXBUF_DISCONN;
disconn.nvs_sig = HN_NVS_RXBUF_SIG;
ret = vmbus_chan_send(net_dev->sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, 0, &disconn, sizeof(disconn),
(uint64_t)(uintptr_t)&hn_send_ctx_none);
if (ret != 0) {
if_printf(net_dev->sc->hn_ifp,
"send rxbuf disconn failed: %d\n", ret);
return (ret);
}
net_dev->rx_section_count = 0;
}
/* Tear down the gpadl on the vsp end */
@ -379,12 +427,6 @@ hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
net_dev->rx_buf = NULL;
}
if (net_dev->rx_sections) {
free(net_dev->rx_sections, M_NETVSC);
net_dev->rx_sections = NULL;
net_dev->rx_section_count = 0;
}
return (ret);
}
@ -414,9 +456,8 @@ hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
NETVSC_SEND_BUFFER_ID;
ret = vmbus_chan_send(net_dev->sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, 0,
revoke_pkt, sizeof(nvsp_msg),
(uint64_t)(uintptr_t)revoke_pkt);
VMBUS_CHANPKT_TYPE_INBAND, 0, revoke_pkt, sizeof(nvsp_msg),
(uint64_t)(uintptr_t)&hn_send_ctx_none);
/*
* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@ -454,43 +495,64 @@ hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
return (ret);
}
/*
* Attempt to negotiate the caller-specified NVSP version
*
* For NVSP v2, Server 2008 R2 does not set
* init_pkt->msgs.init_msgs.init_compl.negotiated_prot_vers
* to the negotiated version, so we cannot rely on that.
*/
static int
hv_nv_negotiate_nvsp_protocol(struct hn_softc *sc, netvsc_dev *net_dev,
uint32_t nvsp_ver)
uint32_t nvs_ver)
{
nvsp_msg *init_pkt;
int ret;
struct hn_send_ctx sndc;
struct vmbus_xact *xact;
struct hn_nvs_init *init;
const struct hn_nvs_init_resp *resp;
size_t resp_len;
uint32_t status;
int error;
init_pkt = &net_dev->channel_init_packet;
memset(init_pkt, 0, sizeof(nvsp_msg));
init_pkt->hdr.msg_type = nvsp_msg_type_init;
xact = vmbus_xact_get(sc->hn_xact, sizeof(*init));
if (xact == NULL) {
if_printf(sc->hn_ifp, "no xact for nvs init\n");
return (ENXIO);
}
/*
* Specify parameter as the only acceptable protocol version
*/
init_pkt->msgs.init_msgs.init.p1.protocol_version = nvsp_ver;
init_pkt->msgs.init_msgs.init.protocol_version_2 = nvsp_ver;
init = vmbus_xact_req_data(xact);
init->nvs_type = HN_NVS_TYPE_INIT;
init->nvs_ver_min = nvs_ver;
init->nvs_ver_max = nvs_ver;
/* Send the init request */
ret = vmbus_chan_send(sc->hn_prichan,
vmbus_xact_activate(xact);
hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
error = vmbus_chan_send(sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
if (ret != 0)
return (-1);
init, sizeof(*init), (uint64_t)(uintptr_t)&sndc);
if (error) {
if_printf(sc->hn_ifp, "send nvs init failed: %d\n", error);
vmbus_xact_deactivate(xact);
vmbus_xact_put(xact);
return (error);
}
sema_wait(&net_dev->channel_init_sema);
if (init_pkt->msgs.init_msgs.init_compl.status != nvsp_status_success)
resp = vmbus_xact_wait(xact, &resp_len);
if (resp_len < sizeof(*resp)) {
if_printf(sc->hn_ifp, "invalid init resp length %zu\n",
resp_len);
vmbus_xact_put(xact);
return (EINVAL);
}
if (resp->nvs_type != HN_NVS_TYPE_INIT_RESP) {
if_printf(sc->hn_ifp, "not init resp, type %u\n",
resp->nvs_type);
vmbus_xact_put(xact);
return (EINVAL);
}
status = resp->nvs_status;
vmbus_xact_put(xact);
if (status != HN_NVS_STATUS_OK) {
if_printf(sc->hn_ifp, "nvs init failed for ver 0x%x\n",
nvs_ver);
return (EINVAL);
}
return (0);
}
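hv_nv_negotiate_nvsp_protocol() accepts a single candidate version per call, so the caller has to probe versions itself. A standalone sketch of one plausible probing order (newest first); this only illustrates the idea and is not copied from the driver's actual loop.

#include <stdio.h>

/* Stand-in for hv_nv_negotiate_nvsp_protocol(): accept only version 4. */
static int
negotiate(unsigned ver)
{
	return (ver == 4 ? 0 : -1);
}

int
main(void)
{
	static const unsigned candidates[] = { 1, 2, 4, 5 };
	int i;

	/* Probe from the newest candidate down to the oldest. */
	for (i = (int)(sizeof(candidates) / sizeof(candidates[0])) - 1;
	    i >= 0; i--) {
		if (negotiate(candidates[i]) == 0) {
			printf("negotiated NVSP version %u\n", candidates[i]);
			return (0);
		}
	}
	printf("no common NVSP version\n");
	return (1);
}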
@ -502,33 +564,19 @@ hv_nv_negotiate_nvsp_protocol(struct hn_softc *sc, netvsc_dev *net_dev,
static int
hv_nv_send_ndis_config(struct hn_softc *sc, uint32_t mtu)
{
netvsc_dev *net_dev;
nvsp_msg *init_pkt;
int ret;
struct hn_nvs_ndis_conf conf;
int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev)
return (-ENODEV);
memset(&conf, 0, sizeof(conf));
conf.nvs_type = HN_NVS_TYPE_NDIS_CONF;
conf.nvs_mtu = mtu;
conf.nvs_caps = HN_NVS_NDIS_CONF_VLAN;
/*
* Set up configuration packet, write MTU
* Indicate we are capable of handling VLAN tags
*/
init_pkt = &net_dev->channel_init_packet;
memset(init_pkt, 0, sizeof(nvsp_msg));
init_pkt->hdr.msg_type = nvsp_msg_2_type_send_ndis_config;
init_pkt->msgs.vers_2_msgs.send_ndis_config.mtu = mtu;
init_pkt->
msgs.vers_2_msgs.send_ndis_config.capabilities.u1.u2.ieee8021q
= 1;
/* Send the configuration packet */
ret = vmbus_chan_send(sc->hn_prichan, VMBUS_CHANPKT_TYPE_INBAND, 0,
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
if (ret != 0)
return (-EINVAL);
return (0);
error = vmbus_chan_send(sc->hn_prichan, VMBUS_CHANPKT_TYPE_INBAND, 0,
&conf, sizeof(conf), (uint64_t)(uintptr_t)&hn_send_ctx_none);
if (error)
if_printf(sc->hn_ifp, "send nvs ndis conf failed: %d\n", error);
return (error);
}
/*
@ -538,8 +586,6 @@ static int
hv_nv_connect_to_vsp(struct hn_softc *sc)
{
netvsc_dev *net_dev;
nvsp_msg *init_pkt;
uint32_t ndis_version;
uint32_t protocol_list[] = { NVSP_PROTOCOL_VERSION_1,
NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4,
@ -549,6 +595,7 @@ hv_nv_connect_to_vsp(struct hn_softc *sc)
int ret = 0;
device_t dev = sc->hn_dev;
struct ifnet *ifp = sc->hn_ifp;
struct hn_nvs_ndis_init ndis;
net_dev = hv_nv_get_outbound_net_device(sc);
@ -581,37 +628,23 @@ hv_nv_connect_to_vsp(struct hn_softc *sc)
ret = hv_nv_send_ndis_config(sc, ifp->if_mtu);
/*
* Send the NDIS version
* Initialize NDIS.
*/
init_pkt = &net_dev->channel_init_packet;
memset(init_pkt, 0, sizeof(nvsp_msg));
if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4) {
ndis_version = NDIS_VERSION_6_1;
} else {
ndis_version = NDIS_VERSION_6_30;
}
init_pkt->hdr.msg_type = nvsp_msg_1_type_send_ndis_vers;
init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_major_vers =
(ndis_version & 0xFFFF0000) >> 16;
init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_minor_vers =
ndis_version & 0xFFFF;
/* Send the init request */
memset(&ndis, 0, sizeof(ndis));
ndis.nvs_type = HN_NVS_TYPE_NDIS_INIT;
ndis.nvs_ndis_major = NDIS_VERSION_MAJOR_6;
if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
ndis.nvs_ndis_minor = NDIS_VERSION_MINOR_1;
else
ndis.nvs_ndis_minor = NDIS_VERSION_MINOR_30;
ret = vmbus_chan_send(sc->hn_prichan, VMBUS_CHANPKT_TYPE_INBAND, 0,
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
&ndis, sizeof(ndis), (uint64_t)(uintptr_t)&hn_send_ctx_none);
if (ret != 0) {
if_printf(sc->hn_ifp, "send nvs ndis init failed: %d\n", ret);
goto cleanup;
}
/*
* TODO: BUGBUG - We have to wait for the above msg since the netvsp
* uses KMCL which acknowledges packet (completion packet)
* since our Vmbus always set the VMBUS_CHANPKT_FLAG_RC flag
*/
/* sema_wait(&NetVscChannel->channel_init_sema); */
/* Post the big receive buffer to NetVSP */
if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
@ -731,6 +764,52 @@ hv_nv_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel)
return (0);
}
void
hn_nvs_sent_wakeup(struct hn_send_ctx *sndc __unused,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan __unused,
const struct nvsp_msg_ *msg, int dlen __unused)
{
/* Copy the response back */
memcpy(&net_dev->channel_init_packet, msg, sizeof(nvsp_msg));
sema_post(&net_dev->channel_init_sema);
}
static void
hn_nvs_sent_xact(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
const struct nvsp_msg_ *msg, int dlen)
{
vmbus_xact_wakeup(sndc->hn_cbarg, msg, dlen);
}
static void
hn_nvs_sent_none(struct hn_send_ctx *sndc __unused,
struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
const struct nvsp_msg_ *msg __unused, int dlen __unused)
{
/* EMPTY */
}
void
hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx)
{
u_long mask;
uint32_t idx;
idx = chim_idx / BITS_PER_LONG;
KASSERT(idx < net_dev->bitsmap_words,
("invalid chimney index 0x%x", chim_idx));
mask = 1UL << (chim_idx % BITS_PER_LONG);
KASSERT(net_dev->send_section_bitsmap[idx] & mask,
("index bitmap 0x%lx, chimney index %u, "
"bitmap idx %d, bitmask 0x%lx",
net_dev->send_section_bitsmap[idx], chim_idx, idx, mask));
atomic_clear_long(&net_dev->send_section_bitsmap[idx], mask);
}
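hn_chim_free() converts a chimney section index into a bitmap word index plus bit mask before clearing the bit atomically. The same index arithmetic as a standalone demo, assuming 64-bit longs as on arm64/amd64:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG	64			/* assumed LP64 */

int
main(void)
{
	uint64_t bitmap[8] = { 0 };
	unsigned chim_idx = 130;		/* arbitrary section index */
	unsigned idx = chim_idx / BITS_PER_LONG;
	uint64_t mask = UINT64_C(1) << (chim_idx % BITS_PER_LONG);

	bitmap[idx] |= mask;			/* section marked busy */
	printf("word %u, mask %#" PRIx64 "\n", idx, mask);
	bitmap[idx] &= ~mask;			/* what hn_chim_free() clears atomically */
	printf("word %u after free: %#" PRIx64 "\n", idx, bitmap[idx]);
	return (0);
}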
/*
* Net VSC on send completion
*/
@ -738,59 +817,16 @@ static void
hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt)
{
const nvsp_msg *nvsp_msg_pkt;
netvsc_packet *net_vsc_pkt;
struct hn_send_ctx *sndc;
nvsp_msg_pkt = VMBUS_CHANPKT_CONST_DATA(pkt);
if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete
|| nvsp_msg_pkt->hdr.msg_type
== nvsp_msg_1_type_send_rx_buf_complete
|| nvsp_msg_pkt->hdr.msg_type
== nvsp_msg_1_type_send_send_buf_complete
|| nvsp_msg_pkt->hdr.msg_type
== nvsp_msg5_type_subchannel) {
/* Copy the response back */
memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt,
sizeof(nvsp_msg));
sema_post(&net_dev->channel_init_sema);
} else if (nvsp_msg_pkt->hdr.msg_type ==
nvsp_msg_1_type_send_rndis_pkt_complete) {
/* Get the send context */
net_vsc_pkt =
(netvsc_packet *)(unsigned long)pkt->cph_xactid;
if (NULL != net_vsc_pkt) {
if (net_vsc_pkt->send_buf_section_idx !=
NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
u_long mask;
int idx;
idx = net_vsc_pkt->send_buf_section_idx /
BITS_PER_LONG;
KASSERT(idx < net_dev->bitsmap_words,
("invalid section index %u",
net_vsc_pkt->send_buf_section_idx));
mask = 1UL <<
(net_vsc_pkt->send_buf_section_idx %
BITS_PER_LONG);
KASSERT(net_dev->send_section_bitsmap[idx] &
mask,
("index bitmap 0x%lx, section index %u, "
"bitmap idx %d, bitmask 0x%lx",
net_dev->send_section_bitsmap[idx],
net_vsc_pkt->send_buf_section_idx,
idx, mask));
atomic_clear_long(
&net_dev->send_section_bitsmap[idx], mask);
}
/* Notify the layer above us */
net_vsc_pkt->compl.send.on_send_completion(chan,
net_vsc_pkt->compl.send.send_completion_context);
}
}
sndc = (struct hn_send_ctx *)(uintptr_t)pkt->cph_xactid;
sndc->hn_cb(sndc, net_dev, chan, VMBUS_CHANPKT_CONST_DATA(pkt),
VMBUS_CHANPKT_DATALEN(pkt));
/*
* NOTE:
* 'sndc' CAN NOT be accessed anymore, since it can be freed by
* its callback.
*/
}
/*
@ -799,14 +835,14 @@ hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
* Returns 0 on success, non-zero on failure.
*/
int
hv_nv_on_send(struct vmbus_channel *chan,
netvsc_packet *pkt, struct vmbus_gpa *gpa, int gpa_cnt)
hv_nv_on_send(struct vmbus_channel *chan, bool is_data_pkt,
struct hn_send_ctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt)
{
nvsp_msg send_msg;
int ret;
send_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt;
if (pkt->is_data_pkt) {
if (is_data_pkt) {
/* 0 is RMC_DATA */
send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 0;
} else {
@ -815,17 +851,17 @@ hv_nv_on_send(struct vmbus_channel *chan,
}
send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_idx =
pkt->send_buf_section_idx;
sndc->hn_chim_idx;
send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_size =
pkt->send_buf_section_size;
sndc->hn_chim_sz;
if (gpa_cnt) {
ret = vmbus_chan_send_sglist(chan, gpa, gpa_cnt,
&send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
&send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)sndc);
} else {
ret = vmbus_chan_send(chan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
&send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
&send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)sndc);
}
return (ret);

View File

@ -1060,7 +1060,6 @@ typedef struct netvsc_dev_ {
uint32_t rx_buf_size;
uint32_t rx_buf_gpadl_handle;
uint32_t rx_section_count;
nvsp_1_rx_buf_section *rx_sections;
/* Used for NetVSP initialization protocol */
struct sema channel_init_sema;
@ -1112,29 +1111,8 @@ typedef void (*pfn_on_send_rx_completion)(struct vmbus_channel *, void *);
#endif
typedef struct netvsc_packet_ {
uint8_t is_data_pkt; /* One byte */
uint16_t vlan_tci;
uint32_t status;
/* Completion */
union {
struct {
uint64_t rx_completion_tid;
void *rx_completion_context;
/* This is no longer used */
pfn_on_send_rx_completion on_rx_completion;
} rx;
struct {
uint64_t send_completion_tid;
void *send_completion_context;
/* Still used in netvsc and filter code */
pfn_on_send_rx_completion on_send_completion;
} send;
} compl;
uint32_t send_buf_section_idx;
uint32_t send_buf_section_size;
void *rndis_mesg;
uint16_t vlan_tci;
uint32_t status;
uint32_t tot_data_buf_len;
void *data;
} netvsc_packet;
@ -1264,20 +1242,22 @@ typedef struct hn_softc {
struct taskqueue *hn_tx_taskq;
struct sysctl_oid *hn_tx_sysctl_tree;
struct sysctl_oid *hn_rx_sysctl_tree;
struct vmbus_xact_ctx *hn_xact;
} hn_softc_t;
/*
* Externs
*/
extern int hv_promisc_mode;
struct hn_send_ctx;
void netvsc_linkstatus_callback(struct hn_softc *sc, uint32_t status);
netvsc_dev *hv_nv_on_device_add(struct hn_softc *sc,
void *additional_info, struct hn_rx_ring *rxr);
int hv_nv_on_device_remove(struct hn_softc *sc,
boolean_t destroy_channel);
int hv_nv_on_send(struct vmbus_channel *chan, netvsc_packet *pkt,
struct vmbus_gpa *gpa, int gpa_cnt);
int hv_nv_on_send(struct vmbus_channel *chan, bool is_data_pkt,
struct hn_send_ctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt);
int hv_nv_get_next_send_section(netvsc_dev *net_dev);
void hv_nv_subchan_attach(struct vmbus_channel *chan,
struct hn_rx_ring *rxr);

View File

@ -115,6 +115,7 @@ __FBSDID("$FreeBSD$");
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
@ -124,6 +125,9 @@ __FBSDID("$FreeBSD$");
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME "hn"
#define HN_XACT_REQ_SIZE (2 * PAGE_SIZE)
#define HN_XACT_RESP_SIZE (2 * PAGE_SIZE)
/*
* It looks like offset 0 of buf is reserved to hold the softc pointer.
* The sc pointer is evidently not needed, and is not presently populated.
@ -166,7 +170,7 @@ struct hn_txdesc {
struct hn_tx_ring *txr;
int refs;
uint32_t flags; /* HN_TXD_FLAG_ */
netvsc_packet netvsc_pkt; /* XXX to be removed */
struct hn_send_ctx send_ctx;
bus_dmamap_t data_dmap;
@ -542,6 +546,11 @@ netvsc_attach(device_t dev)
IFCAP_LRO;
ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev),
HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0);
if (sc->hn_xact == NULL)
goto failed;
error = hv_rf_on_device_add(sc, &device_info, ring_cnt,
&sc->hn_rx_ring[0]);
if (error)
@ -643,6 +652,7 @@ netvsc_detach(device_t dev)
if (sc->hn_tx_taskq != hn_tx_taskq)
taskqueue_free(sc->hn_tx_taskq);
vmbus_xact_ctx_destroy(sc->hn_xact);
return (0);
}
@ -781,14 +791,15 @@ hn_txeof(struct hn_tx_ring *txr)
}
static void
hn_tx_done(struct vmbus_channel *chan, void *xpkt)
hn_tx_done(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
struct vmbus_channel *chan, const struct nvsp_msg_ *msg __unused,
int dlen __unused)
{
netvsc_packet *packet = xpkt;
struct hn_txdesc *txd;
struct hn_txdesc *txd = sndc->hn_cbarg;
struct hn_tx_ring *txr;
txd = (struct hn_txdesc *)(uintptr_t)
packet->compl.send.send_completion_tid;
if (sndc->hn_chim_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX)
hn_chim_free(net_dev, sndc->hn_chim_idx);
txr = txd->txr;
KASSERT(txr->hn_chan == chan,
@ -835,16 +846,14 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
int error, nsegs, i;
struct mbuf *m_head = *m_head0;
netvsc_packet *packet;
rndis_msg *rndis_mesg;
rndis_packet *rndis_pkt;
rndis_per_packet_info *rppi;
struct rndis_hash_value *hash_value;
uint32_t rndis_msg_size;
uint32_t rndis_msg_size, tot_data_buf_len, send_buf_section_idx;
int send_buf_section_size;
packet = &txd->netvsc_pkt;
packet->is_data_pkt = TRUE;
packet->tot_data_buf_len = m_head->m_pkthdr.len;
tot_data_buf_len = m_head->m_pkthdr.len;
/*
* extension points to the area reserved for the
@ -859,7 +868,7 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
rndis_pkt = &rndis_mesg->msg.packet;
rndis_pkt->data_offset = sizeof(rndis_packet);
rndis_pkt->data_length = packet->tot_data_buf_len;
rndis_pkt->data_length = tot_data_buf_len;
rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);
rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);
@ -967,15 +976,14 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
}
}
rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
packet->tot_data_buf_len = rndis_mesg->msg_len;
rndis_mesg->msg_len = tot_data_buf_len + rndis_msg_size;
tot_data_buf_len = rndis_mesg->msg_len;
/*
* Chimney send, if the packet could fit into one chimney buffer.
*/
if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
if (tot_data_buf_len < txr->hn_tx_chimney_size) {
netvsc_dev *net_dev = txr->hn_sc->net_dev;
uint32_t send_buf_section_idx;
txr->hn_tx_chimney_tried++;
send_buf_section_idx =
@ -990,9 +998,7 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
dest += rndis_msg_size;
m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);
packet->send_buf_section_idx = send_buf_section_idx;
packet->send_buf_section_size =
packet->tot_data_buf_len;
send_buf_section_size = tot_data_buf_len;
txr->hn_gpa_cnt = 0;
txr->hn_tx_chimney++;
goto done;
@ -1039,16 +1045,14 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
gpa->gpa_len = segs[i].ds_len;
}
packet->send_buf_section_idx =
NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
packet->send_buf_section_size = 0;
send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
send_buf_section_size = 0;
done:
txd->m = m_head;
/* Set the completion routine */
packet->compl.send.on_send_completion = hn_tx_done;
packet->compl.send.send_completion_context = packet;
packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;
hn_send_ctx_init(&txd->send_ctx, hn_tx_done, txd,
send_buf_section_idx, send_buf_section_size);
return 0;
}
@ -1068,7 +1072,7 @@ hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
* Make sure that txd is not freed before ETHER_BPF_MTAP.
*/
hn_txdesc_hold(txd);
error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt,
error = hv_nv_on_send(txr->hn_chan, true, &txd->send_ctx,
txr->hn_gpa, txr->hn_gpa_cnt);
if (!error) {
ETHER_BPF_MTAP(ifp, txd->m);

View File

@ -41,6 +41,10 @@
#define NDIS_VERSION_6_1 0x00060001
#define NDIS_VERSION_6_30 0x0006001e
#define NDIS_VERSION_MAJOR_6 6
#define NDIS_VERSION_MINOR_1 1
#define NDIS_VERSION_MINOR_30 30
#define NDIS_VERSION (NDIS_VERSION_5_1)
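The new NDIS_VERSION_MAJOR_6 and NDIS_VERSION_MINOR_* macros replace splitting a packed 32-bit value such as NDIS_VERSION_6_30 (0x0006001e) at run time, as the removed send_ndis_vers code did. A quick check that the old packing and the new constants agree:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NDIS_VERSION_6_30	0x0006001e
#define NDIS_VERSION_MAJOR_6	6
#define NDIS_VERSION_MINOR_30	30

int
main(void)
{
	uint32_t packed = NDIS_VERSION_6_30;
	uint32_t major = (packed & 0xFFFF0000) >> 16;	/* old-style split */
	uint32_t minor = packed & 0xFFFF;

	printf("packed %#" PRIx32 " -> major %" PRIu32 ", minor %" PRIu32 "\n",
	    packed, major, minor);
	printf("matches the new macros: %d\n",
	    major == NDIS_VERSION_MAJOR_6 && minor == NDIS_VERSION_MINOR_30);
	return (0);
}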
/*

View File

@ -85,11 +85,17 @@ static int hv_rf_set_packet_filter(rndis_device *device, uint32_t new_filter);
static int hv_rf_init_device(rndis_device *device);
static int hv_rf_open_device(rndis_device *device);
static int hv_rf_close_device(rndis_device *device);
static void hv_rf_on_send_request_completion(struct vmbus_channel *, void *context);
static void hv_rf_on_send_request_halt_completion(struct vmbus_channel *, void *context);
int
hv_rf_send_offload_request(struct hn_softc *sc,
rndis_offload_params *offloads);
static void hn_rndis_sent_halt(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
const struct nvsp_msg_ *msg, int dlen);
static void hn_rndis_sent_cb(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
const struct nvsp_msg_ *msg, int dlen);
/*
* Set the Per-Packet-Info with the specified type
*/
@ -238,17 +244,14 @@ static int
hv_rf_send_request(rndis_device *device, rndis_request *request,
uint32_t message_type)
{
netvsc_packet *packet;
netvsc_dev *net_dev = device->net_dev;
int send_buf_section_idx;
uint32_t send_buf_section_idx, tot_data_buf_len;
struct vmbus_gpa gpa[2];
int gpa_cnt;
int gpa_cnt, send_buf_section_size;
hn_sent_callback_t cb;
/* Set up the packet to send it */
packet = &request->pkt;
packet->is_data_pkt = FALSE;
packet->tot_data_buf_len = request->request_msg.msg_len;
tot_data_buf_len = request->request_msg.msg_len;
gpa_cnt = 1;
gpa[0].gpa_page = hv_get_phys_addr(&request->request_msg) >> PAGE_SHIFT;
@ -265,16 +268,12 @@ hv_rf_send_request(rndis_device *device, rndis_request *request,
gpa[1].gpa_len = request->request_msg.msg_len - gpa[0].gpa_len;
}
packet->compl.send.send_completion_context = request; /* packet */
if (message_type != REMOTE_NDIS_HALT_MSG) {
packet->compl.send.on_send_completion =
hv_rf_on_send_request_completion;
} else {
packet->compl.send.on_send_completion =
hv_rf_on_send_request_halt_completion;
}
packet->compl.send.send_completion_tid = (unsigned long)device;
if (packet->tot_data_buf_len < net_dev->send_section_size) {
if (message_type != REMOTE_NDIS_HALT_MSG)
cb = hn_rndis_sent_cb;
else
cb = hn_rndis_sent_halt;
if (tot_data_buf_len < net_dev->send_section_size) {
send_buf_section_idx = hv_nv_get_next_send_section(net_dev);
if (send_buf_section_idx !=
NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
@@ -282,19 +281,20 @@ hv_rf_send_request(rndis_device *device, rndis_request *request,
send_buf_section_idx * net_dev->send_section_size);
memcpy(dest, &request->request_msg, request->request_msg.msg_len);
packet->send_buf_section_idx = send_buf_section_idx;
packet->send_buf_section_size = packet->tot_data_buf_len;
send_buf_section_size = tot_data_buf_len;
gpa_cnt = 0;
goto sendit;
}
/* Failed to allocate chimney send buffer; move on */
}
packet->send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
packet->send_buf_section_size = 0;
send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
send_buf_section_size = 0;
sendit:
return hv_nv_on_send(device->net_dev->sc->hn_prichan, packet,
gpa, gpa_cnt);
hn_send_ctx_init(&request->send_ctx, cb, request,
send_buf_section_idx, send_buf_section_size);
return hv_nv_on_send(device->net_dev->sc->hn_prichan, false,
&request->send_ctx, gpa, gpa_cnt);
}
/*
@@ -1056,6 +1056,7 @@ int
hv_rf_on_device_add(struct hn_softc *sc, void *additl_info,
int nchan, struct hn_rx_ring *rxr)
{
struct hn_send_ctx sndc;
int ret;
netvsc_dev *net_dev;
rndis_device *rndis_dev;
@@ -1162,9 +1163,10 @@ hv_rf_on_device_add(struct hn_softc *sc, void *additl_info,
init_pkt->msgs.vers_5_msgs.subchannel_request.num_subchannels =
net_dev->num_channel - 1;
hn_send_ctx_init_simple(&sndc, hn_nvs_sent_wakeup, NULL);
ret = vmbus_chan_send(sc->hn_prichan,
VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)&sndc);
if (ret != 0) {
device_printf(dev, "Fail to allocate subchannel\n");
goto out;
@@ -1235,23 +1237,24 @@ hv_rf_on_close(struct hn_softc *sc)
return (hv_rf_close_device((rndis_device *)net_dev->extension));
}
/*
* RNDIS filter on send request completion callback
*/
static void
hv_rf_on_send_request_completion(struct vmbus_channel *chan __unused,
void *context __unused)
static void
hn_rndis_sent_cb(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
int dlen __unused)
{
if (sndc->hn_chim_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX)
hn_chim_free(net_dev, sndc->hn_chim_idx);
}
/*
* RNDIS filter on send request (halt only) completion callback
*/
static void
hv_rf_on_send_request_halt_completion(struct vmbus_channel *chan __unused,
void *context)
static void
hn_rndis_sent_halt(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
int dlen __unused)
{
rndis_request *request = context;
rndis_request *request = sndc->hn_cbarg;
if (sndc->hn_chim_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX)
hn_chim_free(net_dev, sndc->hn_chim_idx);
/*
* Notify hv_rf_halt_device() about halt completion.

View File

@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <net/ethernet.h>
#include <dev/hyperv/netvsc/if_hnvar.h>
/*
* Defines
@@ -75,7 +76,7 @@ typedef struct rndis_request_ {
uint8_t buf_resp[PAGE_SIZE];
/* Simplify allocation by having a netvsc packet inline */
netvsc_packet pkt;
struct hn_send_ctx send_ctx;
/*
* The max request size is sizeof(rndis_msg) + PAGE_SIZE.

View File

@@ -0,0 +1,138 @@
/*-
* Copyright (c) 2016 Microsoft Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IF_HNREG_H_
#define _IF_HNREG_H_
#include <sys/param.h>
#include <sys/systm.h>
#define HN_NVS_RXBUF_SIG 0xcafe
#define HN_NVS_CHIM_SIG 0xface
#define HN_NVS_STATUS_OK 1
#define HN_NVS_TYPE_INIT 1
#define HN_NVS_TYPE_INIT_RESP 2
#define HN_NVS_TYPE_NDIS_INIT 100
#define HN_NVS_TYPE_RXBUF_CONN 101
#define HN_NVS_TYPE_RXBUF_CONNRESP 102
#define HN_NVS_TYPE_RXBUF_DISCONN 103
#define HN_NVS_TYPE_CHIM_CONN 104
#define HN_NVS_TYPE_CHIM_CONNRESP 105
#define HN_NVS_TYPE_NDIS_CONF 125
/*
* Any size less than this one will _not_ work; e.g., hn_nvs_init
* has only 12B of valid data, but if only those 12B were sent,
* the hypervisor would never reply.
*/
#define HN_NVS_REQSIZE_MIN 32
struct hn_nvs_init {
uint32_t nvs_type; /* HN_NVS_TYPE_INIT */
uint32_t nvs_ver_min;
uint32_t nvs_ver_max;
uint8_t nvs_rsvd[20];
} __packed;
CTASSERT(sizeof(struct hn_nvs_init) >= HN_NVS_REQSIZE_MIN);
struct hn_nvs_init_resp {
uint32_t nvs_type; /* HN_NVS_TYPE_INIT_RESP */
uint32_t nvs_ver; /* deprecated */
uint32_t nvs_rsvd;
uint32_t nvs_status; /* HN_NVS_STATUS_ */
} __packed;
/* No response */
struct hn_nvs_ndis_conf {
uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_CONF */
uint32_t nvs_mtu;
uint32_t nvs_rsvd;
uint64_t nvs_caps; /* HN_NVS_NDIS_CONF_ */
uint8_t nvs_rsvd1[12];
} __packed;
CTASSERT(sizeof(struct hn_nvs_ndis_conf) >= HN_NVS_REQSIZE_MIN);
#define HN_NVS_NDIS_CONF_SRIOV 0x0004
#define HN_NVS_NDIS_CONF_VLAN 0x0008
/* No response */
struct hn_nvs_ndis_init {
uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_INIT */
uint32_t nvs_ndis_major; /* NDIS_VERSION_MAJOR_ */
uint32_t nvs_ndis_minor; /* NDIS_VERSION_MINOR_ */
uint8_t nvs_rsvd[20];
} __packed;
CTASSERT(sizeof(struct hn_nvs_ndis_init) >= HN_NVS_REQSIZE_MIN);
struct hn_nvs_rxbuf_conn {
uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONN */
uint32_t nvs_gpadl; /* RXBUF vmbus GPADL */
uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */
uint8_t nvs_rsvd[22];
} __packed;
CTASSERT(sizeof(struct hn_nvs_rxbuf_conn) >= HN_NVS_REQSIZE_MIN);
struct hn_nvs_rxbuf_sect {
uint32_t nvs_start;
uint32_t nvs_slotsz;
uint32_t nvs_slotcnt;
uint32_t nvs_end;
} __packed;
struct hn_nvs_rxbuf_connresp {
uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONNRESP */
uint32_t nvs_status; /* HN_NVS_STATUS_ */
uint32_t nvs_nsect; /* # of elem in nvs_sect */
struct hn_nvs_rxbuf_sect nvs_sect[];
} __packed;
/* No response */
struct hn_nvs_rxbuf_disconn {
uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_DISCONN */
uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */
uint8_t nvs_rsvd[26];
} __packed;
CTASSERT(sizeof(struct hn_nvs_rxbuf_disconn) >= HN_NVS_REQSIZE_MIN);
struct hn_nvs_chim_conn {
uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONN */
uint32_t nvs_gpadl; /* chimney buf vmbus GPADL */
uint16_t nvs_sig; /* HN_NVS_CHIM_SIG */
uint8_t nvs_rsvd[22];
} __packed;
CTASSERT(sizeof(struct hn_nvs_chim_conn) >= HN_NVS_REQSIZE_MIN);
struct hn_nvs_chim_connresp {
uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONNRESP */
uint32_t nvs_status; /* HN_NVS_STATUS_ */
uint32_t nvs_sectsz; /* section size */
} __packed;
#endif /* !_IF_HNREG_H_ */
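The CTASSERTs above exist to keep every request at or above HN_NVS_REQSIZE_MIN. As a minimal illustrative sketch (not part of the commit; the helper name is invented), a caller might fill one of these fixed-size requests like this, relying on the reserved padding to satisfy the 32-byte floor:

/*
 * Hypothetical helper, for illustration only: build an NVS INIT request.
 * Zeroing the whole structure covers the nvs_rsvd padding, so the message
 * that goes out is sizeof(struct hn_nvs_init) >= HN_NVS_REQSIZE_MIN bytes.
 */
static void
hn_nvs_init_fill(struct hn_nvs_init *init, uint32_t nvs_ver)
{
	memset(init, 0, sizeof(*init));
	init->nvs_type = HN_NVS_TYPE_INIT;
	init->nvs_ver_min = nvs_ver;
	init->nvs_ver_max = nvs_ver;
}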

View File

@@ -0,0 +1,83 @@
/*-
* Copyright (c) 2016 Microsoft Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IF_HNVAR_H_
#define _IF_HNVAR_H_
#include <sys/param.h>
#include <dev/hyperv/netvsc/hv_net_vsc.h>
struct netvsc_dev_;
struct nvsp_msg_;
struct vmbus_channel;
struct hn_send_ctx;
typedef void (*hn_sent_callback_t)
(struct hn_send_ctx *, struct netvsc_dev_ *,
struct vmbus_channel *, const struct nvsp_msg_ *, int);
struct hn_send_ctx {
hn_sent_callback_t hn_cb;
void *hn_cbarg;
uint32_t hn_chim_idx;
int hn_chim_sz;
};
#define HN_SEND_CTX_INITIALIZER(cb, cbarg) \
{ \
.hn_cb = cb, \
.hn_cbarg = cbarg, \
.hn_chim_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX, \
.hn_chim_sz = 0 \
}
static __inline void
hn_send_ctx_init(struct hn_send_ctx *sndc, hn_sent_callback_t cb,
void *cbarg, uint32_t chim_idx, int chim_sz)
{
sndc->hn_cb = cb;
sndc->hn_cbarg = cbarg;
sndc->hn_chim_idx = chim_idx;
sndc->hn_chim_sz = chim_sz;
}
static __inline void
hn_send_ctx_init_simple(struct hn_send_ctx *sndc, hn_sent_callback_t cb,
void *cbarg)
{
hn_send_ctx_init(sndc, cb, cbarg,
NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX, 0);
}
void hn_nvs_sent_wakeup(struct hn_send_ctx *sndc,
struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
const struct nvsp_msg_ *msg, int dlen);
void hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx);
#endif /* !_IF_HNVAR_H_ */
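Read together with the hn_encap() and hv_rf_send_request() hunks above, this header replaces the old per-packet completion fields with a reusable send context. A rough usage sketch follows; the callback and function names are invented here, and the hv_nv_on_send() signature is inferred from the call sites above rather than quoted from a header:

/* Hypothetical completion callback: release the chimney slot, if one was used. */
static void
example_sent(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
    struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
    int dlen __unused)
{
	if (sndc->hn_chim_idx != NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX)
		hn_chim_free(net_dev, sndc->hn_chim_idx);
}

/* Arm the context, then hand it to the NVS send path along with the gpa list. */
static int
example_send(struct vmbus_channel *chan, struct hn_send_ctx *sndc, void *cbarg,
    uint32_t chim_idx, int chim_sz, struct vmbus_gpa *gpa, int gpa_cnt)
{
	hn_send_ctx_init(sndc, example_sent, cbarg, chim_idx, chim_sz);
	return (hv_nv_on_send(chan, true, sndc, gpa, gpa_cnt));
}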

View File

@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
@@ -62,25 +63,10 @@ __FBSDID("$FreeBSD$");
#define VMBUS_GPADL_START 0xe1e10
struct vmbus_msghc {
struct hypercall_postmsg_in *mh_inprm;
struct vmbus_xact *mh_xact;
struct hypercall_postmsg_in mh_inprm_save;
struct hyperv_dma mh_inprm_dma;
struct vmbus_message *mh_resp;
struct vmbus_message mh_resp0;
};
struct vmbus_msghc_ctx {
struct vmbus_msghc *mhc_free;
struct mtx mhc_free_lock;
uint32_t mhc_flags;
struct vmbus_msghc *mhc_active;
struct mtx mhc_active_lock;
};
#define VMBUS_MSGHC_CTXF_DESTROY 0x0001
static int vmbus_probe(device_t);
static int vmbus_attach(device_t);
static int vmbus_detach(device_t);
@@ -116,15 +102,6 @@ static int vmbus_doattach(struct vmbus_softc *);
static void vmbus_event_proc_dummy(struct vmbus_softc *,
int);
static struct vmbus_msghc_ctx *vmbus_msghc_ctx_create(bus_dma_tag_t);
static void vmbus_msghc_ctx_destroy(
struct vmbus_msghc_ctx *);
static void vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *);
static struct vmbus_msghc *vmbus_msghc_alloc(bus_dma_tag_t);
static void vmbus_msghc_free(struct vmbus_msghc *);
static struct vmbus_msghc *vmbus_msghc_get1(struct vmbus_msghc_ctx *,
uint32_t);
static struct vmbus_softc *vmbus_sc;
extern inthand_t IDTVEC(vmbus_isr);
@@ -182,85 +159,6 @@ vmbus_get_softc(void)
return vmbus_sc;
}
static struct vmbus_msghc *
vmbus_msghc_alloc(bus_dma_tag_t parent_dtag)
{
struct vmbus_msghc *mh;
mh = malloc(sizeof(*mh), M_DEVBUF, M_WAITOK | M_ZERO);
mh->mh_inprm = hyperv_dmamem_alloc(parent_dtag,
HYPERCALL_PARAM_ALIGN, 0, HYPERCALL_POSTMSGIN_SIZE,
&mh->mh_inprm_dma, BUS_DMA_WAITOK);
if (mh->mh_inprm == NULL) {
free(mh, M_DEVBUF);
return NULL;
}
return mh;
}
static void
vmbus_msghc_free(struct vmbus_msghc *mh)
{
hyperv_dmamem_free(&mh->mh_inprm_dma, mh->mh_inprm);
free(mh, M_DEVBUF);
}
static void
vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *mhc)
{
KASSERT(mhc->mhc_active == NULL, ("still have active msg hypercall"));
KASSERT(mhc->mhc_free == NULL, ("still have hypercall msg"));
mtx_destroy(&mhc->mhc_free_lock);
mtx_destroy(&mhc->mhc_active_lock);
free(mhc, M_DEVBUF);
}
static struct vmbus_msghc_ctx *
vmbus_msghc_ctx_create(bus_dma_tag_t parent_dtag)
{
struct vmbus_msghc_ctx *mhc;
mhc = malloc(sizeof(*mhc), M_DEVBUF, M_WAITOK | M_ZERO);
mtx_init(&mhc->mhc_free_lock, "vmbus msghc free", NULL, MTX_DEF);
mtx_init(&mhc->mhc_active_lock, "vmbus msghc act", NULL, MTX_DEF);
mhc->mhc_free = vmbus_msghc_alloc(parent_dtag);
if (mhc->mhc_free == NULL) {
vmbus_msghc_ctx_free(mhc);
return NULL;
}
return mhc;
}
static struct vmbus_msghc *
vmbus_msghc_get1(struct vmbus_msghc_ctx *mhc, uint32_t dtor_flag)
{
struct vmbus_msghc *mh;
mtx_lock(&mhc->mhc_free_lock);
while ((mhc->mhc_flags & dtor_flag) == 0 && mhc->mhc_free == NULL) {
mtx_sleep(&mhc->mhc_free, &mhc->mhc_free_lock, 0,
"gmsghc", 0);
}
if (mhc->mhc_flags & dtor_flag) {
/* Being destroyed */
mh = NULL;
} else {
mh = mhc->mhc_free;
KASSERT(mh != NULL, ("no free hypercall msg"));
KASSERT(mh->mh_resp == NULL,
("hypercall msg has pending response"));
mhc->mhc_free = NULL;
}
mtx_unlock(&mhc->mhc_free_lock);
return mh;
}
void
vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
{
@@ -269,7 +167,7 @@ vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
panic("invalid data size %zu", dsize);
inprm = mh->mh_inprm;
inprm = vmbus_xact_req_data(mh->mh_xact);
memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
inprm->hc_connid = VMBUS_CONNID_MESSAGE;
inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
@@ -280,63 +178,50 @@ struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
struct vmbus_msghc *mh;
struct vmbus_xact *xact;
if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
panic("invalid data size %zu", dsize);
mh = vmbus_msghc_get1(sc->vmbus_msg_hc, VMBUS_MSGHC_CTXF_DESTROY);
if (mh == NULL)
return NULL;
xact = vmbus_xact_get(sc->vmbus_xc,
dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
if (xact == NULL)
return (NULL);
mh = vmbus_xact_priv(xact, sizeof(*mh));
mh->mh_xact = xact;
vmbus_msghc_reset(mh, dsize);
return mh;
return (mh);
}
void
vmbus_msghc_put(struct vmbus_softc *sc, struct vmbus_msghc *mh)
vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
KASSERT(mhc->mhc_active == NULL, ("msg hypercall is active"));
mh->mh_resp = NULL;
mtx_lock(&mhc->mhc_free_lock);
KASSERT(mhc->mhc_free == NULL, ("has free hypercall msg"));
mhc->mhc_free = mh;
mtx_unlock(&mhc->mhc_free_lock);
wakeup(&mhc->mhc_free);
vmbus_xact_put(mh->mh_xact);
}
void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
return mh->mh_inprm->hc_data;
}
struct hypercall_postmsg_in *inprm;
static void
vmbus_msghc_ctx_destroy(struct vmbus_msghc_ctx *mhc)
{
struct vmbus_msghc *mh;
mtx_lock(&mhc->mhc_free_lock);
mhc->mhc_flags |= VMBUS_MSGHC_CTXF_DESTROY;
mtx_unlock(&mhc->mhc_free_lock);
wakeup(&mhc->mhc_free);
mh = vmbus_msghc_get1(mhc, 0);
if (mh == NULL)
panic("can't get msghc");
vmbus_msghc_free(mh);
vmbus_msghc_ctx_free(mhc);
inprm = vmbus_xact_req_data(mh->mh_xact);
return (inprm->hc_data);
}
int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
sbintime_t time = SBT_1MS;
struct hypercall_postmsg_in *inprm;
bus_addr_t inprm_paddr;
int i;
inprm = vmbus_xact_req_data(mh->mh_xact);
inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);
/*
* Save the input parameter so that we could restore the input
* parameter if the Hypercall failed.
@@ -345,7 +230,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
* Is this really necessary?! i.e. Will the Hypercall ever
* overwrite the input parameter?
*/
memcpy(&mh->mh_inprm_save, mh->mh_inprm, HYPERCALL_POSTMSGIN_SIZE);
memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);
/*
* In order to cope with transient failures, e.g. insufficient
@@ -357,7 +242,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
for (i = 0; i < HC_RETRY_MAX; ++i) {
uint64_t status;
status = hypercall_post_message(mh->mh_inprm_dma.hv_paddr);
status = hypercall_post_message(inprm_paddr);
if (status == HYPERCALL_STATUS_SUCCESS)
return 0;
@@ -366,8 +251,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
time *= 2;
/* Restore input parameter and try again */
memcpy(mh->mh_inprm, &mh->mh_inprm_save,
HYPERCALL_POSTMSGIN_SIZE);
memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
}
#undef HC_RETRY_MAX
@@ -376,62 +260,30 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
}
int
vmbus_msghc_exec(struct vmbus_softc *sc, struct vmbus_msghc *mh)
vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
int error;
KASSERT(mh->mh_resp == NULL, ("hypercall msg has pending response"));
mtx_lock(&mhc->mhc_active_lock);
KASSERT(mhc->mhc_active == NULL, ("pending active msg hypercall"));
mhc->mhc_active = mh;
mtx_unlock(&mhc->mhc_active_lock);
vmbus_xact_activate(mh->mh_xact);
error = vmbus_msghc_exec_noresult(mh);
if (error) {
mtx_lock(&mhc->mhc_active_lock);
KASSERT(mhc->mhc_active == mh, ("msghc mismatch"));
mhc->mhc_active = NULL;
mtx_unlock(&mhc->mhc_active_lock);
}
if (error)
vmbus_xact_deactivate(mh->mh_xact);
return error;
}
const struct vmbus_message *
vmbus_msghc_wait_result(struct vmbus_softc *sc, struct vmbus_msghc *mh)
vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
size_t resp_len;
mtx_lock(&mhc->mhc_active_lock);
KASSERT(mhc->mhc_active == mh, ("msghc mismatch"));
while (mh->mh_resp == NULL) {
mtx_sleep(&mhc->mhc_active, &mhc->mhc_active_lock, 0,
"wmsghc", 0);
}
mhc->mhc_active = NULL;
mtx_unlock(&mhc->mhc_active_lock);
return mh->mh_resp;
return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}
void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
struct vmbus_msghc *mh;
mtx_lock(&mhc->mhc_active_lock);
mh = mhc->mhc_active;
KASSERT(mh != NULL, ("no pending msg hypercall"));
memcpy(&mh->mh_resp0, msg, sizeof(mh->mh_resp0));
mh->mh_resp = &mh->mh_resp0;
mtx_unlock(&mhc->mhc_active_lock);
wakeup(&mhc->mhc_active);
vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}
uint32_t
@@ -1187,9 +1039,10 @@ vmbus_doattach(struct vmbus_softc *sc)
/*
* Create context for "post message" Hypercalls
*/
sc->vmbus_msg_hc = vmbus_msghc_ctx_create(
bus_get_dma_tag(sc->vmbus_dev));
if (sc->vmbus_msg_hc == NULL) {
sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
sizeof(struct vmbus_msghc));
if (sc->vmbus_xc == NULL) {
ret = ENXIO;
goto cleanup;
}
@@ -1244,9 +1097,9 @@ vmbus_doattach(struct vmbus_softc *sc)
cleanup:
vmbus_intr_teardown(sc);
vmbus_dma_free(sc);
if (sc->vmbus_msg_hc != NULL) {
vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc);
sc->vmbus_msg_hc = NULL;
if (sc->vmbus_xc != NULL) {
vmbus_xact_ctx_destroy(sc->vmbus_xc);
sc->vmbus_xc = NULL;
}
free(sc->vmbus_chmap, M_DEVBUF);
mtx_destroy(&sc->vmbus_scan_lock);
@@ -1305,9 +1158,9 @@ vmbus_detach(device_t dev)
vmbus_intr_teardown(sc);
vmbus_dma_free(sc);
if (sc->vmbus_msg_hc != NULL) {
vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc);
sc->vmbus_msg_hc = NULL;
if (sc->vmbus_xc != NULL) {
vmbus_xact_ctx_destroy(sc->vmbus_xc);
sc->vmbus_xc = NULL;
}
free(sc->vmbus_chmap, M_DEVBUF);

View File

@@ -86,7 +86,7 @@ struct vmbus_softc {
u_long *vmbus_rx_evtflags;
/* compat evtflgs from host */
struct vmbus_channel **vmbus_chmap;
struct vmbus_msghc_ctx *vmbus_msg_hc;
struct vmbus_xact_ctx *vmbus_xc;
struct vmbus_pcpu_data vmbus_pcpu[MAXCPU];
/*

View File

@@ -0,0 +1,313 @@
/*-
* Copyright (c) 2016 Microsoft Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus_xact.h>
struct vmbus_xact {
struct vmbus_xact_ctx *x_ctx;
void *x_priv;
void *x_req;
struct hyperv_dma x_req_dma;
const void *x_resp;
size_t x_resp_len;
void *x_resp0;
};
struct vmbus_xact_ctx {
uint32_t xc_flags;
size_t xc_req_size;
size_t xc_resp_size;
size_t xc_priv_size;
struct vmbus_xact *xc_free;
struct mtx xc_free_lock;
struct vmbus_xact *xc_active;
struct mtx xc_active_lock;
};
#define VMBUS_XACT_CTXF_DESTROY 0x0001
static struct vmbus_xact *vmbus_xact_alloc(struct vmbus_xact_ctx *,
bus_dma_tag_t);
static void vmbus_xact_free(struct vmbus_xact *);
static struct vmbus_xact *vmbus_xact_get1(struct vmbus_xact_ctx *,
uint32_t);
static struct vmbus_xact *
vmbus_xact_alloc(struct vmbus_xact_ctx *ctx, bus_dma_tag_t parent_dtag)
{
struct vmbus_xact *xact;
xact = malloc(sizeof(*xact), M_DEVBUF, M_WAITOK | M_ZERO);
xact->x_ctx = ctx;
/* XXX assume that page aligned is enough */
xact->x_req = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
ctx->xc_req_size, &xact->x_req_dma, BUS_DMA_WAITOK);
if (xact->x_req == NULL) {
free(xact, M_DEVBUF);
return (NULL);
}
if (ctx->xc_priv_size != 0)
xact->x_priv = malloc(ctx->xc_priv_size, M_DEVBUF, M_WAITOK);
xact->x_resp0 = malloc(ctx->xc_resp_size, M_DEVBUF, M_WAITOK);
return (xact);
}
static void
vmbus_xact_free(struct vmbus_xact *xact)
{
hyperv_dmamem_free(&xact->x_req_dma, xact->x_req);
free(xact->x_resp0, M_DEVBUF);
if (xact->x_priv != NULL)
free(xact->x_priv, M_DEVBUF);
free(xact, M_DEVBUF);
}
static struct vmbus_xact *
vmbus_xact_get1(struct vmbus_xact_ctx *ctx, uint32_t dtor_flag)
{
struct vmbus_xact *xact;
mtx_lock(&ctx->xc_free_lock);
while ((ctx->xc_flags & dtor_flag) == 0 && ctx->xc_free == NULL)
mtx_sleep(&ctx->xc_free, &ctx->xc_free_lock, 0, "gxact", 0);
if (ctx->xc_flags & dtor_flag) {
/* Being destroyed */
xact = NULL;
} else {
xact = ctx->xc_free;
KASSERT(xact != NULL, ("no free xact"));
KASSERT(xact->x_resp == NULL, ("xact has pending response"));
ctx->xc_free = NULL;
}
mtx_unlock(&ctx->xc_free_lock);
return (xact);
}
struct vmbus_xact_ctx *
vmbus_xact_ctx_create(bus_dma_tag_t dtag, size_t req_size, size_t resp_size,
size_t priv_size)
{
struct vmbus_xact_ctx *ctx;
ctx = malloc(sizeof(*ctx), M_DEVBUF, M_WAITOK | M_ZERO);
ctx->xc_req_size = req_size;
ctx->xc_resp_size = resp_size;
ctx->xc_priv_size = priv_size;
ctx->xc_free = vmbus_xact_alloc(ctx, dtag);
if (ctx->xc_free == NULL) {
free(ctx, M_DEVBUF);
return (NULL);
}
mtx_init(&ctx->xc_free_lock, "vmbus xact free", NULL, MTX_DEF);
mtx_init(&ctx->xc_active_lock, "vmbus xact active", NULL, MTX_DEF);
return (ctx);
}
void
vmbus_xact_ctx_destroy(struct vmbus_xact_ctx *ctx)
{
struct vmbus_xact *xact;
mtx_lock(&ctx->xc_free_lock);
ctx->xc_flags |= VMBUS_XACT_CTXF_DESTROY;
mtx_unlock(&ctx->xc_free_lock);
wakeup(&ctx->xc_free);
xact = vmbus_xact_get1(ctx, 0);
if (xact == NULL)
panic("can't get xact");
vmbus_xact_free(xact);
mtx_destroy(&ctx->xc_free_lock);
mtx_destroy(&ctx->xc_active_lock);
free(ctx, M_DEVBUF);
}
struct vmbus_xact *
vmbus_xact_get(struct vmbus_xact_ctx *ctx, size_t req_len)
{
struct vmbus_xact *xact;
if (req_len > ctx->xc_req_size)
panic("invalid request size %zu", req_len);
xact = vmbus_xact_get1(ctx, VMBUS_XACT_CTXF_DESTROY);
if (xact == NULL)
return (NULL);
memset(xact->x_req, 0, req_len);
return (xact);
}
void
vmbus_xact_put(struct vmbus_xact *xact)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
KASSERT(ctx->xc_active == NULL, ("pending active xact"));
xact->x_resp = NULL;
mtx_lock(&ctx->xc_free_lock);
KASSERT(ctx->xc_free == NULL, ("has free xact"));
ctx->xc_free = xact;
mtx_unlock(&ctx->xc_free_lock);
wakeup(&ctx->xc_free);
}
void *
vmbus_xact_req_data(const struct vmbus_xact *xact)
{
return (xact->x_req);
}
bus_addr_t
vmbus_xact_req_paddr(const struct vmbus_xact *xact)
{
return (xact->x_req_dma.hv_paddr);
}
void *
vmbus_xact_priv(const struct vmbus_xact *xact, size_t priv_len)
{
if (priv_len > xact->x_ctx->xc_priv_size)
panic("invalid priv size %zu", priv_len);
return (xact->x_priv);
}
void
vmbus_xact_activate(struct vmbus_xact *xact)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
KASSERT(xact->x_resp == NULL, ("xact has pending response"));
mtx_lock(&ctx->xc_active_lock);
KASSERT(ctx->xc_active == NULL, ("pending active xact"));
ctx->xc_active = xact;
mtx_unlock(&ctx->xc_active_lock);
}
void
vmbus_xact_deactivate(struct vmbus_xact *xact)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
mtx_lock(&ctx->xc_active_lock);
KASSERT(ctx->xc_active == xact, ("xact mismatch"));
ctx->xc_active = NULL;
mtx_unlock(&ctx->xc_active_lock);
}
const void *
vmbus_xact_wait(struct vmbus_xact *xact, size_t *resp_len)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
const void *resp;
mtx_lock(&ctx->xc_active_lock);
KASSERT(ctx->xc_active == xact, ("xact mismatch"));
while (xact->x_resp == NULL) {
mtx_sleep(&ctx->xc_active, &ctx->xc_active_lock, 0,
"wxact", 0);
}
ctx->xc_active = NULL;
resp = xact->x_resp;
*resp_len = xact->x_resp_len;
mtx_unlock(&ctx->xc_active_lock);
return (resp);
}
static void
vmbus_xact_save_resp(struct vmbus_xact *xact, const void *data, size_t dlen)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
size_t cplen = dlen;
mtx_assert(&ctx->xc_active_lock, MA_OWNED);
if (cplen > ctx->xc_resp_size) {
printf("vmbus: xact response truncated %zu -> %zu\n",
cplen, ctx->xc_resp_size);
cplen = ctx->xc_resp_size;
}
KASSERT(ctx->xc_active == xact, ("xact mismatch"));
memcpy(xact->x_resp0, data, cplen);
xact->x_resp_len = cplen;
xact->x_resp = xact->x_resp0;
}
void
vmbus_xact_wakeup(struct vmbus_xact *xact, const void *data, size_t dlen)
{
struct vmbus_xact_ctx *ctx = xact->x_ctx;
mtx_lock(&ctx->xc_active_lock);
vmbus_xact_save_resp(xact, data, dlen);
mtx_unlock(&ctx->xc_active_lock);
wakeup(&ctx->xc_active);
}
void
vmbus_xact_ctx_wakeup(struct vmbus_xact_ctx *ctx, const void *data, size_t dlen)
{
mtx_lock(&ctx->xc_active_lock);
KASSERT(ctx->xc_active != NULL, ("no pending xact"));
vmbus_xact_save_resp(ctx->xc_active, data, dlen);
mtx_unlock(&ctx->xc_active_lock);
wakeup(&ctx->xc_active);
}
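For orientation, here is a hedged sketch of how a caller is expected to drive this transaction API, mirroring the vmbus_msghc_get()/vmbus_msghc_exec()/vmbus_msghc_wait_result() conversion above. The function is illustrative only; how the request actually gets posted (and the eventual vmbus_xact_put()) is left as comments:

/*
 * Illustrative round trip.  The returned response pointer stays valid
 * until the caller eventually hands the transaction back with
 * vmbus_xact_put(); the reply itself arrives via vmbus_xact_ctx_wakeup()
 * from the interrupt path.
 */
static const void *
example_xact_roundtrip(struct vmbus_xact_ctx *xc, size_t req_len,
    size_t *resp_len)
{
	struct vmbus_xact *xact;

	xact = vmbus_xact_get(xc, req_len);
	if (xact == NULL)
		return (NULL);		/* context is being destroyed */
	/* ... fill the request via vmbus_xact_req_data(xact) ... */

	vmbus_xact_activate(xact);
	/*
	 * ... post the request, e.g. using vmbus_xact_req_paddr(xact);
	 * on a send failure, call vmbus_xact_deactivate() and
	 * vmbus_xact_put() instead of waiting ...
	 */
	return (vmbus_xact_wait(xact, resp_len));
}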

View File

@@ -230,18 +230,32 @@ static void vtnet_disable_interrupts(struct vtnet_softc *);
static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
/* Tunables. */
static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters");
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
&vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
0, "Disables TCP Segmentation Offload");
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
0, "Disables TCP Large Receive Offload");
static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
static int vtnet_mq_max_pairs = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
0, "Disables Multi Queue support");
static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
&vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
&vtnet_rx_process_limit, 0,
"Limits the number of RX segments processed in a single pass");
static uma_zone_t vtnet_tx_header_zone;
@@ -597,7 +611,6 @@ static void
vtnet_setup_features(struct vtnet_softc *sc)
{
device_t dev;
int max_pairs, max;
dev = sc->vtnet_dev;
@@ -646,32 +659,31 @@ vtnet_setup_features(struct vtnet_softc *sc)
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
max_pairs = virtio_read_dev_config_2(dev,
sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, max_virtqueue_pairs));
if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
max_pairs = 1;
} else
max_pairs = 1;
sc->vtnet_max_vq_pairs = 1;
if (max_pairs > 1) {
if (sc->vtnet_max_vq_pairs > 1) {
/*
* Limit the maximum number of queue pairs to the number of
* CPUs or the configured maximum. The actual number of
* queues that get used may be less.
* Limit the maximum number of queue pairs to the lower of
* the number of CPUs and the configured maximum.
* The actual number of queues that get used may be less.
*/
max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
if (max > 0 && max_pairs > max)
max_pairs = max;
if (max_pairs > mp_ncpus)
max_pairs = mp_ncpus;
if (max_pairs > VTNET_MAX_QUEUE_PAIRS)
max_pairs = VTNET_MAX_QUEUE_PAIRS;
if (max_pairs > 1)
sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
}
int max;
sc->vtnet_max_vq_pairs = max_pairs;
max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
if (max > mp_ncpus)
max = mp_ncpus;
if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
if (max > 1) {
sc->vtnet_requested_vq_pairs = max;
sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
}
}
}
}
static int
@@ -2982,13 +2994,11 @@ vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
dev = sc->vtnet_dev;
if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
MPASS(sc->vtnet_max_vq_pairs == 1);
sc->vtnet_act_vq_pairs = 1;
return;
}
/* BMV: Just use the maximum configured for now. */
npairs = sc->vtnet_max_vq_pairs;
npairs = sc->vtnet_requested_vq_pairs;
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
device_printf(dev,
@@ -3852,6 +3862,9 @@ vtnet_setup_sysctl(struct vtnet_softc *sc)
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
"Maximum number of supported virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
"Requested number of virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
"Number of active virtqueue pairs");

View File

@@ -155,6 +155,7 @@ struct vtnet_softc {
int vtnet_if_flags;
int vtnet_act_vq_pairs;
int vtnet_max_vq_pairs;
int vtnet_requested_vq_pairs;
struct virtqueue *vtnet_ctrl_vq;
struct vtnet_mac_filter *vtnet_mac_filter;
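Condensed from the vtnet_setup_features() hunk above, the queue-pair selection now behaves roughly like the helper below. This is an illustrative restatement, not code from the commit; the helper name and parameters are invented:

/* How many queue pairs to request, given the tunable and the CPU count. */
static int
example_requested_vq_pairs(int tunable_max, int ncpus)
{
	int max = tunable_max;		/* hw.vtnet.mq_max_pairs */

	if (max <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN)
		return (1);		/* multiqueue stays disabled */
	if (max > ncpus)
		max = ncpus;
	if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
		max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
	return (max);
}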

View File

@@ -329,6 +329,21 @@ autofs_mkdir(struct vop_mkdir_args *ap)
return (error);
}
static int
autofs_print(struct vop_print_args *ap)
{
struct vnode *vp;
struct autofs_node *anp;
vp = ap->a_vp;
anp = vp->v_data;
printf(" name \"%s\", fileno %d, cached %d, wildcards %d\n",
anp->an_name, anp->an_fileno, anp->an_cached, anp->an_wildcards);
return (0);
}
/*
* Write out a single 'struct dirent', based on 'name' and 'fileno' arguments.
*/
@@ -529,6 +544,7 @@ struct vop_vector autofs_vnodeops = {
.vop_link = VOP_EOPNOTSUPP,
.vop_mkdir = autofs_mkdir,
.vop_mknod = VOP_EOPNOTSUPP,
.vop_print = autofs_print,
.vop_read = VOP_EOPNOTSUPP,
.vop_readdir = autofs_readdir,
.vop_remove = VOP_EOPNOTSUPP,

View File

@@ -89,7 +89,7 @@ uint32_t nfscl_nfs4_done_probes[NFSV41_NPROCS + 1];
NFSSTATESPINLOCK;
NFSREQSPINLOCK;
NFSDLOCKMUTEX;
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern int nfscl_ticks;
extern void (*ncl_call_invalcaches)(struct vnode *);
@@ -642,7 +642,7 @@ newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
procnum = NFSV4PROC_COMPOUND;
if (nmp != NULL) {
NFSINCRGLOBAL(newnfsstats.rpcrequests);
NFSINCRGLOBAL(nfsstatsv1.rpcrequests);
/* Map the procnum to the old NFSv2 one, as required. */
if ((nd->nd_flag & ND_NFSV2) != 0) {
@@ -762,18 +762,18 @@ newnfs_request(struct nfsrv_descript *nd, struct nfsmount *nmp,
if (stat == RPC_SUCCESS) {
error = 0;
} else if (stat == RPC_TIMEDOUT) {
NFSINCRGLOBAL(newnfsstats.rpctimeouts);
NFSINCRGLOBAL(nfsstatsv1.rpctimeouts);
error = ETIMEDOUT;
} else if (stat == RPC_VERSMISMATCH) {
NFSINCRGLOBAL(newnfsstats.rpcinvalid);
NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
error = EOPNOTSUPP;
} else if (stat == RPC_PROGVERSMISMATCH) {
NFSINCRGLOBAL(newnfsstats.rpcinvalid);
NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
error = EPROTONOSUPPORT;
} else if (stat == RPC_INTR) {
error = EINTR;
} else {
NFSINCRGLOBAL(newnfsstats.rpcinvalid);
NFSINCRGLOBAL(nfsstatsv1.rpcinvalid);
error = EACCES;
}
if (error) {

View File

@@ -58,7 +58,7 @@ extern void (*nfsd_call_recall)(struct vnode *, int, struct ucred *,
extern int nfsrv_useacl;
struct mount nfsv4root_mnt;
int newnfs_numnfsd = 0;
struct nfsstats newnfsstats;
struct nfsstatsv1 nfsstatsv1;
int nfs_numnfscbd = 0;
int nfscl_debuglevel = 0;
char nfsv4_callbackaddr[INET6_ADDRSTRLEN];
@@ -69,6 +69,7 @@ void (*ncl_call_invalcaches)(struct vnode *) = NULL;
static int nfs_realign_test;
static int nfs_realign_count;
static struct ext_nfsstats oldnfsstats;
SYSCTL_NODE(_vfs, OID_AUTO, nfs, CTLFLAG_RW, 0, "NFS filesystem");
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test,
@@ -446,9 +447,12 @@ nfssvc_nfscommon(struct thread *td, struct nfssvc_args *uap)
static int
nfssvc_call(struct thread *p, struct nfssvc_args *uap, struct ucred *cred)
{
int error = EINVAL;
int error = EINVAL, i, j;
struct nfsd_idargs nid;
struct nfsd_oidargs onid;
struct {
int vers; /* Just the first field of nfsstats. */
} nfsstatver;
if (uap->flag & NFSSVC_IDNAME) {
if ((uap->flag & NFSSVC_NEWSTRUCT) != 0)
@@ -472,63 +476,157 @@ nfssvc_call(struct thread *p, struct nfssvc_args *uap, struct ucred *cred)
error = nfssvc_idname(&nid);
goto out;
} else if (uap->flag & NFSSVC_GETSTATS) {
error = copyout(&newnfsstats,
CAST_USER_ADDR_T(uap->argp), sizeof (newnfsstats));
if ((uap->flag & NFSSVC_NEWSTRUCT) == 0) {
/* Copy fields to the old ext_nfsstat structure. */
oldnfsstats.attrcache_hits =
nfsstatsv1.attrcache_hits;
oldnfsstats.attrcache_misses =
nfsstatsv1.attrcache_misses;
oldnfsstats.lookupcache_hits =
nfsstatsv1.lookupcache_hits;
oldnfsstats.lookupcache_misses =
nfsstatsv1.lookupcache_misses;
oldnfsstats.direofcache_hits =
nfsstatsv1.direofcache_hits;
oldnfsstats.direofcache_misses =
nfsstatsv1.direofcache_misses;
oldnfsstats.accesscache_hits =
nfsstatsv1.accesscache_hits;
oldnfsstats.accesscache_misses =
nfsstatsv1.accesscache_misses;
oldnfsstats.biocache_reads =
nfsstatsv1.biocache_reads;
oldnfsstats.read_bios =
nfsstatsv1.read_bios;
oldnfsstats.read_physios =
nfsstatsv1.read_physios;
oldnfsstats.biocache_writes =
nfsstatsv1.biocache_writes;
oldnfsstats.write_bios =
nfsstatsv1.write_bios;
oldnfsstats.write_physios =
nfsstatsv1.write_physios;
oldnfsstats.biocache_readlinks =
nfsstatsv1.biocache_readlinks;
oldnfsstats.readlink_bios =
nfsstatsv1.readlink_bios;
oldnfsstats.biocache_readdirs =
nfsstatsv1.biocache_readdirs;
oldnfsstats.readdir_bios =
nfsstatsv1.readdir_bios;
for (i = 0; i < NFSV4_NPROCS; i++)
oldnfsstats.rpccnt[i] = nfsstatsv1.rpccnt[i];
oldnfsstats.rpcretries = nfsstatsv1.rpcretries;
for (i = 0; i < NFSV4OP_NOPS; i++)
oldnfsstats.srvrpccnt[i] =
nfsstatsv1.srvrpccnt[i];
for (i = NFSV42_NOPS, j = NFSV4OP_NOPS;
i < NFSV42_NOPS + NFSV4OP_FAKENOPS; i++, j++)
oldnfsstats.srvrpccnt[j] =
nfsstatsv1.srvrpccnt[i];
oldnfsstats.srvrpc_errs = nfsstatsv1.srvrpc_errs;
oldnfsstats.srv_errs = nfsstatsv1.srv_errs;
oldnfsstats.rpcrequests = nfsstatsv1.rpcrequests;
oldnfsstats.rpctimeouts = nfsstatsv1.rpctimeouts;
oldnfsstats.rpcunexpected = nfsstatsv1.rpcunexpected;
oldnfsstats.rpcinvalid = nfsstatsv1.rpcinvalid;
oldnfsstats.srvcache_inproghits =
nfsstatsv1.srvcache_inproghits;
oldnfsstats.srvcache_idemdonehits =
nfsstatsv1.srvcache_idemdonehits;
oldnfsstats.srvcache_nonidemdonehits =
nfsstatsv1.srvcache_nonidemdonehits;
oldnfsstats.srvcache_misses =
nfsstatsv1.srvcache_misses;
oldnfsstats.srvcache_tcppeak =
nfsstatsv1.srvcache_tcppeak;
oldnfsstats.srvcache_size = nfsstatsv1.srvcache_size;
oldnfsstats.srvclients = nfsstatsv1.srvclients;
oldnfsstats.srvopenowners = nfsstatsv1.srvopenowners;
oldnfsstats.srvopens = nfsstatsv1.srvopens;
oldnfsstats.srvlockowners = nfsstatsv1.srvlockowners;
oldnfsstats.srvlocks = nfsstatsv1.srvlocks;
oldnfsstats.srvdelegates = nfsstatsv1.srvdelegates;
for (i = 0; i < NFSV4OP_CBNOPS; i++)
oldnfsstats.cbrpccnt[i] =
nfsstatsv1.cbrpccnt[i];
oldnfsstats.clopenowners = nfsstatsv1.clopenowners;
oldnfsstats.clopens = nfsstatsv1.clopens;
oldnfsstats.cllockowners = nfsstatsv1.cllockowners;
oldnfsstats.cllocks = nfsstatsv1.cllocks;
oldnfsstats.cldelegates = nfsstatsv1.cldelegates;
oldnfsstats.cllocalopenowners =
nfsstatsv1.cllocalopenowners;
oldnfsstats.cllocalopens = nfsstatsv1.cllocalopens;
oldnfsstats.cllocallockowners =
nfsstatsv1.cllocallockowners;
oldnfsstats.cllocallocks = nfsstatsv1.cllocallocks;
error = copyout(&oldnfsstats, uap->argp,
sizeof (oldnfsstats));
} else {
error = copyin(uap->argp, &nfsstatver,
sizeof(nfsstatver));
if (error == 0 && nfsstatver.vers != NFSSTATS_V1)
error = EPERM;
if (error == 0)
error = copyout(&nfsstatsv1, uap->argp,
sizeof (nfsstatsv1));
}
if (error == 0) {
if ((uap->flag & NFSSVC_ZEROCLTSTATS) != 0) {
newnfsstats.attrcache_hits = 0;
newnfsstats.attrcache_misses = 0;
newnfsstats.lookupcache_hits = 0;
newnfsstats.lookupcache_misses = 0;
newnfsstats.direofcache_hits = 0;
newnfsstats.direofcache_misses = 0;
newnfsstats.accesscache_hits = 0;
newnfsstats.accesscache_misses = 0;
newnfsstats.biocache_reads = 0;
newnfsstats.read_bios = 0;
newnfsstats.read_physios = 0;
newnfsstats.biocache_writes = 0;
newnfsstats.write_bios = 0;
newnfsstats.write_physios = 0;
newnfsstats.biocache_readlinks = 0;
newnfsstats.readlink_bios = 0;
newnfsstats.biocache_readdirs = 0;
newnfsstats.readdir_bios = 0;
newnfsstats.rpcretries = 0;
newnfsstats.rpcrequests = 0;
newnfsstats.rpctimeouts = 0;
newnfsstats.rpcunexpected = 0;
newnfsstats.rpcinvalid = 0;
bzero(newnfsstats.rpccnt,
sizeof(newnfsstats.rpccnt));
nfsstatsv1.attrcache_hits = 0;
nfsstatsv1.attrcache_misses = 0;
nfsstatsv1.lookupcache_hits = 0;
nfsstatsv1.lookupcache_misses = 0;
nfsstatsv1.direofcache_hits = 0;
nfsstatsv1.direofcache_misses = 0;
nfsstatsv1.accesscache_hits = 0;
nfsstatsv1.accesscache_misses = 0;
nfsstatsv1.biocache_reads = 0;
nfsstatsv1.read_bios = 0;
nfsstatsv1.read_physios = 0;
nfsstatsv1.biocache_writes = 0;
nfsstatsv1.write_bios = 0;
nfsstatsv1.write_physios = 0;
nfsstatsv1.biocache_readlinks = 0;
nfsstatsv1.readlink_bios = 0;
nfsstatsv1.biocache_readdirs = 0;
nfsstatsv1.readdir_bios = 0;
nfsstatsv1.rpcretries = 0;
nfsstatsv1.rpcrequests = 0;
nfsstatsv1.rpctimeouts = 0;
nfsstatsv1.rpcunexpected = 0;
nfsstatsv1.rpcinvalid = 0;
bzero(nfsstatsv1.rpccnt,
sizeof(nfsstatsv1.rpccnt));
}
if ((uap->flag & NFSSVC_ZEROSRVSTATS) != 0) {
newnfsstats.srvrpc_errs = 0;
newnfsstats.srv_errs = 0;
newnfsstats.srvcache_inproghits = 0;
newnfsstats.srvcache_idemdonehits = 0;
newnfsstats.srvcache_nonidemdonehits = 0;
newnfsstats.srvcache_misses = 0;
newnfsstats.srvcache_tcppeak = 0;
newnfsstats.srvclients = 0;
newnfsstats.srvopenowners = 0;
newnfsstats.srvopens = 0;
newnfsstats.srvlockowners = 0;
newnfsstats.srvlocks = 0;
newnfsstats.srvdelegates = 0;
newnfsstats.clopenowners = 0;
newnfsstats.clopens = 0;
newnfsstats.cllockowners = 0;
newnfsstats.cllocks = 0;
newnfsstats.cldelegates = 0;
newnfsstats.cllocalopenowners = 0;
newnfsstats.cllocalopens = 0;
newnfsstats.cllocallockowners = 0;
newnfsstats.cllocallocks = 0;
bzero(newnfsstats.srvrpccnt,
sizeof(newnfsstats.srvrpccnt));
bzero(newnfsstats.cbrpccnt,
sizeof(newnfsstats.cbrpccnt));
nfsstatsv1.srvrpc_errs = 0;
nfsstatsv1.srv_errs = 0;
nfsstatsv1.srvcache_inproghits = 0;
nfsstatsv1.srvcache_idemdonehits = 0;
nfsstatsv1.srvcache_nonidemdonehits = 0;
nfsstatsv1.srvcache_misses = 0;
nfsstatsv1.srvcache_tcppeak = 0;
nfsstatsv1.srvclients = 0;
nfsstatsv1.srvopenowners = 0;
nfsstatsv1.srvopens = 0;
nfsstatsv1.srvlockowners = 0;
nfsstatsv1.srvlocks = 0;
nfsstatsv1.srvdelegates = 0;
nfsstatsv1.clopenowners = 0;
nfsstatsv1.clopens = 0;
nfsstatsv1.cllockowners = 0;
nfsstatsv1.cllocks = 0;
nfsstatsv1.cldelegates = 0;
nfsstatsv1.cllocalopenowners = 0;
nfsstatsv1.cllocalopens = 0;
nfsstatsv1.cllocallockowners = 0;
nfsstatsv1.cllocallocks = 0;
bzero(nfsstatsv1.srvrpccnt,
sizeof(nfsstatsv1.srvrpccnt));
bzero(nfsstatsv1.cbrpccnt,
sizeof(nfsstatsv1.cbrpccnt));
}
}
goto out;
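Seen from userland, the NFSSVC_NEWSTRUCT branch above means a caller must seed the version field before asking for the new layout. A hedged sketch follows; the helper is invented, while nfssvc(2), NFSSVC_GETSTATS, NFSSVC_NEWSTRUCT, struct nfsstatsv1 and NFSSTATS_V1 are the pieces visible in the code above:

/* Hypothetical userland helper: fetch a snapshot of the new stats layout. */
static int
example_get_nfsstats(struct nfsstatsv1 *st)
{
	st->vers = NFSSTATS_V1;	/* checked by the copyin()/EPERM path above */
	return (nfssvc(NFSSVC_GETSTATS | NFSSVC_NEWSTRUCT, st));
}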

View File

@@ -55,6 +55,7 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/protosw.h>
@@ -254,24 +255,26 @@
/*
* Must be one more than last op#.
* NFSv4.2 isn't implemented yet, but define the op# limit for it.
*/
#define NFSV41_NOPS 59
#define NFSV42_NOPS 72
/* Quirky case of the illegal op code */
#define NFSV4OP_OPILLEGAL 10044
/*
* Fake NFSV4OP_xxx used for nfsstat. Start at NFSV4OP_NOPS.
* Fake NFSV4OP_xxx used for nfsstat. Start at NFSV42_NOPS.
*/
#define NFSV4OP_SYMLINK (NFSV4OP_NOPS)
#define NFSV4OP_MKDIR (NFSV4OP_NOPS + 1)
#define NFSV4OP_RMDIR (NFSV4OP_NOPS + 2)
#define NFSV4OP_READDIRPLUS (NFSV4OP_NOPS + 3)
#define NFSV4OP_MKNOD (NFSV4OP_NOPS + 4)
#define NFSV4OP_FSSTAT (NFSV4OP_NOPS + 5)
#define NFSV4OP_FSINFO (NFSV4OP_NOPS + 6)
#define NFSV4OP_PATHCONF (NFSV4OP_NOPS + 7)
#define NFSV4OP_V3CREATE (NFSV4OP_NOPS + 8)
#define NFSV4OP_SYMLINK (NFSV42_NOPS)
#define NFSV4OP_MKDIR (NFSV42_NOPS + 1)
#define NFSV4OP_RMDIR (NFSV42_NOPS + 2)
#define NFSV4OP_READDIRPLUS (NFSV42_NOPS + 3)
#define NFSV4OP_MKNOD (NFSV42_NOPS + 4)
#define NFSV4OP_FSSTAT (NFSV42_NOPS + 5)
#define NFSV4OP_FSINFO (NFSV42_NOPS + 6)
#define NFSV4OP_PATHCONF (NFSV42_NOPS + 7)
#define NFSV4OP_V3CREATE (NFSV42_NOPS + 8)
/*
* This is the count of the fake operations listed above.
@@ -285,12 +288,12 @@
#define NFSV4OP_CBRECALL 4
/*
* Must be one greater than the last Callback Operation#.
* Must be one greater than the last Callback Operation# for NFSv4.0.
*/
#define NFSV4OP_CBNOPS 5
/*
* Additional Callback Ops for NFSv4.1 only. Not yet in nfsstats.
* Additional Callback Ops for NFSv4.1 only.
*/
#define NFSV4OP_CBLAYOUTRECALL 5
#define NFSV4OP_CBNOTIFY 6
@@ -303,6 +306,9 @@
#define NFSV4OP_CBNOTIFYLOCK 13
#define NFSV4OP_CBNOTIFYDEVID 14
#define NFSV41_CBNOPS 15
#define NFSV42_CBNOPS 16
/*
* The lower numbers (up to 21) are used by NFSv2 and v3. These define the
* higher numbers used by NFSv4.
@@ -360,7 +366,72 @@
#endif /* NFS_V3NPROCS */
/*
* Stats structure
* New stats structure.
* The vers field will be set to NFSSTATS_V1 by the caller.
*/
#define NFSSTATS_V1 1
struct nfsstatsv1 {
int vers; /* Set to version requested by caller. */
uint64_t attrcache_hits;
uint64_t attrcache_misses;
uint64_t lookupcache_hits;
uint64_t lookupcache_misses;
uint64_t direofcache_hits;
uint64_t direofcache_misses;
uint64_t accesscache_hits;
uint64_t accesscache_misses;
uint64_t biocache_reads;
uint64_t read_bios;
uint64_t read_physios;
uint64_t biocache_writes;
uint64_t write_bios;
uint64_t write_physios;
uint64_t biocache_readlinks;
uint64_t readlink_bios;
uint64_t biocache_readdirs;
uint64_t readdir_bios;
uint64_t rpccnt[NFSV41_NPROCS + 15];
uint64_t rpcretries;
uint64_t srvrpccnt[NFSV42_NOPS + NFSV4OP_FAKENOPS];
uint64_t srvrpc_errs;
uint64_t srv_errs;
uint64_t rpcrequests;
uint64_t rpctimeouts;
uint64_t rpcunexpected;
uint64_t rpcinvalid;
uint64_t srvcache_inproghits;
uint64_t srvcache_idemdonehits;
uint64_t srvcache_nonidemdonehits;
uint64_t srvcache_misses;
uint64_t srvcache_tcppeak;
int srvcache_size; /* Updated by atomic_xx_int(). */
uint64_t srvclients;
uint64_t srvopenowners;
uint64_t srvopens;
uint64_t srvlockowners;
uint64_t srvlocks;
uint64_t srvdelegates;
uint64_t cbrpccnt[NFSV42_CBNOPS];
uint64_t clopenowners;
uint64_t clopens;
uint64_t cllockowners;
uint64_t cllocks;
uint64_t cldelegates;
uint64_t cllocalopenowners;
uint64_t cllocalopens;
uint64_t cllocallockowners;
uint64_t cllocallocks;
uint64_t srvstartcnt;
uint64_t srvdonecnt;
uint64_t srvbytes[NFSV42_NOPS + NFSV4OP_FAKENOPS];
uint64_t srvops[NFSV42_NOPS + NFSV4OP_FAKENOPS];
struct bintime srvduration[NFSV42_NOPS + NFSV4OP_FAKENOPS];
struct bintime busyfrom;
struct bintime busytime;
};
/*
* Old stats structure.
*/
struct ext_nfsstats {
int attrcache_hits;
@@ -415,11 +486,6 @@ struct ext_nfsstats {
};
#ifdef _KERNEL
/*
* Define the ext_nfsstats as nfsstats for the kernel code.
*/
#define nfsstats ext_nfsstats
/*
* Define NFS_NPROCS as NFSV4_NPROCS for the experimental kernel code.
*/

View File

@@ -345,10 +345,10 @@
/*
* NFSPROC_NOOP is a fake op# that can't be the same as any V2/3/4 Procedure
* or Operation#. Since the NFS V4 Op #s go higher, use NFSV41_NOPS, which
* or Operation#. Since the NFS V4 Op #s go higher, use NFSV42_NOPS, which
* is one greater than the highest Op#.
*/
#define NFSPROC_NOOP NFSV41_NOPS
#define NFSPROC_NOOP NFSV42_NOPS
/* Actual Version 2 procedure numbers */
#define NFSV2PROC_NULL 0

View File

@@ -60,7 +60,7 @@ __FBSDID("$FreeBSD$");
#include <fs/nfsclient/nfs_kdtrace.h>
extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
@@ -466,7 +466,7 @@ ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
switch (vp->v_type) {
case VREG:
NFSINCRGLOBAL(newnfsstats.biocache_reads);
NFSINCRGLOBAL(nfsstatsv1.biocache_reads);
lbn = uio->uio_offset / biosize;
on = uio->uio_offset - (lbn * biosize);
@@ -543,7 +543,7 @@ ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
n = MIN((unsigned)(bcount - on), uio->uio_resid);
break;
case VLNK:
NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks);
bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
if (!bp) {
error = newnfs_sigintr(nmp, td);
@@ -563,7 +563,7 @@ ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
on = 0;
break;
case VDIR:
NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs);
if (np->n_direofoffset
&& uio->uio_offset >= np->n_direofoffset) {
return (0);
@@ -992,7 +992,7 @@ ncl_write(struct vop_write_args *ap)
}
}
NFSINCRGLOBAL(newnfsstats.biocache_writes);
NFSINCRGLOBAL(nfsstatsv1.biocache_writes);
lbn = uio->uio_offset / biosize;
on = uio->uio_offset - (lbn * biosize);
n = MIN((unsigned)(biosize - on), uio->uio_resid);
@@ -1606,7 +1606,7 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
switch (vp->v_type) {
case VREG:
uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
NFSINCRGLOBAL(newnfsstats.read_bios);
NFSINCRGLOBAL(nfsstatsv1.read_bios);
error = ncl_readrpc(vp, uiop, cr);
if (!error) {
@@ -1641,11 +1641,11 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
break;
case VLNK:
uiop->uio_offset = (off_t)0;
NFSINCRGLOBAL(newnfsstats.readlink_bios);
NFSINCRGLOBAL(nfsstatsv1.readlink_bios);
error = ncl_readlinkrpc(vp, uiop, cr);
break;
case VDIR:
NFSINCRGLOBAL(newnfsstats.readdir_bios);
NFSINCRGLOBAL(nfsstatsv1.readdir_bios);
uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
error = ncl_readdirplusrpc(vp, uiop, cr, td);
@@ -1707,7 +1707,7 @@ ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
+ bp->b_dirtyoff;
io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
uiop->uio_rw = UIO_WRITE;
NFSINCRGLOBAL(newnfsstats.write_bios);
NFSINCRGLOBAL(nfsstatsv1.write_bios);
if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
iomode = NFSWRITE_UNSTABLE;

View File

@@ -42,7 +42,7 @@ __FBSDID("$FreeBSD$");
#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsv4_opflag nfsv4_opflag[NFSV41_NOPS];
extern int ncl_mbuf_mlen;
extern enum vtype newnv2tov_type[8];
@@ -241,8 +241,8 @@ nfscl_reqstart(struct nfsrv_descript *nd, int procnum, struct nfsmount *nmp,
} else {
(void) nfsm_fhtom(nd, nfhp, fhlen, 0);
}
if (procnum < NFSV4_NPROCS)
NFSINCRGLOBAL(newnfsstats.rpccnt[procnum]);
if (procnum < NFSV41_NPROCS)
NFSINCRGLOBAL(nfsstatsv1.rpccnt[procnum]);
}
#ifndef APPLE

View File

@@ -84,7 +84,7 @@ __FBSDID("$FreeBSD$");
/*
* Global variables
*/
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsreqhead nfsd_reqq;
extern u_int32_t newnfs_false, newnfs_true;
extern int nfscl_debuglevel;
@@ -343,10 +343,10 @@ nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
nowp->nfsow_defunct = 0;
nfscl_lockinit(&nowp->nfsow_rwlock);
if (dp != NULL) {
newnfsstats.cllocalopenowners++;
nfsstatsv1.cllocalopenowners++;
LIST_INSERT_HEAD(&dp->nfsdl_owner, nowp, nfsow_list);
} else {
newnfsstats.clopenowners++;
nfsstatsv1.clopenowners++;
LIST_INSERT_HEAD(&clp->nfsc_owner, nowp, nfsow_list);
}
owp = *owpp = nowp;
@@ -380,9 +380,9 @@ nfscl_newopen(struct nfsclclient *clp, struct nfscldeleg *dp,
TAILQ_INSERT_HEAD(&clp->nfsc_deleg, dp,
nfsdl_list);
dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
newnfsstats.cllocalopens++;
nfsstatsv1.cllocalopens++;
} else {
newnfsstats.clopens++;
nfsstatsv1.clopens++;
}
LIST_INSERT_HEAD(&owp->nfsow_open, nop, nfso_list);
*opp = nop;
@@ -430,7 +430,7 @@ nfscl_deleg(mount_t mp, struct nfsclclient *clp, u_int8_t *nfhp,
LIST_INSERT_HEAD(NFSCLDELEGHASH(clp, nfhp, fhlen), dp,
nfsdl_hash);
dp->nfsdl_timestamp = NFSD_MONOSEC + 120;
newnfsstats.cldelegates++;
nfsstatsv1.cldelegates++;
nfscl_delegcnt++;
} else {
/*
@@ -1071,10 +1071,10 @@ nfscl_getbytelock(vnode_t vp, u_int64_t off, u_int64_t len,
LIST_INIT(&nlp->nfsl_lock);
if (donelocally) {
nlp->nfsl_open = NULL;
newnfsstats.cllocallockowners++;
nfsstatsv1.cllocallockowners++;
} else {
nlp->nfsl_open = op;
newnfsstats.cllockowners++;
nfsstatsv1.cllockowners++;
}
LIST_INSERT_HEAD(lhp, nlp, nfsl_list);
lp = nlp;
@@ -1402,9 +1402,9 @@ nfscl_freeopen(struct nfsclopen *op, int local)
nfscl_freealllocks(&op->nfso_lock, local);
FREE((caddr_t)op, M_NFSCLOPEN);
if (local)
newnfsstats.cllocalopens--;
nfsstatsv1.cllocalopens--;
else
newnfsstats.clopens--;
nfsstatsv1.clopens--;
}
/*
@@ -1483,9 +1483,9 @@ nfscl_freeopenowner(struct nfsclowner *owp, int local)
LIST_REMOVE(owp, nfsow_list);
FREE((caddr_t)owp, M_NFSCLOWNER);
if (local)
newnfsstats.cllocalopenowners--;
nfsstatsv1.cllocalopenowners--;
else
newnfsstats.clopenowners--;
nfsstatsv1.clopenowners--;
}
/*
@@ -1502,9 +1502,9 @@ nfscl_freelockowner(struct nfscllockowner *lp, int local)
}
FREE((caddr_t)lp, M_NFSCLLOCKOWNER);
if (local)
newnfsstats.cllocallockowners--;
nfsstatsv1.cllocallockowners--;
else
newnfsstats.cllockowners--;
nfsstatsv1.cllockowners--;
}
/*
@@ -1517,9 +1517,9 @@ nfscl_freelock(struct nfscllock *lop, int local)
LIST_REMOVE(lop, nfslo_list);
FREE((caddr_t)lop, M_NFSCLLOCK);
if (local)
newnfsstats.cllocallocks--;
nfsstatsv1.cllocallocks--;
else
newnfsstats.cllocks--;
nfsstatsv1.cllocks--;
}
/*
@@ -1553,7 +1553,7 @@ nfscl_freedeleg(struct nfscldeleghead *hdp, struct nfscldeleg *dp)
TAILQ_REMOVE(hdp, dp, nfsdl_list);
LIST_REMOVE(dp, nfsdl_hash);
FREE((caddr_t)dp, M_NFSCLDELEG);
newnfsstats.cldelegates--;
nfsstatsv1.cldelegates--;
nfscl_delegcnt--;
}
@@ -1621,18 +1621,18 @@ nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
LIST_REMOVE(op, nfso_list);
op->nfso_own = towp;
LIST_INSERT_HEAD(&towp->nfsow_open, op, nfso_list);
newnfsstats.cllocalopens--;
newnfsstats.clopens++;
nfsstatsv1.cllocalopens--;
nfsstatsv1.clopens++;
}
} else {
/* Just add the openowner to the client list */
LIST_REMOVE(owp, nfsow_list);
owp->nfsow_clp = clp;
LIST_INSERT_HEAD(&clp->nfsc_owner, owp, nfsow_list);
newnfsstats.cllocalopenowners--;
newnfsstats.clopenowners++;
newnfsstats.cllocalopens--;
newnfsstats.clopens++;
nfsstatsv1.cllocalopenowners--;
nfsstatsv1.clopenowners++;
nfsstatsv1.cllocalopens--;
nfsstatsv1.clopens++;
}
}
owp = nowp;
@@ -2282,9 +2282,9 @@ nfscl_insertlock(struct nfscllockowner *lp, struct nfscllock *new_lop,
else
LIST_INSERT_AFTER(insert_lop, new_lop, nfslo_list);
if (local)
newnfsstats.cllocallocks++;
nfsstatsv1.cllocallocks++;
else
newnfsstats.cllocks++;
nfsstatsv1.cllocks++;
}
/*
@ -2571,7 +2571,7 @@ nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
LIST_REMOVE(dp, nfsdl_hash);
TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
nfscl_delegcnt--;
newnfsstats.cldelegates--;
nfsstatsv1.cldelegates--;
}
NFSLOCKCLSTATE();
}
@@ -2612,7 +2612,7 @@ nfscl_renewthread(struct nfsclclient *clp, NFSPROC_T *p)
LIST_REMOVE(dp, nfsdl_hash);
TAILQ_INSERT_HEAD(&dh, dp, nfsdl_list);
nfscl_delegcnt--;
newnfsstats.cldelegates--;
nfsstatsv1.cldelegates--;
}
}
dp = ndp;
@@ -3215,8 +3215,8 @@ nfscl_docb(struct nfsrv_descript *nd, NFSPROC_T *p)
break;
}
nd->nd_procnum = op;
if (op < NFSV4OP_CBNOPS)
newnfsstats.cbrpccnt[nd->nd_procnum]++;
if (op < NFSV41_CBNOPS)
nfsstatsv1.cbrpccnt[nd->nd_procnum]++;
switch (op) {
case NFSV4OP_CBGETATTR:
NFSCL_DEBUG(4, "cbgetattr\n");

View File

@@ -83,7 +83,7 @@ extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int ncl_numasync;
extern unsigned int ncl_iodmax;
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
struct task ncl_nfsiodnew_task;
@@ -219,12 +219,12 @@ ncl_getattrcache(struct vnode *vp, struct vattr *vaper)
if ((time_second - np->n_attrstamp) >= timeo &&
(mustflush != 0 || np->n_attrstamp == 0)) {
newnfsstats.attrcache_misses++;
nfsstatsv1.attrcache_misses++;
mtx_unlock(&np->n_mtx);
KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
return( ENOENT);
}
newnfsstats.attrcache_hits++;
nfsstatsv1.attrcache_hits++;
if (vap->va_size != np->n_size) {
if (vap->va_type == VREG) {
if (np->n_flag & NMODIFIED) {

View File

@@ -78,7 +78,6 @@ FEATURE(nfscl, "NFSv4 client");
extern int nfscl_ticks;
extern struct timeval nfsboottime;
extern struct nfsstats newnfsstats;
extern int nfsrv_useacl;
extern int nfscl_debuglevel;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];

View File

@@ -100,7 +100,7 @@ uint32_t nfscl_accesscache_load_done_id;
#define TRUE 1
#define FALSE 0
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern int nfsrv_useacl;
extern int nfscl_debuglevel;
MALLOC_DECLARE(M_NEWNFSREQ);
@@ -258,14 +258,6 @@ int newnfs_directio_allow_mmap = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs_directio_allow_mmap, CTLFLAG_RW,
&newnfs_directio_allow_mmap, 0, "Enable mmaped IO on file with O_DIRECT opens");
#if 0
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_hits, CTLFLAG_RD,
&newnfsstats.accesscache_hits, 0, "NFS ACCESS cache hit count");
SYSCTL_INT(_vfs_nfs, OID_AUTO, access_cache_misses, CTLFLAG_RD,
&newnfsstats.accesscache_misses, 0, "NFS ACCESS cache miss count");
#endif
#define NFSACCESS_ALL (NFSACCESS_READ | NFSACCESS_MODIFY \
| NFSACCESS_EXTEND | NFSACCESS_EXECUTE \
| NFSACCESS_DELETE | NFSACCESS_LOOKUP)
@ -418,7 +410,7 @@ nfs_access(struct vop_access_args *ap)
if (time_second < (np->n_accesscache[i].stamp
+ nfsaccess_cache_timeout) &&
(np->n_accesscache[i].mode & mode) == mode) {
NFSINCRGLOBAL(newnfsstats.accesscache_hits);
NFSINCRGLOBAL(nfsstatsv1.accesscache_hits);
gotahit = 1;
}
break;
@ -437,7 +429,7 @@ nfs_access(struct vop_access_args *ap)
/*
* Either a no, or a don't know. Go to the wire.
*/
NFSINCRGLOBAL(newnfsstats.accesscache_misses);
NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
error = nfs34_access_otw(vp, wmode, ap->a_td,
ap->a_cred, &rmode);
if (!error &&
@ -857,7 +849,7 @@ nfs_getattr(struct vop_getattr_args *ap)
if (NFS_ISV34(vp) && nfs_prime_access_cache &&
nfsaccess_cache_timeout > 0) {
NFSINCRGLOBAL(newnfsstats.accesscache_misses);
NFSINCRGLOBAL(nfsstatsv1.accesscache_misses);
nfs34_access_otw(vp, NFSACCESS_ALL, td, ap->a_cred, NULL);
if (ncl_getattrcache(vp, ap->a_vap) == 0) {
nfscl_deleggetmodtime(vp, &ap->a_vap->va_mtime);
@ -1114,7 +1106,7 @@ nfs_lookup(struct vop_lookup_args *ap)
((u_int)(ticks - ncticks) < (nmp->nm_nametimeo * hz) &&
VOP_GETATTR(newvp, &vattr, cnp->cn_cred) == 0 &&
timespeccmp(&vattr.va_ctime, &nctime, ==))) {
NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
if (cnp->cn_nameiop != LOOKUP &&
(flags & ISLASTCN))
cnp->cn_flags |= SAVENAME;
@ -1141,7 +1133,7 @@ nfs_lookup(struct vop_lookup_args *ap)
if ((u_int)(ticks - ncticks) < (nmp->nm_negnametimeo * hz) &&
VOP_GETATTR(dvp, &vattr, cnp->cn_cred) == 0 &&
timespeccmp(&vattr.va_mtime, &nctime, ==)) {
NFSINCRGLOBAL(newnfsstats.lookupcache_hits);
NFSINCRGLOBAL(nfsstatsv1.lookupcache_hits);
return (ENOENT);
}
cache_purge_negative(dvp);
@ -1149,7 +1141,7 @@ nfs_lookup(struct vop_lookup_args *ap)
error = 0;
newvp = NULLVP;
NFSINCRGLOBAL(newnfsstats.lookupcache_misses);
NFSINCRGLOBAL(nfsstatsv1.lookupcache_misses);
error = nfsrpc_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
cnp->cn_cred, td, &dnfsva, &nfsva, &nfhp, &attrflag, &dattrflag,
NULL);
@ -2227,7 +2219,7 @@ nfs_readdir(struct vop_readdir_args *ap)
if ((NFS_ISV4(vp) && np->n_change == vattr.va_filerev) ||
!NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime)) {
mtx_unlock(&np->n_mtx);
NFSINCRGLOBAL(newnfsstats.direofcache_hits);
NFSINCRGLOBAL(nfsstatsv1.direofcache_hits);
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 1;
return (0);
@ -2254,7 +2246,7 @@ nfs_readdir(struct vop_readdir_args *ap)
error = ncl_bioread(vp, uio, 0, ap->a_cred);
if (!error && uio->uio_resid == tresid) {
NFSINCRGLOBAL(newnfsstats.direofcache_misses);
NFSINCRGLOBAL(nfsstatsv1.direofcache_misses);
if (ap->a_eofflag != NULL)
*ap->a_eofflag = 1;
}

View File

@ -159,7 +159,7 @@ __FBSDID("$FreeBSD$");
#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct mtx nfsrc_udpmtx;
extern struct nfsrchash_bucket nfsrchash_table[NFSRVCACHE_HASHSIZE];
extern struct nfsrchash_bucket nfsrcahash_table[NFSRVCACHE_HASHSIZE];
@ -318,8 +318,8 @@ nfsrvd_initcache(void)
TAILQ_INIT(&nfsrvudplru);
nfsrc_tcpsavedreplies = 0;
nfsrc_udpcachesize = 0;
newnfsstats.srvcache_tcppeak = 0;
newnfsstats.srvcache_size = 0;
nfsstatsv1.srvcache_tcppeak = 0;
nfsstatsv1.srvcache_size = 0;
}
/*
@ -395,14 +395,14 @@ nfsrc_getudp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
TAILQ_REMOVE(&nfsrvudplru, rp, rc_lru);
TAILQ_INSERT_TAIL(&nfsrvudplru, rp, rc_lru);
if (rp->rc_flag & RC_INPROG) {
newnfsstats.srvcache_inproghits++;
nfsstatsv1.srvcache_inproghits++;
mtx_unlock(mutex);
ret = RC_DROPIT;
} else if (rp->rc_flag & RC_REPSTATUS) {
/*
* V2 only.
*/
newnfsstats.srvcache_nonidemdonehits++;
nfsstatsv1.srvcache_nonidemdonehits++;
mtx_unlock(mutex);
nfsrvd_rephead(nd);
*(nd->nd_errp) = rp->rc_status;
@ -410,7 +410,7 @@ nfsrc_getudp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
rp->rc_timestamp = NFSD_MONOSEC +
NFSRVCACHE_UDPTIMEOUT;
} else if (rp->rc_flag & RC_REPMBUF) {
newnfsstats.srvcache_nonidemdonehits++;
nfsstatsv1.srvcache_nonidemdonehits++;
mtx_unlock(mutex);
nd->nd_mreq = m_copym(rp->rc_reply, 0,
M_COPYALL, M_WAITOK);
@ -425,8 +425,8 @@ nfsrc_getudp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
goto out;
}
}
newnfsstats.srvcache_misses++;
atomic_add_int(&newnfsstats.srvcache_size, 1);
nfsstatsv1.srvcache_misses++;
atomic_add_int(&nfsstatsv1.srvcache_size, 1);
nfsrc_udpcachesize++;
newrp->rc_flag |= RC_INPROG;
@ -480,7 +480,7 @@ nfsrvd_updatecache(struct nfsrv_descript *nd)
* Reply from cache is a special case returned by nfsrv_checkseqid().
*/
if (nd->nd_repstat == NFSERR_REPLYFROMCACHE) {
newnfsstats.srvcache_nonidemdonehits++;
nfsstatsv1.srvcache_nonidemdonehits++;
mtx_unlock(mutex);
nd->nd_repstat = 0;
if (nd->nd_mreq)
@ -519,8 +519,8 @@ nfsrvd_updatecache(struct nfsrv_descript *nd)
if (!(rp->rc_flag & RC_UDP)) {
atomic_add_int(&nfsrc_tcpsavedreplies, 1);
if (nfsrc_tcpsavedreplies >
newnfsstats.srvcache_tcppeak)
newnfsstats.srvcache_tcppeak =
nfsstatsv1.srvcache_tcppeak)
nfsstatsv1.srvcache_tcppeak =
nfsrc_tcpsavedreplies;
}
mtx_unlock(mutex);
@ -678,7 +678,7 @@ nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
panic("nfs tcp cache0");
rp->rc_flag |= RC_LOCKED;
if (rp->rc_flag & RC_INPROG) {
newnfsstats.srvcache_inproghits++;
nfsstatsv1.srvcache_inproghits++;
mtx_unlock(mutex);
if (newrp->rc_sockref == rp->rc_sockref)
nfsrc_marksametcpconn(rp->rc_sockref);
@ -687,7 +687,7 @@ nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
/*
* V2 only.
*/
newnfsstats.srvcache_nonidemdonehits++;
nfsstatsv1.srvcache_nonidemdonehits++;
mtx_unlock(mutex);
if (newrp->rc_sockref == rp->rc_sockref)
nfsrc_marksametcpconn(rp->rc_sockref);
@ -696,7 +696,7 @@ nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
*(nd->nd_errp) = rp->rc_status;
rp->rc_timestamp = NFSD_MONOSEC + nfsrc_tcptimeout;
} else if (rp->rc_flag & RC_REPMBUF) {
newnfsstats.srvcache_nonidemdonehits++;
nfsstatsv1.srvcache_nonidemdonehits++;
mtx_unlock(mutex);
if (newrp->rc_sockref == rp->rc_sockref)
nfsrc_marksametcpconn(rp->rc_sockref);
@ -711,8 +711,8 @@ nfsrc_gettcp(struct nfsrv_descript *nd, struct nfsrvcache *newrp)
free((caddr_t)newrp, M_NFSRVCACHE);
goto out;
}
newnfsstats.srvcache_misses++;
atomic_add_int(&newnfsstats.srvcache_size, 1);
nfsstatsv1.srvcache_misses++;
atomic_add_int(&nfsstatsv1.srvcache_size, 1);
/*
* For TCP, multiple entries for a key are allowed, so don't
@ -801,7 +801,7 @@ nfsrc_freecache(struct nfsrvcache *rp)
atomic_add_int(&nfsrc_tcpsavedreplies, -1);
}
FREE((caddr_t)rp, M_NFSRVCACHE);
atomic_add_int(&newnfsstats.srvcache_size, -1);
atomic_add_int(&nfsstatsv1.srvcache_size, -1);
}
/*
@ -825,7 +825,7 @@ nfsrvd_cleancache(void)
nfsrc_freecache(rp);
}
}
newnfsstats.srvcache_size = 0;
nfsstatsv1.srvcache_size = 0;
mtx_unlock(&nfsrc_udpmtx);
nfsrc_tcpsavedreplies = 0;
}

View File

@ -62,6 +62,7 @@ extern struct nfsclienthashhead *nfsclienthash;
extern struct nfslockhashhead *nfslockhash;
extern struct nfssessionhash *nfssessionhash;
extern int nfsrv_sessionhashsize;
extern struct nfsstatsv1 nfsstatsv1;
struct vfsoptlist nfsv4root_opt, nfsv4root_newopt;
NFSDLOCKMUTEX;
struct nfsrchash_bucket nfsrchash_table[NFSRVCACHE_HASHSIZE];
@ -686,6 +687,8 @@ nfsvno_read(struct vnode *vp, off_t off, int cnt, struct ucred *cred,
uiop->uio_td = NULL;
nh = nfsrv_sequential_heuristic(uiop, vp);
ioflag |= nh->nh_seqcount << IO_SEQSHIFT;
/* XXX KDM make this more systematic? */
nfsstatsv1.srvbytes[NFSV4OP_READ] += uiop->uio_resid;
error = VOP_READ(vp, uiop, IO_NODELOCKED | ioflag, cred);
FREE((caddr_t)iv2, M_TEMP);
if (error) {
@ -758,6 +761,8 @@ nfsvno_write(struct vnode *vp, off_t off, int retlen, int cnt, int stable,
uiop->uio_offset = off;
nh = nfsrv_sequential_heuristic(uiop, vp);
ioflags |= nh->nh_seqcount << IO_SEQSHIFT;
/* XXX KDM make this more systematic? */
nfsstatsv1.srvbytes[NFSV4OP_WRITE] += uiop->uio_resid;
error = VOP_WRITE(vp, uiop, ioflags, cred);
if (error == 0)
nh->nh_nextoff = uiop->uio_offset;
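
Both srvbytes updates above charge the request size (uio_resid as it stands before the VOP call) rather than the bytes actually transferred, which is what the XXX comments are flagging. A minimal sketch of one possible refinement for the read side, assuming the surrounding locals of nfsvno_read() (vp, uiop, ioflag, cred, error); the same idea would apply to nfsvno_write():

	ssize_t requested;

	/*
	 * uio_resid shrinks by the number of bytes the VOP call moved,
	 * so the difference is the completed transfer size.
	 */
	requested = uiop->uio_resid;
	error = VOP_READ(vp, uiop, IO_NODELOCKED | ioflag, cred);
	nfsstatsv1.srvbytes[NFSV4OP_READ] += requested - uiop->uio_resid;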

View File

@ -41,7 +41,7 @@ __FBSDID("$FreeBSD$");
#ifndef APPLEKEXT
#include <fs/nfs/nfsport.h>
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern struct nfsrvfh nfs_pubfh, nfs_rootfh;
extern int nfs_pubfhset, nfs_rootfhset;
extern struct nfsv4lock nfsv4rootfs_lock;
@ -400,6 +400,68 @@ static int nfsv3to4op[NFS_V3NPROCS] = {
NFSV4OP_COMMIT,
};
static struct mtx nfsrvd_statmtx;
MTX_SYSINIT(nfsst, &nfsrvd_statmtx, "NFSstat", MTX_DEF);
static void
nfsrvd_statstart(int op, struct bintime *now)
{
if (op > (NFSV42_NOPS + NFSV4OP_FAKENOPS)) {
printf("%s: op %d invalid\n", __func__, op);
return;
}
mtx_lock(&nfsrvd_statmtx);
if (nfsstatsv1.srvstartcnt == nfsstatsv1.srvdonecnt) {
if (now != NULL)
nfsstatsv1.busyfrom = *now;
else
binuptime(&nfsstatsv1.busyfrom);
}
nfsstatsv1.srvrpccnt[op]++;
nfsstatsv1.srvstartcnt++;
mtx_unlock(&nfsrvd_statmtx);
}
static void
nfsrvd_statend(int op, uint64_t bytes, struct bintime *now,
struct bintime *then)
{
struct bintime dt, lnow;
if (op > (NFSV42_NOPS + NFSV4OP_FAKENOPS)) {
printf("%s: op %d invalid\n", __func__, op);
return;
}
if (now == NULL) {
now = &lnow;
binuptime(now);
}
mtx_lock(&nfsrvd_statmtx);
nfsstatsv1.srvbytes[op] += bytes;
nfsstatsv1.srvops[op]++;
if (then != NULL) {
dt = *now;
bintime_sub(&dt, then);
bintime_add(&nfsstatsv1.srvduration[op], &dt);
}
dt = *now;
bintime_sub(&dt, &nfsstatsv1.busyfrom);
bintime_add(&nfsstatsv1.busytime, &dt);
nfsstatsv1.busyfrom = *now;
nfsstatsv1.srvdonecnt++;
mtx_unlock(&nfsrvd_statmtx);
}
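
The pairing intended for these two helpers is visible in the hunks below: nfsrvd_statstart() runs once when an operation is parsed and nfsrvd_statend() once when that operation completes, with the caller optionally passing a cached bintime so the helpers can skip their own binuptime() call. A minimal sketch of that shape; op and bytes stand in for whatever the caller is accounting:

	struct bintime start_time;

	binuptime(&start_time);
	nfsrvd_statstart(op, &start_time);
	/* ... perform the operation, accumulating bytes ... */
	nfsrvd_statend(op, bytes, /*now*/ NULL, /*then*/ &start_time);
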
/*
* Do an RPC. Basically, get the file handles translated to vnode pointers
* and then call the appropriate server routine. The server routines are
@ -476,7 +538,9 @@ nfsrvd_dorpc(struct nfsrv_descript *nd, int isdgram, u_char *tag, int taglen,
*/
if (nd->nd_repstat && (nd->nd_flag & ND_NFSV2)) {
*nd->nd_errp = nfsd_errmap(nd);
NFSINCRGLOBAL(newnfsstats.srvrpccnt[nfsv3to4op[nd->nd_procnum]]);
nfsrvd_statstart(nfsv3to4op[nd->nd_procnum], /*now*/ NULL);
nfsrvd_statend(nfsv3to4op[nd->nd_procnum], /*bytes*/ 0,
/*now*/ NULL, /*then*/ NULL);
if (mp != NULL && nfs_writerpc[nd->nd_procnum] != 0)
vn_finished_write(mp);
goto out;
@ -491,6 +555,11 @@ nfsrvd_dorpc(struct nfsrv_descript *nd, int isdgram, u_char *tag, int taglen,
if (nd->nd_flag & ND_NFSV4) {
nfsrvd_compound(nd, isdgram, tag, taglen, minorvers, p);
} else {
struct bintime start_time;
binuptime(&start_time);
nfsrvd_statstart(nfsv3to4op[nd->nd_procnum], &start_time);
if (nfs_retfh[nd->nd_procnum] == 1) {
if (vp)
NFSVOPUNLOCK(vp, 0);
@ -505,7 +574,9 @@ nfsrvd_dorpc(struct nfsrv_descript *nd, int isdgram, u_char *tag, int taglen,
}
if (mp != NULL && nfs_writerpc[nd->nd_procnum] != 0)
vn_finished_write(mp);
NFSINCRGLOBAL(newnfsstats.srvrpccnt[nfsv3to4op[nd->nd_procnum]]);
nfsrvd_statend(nfsv3to4op[nd->nd_procnum], /*bytes*/ 0,
/*now*/ NULL, /*then*/ &start_time);
}
if (error) {
if (error != EBADRPC)
@ -547,7 +618,7 @@ static void
nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
int taglen, u_int32_t minorvers, NFSPROC_T *p)
{
int i, lktype, op, op0 = 0;
int i, lktype, op, op0 = 0, statsinprog = 0;
u_int32_t *tl;
struct nfsclient *clp, *nclp;
int numops, error = 0, igotlock;
@ -559,6 +630,7 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
struct nfsexstuff nes, vpnes, savevpnes;
fsid_t cur_fsid, save_fsid;
static u_int64_t compref = 0;
struct bintime start_time;
NFSVNO_EXINIT(&vpnes);
NFSVNO_EXINIT(&savevpnes);
@ -686,6 +758,11 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
*repp = *tl;
op = fxdr_unsigned(int, *tl);
NFSD_DEBUG(4, "op=%d\n", op);
binuptime(&start_time);
nfsrvd_statstart(op, &start_time);
statsinprog = 1;
if (op < NFSV4OP_ACCESS ||
(op >= NFSV4OP_NOPS && (nd->nd_flag & ND_NFSV41) == 0) ||
(op >= NFSV41_NOPS && (nd->nd_flag & ND_NFSV41) != 0)) {
@ -771,12 +848,6 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
}
if (nfsv4_opflag[op].savereply)
nd->nd_flag |= ND_SAVEREPLY;
/*
* For now, newnfsstats.srvrpccnt[] doesn't have entries
* for the NFSv4.1 operations.
*/
if (nd->nd_procnum < NFSV4OP_NOPS)
NFSINCRGLOBAL(newnfsstats.srvrpccnt[nd->nd_procnum]);
switch (op) {
case NFSV4OP_PUTFH:
error = nfsrv_mtofh(nd, &fh);
@ -1007,6 +1078,13 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
}
error = 0;
}
if (statsinprog != 0) {
nfsrvd_statend(op, /*bytes*/ 0, /*now*/ NULL,
/*then*/ &start_time);
statsinprog = 0;
}
retops++;
if (nd->nd_repstat) {
*repp = nfsd_errmap(nd);
@ -1016,6 +1094,11 @@ nfsrvd_compound(struct nfsrv_descript *nd, int isdgram, u_char *tag,
}
}
nfsmout:
if (statsinprog != 0) {
nfsrvd_statend(op, /*bytes*/ 0, /*now*/ NULL,
/*then*/ &start_time);
statsinprog = 0;
}
if (error) {
if (error == EBADRPC || error == NFSERR_BADXDR)
nd->nd_repstat = NFSERR_BADXDR;

View File

@ -37,7 +37,7 @@ int nfsrv_dolocallocks = 0;
struct nfsv4lock nfsv4rootfs_lock;
extern int newnfs_numnfsd;
extern struct nfsstats newnfsstats;
extern struct nfsstatsv1 nfsstatsv1;
extern int nfsrv_lease;
extern struct timeval nfsboottime;
extern u_int32_t newnfs_true, newnfs_false;
@ -273,7 +273,7 @@ nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp,
LIST_INIT(&new_clp->lc_stateid[i]);
LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
lc_hash);
newnfsstats.srvclients++;
nfsstatsv1.srvclients++;
nfsrv_openpluslock++;
nfsrv_clients++;
NFSLOCKV4ROOTMUTEX();
@ -377,7 +377,7 @@ nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp,
}
LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
lc_hash);
newnfsstats.srvclients++;
nfsstatsv1.srvclients++;
nfsrv_openpluslock++;
nfsrv_clients++;
NFSLOCKV4ROOTMUTEX();
@ -441,7 +441,7 @@ nfsrv_setclient(struct nfsrv_descript *nd, struct nfsclient **new_clpp,
}
LIST_INSERT_HEAD(NFSCLIENTHASH(new_clp->lc_clientid), new_clp,
lc_hash);
newnfsstats.srvclients++;
nfsstatsv1.srvclients++;
nfsrv_openpluslock++;
nfsrv_clients++;
}
@ -815,7 +815,7 @@ nfsrv_adminrevoke(struct nfsd_clid *revokep, NFSPROC_T *p)
/*
* Dump out stats for all clients. Called from nfssvc(2), that is used
* newnfsstats.
* nfsstatsv1.
*/
APPLESTATIC void
nfsrv_dumpclients(struct nfsd_dumpclients *dumpp, int maxcnt)
@ -1219,7 +1219,7 @@ nfsrv_zapclient(struct nfsclient *clp, NFSPROC_T *p)
free(clp->lc_stateid, M_NFSDCLIENT);
free(clp, M_NFSDCLIENT);
NFSLOCKSTATE();
newnfsstats.srvclients--;
nfsstatsv1.srvclients--;
nfsrv_openpluslock--;
nfsrv_clients--;
NFSUNLOCKSTATE();
@ -1260,7 +1260,7 @@ nfsrv_freedeleg(struct nfsstate *stp)
nfsv4_testlock(&lfp->lf_locallock_lck) == 0)
nfsrv_freenfslockfile(lfp);
FREE((caddr_t)stp, M_NFSDSTATE);
newnfsstats.srvdelegates--;
nfsstatsv1.srvdelegates--;
nfsrv_openpluslock--;
nfsrv_delegatecnt--;
}
@ -1286,7 +1286,7 @@ nfsrv_freeopenowner(struct nfsstate *stp, int cansleep, NFSPROC_T *p)
if (stp->ls_op)
nfsrvd_derefcache(stp->ls_op);
FREE((caddr_t)stp, M_NFSDSTATE);
newnfsstats.srvopenowners--;
nfsstatsv1.srvopenowners--;
nfsrv_openpluslock--;
}
@ -1336,7 +1336,7 @@ nfsrv_freeopen(struct nfsstate *stp, vnode_t vp, int cansleep, NFSPROC_T *p)
if (cansleep != 0)
NFSUNLOCKSTATE();
FREE((caddr_t)stp, M_NFSDSTATE);
newnfsstats.srvopens--;
nfsstatsv1.srvopens--;
nfsrv_openpluslock--;
return (ret);
}
@ -1355,7 +1355,7 @@ nfsrv_freelockowner(struct nfsstate *stp, vnode_t vp, int cansleep,
if (stp->ls_op)
nfsrvd_derefcache(stp->ls_op);
FREE((caddr_t)stp, M_NFSDSTATE);
newnfsstats.srvlockowners--;
nfsstatsv1.srvlockowners--;
nfsrv_openpluslock--;
}
@ -1430,7 +1430,7 @@ nfsrv_freenfslock(struct nfslock *lop)
if (lop->lo_lckfile.le_prev != NULL) {
LIST_REMOVE(lop, lo_lckfile);
newnfsstats.srvlocks--;
nfsstatsv1.srvlocks--;
nfsrv_openpluslock--;
}
LIST_REMOVE(lop, lo_lckowner);
@ -2200,7 +2200,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp,
LIST_INSERT_HEAD(&stp->ls_open, new_stp, ls_list);
*new_lopp = NULL;
*new_stpp = NULL;
newnfsstats.srvlockowners++;
nfsstatsv1.srvlockowners++;
nfsrv_openpluslock++;
}
if (filestruct_locked != 0) {
@ -2849,12 +2849,12 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list);
LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list);
*new_stpp = NULL;
newnfsstats.srvopenowners++;
nfsstatsv1.srvopenowners++;
nfsrv_openpluslock++;
}
openstp = new_open;
new_open = NULL;
newnfsstats.srvopens++;
nfsstatsv1.srvopens++;
nfsrv_openpluslock++;
break;
}
@ -2913,7 +2913,7 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
NFSRV_V4DELEGLIMIT(nfsrv_delegatecnt) ||
!NFSVNO_DELEGOK(vp))
*rflagsp |= NFSV4OPEN_RECALL;
newnfsstats.srvdelegates++;
nfsstatsv1.srvdelegates++;
nfsrv_openpluslock++;
nfsrv_delegatecnt++;
@ -2953,12 +2953,12 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
LIST_INSERT_HEAD(&new_stp->ls_open, new_open, ls_list);
LIST_INSERT_HEAD(&clp->lc_open, new_stp, ls_list);
*new_stpp = NULL;
newnfsstats.srvopenowners++;
nfsstatsv1.srvopenowners++;
nfsrv_openpluslock++;
}
openstp = new_open;
new_open = NULL;
newnfsstats.srvopens++;
nfsstatsv1.srvopens++;
nfsrv_openpluslock++;
} else {
error = NFSERR_RECLAIMCONFLICT;
@ -3027,7 +3027,7 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
new_deleg->ls_stateid), new_deleg, ls_hash);
LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list);
new_deleg = NULL;
newnfsstats.srvdelegates++;
nfsstatsv1.srvdelegates++;
nfsrv_openpluslock++;
nfsrv_delegatecnt++;
}
@ -3049,7 +3049,7 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
new_open, ls_hash);
openstp = new_open;
new_open = NULL;
newnfsstats.srvopens++;
nfsstatsv1.srvopens++;
nfsrv_openpluslock++;
/*
@ -3094,7 +3094,7 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
new_deleg->ls_stateid), new_deleg, ls_hash);
LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg, ls_list);
new_deleg = NULL;
newnfsstats.srvdelegates++;
nfsstatsv1.srvdelegates++;
nfsrv_openpluslock++;
nfsrv_delegatecnt++;
}
@ -3173,7 +3173,7 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
LIST_INSERT_HEAD(&clp->lc_deleg, new_deleg,
ls_list);
new_deleg = NULL;
newnfsstats.srvdelegates++;
nfsstatsv1.srvdelegates++;
nfsrv_openpluslock++;
nfsrv_delegatecnt++;
}
@ -3191,9 +3191,9 @@ nfsrv_openctrl(struct nfsrv_descript *nd, vnode_t vp,
openstp = new_open;
new_open = NULL;
*new_stpp = NULL;
newnfsstats.srvopens++;
nfsstatsv1.srvopens++;
nfsrv_openpluslock++;
newnfsstats.srvopenowners++;
nfsstatsv1.srvopenowners++;
nfsrv_openpluslock++;
}
if (!error) {
@ -3645,7 +3645,7 @@ nfsrv_insertlock(struct nfslock *new_lop, struct nfslock *insert_lop,
else
LIST_INSERT_AFTER(insert_lop, new_lop, lo_lckowner);
if (stp != NULL) {
newnfsstats.srvlocks++;
nfsstatsv1.srvlocks++;
nfsrv_openpluslock++;
}
}
@ -3843,7 +3843,7 @@ nfsrv_checkseqid(struct nfsrv_descript *nd, u_int32_t seqid,
* just set lc_program to 0 to indicate no callbacks are possible.
* (For cases where the address can't be parsed or is 0.0.0.0.0.0, set
* the address to the client's transport address. This won't be used
* for callbacks, but can be printed out by newnfsstats for info.)
* for callbacks, but can be printed out by nfsstats for info.)
* Return error if the xdr can't be parsed, 0 otherwise.
*/
APPLESTATIC int

View File

@ -132,7 +132,7 @@ smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm,
}
dnp = dvp ? VTOSMB(dvp) : NULL;
if (dnp == NULL && dvp != NULL) {
vprint("smbfs_node_alloc: dead parent vnode", dvp);
vn_printf(dvp, "smbfs_node_alloc: dead parent vnode ");
return EINVAL;
}
error = vfs_hash_get(mp, smbfs_hash(name, nmlen), LK_EXCLUSIVE, td,

View File

@ -819,10 +819,13 @@ tmpfs_dir_lookup_cookie(struct tmpfs_node *node, off_t cookie,
goto out;
}
MPASS((cookie & TMPFS_DIRCOOKIE_MASK) == cookie);
dekey.td_hash = cookie;
/* Recover if direntry for cookie was removed */
de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
if ((cookie & TMPFS_DIRCOOKIE_MASK) != cookie) {
de = NULL;
} else {
dekey.td_hash = cookie;
/* Recover if direntry for cookie was removed */
de = RB_NFIND(tmpfs_dir, dirhead, &dekey);
}
dc->tdc_tree = de;
dc->tdc_current = de;
if (de != NULL && tmpfs_dirent_duphead(de)) {

View File

@ -1753,9 +1753,9 @@ unionfs_print(struct vop_print_args *ap)
*/
if (unp->un_uppervp != NULLVP)
vprint("unionfs: upper", unp->un_uppervp);
vn_printf(unp->un_uppervp, "unionfs: upper ");
if (unp->un_lowervp != NULLVP)
vprint("unionfs: lower", unp->un_lowervp);
vn_printf(unp->un_lowervp, "unionfs: lower ");
return (0);
}

View File

@ -540,8 +540,8 @@ trap(struct trapframe *frame)
case T_DNA:
#ifdef DEV_NPX
KASSERT(!PCB_USER_FPU(td->td_pcb),
("Unregistered use of FPU in kernel"));
if (PCB_USER_FPU(td->td_pcb))
panic("Unregistered use of FPU in kernel");
if (npxdna())
goto out;
#endif

View File

@ -942,6 +942,8 @@ funsetown(struct sigio **sigiop)
{
struct sigio *sigio;
if (*sigiop == NULL)
return;
SIGIO_LOCK();
sigio = *sigiop;
if (sigio == NULL) {
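
The new unlocked check lets callers whose *sigiop is already NULL skip the SIGIO_LOCK()/SIGIO_UNLOCK() round trip; the pointer is still reloaded and re-tested under the lock because another thread can clear it in between. A minimal sketch of the resulting shape, with the teardown body elided:

	if (*sigiop == NULL)	/* unlocked fast path */
		return;
	SIGIO_LOCK();
	sigio = *sigiop;	/* reload and re-check under the lock */
	if (sigio == NULL) {
		SIGIO_UNLOCK();
		return;
	}
	/* ... clear *sigiop and tear down the sigio under the lock ... */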

View File

@ -320,11 +320,13 @@ void
kthread_exit(void)
{
struct proc *p;
struct thread *td;
p = curthread->td_proc;
td = curthread;
p = td->td_proc;
/* A module may be waiting for us to exit. */
wakeup(curthread);
wakeup(td);
/*
* The last exiting thread in a kernel process must tear down
@ -337,9 +339,10 @@ kthread_exit(void)
rw_wunlock(&tidhash_lock);
kproc_exit(0);
}
LIST_REMOVE(curthread, td_hash);
LIST_REMOVE(td, td_hash);
rw_wunlock(&tidhash_lock);
umtx_thread_exit(curthread);
umtx_thread_exit(td);
tdsigcleanup(td);
PROC_SLOCK(p);
thread_exit();
}

View File

@ -572,9 +572,14 @@ void
ktrprocfork(struct proc *p1, struct proc *p2)
{
MPASS(p2->p_tracevp == NULL);
MPASS(p2->p_traceflag == 0);
if (p1->p_traceflag == 0)
return;
PROC_LOCK(p1);
mtx_lock(&ktrace_mtx);
KASSERT(p2->p_tracevp == NULL, ("new process has a ktrace vnode"));
if (p1->p_traceflag & KTRFAC_INHERIT) {
p2->p_traceflag = p1->p_traceflag;
if ((p2->p_tracevp = p1->p_tracevp) != NULL) {

View File

@ -984,7 +984,7 @@ callout_when(sbintime_t sbt, sbintime_t precision, int flags,
if ((flags & C_HARDCLOCK) == 0)
to_sbt += tick_sbt;
} else
to_sbt = sbinuptime();
to_sbt = sbinuptime();
if (SBT_MAX - to_sbt < sbt)
to_sbt = SBT_MAX;
else

Some files were not shown because too many files have changed in this diff.