MFH
Sponsored by: The FreeBSD Foundation

This commit is contained in: commit efd5551e55

Makefile | 9
@@ -137,6 +137,7 @@ TGTS+= ${BITGTS}
.ORDER: buildworld installworld
.ORDER: buildworld distributeworld
.ORDER: buildworld buildkernel
.ORDER: installworld distribution
.ORDER: buildkernel installkernel
.ORDER: buildkernel installkernel.debug
.ORDER: buildkernel reinstallkernel

@@ -329,7 +330,7 @@ bmake: .PHONY
${MMAKE} all; \
${MMAKE} install DESTDIR=${MYMAKE:H} BINDIR=

tinderbox toolchains kernel-toolchains: upgrade_checks
tinderbox toolchains kernel-toolchains kernels worlds: upgrade_checks

tinderbox:
@cd ${.CURDIR}; ${SUB_MAKE} DOING_TINDERBOX=YES universe

@@ -340,6 +341,12 @@ toolchains:
kernel-toolchains:
@cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=kernel-toolchain universe

kernels:
@cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildkernel universe

worlds:
@cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildworld universe

#
# universe
#
@@ -72,7 +72,7 @@ SRCDIR?= ${.CURDIR}
SUBDIR= ${SUBDIR_OVERRIDE}
.else
SUBDIR= lib libexec
.if make(install*)
.if !defined(NO_ROOT) && (make(installworld) || make(install))
# Ensure libraries are installed before progressing.
SUBDIR+=.WAIT
.endif

@@ -127,7 +127,7 @@ SUBDIR+= ${_DIR}
# by calling 'makedb' in share/man. This is only relevant for
# install/distribute so they build the whatis file after every manpage is
# installed.
.if make(install*)
.if make(installworld) || make(install)
SUBDIR+=.WAIT
.endif
SUBDIR+=etc

@@ -1125,9 +1125,7 @@ distrib-dirs: .MAKE .PHONY
${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \
${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET}

distribution: .MAKE .PHONY
${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \
${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET}
distribution: distrib-dirs .MAKE .PHONY
${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH} \
${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \
METALOG=${METALOG} installconfig
UPDATING | 7

@@ -31,6 +31,13 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW:
disable the most expensive debugging functionality run
"ln -s 'abort:false,junk:false' /etc/malloc.conf".)

20160129:
Building ZFS pools on top of zvols is prohibited by default. That
feature has never worked safely; it's always been prone to deadlocks.
Using a zvol as the backing store for a VM guest's virtual disk will
still work, even if the guest is using ZFS. Legacy behavior can be
restored by setting vfs.zfs.vol.recursive=1.

20160119:
The NONE and HPN patches has been removed from OpenSSH. They are
still available in the security/openssh-portable port.
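A minimal sketch of restoring the legacy behavior described in the 20160129 note above, assuming the stock sysctl(8) and /etc/sysctl.conf handling; the tunable name comes straight from the note:

	# Re-allow ZFS pools on top of zvols for the running kernel.
	sysctl vfs.zfs.vol.recursive=1
	# Preserve the setting across reboots.
	echo 'vfs.zfs.vol.recursive=1' >> /etc/sysctl.conf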
@@ -36,9 +36,9 @@
#undef iconv_close

#define ICONVLIB "libiconv.so"
#define ICONV_ENGINE "iconv"
#define ICONV_OPEN "iconv_open"
#define ICONV_CLOSE "iconv_close"
#define ICONV_ENGINE "libiconv"
#define ICONV_OPEN "libiconv_open"
#define ICONV_CLOSE "libiconv_close"

typedef iconv_t iconv_open_t(const char *, const char *);
@@ -70,6 +70,7 @@ struct redirtab {
struct redirtab *next;
int renamed[10];
int fd0_redirected;
unsigned int empty_redirs;
};

@@ -82,6 +83,9 @@ static struct redirtab *redirlist;
*/
static int fd0_redirected = 0;

/* Number of redirtabs that have not been allocated. */
static unsigned int empty_redirs = 0;

static void openredirect(union node *, char[10 ]);
static int openhere(union node *);

@@ -115,12 +119,17 @@ redirect(union node *redir, int flags)
memory[i] = 0;
memory[1] = flags & REDIR_BACKQ;
if (flags & REDIR_PUSH) {
sv = ckmalloc(sizeof (struct redirtab));
for (i = 0 ; i < 10 ; i++)
sv->renamed[i] = EMPTY;
sv->fd0_redirected = fd0_redirected;
sv->next = redirlist;
redirlist = sv;
empty_redirs++;
if (redir != NULL) {
sv = ckmalloc(sizeof (struct redirtab));
for (i = 0 ; i < 10 ; i++)
sv->renamed[i] = EMPTY;
sv->fd0_redirected = fd0_redirected;
sv->empty_redirs = empty_redirs - 1;
sv->next = redirlist;
redirlist = sv;
empty_redirs = 0;
}
}
for (n = redir ; n ; n = n->nfile.next) {
fd = n->nfile.fd;

@@ -303,6 +312,12 @@ popredir(void)
struct redirtab *rp = redirlist;
int i;

INTOFF;
if (empty_redirs > 0) {
empty_redirs--;
INTON;
return;
}
for (i = 0 ; i < 10 ; i++) {
if (rp->renamed[i] != EMPTY) {
if (rp->renamed[i] >= 0) {

@@ -313,8 +328,8 @@ popredir(void)
}
}
}
INTOFF;
fd0_redirected = rp->fd0_redirected;
empty_redirs = rp->empty_redirs;
redirlist = rp->next;
ckfree(rp);
INTON;
bin/sh/sh.1 | 25

@@ -32,7 +32,7 @@
.\" from: @(#)sh.1 8.6 (Berkeley) 5/4/95
.\" $FreeBSD$
.\"
.Dd August 29, 2015
.Dd January 30, 2016
.Dt SH 1
.Os
.Sh NAME

@@ -1952,13 +1952,20 @@ Execute the specified built-in command,
This is useful when the user wishes to override a shell function
with the same name as a built-in command.
.It Ic cd Oo Fl L | P Oc Oo Fl e Oc Op Ar directory
.It Ic cd Fl
Switch to the specified
.Ar directory ,
or to the directory specified in the
to the directory specified in the
.Va HOME
environment variable if no
.Ar directory
is specified.
is specified or
to the directory specified in the
.Va OLDPWD
environment variable if
.Ar directory
is
.Fl .
If
.Ar directory
does not begin with

@@ -1982,10 +1989,12 @@ the
.Ic cd
command will print out the name of the directory
that it actually switched to
if this is different from the name that the user gave.
These may be different either because the
if the
.Va CDPATH
mechanism was used or because a symbolic link was crossed.
mechanism was used or if
.Ar directory
was
.Fl .
.Pp
If the
.Fl P

@@ -2774,6 +2783,10 @@ Initialization file for interactive shells.
Locale settings.
These are inherited by children of the shell,
and is used in a limited manner by the shell itself.
.It Ev OLDPWD
The previous current directory.
This is used and updated by
.Ic cd .
.It Ev PWD
An absolute pathname for the current directory,
possibly containing symbolic links.
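The sh.1 hunks above document the new "cd -" form and the OLDPWD variable it relies on. A short interactive sketch of the documented behavior (the directory names are illustrative only):

	$ cd /usr/src
	$ cd /tmp
	$ cd -          # switches to the directory recorded in OLDPWD and prints it
	/usr/src
	$ echo "$OLDPWD"
	/tmp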
bin/test/test.c | 166

@@ -120,51 +120,53 @@ enum token {

#define TOKEN_TYPE(token) ((token) & 0xff00)

static struct t_op {
char op_text[4];
static const struct t_op {
char op_text[2];
short op_num;
} const ops [] = {
{"-r", FILRD},
{"-w", FILWR},
{"-x", FILEX},
{"-e", FILEXIST},
{"-f", FILREG},
{"-d", FILDIR},
{"-c", FILCDEV},
{"-b", FILBDEV},
{"-p", FILFIFO},
{"-u", FILSUID},
{"-g", FILSGID},
{"-k", FILSTCK},
{"-s", FILGZ},
{"-t", FILTT},
{"-z", STREZ},
{"-n", STRNZ},
{"-h", FILSYM}, /* for backwards compat */
{"-O", FILUID},
{"-G", FILGID},
{"-L", FILSYM},
{"-S", FILSOCK},
} ops1[] = {
{"=", STREQ},
{"==", STREQ},
{"!=", STRNE},
{"<", STRLT},
{">", STRGT},
{"-eq", INTEQ},
{"-ne", INTNE},
{"-ge", INTGE},
{"-gt", INTGT},
{"-le", INTLE},
{"-lt", INTLT},
{"-nt", FILNT},
{"-ot", FILOT},
{"-ef", FILEQ},
{"!", UNOT},
{"-a", BAND},
{"-o", BOR},
{"(", LPAREN},
{")", RPAREN},
{"", 0}
}, opsm1[] = {
{"r", FILRD},
{"w", FILWR},
{"x", FILEX},
{"e", FILEXIST},
{"f", FILREG},
{"d", FILDIR},
{"c", FILCDEV},
{"b", FILBDEV},
{"p", FILFIFO},
{"u", FILSUID},
{"g", FILSGID},
{"k", FILSTCK},
{"s", FILGZ},
{"t", FILTT},
{"z", STREZ},
{"n", STRNZ},
{"h", FILSYM}, /* for backwards compat */
{"O", FILUID},
{"G", FILGID},
{"L", FILSYM},
{"S", FILSOCK},
{"a", BAND},
{"o", BOR},
}, ops2[] = {
{"==", STREQ},
{"!=", STRNE},
}, opsm2[] = {
{"eq", INTEQ},
{"ne", INTNE},
{"ge", INTGE},
{"gt", INTGT},
{"le", INTLE},
{"lt", INTLT},
{"nt", FILNT},
{"ot", FILOT},
{"ef", FILEQ},
};

static int nargc;

@@ -416,35 +418,71 @@ filstat(char *nm, enum token mode)
}
}

static enum token
t_lex(char *s)
static int
find_op_1char(const struct t_op *op, const struct t_op *end, const char *s)
{
struct t_op const *op = ops;
char c;

if (s == 0) {
return EOI;
}
while (*op->op_text) {
if (strcmp(s, op->op_text) == 0) {
if (((TOKEN_TYPE(op->op_num) == UNOP ||
TOKEN_TYPE(op->op_num) == BUNOP)
&& isunopoperand()) ||
(op->op_num == LPAREN && islparenoperand()) ||
(op->op_num == RPAREN && isrparenoperand()))
break;
c = s[0];
while (op != end) {
if (c == *op->op_text)
return op->op_num;
}
op++;
}
return OPERAND;
}

static int
find_op_2char(const struct t_op *op, const struct t_op *end, const char *s)
{
while (op != end) {
if (s[0] == op->op_text[0] && s[1] == op->op_text[1])
return op->op_num;
op++;
}
return OPERAND;
}

static int
find_op(const char *s)
{
if (s[0] == '\0')
return OPERAND;
else if (s[1] == '\0')
return find_op_1char(ops1, (&ops1)[1], s);
else if (s[2] == '\0')
return s[0] == '-' ? find_op_1char(opsm1, (&opsm1)[1], s + 1) :
find_op_2char(ops2, (&ops2)[1], s);
else if (s[3] == '\0')
return s[0] == '-' ? find_op_2char(opsm2, (&opsm2)[1], s + 1) :
OPERAND;
else
return OPERAND;
}

static enum token
t_lex(char *s)
{
int num;

if (s == 0) {
return EOI;
}
num = find_op(s);
if (((TOKEN_TYPE(num) == UNOP || TOKEN_TYPE(num) == BUNOP)
&& isunopoperand()) ||
(num == LPAREN && islparenoperand()) ||
(num == RPAREN && isrparenoperand()))
return OPERAND;
return num;
}

static int
isunopoperand(void)
{
struct t_op const *op = ops;
char *s;
char *t;
int num;

if (nargc == 1)
return 1;

@@ -452,20 +490,16 @@ isunopoperand(void)
if (nargc == 2)
return parenlevel == 1 && strcmp(s, ")") == 0;
t = *(t_wp + 2);
while (*op->op_text) {
if (strcmp(s, op->op_text) == 0)
return TOKEN_TYPE(op->op_num) == BINOP &&
(parenlevel == 0 || t[0] != ')' || t[1] != '\0');
op++;
}
return 0;
num = find_op(s);
return TOKEN_TYPE(num) == BINOP &&
(parenlevel == 0 || t[0] != ')' || t[1] != '\0');
}

static int
islparenoperand(void)
{
struct t_op const *op = ops;
char *s;
int num;

if (nargc == 1)
return 1;

@@ -474,12 +508,8 @@ islparenoperand(void)
return parenlevel == 1 && strcmp(s, ")") == 0;
if (nargc != 3)
return 0;
while (*op->op_text) {
if (strcmp(s, op->op_text) == 0)
return TOKEN_TYPE(op->op_num) == BINOP;
op++;
}
return 0;
num = find_op(s);
return TOKEN_TYPE(num) == BINOP;
}

static int
@@ -26,6 +26,7 @@

/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2015 by Syneto S.R.L. All rights reserved.
*/

/*
@@ -246,8 +247,9 @@ zpool_get_features(zpool_handle_t *zhp)
config = zpool_get_config(zhp, NULL);
}

verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
&features) == 0);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
&features) != 0)
return (NULL);

return (features);
}
contrib/libucl/tests/.gitignore | 8

@@ -1,8 +0,0 @@
*.log
*.trs
*.plist

test_basic
test_generate
test_schema
test_speed
contrib/libxo/.gitignore | 46

@@ -1,46 +0,0 @@
# Object files
*.o

# Libraries
*.lib
*.a

# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib

# Executables
*.exe
*.app

*~
*.orig

aclocal.m4
ar-lib
autom4te.cache
build
compile
config.guess
config.h.in
config.sub
depcomp
install-sh
ltmain.sh
missing
m4

Makefile.in
configure
.DS_Store

xoconfig.h.in
xo_config.h.in

.gdbinit
.gdbinit.local
xtest
xtest.dSYM
tests/w
@@ -5,7 +5,6 @@
PROG= cmatose
MAN=
SRCS= cmatose.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4
LIBADD= ibverbs rdmacm pthread mlx4

.include <bsd.prog.mk>

@@ -5,7 +5,6 @@
PROG= mckey
MAN=
SRCS= mckey.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4
LIBADD= ibverbs rdmacm pthread mlx4

.include <bsd.prog.mk>

@@ -5,7 +5,6 @@
PROG= udaddy
MAN=
SRCS= udaddy.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4
LIBADD= ibverbs rdmacm pthread mlx4

.include <bsd.prog.mk>
@@ -1,5 +1,5 @@
PKG= openresolv
VERSION= 3.7.0
VERSION= 3.7.1

# Nasty hack so that make clean works without configure being run
_CONFIG_MK!= test -e config.mk && echo config.mk || echo config-null.mk

@@ -37,7 +37,7 @@ SED_RESTARTCMD= -e 's:@RESTARTCMD \(.*\)@:${RESTARTCMD}:g'

DISTPREFIX?= ${PKG}-${VERSION}
DISTFILEGZ?= ${DISTPREFIX}.tar.gz
DISTFILE?= ${DISTPREFIX}.tar.bz2
DISTFILE?= ${DISTPREFIX}.tar.xz
FOSSILID?= current

.SUFFIXES: .in

@@ -77,9 +77,9 @@ install: proginstall maninstall
import:
rm -rf /tmp/${DISTPREFIX}
${INSTALL} -d /tmp/${DISTPREFIX}
cp README ${SRCS} /tmp/${DISPREFIX}
cp README ${SRCS} /tmp/${DISTPREFIX}

dist:
fossil tarball --name ${DISTPREFIX} ${FOSSILID} ${DISTFILEGZ}
gunzip -c ${DISTFILEGZ} | bzip2 >${DISTFILE}
gunzip -c ${DISTFILEGZ} | xz >${DISTFILE}
rm ${DISTFILEGZ}
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.Dd April 27, 2014
.Dd April 27, 2015
.Dt RESOLVCONF 8
.Os
.Sh NAME
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.Dd March 20, 2015
.Dd May 14, 2015
.Dt RESOLVCONF.CONF 5
.Os
.Sh NAME

@@ -91,6 +91,11 @@ To remove a block, you can use 192.168.*
These interfaces name servers will only be queried for the domains listed
in their resolv.conf.
Useful for VPN domains.
Setting
.Sy private_interfaces Ns ="*"
will stop the forwarding of the root zone and allows the local resolver to
recursively query the root servers directly.
Requires a local nameserver other than libc.
This is equivalent to the
.Nm resolvconf -p
option.

@@ -149,7 +154,7 @@ When set to /dev/null or NULL,
.Sy resolv_conf_local_only
is defaulted to NO,
.Sy local_nameservers
is unset unless overriden and only the information set in
is unset unless overridden and only the information set in
.Nm
is written to
.Sy resolv_conf .

@@ -271,7 +276,7 @@ Each subscriber attempts to automatically configure itself, but not every
distribution has been catered for.
Also, users could equally want to use a different version from the one
installed by default, such as bind8 and bind9.
To accomodate this, the subscribers have these files in configurable
To accommodate this, the subscribers have these files in configurable
variables, documented below.
.Pp
.Bl -tag -width indent
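As a hedged illustration of the private_interfaces knob documented in the resolvconf.conf.5 hunk above, a hypothetical /etc/resolvconf.conf could read as follows (the interface name and local resolver address are assumptions, not part of this commit):

	# Only query the VPN interface's name servers for its own domains.
	private_interfaces="tun0"
	# Hand everything else to a local caching resolver.
	name_servers="127.0.0.1"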
@@ -50,7 +50,6 @@ elif [ -d "$SYSCONFDIR/resolvconf" ]; then
interface_order="$(cat "$SYSCONFDIR"/interface-order)"
fi
fi
TMPDIR="$VARDIR/tmp"
IFACEDIR="$VARDIR/interfaces"
METRICDIR="$VARDIR/metrics"
PRIVATEDIR="$VARDIR/private"

@@ -45,7 +45,8 @@ for d in $DOMAINS; do
ns="${d#*:}"
case "$unbound_insecure" in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
newconf="$newconf${NL}domain-insecure: \"$dn\""
newconf="$newconf${NL}server:$NL"
newconf="$newconf domain-insecure: \"$dn\"$NL"
;;
esac
newconf="$newconf${NL}forward-zone:$NL name: \"$dn\"$NL"
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <strings.h>
#include <stdlib.h>
#include <sysexits.h>
contrib/unbound/.gitignore | 38

@@ -1,38 +0,0 @@
*.lo
*.o
/.libs/
/Makefile
/autom4te.cache/
/config.h
/config.log
/config.status
/dnstap/dnstap_config.h
/doc/example.conf
/doc/libunbound.3
/doc/unbound-anchor.8
/doc/unbound-checkconf.8
/doc/unbound-control.8
/doc/unbound-host.1
/doc/unbound.8
/doc/unbound.conf.5
/libtool
/libunbound.la
/smallapp/unbound-control-setup.sh
/unbound
/unbound-anchor
/unbound-checkconf
/unbound-control
/unbound-control-setup
/unbound-host
/unbound.h
/asynclook
/delayer
/lock-verify
/memstats
/perf
/petal
/pktview
/streamtcp
/testbound
/unittest
@@ -45,7 +45,7 @@
# Authentication:

#LoginGraceTime 2m
#PermitRootLogin prohibit-password
#PermitRootLogin no
#StrictModes yes
#MaxAuthTries 6
#MaxSessions 10

@@ -1217,7 +1217,7 @@ The argument must be
or
.Dq no .
The default is
.Dq prohibit-password .
.Dq no .
Note that if
.Cm ChallengeResponseAuthentication
is
@@ -15,6 +15,7 @@ ATF_TESTS_C+= ftw_test
ATF_TESTS_C+= popen_test
ATF_TESTS_C+= posix_spawn_test
ATF_TESTS_C+= wordexp_test
ATF_TESTS_C+= dlopen_empty_test

# TODO: t_closefrom, t_cpuset, t_fmtcheck, t_randomid, t_sleep
# TODO: t_siginfo (fixes require further inspection)
lib/libc/tests/gen/dlopen_empty_test.c | 97 (new file)

@@ -0,0 +1,97 @@
/*-
* Copyright (c) 2016 Maksym Sobolyev
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/stat.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <atf-c.h>

static const char *funname;
static char *soname;

static void
sigsegv_handler(int sig __unused)
{
unlink(soname);
free(soname);
atf_tc_fail("got SIGSEGV in the %s(3)", funname);
}

ATF_TC(dlopen_empty_test);
ATF_TC_HEAD(dlopen_empty_test, tc)
{
atf_tc_set_md_var(tc, "descr", "Tests the dlopen() of an empty file "
"returns an error");
}
ATF_TC_BODY(dlopen_empty_test, tc)
{
char tempname[] = "/tmp/temp.XXXXXX";
char *fname;
int fd;
void *dlh;
struct sigaction act, oact;

fname = mktemp(tempname);
ATF_REQUIRE_MSG(fname != NULL, "mktemp failed; errno=%d", errno);
asprintf(&soname, "%s.so", fname);
ATF_REQUIRE_MSG(soname != NULL, "asprintf failed; errno=%d", ENOMEM);
fd = open(soname, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE);
ATF_REQUIRE_MSG(fd != -1, "open(\"%s\") failed; errno=%d", soname, errno);
close(fd);

act.sa_handler = sigsegv_handler;
act.sa_flags = 0;
sigemptyset(&act.sa_mask);
ATF_CHECK_MSG(sigaction(SIGSEGV, &act, &oact) != -1,
"sigaction() failed");

funname = "dlopen";
dlh = dlopen(soname, RTLD_LAZY);
if (dlh != NULL) {
funname = "dlclose";
dlclose(dlh);
}
ATF_REQUIRE_MSG(dlh == NULL, "dlopen(\"%s\") did not fail", soname);
unlink(soname);
free(soname);
}

ATF_TP_ADD_TCS(tp)
{

ATF_TP_ADD_TC(tp, dlopen_empty_test);

return (atf_no_error());
}
@@ -70,30 +70,30 @@ sysdecode_syscallname(enum sysdecode_abi abi, unsigned int code)
{

switch (abi) {
case FREEBSD:
case SYSDECODE_ABI_FREEBSD:
if (code < nitems(syscallnames))
return (syscallnames[code]);
break;
#if defined(__amd64__) || defined(__powerpc64__)
case FREEBSD32:
case SYSDECODE_ABI_FREEBSD32:
if (code < nitems(freebsd32_syscallnames))
return (freebsd32_syscallnames[code]);
break;
#endif
#if defined(__amd64__) || defined(__i386__)
case LINUX:
case SYSDECODE_ABI_LINUX:
if (code < nitems(linux_syscallnames))
return (linux_syscallnames[code]);
break;
#endif
#ifdef __amd64__
case LINUX32:
case SYSDECODE_ABI_LINUX32:
if (code < nitems(linux32_syscallnames))
return (linux32_syscallnames[code]);
break;
#endif
#if defined(__amd64__) || defined(__aarch64__)
case CLOUDABI64:
case SYSDECODE_ABI_CLOUDABI64:
if (code < nitems(cloudabi64_syscallnames))
return (cloudabi64_syscallnames[code]);
break;
@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 24, 2016
.Dd January 29, 2016
.Dt SYSDECODE 3
.Os
.Sh NAME

@@ -44,23 +44,23 @@ The supported ABIs are named by the
.Vt enum sysdecode_abi
enumeration.
.Pp
.Bl -tag -width "Li UNKNOWN_ABI" -compact
.It Li FREEBSD
.Bl -tag -width "Li SYSDECODE_ABI_CLOUDABI64" -compact
.It Li SYSDECODE_ABI_FREEBSD
Native FreeBSD binaries.
Supported on all platforms.
.It Li FREEBSD32
.It Li SYSDECODE_ABI_FREEBSD32
32-bit FreeBSD binaries.
Supported on amd64 and powerpc64.
.It Li LINUX
.It Li SYSDECODE_ABI_LINUX
Linux binaries of the same platform.
Supported on amd64 and i386.
.It Li LINUX32
.It Li SYSDECODE_ABI_LINUX32
32-bit Linux binaries.
Supported on amd64.
.It Li CLOUDABI64
.It Li SYSDECODE_ABI_CLOUDABI64
64-bit CloudABI binaries.
Supported on aarch64 and amd64.
.It Li UNKNOWN_ABI
.It Li SYSDECODE_ABI_UNKNOWN
A placeholder for use when the ABI is not known.
.El
.Sh SEE ALSO
@@ -30,12 +30,12 @@
#define __SYSDECODE_H__

enum sysdecode_abi {
UNKNOWN_ABI = 0,
FREEBSD,
FREEBSD32,
LINUX,
LINUX32,
CLOUDABI64
SYSDECODE_ABI_UNKNOWN = 0,
SYSDECODE_ABI_FREEBSD,
SYSDECODE_ABI_FREEBSD32,
SYSDECODE_ABI_LINUX,
SYSDECODE_ABI_LINUX32,
SYSDECODE_ABI_CLOUDABI64
};

const char *sysdecode_ioctlname(unsigned long _val);
@@ -459,8 +459,9 @@ main(int argc, char *argv[])
int c;
int run_batch;
#ifdef __FreeBSD__
size_t ncpu, ncpusz;
size_t ncpusz;
double load_avg = -1;
int ncpu;
#else
double load_avg = LOADAVG_MX;
#endif
@@ -38,7 +38,7 @@
#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static Elf_Ehdr *get_elf_header(int, const char *, const struct stat *);
static int convert_prot(int); /* Elf flags -> mmap protection */
static int convert_flags(int); /* Elf flags -> mmap flags */

@@ -91,7 +91,7 @@ map_object(int fd, const char *path, const struct stat *sb)
char *note_map;
size_t note_map_len;

hdr = get_elf_header(fd, path);
hdr = get_elf_header(fd, path, sb);
if (hdr == NULL)
return (NULL);

@@ -324,10 +324,16 @@ map_object(int fd, const char *path, const struct stat *sb)
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path)
get_elf_header(int fd, const char *path, const struct stat *sbp)
{
Elf_Ehdr *hdr;

/* Make sure file has enough data for the ELF header */
if (sbp != NULL && sbp->st_size < sizeof(Elf_Ehdr)) {
_rtld_error("%s: invalid file format", path);
return (NULL);
}

hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
fd, 0);
if (hdr == (Elf_Ehdr *)MAP_FAILED) {
@@ -2,7 +2,7 @@

PROG= nvmecontrol
SRCS= nvmecontrol.c devlist.c firmware.c identify.c logpage.c \
perftest.c reset.c nvme_util.c
perftest.c reset.c nvme_util.c power.c
MAN= nvmecontrol.8

.PATH: ${.CURDIR}/../../sys/dev/nvme
@@ -70,6 +70,11 @@
.Op Fl f Ar path_to_firmware
.Op Fl a
.Aq device id
.Nm
.Ic power
.Op Fl l
.Op Fl p power_state
.Op fl w workload_hint
.Sh DESCRIPTION
NVM Express (NVMe) is a storage protocol standard, for SSDs and other
high-speed storage devices over PCI Express.

@@ -120,6 +125,18 @@ Activate the firmware in slot 4 of the nvme0 controller on the next reset.
.Pp
Download the firmware image contained in "/tmp/nvme_firmware" to slot 7 of the
nvme0 controller and activate it on the next reset.
.Pp
.Dl nvmecontrol power -l nvme0
.Pp
List all the current power modes.
.Pp
.Dl nvmecontrol power -p 3 nvme0
.Pp
Set the current power mode.
.Pp
.Dl nvmecontrol power nvme0
.Pp
Get the current power mode.
.Sh AUTHORS
.An -nosplit
.Nm
@@ -58,6 +58,7 @@ static struct nvme_function {
{"reset", reset, RESET_USAGE},
{"logpage", logpage, LOGPAGE_USAGE},
{"firmware", firmware, FIRMWARE_USAGE},
{"power", power, POWER_USAGE},
{NULL, NULL, NULL},
};

@@ -55,12 +55,16 @@
#define FIRMWARE_USAGE \
" nvmecontrol firmware [-s slot] [-f path_to_firmware] [-a] <controller id>\n"

#define POWER_USAGE \
" nvmecontrol power [-l] [-p new-state [-w workload-hint]] <controller id>\n"

void devlist(int argc, char *argv[]);
void identify(int argc, char *argv[]);
void perftest(int argc, char *argv[]);
void reset(int argc, char *argv[]);
void logpage(int argc, char *argv[]);
void firmware(int argc, char *argv[]);
void power(int argc, char *argv[]);

int open_dev(const char *str, int *fd, int show_error, int exit_on_error);
void parse_ns_str(const char *ns_str, char *ctrlr_str, int *nsid);
sbin/nvmecontrol/power.c | 185 (new file)

@@ -0,0 +1,185 @@
/*-
* Copyright (c) 2016 Netflix, Inc
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/ioccom.h>

#include <ctype.h>
#include <err.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "nvmecontrol.h"

_Static_assert(sizeof(struct nvme_power_state) == 256 / NBBY,
"nvme_power_state size wrong");

static void
power_usage(void)
{
fprintf(stderr, "usage:\n");
fprintf(stderr, POWER_USAGE);
exit(1);
}

static void
power_list_one(int i, struct nvme_power_state *nps)
{
int mpower, apower, ipower;

mpower = nps->mp;
if (nps->mps == 0)
mpower *= 100;
ipower = nps->idlp;
if (nps->ips == 1)
ipower *= 100;
apower = nps->actp;
if (nps->aps == 1)
apower *= 100;
printf("%2d: %2d.%04dW%c %3d.%03dms %3d.%03dms %2d %2d %2d %2d %2d.%04dW %2d.%04dW %d\n",
i, mpower / 10000, mpower % 10000,
nps->nops ? '*' : ' ', nps->enlat / 1000, nps->enlat % 1000,
nps->exlat / 1000, nps->exlat % 1000, nps->rrt, nps->rrl,
nps->rwt, nps->rwl, ipower / 10000, ipower % 10000,
apower / 10000, apower % 10000, nps->apw);
}

static void
power_list(struct nvme_controller_data *cdata)
{
int i;

printf("\nPower States Supported: %d\n\n", cdata->npss + 1);
printf(" # Max pwr Enter Lat Exit Lat RT RL WT WL Idle Pwr Act Pwr Workloadd\n");
printf("-- -------- --------- --------- -- -- -- -- -------- -------- --\n");
for (i = 0; i <= cdata->npss; i++)
power_list_one(i, &cdata->power_state[i]);
}

static void
power_set(int fd, int power, int workload, int perm)
{
struct nvme_pt_command pt;
uint32_t p;

p = perm ? (1u << 31) : 0;
memset(&pt, 0, sizeof(pt));
pt.cmd.opc = NVME_OPC_SET_FEATURES;
pt.cmd.cdw10 = NVME_FEAT_POWER_MANAGEMENT | p;
pt.cmd.cdw11 = power | (workload << 5);

if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
err(1, "set feature power mgmt request failed");

if (nvme_completion_is_error(&pt.cpl))
errx(1, "set feature power mgmt request returned error");
}

static void
power_show(int fd)
{
struct nvme_pt_command pt;

memset(&pt, 0, sizeof(pt));
pt.cmd.opc = NVME_OPC_GET_FEATURES;
pt.cmd.cdw10 = NVME_FEAT_POWER_MANAGEMENT;

if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
err(1, "set feature power mgmt request failed");

if (nvme_completion_is_error(&pt.cpl))
errx(1, "set feature power mgmt request returned error");

printf("Current Power Mode is %d\n", pt.cpl.cdw0);
}

void
power(int argc, char *argv[])
{
struct nvme_controller_data cdata;
int ch, listflag = 0, powerflag = 0, power = 0, fd;
int workload = 0;
char *end;

while ((ch = getopt(argc, argv, "lp:w:")) != -1) {
switch ((char)ch) {
case 'l':
listflag = 1;
break;
case 'p':
powerflag = 1;
power = strtol(optarg, &end, 0);
if (*end != '\0') {
fprintf(stderr, "Invalid power state number: %s\n", optarg);
power_usage();
}
break;
case 'w':
workload = strtol(optarg, &end, 0);
if (*end != '\0') {
fprintf(stderr, "Invalid workload hint: %s\n", optarg);
power_usage();
}
break;
default:
power_usage();
}
}

/* Check that a controller was specified. */
if (optind >= argc)
power_usage();

if (listflag && powerflag) {
fprintf(stderr, "Can't set power and list power states\n");
power_usage();
}

open_dev(argv[optind], &fd, 1, 1);
read_controller_data(fd, &cdata);

if (listflag) {
power_list(&cdata);
goto out;
}

if (powerflag) {
power_set(fd, power, workload, 0);
goto out;
}
power_show(fd);

out:
close(fd);
exit(0);
}
@@ -208,22 +208,30 @@ void
print_state(struct pfsync_state *s, int opts)
{
struct pfsync_state_peer *src, *dst;
struct pfsync_state_key *sk, *nk;
struct pfsync_state_key *key, *sk, *nk;
struct protoent *p;
int min, sec;
#ifndef __NO_STRICT_ALIGNMENT
struct pfsync_state_key aligned_key[2];

bcopy(&s->key, aligned_key, sizeof(aligned_key));
key = aligned_key;
#else
key = s->key;
#endif

if (s->direction == PF_OUT) {
src = &s->src;
dst = &s->dst;
sk = &s->key[PF_SK_STACK];
nk = &s->key[PF_SK_WIRE];
sk = &key[PF_SK_STACK];
nk = &key[PF_SK_WIRE];
if (s->proto == IPPROTO_ICMP || s->proto == IPPROTO_ICMPV6)
sk->port[0] = nk->port[0];
} else {
src = &s->dst;
dst = &s->src;
sk = &s->key[PF_SK_WIRE];
nk = &s->key[PF_SK_STACK];
sk = &key[PF_SK_WIRE];
nk = &key[PF_SK_STACK];
if (s->proto == IPPROTO_ICMP || s->proto == IPPROTO_ICMPV6)
sk->port[1] = nk->port[1];
}
@@ -701,7 +701,7 @@ keep_ok(filenode *fnode)
static void
do_file(filenode *fnode)
{
f_reqnode *r, *r_tmp;
f_reqnode *r;
f_provnode *p, *p_tmp;
provnode *pnode;
int was_set;

@@ -728,13 +728,8 @@ do_file(filenode *fnode)
*/
r = fnode->req_list;
while (r != NULL) {
r_tmp = r;
satisfy_req(r, fnode->filename);
r = r->next;
#if 0
if (was_set == 0)
free(r_tmp);
#endif
}
fnode->req_list = NULL;
@@ -315,7 +315,7 @@ swap_on_geli_args(const char *mntops)
const char *aalgo, *ealgo, *keylen_str, *sectorsize_str;
const char *aflag, *eflag, *lflag, *Tflag, *sflag;
char *p, *args, *token, *string, *ops;
int argsize, pagesize;
int pagesize;
size_t pagesize_len;
u_long ul;

@@ -389,7 +389,7 @@ swap_on_geli_args(const char *mntops)
sectorsize_str = p;
}

argsize = asprintf(&args, "%s%s%s%s%s%s%s%s%s -d",
(void)asprintf(&args, "%s%s%s%s%s%s%s%s%s -d",
aflag, aalgo, eflag, ealgo, lflag, keylen_str, Tflag,
sflag, sectorsize_str);
@ -123,7 +123,7 @@ platform_late_init(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* Construct pmap_devmap[] with DT-derived config data.
|
||||
* Construct devmap table with DT-derived config data.
|
||||
*/
|
||||
int
|
||||
platform_devmap_init(void)
|
||||
|
@ -105,23 +105,17 @@ int ctrl;
|
||||
struct cpu_functions arm9_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
arm9_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
(void *)armv4_tlb_flushI, /* tlb_flushI_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -146,18 +140,12 @@ struct cpu_functions arm9_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
(void *)cpufunc_nullop, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
arm9_context_switch, /* context_switch */
|
||||
|
||||
arm9_setup /* cpu setup */
|
||||
@ -169,23 +157,17 @@ struct cpu_functions arm9_cpufuncs = {
|
||||
struct cpu_functions armv5_ec_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
armv5_ec_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
arm10_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
arm10_tlb_flushI_SE, /* tlb_flushI_SE */
|
||||
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -211,19 +193,13 @@ struct cpu_functions armv5_ec_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
(void *)cpufunc_nullop, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
arm10_context_switch, /* context_switch */
|
||||
arm9_context_switch, /* context_switch */
|
||||
|
||||
arm10_setup /* cpu setup */
|
||||
|
||||
@ -232,23 +208,17 @@ struct cpu_functions armv5_ec_cpufuncs = {
|
||||
struct cpu_functions sheeva_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
sheeva_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
arm10_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
arm10_tlb_flushI_SE, /* tlb_flushI_SE */
|
||||
arm9_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -274,19 +244,13 @@ struct cpu_functions sheeva_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
sheeva_cpu_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
arm10_context_switch, /* context_switch */
|
||||
arm9_context_switch, /* context_switch */
|
||||
|
||||
arm10_setup /* cpu setup */
|
||||
};
|
||||
@ -296,23 +260,17 @@ struct cpu_functions sheeva_cpufuncs = {
|
||||
struct cpu_functions pj4bv7_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
armv7_drain_writebuf, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
armv7_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv7_tlb_flushID, /* tlb_flushID */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv7_tlb_flushID, /* tlb_flushI */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushI_SE */
|
||||
armv7_tlb_flushID, /* tlb_flushD */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -337,18 +295,11 @@ struct cpu_functions pj4bv7_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv7_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
(void *)cpufunc_nullop, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
armv7_context_switch, /* context_switch */
|
||||
|
||||
pj4bv7_setup /* cpu setup */
|
||||
@ -362,23 +313,17 @@ struct cpu_functions pj4bv7_cpufuncs = {
|
||||
struct cpu_functions xscale_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
xscale_cpwait, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
xscale_control, /* control */
|
||||
cpufunc_domains, /* domain */
|
||||
xscale_setttb, /* setttb */
|
||||
cpufunc_faultstatus, /* faultstatus */
|
||||
cpufunc_faultaddress, /* faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
xscale_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
(void *)armv4_tlb_flushI, /* tlb_flushI_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -403,18 +348,12 @@ struct cpu_functions xscale_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
xscale_cpu_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
xscale_context_switch, /* context_switch */
|
||||
|
||||
xscale_setup /* cpu setup */
|
||||
@ -427,23 +366,17 @@ struct cpu_functions xscale_cpufuncs = {
|
||||
struct cpu_functions xscalec3_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
xscale_cpwait, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
xscale_control, /* control */
|
||||
cpufunc_domains, /* domain */
|
||||
xscalec3_setttb, /* setttb */
|
||||
cpufunc_faultstatus, /* faultstatus */
|
||||
cpufunc_faultaddress, /* faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
xscale_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
(void *)armv4_tlb_flushI, /* tlb_flushI_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -468,18 +401,12 @@ struct cpu_functions xscalec3_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
xscale_cpu_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
xscalec3_context_switch, /* context_switch */
|
||||
|
||||
xscale_setup /* cpu setup */
|
||||
@ -491,23 +418,17 @@ struct cpu_functions xscalec3_cpufuncs = {
|
||||
struct cpu_functions fa526_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* domain */
|
||||
fa526_setttb, /* setttb */
|
||||
cpufunc_faultstatus, /* faultstatus */
|
||||
cpufunc_faultaddress, /* faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
armv4_tlb_flushID, /* tlb_flushID */
|
||||
fa526_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv4_tlb_flushI, /* tlb_flushI */
|
||||
fa526_tlb_flushI_SE, /* tlb_flushI_SE */
|
||||
armv4_tlb_flushD, /* tlb_flushD */
|
||||
armv4_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -532,17 +453,12 @@ struct cpu_functions fa526_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
fa526_flush_prefetchbuf, /* flush_prefetchbuf */
|
||||
armv4_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */
|
||||
|
||||
fa526_cpu_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
fa526_context_switch, /* context_switch */
|
||||
|
||||
@ -554,23 +470,17 @@ struct cpu_functions fa526_cpufuncs = {
|
||||
struct cpu_functions arm1176_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
arm11x6_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
arm11_tlb_flushID, /* tlb_flushID */
|
||||
arm11_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
arm11_tlb_flushI, /* tlb_flushI */
|
||||
arm11_tlb_flushI_SE, /* tlb_flushI_SE */
|
||||
arm11_tlb_flushD, /* tlb_flushD */
|
||||
arm11_tlb_flushD_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -596,18 +506,12 @@ struct cpu_functions arm1176_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */
|
||||
arm11_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
arm11x6_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
arm11_context_switch, /* context_switch */
|
||||
|
||||
arm11x6_setup /* cpu setup */
|
||||
@ -618,16 +522,12 @@ struct cpu_functions arm1176_cpufuncs = {
|
||||
struct cpu_functions cortexa_cpufuncs = {
|
||||
/* CPU functions */
|
||||
|
||||
cpufunc_id, /* id */
|
||||
cpufunc_nullop, /* cpwait */
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
cpufunc_control, /* control */
|
||||
cpufunc_domains, /* Domain */
|
||||
armv7_setttb, /* Setttb */
|
||||
cpufunc_faultstatus, /* Faultstatus */
|
||||
cpufunc_faultaddress, /* Faultaddress */
|
||||
|
||||
/*
|
||||
* TLB functions. ARMv7 does all TLB ops based on a unified TLB model
|
||||
@ -637,8 +537,6 @@ struct cpu_functions cortexa_cpufuncs = {
|
||||
|
||||
armv7_tlb_flushID, /* tlb_flushID */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushID_SE */
|
||||
armv7_tlb_flushID, /* tlb_flushI */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushI_SE */
|
||||
armv7_tlb_flushID, /* tlb_flushD */
|
||||
armv7_tlb_flushID_SE, /* tlb_flushD_SE */
|
||||
|
||||
@ -668,18 +566,12 @@ struct cpu_functions cortexa_cpufuncs = {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
cpufunc_nullop, /* flush_prefetchbuf */
|
||||
armv7_drain_writebuf, /* drain_writebuf */
|
||||
cpufunc_nullop, /* flush_brnchtgt_C */
|
||||
(void *)cpufunc_nullop, /* flush_brnchtgt_E */
|
||||
|
||||
armv7_cpu_sleep, /* sleep */
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
cpufunc_null_fixup, /* dataabt_fixup */
|
||||
cpufunc_null_fixup, /* prefetchabt_fixup */
|
||||
|
||||
armv7_context_switch, /* context_switch */
|
||||
|
||||
cortexa_setup /* cpu setup */
|
||||
@ -726,7 +618,7 @@ get_cachetype_cp15()
|
||||
__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
|
||||
: "=r" (ctype));
|
||||
|
||||
cpuid = cpufunc_id();
|
||||
cpuid = cpu_ident();
|
||||
/*
|
||||
* ...and thus spake the ARM ARM:
|
||||
*
|
||||
@ -833,7 +725,7 @@ get_cachetype_cp15()
|
||||
int
|
||||
set_cpufuncs()
|
||||
{
|
||||
cputype = cpufunc_id();
|
||||
cputype = cpu_ident();
|
||||
cputype &= CPU_ID_CPU_MASK;
|
||||
|
||||
#ifdef CPU_ARM9
|
||||
@ -889,9 +781,6 @@ set_cpufuncs()
|
||||
cpufuncs = arm1176_cpufuncs;
|
||||
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
|
||||
get_cachetype_cp15();
|
||||
|
||||
pmap_pte_init_mmu_v6();
|
||||
|
||||
goto out;
|
||||
}
|
||||
#endif /* CPU_ARM1176 */
|
||||
@ -915,8 +804,6 @@ set_cpufuncs()
|
||||
cpufuncs = cortexa_cpufuncs;
|
||||
cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */
|
||||
get_cachetype_cp15();
|
||||
|
||||
pmap_pte_init_mmu_v6();
|
||||
goto out;
|
||||
}
|
||||
#endif /* CPU_CORTEXA */
|
||||
@ -927,7 +814,6 @@ set_cpufuncs()
|
||||
cputype == CPU_ID_ARM_88SV581X_V7) {
|
||||
cpufuncs = pj4bv7_cpufuncs;
|
||||
get_cachetype_cp15();
|
||||
pmap_pte_init_mmu_v6();
|
||||
goto out;
|
||||
}
|
||||
#endif /* CPU_MV_PJ4B */
|
||||
@ -1000,27 +886,6 @@ set_cpufuncs()
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fixup routines for data and prefetch aborts.
|
||||
*
|
||||
* Several compile time symbols are used
|
||||
*
|
||||
* DEBUG_FAULT_CORRECTION - Print debugging information during the
|
||||
* correction of registers after a fault.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Null abort fixup routine.
|
||||
* For use when no fixup is required.
|
||||
*/
|
||||
int
|
||||
cpufunc_null_fixup(arg)
|
||||
void *arg;
|
||||
{
|
||||
return(ABORT_FIXUP_OK);
|
||||
}
|
||||
|
||||
/*
|
||||
* CPU Setup code
|
||||
*/
|
||||
@ -1146,7 +1011,7 @@ arm11x6_setup(void)
|
||||
uint32_t sbz=0;
|
||||
uint32_t cpuid;
|
||||
|
||||
cpuid = cpufunc_id();
|
||||
cpuid = cpu_ident();
|
||||
|
||||
cpuctrl =
|
||||
CPU_CONTROL_MMU_ENABLE |
|
||||
|
@ -62,15 +62,10 @@ END(cpufunc_nullop)
|
||||
*
|
||||
*/
|
||||
|
||||
ENTRY(cpufunc_id)
|
||||
ENTRY(cpu_ident)
|
||||
mrc p15, 0, r0, c0, c0, 0
|
||||
RET
|
||||
END(cpufunc_id)
|
||||
|
||||
ENTRY(cpufunc_cpuid)
|
||||
mrc p15, 0, r0, c0, c0, 0
|
||||
RET
|
||||
END(cpufunc_cpuid)
|
||||
END(cpu_ident)
|
||||
|
||||
ENTRY(cpu_get_control)
|
||||
mrc p15, 0, r0, c1, c0, 0
|
||||
@ -82,15 +77,15 @@ ENTRY(cpu_read_cache_config)
|
||||
RET
|
||||
END(cpu_read_cache_config)
|
||||
|
||||
ENTRY(cpufunc_faultstatus)
|
||||
ENTRY(cpu_faultstatus)
|
||||
mrc p15, 0, r0, c5, c0, 0
|
||||
RET
|
||||
END(cpufunc_faultstatus)
|
||||
END(cpu_faultstatus)
|
||||
|
||||
ENTRY(cpufunc_faultaddress)
|
||||
ENTRY(cpu_faultaddress)
|
||||
mrc p15, 0, r0, c6, c0, 0
|
||||
RET
|
||||
END(cpufunc_faultaddress)
|
||||
END(cpu_faultaddress)
|
||||
|
||||
/*
|
||||
* Generic functions to write the internal coprocessor registers
|
||||
@ -110,10 +105,10 @@ ENTRY(cpufunc_control)
|
||||
END(cpufunc_control)
|
||||
#endif
|
||||
|
||||
ENTRY(cpufunc_domains)
|
||||
ENTRY(cpu_domains)
|
||||
mcr p15, 0, r0, c3, c0, 0
|
||||
RET
|
||||
END(cpufunc_domains)
|
||||
END(cpu_domains)
|
||||
|
||||
/*
|
||||
* Generic functions to read/modify/write the internal coprocessor registers
|
||||
|
@ -1,76 +0,0 @@
|
||||
/* $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright (c) 2002 ARM Limited
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. The name of the company may not be used to endorse or promote
|
||||
* products derived from this software without specific prior written
|
||||
* permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
|
||||
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* ARM10 assembly functions for CPU / MMU / TLB specific operations
|
||||
*
|
||||
*/
|
||||
|
||||
#include <machine/asm.h>
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
/*
|
||||
* TLB functions
|
||||
*/
|
||||
ENTRY(arm10_tlb_flushID_SE)
|
||||
mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */
|
||||
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
|
||||
bx lr
|
||||
END(arm10_tlb_flushID_SE)
|
||||
|
||||
ENTRY(arm10_tlb_flushI_SE)
|
||||
mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */
|
||||
bx lr
|
||||
END(arm10_tlb_flushI_SE)
|
||||
|
||||
|
||||
/*
|
||||
* Context switch.
|
||||
*
|
||||
* These is the CPU-specific parts of the context switcher cpu_switch()
|
||||
* These functions actually perform the TTB reload.
|
||||
*
|
||||
* NOTE: Special calling convention
|
||||
* r1, r4-r13 must be preserved
|
||||
*/
|
||||
ENTRY(arm10_context_switch)
|
||||
/*
|
||||
* We can assume that the caches will only contain kernel addresses
|
||||
* at this point. So no need to flush them again.
|
||||
*/
|
||||
mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */
|
||||
mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */
|
||||
mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */
|
||||
|
||||
/* Paranoia -- make sure the pipeline is empty. */
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
bx lr
|
||||
END(arm10_context_switch)
|
@ -47,12 +47,6 @@ ENTRY(arm11_tlb_flushID_SE)
	RET
END(arm11_tlb_flushID_SE)

ENTRY(arm11_tlb_flushI_SE)
	mcr	p15, 0, r0, c8, c5, 1	/* flush I tlb single entry */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	RET
END(arm11_tlb_flushI_SE)

/*
 * Context switch.
 *
@ -87,12 +81,6 @@ ENTRY(arm11_tlb_flushID)
	mov	pc, lr
END(arm11_tlb_flushID)

ENTRY(arm11_tlb_flushI)
	mcr	p15, 0, r0, c8, c5, 0	/* flush I tlb */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
	mov	pc, lr
END(arm11_tlb_flushI)

ENTRY(arm11_tlb_flushD)
	mcr	p15, 0, r0, c8, c6, 0	/* flush D tlb */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */

@ -138,11 +138,6 @@ ENTRY_NP(arm11x6_icache_sync_all)
	RET
END(arm11x6_icache_sync_all)

ENTRY_NP(arm11x6_flush_prefetchbuf)
	mcr	p15, 0, r0, c7, c5, 4	/* Flush Prefetch Buffer */
	RET
END(arm11x6_flush_prefetchbuf)

ENTRY_NP(arm11x6_icache_sync_range)
	add	r1, r1, r0
	sub	r1, r1, #1

@ -48,11 +48,6 @@ ENTRY(armv4_tlb_flushID)
	RET
END(armv4_tlb_flushID)

ENTRY(armv4_tlb_flushI)
	mcr	p15, 0, r0, c8, c5, 0	/* flush I tlb */
	RET
END(armv4_tlb_flushI)

ENTRY(armv4_tlb_flushD)
	mcr	p15, 0, r0, c8, c6, 0	/* flush D tlb */
	RET
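
The single-entry TLB maintenance routines removed above each reduce to one or two coprocessor writes plus a write-buffer drain. A rough C sketch of what the surviving arm11_tlb_flushID_SE does for one virtual address, assuming a privileged ARM11 context; the helper name is illustrative only:

/* Sketch: flush the D-TLB and I-TLB entries for va, then drain. */
static inline void
tlb_flushID_SE_sketch(unsigned long va)
{
	__asm __volatile("mcr p15, 0, %0, c8, c6, 1" : : "r" (va));	/* D-TLB entry */
	__asm __volatile("mcr p15, 0, %0, c8, c5, 1" : : "r" (va));	/* I-TLB entry */
	__asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	/* drain write buffer */
}
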
@ -64,14 +64,6 @@ ENTRY(fa526_tlb_flushID_SE)
	mov	pc, lr
END(fa526_tlb_flushID_SE)

/*
 * TLB functions
 */
ENTRY(fa526_tlb_flushI_SE)
	mcr	p15, 0, r0, c8, c5, 1	/* flush Itlb single entry */
	mov	pc, lr
END(fa526_tlb_flushI_SE)

ENTRY(fa526_cpu_sleep)
	mov	r0, #0
/*	nop
@ -80,12 +72,6 @@ ENTRY(fa526_cpu_sleep)
	mov	pc, lr
END(fa526_cpu_sleep)

ENTRY(fa526_flush_prefetchbuf)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 4	/* Pre-fetch flush */
	mov	pc, lr
END(fa526_flush_prefetchbuf)

/*
 * Cache functions
 */
@ -200,12 +186,6 @@ ENTRY(fa526_icache_sync_range)
	mov	pc, lr
END(fa526_icache_sync_range)

ENTRY(fa526_flush_brnchtgt_E)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6	/* invalidate BTB cache */
	mov	pc, lr
END(fa526_flush_brnchtgt_E)

ENTRY(fa526_context_switch)
	/*
	 * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this.
@ -170,7 +170,7 @@ db_validate_address(vm_offset_t addr)
	    addr >= VM_MIN_KERNEL_ADDRESS
#endif
	    )
		pmap = pmap_kernel();
		pmap = kernel_pmap;
	else
		pmap = p->p_vmspace->vm_map.pmap;

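This is the first of many hunks replacing the pmap_kernel() spelling with the kernel_pmap pointer. Out-of-tree code that still uses the old spelling could, if desired, be bridged with a one-line shim along these lines; this is only a suggestion, not something the commit adds:

/* Hypothetical compatibility shim for code still using pmap_kernel(). */
#ifndef pmap_kernel
#define	pmap_kernel()	kernel_pmap
#endif
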
@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <machine/acle-compat.h>
#include <machine/armreg.h>
#include <machine/devmap.h>
#include <machine/vmparam.h>
@ -52,6 +53,9 @@ static boolean_t devmap_bootstrap_done = false;
#define	PTE_DEVICE	VM_MEMATTR_DEVICE
#elif defined(__arm__)
#define	MAX_VADDR	ARM_VECTORS_HIGH
#if __ARM_ARCH >= 6
#define	PTE_DEVICE	VM_MEMATTR_DEVICE
#endif
#endif

/*
@ -204,8 +208,13 @@ arm_devmap_bootstrap(vm_offset_t l1pt, const struct arm_devmap_entry *table)

	for (pd = devmap_table; pd->pd_size != 0; ++pd) {
#if defined(__arm__)
#if __ARM_ARCH >= 6
		pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size,
		    pd->pd_prot, pd->pd_cache);
#else
		pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size,
		    pd->pd_prot,pd->pd_cache);
		    pd->pd_prot, pd->pd_cache);
#endif
#elif defined(__aarch64__)
		pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa);
#endif
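
The loop above walks a table of arm_devmap_entry records terminated by a zero pd_size. A hedged sketch of how a platform might populate such a table, using a local stand-in struct with only the fields the loop touches (the real declaration presumably lives in the machine/devmap.h header included above; the addresses and attribute values below are made up):

#include <stddef.h>
#include <stdint.h>

/* Local stand-in with just the fields arm_devmap_bootstrap() reads. */
struct devmap_entry_sketch {
	uintptr_t	pd_va;		/* virtual address of the window */
	uintptr_t	pd_pa;		/* physical address it maps */
	size_t		pd_size;	/* size in bytes; 0 terminates */
	int		pd_prot;	/* protection (e.g. read/write) */
	int		pd_cache;	/* cache/memory attribute */
};

static const struct devmap_entry_sketch example_devmap[] = {
	{ 0xfef00000, 0x10000000, 0x00100000, 3, 0 },	/* 1 MB device window */
	{ 0, 0, 0, 0, 0 },				/* pd_size == 0: end */
};
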
@ -49,7 +49,7 @@ void _start(void);
void __start(void);
void __startC(void);

extern unsigned int cpufunc_id(void);
extern unsigned int cpu_ident(void);
extern void armv6_idcache_wbinv_all(void);
extern void armv7_idcache_wbinv_all(void);
extern void do_call(void *, void *, void *, int);
@ -248,7 +248,7 @@ _startC(void)
#ifndef KZIP
#ifdef CPU_ARM9
	/* So that idcache_wbinv works; */
	if ((cpufunc_id() & 0x0000f000) == 0x00009000)
	if ((cpu_ident() & 0x0000f000) == 0x00009000)
		arm9_setup();
#endif
#endif
@ -266,7 +266,7 @@ get_cachetype_cp15()
	__asm __volatile("mrc p15, 0, %0, c0, c0, 1"
		: "=r" (ctype));

	cpuid = cpufunc_id();
	cpuid = cpu_ident();
	/*
	 * ...and thus spake the ARM ARM:
	 *
@ -683,7 +683,7 @@ __start(void)

#ifdef CPU_ARM9
	/* So that idcache_wbinv works; */
	if ((cpufunc_id() & 0x0000f000) == 0x00009000)
	if ((cpu_ident() & 0x0000f000) == 0x00009000)
		arm9_setup();
#endif
	setup_pagetables(pt_addr, (vm_paddr_t)curaddr,
@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$");
#include <machine/proc.h>
#include <machine/cpufunc.h>
#include <machine/cpuinfo.h>
#include <machine/pte.h>
#include <machine/intr.h>
#include <machine/sysarch.h>

@ -427,10 +427,8 @@ cpu_startup(void *dummy)
{
	struct pcb *pcb = thread0.td_pcb;
	const unsigned int mbyte = 1024 * 1024;
#ifdef ARM_TP_ADDRESS
#ifndef ARM_CACHE_LOCK_ENABLE
#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE)
	vm_page_t m;
#endif
#endif

	identify_arm_cpu();
@ -455,12 +453,10 @@ cpu_startup(void *dummy)
	vm_pager_bufferinit();
	pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack +
	    USPACE_SVC_STACK_TOP;
	pmap_set_pcb_pagedir(pmap_kernel(), pcb);
#if __ARM_ARCH < 6
	pmap_set_pcb_pagedir(kernel_pmap, pcb);
#if __ARM_ARCH < 6
	vector_page_setprot(VM_PROT_READ);
	pmap_postinit();
#endif
#ifdef ARM_TP_ADDRESS
#ifdef ARM_CACHE_LOCK_ENABLE
	pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS);
	arm_lock_cache_line(ARM_TP_ADDRESS);
@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
|
||||
#include <machine/smp.h>
|
||||
#include <machine/pcb.h>
|
||||
#include <machine/pmap.h>
|
||||
#include <machine/pte.h>
|
||||
#include <machine/physmem.h>
|
||||
#include <machine/intr.h>
|
||||
#include <machine/vmparam.h>
|
||||
@ -156,7 +155,6 @@ init_secondary(int cpu)
|
||||
#ifndef ARM_INTRNG
|
||||
int start = 0, end = 0;
|
||||
#endif
|
||||
#if __ARM_ARCH >= 6
|
||||
uint32_t actlr_mask, actlr_set;
|
||||
|
||||
pmap_set_tex();
|
||||
@ -168,11 +166,6 @@ init_secondary(int cpu)
|
||||
set_stackptrs(cpu);
|
||||
|
||||
enable_interrupts(PSR_A);
|
||||
#else /* __ARM_ARCH >= 6 */
|
||||
cpu_setup();
|
||||
setttb(pmap_pa);
|
||||
cpu_tlb_flushID();
|
||||
#endif /* __ARM_ARCH >= 6 */
|
||||
pc = &__pcpu[cpu];
|
||||
|
||||
/*
|
||||
@ -184,10 +177,6 @@ init_secondary(int cpu)
|
||||
|
||||
pcpu_init(pc, cpu, sizeof(struct pcpu));
|
||||
dpcpu_init(dpcpu[cpu - 1], cpu);
|
||||
#if __ARM_ARCH < 6
|
||||
/* Provide stack pointers for other processor modes. */
|
||||
set_stackptrs(cpu);
|
||||
#endif
|
||||
/* Signal our startup to BSP */
|
||||
atomic_add_rel_32(&mp_naps, 1);
|
||||
|
||||
@ -351,13 +340,6 @@ ipi_hardclock(void *arg)
|
||||
critical_exit();
|
||||
}
|
||||
|
||||
static void
|
||||
ipi_tlb(void *dummy __unused)
|
||||
{
|
||||
|
||||
CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
|
||||
cpufuncs.cf_tlb_flushID();
|
||||
}
|
||||
#else
|
||||
static int
|
||||
ipi_handler(void *arg)
|
||||
@ -423,10 +405,6 @@ ipi_handler(void *arg)
|
||||
CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__);
|
||||
hardclockintr();
|
||||
break;
|
||||
case IPI_TLB:
|
||||
CTR1(KTR_SMP, "%s: IPI_TLB", __func__);
|
||||
cpufuncs.cf_tlb_flushID();
|
||||
break;
|
||||
default:
|
||||
panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu);
|
||||
}
|
||||
@ -456,7 +434,6 @@ release_aps(void *dummy __unused)
|
||||
intr_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0);
|
||||
intr_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0);
|
||||
intr_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0);
|
||||
intr_ipi_set_handler(IPI_TLB, "tlb", ipi_tlb, NULL, 0);
|
||||
|
||||
#else
|
||||
#ifdef IPI_IRQ_START
|
||||
@ -548,10 +525,3 @@ ipi_selected(cpuset_t cpus, u_int ipi)
|
||||
platform_ipi_send(cpus, ipi);
|
||||
}
|
||||
|
||||
void
|
||||
tlb_broadcast(int ipi)
|
||||
{
|
||||
|
||||
if (smp_started)
|
||||
ipi_all_but_self(ipi);
|
||||
}
|
||||
|
@ -645,7 +645,7 @@ pt2map_pt2pg(vm_offset_t va)
 *  vm_offset_t pmap_preboot_reserve_pages(u_int num);
 *  vm_offset_t pmap_preboot_get_vpages(u_int num);
 *  void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *      int prot, int attr);
 *      vm_prot_t prot, vm_memattr_t attr);
 *
 * (2) for all stages:
 *
@ -984,15 +984,16 @@ pmap_preboot_get_vpages(u_int num)
 *  Pre-bootstrap epoch page mapping(s) with attributes.
 */
void
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, int prot,
    int attr)
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
    vm_prot_t prot, vm_memattr_t attr)
{
	u_int num;
	u_int l1_attr, l1_prot;
	u_int l1_attr, l1_prot, l2_prot;
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	l1_prot = ATTR_TO_L1(prot);
	l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR;
	l1_prot = ATTR_TO_L1(l2_prot);
	l1_attr = ATTR_TO_L1(attr);

	/* Map all the pages. */
@ -1006,13 +1007,12 @@ pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, int prot,
			num -= PTE1_SIZE;
		} else {
			pte2p = pmap_preboot_vtopte2(va);
			pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
			pte2_store(pte2p, PTE2_KERN(pa, l2_prot, attr));
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
			num -= PAGE_SIZE;
		}
	}

}

/*
@ -1325,7 +1325,7 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
	PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x),"
	    " prot = %d\n", __func__, *virt, start, end, end - start, prot));

	l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE1_AP_KR;
	l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR;
	l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
	l1prot = ATTR_TO_L1(l2prot);
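
Under the new prototype, callers pass typed protection and memory-attribute values rather than raw ints. A minimal illustration of a pre-boot device mapping with the new signature; it assumes the usual kernel pmap headers for the vm_*_t types, and the wrapper name and its arguments are made up:

/* Sketch: map one page of a device early, under the new typed prototype. */
static void
preboot_map_uart(vm_paddr_t uart_pa, vm_offset_t uart_va)
{
	pmap_preboot_map_attr(uart_pa, uart_va, PAGE_SIZE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_MEMATTR_DEVICE);
}
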
@ -6278,11 +6278,6 @@ pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode)
|
||||
}
|
||||
|
||||
/* !!!! REMOVE !!!! */
|
||||
void
|
||||
pmap_pte_init_mmu_v6(void)
|
||||
{
|
||||
}
|
||||
|
||||
void vector_page_setprot(int p)
|
||||
{
|
||||
}
|
||||
|
@ -394,7 +394,7 @@ int pmap_needs_pte_sync;
|
||||
#define PMAP_SHPGPERPROC 200
|
||||
#endif
|
||||
|
||||
#define pmap_is_current(pm) ((pm) == pmap_kernel() || \
|
||||
#define pmap_is_current(pm) ((pm) == kernel_pmap || \
|
||||
curproc->p_vmspace->vm_map.pmap == (pm))
|
||||
static uma_zone_t pvzone = NULL;
|
||||
uma_zone_t l2zone;
|
||||
@ -437,10 +437,10 @@ pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
|
||||
/*
|
||||
* Copy the kernel's L1 entries to each new L1.
|
||||
*/
|
||||
if (l1pt != pmap_kernel()->pm_l1->l1_kva)
|
||||
memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);
|
||||
if (l1pt != kernel_pmap->pm_l1->l1_kva)
|
||||
memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE);
|
||||
|
||||
if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
|
||||
if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0)
|
||||
panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
|
||||
SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
|
||||
TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
|
||||
@ -561,7 +561,7 @@ pmap_pte_init_xscale(void)
|
||||
{
|
||||
uint32_t id, type;
|
||||
|
||||
id = cpufunc_id();
|
||||
id = cpu_ident();
|
||||
type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);
|
||||
|
||||
if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
|
||||
@ -932,7 +932,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
|
||||
* to a performance win over time as we don't need to continually
|
||||
* alloc/free.
|
||||
*/
|
||||
if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
|
||||
if (l2b->l2b_occupancy > 0 || pm == kernel_pmap)
|
||||
return;
|
||||
|
||||
/*
|
||||
@ -1002,7 +1002,7 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
|
||||
* page tables, we simply fix up the cache-mode here if it's not
|
||||
* correct.
|
||||
*/
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
ptep = &l2b->l2b_kva[l2pte_index(va)];
|
||||
pte = *ptep;
|
||||
|
||||
@ -1077,9 +1077,9 @@ pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
|
||||
vm_size_t rest;
|
||||
|
||||
CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
|
||||
" len 0x%x ", pm, pm == pmap_kernel(), va, len);
|
||||
" len 0x%x ", pm, pm == kernel_pmap, va, len);
|
||||
|
||||
if (pmap_is_current(pm) || pm == pmap_kernel()) {
|
||||
if (pmap_is_current(pm) || pm == kernel_pmap) {
|
||||
rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
|
||||
while (len > 0) {
|
||||
if (pmap_has_valid_mapping(pm, va)) {
|
||||
@ -1100,7 +1100,7 @@ pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
|
||||
vm_size_t rest;
|
||||
|
||||
CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
|
||||
"len 0x%x ", pm, pm == pmap_kernel(), va, len);
|
||||
"len 0x%x ", pm, pm == kernel_pmap, va, len);
|
||||
CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);
|
||||
|
||||
if (pmap_is_current(pm)) {
|
||||
@ -1230,13 +1230,13 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
|
||||
TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
|
||||
/* generate a count of the pv_entry uses */
|
||||
if (pv->pv_flags & PVF_WRITE) {
|
||||
if (pv->pv_pmap == pmap_kernel())
|
||||
if (pv->pv_pmap == kernel_pmap)
|
||||
kwritable++;
|
||||
else if (pv->pv_pmap == pm)
|
||||
uwritable++;
|
||||
writable++;
|
||||
}
|
||||
if (pv->pv_pmap == pmap_kernel())
|
||||
if (pv->pv_pmap == kernel_pmap)
|
||||
kentries++;
|
||||
else {
|
||||
if (pv->pv_pmap == pm)
|
||||
@ -1248,19 +1248,19 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
|
||||
* check if the user duplicate mapping has
|
||||
* been removed.
|
||||
*/
|
||||
if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) ||
|
||||
if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) ||
|
||||
(uwritable > 1)))
|
||||
pmwc = 1;
|
||||
|
||||
TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
|
||||
/* check for user uncachable conditions - order is important */
|
||||
if (pm != pmap_kernel() &&
|
||||
(pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) {
|
||||
if (pm != kernel_pmap &&
|
||||
(pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) {
|
||||
|
||||
if ((uentries > 1 && uwritable) || uwritable > 1) {
|
||||
|
||||
/* user duplicate mapping */
|
||||
if (pv->pv_pmap != pmap_kernel())
|
||||
if (pv->pv_pmap != kernel_pmap)
|
||||
pv->pv_flags |= PVF_MWC;
|
||||
|
||||
if (!(pv->pv_flags & PVF_NC)) {
|
||||
@ -1279,7 +1279,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
|
||||
if ((kwritable && (entries || kentries > 1)) ||
|
||||
(kwritable > 1) ||
|
||||
((kwritable != writable) && kentries &&
|
||||
(pv->pv_pmap == pmap_kernel() ||
|
||||
(pv->pv_pmap == kernel_pmap ||
|
||||
(pv->pv_flags & PVF_WRITE) ||
|
||||
(pv->pv_flags & PVF_MWC)))) {
|
||||
|
||||
@ -1291,7 +1291,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
|
||||
}
|
||||
|
||||
/* kernel and user are cachable */
|
||||
if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) &&
|
||||
if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) &&
|
||||
(pv->pv_flags & PVF_NC)) {
|
||||
|
||||
pv->pv_flags &= ~PVF_NC;
|
||||
@ -1300,8 +1300,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
|
||||
continue;
|
||||
}
|
||||
/* user is no longer sharable and writable */
|
||||
if (pm != pmap_kernel() &&
|
||||
(pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel()) &&
|
||||
if (pm != kernel_pmap &&
|
||||
(pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) &&
|
||||
!pmwc && (pv->pv_flags & PVF_NC)) {
|
||||
|
||||
pv->pv_flags &= ~(PVF_NC | PVF_MWC);
|
||||
@ -1565,7 +1565,7 @@ vector_page_setprot(int prot)
|
||||
struct l2_bucket *l2b;
|
||||
pt_entry_t *ptep;
|
||||
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, vector_page);
|
||||
|
||||
ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
|
||||
|
||||
@ -1603,7 +1603,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
|
||||
pg->md.pvh_attrs &= ~PVF_REF;
|
||||
else
|
||||
vm_page_aflag_set(pg, PGA_REFERENCED);
|
||||
if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
|
||||
if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) ||
|
||||
(pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
|
||||
pmap_fix_cache(pg, pm, 0);
|
||||
else if (pve->pv_flags & PVF_WRITE) {
|
||||
@ -1972,7 +1972,7 @@ pmap_postinit(void)
|
||||
pl1pt = (pd_entry_t *)va;
|
||||
|
||||
while (va < eva) {
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
ptep = &l2b->l2b_kva[l2pte_index(va)];
|
||||
pte = *ptep;
|
||||
pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
|
||||
@ -2122,7 +2122,7 @@ pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
|
||||
struct l2_bucket *l2b;
|
||||
|
||||
if (ptep) {
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
if (l2b == NULL)
|
||||
panic("pmap_alloc_specials: no l2b for 0x%x", va);
|
||||
|
||||
@ -2381,7 +2381,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
|
||||
if (pap)
|
||||
*pap = pa;
|
||||
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
|
||||
ptep = &l2b->l2b_kva[l2pte_index(va)];
|
||||
*ptep = L2_S_PROTO | pa | cache_mode |
|
||||
@ -2494,7 +2494,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
|
||||
void
|
||||
pmap_growkernel(vm_offset_t addr)
|
||||
{
|
||||
pmap_t kpm = pmap_kernel();
|
||||
pmap_t kpm = kernel_pmap;
|
||||
|
||||
if (addr <= pmap_curmaxkvaddr)
|
||||
return; /* we are OK */
|
||||
@ -2654,9 +2654,9 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
|
||||
(uint32_t) va, (uint32_t) pa));
|
||||
|
||||
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
if (l2b == NULL)
|
||||
l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_grow_l2_bucket(kernel_pmap, va);
|
||||
KASSERT(l2b != NULL, ("No L2 Bucket"));
|
||||
pte = &l2b->l2b_kva[l2pte_index(va)];
|
||||
opte = *pte;
|
||||
@ -2690,11 +2690,11 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
|
||||
if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) {
|
||||
if ((pve = pmap_get_pv_entry()) == NULL)
|
||||
panic("pmap_kenter_internal: no pv entries");
|
||||
PMAP_LOCK(pmap_kernel());
|
||||
pmap_enter_pv(m, pve, pmap_kernel(), va,
|
||||
PMAP_LOCK(kernel_pmap);
|
||||
pmap_enter_pv(m, pve, kernel_pmap, va,
|
||||
PVF_WRITE | PVF_UNMAN);
|
||||
pmap_fix_cache(m, pmap_kernel(), va);
|
||||
PMAP_UNLOCK(pmap_kernel());
|
||||
pmap_fix_cache(m, kernel_pmap, va);
|
||||
PMAP_UNLOCK(kernel_pmap);
|
||||
} else {
|
||||
m->md.pv_kva = va;
|
||||
}
|
||||
@ -2758,7 +2758,7 @@ pmap_kenter_user(vm_offset_t va, vm_paddr_t pa)
|
||||
* at the first use of the new address, or bad things will happen,
|
||||
* as we use one of these addresses in the exception handlers.
|
||||
*/
|
||||
pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
|
||||
pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1);
|
||||
}
|
||||
|
||||
vm_paddr_t
|
||||
@ -2780,7 +2780,7 @@ pmap_kremove(vm_offset_t va)
|
||||
vm_page_t m;
|
||||
vm_offset_t pa;
|
||||
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
if (!l2b)
|
||||
return;
|
||||
KASSERT(l2b != NULL, ("No L2 Bucket"));
|
||||
@ -2796,11 +2796,11 @@ pmap_kremove(vm_offset_t va)
|
||||
* before the pvzone is initialized.
|
||||
*/
|
||||
rw_wlock(&pvh_global_lock);
|
||||
PMAP_LOCK(pmap_kernel());
|
||||
PMAP_LOCK(kernel_pmap);
|
||||
if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
|
||||
(pve = pmap_remove_pv(m, pmap_kernel(), va)))
|
||||
(pve = pmap_remove_pv(m, kernel_pmap, va)))
|
||||
pmap_free_pv_entry(pve);
|
||||
PMAP_UNLOCK(pmap_kernel());
|
||||
PMAP_UNLOCK(kernel_pmap);
|
||||
rw_wunlock(&pvh_global_lock);
|
||||
va = va & ~PAGE_MASK;
|
||||
cpu_dcache_wbinv_range(va, PAGE_SIZE);
|
||||
@ -3027,7 +3027,7 @@ pmap_remove_all(vm_page_t m)
|
||||
curpm = vmspace_pmap(curproc->p_vmspace);
|
||||
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
|
||||
if (flush == FALSE && (pv->pv_pmap == curpm ||
|
||||
pv->pv_pmap == pmap_kernel()))
|
||||
pv->pv_pmap == kernel_pmap))
|
||||
flush = TRUE;
|
||||
|
||||
PMAP_LOCK(pv->pv_pmap);
|
||||
@ -3239,7 +3239,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
|
||||
"flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));
|
||||
|
||||
if (pmap == pmap_kernel()) {
|
||||
if (pmap == kernel_pmap) {
|
||||
l2b = pmap_get_l2_bucket(pmap, va);
|
||||
if (l2b == NULL)
|
||||
l2b = pmap_grow_l2_bucket(pmap, va);
|
||||
@ -3414,7 +3414,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
/*
|
||||
* Make sure userland mappings get the right permissions
|
||||
*/
|
||||
if (pmap != pmap_kernel() && va != vector_page) {
|
||||
if (pmap != kernel_pmap && va != vector_page) {
|
||||
npte |= L2_S_PROT_U;
|
||||
}
|
||||
|
||||
@ -3672,9 +3672,9 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
|
||||
l1pd = pmap->pm_l1->l1_kva[l1idx];
|
||||
if (l1pte_section_p(l1pd)) {
|
||||
/*
|
||||
* These should only happen for pmap_kernel()
|
||||
* These should only happen for kernel_pmap
|
||||
*/
|
||||
KASSERT(pmap == pmap_kernel(), ("huh"));
|
||||
KASSERT(pmap == kernel_pmap, ("huh"));
|
||||
/* XXX: what to do about the bits > 32 ? */
|
||||
if (l1pd & L1_S_SUPERSEC)
|
||||
pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
|
||||
@ -4034,7 +4034,7 @@ pmap_use_minicache(vm_offset_t va, vm_size_t size)
|
||||
if (next_bucket > eva)
|
||||
next_bucket = eva;
|
||||
|
||||
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
|
||||
l2b = pmap_get_l2_bucket(kernel_pmap, va);
|
||||
|
||||
sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];
|
||||
|
||||
@ -4137,10 +4137,10 @@ pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
|
||||
if (curthread)
|
||||
pm = vmspace_pmap(curproc->p_vmspace);
|
||||
else
|
||||
pm = pmap_kernel();
|
||||
pm = kernel_pmap;
|
||||
|
||||
for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
|
||||
if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
|
||||
if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) {
|
||||
flags |= npv->pv_flags;
|
||||
/*
|
||||
* The page is mapped non-cacheable in
|
||||
|
377
sys/arm/arm/swtch-v4.S
Normal file
377
sys/arm/arm/swtch-v4.S
Normal file
@ -0,0 +1,377 @@
|
||||
/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright 2003 Wasabi Systems, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Written by Steve C. Woodford for Wasabi Systems, Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. All advertising materials mentioning features or use of this software
|
||||
* must display the following acknowledgement:
|
||||
* This product includes software developed for the NetBSD Project by
|
||||
* Wasabi Systems, Inc.
|
||||
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
|
||||
* or promote products derived from this software without specific prior
|
||||
* written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
/*-
|
||||
* Copyright (c) 1994-1998 Mark Brinicombe.
|
||||
* Copyright (c) 1994 Brini.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This code is derived from software written for Brini by Mark Brinicombe
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. All advertising materials mentioning features or use of this software
|
||||
* must display the following acknowledgement:
|
||||
* This product includes software developed by Brini.
|
||||
* 4. The name of the company nor the name of the author may be used to
|
||||
* endorse or promote products derived from this software without specific
|
||||
* prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
|
||||
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* RiscBSD kernel project
|
||||
*
|
||||
* cpuswitch.S
|
||||
*
|
||||
* cpu switching functions
|
||||
*
|
||||
* Created : 15/10/94
|
||||
*
|
||||
*/
|
||||
|
||||
#include "assym.s"
|
||||
#include "opt_sched.h"
|
||||
|
||||
#include <machine/acle-compat.h>
|
||||
#include <machine/asm.h>
|
||||
#include <machine/asmacros.h>
|
||||
#include <machine/armreg.h>
|
||||
#include <machine/vfp.h>
|
||||
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
|
||||
#define GET_PCPU(tmp, tmp2) \
|
||||
ldr tmp, .Lcurpcpu
|
||||
|
||||
#ifdef VFP
|
||||
.fpu vfp /* allow VFP instructions */
|
||||
#endif
|
||||
|
||||
.Lcurpcpu:
|
||||
.word _C_LABEL(__pcpu)
|
||||
.Lblocked_lock:
|
||||
.word _C_LABEL(blocked_lock)
|
||||
|
||||
|
||||
#define DOMAIN_CLIENT 0x01
|
||||
|
||||
.Lcpufuncs:
|
||||
.word _C_LABEL(cpufuncs)
|
||||
|
||||
/*
|
||||
* cpu_throw(oldtd, newtd)
|
||||
*
|
||||
* Remove current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
*/
|
||||
ENTRY(cpu_throw)
|
||||
mov r5, r1
|
||||
|
||||
/*
|
||||
* r0 = oldtd
|
||||
* r5 = newtd
|
||||
*/
|
||||
|
||||
#ifdef VFP /* This thread is dying, disable */
|
||||
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
|
||||
#endif
|
||||
|
||||
GET_PCPU(r7, r9)
|
||||
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
|
||||
|
||||
/* Switch to lwp0 context */
|
||||
|
||||
ldr r9, .Lcpufuncs
|
||||
mov lr, pc
|
||||
ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
|
||||
ldr r0, [r7, #(PCB_PL1VEC)]
|
||||
ldr r1, [r7, #(PCB_DACR)]
|
||||
/*
|
||||
* r0 = Pointer to L1 slot for vector_page (or NULL)
|
||||
* r1 = lwp0's DACR
|
||||
* r5 = lwp0
|
||||
* r7 = lwp0's PCB
|
||||
* r9 = cpufuncs
|
||||
*/
|
||||
|
||||
/*
|
||||
* Ensure the vector table is accessible by fixing up lwp0's L1
|
||||
*/
|
||||
cmp r0, #0 /* No need to fixup vector table? */
|
||||
ldrne r3, [r0] /* But if yes, fetch current value */
|
||||
ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
|
||||
mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */
|
||||
cmpne r3, r2 /* Stuffing the same value? */
|
||||
strne r2, [r0] /* Store if not. */
|
||||
|
||||
#ifdef PMAP_INCLUDE_PTE_SYNC
|
||||
/*
|
||||
* Need to sync the cache to make sure that last store is
|
||||
* visible to the MMU.
|
||||
*/
|
||||
movne r1, #4
|
||||
movne lr, pc
|
||||
ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
|
||||
#endif /* PMAP_INCLUDE_PTE_SYNC */
|
||||
|
||||
/*
|
||||
* Note: We don't do the same optimisation as cpu_switch() with
|
||||
* respect to avoiding flushing the TLB if we're switching to
|
||||
* the same L1 since this process' VM space may be about to go
|
||||
* away, so we don't want *any* turds left in the TLB.
|
||||
*/
|
||||
|
||||
/* Switch the memory to the new process */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)]
|
||||
mov lr, pc
|
||||
ldr pc, [r9, #CF_CONTEXT_SWITCH]
|
||||
|
||||
GET_PCPU(r6, r4)
|
||||
/* Hook in a new pcb */
|
||||
str r7, [r6, #PC_CURPCB]
|
||||
/* We have a new curthread now so make a note it */
|
||||
str r5, [r6, #PC_CURTHREAD]
|
||||
|
||||
/* Set the new tp */
|
||||
ldr r6, [r5, #(TD_MD + MD_TP)]
|
||||
ldr r4, =ARM_TP_ADDRESS
|
||||
str r6, [r4]
|
||||
ldr r6, [r5, #(TD_MD + MD_RAS_START)]
|
||||
str r6, [r4, #4] /* ARM_RAS_START */
|
||||
ldr r6, [r5, #(TD_MD + MD_RAS_END)]
|
||||
str r6, [r4, #8] /* ARM_RAS_END */
|
||||
|
||||
/* Restore all the saved registers and exit */
|
||||
add r3, r7, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
END(cpu_throw)
|
||||
|
||||
/*
|
||||
* cpu_switch(oldtd, newtd, lock)
|
||||
*
|
||||
* Save the current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
* r2 = lock (new lock for old thread)
|
||||
*/
|
||||
ENTRY(cpu_switch)
|
||||
/* Interrupts are disabled. */
|
||||
/* Save all the registers in the old thread's pcb. */
|
||||
ldr r3, [r0, #(TD_PCB)]
|
||||
|
||||
/* Restore all the saved registers and exit */
|
||||
add r3, #(PCB_R4)
|
||||
stmia r3, {r4-r12, sp, lr, pc}
|
||||
|
||||
mov r6, r2 /* Save the mutex */
|
||||
|
||||
/* rem: r0 = old lwp */
|
||||
/* rem: interrupts are disabled */
|
||||
|
||||
/* Process is now on a processor. */
|
||||
/* We have a new curthread now so make a note it */
|
||||
GET_PCPU(r7, r2)
|
||||
str r1, [r7, #PC_CURTHREAD]
|
||||
|
||||
/* Hook in a new pcb */
|
||||
ldr r2, [r1, #TD_PCB]
|
||||
str r2, [r7, #PC_CURPCB]
|
||||
|
||||
/* Stage two : Save old context */
|
||||
|
||||
/* Get the user structure for the old thread. */
|
||||
ldr r2, [r0, #(TD_PCB)]
|
||||
mov r4, r0 /* Save the old thread. */
|
||||
|
||||
/* Store the old tp; userland can change it on armv4. */
|
||||
ldr r3, =ARM_TP_ADDRESS
|
||||
ldr r9, [r3]
|
||||
str r9, [r0, #(TD_MD + MD_TP)]
|
||||
ldr r9, [r3, #4]
|
||||
str r9, [r0, #(TD_MD + MD_RAS_START)]
|
||||
ldr r9, [r3, #8]
|
||||
str r9, [r0, #(TD_MD + MD_RAS_END)]
|
||||
|
||||
/* Set the new tp */
|
||||
ldr r9, [r1, #(TD_MD + MD_TP)]
|
||||
str r9, [r3]
|
||||
ldr r9, [r1, #(TD_MD + MD_RAS_START)]
|
||||
str r9, [r3, #4]
|
||||
ldr r9, [r1, #(TD_MD + MD_RAS_END)]
|
||||
str r9, [r3, #8]
|
||||
|
||||
/* Get the user structure for the new process in r9 */
|
||||
ldr r9, [r1, #(TD_PCB)]
|
||||
|
||||
/* rem: r2 = old PCB */
|
||||
/* rem: r9 = new PCB */
|
||||
/* rem: interrupts are enabled */
|
||||
|
||||
#ifdef VFP
|
||||
fmrx r0, fpexc /* If the VFP is enabled */
|
||||
tst r0, #(VFPEXC_EN) /* the current thread has */
|
||||
movne r1, #1 /* used it, so go save */
|
||||
addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */
|
||||
blne _C_LABEL(vfp_store) /* and disable the VFP. */
|
||||
#endif
|
||||
|
||||
/* r0-r3 now free! */
|
||||
|
||||
/* Third phase : restore saved context */
|
||||
|
||||
/* rem: r2 = old PCB */
|
||||
/* rem: r9 = new PCB */
|
||||
|
||||
ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */
|
||||
mov r2, #DOMAIN_CLIENT
|
||||
cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
|
||||
beq .Lcs_context_switched /* Yup. Don't flush cache */
|
||||
mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */
|
||||
/*
|
||||
* Get the new L1 table pointer into r11. If we're switching to
|
||||
* an LWP with the same address space as the outgoing one, we can
|
||||
* skip the cache purge and the TTB load.
|
||||
*
|
||||
* To avoid data dep stalls that would happen anyway, we try
|
||||
* and get some useful work done in the mean time.
|
||||
*/
|
||||
mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
|
||||
ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
|
||||
|
||||
teq r10, r11 /* Same L1? */
|
||||
cmpeq r0, r5 /* Same DACR? */
|
||||
beq .Lcs_context_switched /* yes! */
|
||||
|
||||
/*
|
||||
* Definately need to flush the cache.
|
||||
*/
|
||||
|
||||
ldr r1, .Lcpufuncs
|
||||
mov lr, pc
|
||||
ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
|
||||
|
||||
.Lcs_cache_purge_skipped:
|
||||
/* rem: r6 = lock */
|
||||
/* rem: r9 = new PCB */
|
||||
/* rem: r10 = old L1 */
|
||||
/* rem: r11 = new L1 */
|
||||
|
||||
mov r2, #0x00000000
|
||||
ldr r7, [r9, #(PCB_PL1VEC)]
|
||||
|
||||
/*
|
||||
* Ensure the vector table is accessible by fixing up the L1
|
||||
*/
|
||||
cmp r7, #0 /* No need to fixup vector table? */
|
||||
ldrne r2, [r7] /* But if yes, fetch current value */
|
||||
ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
|
||||
mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */
|
||||
cmpne r2, r0 /* Stuffing the same value? */
|
||||
#ifndef PMAP_INCLUDE_PTE_SYNC
|
||||
strne r0, [r7] /* Nope, update it */
|
||||
#else
|
||||
beq .Lcs_same_vector
|
||||
str r0, [r7] /* Otherwise, update it */
|
||||
|
||||
/*
|
||||
* Need to sync the cache to make sure that last store is
|
||||
* visible to the MMU.
|
||||
*/
|
||||
ldr r2, .Lcpufuncs
|
||||
mov r0, r7
|
||||
mov r1, #4
|
||||
mov lr, pc
|
||||
ldr pc, [r2, #CF_DCACHE_WB_RANGE]
|
||||
|
||||
.Lcs_same_vector:
|
||||
#endif /* PMAP_INCLUDE_PTE_SYNC */
|
||||
|
||||
cmp r10, r11 /* Switching to the same L1? */
|
||||
ldr r10, .Lcpufuncs
|
||||
beq .Lcs_same_l1 /* Yup. */
|
||||
/*
|
||||
* Do a full context switch, including full TLB flush.
|
||||
*/
|
||||
mov r0, r11
|
||||
mov lr, pc
|
||||
ldr pc, [r10, #CF_CONTEXT_SWITCH]
|
||||
|
||||
b .Lcs_context_switched
|
||||
|
||||
/*
|
||||
* We're switching to a different process in the same L1.
|
||||
* In this situation, we only need to flush the TLB for the
|
||||
* vector_page mapping, and even then only if r7 is non-NULL.
|
||||
*/
|
||||
.Lcs_same_l1:
|
||||
cmp r7, #0
|
||||
movne r0, #0 /* We *know* vector_page's VA is 0x0 */
|
||||
movne lr, pc
|
||||
ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
|
||||
|
||||
.Lcs_context_switched:
|
||||
|
||||
/* Release the old thread */
|
||||
str r6, [r4, #TD_LOCK]
|
||||
|
||||
/* XXXSCW: Safe to re-enable FIQs here */
|
||||
|
||||
/* rem: r9 = new PCB */
|
||||
|
||||
/* Restore all the saved registers and exit */
|
||||
add r3, r9, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
END(cpu_switch)
|
482
sys/arm/arm/swtch-v6.S
Normal file
482
sys/arm/arm/swtch-v6.S
Normal file
@ -0,0 +1,482 @@
|
||||
/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */
|
||||
|
||||
/*-
|
||||
* Copyright 2003 Wasabi Systems, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Written by Steve C. Woodford for Wasabi Systems, Inc.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. All advertising materials mentioning features or use of this software
|
||||
* must display the following acknowledgement:
|
||||
* This product includes software developed for the NetBSD Project by
|
||||
* Wasabi Systems, Inc.
|
||||
* 4. The name of Wasabi Systems, Inc. may not be used to endorse
|
||||
* or promote products derived from this software without specific prior
|
||||
* written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
|
||||
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
|
||||
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
* POSSIBILITY OF SUCH DAMAGE.
|
||||
*/
|
||||
/*-
|
||||
* Copyright (c) 1994-1998 Mark Brinicombe.
|
||||
* Copyright (c) 1994 Brini.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This code is derived from software written for Brini by Mark Brinicombe
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. All advertising materials mentioning features or use of this software
|
||||
* must display the following acknowledgement:
|
||||
* This product includes software developed by Brini.
|
||||
* 4. The name of the company nor the name of the author may be used to
|
||||
* endorse or promote products derived from this software without specific
|
||||
* prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
|
||||
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*
|
||||
* RiscBSD kernel project
|
||||
*
|
||||
* cpuswitch.S
|
||||
*
|
||||
* cpu switching functions
|
||||
*
|
||||
* Created : 15/10/94
|
||||
*
|
||||
*/
|
||||
|
||||
#include "assym.s"
|
||||
#include "opt_sched.h"
|
||||
|
||||
#include <machine/acle-compat.h>
|
||||
#include <machine/asm.h>
|
||||
#include <machine/asmacros.h>
|
||||
#include <machine/armreg.h>
|
||||
#include <machine/sysreg.h>
|
||||
#include <machine/vfp.h>
|
||||
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#if defined(SMP)
|
||||
#define GET_PCPU(tmp, tmp2) \
|
||||
mrc CP15_MPIDR(tmp); \
|
||||
and tmp, tmp, #0xf; \
|
||||
ldr tmp2, .Lcurpcpu+4; \
|
||||
mul tmp, tmp, tmp2; \
|
||||
ldr tmp2, .Lcurpcpu; \
|
||||
add tmp, tmp, tmp2;
|
||||
#else
|
||||
|
||||
#define GET_PCPU(tmp, tmp2) \
|
||||
ldr tmp, .Lcurpcpu
|
||||
#endif
|
||||
|
||||
#ifdef VFP
|
||||
.fpu vfp /* allow VFP instructions */
|
||||
#endif
|
||||
|
||||
.Lcurpcpu:
|
||||
.word _C_LABEL(__pcpu)
|
||||
.word PCPU_SIZE
|
||||
.Lblocked_lock:
|
||||
.word _C_LABEL(blocked_lock)
|
||||
|
||||
ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */
|
||||
DSB
|
||||
mcr CP15_TTBR0(r0) /* set the new TTB */
|
||||
ISB
|
||||
mov r0, #(CPU_ASID_KERNEL)
|
||||
mcr CP15_TLBIASID(r0) /* flush not global TLBs */
|
||||
/*
|
||||
* Flush entire Branch Target Cache because of the branch predictor
|
||||
* is not architecturally invisible. See ARM Architecture Reference
|
||||
* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
|
||||
* predictors and Requirements for branch predictor maintenance
|
||||
* operations sections.
|
||||
*
|
||||
* QQQ: The predictor is virtually addressed and holds virtual target
|
||||
* addresses. Therefore, if mapping is changed, the predictor cache
|
||||
* must be flushed.The flush is part of entire i-cache invalidation
|
||||
* what is always called when code mapping is changed. So herein,
|
||||
* it's the only place where standalone predictor flush must be
|
||||
* executed in kernel (except self modifying code case).
|
||||
*/
|
||||
mcr CP15_BPIALL /* flush entire Branch Target Cache */
|
||||
DSB
|
||||
mov pc, lr
|
||||
END(cpu_context_switch)
|
||||
|
||||
/*
|
||||
* cpu_throw(oldtd, newtd)
|
||||
*
|
||||
* Remove current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
*/
|
||||
ENTRY(cpu_throw)
|
||||
mov r10, r0 /* r10 = oldtd */
|
||||
mov r11, r1 /* r11 = newtd */
|
||||
|
||||
#ifdef VFP /* This thread is dying, disable */
|
||||
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
|
||||
#endif
|
||||
GET_PCPU(r8, r9) /* r8 = current pcpu */
|
||||
ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */
|
||||
|
||||
cmp r10, #0 /* old thread? */
|
||||
beq 2f /* no, skip */
|
||||
|
||||
/* Remove this CPU from the active list. */
|
||||
ldr r5, [r8, #PC_CURPMAP]
|
||||
mov r0, #(PM_ACTIVE)
|
||||
add r5, r0 /* r5 = old pm_active */
|
||||
|
||||
/* Compute position and mask. */
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r5, r0 /* r5 = position in old pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Clear cpu from old active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r5]
|
||||
bic r0, r2
|
||||
strex r1, r0, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r5]
|
||||
bic r0, r2
|
||||
str r0, [r5]
|
||||
#endif
|
||||
|
||||
2:
|
||||
#ifdef INVARIANTS
|
||||
cmp r11, #0 /* new thread? */
|
||||
beq badsw1 /* no, panic */
|
||||
#endif
|
||||
ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */
|
||||
|
||||
/*
|
||||
* Registers at this point
|
||||
* r4 = current cpu id
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/* MMU switch to new thread. */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)]
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* new thread? */
|
||||
beq badsw4 /* no, panic */
|
||||
#endif
|
||||
bl _C_LABEL(cpu_context_switch)
|
||||
|
||||
/*
|
||||
* Set new PMAP as current one.
|
||||
* Insert cpu to new active list.
|
||||
*/
|
||||
|
||||
ldr r6, [r11, #(TD_PROC)] /* newtd->proc */
|
||||
ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */
|
||||
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
|
||||
str r6, [r8, #PC_CURPMAP] /* store to curpmap */
|
||||
|
||||
mov r0, #PM_ACTIVE
|
||||
add r6, r0 /* r6 = new pm_active */
|
||||
|
||||
/* compute position and mask */
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r6, r0 /* r6 = position in new pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Set cpu to new active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r6]
|
||||
orr r0, r2
|
||||
strex r1, r0, [r6]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r6]
|
||||
orr r0, r2
|
||||
str r0, [r6]
|
||||
#endif
|
||||
/*
|
||||
* Registers at this point.
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
* They must match the ones in sw1 position !!!
|
||||
*/
|
||||
DMB
|
||||
b sw1 /* share new thread init with cpu_switch() */
|
||||
END(cpu_throw)
|
||||
|
||||
/*
|
||||
* cpu_switch(oldtd, newtd, lock)
|
||||
*
|
||||
* Save the current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
* r2 = lock (new lock for old thread)
|
||||
*/
|
||||
ENTRY(cpu_switch)
|
||||
/* Interrupts are disabled. */
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* old thread? */
|
||||
beq badsw2 /* no, panic */
|
||||
#endif
|
||||
/* Save all the registers in the old thread's pcb. */
|
||||
ldr r3, [r0, #(TD_PCB)]
|
||||
add r3, #(PCB_R4)
|
||||
stmia r3, {r4-r12, sp, lr, pc}
|
||||
|
||||
#ifdef INVARIANTS
|
||||
cmp r1, #0 /* new thread? */
|
||||
beq badsw3 /* no, panic */
|
||||
#endif
|
||||
/*
|
||||
* Save arguments. Note that we can now use r0-r14 until
|
||||
* it is time to restore them for the new thread. However,
|
||||
* some registers are not safe over function call.
|
||||
*/
|
||||
mov r9, r2 /* r9 = lock */
|
||||
mov r10, r0 /* r10 = oldtd */
|
||||
mov r11, r1 /* r11 = newtd */
|
||||
|
||||
GET_PCPU(r8, r3) /* r8 = current PCPU */
|
||||
ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */
|
||||
|
||||
|
||||
|
||||
#ifdef VFP
|
||||
ldr r3, [r10, #(TD_PCB)]
|
||||
fmrx r0, fpexc /* If the VFP is enabled */
|
||||
tst r0, #(VFPEXC_EN) /* the current thread has */
|
||||
movne r1, #1 /* used it, so go save */
|
||||
addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */
|
||||
blne _C_LABEL(vfp_store) /* and disable the VFP. */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* MMU switch. If we're switching to a thread with the same
|
||||
* address space as the outgoing one, we can skip the MMU switch.
|
||||
*/
|
||||
mrc CP15_TTBR0(r1) /* r1 = old TTB */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
|
||||
cmp r0, r1 /* Switching to the TTB? */
|
||||
beq sw0 /* same TTB, skip */
|
||||
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* new thread? */
|
||||
beq badsw4 /* no, panic */
|
||||
#endif
|
||||
|
||||
bl cpu_context_switch /* new TTB as argument */
|
||||
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r9 = lock
|
||||
* r10 = oldtd
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set new PMAP as current one.
|
||||
* Update active list on PMAPs.
|
||||
*/
|
||||
ldr r6, [r11, #TD_PROC] /* newtd->proc */
|
||||
ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */
|
||||
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
|
||||
|
||||
ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */
|
||||
str r6, [r8, #PC_CURPMAP] /* and save new one */
|
||||
|
||||
mov r0, #PM_ACTIVE
|
||||
add r5, r0 /* r5 = old pm_active */
|
||||
add r6, r0 /* r6 = new pm_active */
|
||||
|
||||
/* Compute position and mask. */
|
||||
ldr r4, [r8, #PC_CPUID]
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r5, r0 /* r5 = position in old pm_active */
|
||||
add r6, r0 /* r6 = position in new pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Clear cpu from old active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r5]
|
||||
bic r0, r2
|
||||
strex r1, r0, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r5]
|
||||
bic r0, r2
|
||||
str r0, [r5]
|
||||
#endif
|
||||
/* Set cpu to new active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r6]
|
||||
orr r0, r2
|
||||
strex r1, r0, [r6]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r6]
|
||||
orr r0, r2
|
||||
str r0, [r6]
|
||||
#endif
|
||||
|
||||
sw0:
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r9 = lock
|
||||
* r10 = oldtd
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/* Change the old thread lock. */
|
||||
add r5, r10, #TD_LOCK
|
||||
DMB
|
||||
1: ldrex r0, [r5]
|
||||
strex r1, r9, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
DMB
|
||||
|
||||
sw1:
|
||||
clrex
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
#if defined(SMP) && defined(SCHED_ULE)
|
||||
/*
|
||||
* 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE
|
||||
* QQQ: What does it mean in reality and why is it done?
|
||||
*/
|
||||
ldr r6, =blocked_lock
|
||||
1:
|
||||
ldr r3, [r11, #TD_LOCK] /* atomic write regular read */
|
||||
cmp r3, r6
|
||||
beq 1b
|
||||
#endif
|
||||
/* Set the new tls */
|
||||
ldr r0, [r11, #(TD_MD + MD_TP)]
|
||||
mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */
|
||||
|
||||
/* We have a new curthread now so make a note it */
|
||||
str r11, [r8, #PC_CURTHREAD]
|
||||
mcr CP15_TPIDRPRW(r11)
|
||||
|
||||
/* store pcb in per cpu structure */
|
||||
str r7, [r8, #PC_CURPCB]
|
||||
|
||||
/*
|
||||
* Restore all saved registers and return. Note that some saved
|
||||
* registers can be changed when either cpu_fork(), cpu_set_upcall(),
|
||||
* cpu_set_fork_handler(), or makectx() was called.
|
||||
*/
|
||||
add r3, r7, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
|
||||
#ifdef INVARIANTS
|
||||
badsw1:
|
||||
ldr r0, =sw1_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw2:
|
||||
ldr r0, =sw2_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw3:
|
||||
ldr r0, =sw3_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw4:
|
||||
ldr r0, =sw4_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
sw1_panic_str:
|
||||
.asciz "cpu_throw: no newthread supplied.\n"
|
||||
sw2_panic_str:
|
||||
.asciz "cpu_switch: no curthread supplied.\n"
|
||||
sw3_panic_str:
|
||||
.asciz "cpu_switch: no newthread supplied.\n"
|
||||
sw4_panic_str:
|
||||
.asciz "cpu_switch: new pagedir is NULL.\n"
|
||||
#endif
|
||||
END(cpu_switch)
|
@ -79,9 +79,7 @@
|
||||
*/
|
||||
|
||||
#include "assym.s"
|
||||
#include "opt_sched.h"
|
||||
|
||||
#include <machine/acle-compat.h>
|
||||
#include <machine/asm.h>
|
||||
#include <machine/asmacros.h>
|
||||
#include <machine/armreg.h>
|
||||
@ -89,708 +87,10 @@
|
||||
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
#if __ARM_ARCH >= 6 && defined(SMP)
|
||||
#define GET_PCPU(tmp, tmp2) \
|
||||
mrc p15, 0, tmp, c0, c0, 5; \
|
||||
and tmp, tmp, #0xf; \
|
||||
ldr tmp2, .Lcurpcpu+4; \
|
||||
mul tmp, tmp, tmp2; \
|
||||
ldr tmp2, .Lcurpcpu; \
|
||||
add tmp, tmp, tmp2;
|
||||
#else
|
||||
|
||||
#define GET_PCPU(tmp, tmp2) \
|
||||
ldr tmp, .Lcurpcpu
|
||||
#endif
|
||||
|
||||
#ifdef VFP
|
||||
.fpu vfp /* allow VFP instructions */
|
||||
#endif
|
||||
|
||||
.Lcurpcpu:
|
||||
.word _C_LABEL(__pcpu)
|
||||
.word PCPU_SIZE
|
||||
.Lblocked_lock:
|
||||
.word _C_LABEL(blocked_lock)
|
||||
|
||||
|
||||
#if __ARM_ARCH < 6
|
||||
|
||||
#define DOMAIN_CLIENT 0x01
|
||||
|
||||
.Lcpufuncs:
|
||||
.word _C_LABEL(cpufuncs)
|
||||
|
||||
/*
|
||||
* cpu_throw(oldtd, newtd)
|
||||
*
|
||||
* Remove current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
*/
|
||||
ENTRY(cpu_throw)
|
||||
mov r5, r1
|
||||
|
||||
/*
|
||||
* r0 = oldtd
|
||||
* r5 = newtd
|
||||
*/
|
||||
|
||||
#ifdef VFP /* This thread is dying, disable */
|
||||
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
|
||||
#endif
|
||||
|
||||
GET_PCPU(r7, r9)
|
||||
ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */
|
||||
|
||||
/* Switch to lwp0 context */
|
||||
|
||||
ldr r9, .Lcpufuncs
|
||||
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
|
||||
mov lr, pc
|
||||
ldr pc, [r9, #CF_IDCACHE_WBINV_ALL]
|
||||
#endif
|
||||
ldr r0, [r7, #(PCB_PL1VEC)]
|
||||
ldr r1, [r7, #(PCB_DACR)]
|
||||
/*
|
||||
* r0 = Pointer to L1 slot for vector_page (or NULL)
|
||||
* r1 = lwp0's DACR
|
||||
* r5 = lwp0
|
||||
* r7 = lwp0's PCB
|
||||
* r9 = cpufuncs
|
||||
*/
|
||||
|
||||
/*
|
||||
* Ensure the vector table is accessible by fixing up lwp0's L1
|
||||
*/
|
||||
cmp r0, #0 /* No need to fixup vector table? */
|
||||
ldrne r3, [r0] /* But if yes, fetch current value */
|
||||
ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */
|
||||
mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */
|
||||
cmpne r3, r2 /* Stuffing the same value? */
|
||||
strne r2, [r0] /* Store if not. */
|
||||
|
||||
#ifdef PMAP_INCLUDE_PTE_SYNC
|
||||
/*
|
||||
* Need to sync the cache to make sure that last store is
|
||||
* visible to the MMU.
|
||||
*/
|
||||
movne r1, #4
|
||||
movne lr, pc
|
||||
ldrne pc, [r9, #CF_DCACHE_WB_RANGE]
|
||||
#endif /* PMAP_INCLUDE_PTE_SYNC */
|
||||
|
||||
/*
|
||||
* Note: We don't do the same optimisation as cpu_switch() with
|
||||
* respect to avoiding flushing the TLB if we're switching to
|
||||
* the same L1 since this process' VM space may be about to go
|
||||
* away, so we don't want *any* turds left in the TLB.
|
||||
*/
|
||||
|
||||
/* Switch the memory to the new process */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)]
|
||||
mov lr, pc
|
||||
ldr pc, [r9, #CF_CONTEXT_SWITCH]
|
||||
|
||||
GET_PCPU(r6, r4)
|
||||
/* Hook in a new pcb */
|
||||
str r7, [r6, #PC_CURPCB]
|
||||
/* We have a new curthread now, so make a note of it. */
|
||||
str r5, [r6, #PC_CURTHREAD]
|
||||
#if __ARM_ARCH >= 6
|
||||
mcr p15, 0, r5, c13, c0, 4
|
||||
#endif
|
||||
/* Set the new tp */
|
||||
ldr r6, [r5, #(TD_MD + MD_TP)]
|
||||
#if __ARM_ARCH >= 6
|
||||
mcr p15, 0, r6, c13, c0, 3
|
||||
#else
|
||||
ldr r4, =ARM_TP_ADDRESS
|
||||
str r6, [r4]
|
||||
ldr r6, [r5, #(TD_MD + MD_RAS_START)]
|
||||
str r6, [r4, #4] /* ARM_RAS_START */
|
||||
ldr r6, [r5, #(TD_MD + MD_RAS_END)]
|
||||
str r6, [r4, #8] /* ARM_RAS_END */
|
||||
#endif
|
||||
/* Restore all the saved registers and exit */
|
||||
add r3, r7, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
END(cpu_throw)
|
||||
|
||||
/*
|
||||
* cpu_switch(oldtd, newtd, lock)
|
||||
*
|
||||
* Save the current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
* r2 = lock (new lock for old thread)
|
||||
*/
|
||||
ENTRY(cpu_switch)
|
||||
/* Interrupts are disabled. */
|
||||
/* Save all the registers in the old thread's pcb. */
|
||||
ldr r3, [r0, #(TD_PCB)]
|
||||
|
||||
/* Store the callee-saved registers, sp, lr and pc into the old thread's PCB. */
|
||||
add r3, #(PCB_R4)
|
||||
stmia r3, {r4-r12, sp, lr, pc}
|
||||
|
||||
mov r6, r2 /* Save the mutex */
|
||||
|
||||
/* rem: r0 = old lwp */
|
||||
/* rem: interrupts are disabled */
|
||||
|
||||
/* Process is now on a processor. */
|
||||
/* We have a new curthread now, so make a note of it. */
|
||||
GET_PCPU(r7, r2)
|
||||
str r1, [r7, #PC_CURTHREAD]
|
||||
#if __ARM_ARCH >= 6
|
||||
mcr p15, 0, r1, c13, c0, 4
|
||||
#endif
|
||||
|
||||
/* Hook in a new pcb */
|
||||
ldr r2, [r1, #TD_PCB]
|
||||
str r2, [r7, #PC_CURPCB]
|
||||
|
||||
/* Stage two : Save old context */
|
||||
|
||||
/* Get the user structure for the old thread. */
|
||||
ldr r2, [r0, #(TD_PCB)]
|
||||
mov r4, r0 /* Save the old thread. */
|
||||
|
||||
#if __ARM_ARCH >= 6
|
||||
/*
|
||||
* Set new tp. No need to store the old one first, userland can't
|
||||
* change it directly on armv6.
|
||||
*/
|
||||
ldr r9, [r1, #(TD_MD + MD_TP)]
|
||||
mcr p15, 0, r9, c13, c0, 3
|
||||
#else
|
||||
/* Store the old tp; userland can change it on armv4. */
|
||||
ldr r3, =ARM_TP_ADDRESS
|
||||
ldr r9, [r3]
|
||||
str r9, [r0, #(TD_MD + MD_TP)]
|
||||
ldr r9, [r3, #4]
|
||||
str r9, [r0, #(TD_MD + MD_RAS_START)]
|
||||
ldr r9, [r3, #8]
|
||||
str r9, [r0, #(TD_MD + MD_RAS_END)]
|
||||
|
||||
/* Set the new tp */
|
||||
ldr r9, [r1, #(TD_MD + MD_TP)]
|
||||
str r9, [r3]
|
||||
ldr r9, [r1, #(TD_MD + MD_RAS_START)]
|
||||
str r9, [r3, #4]
|
||||
ldr r9, [r1, #(TD_MD + MD_RAS_END)]
|
||||
str r9, [r3, #8]
|
||||
#endif
|
||||
|
||||
/* Get the user structure for the new process in r9 */
|
||||
ldr r9, [r1, #(TD_PCB)]
|
||||
|
||||
/* rem: r2 = old PCB */
|
||||
/* rem: r9 = new PCB */
|
||||
/* rem: interrupts are enabled */
|
||||
|
||||
#ifdef VFP
|
||||
fmrx r0, fpexc /* If the VFP is enabled */
|
||||
tst r0, #(VFPEXC_EN) /* the current thread has */
|
||||
movne r1, #1 /* used it, so go save */
|
||||
addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */
|
||||
blne _C_LABEL(vfp_store) /* and disable the VFP. */
|
||||
#endif
|
||||
|
||||
/* r0-r3 now free! */
|
||||
|
||||
/* Third phase : restore saved context */
|
||||
|
||||
/* rem: r2 = old PCB */
|
||||
/* rem: r9 = new PCB */
|
||||
|
||||
ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */
|
||||
mov r2, #DOMAIN_CLIENT
|
||||
cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */
|
||||
beq .Lcs_context_switched /* Yup. Don't flush cache */
|
||||
mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */
|
||||
/*
|
||||
* Get the new L1 table pointer into r11. If we're switching to
|
||||
* an LWP with the same address space as the outgoing one, we can
|
||||
* skip the cache purge and the TTB load.
|
||||
*
|
||||
* To avoid data dep stalls that would happen anyway, we try
|
||||
* and get some useful work done in the mean time.
|
||||
*/
|
||||
mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */
|
||||
ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */
|
||||
|
||||
teq r10, r11 /* Same L1? */
|
||||
cmpeq r0, r5 /* Same DACR? */
|
||||
beq .Lcs_context_switched /* yes! */
|
||||
|
||||
#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT)
|
||||
/*
|
||||
* Definitely need to flush the cache.
|
||||
*/
|
||||
|
||||
ldr r1, .Lcpufuncs
|
||||
mov lr, pc
|
||||
ldr pc, [r1, #CF_IDCACHE_WBINV_ALL]
|
||||
#endif
|
||||
.Lcs_cache_purge_skipped:
|
||||
/* rem: r6 = lock */
|
||||
/* rem: r9 = new PCB */
|
||||
/* rem: r10 = old L1 */
|
||||
/* rem: r11 = new L1 */
|
||||
|
||||
mov r2, #0x00000000
|
||||
ldr r7, [r9, #(PCB_PL1VEC)]
|
||||
|
||||
/*
|
||||
* Ensure the vector table is accessible by fixing up the L1
|
||||
*/
|
||||
cmp r7, #0 /* No need to fixup vector table? */
|
||||
ldrne r2, [r7] /* But if yes, fetch current value */
|
||||
ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */
|
||||
mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */
|
||||
cmpne r2, r0 /* Stuffing the same value? */
|
||||
#ifndef PMAP_INCLUDE_PTE_SYNC
|
||||
strne r0, [r7] /* Nope, update it */
|
||||
#else
|
||||
beq .Lcs_same_vector
|
||||
str r0, [r7] /* Otherwise, update it */
|
||||
|
||||
/*
|
||||
* Need to sync the cache to make sure that last store is
|
||||
* visible to the MMU.
|
||||
*/
|
||||
ldr r2, .Lcpufuncs
|
||||
mov r0, r7
|
||||
mov r1, #4
|
||||
mov lr, pc
|
||||
ldr pc, [r2, #CF_DCACHE_WB_RANGE]
|
||||
|
||||
.Lcs_same_vector:
|
||||
#endif /* PMAP_INCLUDE_PTE_SYNC */
|
||||
|
||||
cmp r10, r11 /* Switching to the same L1? */
|
||||
ldr r10, .Lcpufuncs
|
||||
beq .Lcs_same_l1 /* Yup. */
|
||||
/*
|
||||
* Do a full context switch, including full TLB flush.
|
||||
*/
|
||||
mov r0, r11
|
||||
mov lr, pc
|
||||
ldr pc, [r10, #CF_CONTEXT_SWITCH]
|
||||
|
||||
b .Lcs_context_switched
|
||||
|
||||
/*
|
||||
* We're switching to a different process in the same L1.
|
||||
* In this situation, we only need to flush the TLB for the
|
||||
* vector_page mapping, and even then only if r7 is non-NULL.
|
||||
*/
|
||||
.Lcs_same_l1:
|
||||
cmp r7, #0
|
||||
movne r0, #0 /* We *know* vector_page's VA is 0x0 */
|
||||
movne lr, pc
|
||||
ldrne pc, [r10, #CF_TLB_FLUSHID_SE]
|
||||
|
||||
.Lcs_context_switched:
|
||||
|
||||
/* Release the old thread */
|
||||
str r6, [r4, #TD_LOCK]
|
||||
#if defined(SCHED_ULE) && defined(SMP)
|
||||
ldr r6, .Lblocked_lock
|
||||
GET_CURTHREAD_PTR(r3)
|
||||
1:
|
||||
ldr r4, [r3, #TD_LOCK]
|
||||
cmp r4, r6
|
||||
beq 1b
|
||||
#endif
|
||||
|
||||
/* XXXSCW: Safe to re-enable FIQs here */
|
||||
|
||||
/* rem: r9 = new PCB */
|
||||
|
||||
/* Restore all the saved registers and exit */
|
||||
add r3, r9, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
END(cpu_switch)
|
||||
|
||||
|
||||
#else /* __ARM_ARCH < 6 */
|
||||
#include <machine/sysreg.h>
|
||||
|
||||
ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */
|
||||
DSB
|
||||
mcr CP15_TTBR0(r0) /* set the new TTB */
|
||||
ISB
|
||||
mov r0, #(CPU_ASID_KERNEL)
|
||||
mcr CP15_TLBIASID(r0) /* flush not global TLBs */
|
||||
/*
|
||||
* Flush the entire Branch Target Cache because the branch predictor
* is not architecturally invisible. See ARM Architecture Reference
* Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch
* predictors and Requirements for branch predictor maintenance
* operations sections.
*
* QQQ: The predictor is virtually addressed and holds virtual target
* addresses. Therefore, if the mapping is changed, the predictor cache
* must be flushed. The flush is part of the full i-cache invalidation
* that is always performed when a code mapping is changed, so this is
* the only place in the kernel where a standalone predictor flush must
* be executed (except for the self-modifying code case).
|
||||
*/
|
||||
mcr CP15_BPIALL /* and flush entire Branch Target Cache */
|
||||
DSB
|
||||
mov pc, lr
|
||||
END(cpu_context_switch)
|
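For reference, a hedged C-level rendering of the cpu_context_switch sequence above (illustrative only: the function name and the explicit ASID parameter are assumptions, and the coprocessor encodings mirror the CP15_TTBR0 / CP15_TLBIASID / CP15_BPIALL operations used above):

	static inline void
	context_switch_sketch(unsigned int new_ttb, unsigned int kernel_asid)
	{
		__asm __volatile("dsb" ::: "memory");
		/* Point TTBR0 at the new thread's page directory. */
		__asm __volatile("mcr p15, 0, %0, c2, c0, 0" :: "r" (new_ttb) : "memory");
		__asm __volatile("isb");
		/* Invalidate the non-global TLB entries for that ASID. */
		__asm __volatile("mcr p15, 0, %0, c8, c7, 2" :: "r" (kernel_asid) : "memory");
		/* Flush the branch predictor (see the comment above). */
		__asm __volatile("mcr p15, 0, %0, c7, c5, 6" :: "r" (0) : "memory");
		__asm __volatile("dsb" ::: "memory");
	}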
||||
|
||||
/*
|
||||
* cpu_throw(oldtd, newtd)
|
||||
*
|
||||
* Remove current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
*/
|
||||
ENTRY(cpu_throw)
|
||||
mov r10, r0 /* r10 = oldtd */
|
||||
mov r11, r1 /* r11 = newtd */
|
||||
|
||||
#ifdef VFP /* This thread is dying, disable */
|
||||
bl _C_LABEL(vfp_discard) /* VFP without preserving state. */
|
||||
#endif
|
||||
GET_PCPU(r8, r9) /* r8 = current pcpu */
|
||||
ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */
|
||||
|
||||
cmp r10, #0 /* old thread? */
|
||||
beq 2f /* no, skip */
|
||||
|
||||
/* Remove this CPU from the active list. */
|
||||
ldr r5, [r8, #PC_CURPMAP]
|
||||
mov r0, #(PM_ACTIVE)
|
||||
add r5, r0 /* r5 = old pm_active */
|
||||
|
||||
/* Compute position and mask. */
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r5, r0 /* r5 = position in old pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Clear cpu from old active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r5]
|
||||
bic r0, r2
|
||||
strex r1, r0, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r5]
|
||||
bic r0, r2
|
||||
str r0, [r5]
|
||||
#endif
|
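The "Compute position and mask" arithmetic above is standard bitset indexing into the pmap's pm_active CPU set, followed by an atomic clear. A short C sketch of the equivalent operation (32-bit words as above; the function and parameter names are illustrative, not the kernel's):

	#include <stdint.h>

	#define BITS_PER_WORD	32

	static inline void
	cpuset_clear_sketch(volatile uint32_t *words, int cpu)
	{
		volatile uint32_t *w = &words[cpu / BITS_PER_WORD];
		uint32_t mask = (uint32_t)1 << (cpu % BITS_PER_WORD);

		/* Same effect as the ldrex/bic/strex retry loop in the SMP case. */
		__sync_fetch_and_and(w, ~mask);
	}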
||||
|
||||
2:
|
||||
#ifdef INVARIANTS
|
||||
cmp r11, #0 /* new thread? */
|
||||
beq badsw1 /* no, panic */
|
||||
#endif
|
||||
ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */
|
||||
|
||||
/*
|
||||
* Registers at this point
|
||||
* r4 = current cpu id
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/* MMU switch to new thread. */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)]
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* new thread? */
|
||||
beq badsw4 /* no, panic */
|
||||
#endif
|
||||
bl _C_LABEL(cpu_context_switch)
|
||||
|
||||
/*
|
||||
* Set new PMAP as current one.
|
||||
* Insert cpu to new active list.
|
||||
*/
|
||||
|
||||
ldr r6, [r11, #(TD_PROC)] /* newtd->proc */
|
||||
ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */
|
||||
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
|
||||
str r6, [r8, #PC_CURPMAP] /* store to curpmap */
|
||||
|
||||
mov r0, #PM_ACTIVE
|
||||
add r6, r0 /* r6 = new pm_active */
|
||||
|
||||
/* compute position and mask */
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r6, r0 /* r6 = position in new pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Set cpu to new active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r6]
|
||||
orr r0, r2
|
||||
strex r1, r0, [r6]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r6]
|
||||
orr r0, r2
|
||||
str r0, [r6]
|
||||
#endif
|
||||
/*
|
||||
* Registers at this point.
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
* They must match the ones expected at the sw1 label!
|
||||
*/
|
||||
DMB
|
||||
b sw1 /* share new thread init with cpu_switch() */
|
||||
END(cpu_throw)
|
||||
|
||||
/*
|
||||
* cpu_switch(oldtd, newtd, lock)
|
||||
*
|
||||
* Save the current thread state, then select the next thread to run
|
||||
* and load its state.
|
||||
* r0 = oldtd
|
||||
* r1 = newtd
|
||||
* r2 = lock (new lock for old thread)
|
||||
*/
|
||||
ENTRY(cpu_switch)
|
||||
/* Interrupts are disabled. */
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* old thread? */
|
||||
beq badsw2 /* no, panic */
|
||||
#endif
|
||||
/* Save all the registers in the old thread's pcb. */
|
||||
ldr r3, [r0, #(TD_PCB)]
|
||||
add r3, #(PCB_R4)
|
||||
stmia r3, {r4-r12, sp, lr, pc}
|
||||
|
||||
#ifdef INVARIANTS
|
||||
cmp r1, #0 /* new thread? */
|
||||
beq badsw3 /* no, panic */
|
||||
#endif
|
||||
/*
|
||||
* Save arguments. Note that we can now use r0-r14 until
|
||||
* it is time to restore them for the new thread. However,
|
||||
* some registers are not preserved across function calls.
|
||||
*/
|
||||
mov r9, r2 /* r9 = lock */
|
||||
mov r10, r0 /* r10 = oldtd */
|
||||
mov r11, r1 /* r11 = newtd */
|
||||
|
||||
GET_PCPU(r8, r3) /* r8 = current PCPU */
|
||||
ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */
|
||||
|
||||
|
||||
|
||||
#ifdef VFP
|
||||
ldr r3, [r10, #(TD_PCB)]
|
||||
fmrx r0, fpexc /* If the VFP is enabled */
|
||||
tst r0, #(VFPEXC_EN) /* the current thread has */
|
||||
movne r1, #1 /* used it, so go save */
|
||||
addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */
|
||||
blne _C_LABEL(vfp_store) /* and disable the VFP. */
|
||||
#endif
|
||||
|
||||
/*
|
||||
* MMU switch. If we're switching to a thread with the same
|
||||
* address space as the outgoing one, we can skip the MMU switch.
|
||||
*/
|
||||
mrc CP15_TTBR0(r1) /* r1 = old TTB */
|
||||
ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */
|
||||
cmp r0, r1 /* Switching to the same TTB? */
|
||||
beq sw0 /* same TTB, skip */
|
||||
|
||||
#ifdef INVARIANTS
|
||||
cmp r0, #0 /* new thread? */
|
||||
beq badsw4 /* no, panic */
|
||||
#endif
|
||||
|
||||
bl cpu_context_switch /* new TTB as argument */
|
||||
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r9 = lock
|
||||
* r10 = oldtd
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/*
|
||||
* Set new PMAP as current one.
|
||||
* Update active list on PMAPs.
|
||||
*/
|
||||
ldr r6, [r11, #TD_PROC] /* newtd->proc */
|
||||
ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */
|
||||
add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */
|
||||
|
||||
ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */
|
||||
str r6, [r8, #PC_CURPMAP] /* and save new one */
|
||||
|
||||
mov r0, #PM_ACTIVE
|
||||
add r5, r0 /* r5 = old pm_active */
|
||||
add r6, r0 /* r6 = new pm_active */
|
||||
|
||||
/* Compute position and mask. */
|
||||
ldr r4, [r8, #PC_CPUID]
|
||||
#if _NCPUWORDS > 1
|
||||
lsr r0, r4, #3
|
||||
bic r0, #3
|
||||
add r5, r0 /* r5 = position in old pm_active */
|
||||
add r6, r0 /* r6 = position in new pm_active */
|
||||
mov r2, #1
|
||||
and r0, r4, #31
|
||||
lsl r2, r0 /* r2 = mask */
|
||||
#else
|
||||
mov r2, #1
|
||||
lsl r2, r4 /* r2 = mask */
|
||||
#endif
|
||||
/* Clear cpu from old active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r5]
|
||||
bic r0, r2
|
||||
strex r1, r0, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r5]
|
||||
bic r0, r2
|
||||
str r0, [r5]
|
||||
#endif
|
||||
/* Set cpu to new active list. */
|
||||
#ifdef SMP
|
||||
1: ldrex r0, [r6]
|
||||
orr r0, r2
|
||||
strex r1, r0, [r6]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
#else
|
||||
ldr r0, [r6]
|
||||
orr r0, r2
|
||||
str r0, [r6]
|
||||
#endif
|
||||
|
||||
sw0:
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r9 = lock
|
||||
* r10 = oldtd
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
/* Change the old thread lock. */
|
||||
add r5, r10, #TD_LOCK
|
||||
DMB
|
||||
1: ldrex r0, [r5]
|
||||
strex r1, r9, [r5]
|
||||
teq r1, #0
|
||||
bne 1b
|
||||
DMB
|
||||
|
||||
sw1:
|
||||
clrex
|
||||
/*
|
||||
* Registers at this point
|
||||
* r7 = new PCB
|
||||
* r8 = current pcpu
|
||||
* r11 = newtd
|
||||
*/
|
||||
|
||||
#if defined(SMP) && defined(SCHED_ULE)
|
||||
/*
|
||||
* i386 and amd64 do the blocked-lock test only for SMP and SCHED_ULE.
|
||||
* QQQ: What does it mean in reality and why is it done?
|
||||
*/
|
||||
ldr r6, =blocked_lock
|
||||
1:
|
||||
ldr r3, [r11, #TD_LOCK] /* atomic write regular read */
|
||||
cmp r3, r6
|
||||
beq 1b
|
||||
#endif
|
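The loop above spins until the incoming thread has been released by the CPU it last ran on. A minimal C sketch of the same idea (helper and parameter names are illustrative, not the kernel's):

	/*
	 * The previous owner parks the thread's lock pointer on
	 * &blocked_lock while it finishes switching away; wait until
	 * the real lock pointer is published again.
	 */
	static void
	wait_for_unblock_sketch(void * volatile *td_lockp, void *blocked_lock_addr)
	{
		while (*td_lockp == blocked_lock_addr)
			;	/* spin; a real implementation would pause here */
	}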
||||
/* Set the new tls */
|
||||
ldr r0, [r11, #(TD_MD + MD_TP)]
|
||||
mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */
|
||||
|
||||
/* We have a new curthread now, so make a note of it. */
|
||||
str r11, [r8, #PC_CURTHREAD]
|
||||
mcr CP15_TPIDRPRW(r11)
|
||||
|
||||
/* store pcb in per cpu structure */
|
||||
str r7, [r8, #PC_CURPCB]
|
||||
|
||||
/*
|
||||
* Restore all saved registers and return. Note that some of the saved
* registers may have been changed by cpu_fork(), cpu_set_upcall(),
* cpu_set_fork_handler(), or makectx().
|
||||
*/
|
||||
add r3, r7, #PCB_R4
|
||||
ldmia r3, {r4-r12, sp, pc}
|
||||
|
||||
#ifdef INVARIANTS
|
||||
badsw1:
|
||||
ldr r0, =sw1_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw2:
|
||||
ldr r0, =sw2_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw3:
|
||||
ldr r0, =sw3_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
badsw4:
|
||||
ldr r0, =sw4_panic_str
|
||||
bl _C_LABEL(panic)
|
||||
1: nop
|
||||
b 1b
|
||||
|
||||
sw1_panic_str:
|
||||
.asciz "cpu_throw: no newthread supplied.\n"
|
||||
sw2_panic_str:
|
||||
.asciz "cpu_switch: no curthread supplied.\n"
|
||||
sw3_panic_str:
|
||||
.asciz "cpu_switch: no newthread supplied.\n"
|
||||
sw4_panic_str:
|
||||
.asciz "cpu_switch: new pagedir is NULL.\n"
|
||||
#endif
|
||||
END(cpu_switch)
|
||||
|
||||
|
||||
#endif /* __ARM_ARCH < 6 */
|
||||
|
||||
ENTRY(savectx)
|
||||
stmfd sp!, {lr}
|
||||
sub sp, sp, #4
|
||||
|
@ -60,23 +60,17 @@ struct cpu_functions {
|
||||
|
||||
/* CPU functions */
|
||||
|
||||
u_int (*cf_id) (void);
|
||||
void (*cf_cpwait) (void);
|
||||
|
||||
/* MMU functions */
|
||||
|
||||
u_int (*cf_control) (u_int bic, u_int eor);
|
||||
void (*cf_domains) (u_int domains);
|
||||
void (*cf_setttb) (u_int ttb);
|
||||
u_int (*cf_faultstatus) (void);
|
||||
u_int (*cf_faultaddress) (void);
|
||||
|
||||
/* TLB functions */
|
||||
|
||||
void (*cf_tlb_flushID) (void);
|
||||
void (*cf_tlb_flushID_SE) (u_int va);
|
||||
void (*cf_tlb_flushI) (void);
|
||||
void (*cf_tlb_flushI_SE) (u_int va);
|
||||
void (*cf_tlb_flushD) (void);
|
||||
void (*cf_tlb_flushD_SE) (u_int va);
|
||||
|
||||
@ -155,18 +149,12 @@ struct cpu_functions {
|
||||
|
||||
/* Other functions */
|
||||
|
||||
void (*cf_flush_prefetchbuf) (void);
|
||||
void (*cf_drain_writebuf) (void);
|
||||
void (*cf_flush_brnchtgt_C) (void);
|
||||
void (*cf_flush_brnchtgt_E) (u_int va);
|
||||
|
||||
void (*cf_sleep) (int mode);
|
||||
|
||||
/* Soft functions */
|
||||
|
||||
int (*cf_dataabt_fixup) (void *arg);
|
||||
int (*cf_prefetchabt_fixup) (void *arg);
|
||||
|
||||
void (*cf_context_switch) (void);
|
||||
|
||||
void (*cf_setup) (void);
|
||||
@ -175,69 +163,16 @@ struct cpu_functions {
|
||||
extern struct cpu_functions cpufuncs;
|
||||
extern u_int cputype;
|
||||
|
||||
#define cpu_ident() cpufuncs.cf_id()
|
||||
#define cpu_cpwait() cpufuncs.cf_cpwait()
|
||||
|
||||
#define cpu_control(c, e) cpufuncs.cf_control(c, e)
|
||||
#define cpu_domains(d) cpufuncs.cf_domains(d)
|
||||
#define cpu_setttb(t) cpufuncs.cf_setttb(t)
|
||||
#define cpu_faultstatus() cpufuncs.cf_faultstatus()
|
||||
#define cpu_faultaddress() cpufuncs.cf_faultaddress()
|
||||
|
||||
#ifndef SMP
|
||||
|
||||
#define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID()
|
||||
#define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e)
|
||||
#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI()
|
||||
#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e)
|
||||
#define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD()
|
||||
#define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e)
|
||||
|
||||
#else
|
||||
void tlb_broadcast(int);
|
||||
|
||||
#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT)
|
||||
#define TLB_BROADCAST /* No need to explicitly send an IPI */
|
||||
#else
|
||||
#define TLB_BROADCAST tlb_broadcast(7)
|
||||
#endif
|
||||
|
||||
#define cpu_tlb_flushID() do { \
|
||||
cpufuncs.cf_tlb_flushID(); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
#define cpu_tlb_flushID_SE(e) do { \
|
||||
cpufuncs.cf_tlb_flushID_SE(e); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
|
||||
#define cpu_tlb_flushI() do { \
|
||||
cpufuncs.cf_tlb_flushI(); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
|
||||
#define cpu_tlb_flushI_SE(e) do { \
|
||||
cpufuncs.cf_tlb_flushI_SE(e); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
|
||||
#define cpu_tlb_flushD() do { \
|
||||
cpufuncs.cf_tlb_flushD(); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
|
||||
#define cpu_tlb_flushD_SE(e) do { \
|
||||
cpufuncs.cf_tlb_flushD_SE(e); \
|
||||
TLB_BROADCAST; \
|
||||
} while(0)
|
||||
|
||||
#endif
|
||||
|
||||
#define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all()
|
||||
#define cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s))
|
||||
|
||||
@ -255,19 +190,9 @@ void tlb_broadcast(int);
|
||||
#define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s))
|
||||
#define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf()
|
||||
|
||||
#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf()
|
||||
#define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf()
|
||||
#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C()
|
||||
#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e)
|
||||
|
||||
#define cpu_sleep(m) cpufuncs.cf_sleep(m)
|
||||
|
||||
#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a)
|
||||
#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a)
|
||||
#define ABORT_FIXUP_OK 0 /* fixup succeeded */
|
||||
#define ABORT_FIXUP_FAILED 1 /* fixup failed */
|
||||
#define ABORT_FIXUP_RETURN 2 /* abort handler should return */
|
||||
|
||||
#define cpu_setup() cpufuncs.cf_setup()
|
||||
|
||||
int set_cpufuncs (void);
|
||||
@ -275,15 +200,11 @@ int set_cpufuncs (void);
|
||||
#define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */
|
||||
|
||||
void cpufunc_nullop (void);
|
||||
int cpufunc_null_fixup (void *);
|
||||
int early_abort_fixup (void *);
|
||||
int late_abort_fixup (void *);
|
||||
u_int cpufunc_id (void);
|
||||
u_int cpufunc_cpuid (void);
|
||||
u_int cpu_ident (void);
|
||||
u_int cpufunc_control (u_int clear, u_int bic);
|
||||
void cpufunc_domains (u_int domains);
|
||||
u_int cpufunc_faultstatus (void);
|
||||
u_int cpufunc_faultaddress (void);
|
||||
void cpu_domains (u_int domains);
|
||||
u_int cpu_faultstatus (void);
|
||||
u_int cpu_faultaddress (void);
|
||||
u_int cpu_pfr (int);
|
||||
|
||||
#if defined(CPU_FA526)
|
||||
@ -291,10 +212,7 @@ void fa526_setup (void);
|
||||
void fa526_setttb (u_int ttb);
|
||||
void fa526_context_switch (void);
|
||||
void fa526_cpu_sleep (int);
|
||||
void fa526_tlb_flushI_SE (u_int);
|
||||
void fa526_tlb_flushID_SE (u_int);
|
||||
void fa526_flush_prefetchbuf (void);
|
||||
void fa526_flush_brnchtgt_E (u_int);
|
||||
|
||||
void fa526_icache_sync_all (void);
|
||||
void fa526_icache_sync_range(vm_offset_t start, vm_size_t end);
|
||||
@ -307,11 +225,13 @@ void fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end);
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef CPU_ARM9
|
||||
#if defined(CPU_ARM9) || defined(CPU_ARM9E)
|
||||
void arm9_setttb (u_int);
|
||||
|
||||
void arm9_tlb_flushID_SE (u_int va);
|
||||
void arm9_context_switch (void);
|
||||
#endif
|
||||
|
||||
#if defined(CPU_ARM9)
|
||||
void arm9_icache_sync_all (void);
|
||||
void arm9_icache_sync_range (vm_offset_t, vm_size_t);
|
||||
|
||||
@ -323,8 +243,6 @@ void arm9_dcache_wb_range (vm_offset_t, vm_size_t);
|
||||
void arm9_idcache_wbinv_all (void);
|
||||
void arm9_idcache_wbinv_range (vm_offset_t, vm_size_t);
|
||||
|
||||
void arm9_context_switch (void);
|
||||
|
||||
void arm9_setup (void);
|
||||
|
||||
extern unsigned arm9_dcache_sets_max;
|
||||
@ -334,11 +252,6 @@ extern unsigned arm9_dcache_index_inc;
|
||||
#endif
|
||||
|
||||
#if defined(CPU_ARM9E)
|
||||
void arm10_tlb_flushID_SE (u_int);
|
||||
void arm10_tlb_flushI_SE (u_int);
|
||||
|
||||
void arm10_context_switch (void);
|
||||
|
||||
void arm10_setup (void);
|
||||
|
||||
u_int sheeva_control_ext (u_int, u_int);
|
||||
@ -390,8 +303,6 @@ void pj4bv7_setup (void);
|
||||
#if defined(CPU_ARM1176)
|
||||
void arm11_tlb_flushID (void);
|
||||
void arm11_tlb_flushID_SE (u_int);
|
||||
void arm11_tlb_flushI (void);
|
||||
void arm11_tlb_flushI_SE (u_int);
|
||||
void arm11_tlb_flushD (void);
|
||||
void arm11_tlb_flushD_SE (u_int va);
|
||||
|
||||
@ -409,7 +320,6 @@ void arm11x6_setttb (u_int);
|
||||
void arm11x6_idcache_wbinv_all (void);
|
||||
void arm11x6_dcache_wbinv_all (void);
|
||||
void arm11x6_icache_sync_all (void);
|
||||
void arm11x6_flush_prefetchbuf (void);
|
||||
void arm11x6_icache_sync_range (vm_offset_t, vm_size_t);
|
||||
void arm11x6_idcache_wbinv_range (vm_offset_t, vm_size_t);
|
||||
void arm11x6_setup (void);
|
||||
@ -438,7 +348,6 @@ void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t);
|
||||
defined(CPU_XSCALE_80219) || defined(CPU_XSCALE_81342)
|
||||
|
||||
void armv4_tlb_flushID (void);
|
||||
void armv4_tlb_flushI (void);
|
||||
void armv4_tlb_flushD (void);
|
||||
void armv4_tlb_flushD_SE (u_int va);
|
||||
|
||||
|
@ -110,7 +110,6 @@
|
||||
#define PAGE_SHIFT 12
|
||||
#define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */
|
||||
#define PAGE_MASK (PAGE_SIZE - 1)
|
||||
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
|
||||
|
||||
#define PDR_SHIFT 20 /* log2(NBPDR) */
|
||||
#define NBPDR (1 << PDR_SHIFT)
|
||||
|
@ -216,28 +216,8 @@ vm_paddr_t pmap_preboot_get_pages(u_int );
|
||||
void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int );
|
||||
vm_offset_t pmap_preboot_reserve_pages(u_int );
|
||||
vm_offset_t pmap_preboot_get_vpages(u_int );
|
||||
void pmap_preboot_map_attr(vm_paddr_t , vm_offset_t , vm_size_t ,
|
||||
int , int );
|
||||
static __inline void
|
||||
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
|
||||
vm_size_t size, int prot, int cache)
|
||||
{
|
||||
pmap_preboot_map_attr(pa, va, size, prot, cache);
|
||||
}
|
||||
|
||||
/*
|
||||
* This structure is used by machine-dependent code to describe
|
||||
* static mappings of devices, created at bootstrap time.
|
||||
*/
|
||||
struct pmap_devmap {
|
||||
vm_offset_t pd_va; /* virtual address */
|
||||
vm_paddr_t pd_pa; /* physical address */
|
||||
vm_size_t pd_size; /* size of region */
|
||||
vm_prot_t pd_prot; /* protection code */
|
||||
int pd_cache; /* cache attributes */
|
||||
};
|
||||
|
||||
void pmap_devmap_bootstrap(const struct pmap_devmap *);
|
||||
void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t,
|
||||
vm_memattr_t);
|
||||
|
||||
#endif /* _KERNEL */
|
||||
|
||||
@ -268,41 +248,8 @@ void pmap_devmap_bootstrap(const struct pmap_devmap *);
|
||||
/*
|
||||
* sys/arm/arm/cpufunc.c
|
||||
*/
|
||||
void pmap_pte_init_mmu_v6(void);
|
||||
void vector_page_setprot(int);
|
||||
|
||||
|
||||
/*
|
||||
* sys/arm/arm/db_interface.c
|
||||
* sys/arm/arm/machdep.c
|
||||
* sys/arm/arm/minidump_machdep.c
|
||||
* sys/arm/arm/pmap.c
|
||||
*/
|
||||
#define pmap_kernel() kernel_pmap
|
||||
|
||||
/*
|
||||
* sys/arm/arm/bus_space_generic.c (just comment)
|
||||
* sys/arm/arm/devmap.c
|
||||
* sys/arm/arm/pmap.c (just comment)
|
||||
* sys/arm/at91/at91_machdep.c
|
||||
* sys/arm/cavium/cns11xx/econa_machdep.c
|
||||
* sys/arm/freescale/imx/imx6_machdep.c (just comment)
|
||||
* sys/arm/mv/orion/db88f5xxx.c
|
||||
* sys/arm/mv/mv_localbus.c
|
||||
* sys/arm/mv/mv_machdep.c
|
||||
* sys/arm/mv/mv_pci.c
|
||||
* sys/arm/s3c2xx0/s3c24x0_machdep.c
|
||||
* sys/arm/versatile/versatile_machdep.c
|
||||
* sys/arm/xscale/ixp425/avila_machdep.c
|
||||
* sys/arm/xscale/i8134x/crb_machdep.c
|
||||
* sys/arm/xscale/i80321/ep80219_machdep.c
|
||||
* sys/arm/xscale/i80321/iq31244_machdep.c
|
||||
* sys/arm/xscale/pxa/pxa_machdep.c
|
||||
*/
|
||||
#define PTE_DEVICE PTE2_ATTR_DEVICE
|
||||
|
||||
|
||||
|
||||
#endif /* _KERNEL */
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
|
@ -60,21 +60,10 @@
|
||||
/*
|
||||
* Pte related macros
|
||||
*/
|
||||
#if ARM_ARCH_6 || ARM_ARCH_7A
|
||||
#ifdef SMP
|
||||
#define PTE_NOCACHE 2
|
||||
#else
|
||||
#define PTE_NOCACHE 1
|
||||
#endif
|
||||
#define PTE_CACHE 6
|
||||
#define PTE_DEVICE 2
|
||||
#define PTE_PAGETABLE 6
|
||||
#else
|
||||
#define PTE_NOCACHE 1
|
||||
#define PTE_CACHE 2
|
||||
#define PTE_DEVICE PTE_NOCACHE
|
||||
#define PTE_PAGETABLE 3
|
||||
#endif
|
||||
|
||||
enum mem_type {
|
||||
STRONG_ORD = 0,
|
||||
@ -104,11 +93,7 @@ enum mem_type {
|
||||
|
||||
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
|
||||
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
|
||||
boolean_t pmap_page_is_mapped(vm_page_t);
|
||||
#else
|
||||
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
|
||||
#endif
|
||||
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
|
||||
|
||||
/*
|
||||
@ -131,9 +116,7 @@ struct pv_chunk;
|
||||
struct md_page {
|
||||
int pvh_attrs;
|
||||
vm_memattr_t pv_memattr;
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
vm_offset_t pv_kva; /* first kernel VA mapping */
|
||||
#endif
|
||||
TAILQ_HEAD(,pv_entry) pv_list;
|
||||
};
|
||||
|
||||
@ -164,11 +147,7 @@ struct pmap {
|
||||
struct l2_dtable *pm_l2[L2_SIZE];
|
||||
cpuset_t pm_active; /* active on cpus */
|
||||
struct pmap_statistics pm_stats; /* pmap statistics */
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
|
||||
#else
|
||||
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct pmap *pmap_t;
|
||||
@ -176,7 +155,6 @@ typedef struct pmap *pmap_t;
|
||||
#ifdef _KERNEL
|
||||
extern struct pmap kernel_pmap_store;
|
||||
#define kernel_pmap (&kernel_pmap_store)
|
||||
#define pmap_kernel() kernel_pmap
|
||||
|
||||
#define PMAP_ASSERT_LOCKED(pmap) \
|
||||
mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
|
||||
@ -199,10 +177,8 @@ typedef struct pv_entry {
|
||||
vm_offset_t pv_va; /* virtual address for mapping */
|
||||
TAILQ_ENTRY(pv_entry) pv_list;
|
||||
int pv_flags; /* flags (wired, etc...) */
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
pmap_t pv_pmap; /* pmap where mapping lies */
|
||||
TAILQ_ENTRY(pv_entry) pv_plist;
|
||||
#endif
|
||||
} *pv_entry_t;
|
||||
|
||||
/*
|
||||
@ -247,7 +223,7 @@ vtopte(vm_offset_t va)
|
||||
pd_entry_t *pdep;
|
||||
pt_entry_t *ptep;
|
||||
|
||||
if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
|
||||
if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE)
|
||||
return (NULL);
|
||||
return (ptep);
|
||||
}
|
||||
@ -271,9 +247,7 @@ void *pmap_mapdev(vm_offset_t, vm_size_t);
|
||||
void pmap_unmapdev(vm_offset_t, vm_size_t);
|
||||
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
|
||||
void pmap_debug(int);
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
|
||||
#endif
|
||||
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
|
||||
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
|
||||
void
|
||||
@ -341,119 +315,9 @@ extern int pmap_needs_pte_sync;
|
||||
/*
|
||||
* User-visible names for the ones that vary with MMU class.
|
||||
*/
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
#define L2_AP(x) (L2_AP0(x))
|
||||
#else
|
||||
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
|
||||
#endif
|
||||
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
/*
|
||||
* AP[2:1] access permissions model:
|
||||
*
|
||||
* AP[2](APX) - Write Disable
|
||||
* AP[1] - User Enable
|
||||
* AP[0] - Reference Flag
|
||||
*
|
||||
* AP[2] AP[1] Kernel User
|
||||
* 0 0 R/W N
|
||||
* 0 1 R/W R/W
|
||||
* 1 0 R N
|
||||
* 1 1 R R
|
||||
*
|
||||
*/
|
||||
#define L2_S_PROT_R (0) /* kernel read */
|
||||
#define L2_S_PROT_U (L2_AP0(2)) /* user read */
|
||||
#define L2_S_REF (L2_AP0(1)) /* reference flag */
|
||||
|
||||
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R|L2_APX)
|
||||
#define L2_S_EXECUTABLE(pte) (!(pte & L2_XN))
|
||||
#define L2_S_WRITABLE(pte) (!(pte & L2_APX))
|
||||
#define L2_S_REFERENCED(pte) (!!(pte & L2_S_REF))
|
||||
|
||||
#ifndef SMP
|
||||
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C)
|
||||
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C)
|
||||
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C)
|
||||
#else
|
||||
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
|
||||
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
||||
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
||||
#endif /* SMP */
|
||||
|
||||
#define L1_S_PROTO (L1_TYPE_S)
|
||||
#define L1_C_PROTO (L1_TYPE_C)
|
||||
#define L2_S_PROTO (L2_TYPE_S)
|
||||
|
||||
/*
|
||||
* Promotion to a 1MB (SECTION) mapping requires that the corresponding
|
||||
* 4KB (SMALL) page mappings have identical settings for the following fields:
|
||||
*/
|
||||
#define L2_S_PROMOTE (L2_S_REF | L2_SHARED | L2_S_PROT_MASK | \
|
||||
L2_XN | L2_S_PROTO)
|
||||
|
||||
/*
|
||||
* In order to compare 1MB (SECTION) entry settings with the 4KB (SMALL)
|
||||
* page mapping it is necessary to read and shift appropriate bits from
|
||||
* L1 entry to positions of the corresponding bits in the L2 entry.
|
||||
*/
|
||||
#define L1_S_DEMOTE(l1pd) ((((l1pd) & L1_S_PROTO) >> 0) | \
|
||||
(((l1pd) & L1_SHARED) >> 6) | \
|
||||
(((l1pd) & L1_S_REF) >> 6) | \
|
||||
(((l1pd) & L1_S_PROT_MASK) >> 6) | \
|
||||
(((l1pd) & L1_S_XN) >> 4))
|
||||
|
||||
#ifndef SMP
|
||||
#define ARM_L1S_STRONG_ORD (0)
|
||||
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
||||
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
||||
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1))
|
||||
#define ARM_L1S_NRML_IWT_OWT (L1_S_C)
|
||||
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B)
|
||||
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B)
|
||||
|
||||
#define ARM_L2L_STRONG_ORD (0)
|
||||
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
||||
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1))
|
||||
#define ARM_L2L_NRML_IWT_OWT (L2_C)
|
||||
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B)
|
||||
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B)
|
||||
|
||||
#define ARM_L2S_STRONG_ORD (0)
|
||||
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
||||
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1))
|
||||
#define ARM_L2S_NRML_IWT_OWT (L2_C)
|
||||
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B)
|
||||
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B)
|
||||
#else
|
||||
#define ARM_L1S_STRONG_ORD (0)
|
||||
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
||||
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
||||
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)
|
||||
|
||||
#define ARM_L2L_STRONG_ORD (0)
|
||||
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
||||
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)
|
||||
|
||||
#define ARM_L2S_STRONG_ORD (0)
|
||||
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
||||
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
|
||||
#endif /* SMP */
|
||||
|
||||
#elif ARM_NMMUS > 1
|
||||
#if ARM_NMMUS > 1
|
||||
/* More than one MMU class configured; use variables. */
|
||||
#define L2_S_PROT_U pte_l2_s_prot_u
|
||||
#define L2_S_PROT_W pte_l2_s_prot_w
|
||||
@ -495,7 +359,7 @@ extern int pmap_needs_pte_sync;
|
||||
|
||||
#endif /* ARM_NMMUS > 1 */
|
||||
|
||||
#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
|
||||
#if defined(CPU_XSCALE_81342)
|
||||
#define PMAP_NEEDS_PTE_SYNC 1
|
||||
#define PMAP_INCLUDE_PTE_SYNC
|
||||
#else
|
||||
@ -506,8 +370,6 @@ extern int pmap_needs_pte_sync;
|
||||
* These macros return various bits based on kernel/user and protection.
|
||||
* Note that the compiler will usually fold these at compile time.
|
||||
*/
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
|
||||
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
||||
#define L1_S_PROT_W (L1_S_AP(AP_W))
|
||||
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
|
||||
@ -525,27 +387,6 @@ extern int pmap_needs_pte_sync;
|
||||
|
||||
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
|
||||
#else
|
||||
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
||||
#define L1_S_PROT_W (L1_S_APX) /* Write disable */
|
||||
#define L1_S_PROT_MASK (L1_S_PROT_W|L1_S_PROT_U)
|
||||
#define L1_S_REF (L1_S_AP(AP_REF)) /* Reference flag */
|
||||
#define L1_S_WRITABLE(pd) (!((pd) & L1_S_PROT_W))
|
||||
#define L1_S_EXECUTABLE(pd) (!((pd) & L1_S_XN))
|
||||
#define L1_S_REFERENCED(pd) ((pd) & L1_S_REF)
|
||||
|
||||
#define L1_S_PROT(ku, pr) (((((ku) == PTE_KERNEL) ? 0 : L1_S_PROT_U) | \
|
||||
(((pr) & VM_PROT_WRITE) ? 0 : L1_S_PROT_W) | \
|
||||
(((pr) & VM_PROT_EXECUTE) ? 0 : L1_S_XN)))
|
||||
|
||||
#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3))
|
||||
#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
||||
|
||||
#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Macros to test if a mapping is mappable with an L1 Section mapping
|
||||
@ -620,15 +461,12 @@ extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
|
||||
vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
|
||||
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
|
||||
|
||||
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342)
|
||||
#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342)
|
||||
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
|
||||
void pmap_zero_page_generic(vm_paddr_t, int, int);
|
||||
|
||||
void pmap_pte_init_generic(void);
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
void pmap_pte_init_mmu_v6(void);
|
||||
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
|
||||
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
|
||||
#endif /* ARM_MMU_GENERIC != 0 */
|
||||
|
||||
#if ARM_MMU_XSCALE == 1
|
||||
void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
|
||||
|
@ -14,8 +14,8 @@ enum {
|
||||
IPI_STOP,
|
||||
IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */
|
||||
IPI_HARDCLOCK,
|
||||
IPI_TLB,
|
||||
IPI_CACHE,
|
||||
IPI_TLB, /* Not used now, but keep it reserved. */
|
||||
IPI_CACHE, /* Not used now, but keep it reserved. */
|
||||
INTR_IPI_COUNT
|
||||
};
|
||||
#else
|
||||
@ -25,8 +25,8 @@ enum {
|
||||
#define IPI_STOP 4
|
||||
#define IPI_STOP_HARD 4
|
||||
#define IPI_HARDCLOCK 6
|
||||
#define IPI_TLB 7
|
||||
#define IPI_CACHE 8
|
||||
#define IPI_TLB 7 /* Not used now, but keep it reserved. */
|
||||
#define IPI_CACHE 8 /* Not used now, but keep it reserved. */
|
||||
#endif /* INTRNG */
|
||||
|
||||
void init_secondary(int cpu);
|
||||
|
@ -38,13 +38,13 @@
|
||||
#define VM_MEMATTR_NOCACHE ((vm_memattr_t)PTE2_ATTR_NOCACHE)
|
||||
#define VM_MEMATTR_DEVICE ((vm_memattr_t)PTE2_ATTR_DEVICE)
|
||||
#define VM_MEMATTR_SO ((vm_memattr_t)PTE2_ATTR_SO)
|
||||
#define VM_MEMATTR_WT ((vm_memattr_t)PTE2_ATTR_WT)
|
||||
#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PTE2_ATTR_WT)
|
||||
|
||||
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WB_WA
|
||||
#define VM_MEMATTR_UNCACHEABLE VM_MEMATTR_SO /* misused by DMA */
|
||||
#ifdef _KERNEL
|
||||
/* Don't export aliased VM_MEMATTR to userland */
|
||||
#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WT /* for DRM */
|
||||
#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WRITE_THROUGH /* for DRM */
|
||||
#define VM_MEMATTR_WRITE_BACK VM_MEMATTR_WB_WA /* for DRM */
|
||||
#endif
|
||||
#else
|
||||
|
@ -128,7 +128,7 @@ get_tclk(void)
|
||||
{
|
||||
uint32_t cputype;
|
||||
|
||||
cputype = cpufunc_id();
|
||||
cputype = cpu_ident();
|
||||
cputype &= CPU_ID_CPU_MASK;
|
||||
|
||||
if (cputype == CPU_ID_MV88SV584X_V7)
|
||||
|
@ -111,7 +111,7 @@ platform_mp_start_ap(void)
|
||||
* Initialization procedure depends on core revision,
|
||||
* in this step CHIP ID is checked to choose proper procedure
|
||||
*/
|
||||
cputype = cpufunc_id();
|
||||
cputype = cpu_ident();
|
||||
cputype &= CPU_ID_CPU_MASK;
|
||||
|
||||
/*
|
||||
|
@ -377,7 +377,7 @@ soc_id(uint32_t *dev, uint32_t *rev)
|
||||
* Notice: system identifiers are available in the registers range of
|
||||
* PCIE controller, so using this function is only allowed (and
|
||||
* possible) after the internal registers range has been mapped in via
|
||||
* pmap_devmap_bootstrap().
|
||||
* arm_devmap_bootstrap().
|
||||
*/
|
||||
*dev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 0) >> 16;
|
||||
*rev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 8) & 0xff;
|
||||
|
@ -337,7 +337,7 @@ __weak_reference(mv_default_fdt_pci_devmap, mv_pci_devmap);
|
||||
*/
|
||||
|
||||
/*
|
||||
* Construct pmap_devmap[] with DT-derived config data.
|
||||
* Construct devmap table with DT-derived config data.
|
||||
*/
|
||||
int
|
||||
platform_devmap_init(void)
|
||||
|
@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$");
|
||||
|
||||
#include <machine/bus.h>
|
||||
#include <machine/intr.h>
|
||||
#include <machine/pte.h>
|
||||
#include <machine/vmparam.h>
|
||||
|
||||
#include <arm/mv/mvreg.h>
|
||||
@ -74,7 +73,7 @@ __FBSDID("$FreeBSD$");
|
||||
int platform_pci_get_irq(u_int bus, u_int slot, u_int func, u_int pin);
|
||||
|
||||
/* Static device mappings. */
|
||||
const struct pmap_devmap pmap_devmap[] = {
|
||||
const struct arm_devmap_entry db88f5xxx_devmap[] = {
|
||||
/*
|
||||
* Map the on-board devices VA == PA so that we can access them
|
||||
* with the MMU on or off.
|
||||
|
@ -38,7 +38,6 @@ __FBSDID("$FreeBSD$");
|
||||
#include <vm/pmap.h>
|
||||
|
||||
#include <machine/bus.h>
|
||||
#include <machine/pte.h>
|
||||
#include <machine/vmparam.h>
|
||||
#include <machine/fdt.h>
|
||||
|
||||
|
@ -120,7 +120,7 @@ omap4_get_revision(void)
|
||||
* the ARM cpuid to get the correct revision.
|
||||
*/
|
||||
if (revision == 0) {
|
||||
id_code = cpufunc_id();
|
||||
id_code = cpu_ident();
|
||||
revision = (id_code & 0xf) - 1;
|
||||
}
|
||||
|
||||
|
@ -88,7 +88,7 @@ static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = {
|
||||
|
||||
|
||||
/*
|
||||
* Construct pmap_devmap[] with DT-derived config data.
|
||||
* Construct devmap table with DT-derived config data.
|
||||
*/
|
||||
int
|
||||
platform_devmap_init(void)
|
||||
|
@ -56,6 +56,6 @@ copystr(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len,
|
||||
if (lencopied != NULL)
|
||||
*lencopied = pos;
|
||||
|
||||
return (0);
|
||||
return (error);
|
||||
}
|
||||
|
||||
|
@ -49,10 +49,12 @@ ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
|
||||
|
||||
/* Size of pcb, rounded to keep stack alignment */
|
||||
ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1));
|
||||
ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT);
|
||||
ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x));
|
||||
ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
|
||||
ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr));
|
||||
ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
|
||||
ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags));
|
||||
|
||||
ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
|
||||
|
||||
|
@ -158,12 +158,17 @@ arm_gic_fdt_attach(device_t dev)
|
||||
OF_getencprop(root, "#size-cells", &sc->sc_size_cells,
|
||||
sizeof(sc->sc_size_cells));
|
||||
|
||||
/* If we have no children don't probe for them */
|
||||
child = OF_child(root);
|
||||
if (child == 0)
|
||||
return (0);
|
||||
|
||||
if (gic_fill_ranges(root, sc) < 0) {
|
||||
device_printf(dev, "could not get ranges\n");
|
||||
return (ENXIO);
|
||||
}
|
||||
|
||||
for (child = OF_child(root); child != 0; child = OF_peer(child)) {
|
||||
for (; child != 0; child = OF_peer(child)) {
|
||||
dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO);
|
||||
|
||||
if (ofw_bus_gen_setup_devinfo(&dinfo->obdinfo, child) != 0) {
|
||||
|
@ -472,9 +472,6 @@ ipi_all_but_self(u_int ipi)
|
||||
other_cpus = all_cpus;
|
||||
CPU_CLR(PCPU_GET(cpuid), &other_cpus);
|
||||
|
||||
/* ARM64TODO: This will be fixed with arm_intrng */
|
||||
ipi += 16;
|
||||
|
||||
CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
|
||||
PIC_IPI_SEND(root_pic, other_cpus, ipi);
|
||||
}
|
||||
|
@ -369,8 +369,8 @@ create_pagetables:
|
||||
sub x8, x7, x6
|
||||
/* Get the number of l2 pages to allocate, rounded down */
|
||||
lsr x10, x8, #(L2_SHIFT)
|
||||
/* Add 4 MiB for any rounding above and the module data */
|
||||
add x10, x10, #2
|
||||
/* Add 8 MiB for any rounding above and the module data */
|
||||
add x10, x10, #4
|
||||
|
||||
/* Create the kernel space L2 table */
|
||||
mov x6, x26
|
||||
|
@ -233,7 +233,8 @@ int
|
||||
ptrace_single_step(struct thread *td)
|
||||
{
|
||||
|
||||
/* TODO; */
|
||||
td->td_frame->tf_spsr |= PSR_SS;
|
||||
td->td_pcb->pcb_flags |= PCB_SINGLE_STEP;
|
||||
return (0);
|
||||
}
|
||||
|
||||
@ -241,7 +242,8 @@ int
|
||||
ptrace_clear_single_step(struct thread *td)
|
||||
{
|
||||
|
||||
/* TODO; */
|
||||
td->td_frame->tf_spsr &= ~PSR_SS;
|
||||
td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
|
||||
return (0);
|
||||
}
|
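The two ptrace hooks above set and clear both halves of the arm64 software-step state; a sketch of why both bits are touched (bit positions per the ARMv8 architecture; the names below are illustrative, not the kernel's):

	#include <stdint.h>

	#define SPSR_SS_BIT	(UINT64_C(1) << 21)	/* PSTATE.SS, saved in SPSR_EL1 */
	#define MDSCR_SS_BIT	(UINT64_C(1) << 0)	/* MDSCR_EL1.SS, global step enable */

	static void
	enable_single_step_sketch(uint64_t *spsr, uint64_t *mdscr)
	{
		/* Both bits must be set for a step exception after returning to EL0. */
		*spsr |= SPSR_SS_BIT;
		*mdscr |= MDSCR_SS_BIT;
	}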
||||
|
||||
|
@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
|
||||
#include <vm/vm_extern.h>
|
||||
#include <vm/vm_kern.h>
|
||||
|
||||
#include <machine/debug_monitor.h>
|
||||
#include <machine/intr.h>
|
||||
#include <machine/smp.h>
|
||||
#ifdef VFP
|
||||
@ -247,6 +248,8 @@ init_secondary(uint64_t cpu)
|
||||
vfp_init();
|
||||
#endif
|
||||
|
||||
dbg_monitor_init();
|
||||
|
||||
/* Enable interrupts */
|
||||
intr_enable();
|
||||
|
||||
|
@ -596,7 +596,8 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
|
||||
* up to the physical address KERNBASE points at.
|
||||
*/
|
||||
map_slot = avail_slot = 0;
|
||||
for (; map_slot < (physmap_idx * 2); map_slot += 2) {
|
||||
for (; map_slot < (physmap_idx * 2) &&
|
||||
avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) {
|
||||
if (physmap[map_slot] == physmap[map_slot + 1])
|
||||
continue;
|
||||
|
||||
@ -612,7 +613,7 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
|
||||
}
|
||||
|
||||
/* Add the memory before the kernel */
|
||||
if (physmap[avail_slot] < pa) {
|
||||
if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) {
|
||||
phys_avail[avail_slot] = physmap[map_slot];
|
||||
phys_avail[avail_slot + 1] = pa;
|
||||
physmem += (phys_avail[avail_slot + 1] -
|
||||
|
@ -37,10 +37,37 @@
|
||||
|
||||
__FBSDID("$FreeBSD$");
|
||||
|
||||
.macro clear_step_flag pcbflags, tmp
|
||||
tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
|
||||
mrs \tmp, mdscr_el1
|
||||
bic \tmp, \tmp, #1
|
||||
msr mdscr_el1, \tmp
|
||||
isb
|
||||
999:
|
||||
.endm
|
||||
|
||||
.macro set_step_flag pcbflags, tmp
|
||||
tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f
|
||||
mrs \tmp, mdscr_el1
|
||||
orr \tmp, \tmp, #1
|
||||
msr mdscr_el1, \tmp
|
||||
isb
|
||||
999:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* void cpu_throw(struct thread *old, struct thread *new)
|
||||
*/
|
||||
ENTRY(cpu_throw)
|
||||
/* If old == NULL, skip disabling single stepping */
|
||||
cbz x0, 1f
|
||||
|
||||
/* If we were single stepping, disable it */
|
||||
ldr x4, [x0, #TD_PCB]
|
||||
ldr w5, [x4, #PCB_FLAGS]
|
||||
clear_step_flag w5, x6
|
||||
1:
|
||||
|
||||
#ifdef VFP
|
||||
/* Backup the new thread pointer around a call to C code */
|
||||
mov x19, x1
|
||||
@ -69,6 +96,10 @@ ENTRY(cpu_throw)
|
||||
dsb sy
|
||||
isb
|
||||
|
||||
/* If we are single stepping, enable it */
|
||||
ldr w5, [x4, #PCB_FLAGS]
|
||||
set_step_flag w5, x6
|
||||
|
||||
/* Restore the registers */
|
||||
ldp x5, x6, [x4, #PCB_SP]
|
||||
mov sp, x5
|
||||
@ -127,6 +158,10 @@ ENTRY(cpu_switch)
|
||||
mrs x6, tpidr_el0
|
||||
stp x5, x6, [x4, #PCB_SP]
|
||||
|
||||
/* If we were single stepping, disable it */
|
||||
ldr w5, [x4, #PCB_FLAGS]
|
||||
clear_step_flag w5, x6
|
||||
|
||||
#ifdef VFP
|
||||
mov x19, x0
|
||||
mov x20, x1
|
||||
@ -174,6 +209,10 @@ ENTRY(cpu_switch)
|
||||
b.eq 1b
|
||||
#endif
|
||||
|
||||
/* If we are single stepping, enable it */
|
||||
ldr w5, [x4, #PCB_FLAGS]
|
||||
set_step_flag w5, x6
|
||||
|
||||
/* Restore the registers */
|
||||
ldp x5, x6, [x4, #PCB_SP]
|
||||
mov sp, x5
|
||||
|
@ -138,7 +138,6 @@ svc_handler(struct trapframe *frame)
|
||||
int error;
|
||||
|
||||
td = curthread;
|
||||
td->td_frame = frame;
|
||||
|
||||
error = syscallenter(td, &sa);
|
||||
syscallret(td, error, &sa);
|
||||
@ -338,6 +337,9 @@ do_el0_sync(struct trapframe *frame)
|
||||
("Invalid pcpu address from userland: %p (tpidr %lx)",
|
||||
get_pcpu(), READ_SPECIALREG(tpidr_el1)));
|
||||
|
||||
td = curthread;
|
||||
td->td_frame = frame;
|
||||
|
||||
esr = READ_SPECIALREG(esr_el1);
|
||||
exception = ESR_ELx_EXCEPTION(esr);
|
||||
switch (exception) {
|
||||
@ -373,15 +375,22 @@ do_el0_sync(struct trapframe *frame)
|
||||
el0_excp_unknown(frame);
|
||||
break;
|
||||
case EXCP_PC_ALIGN:
|
||||
td = curthread;
|
||||
call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr);
|
||||
userret(td, frame);
|
||||
break;
|
||||
case EXCP_BRK:
|
||||
td = curthread;
|
||||
call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr);
|
||||
userret(td, frame);
|
||||
break;
|
||||
case EXCP_SOFTSTP_EL0:
|
||||
td->td_frame->tf_spsr &= ~PSR_SS;
|
||||
td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP;
|
||||
WRITE_SPECIALREG(MDSCR_EL1,
|
||||
READ_SPECIALREG(MDSCR_EL1) & ~DBG_MDSCR_SS);
|
||||
call_trapsignal(td, SIGTRAP, TRAP_TRACE,
|
||||
(void *)frame->tf_elr);
|
||||
userret(td, frame);
|
||||
break;
|
||||
default:
|
||||
print_registers(frame);
|
||||
panic("Unknown userland exception %x esr_el1 %lx\n", exception,
|
||||
|
@ -101,6 +101,7 @@
|
||||
#define EXCP_SP_ALIGN 0x26 /* SP alignment fault */
|
||||
#define EXCP_TRAP_FP 0x2c /* Trapped FP exception */
|
||||
#define EXCP_SERROR 0x2f /* SError interrupt */
|
||||
#define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */
|
||||
#define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */
|
||||
#define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */
|
||||
#define EXCP_BRK 0x3c /* Breakpoint */
|
||||
|
@ -45,6 +45,10 @@ struct pcb {
|
||||
/* Fault handler, the error value is passed in x0 */
|
||||
vm_offset_t pcb_onfault;
|
||||
|
||||
u_int pcb_flags;
|
||||
#define PCB_SINGLE_STEP_SHIFT 0
|
||||
#define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT)
|
||||
|
||||
/* Placed last to simplify the asm that accesses the rest of the struct */
|
||||
__uint128_t pcb_vfp[32];
|
||||
uint32_t pcb_fpcr;
|
||||
|
@ -121,7 +121,7 @@ extern struct pmap kernel_pmap_store;
|
||||
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
|
||||
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
|
||||
|
||||
#define PHYS_AVAIL_SIZE 10
|
||||
#define PHYS_AVAIL_SIZE 32
|
||||
extern vm_paddr_t phys_avail[];
|
||||
extern vm_paddr_t dump_avail[];
|
||||
extern vm_offset_t virtual_avail;
|
||||
|
@ -184,11 +184,16 @@ efinet_init(struct iodesc *desc, void *machdep_hint)
|
||||
EFI_HANDLE h;
|
||||
EFI_STATUS status;
|
||||
|
||||
if (nif->nif_driver->netif_ifs[nif->nif_unit].dif_unit < 0) {
|
||||
printf("Invalid network interface %d\n", nif->nif_unit);
|
||||
return;
|
||||
}
|
||||
|
||||
h = nif->nif_driver->netif_ifs[nif->nif_unit].dif_private;
|
||||
status = BS->HandleProtocol(h, &sn_guid, (VOID **)&nif->nif_devdata);
|
||||
if (status != EFI_SUCCESS) {
|
||||
printf("net%d: cannot start interface (status=%ld)\n",
|
||||
nif->nif_unit, (long)status);
|
||||
printf("net%d: cannot start interface (status=%lu)\n",
|
||||
nif->nif_unit, EFI_ERROR_CODE(status));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -288,11 +293,30 @@ efinet_dev_init()
|
||||
stats = calloc(nifs, sizeof(struct netif_stats));
|
||||
|
||||
for (i = 0; i < nifs; i++) {
|
||||
EFI_SIMPLE_NETWORK *net;
|
||||
EFI_HANDLE h;
|
||||
|
||||
dif = &efinetif.netif_ifs[i];
|
||||
dif->dif_unit = -1;
|
||||
|
||||
h = efi_find_handle(&efinet_dev, i);
|
||||
|
||||
/*
|
||||
* Open the network device in exclusive mode. Without this
|
||||
* we will be racing with the UEFI network stack. It will
|
||||
* pull packets off the network leading to lost packets.
|
||||
*/
|
||||
status = BS->OpenProtocol(h, &sn_guid, (void **)&net,
|
||||
IH, 0, EFI_OPEN_PROTOCOL_EXCLUSIVE);
|
||||
if (status != EFI_SUCCESS) {
|
||||
printf("Unable to open network interface %d\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
dif->dif_unit = i;
|
||||
dif->dif_nsel = 1;
|
||||
dif->dif_stats = &stats[i];
|
||||
dif->dif_private = efi_find_handle(&efinet_dev, i);
|
||||
dif->dif_private = h;
|
||||
}
|
||||
|
||||
return (0);
|
||||
|
sys/boot/fdt/dts/riscv/spike.dts (new file, 92 lines)
@ -0,0 +1,92 @@
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Portions of this software were developed by SRI International and the
 * University of Cambridge Computer Laboratory under DARPA/AFRL contract
 * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Portions of this software were developed by the University of Cambridge
 * Computer Laboratory as part of the CTSRD Project, with support from the
 * UK Higher Education Innovation Fund (HEIF).
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/dts-v1/;

/ {
	model = "UC Berkeley Spike Simulator RV64I";
	compatible = "riscv,rv64i";
	#address-cells = <1>;
	#size-cells = <1>;
	#interrupt-cells = <1>;

	aliases {
		console0 = &console0;
	};

	memory {
		device_type = "memory";
		reg = <0x0 0x8000000>; /* 128MB at 0x0 */
	};

	soc {
		#address-cells = <2>;
		#size-cells = <2>;
		#interrupt-cells = <1>;

		compatible = "simple-bus";
		ranges;

		pic0: pic@0 {
			compatible = "riscv,pic";
			interrupt-controller;
		};

		timer0: timer@0 {
			compatible = "riscv,timer";
			interrupts = < 1 >;
			interrupt-parent = < &pic0 >;
			clock-frequency = < 1000000 >;
		};

		htif0: htif@0 {
			compatible = "riscv,htif";
			interrupts = < 0 >;
			interrupt-parent = < &pic0 >;

			console0: console@0 {
				compatible = "htif,console";
				status = "okay";
			};
		};
	};

	chosen {
		bootargs = "-v";
		stdin = "console0";
		stdout = "console0";
	};
};
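
Not part of the commit: consumers typically resolve the console through the
/chosen node and the /aliases table above. A minimal libfdt sketch (the
find_stdout() name and the in-memory fdt blob are assumptions; the libfdt
calls are the stock API shipped in sys/contrib/libfdt):

	#include <libfdt.h>

	/* Return the node offset of the console named by /chosen/stdout, or < 0. */
	static int
	find_stdout(const void *fdt)
	{
		const char *name, *path;
		int chosen, len;

		chosen = fdt_path_offset(fdt, "/chosen");
		if (chosen < 0)
			return (chosen);
		name = fdt_getprop(fdt, chosen, "stdout", &len);
		if (name == NULL)
			return (-FDT_ERR_NOTFOUND);
		/* "console0" is an alias in this DTS; map it back to a full path. */
		path = fdt_get_alias(fdt, name);
		if (path == NULL)
			return (-FDT_ERR_NOTFOUND);
		return (fdt_path_offset(fdt, path));
	}
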
sys/boot/ficl/riscv/sysdep.c (new file, 99 lines)
@@ -0,0 +1,99 @@
/*******************************************************************
** s y s d e p . c
** Forth Inspired Command Language
** Author: John Sadler (john_sadler@alum.mit.edu)
** Created: 16 Oct 1997
** Implementations of FICL external interface functions...
**
*******************************************************************/

/* $FreeBSD$ */

#ifdef TESTMAIN
#include <stdio.h>
#include <stdlib.h>
#else
#include <stand.h>
#endif
#include "ficl.h"

/*
******************* FreeBSD P O R T B E G I N S H E R E ******************** Michael Smith
*/

#if PORTABLE_LONGMULDIV == 0
DPUNS ficlLongMul(FICL_UNS x, FICL_UNS y)
{
    DPUNS q;
    u_int64_t qx;

    qx = (u_int64_t)x * (u_int64_t) y;

    q.hi = (u_int32_t)( qx >> 32 );
    q.lo = (u_int32_t)( qx & 0xFFFFFFFFL);

    return q;
}

UNSQR ficlLongDiv(DPUNS q, FICL_UNS y)
{
    UNSQR result;
    u_int64_t qx, qh;

    qh = q.hi;
    qx = (qh << 32) | q.lo;

    result.quot = qx / y;
    result.rem = qx % y;

    return result;
}
#endif

void ficlTextOut(FICL_VM *pVM, char *msg, int fNewline)
{
    IGNORE(pVM);

    while(*msg != 0)
        putchar(*(msg++));
    if (fNewline)
        putchar('\n');

    return;
}

void *ficlMalloc (size_t size)
{
    return malloc(size);
}

void *ficlRealloc (void *p, size_t size)
{
    return realloc(p, size);
}

void ficlFree (void *p)
{
    free(p);
}


/*
** Stub function for dictionary access control - does nothing
** by default, user can redefine to guarantee exclusive dict
** access to a single thread for updates. All dict update code
** is guaranteed to be bracketed as follows:
** ficlLockDictionary(TRUE);
** <code that updates dictionary>
** ficlLockDictionary(FALSE);
**
** Returns zero if successful, nonzero if unable to acquire lock
** befor timeout (optional - could also block forever)
*/
#if FICL_MULTITHREAD
int ficlLockDictionary(short fLock)
{
    IGNORE(fLock);
    return 0;
}
#endif /* FICL_MULTITHREAD */
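
For reference (not part of the commit): ficlLongMul()/ficlLongDiv() above
implement the double-cell multiply/divide contract in portable C. A small
self-contained illustration of that contract, written for 32-bit cells for
clarity (the longmul/longdiv names are local to this example):

	#include <assert.h>
	#include <stdint.h>

	struct dpuns { uint32_t hi; uint32_t lo; };	/* double-cell product */
	struct unsqr { uint32_t quot; uint32_t rem; };	/* quotient/remainder */

	static struct dpuns
	longmul(uint32_t x, uint32_t y)
	{
		uint64_t qx = (uint64_t)x * (uint64_t)y;
		struct dpuns q = { (uint32_t)(qx >> 32), (uint32_t)qx };

		return (q);
	}

	static struct unsqr
	longdiv(struct dpuns q, uint32_t y)
	{
		uint64_t qx = ((uint64_t)q.hi << 32) | q.lo;
		struct unsqr r = { (uint32_t)(qx / y), (uint32_t)(qx % y) };

		return (r);
	}

	int
	main(void)
	{
		/* (x * y) / y must give back x with no remainder. */
		struct dpuns p = longmul(0xdeadbeef, 48611);
		struct unsqr r = longdiv(p, 48611);

		assert(r.quot == 0xdeadbeef && r.rem == 0);
		return (0);
	}
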
sys/boot/ficl/riscv/sysdep.h (new file, 411 lines)
@@ -0,0 +1,411 @@
/*******************************************************************
s y s d e p . h
** Forth Inspired Command Language
** Author: John Sadler (john_sadler@alum.mit.edu)
** Created: 16 Oct 1997
** Ficl system dependent types and prototypes...
**
** Note: Ficl also depends on the use of "assert" when
** FICL_ROBUST is enabled. This may require some consideration
** in firmware systems since assert often
** assumes stderr/stdout.
** $Id: sysdep.h,v 1.6 2001-04-26 21:41:55-07 jsadler Exp jsadler $
*******************************************************************/
/*
** Copyright (c) 1997-2001 John Sadler (john_sadler@alum.mit.edu)
** All rights reserved.
**
** Get the latest Ficl release at http://ficl.sourceforge.net
**
** L I C E N S E and D I S C L A I M E R
**
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
** notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
** notice, this list of conditions and the following disclaimer in the
** documentation and/or other materials provided with the distribution.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
** SUCH DAMAGE.
**
** I am interested in hearing from anyone who uses ficl. If you have
** a problem, a success story, a defect, an enhancement request, or
** if you would like to contribute to the ficl release, please send
** contact me by email at the address above.
**
** $Id: sysdep.h,v 1.6 2001-04-26 21:41:55-07 jsadler Exp jsadler $
** $FreeBSD$
*/

#if !defined (__SYSDEP_H__)
#define __SYSDEP_H__

#include <sys/types.h>

#include <stddef.h> /* size_t, NULL */
#include <setjmp.h>
#include <assert.h>

#if !defined IGNORE /* Macro to silence unused param warnings */
#define IGNORE(x) (void)(x)
#endif

/*
** TRUE and FALSE for C boolean operations, and
** portable 32 bit types for CELLs
**
*/
#if !defined TRUE
#define TRUE 1
#endif
#if !defined FALSE
#define FALSE 0
#endif


/*
** System dependent data type declarations...
*/
#if !defined INT32
#define INT32 int
#endif

#if !defined UNS32
#define UNS32 unsigned int
#endif

#if !defined UNS16
#define UNS16 unsigned short
#endif

#if !defined UNS8
#define UNS8 unsigned char
#endif

#if !defined NULL
#define NULL ((void *)0)
#endif

/*
** FICL_UNS and FICL_INT must have the same size as a void* on
** the target system. A CELL is a union of void*, FICL_UNS, and
** FICL_INT.
** (11/2000: same for FICL_FLOAT)
*/
#if !defined FICL_INT
#define FICL_INT long
#endif

#if !defined FICL_UNS
#define FICL_UNS unsigned long
#endif

#if !defined FICL_FLOAT
#define FICL_FLOAT float
#endif

/*
** Ficl presently supports values of 32 and 64 for BITS_PER_CELL
*/
#if !defined BITS_PER_CELL
#define BITS_PER_CELL 64
#endif

#if ((BITS_PER_CELL != 32) && (BITS_PER_CELL != 64))
Error!
#endif

typedef struct
{
    FICL_UNS hi;
    FICL_UNS lo;
} DPUNS;

typedef struct
{
    FICL_UNS quot;
    FICL_UNS rem;
} UNSQR;

typedef struct
{
    FICL_INT hi;
    FICL_INT lo;
} DPINT;

typedef struct
{
    FICL_INT quot;
    FICL_INT rem;
} INTQR;


/*
** B U I L D C O N T R O L S
*/

#if !defined (FICL_MINIMAL)
#define FICL_MINIMAL 0
#endif
#if (FICL_MINIMAL)
#define FICL_WANT_SOFTWORDS 0
#define FICL_WANT_FLOAT 0
#define FICL_WANT_USER 0
#define FICL_WANT_LOCALS 0
#define FICL_WANT_DEBUGGER 0
#define FICL_WANT_OOP 0
#define FICL_PLATFORM_EXTEND 0
#define FICL_MULTITHREAD 0
#define FICL_ROBUST 0
#define FICL_EXTENDED_PREFIX 0
#endif

/*
** FICL_PLATFORM_EXTEND
** Includes words defined in ficlCompilePlatform
*/
#if !defined (FICL_PLATFORM_EXTEND)
#define FICL_PLATFORM_EXTEND 1
#endif

/*
** FICL_WANT_FLOAT
** Includes a floating point stack for the VM, and words to do float operations.
** Contributed by Guy Carver
*/
#if !defined (FICL_WANT_FLOAT)
#define FICL_WANT_FLOAT 0
#endif

/*
** FICL_WANT_DEBUGGER
** Inludes a simple source level debugger
*/
#if !defined (FICL_WANT_DEBUGGER)
#define FICL_WANT_DEBUGGER 1
#endif

/*
** User variables: per-instance variables bound to the VM.
** Kinda like thread-local storage. Could be implemented in a
** VM private dictionary, but I've chosen the lower overhead
** approach of an array of CELLs instead.
*/
#if !defined FICL_WANT_USER
#define FICL_WANT_USER 1
#endif

#if !defined FICL_USER_CELLS
#define FICL_USER_CELLS 16
#endif

/*
** FICL_WANT_LOCALS controls the creation of the LOCALS wordset and
** a private dictionary for local variable compilation.
*/
#if !defined FICL_WANT_LOCALS
#define FICL_WANT_LOCALS 1
#endif

/* Max number of local variables per definition */
#if !defined FICL_MAX_LOCALS
#define FICL_MAX_LOCALS 16
#endif

/*
** FICL_WANT_OOP
** Inludes object oriented programming support (in softwords)
** OOP support requires locals and user variables!
*/
#if !(FICL_WANT_LOCALS) || !(FICL_WANT_USER)
#if !defined (FICL_WANT_OOP)
#define FICL_WANT_OOP 0
#endif
#endif

#if !defined (FICL_WANT_OOP)
#define FICL_WANT_OOP 1
#endif

/*
** FICL_WANT_SOFTWORDS
** Controls inclusion of all softwords in softcore.c
*/
#if !defined (FICL_WANT_SOFTWORDS)
#define FICL_WANT_SOFTWORDS 1
#endif

/*
** FICL_MULTITHREAD enables dictionary mutual exclusion
** wia the ficlLockDictionary system dependent function.
** Note: this implementation is experimental and poorly
** tested. Further, it's unnecessary unless you really
** intend to have multiple SESSIONS (poor choice of name
** on my part) - that is, threads that modify the dictionary
** at the same time.
*/
#if !defined FICL_MULTITHREAD
#define FICL_MULTITHREAD 0
#endif

/*
** PORTABLE_LONGMULDIV causes ficlLongMul and ficlLongDiv to be
** defined in C in sysdep.c. Use this if you cannot easily
** generate an inline asm definition
*/
#if !defined (PORTABLE_LONGMULDIV)
#define PORTABLE_LONGMULDIV 0
#endif

/*
** INLINE_INNER_LOOP causes the inner interpreter to be inline code
** instead of a function call. This is mainly because MS VC++ 5
** chokes with an internal compiler error on the function version.
** in release mode. Sheesh.
*/
#if !defined INLINE_INNER_LOOP
#if defined _DEBUG
#define INLINE_INNER_LOOP 0
#else
#define INLINE_INNER_LOOP 1
#endif
#endif

/*
** FICL_ROBUST enables bounds checking of stacks and the dictionary.
** This will detect stack over and underflows and dictionary overflows.
** Any exceptional condition will result in an assertion failure.
** (As generated by the ANSI assert macro)
** FICL_ROBUST == 1 --> stack checking in the outer interpreter
** FICL_ROBUST == 2 also enables checking in many primitives
*/

#if !defined FICL_ROBUST
#define FICL_ROBUST 2
#endif

/*
** FICL_DEFAULT_STACK Specifies the default size (in CELLs) of
** a new virtual machine's stacks, unless overridden at
** create time.
*/
#if !defined FICL_DEFAULT_STACK
#define FICL_DEFAULT_STACK 128
#endif

/*
** FICL_DEFAULT_DICT specifies the number of CELLs to allocate
** for the system dictionary by default. The value
** can be overridden at startup time as well.
** FICL_DEFAULT_ENV specifies the number of cells to allot
** for the environment-query dictionary.
*/
#if !defined FICL_DEFAULT_DICT
#define FICL_DEFAULT_DICT 12288
#endif

#if !defined FICL_DEFAULT_ENV
#define FICL_DEFAULT_ENV 260
#endif

/*
** FICL_DEFAULT_VOCS specifies the maximum number of wordlists in
** the dictionary search order. See Forth DPANS sec 16.3.3
** (file://dpans16.htm#16.3.3)
*/
#if !defined FICL_DEFAULT_VOCS
#define FICL_DEFAULT_VOCS 16
#endif

/*
** FICL_MAX_PARSE_STEPS controls the size of an array in the FICL_SYSTEM structure
** that stores pointers to parser extension functions. I would never expect to have
** more than 8 of these, so that's the default limit. Too many of these functions
** will probably exact a nasty performance penalty.
*/
#if !defined FICL_MAX_PARSE_STEPS
#define FICL_MAX_PARSE_STEPS 8
#endif

/*
** FICL_EXTENDED_PREFIX enables a bunch of extra prefixes in prefix.c and prefix.fr (if
** included as part of softcore.c)
*/
#if !defined FICL_EXTENDED_PREFIX
#define FICL_EXTENDED_PREFIX 0
#endif

/*
** FICL_ALIGN is the power of two to which the dictionary
** pointer address must be aligned. This value is usually
** either 1 or 2, depending on the memory architecture
** of the target system; 2 is safe on any 16 or 32 bit
** machine. 3 would be appropriate for a 64 bit machine.
*/
#if !defined FICL_ALIGN
#define FICL_ALIGN 3
#define FICL_ALIGN_ADD ((1 << FICL_ALIGN) - 1)
#endif

/*
** System dependent routines --
** edit the implementations in sysdep.c to be compatible
** with your runtime environment...
** ficlTextOut sends a NULL terminated string to the
** default output device - used for system error messages
** ficlMalloc and ficlFree have the same semantics as malloc and free
** in standard C
** ficlLongMul multiplies two UNS32s and returns a 64 bit unsigned
** product
** ficlLongDiv divides an UNS64 by an UNS32 and returns UNS32 quotient
** and remainder
*/
struct vm;
void ficlTextOut(struct vm *pVM, char *msg, int fNewline);
void *ficlMalloc (size_t size);
void ficlFree (void *p);
void *ficlRealloc(void *p, size_t size);
/*
** Stub function for dictionary access control - does nothing
** by default, user can redefine to guarantee exclusive dict
** access to a single thread for updates. All dict update code
** must be bracketed as follows:
** ficlLockDictionary(TRUE);
** <code that updates dictionary>
** ficlLockDictionary(FALSE);
**
** Returns zero if successful, nonzero if unable to acquire lock
** before timeout (optional - could also block forever)
**
** NOTE: this function must be implemented with lock counting
** semantics: nested calls must behave properly.
*/
#if FICL_MULTITHREAD
int ficlLockDictionary(short fLock);
#else
#define ficlLockDictionary(x) 0 /* ignore */
#endif

/*
** 64 bit integer math support routines: multiply two UNS32s
** to get a 64 bit product, & divide the product by an UNS32
** to get an UNS32 quotient and remainder. Much easier in asm
** on a 32 bit CPU than in C, which usually doesn't support
** the double length result (but it should).
*/
DPUNS ficlLongMul(FICL_UNS x, FICL_UNS y);
UNSQR ficlLongDiv(DPUNS q, FICL_UNS y);

#endif /*__SYSDEP_H__*/
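
Worth spelling out (not part of the commit): FICL_ALIGN is 3 here, so the
dictionary rounds pointers up to 1 << 3 = 8-byte boundaries, with
FICL_ALIGN_ADD (0x7) acting as the round-up mask. A minimal illustration of
that rounding idiom (align_up() is a name local to this example; whether the
dictionary uses exactly this expression is an assumption, the two macros are
taken from the header above):

	#include <assert.h>
	#include <stdint.h>

	#define FICL_ALIGN	3
	#define FICL_ALIGN_ADD	((1 << FICL_ALIGN) - 1)

	/* Round an address up to the next 8-byte boundary. */
	static uintptr_t
	align_up(uintptr_t addr)
	{
		return ((addr + FICL_ALIGN_ADD) & ~(uintptr_t)FICL_ALIGN_ADD);
	}

	int
	main(void)
	{
		assert(align_up(0x1001) == 0x1008);
		assert(align_up(0x1008) == 0x1008);	/* already aligned */
		return (0);
	}
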
@@ -773,9 +773,6 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td)

if (softc->open_count == 0) {
struct pass_io_req *io_req, *io_req2;
int need_unlock;

need_unlock = 0;

TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) {
TAILQ_REMOVE(&softc->done_queue, io_req, links);
@@ -4961,10 +4961,6 @@ sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info)
/*sense_len*/ SSD_FULL_SIZE,
/*timeout*/ SPACE_TIMEOUT);
} else {
uint32_t blk_pointer;

blk_pointer = locate_info->logical_id;

scsi_locate_10(&ccb->csio,
/*retries*/ 1,
/*cbfcnp*/ sadone,
@@ -51,7 +51,7 @@ extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value);
extern void membar_producer(void);

#if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \
defined(__mips__) || defined(__aarch64__)
defined(__mips__) || defined(__aarch64__) || defined(__riscv__)
extern void atomic_or_8(volatile uint8_t *target, uint8_t value);
#else
static __inline void
@@ -1652,7 +1652,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
int spill_data_size = 0;
int spill_attr_count = 0;
int error;
uint16_t length;
uint16_t length, reg_length;
int i, j, k, length_idx;
sa_hdr_phys_t *hdr;
sa_idx_tab_t *idx_tab;
@@ -1712,34 +1712,50 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
hdr = SA_GET_HDR(hdl, SA_BONUS);
idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS);
for (; k != 2; k++) {
/* iterate over each attribute in layout */
/*
* Iterate over each attribute in layout. Fetch the
* size of variable-length attributes needing rewrite
* from sa_lengths[].
*/
for (i = 0, length_idx = 0; i != count; i++) {
sa_attr_type_t attr;

attr = idx_tab->sa_layout->lot_attrs[i];
if (attr == newattr) {
/* duplicate attributes are not allowed */
ASSERT(action == SA_REPLACE ||
action == SA_REMOVE);
/* must be variable-sized to be replaced here */
if (action == SA_REPLACE) {
ASSERT(SA_REGISTERED_LEN(sa, attr) == 0);
SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen);
}
reg_length = SA_REGISTERED_LEN(sa, attr);
if (reg_length == 0) {
length = hdr->sa_lengths[length_idx];
length_idx++;
} else {
length = SA_REGISTERED_LEN(sa, attr);
if (length == 0) {
length = hdr->sa_lengths[length_idx];
}
length = reg_length;
}
if (attr == newattr) {
/*
* There is nothing to do for SA_REMOVE,
* so it is just skipped.
*/
if (action == SA_REMOVE)
continue;

/*
* Duplicate attributes are not allowed, so the
* action can not be SA_ADD here.
*/
ASSERT3S(action, ==, SA_REPLACE);

/*
* Only a variable-sized attribute can be
* replaced here, and its size must be changing.
*/
ASSERT3U(reg_length, ==, 0);
ASSERT3U(length, !=, buflen);
SA_ADD_BULK_ATTR(attr_desc, j, attr,
locator, datastart, buflen);
} else {
SA_ADD_BULK_ATTR(attr_desc, j, attr,
NULL, (void *)
(TOC_OFF(idx_tab->sa_idx_tab[attr]) +
(uintptr_t)old_data[k]), length);
}
if (SA_REGISTERED_LEN(sa, attr) == 0)
length_idx++;
}
if (k == 0 && hdl->sa_spill) {
hdr = SA_GET_HDR(hdl, SA_SPILL);
@@ -1750,10 +1766,8 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr,
}
}
if (action == SA_ADD) {
length = SA_REGISTERED_LEN(sa, newattr);
if (length == 0) {
length = buflen;
}
reg_length = SA_REGISTERED_LEN(sa, newattr);
IMPLY(reg_length != 0, reg_length == buflen);
SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator,
datastart, buflen);
}
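
The new comments above boil down to one rule: an attribute whose registered
length is zero is variable-sized, so its current size must be read from the
header's sa_lengths[] array and the running index into that array advances;
fixed-size attributes simply use the registered length. A stripped-down
sketch of just that selection (attr_length() and its arguments are
hypothetical names, not the ZFS code itself):

	#include <stdint.h>

	static uint16_t
	attr_length(uint16_t reg_length, const uint16_t *sa_lengths, int *length_idx)
	{
		if (reg_length == 0)		/* variable-sized attribute */
			return (sa_lengths[(*length_idx)++]);
		return (reg_length);		/* fixed-size attribute */
	}
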
@@ -134,6 +134,9 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME");
static int volmode = ZFS_VOLMODE_GEOM;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0,
"Expose as GEOM providers (1), device files (2) or neither");
static boolean_t zpool_on_zvol = B_FALSE;
SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0,
"Allow zpools to use zvols as vdevs (DANGEROUS)");

#endif
typedef struct zvol_extent {
@@ -1114,7 +1117,9 @@ zvol_open(struct g_provider *pp, int flag, int count)
return (err);
}
#else /* !illumos */
if (tsd_get(zfs_geom_probe_vdev_key) != NULL) {
boolean_t locked = B_FALSE;

if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) {
/*
* if zfs_geom_probe_vdev_key is set, that means that zfs is
* attempting to probe geom providers while looking for a
@@ -1125,19 +1130,34 @@ zvol_open(struct g_provider *pp, int flag, int count)
*/
return (EOPNOTSUPP);
}

mutex_enter(&zfsdev_state_lock);
/*
* Protect against recursively entering spa_namespace_lock
* when spa_open() is used for a pool on a (local) ZVOL(s).
* This is needed since we replaced upstream zfsdev_state_lock
* with spa_namespace_lock in the ZVOL code.
* We are using the same trick as spa_open().
* Note that calls in zvol_first_open which need to resolve
* pool name to a spa object will enter spa_open()
* recursively, but that function already has all the
* necessary protection.
*/
if (!MUTEX_HELD(&zfsdev_state_lock)) {
mutex_enter(&zfsdev_state_lock);
locked = B_TRUE;
}

zv = pp->private;
if (zv == NULL) {
mutex_exit(&zfsdev_state_lock);
if (locked)
mutex_exit(&zfsdev_state_lock);
return (SET_ERROR(ENXIO));
}

if (zv->zv_total_opens == 0) {
err = zvol_first_open(zv);
if (err) {
mutex_exit(&zfsdev_state_lock);
if (locked)
mutex_exit(&zfsdev_state_lock);
return (err);
}
pp->mediasize = zv->zv_volsize;
@@ -1171,7 +1191,8 @@ zvol_open(struct g_provider *pp, int flag, int count)
mutex_exit(&zfsdev_state_lock);
#else
zv->zv_total_opens += count;
mutex_exit(&zfsdev_state_lock);
if (locked)
mutex_exit(&zfsdev_state_lock);
#endif

return (err);
@@ -1181,7 +1202,8 @@ zvol_open(struct g_provider *pp, int flag, int count)
#ifdef illumos
mutex_exit(&zfsdev_state_lock);
#else
mutex_exit(&zfsdev_state_lock);
if (locked)
mutex_exit(&zfsdev_state_lock);
#endif
return (err);
}
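
The zvol_open() changes above avoid re-entering zfsdev_state_lock when the
open happens recursively, i.e. when a pool is being opened on top of a zvol.
The pattern is "acquire the lock only if this thread does not already hold
it, and remember whether we did". A generic sketch of the same idiom with
mutex(9) (illustrative only, not the ZFS code; do_work_maybe_locked() is a
hypothetical name):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static void
	do_work_maybe_locked(struct mtx *lock)
	{
		int locked = 0;

		if (!mtx_owned(lock)) {		/* analogous to !MUTEX_HELD() above */
			mtx_lock(lock);
			locked = 1;
		}

		/* ... critical section; callees may take the lock path again ... */

		if (locked)
			mtx_unlock(lock);
	}
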
@@ -388,6 +388,48 @@ extern "C" {
#define _DONT_USE_1275_GENERIC_NAMES
#define _HAVE_CPUID_INSN

#elif defined(__riscv__)

/*
* Define the appropriate "processor characteristics"
*/
#define _STACK_GROWS_DOWNWARD
#define _LONG_LONG_LTOH
#define _BIT_FIELDS_LTOH
#define _IEEE_754
#define _CHAR_IS_UNSIGNED
#define _BOOL_ALIGNMENT 1
#define _CHAR_ALIGNMENT 1
#define _SHORT_ALIGNMENT 2
#define _INT_ALIGNMENT 4
#define _FLOAT_ALIGNMENT 4
#define _FLOAT_COMPLEX_ALIGNMENT 4
#define _LONG_ALIGNMENT 8
#define _LONG_LONG_ALIGNMENT 8
#define _DOUBLE_ALIGNMENT 8
#define _DOUBLE_COMPLEX_ALIGNMENT 8
#define _LONG_DOUBLE_ALIGNMENT 16
#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 16
#define _POINTER_ALIGNMENT 8
#define _MAX_ALIGNMENT 16
#define _ALIGNMENT_REQUIRED 1

#define _LONG_LONG_ALIGNMENT_32 _LONG_LONG_ALIGNMENT

/*
* Define the appropriate "implementation choices"
*/
#if !defined(_LP64)
#define _LP64
#endif
#define _SUNOS_VTOC_16
#define _DMA_USES_PHYSADDR
#define _FIRMWARE_NEEDS_FDISK
#define _PSM_MODULES
#define _RTC_CONFIG
#define _DONT_USE_1275_GENERIC_NAMES
#define _HAVE_CPUID_INSN

#elif defined(__arm__)

/*
@@ -68,7 +68,6 @@ SYSTEM_LD_TAIL +=;sed s/" + SIZEOF_HEADERS"// ldscript.$M\

FILES_CPU_FUNC = \
$S/$M/$M/cpufunc_asm_arm9.S \
$S/$M/$M/cpufunc_asm_arm10.S \
$S/$M/$M/cpufunc_asm_xscale.S $S/$M/$M/cpufunc_asm.S \
$S/$M/$M/cpufunc_asm_xscale_c3.S $S/$M/$M/cpufunc_asm_armv5_ec.S \
$S/$M/$M/cpufunc_asm_fa526.S $S/$M/$M/cpufunc_asm_sheeva.S \

sys/conf/Makefile.riscv (new file, 49 lines)
@@ -0,0 +1,49 @@
# Makefile.riscv -- with config changes.
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
# from FreeBSD: src/sys/conf/Makefile.i386,v 1.255 2002/02/20 23:35:49
# $FreeBSD$
#
# Makefile for FreeBSD
#
# RISCVTODO: copy pasted from aarch64, needs to be
# constructed from a machine description:
# config machineid
# Most changes should be made in the machine description
# /sys/riscv/conf/``machineid''
# after which you should do
# config machineid
# Generic makefile changes should be made in
# /sys/conf/Makefile.riscv
# after which config should be rerun for all machines.
#

# Which version of config(8) is required.
%VERSREQ= 600012

.if !defined(S)
S= ../../..
.endif
.include "$S/conf/kern.pre.mk"

INCLUDES+= -I$S/contrib/libfdt

.if !empty(DDB_ENABLED)
CFLAGS += -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer
.endif

%BEFORE_DEPEND

%OBJS

%FILES.c

%FILES.s

%FILES.m

%CLEAN

%RULES

.include "$S/conf/kern.post.mk"

@@ -1078,6 +1078,14 @@ options UFS_GJOURNAL

# Make space in the kernel for a root filesystem on a md device.
# Define to the number of kilobytes to reserve for the filesystem.
# This is now optional.
# If not defined, the root filesystem passed in as the MFS_IMAGE makeoption
# will be automatically embedded in the kernel during linking. Its exact size
# will be consumed within the kernel.
# If defined, the old way of embedding the filesystem in the kernel will be
# used. That is to say MD_ROOT_SIZE KB will be allocated in the kernel and
# later, the filesystem image passed in as the MFS_IMAGE makeoption will be
# dd'd into the reserved space if it fits.
options MD_ROOT_SIZE=10

# Make the md device a potential root device, either with preloaded
@@ -11,8 +11,7 @@ arm/arm/busdma_machdep-v6.c optional armv6
arm/arm/copystr.S standard
arm/arm/cpufunc.c standard
arm/arm/cpufunc_asm.S standard
arm/arm/cpufunc_asm_arm9.S optional cpu_arm9
arm/arm/cpufunc_asm_arm10.S optional cpu_arm9e
arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 | cpu_arm9e
arm/arm/cpufunc_asm_arm11.S optional cpu_arm1176
arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176
arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_xscale_80321 | cpu_xscale_pxa2x0 | cpu_xscale_ixp425 | cpu_xscale_80219 | cpu_xscale_81342
@@ -74,6 +73,8 @@ arm/arm/stdatomic.c standard \
compile-with "${NORMAL_C:N-Wmissing-prototypes}"
arm/arm/support.S standard
arm/arm/swtch.S standard
arm/arm/swtch-v4.S optional !armv6
arm/arm/swtch-v6.S optional armv6
arm/arm/sys_machdep.c standard
arm/arm/syscall.c standard
arm/arm/trap.c optional !armv6

sys/conf/files.riscv (new file, 44 lines)
@@ -0,0 +1,44 @@
# $FreeBSD$
crypto/blowfish/bf_enc.c optional crypto | ipsec
crypto/des/des_enc.c optional crypto | ipsec | netsmb
kern/kern_clocksource.c standard
kern/subr_dummy_vdso_tc.c standard
libkern/bcmp.c standard
libkern/ffs.c standard
libkern/ffsl.c standard
libkern/fls.c standard
libkern/flsl.c standard
libkern/flsll.c standard
libkern/memmove.c standard
libkern/memset.c standard
riscv/htif/htif.c standard
riscv/htif/htif_block.c standard
riscv/htif/htif_console.c standard
riscv/riscv/autoconf.c standard
riscv/riscv/bcopy.c standard
riscv/riscv/bus_machdep.c standard
riscv/riscv/busdma_machdep.c standard
riscv/riscv/clock.c standard
riscv/riscv/copyinout.S standard
riscv/riscv/copystr.c standard
riscv/riscv/cpufunc_asm.S standard
riscv/riscv/devmap.c standard
riscv/riscv/dump_machdep.c standard
riscv/riscv/elf_machdep.c standard
riscv/riscv/intr_machdep.c standard
riscv/riscv/in_cksum.c optional inet | inet6
riscv/riscv/identcpu.c standard
riscv/riscv/locore.S standard no-obj
riscv/riscv/minidump_machdep.c standard
riscv/riscv/machdep.c standard
riscv/riscv/mem.c standard
riscv/riscv/nexus.c standard
riscv/riscv/pmap.c standard
riscv/riscv/sys_machdep.c standard
riscv/riscv/support.S standard
riscv/riscv/swtch.S standard
riscv/riscv/trap.c standard
riscv/riscv/timer.c standard
riscv/riscv/uio_machdep.c standard
riscv/riscv/uma_machdep.c standard
riscv/riscv/vm_machdep.c standard

@@ -104,6 +104,10 @@ CFLAGS += -mgeneral-regs-only
CFLAGS += -ffixed-x18
.endif

.if ${MACHINE_CPUARCH} == "riscv"
INLINE_LIMIT?= 8000
.endif

#
# For sparc64 we want the medany code model so modules may be located
# anywhere in the 64-bit address space. We also tell GCC to use floating
@@ -130,6 +130,9 @@ ${FULLKERNEL}: ${SYSTEM_DEP} vers.o
@rm -f ${.TARGET}
@echo linking ${.TARGET}
${SYSTEM_LD}
.if !empty(MD_ROOT_SIZE_CONFIGURED) && defined(MFS_IMAGE)
@sh ${S}/tools/embed_mfs.sh ${.TARGET} ${MFS_IMAGE}
.endif
.if ${MK_CTF} != "no"
@echo ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ...
@${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${SYSTEM_OBJS} vers.o
@@ -353,6 +356,7 @@ vnode_if_typedef.h:
${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -q

.if ${MFS_IMAGE:Uno} != "no"
.if empty(MD_ROOT_SIZE_CONFIGURED)
# Generate an object file from the file system image to embed in the kernel
# via linking. Make sure the contents are in the mfs section and rename the
# start/end/size variables to __start_mfs, __stop_mfs, and mfs_size,
@@ -372,6 +376,7 @@ embedfs_${MFS_IMAGE:T:R}.o: ${MFS_IMAGE}
_binary_${MFS_IMAGE:C,[^[:alnum:]],_,g}_end=mfs_root_end \
${.TARGET}
.endif
.endif

# XXX strictly, everything depends on Makefile because changes to ${PROF}
# only appear there, but we don't handle that.
Some files were not shown because too many files have changed in this diff.