diff --git a/Makefile b/Makefile index 1bffd18ec29b..9e200a3e973d 100644 --- a/Makefile +++ b/Makefile @@ -137,6 +137,7 @@ TGTS+= ${BITGTS} .ORDER: buildworld installworld .ORDER: buildworld distributeworld .ORDER: buildworld buildkernel +.ORDER: installworld distribution .ORDER: buildkernel installkernel .ORDER: buildkernel installkernel.debug .ORDER: buildkernel reinstallkernel @@ -329,7 +330,7 @@ bmake: .PHONY ${MMAKE} all; \ ${MMAKE} install DESTDIR=${MYMAKE:H} BINDIR= -tinderbox toolchains kernel-toolchains: upgrade_checks +tinderbox toolchains kernel-toolchains kernels worlds: upgrade_checks tinderbox: @cd ${.CURDIR}; ${SUB_MAKE} DOING_TINDERBOX=YES universe @@ -340,6 +341,12 @@ toolchains: kernel-toolchains: @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=kernel-toolchain universe +kernels: + @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildkernel universe + +worlds: + @cd ${.CURDIR}; ${SUB_MAKE} UNIVERSE_TARGET=buildworld universe + # # universe # diff --git a/Makefile.inc1 b/Makefile.inc1 index 5650ef2134d5..a13438225960 100644 --- a/Makefile.inc1 +++ b/Makefile.inc1 @@ -72,7 +72,7 @@ SRCDIR?= ${.CURDIR} SUBDIR= ${SUBDIR_OVERRIDE} .else SUBDIR= lib libexec -.if make(install*) +.if !defined(NO_ROOT) && (make(installworld) || make(install)) # Ensure libraries are installed before progressing. SUBDIR+=.WAIT .endif @@ -127,7 +127,7 @@ SUBDIR+= ${_DIR} # by calling 'makedb' in share/man. This is only relevant for # install/distribute so they build the whatis file after every manpage is # installed. -.if make(install*) +.if make(installworld) || make(install) SUBDIR+=.WAIT .endif SUBDIR+=etc @@ -1125,9 +1125,7 @@ distrib-dirs: .MAKE .PHONY ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ ${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET} -distribution: .MAKE .PHONY - ${_+_}cd ${.CURDIR}/etc; ${CROSSENV} PATH=${TMPPATH} ${MAKE} \ - ${IMAKE_INSTALL} ${IMAKE_MTREE} METALOG=${METALOG} ${.TARGET} +distribution: distrib-dirs .MAKE .PHONY ${_+_}cd ${.CURDIR}; ${CROSSENV} PATH=${TMPPATH} \ ${MAKE} -f Makefile.inc1 ${IMAKE_INSTALL} \ METALOG=${METALOG} installconfig diff --git a/UPDATING b/UPDATING index 1b98e40cbaf2..123c05ae975e 100644 --- a/UPDATING +++ b/UPDATING @@ -31,6 +31,13 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW: disable the most expensive debugging functionality run "ln -s 'abort:false,junk:false' /etc/malloc.conf".) +20160129: + Building ZFS pools on top of zvols is prohibited by default. That + feature has never worked safely; it's always been prone to deadlocks. + Using a zvol as the backing store for a VM guest's virtual disk will + still work, even if the guest is using ZFS. Legacy behavior can be + restored by setting vfs.zfs.vol.recursive=1. + 20160119: The NONE and HPN patches has been removed from OpenSSH. They are still available in the security/openssh-portable port. 
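As a brief sketch of the 20160129 UPDATING entry above: restoring the legacy ZFS-on-zvol behavior is just a matter of setting the named knob, for example (assuming vfs.zfs.vol.recursive is writable at runtime; if it is boot-time only, set it in /boot/loader.conf instead):

	# at runtime, as root
	sysctl vfs.zfs.vol.recursive=1
	# or persistently across reboots
	echo 'vfs.zfs.vol.recursive=1' >> /etc/sysctl.conf

As the entry notes, this configuration has always been prone to deadlocks, so the knob is best treated as a temporary escape hatch for migrating existing pools rather than a supported setup.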
diff --git a/bin/csh/iconv_stub.c b/bin/csh/iconv_stub.c index d1a9e475d87e..e20608c60616 100644 --- a/bin/csh/iconv_stub.c +++ b/bin/csh/iconv_stub.c @@ -36,9 +36,9 @@ #undef iconv_close #define ICONVLIB "libiconv.so" -#define ICONV_ENGINE "iconv" -#define ICONV_OPEN "iconv_open" -#define ICONV_CLOSE "iconv_close" +#define ICONV_ENGINE "libiconv" +#define ICONV_OPEN "libiconv_open" +#define ICONV_CLOSE "libiconv_close" typedef iconv_t iconv_open_t(const char *, const char *); diff --git a/bin/sh/redir.c b/bin/sh/redir.c index 95d3238daf1b..0a7aa96fb601 100644 --- a/bin/sh/redir.c +++ b/bin/sh/redir.c @@ -70,6 +70,7 @@ struct redirtab { struct redirtab *next; int renamed[10]; int fd0_redirected; + unsigned int empty_redirs; }; @@ -82,6 +83,9 @@ static struct redirtab *redirlist; */ static int fd0_redirected = 0; +/* Number of redirtabs that have not been allocated. */ +static unsigned int empty_redirs = 0; + static void openredirect(union node *, char[10 ]); static int openhere(union node *); @@ -115,12 +119,17 @@ redirect(union node *redir, int flags) memory[i] = 0; memory[1] = flags & REDIR_BACKQ; if (flags & REDIR_PUSH) { - sv = ckmalloc(sizeof (struct redirtab)); - for (i = 0 ; i < 10 ; i++) - sv->renamed[i] = EMPTY; - sv->fd0_redirected = fd0_redirected; - sv->next = redirlist; - redirlist = sv; + empty_redirs++; + if (redir != NULL) { + sv = ckmalloc(sizeof (struct redirtab)); + for (i = 0 ; i < 10 ; i++) + sv->renamed[i] = EMPTY; + sv->fd0_redirected = fd0_redirected; + sv->empty_redirs = empty_redirs - 1; + sv->next = redirlist; + redirlist = sv; + empty_redirs = 0; + } } for (n = redir ; n ; n = n->nfile.next) { fd = n->nfile.fd; @@ -303,6 +312,12 @@ popredir(void) struct redirtab *rp = redirlist; int i; + INTOFF; + if (empty_redirs > 0) { + empty_redirs--; + INTON; + return; + } for (i = 0 ; i < 10 ; i++) { if (rp->renamed[i] != EMPTY) { if (rp->renamed[i] >= 0) { @@ -313,8 +328,8 @@ popredir(void) } } } - INTOFF; fd0_redirected = rp->fd0_redirected; + empty_redirs = rp->empty_redirs; redirlist = rp->next; ckfree(rp); INTON; diff --git a/bin/sh/sh.1 b/bin/sh/sh.1 index 14ae89833c8a..94067792e40e 100644 --- a/bin/sh/sh.1 +++ b/bin/sh/sh.1 @@ -32,7 +32,7 @@ .\" from: @(#)sh.1 8.6 (Berkeley) 5/4/95 .\" $FreeBSD$ .\" -.Dd August 29, 2015 +.Dd January 30, 2016 .Dt SH 1 .Os .Sh NAME @@ -1952,13 +1952,20 @@ Execute the specified built-in command, This is useful when the user wishes to override a shell function with the same name as a built-in command. .It Ic cd Oo Fl L | P Oc Oo Fl e Oc Op Ar directory +.It Ic cd Fl Switch to the specified .Ar directory , -or to the directory specified in the +to the directory specified in the .Va HOME environment variable if no .Ar directory -is specified. +is specified or +to the directory specified in the +.Va OLDPWD +environment variable if +.Ar directory +is +.Fl . If .Ar directory does not begin with @@ -1982,10 +1989,12 @@ the .Ic cd command will print out the name of the directory that it actually switched to -if this is different from the name that the user gave. -These may be different either because the +if the .Va CDPATH -mechanism was used or because a symbolic link was crossed. +mechanism was used or if +.Ar directory +was +.Fl . .Pp If the .Fl P @@ -2774,6 +2783,10 @@ Initialization file for interactive shells. Locale settings. These are inherited by children of the shell, and is used in a limited manner by the shell itself. +.It Ev OLDPWD +The previous current directory. +This is used and updated by +.Ic cd . 
.It Ev PWD An absolute pathname for the current directory, possibly containing symbolic links. diff --git a/bin/test/test.c b/bin/test/test.c index 46e9999eb0bb..48b718e6412d 100644 --- a/bin/test/test.c +++ b/bin/test/test.c @@ -120,51 +120,53 @@ enum token { #define TOKEN_TYPE(token) ((token) & 0xff00) -static struct t_op { - char op_text[4]; +static const struct t_op { + char op_text[2]; short op_num; -} const ops [] = { - {"-r", FILRD}, - {"-w", FILWR}, - {"-x", FILEX}, - {"-e", FILEXIST}, - {"-f", FILREG}, - {"-d", FILDIR}, - {"-c", FILCDEV}, - {"-b", FILBDEV}, - {"-p", FILFIFO}, - {"-u", FILSUID}, - {"-g", FILSGID}, - {"-k", FILSTCK}, - {"-s", FILGZ}, - {"-t", FILTT}, - {"-z", STREZ}, - {"-n", STRNZ}, - {"-h", FILSYM}, /* for backwards compat */ - {"-O", FILUID}, - {"-G", FILGID}, - {"-L", FILSYM}, - {"-S", FILSOCK}, +} ops1[] = { {"=", STREQ}, - {"==", STREQ}, - {"!=", STRNE}, {"<", STRLT}, {">", STRGT}, - {"-eq", INTEQ}, - {"-ne", INTNE}, - {"-ge", INTGE}, - {"-gt", INTGT}, - {"-le", INTLE}, - {"-lt", INTLT}, - {"-nt", FILNT}, - {"-ot", FILOT}, - {"-ef", FILEQ}, {"!", UNOT}, - {"-a", BAND}, - {"-o", BOR}, {"(", LPAREN}, {")", RPAREN}, - {"", 0} +}, opsm1[] = { + {"r", FILRD}, + {"w", FILWR}, + {"x", FILEX}, + {"e", FILEXIST}, + {"f", FILREG}, + {"d", FILDIR}, + {"c", FILCDEV}, + {"b", FILBDEV}, + {"p", FILFIFO}, + {"u", FILSUID}, + {"g", FILSGID}, + {"k", FILSTCK}, + {"s", FILGZ}, + {"t", FILTT}, + {"z", STREZ}, + {"n", STRNZ}, + {"h", FILSYM}, /* for backwards compat */ + {"O", FILUID}, + {"G", FILGID}, + {"L", FILSYM}, + {"S", FILSOCK}, + {"a", BAND}, + {"o", BOR}, +}, ops2[] = { + {"==", STREQ}, + {"!=", STRNE}, +}, opsm2[] = { + {"eq", INTEQ}, + {"ne", INTNE}, + {"ge", INTGE}, + {"gt", INTGT}, + {"le", INTLE}, + {"lt", INTLT}, + {"nt", FILNT}, + {"ot", FILOT}, + {"ef", FILEQ}, }; static int nargc; @@ -416,35 +418,71 @@ filstat(char *nm, enum token mode) } } -static enum token -t_lex(char *s) +static int +find_op_1char(const struct t_op *op, const struct t_op *end, const char *s) { - struct t_op const *op = ops; + char c; - if (s == 0) { - return EOI; - } - while (*op->op_text) { - if (strcmp(s, op->op_text) == 0) { - if (((TOKEN_TYPE(op->op_num) == UNOP || - TOKEN_TYPE(op->op_num) == BUNOP) - && isunopoperand()) || - (op->op_num == LPAREN && islparenoperand()) || - (op->op_num == RPAREN && isrparenoperand())) - break; + c = s[0]; + while (op != end) { + if (c == *op->op_text) return op->op_num; - } op++; } return OPERAND; } +static int +find_op_2char(const struct t_op *op, const struct t_op *end, const char *s) +{ + while (op != end) { + if (s[0] == op->op_text[0] && s[1] == op->op_text[1]) + return op->op_num; + op++; + } + return OPERAND; +} + +static int +find_op(const char *s) +{ + if (s[0] == '\0') + return OPERAND; + else if (s[1] == '\0') + return find_op_1char(ops1, (&ops1)[1], s); + else if (s[2] == '\0') + return s[0] == '-' ? find_op_1char(opsm1, (&opsm1)[1], s + 1) : + find_op_2char(ops2, (&ops2)[1], s); + else if (s[3] == '\0') + return s[0] == '-' ? 
find_op_2char(opsm2, (&opsm2)[1], s + 1) : + OPERAND; + else + return OPERAND; +} + +static enum token +t_lex(char *s) +{ + int num; + + if (s == 0) { + return EOI; + } + num = find_op(s); + if (((TOKEN_TYPE(num) == UNOP || TOKEN_TYPE(num) == BUNOP) + && isunopoperand()) || + (num == LPAREN && islparenoperand()) || + (num == RPAREN && isrparenoperand())) + return OPERAND; + return num; +} + static int isunopoperand(void) { - struct t_op const *op = ops; char *s; char *t; + int num; if (nargc == 1) return 1; @@ -452,20 +490,16 @@ isunopoperand(void) if (nargc == 2) return parenlevel == 1 && strcmp(s, ")") == 0; t = *(t_wp + 2); - while (*op->op_text) { - if (strcmp(s, op->op_text) == 0) - return TOKEN_TYPE(op->op_num) == BINOP && - (parenlevel == 0 || t[0] != ')' || t[1] != '\0'); - op++; - } - return 0; + num = find_op(s); + return TOKEN_TYPE(num) == BINOP && + (parenlevel == 0 || t[0] != ')' || t[1] != '\0'); } static int islparenoperand(void) { - struct t_op const *op = ops; char *s; + int num; if (nargc == 1) return 1; @@ -474,12 +508,8 @@ islparenoperand(void) return parenlevel == 1 && strcmp(s, ")") == 0; if (nargc != 3) return 0; - while (*op->op_text) { - if (strcmp(s, op->op_text) == 0) - return TOKEN_TYPE(op->op_num) == BINOP; - op++; - } - return 0; + num = find_op(s); + return TOKEN_TYPE(num) == BINOP; } static int diff --git a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c index d5ba20fde0cf..c3dafd6a777c 100644 --- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c +++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_config.c @@ -26,6 +26,7 @@ /* * Copyright (c) 2012 by Delphix. All rights reserved. + * Copyright (c) 2015 by Syneto S.R.L. All rights reserved. */ /* @@ -246,8 +247,9 @@ zpool_get_features(zpool_handle_t *zhp) config = zpool_get_config(zhp, NULL); } - verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, - &features) == 0); + if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS, + &features) != 0) + return (NULL); return (features); } diff --git a/contrib/libucl/tests/.gitignore b/contrib/libucl/tests/.gitignore deleted file mode 100644 index 5a48681d39b2..000000000000 --- a/contrib/libucl/tests/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -*.log -*.trs -*.plist - -test_basic -test_generate -test_schema -test_speed diff --git a/contrib/libxo/.gitignore b/contrib/libxo/.gitignore deleted file mode 100644 index 8d70b6cc1550..000000000000 --- a/contrib/libxo/.gitignore +++ /dev/null @@ -1,46 +0,0 @@ -# Object files -*.o - -# Libraries -*.lib -*.a - -# Shared objects (inc. 
Windows DLLs) -*.dll -*.so -*.so.* -*.dylib - -# Executables -*.exe -*.app - -*~ -*.orig - -aclocal.m4 -ar-lib -autom4te.cache -build -compile -config.guess -config.h.in -config.sub -depcomp -install-sh -ltmain.sh -missing -m4 - -Makefile.in -configure -.DS_Store - -xoconfig.h.in -xo_config.h.in - -.gdbinit -.gdbinit.local -xtest -xtest.dSYM -tests/w diff --git a/contrib/ofed/librdmacm/examples/build/cmatose/Makefile b/contrib/ofed/librdmacm/examples/build/cmatose/Makefile index 31d2ae7f7f6b..e2baf40bcf2c 100644 --- a/contrib/ofed/librdmacm/examples/build/cmatose/Makefile +++ b/contrib/ofed/librdmacm/examples/build/cmatose/Makefile @@ -5,7 +5,6 @@ PROG= cmatose MAN= SRCS= cmatose.c -LDADD+= -libverbs -lrdmacm -lpthread -LDADD+= -lmlx4 +LIBADD= ibverbs rdmacm pthread mlx4 .include diff --git a/contrib/ofed/librdmacm/examples/build/mckey/Makefile b/contrib/ofed/librdmacm/examples/build/mckey/Makefile index 4abaf2786d56..6ef498fb905b 100644 --- a/contrib/ofed/librdmacm/examples/build/mckey/Makefile +++ b/contrib/ofed/librdmacm/examples/build/mckey/Makefile @@ -5,7 +5,6 @@ PROG= mckey MAN= SRCS= mckey.c -LDADD+= -libverbs -lrdmacm -lpthread -LDADD+= -lmlx4 +LIBADD= ibverbs rdmacm pthread mlx4 .include diff --git a/contrib/ofed/librdmacm/examples/build/udaddy/Makefile b/contrib/ofed/librdmacm/examples/build/udaddy/Makefile index 1e325505bc86..fc76534fbed1 100644 --- a/contrib/ofed/librdmacm/examples/build/udaddy/Makefile +++ b/contrib/ofed/librdmacm/examples/build/udaddy/Makefile @@ -5,7 +5,6 @@ PROG= udaddy MAN= SRCS= udaddy.c -LDADD+= -libverbs -lrdmacm -lpthread -LDADD+= -lmlx4 +LIBADD= ibverbs rdmacm pthread mlx4 .include diff --git a/contrib/openresolv/Makefile b/contrib/openresolv/Makefile index aca2cb531769..a898b95e6b51 100644 --- a/contrib/openresolv/Makefile +++ b/contrib/openresolv/Makefile @@ -1,5 +1,5 @@ PKG= openresolv -VERSION= 3.7.0 +VERSION= 3.7.1 # Nasty hack so that make clean works without configure being run _CONFIG_MK!= test -e config.mk && echo config.mk || echo config-null.mk @@ -37,7 +37,7 @@ SED_RESTARTCMD= -e 's:@RESTARTCMD \(.*\)@:${RESTARTCMD}:g' DISTPREFIX?= ${PKG}-${VERSION} DISTFILEGZ?= ${DISTPREFIX}.tar.gz -DISTFILE?= ${DISTPREFIX}.tar.bz2 +DISTFILE?= ${DISTPREFIX}.tar.xz FOSSILID?= current .SUFFIXES: .in @@ -77,9 +77,9 @@ install: proginstall maninstall import: rm -rf /tmp/${DISTPREFIX} ${INSTALL} -d /tmp/${DISTPREFIX} - cp README ${SRCS} /tmp/${DISPREFIX} + cp README ${SRCS} /tmp/${DISTPREFIX} dist: fossil tarball --name ${DISTPREFIX} ${FOSSILID} ${DISTFILEGZ} - gunzip -c ${DISTFILEGZ} | bzip2 >${DISTFILE} + gunzip -c ${DISTFILEGZ} | xz >${DISTFILE} rm ${DISTFILEGZ} diff --git a/contrib/openresolv/resolvconf.8.in b/contrib/openresolv/resolvconf.8.in index 10dcf5dd7399..bfbfc7f4726a 100644 --- a/contrib/openresolv/resolvconf.8.in +++ b/contrib/openresolv/resolvconf.8.in @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" -.Dd April 27, 2014 +.Dd April 27, 2015 .Dt RESOLVCONF 8 .Os .Sh NAME diff --git a/contrib/openresolv/resolvconf.conf.5.in b/contrib/openresolv/resolvconf.conf.5.in index d4f654308e9e..7aa14507f4e8 100644 --- a/contrib/openresolv/resolvconf.conf.5.in +++ b/contrib/openresolv/resolvconf.conf.5.in @@ -22,7 +22,7 @@ .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. 
.\" -.Dd March 20, 2015 +.Dd May 14, 2015 .Dt RESOLVCONF.CONF 5 .Os .Sh NAME @@ -91,6 +91,11 @@ To remove a block, you can use 192.168.* These interfaces name servers will only be queried for the domains listed in their resolv.conf. Useful for VPN domains. +Setting +.Sy private_interfaces Ns ="*" +will stop the forwarding of the root zone and allows the local resolver to +recursively query the root servers directly. +Requires a local nameserver other than libc. This is equivalent to the .Nm resolvconf -p option. @@ -149,7 +154,7 @@ When set to /dev/null or NULL, .Sy resolv_conf_local_only is defaulted to NO, .Sy local_nameservers -is unset unless overriden and only the information set in +is unset unless overridden and only the information set in .Nm is written to .Sy resolv_conf . @@ -271,7 +276,7 @@ Each subscriber attempts to automatically configure itself, but not every distribution has been catered for. Also, users could equally want to use a different version from the one installed by default, such as bind8 and bind9. -To accomodate this, the subscribers have these files in configurable +To accommodate this, the subscribers have these files in configurable variables, documented below. .Pp .Bl -tag -width indent diff --git a/contrib/openresolv/resolvconf.in b/contrib/openresolv/resolvconf.in index 3b2b0f53fd83..a946ed8c44b5 100644 --- a/contrib/openresolv/resolvconf.in +++ b/contrib/openresolv/resolvconf.in @@ -50,7 +50,6 @@ elif [ -d "$SYSCONFDIR/resolvconf" ]; then interface_order="$(cat "$SYSCONFDIR"/interface-order)" fi fi -TMPDIR="$VARDIR/tmp" IFACEDIR="$VARDIR/interfaces" METRICDIR="$VARDIR/metrics" PRIVATEDIR="$VARDIR/private" diff --git a/contrib/openresolv/unbound.in b/contrib/openresolv/unbound.in index 5752e6f2c412..a803615783fc 100644 --- a/contrib/openresolv/unbound.in +++ b/contrib/openresolv/unbound.in @@ -45,7 +45,8 @@ for d in $DOMAINS; do ns="${d#*:}" case "$unbound_insecure" in [Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1) - newconf="$newconf${NL}domain-insecure: \"$dn\"" + newconf="$newconf${NL}server:$NL" + newconf="$newconf domain-insecure: \"$dn\"$NL" ;; esac newconf="$newconf${NL}forward-zone:$NL name: \"$dn\"$NL" diff --git a/contrib/smbfs/smbutil/print.c b/contrib/smbfs/smbutil/print.c index d87ec78587c6..5e4538f7d1cb 100644 --- a/contrib/smbfs/smbutil/print.c +++ b/contrib/smbfs/smbutil/print.c @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/contrib/unbound/.gitignore b/contrib/unbound/.gitignore deleted file mode 100644 index 7fed8d74d386..000000000000 --- a/contrib/unbound/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -*.lo -*.o -/.libs/ -/Makefile -/autom4te.cache/ -/config.h -/config.log -/config.status -/dnstap/dnstap_config.h -/doc/example.conf -/doc/libunbound.3 -/doc/unbound-anchor.8 -/doc/unbound-checkconf.8 -/doc/unbound-control.8 -/doc/unbound-host.1 -/doc/unbound.8 -/doc/unbound.conf.5 -/libtool -/libunbound.la -/smallapp/unbound-control-setup.sh -/unbound -/unbound-anchor -/unbound-checkconf -/unbound-control -/unbound-control-setup -/unbound-host -/unbound.h -/asynclook -/delayer -/lock-verify -/memstats -/perf -/petal -/pktview -/streamtcp -/testbound -/unittest - diff --git a/crypto/openssh/sshd_config b/crypto/openssh/sshd_config index 6ab7900bc544..5cf3d4f6ea97 100644 --- a/crypto/openssh/sshd_config +++ b/crypto/openssh/sshd_config @@ -45,7 +45,7 @@ # Authentication: #LoginGraceTime 2m -#PermitRootLogin prohibit-password +#PermitRootLogin no #StrictModes yes #MaxAuthTries 6 
#MaxSessions 10 diff --git a/crypto/openssh/sshd_config.5 b/crypto/openssh/sshd_config.5 index 9945fc93cab0..e9e460b5e297 100644 --- a/crypto/openssh/sshd_config.5 +++ b/crypto/openssh/sshd_config.5 @@ -1217,7 +1217,7 @@ The argument must be or .Dq no . The default is -.Dq prohibit-password . +.Dq no . Note that if .Cm ChallengeResponseAuthentication is diff --git a/lib/libc/tests/gen/Makefile b/lib/libc/tests/gen/Makefile index b2cef62dd8f3..b4e14d97151c 100644 --- a/lib/libc/tests/gen/Makefile +++ b/lib/libc/tests/gen/Makefile @@ -15,6 +15,7 @@ ATF_TESTS_C+= ftw_test ATF_TESTS_C+= popen_test ATF_TESTS_C+= posix_spawn_test ATF_TESTS_C+= wordexp_test +ATF_TESTS_C+= dlopen_empty_test # TODO: t_closefrom, t_cpuset, t_fmtcheck, t_randomid, t_sleep # TODO: t_siginfo (fixes require further inspection) diff --git a/lib/libc/tests/gen/dlopen_empty_test.c b/lib/libc/tests/gen/dlopen_empty_test.c new file mode 100644 index 000000000000..42f9269a10b2 --- /dev/null +++ b/lib/libc/tests/gen/dlopen_empty_test.c @@ -0,0 +1,97 @@ +/*- + * Copyright (c) 2016 Maksym Sobolyev + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static const char *funname; +static char *soname; + +static void +sigsegv_handler(int sig __unused) +{ + unlink(soname); + free(soname); + atf_tc_fail("got SIGSEGV in the %s(3)", funname); +} + +ATF_TC(dlopen_empty_test); +ATF_TC_HEAD(dlopen_empty_test, tc) +{ + atf_tc_set_md_var(tc, "descr", "Tests the dlopen() of an empty file " + "returns an error"); +} +ATF_TC_BODY(dlopen_empty_test, tc) +{ + char tempname[] = "/tmp/temp.XXXXXX"; + char *fname; + int fd; + void *dlh; + struct sigaction act, oact; + + fname = mktemp(tempname); + ATF_REQUIRE_MSG(fname != NULL, "mktemp failed; errno=%d", errno); + asprintf(&soname, "%s.so", fname); + ATF_REQUIRE_MSG(soname != NULL, "asprintf failed; errno=%d", ENOMEM); + fd = open(soname, O_WRONLY | O_CREAT | O_TRUNC, DEFFILEMODE); + ATF_REQUIRE_MSG(fd != -1, "open(\"%s\") failed; errno=%d", soname, errno); + close(fd); + + act.sa_handler = sigsegv_handler; + act.sa_flags = 0; + sigemptyset(&act.sa_mask); + ATF_CHECK_MSG(sigaction(SIGSEGV, &act, &oact) != -1, + "sigaction() failed"); + + funname = "dlopen"; + dlh = dlopen(soname, RTLD_LAZY); + if (dlh != NULL) { + funname = "dlclose"; + dlclose(dlh); + } + ATF_REQUIRE_MSG(dlh == NULL, "dlopen(\"%s\") did not fail", soname); + unlink(soname); + free(soname); +} + +ATF_TP_ADD_TCS(tp) +{ + + ATF_TP_ADD_TC(tp, dlopen_empty_test); + + return (atf_no_error()); +} diff --git a/lib/libsysdecode/syscallnames.c b/lib/libsysdecode/syscallnames.c index cfe50a6a6ea9..4ec2cd76ac73 100644 --- a/lib/libsysdecode/syscallnames.c +++ b/lib/libsysdecode/syscallnames.c @@ -70,30 +70,30 @@ sysdecode_syscallname(enum sysdecode_abi abi, unsigned int code) { switch (abi) { - case FREEBSD: + case SYSDECODE_ABI_FREEBSD: if (code < nitems(syscallnames)) return (syscallnames[code]); break; #if defined(__amd64__) || defined(__powerpc64__) - case FREEBSD32: + case SYSDECODE_ABI_FREEBSD32: if (code < nitems(freebsd32_syscallnames)) return (freebsd32_syscallnames[code]); break; #endif #if defined(__amd64__) || defined(__i386__) - case LINUX: + case SYSDECODE_ABI_LINUX: if (code < nitems(linux_syscallnames)) return (linux_syscallnames[code]); break; #endif #ifdef __amd64__ - case LINUX32: + case SYSDECODE_ABI_LINUX32: if (code < nitems(linux32_syscallnames)) return (linux32_syscallnames[code]); break; #endif #if defined(__amd64__) || defined(__aarch64__) - case CLOUDABI64: + case SYSDECODE_ABI_CLOUDABI64: if (code < nitems(cloudabi64_syscallnames)) return (cloudabi64_syscallnames[code]); break; diff --git a/lib/libsysdecode/sysdecode.3 b/lib/libsysdecode/sysdecode.3 index 994d278fe001..f058f59c60cf 100644 --- a/lib/libsysdecode/sysdecode.3 +++ b/lib/libsysdecode/sysdecode.3 @@ -25,7 +25,7 @@ .\" .\" $FreeBSD$ .\" -.Dd January 24, 2016 +.Dd January 29, 2016 .Dt SYSDECODE 3 .Os .Sh NAME @@ -44,23 +44,23 @@ The supported ABIs are named by the .Vt enum sysdecode_abi enumeration. .Pp -.Bl -tag -width "Li UNKNOWN_ABI" -compact -.It Li FREEBSD +.Bl -tag -width "Li SYSDECODE_ABI_CLOUDABI64" -compact +.It Li SYSDECODE_ABI_FREEBSD Native FreeBSD binaries. Supported on all platforms. -.It Li FREEBSD32 +.It Li SYSDECODE_ABI_FREEBSD32 32-bit FreeBSD binaries. Supported on amd64 and powerpc64. -.It Li LINUX +.It Li SYSDECODE_ABI_LINUX Linux binaries of the same platform. Supported on amd64 and i386. -.It Li LINUX32 +.It Li SYSDECODE_ABI_LINUX32 32-bit Linux binaries. Supported on amd64. 
-.It Li CLOUDABI64 +.It Li SYSDECODE_ABI_CLOUDABI64 64-bit CloudABI binaries. Supported on aarch64 and amd64. -.It Li UNKNOWN_ABI +.It Li SYSDECODE_ABI_UNKNOWN A placeholder for use when the ABI is not known. .El .Sh SEE ALSO diff --git a/lib/libsysdecode/sysdecode.h b/lib/libsysdecode/sysdecode.h index c2c3a9e801e5..fa00716826db 100644 --- a/lib/libsysdecode/sysdecode.h +++ b/lib/libsysdecode/sysdecode.h @@ -30,12 +30,12 @@ #define __SYSDECODE_H__ enum sysdecode_abi { - UNKNOWN_ABI = 0, - FREEBSD, - FREEBSD32, - LINUX, - LINUX32, - CLOUDABI64 + SYSDECODE_ABI_UNKNOWN = 0, + SYSDECODE_ABI_FREEBSD, + SYSDECODE_ABI_FREEBSD32, + SYSDECODE_ABI_LINUX, + SYSDECODE_ABI_LINUX32, + SYSDECODE_ABI_CLOUDABI64 }; const char *sysdecode_ioctlname(unsigned long _val); diff --git a/libexec/atrun/atrun.c b/libexec/atrun/atrun.c index 1e25766c41b1..7b11e7bb1997 100644 --- a/libexec/atrun/atrun.c +++ b/libexec/atrun/atrun.c @@ -459,8 +459,9 @@ main(int argc, char *argv[]) int c; int run_batch; #ifdef __FreeBSD__ - size_t ncpu, ncpusz; + size_t ncpusz; double load_avg = -1; + int ncpu; #else double load_avg = LOADAVG_MX; #endif diff --git a/libexec/rtld-elf/map_object.c b/libexec/rtld-elf/map_object.c index 6012015bcc28..f4f6f4221816 100644 --- a/libexec/rtld-elf/map_object.c +++ b/libexec/rtld-elf/map_object.c @@ -38,7 +38,7 @@ #include "debug.h" #include "rtld.h" -static Elf_Ehdr *get_elf_header(int, const char *); +static Elf_Ehdr *get_elf_header(int, const char *, const struct stat *); static int convert_prot(int); /* Elf flags -> mmap protection */ static int convert_flags(int); /* Elf flags -> mmap flags */ @@ -91,7 +91,7 @@ map_object(int fd, const char *path, const struct stat *sb) char *note_map; size_t note_map_len; - hdr = get_elf_header(fd, path); + hdr = get_elf_header(fd, path, sb); if (hdr == NULL) return (NULL); @@ -324,10 +324,16 @@ map_object(int fd, const char *path, const struct stat *sb) } static Elf_Ehdr * -get_elf_header(int fd, const char *path) +get_elf_header(int fd, const char *path, const struct stat *sbp) { Elf_Ehdr *hdr; + /* Make sure file has enough data for the ELF header */ + if (sbp != NULL && sbp->st_size < sizeof(Elf_Ehdr)) { + _rtld_error("%s: invalid file format", path); + return (NULL); + } + hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ, fd, 0); if (hdr == (Elf_Ehdr *)MAP_FAILED) { diff --git a/sbin/nvmecontrol/Makefile b/sbin/nvmecontrol/Makefile index ea60da3c2941..e8a23719a2a7 100644 --- a/sbin/nvmecontrol/Makefile +++ b/sbin/nvmecontrol/Makefile @@ -2,7 +2,7 @@ PROG= nvmecontrol SRCS= nvmecontrol.c devlist.c firmware.c identify.c logpage.c \ - perftest.c reset.c nvme_util.c + perftest.c reset.c nvme_util.c power.c MAN= nvmecontrol.8 .PATH: ${.CURDIR}/../../sys/dev/nvme diff --git a/sbin/nvmecontrol/nvmecontrol.8 b/sbin/nvmecontrol/nvmecontrol.8 index 3b4b5c23aef3..ae8132ec5cbc 100644 --- a/sbin/nvmecontrol/nvmecontrol.8 +++ b/sbin/nvmecontrol/nvmecontrol.8 @@ -70,6 +70,11 @@ .Op Fl f Ar path_to_firmware .Op Fl a .Aq device id +.Nm +.Ic power +.Op Fl l +.Op Fl p power_state +.Op fl w workload_hint .Sh DESCRIPTION NVM Express (NVMe) is a storage protocol standard, for SSDs and other high-speed storage devices over PCI Express. @@ -120,6 +125,18 @@ Activate the firmware in slot 4 of the nvme0 controller on the next reset. .Pp Download the firmware image contained in "/tmp/nvme_firmware" to slot 7 of the nvme0 controller and activate it on the next reset. +.Pp +.Dl nvmecontrol power -l nvme0 +.Pp +List all the current power modes. 
+.Pp +.Dl nvmecontrol power -p 3 nvme0 +.Pp +Set the current power mode. +.Pp +.Dl nvmecontrol power nvme0 +.Pp +Get the current power mode. .Sh AUTHORS .An -nosplit .Nm diff --git a/sbin/nvmecontrol/nvmecontrol.c b/sbin/nvmecontrol/nvmecontrol.c index 4dee1909280a..cd7c19d0165d 100644 --- a/sbin/nvmecontrol/nvmecontrol.c +++ b/sbin/nvmecontrol/nvmecontrol.c @@ -58,6 +58,7 @@ static struct nvme_function { {"reset", reset, RESET_USAGE}, {"logpage", logpage, LOGPAGE_USAGE}, {"firmware", firmware, FIRMWARE_USAGE}, + {"power", power, POWER_USAGE}, {NULL, NULL, NULL}, }; diff --git a/sbin/nvmecontrol/nvmecontrol.h b/sbin/nvmecontrol/nvmecontrol.h index 8401dd7ccda3..b3cecd26dcea 100644 --- a/sbin/nvmecontrol/nvmecontrol.h +++ b/sbin/nvmecontrol/nvmecontrol.h @@ -55,12 +55,16 @@ #define FIRMWARE_USAGE \ " nvmecontrol firmware [-s slot] [-f path_to_firmware] [-a] \n" +#define POWER_USAGE \ +" nvmecontrol power [-l] [-p new-state [-w workload-hint]] \n" + void devlist(int argc, char *argv[]); void identify(int argc, char *argv[]); void perftest(int argc, char *argv[]); void reset(int argc, char *argv[]); void logpage(int argc, char *argv[]); void firmware(int argc, char *argv[]); +void power(int argc, char *argv[]); int open_dev(const char *str, int *fd, int show_error, int exit_on_error); void parse_ns_str(const char *ns_str, char *ctrlr_str, int *nsid); diff --git a/sbin/nvmecontrol/power.c b/sbin/nvmecontrol/power.c new file mode 100644 index 000000000000..e681ccd14dcc --- /dev/null +++ b/sbin/nvmecontrol/power.c @@ -0,0 +1,185 @@ +/*- + * Copyright (c) 2016 Netflix, Inc + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "nvmecontrol.h" + +_Static_assert(sizeof(struct nvme_power_state) == 256 / NBBY, + "nvme_power_state size wrong"); + +static void +power_usage(void) +{ + fprintf(stderr, "usage:\n"); + fprintf(stderr, POWER_USAGE); + exit(1); +} + +static void +power_list_one(int i, struct nvme_power_state *nps) +{ + int mpower, apower, ipower; + + mpower = nps->mp; + if (nps->mps == 0) + mpower *= 100; + ipower = nps->idlp; + if (nps->ips == 1) + ipower *= 100; + apower = nps->actp; + if (nps->aps == 1) + apower *= 100; + printf("%2d: %2d.%04dW%c %3d.%03dms %3d.%03dms %2d %2d %2d %2d %2d.%04dW %2d.%04dW %d\n", + i, mpower / 10000, mpower % 10000, + nps->nops ? '*' : ' ', nps->enlat / 1000, nps->enlat % 1000, + nps->exlat / 1000, nps->exlat % 1000, nps->rrt, nps->rrl, + nps->rwt, nps->rwl, ipower / 10000, ipower % 10000, + apower / 10000, apower % 10000, nps->apw); +} + +static void +power_list(struct nvme_controller_data *cdata) +{ + int i; + + printf("\nPower States Supported: %d\n\n", cdata->npss + 1); + printf(" # Max pwr Enter Lat Exit Lat RT RL WT WL Idle Pwr Act Pwr Workloadd\n"); + printf("-- -------- --------- --------- -- -- -- -- -------- -------- --\n"); + for (i = 0; i <= cdata->npss; i++) + power_list_one(i, &cdata->power_state[i]); +} + +static void +power_set(int fd, int power, int workload, int perm) +{ + struct nvme_pt_command pt; + uint32_t p; + + p = perm ? (1u << 31) : 0; + memset(&pt, 0, sizeof(pt)); + pt.cmd.opc = NVME_OPC_SET_FEATURES; + pt.cmd.cdw10 = NVME_FEAT_POWER_MANAGEMENT | p; + pt.cmd.cdw11 = power | (workload << 5); + + if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0) + err(1, "set feature power mgmt request failed"); + + if (nvme_completion_is_error(&pt.cpl)) + errx(1, "set feature power mgmt request returned error"); +} + +static void +power_show(int fd) +{ + struct nvme_pt_command pt; + + memset(&pt, 0, sizeof(pt)); + pt.cmd.opc = NVME_OPC_GET_FEATURES; + pt.cmd.cdw10 = NVME_FEAT_POWER_MANAGEMENT; + + if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0) + err(1, "set feature power mgmt request failed"); + + if (nvme_completion_is_error(&pt.cpl)) + errx(1, "set feature power mgmt request returned error"); + + printf("Current Power Mode is %d\n", pt.cpl.cdw0); +} + +void +power(int argc, char *argv[]) +{ + struct nvme_controller_data cdata; + int ch, listflag = 0, powerflag = 0, power = 0, fd; + int workload = 0; + char *end; + + while ((ch = getopt(argc, argv, "lp:w:")) != -1) { + switch ((char)ch) { + case 'l': + listflag = 1; + break; + case 'p': + powerflag = 1; + power = strtol(optarg, &end, 0); + if (*end != '\0') { + fprintf(stderr, "Invalid power state number: %s\n", optarg); + power_usage(); + } + break; + case 'w': + workload = strtol(optarg, &end, 0); + if (*end != '\0') { + fprintf(stderr, "Invalid workload hint: %s\n", optarg); + power_usage(); + } + break; + default: + power_usage(); + } + } + + /* Check that a controller was specified. 
*/ + if (optind >= argc) + power_usage(); + + if (listflag && powerflag) { + fprintf(stderr, "Can't set power and list power states\n"); + power_usage(); + } + + open_dev(argv[optind], &fd, 1, 1); + read_controller_data(fd, &cdata); + + if (listflag) { + power_list(&cdata); + goto out; + } + + if (powerflag) { + power_set(fd, power, workload, 0); + goto out; + } + power_show(fd); + +out: + close(fd); + exit(0); +} diff --git a/sbin/pfctl/pf_print_state.c b/sbin/pfctl/pf_print_state.c index 46d4523373d5..346e6236f320 100644 --- a/sbin/pfctl/pf_print_state.c +++ b/sbin/pfctl/pf_print_state.c @@ -208,22 +208,30 @@ void print_state(struct pfsync_state *s, int opts) { struct pfsync_state_peer *src, *dst; - struct pfsync_state_key *sk, *nk; + struct pfsync_state_key *key, *sk, *nk; struct protoent *p; int min, sec; +#ifndef __NO_STRICT_ALIGNMENT + struct pfsync_state_key aligned_key[2]; + + bcopy(&s->key, aligned_key, sizeof(aligned_key)); + key = aligned_key; +#else + key = s->key; +#endif if (s->direction == PF_OUT) { src = &s->src; dst = &s->dst; - sk = &s->key[PF_SK_STACK]; - nk = &s->key[PF_SK_WIRE]; + sk = &key[PF_SK_STACK]; + nk = &key[PF_SK_WIRE]; if (s->proto == IPPROTO_ICMP || s->proto == IPPROTO_ICMPV6) sk->port[0] = nk->port[0]; } else { src = &s->dst; dst = &s->src; - sk = &s->key[PF_SK_WIRE]; - nk = &s->key[PF_SK_STACK]; + sk = &key[PF_SK_WIRE]; + nk = &key[PF_SK_STACK]; if (s->proto == IPPROTO_ICMP || s->proto == IPPROTO_ICMPV6) sk->port[1] = nk->port[1]; } diff --git a/sbin/rcorder/rcorder.c b/sbin/rcorder/rcorder.c index 1d10a40d961a..37faa2470e55 100644 --- a/sbin/rcorder/rcorder.c +++ b/sbin/rcorder/rcorder.c @@ -701,7 +701,7 @@ keep_ok(filenode *fnode) static void do_file(filenode *fnode) { - f_reqnode *r, *r_tmp; + f_reqnode *r; f_provnode *p, *p_tmp; provnode *pnode; int was_set; @@ -728,13 +728,8 @@ do_file(filenode *fnode) */ r = fnode->req_list; while (r != NULL) { - r_tmp = r; satisfy_req(r, fnode->filename); r = r->next; -#if 0 - if (was_set == 0) - free(r_tmp); -#endif } fnode->req_list = NULL; diff --git a/sbin/swapon/swapon.c b/sbin/swapon/swapon.c index f58143179c2f..03b2b09bf742 100644 --- a/sbin/swapon/swapon.c +++ b/sbin/swapon/swapon.c @@ -315,7 +315,7 @@ swap_on_geli_args(const char *mntops) const char *aalgo, *ealgo, *keylen_str, *sectorsize_str; const char *aflag, *eflag, *lflag, *Tflag, *sflag; char *p, *args, *token, *string, *ops; - int argsize, pagesize; + int pagesize; size_t pagesize_len; u_long ul; @@ -389,7 +389,7 @@ swap_on_geli_args(const char *mntops) sectorsize_str = p; } - argsize = asprintf(&args, "%s%s%s%s%s%s%s%s%s -d", + (void)asprintf(&args, "%s%s%s%s%s%s%s%s%s -d", aflag, aalgo, eflag, ealgo, lflag, keylen_str, Tflag, sflag, sectorsize_str); diff --git a/sys/arm/annapurna/alpine/alpine_machdep.c b/sys/arm/annapurna/alpine/alpine_machdep.c index 959b30faa12b..dc6d3a4245db 100644 --- a/sys/arm/annapurna/alpine/alpine_machdep.c +++ b/sys/arm/annapurna/alpine/alpine_machdep.c @@ -123,7 +123,7 @@ platform_late_init(void) } /* - * Construct pmap_devmap[] with DT-derived config data. + * Construct devmap table with DT-derived config data. 
*/ int platform_devmap_init(void) diff --git a/sys/arm/arm/cpufunc.c b/sys/arm/arm/cpufunc.c index 0dd9241f27df..514a6b1a4059 100644 --- a/sys/arm/arm/cpufunc.c +++ b/sys/arm/arm/cpufunc.c @@ -105,23 +105,17 @@ int ctrl; struct cpu_functions arm9_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ arm9_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ arm9_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -146,18 +140,12 @@ struct cpu_functions arm9_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - arm9_context_switch, /* context_switch */ arm9_setup /* cpu setup */ @@ -169,23 +157,17 @@ struct cpu_functions arm9_cpufuncs = { struct cpu_functions armv5_ec_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ armv5_ec_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ - arm10_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - arm10_tlb_flushI_SE, /* tlb_flushI_SE */ + arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -211,19 +193,13 @@ struct cpu_functions armv5_ec_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - - arm10_context_switch, /* context_switch */ + arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ @@ -232,23 +208,17 @@ struct cpu_functions armv5_ec_cpufuncs = { struct cpu_functions sheeva_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ sheeva_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ - arm10_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - arm10_tlb_flushI_SE, /* tlb_flushI_SE */ + arm9_tlb_flushID_SE, /* tlb_flushID_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -274,19 +244,13 @@ struct cpu_functions sheeva_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ sheeva_cpu_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - - 
arm10_context_switch, /* context_switch */ + arm9_context_switch, /* context_switch */ arm10_setup /* cpu setup */ }; @@ -296,23 +260,17 @@ struct cpu_functions sheeva_cpufuncs = { struct cpu_functions pj4bv7_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ armv7_drain_writebuf, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ armv7_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ armv7_tlb_flushID, /* tlb_flushID */ armv7_tlb_flushID_SE, /* tlb_flushID_SE */ - armv7_tlb_flushID, /* tlb_flushI */ - armv7_tlb_flushID_SE, /* tlb_flushI_SE */ armv7_tlb_flushID, /* tlb_flushD */ armv7_tlb_flushID_SE, /* tlb_flushD_SE */ @@ -337,18 +295,11 @@ struct cpu_functions pj4bv7_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv7_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ (void *)cpufunc_nullop, /* sleep */ /* Soft functions */ - - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - armv7_context_switch, /* context_switch */ pj4bv7_setup /* cpu setup */ @@ -362,23 +313,17 @@ struct cpu_functions pj4bv7_cpufuncs = { struct cpu_functions xscale_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ xscale_cpwait, /* cpwait */ /* MMU functions */ xscale_control, /* control */ - cpufunc_domains, /* domain */ xscale_setttb, /* setttb */ - cpufunc_faultstatus, /* faultstatus */ - cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -403,18 +348,12 @@ struct cpu_functions xscale_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ xscale_cpu_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - xscale_context_switch, /* context_switch */ xscale_setup /* cpu setup */ @@ -427,23 +366,17 @@ struct cpu_functions xscale_cpufuncs = { struct cpu_functions xscalec3_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ xscale_cpwait, /* cpwait */ /* MMU functions */ xscale_control, /* control */ - cpufunc_domains, /* domain */ xscalec3_setttb, /* setttb */ - cpufunc_faultstatus, /* faultstatus */ - cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - (void *)armv4_tlb_flushI, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -468,18 +401,12 @@ struct cpu_functions xscalec3_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ xscale_cpu_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - xscalec3_context_switch, /* context_switch */ xscale_setup /* cpu setup */ @@ -491,23 +418,17 @@ struct cpu_functions xscalec3_cpufuncs = { struct cpu_functions fa526_cpufuncs 
= { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* domain */ fa526_setttb, /* setttb */ - cpufunc_faultstatus, /* faultstatus */ - cpufunc_faultaddress, /* faultaddress */ /* TLB functions */ armv4_tlb_flushID, /* tlb_flushID */ fa526_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushI, /* tlb_flushI */ - fa526_tlb_flushI_SE, /* tlb_flushI_SE */ armv4_tlb_flushD, /* tlb_flushD */ armv4_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -532,17 +453,12 @@ struct cpu_functions fa526_cpufuncs = { /* Other functions */ - fa526_flush_prefetchbuf, /* flush_prefetchbuf */ armv4_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - fa526_flush_brnchtgt_E, /* flush_brnchtgt_E */ fa526_cpu_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ fa526_context_switch, /* context_switch */ @@ -554,23 +470,17 @@ struct cpu_functions fa526_cpufuncs = { struct cpu_functions arm1176_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ arm11x6_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* TLB functions */ arm11_tlb_flushID, /* tlb_flushID */ arm11_tlb_flushID_SE, /* tlb_flushID_SE */ - arm11_tlb_flushI, /* tlb_flushI */ - arm11_tlb_flushI_SE, /* tlb_flushI_SE */ arm11_tlb_flushD, /* tlb_flushD */ arm11_tlb_flushD_SE, /* tlb_flushD_SE */ @@ -596,18 +506,12 @@ struct cpu_functions arm1176_cpufuncs = { /* Other functions */ - arm11x6_flush_prefetchbuf, /* flush_prefetchbuf */ arm11_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ arm11x6_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - arm11_context_switch, /* context_switch */ arm11x6_setup /* cpu setup */ @@ -618,16 +522,12 @@ struct cpu_functions arm1176_cpufuncs = { struct cpu_functions cortexa_cpufuncs = { /* CPU functions */ - cpufunc_id, /* id */ cpufunc_nullop, /* cpwait */ /* MMU functions */ cpufunc_control, /* control */ - cpufunc_domains, /* Domain */ armv7_setttb, /* Setttb */ - cpufunc_faultstatus, /* Faultstatus */ - cpufunc_faultaddress, /* Faultaddress */ /* * TLB functions. 
ARMv7 does all TLB ops based on a unified TLB model @@ -637,8 +537,6 @@ struct cpu_functions cortexa_cpufuncs = { armv7_tlb_flushID, /* tlb_flushID */ armv7_tlb_flushID_SE, /* tlb_flushID_SE */ - armv7_tlb_flushID, /* tlb_flushI */ - armv7_tlb_flushID_SE, /* tlb_flushI_SE */ armv7_tlb_flushID, /* tlb_flushD */ armv7_tlb_flushID_SE, /* tlb_flushD_SE */ @@ -668,18 +566,12 @@ struct cpu_functions cortexa_cpufuncs = { /* Other functions */ - cpufunc_nullop, /* flush_prefetchbuf */ armv7_drain_writebuf, /* drain_writebuf */ - cpufunc_nullop, /* flush_brnchtgt_C */ - (void *)cpufunc_nullop, /* flush_brnchtgt_E */ armv7_cpu_sleep, /* sleep */ /* Soft functions */ - cpufunc_null_fixup, /* dataabt_fixup */ - cpufunc_null_fixup, /* prefetchabt_fixup */ - armv7_context_switch, /* context_switch */ cortexa_setup /* cpu setup */ @@ -726,7 +618,7 @@ get_cachetype_cp15() __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); - cpuid = cpufunc_id(); + cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * @@ -833,7 +725,7 @@ get_cachetype_cp15() int set_cpufuncs() { - cputype = cpufunc_id(); + cputype = cpu_ident(); cputype &= CPU_ID_CPU_MASK; #ifdef CPU_ARM9 @@ -889,9 +781,6 @@ set_cpufuncs() cpufuncs = arm1176_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); - - pmap_pte_init_mmu_v6(); - goto out; } #endif /* CPU_ARM1176 */ @@ -915,8 +804,6 @@ set_cpufuncs() cpufuncs = cortexa_cpufuncs; cpu_reset_needs_v4_MMU_disable = 1; /* V4 or higher */ get_cachetype_cp15(); - - pmap_pte_init_mmu_v6(); goto out; } #endif /* CPU_CORTEXA */ @@ -927,7 +814,6 @@ set_cpufuncs() cputype == CPU_ID_ARM_88SV581X_V7) { cpufuncs = pj4bv7_cpufuncs; get_cachetype_cp15(); - pmap_pte_init_mmu_v6(); goto out; } #endif /* CPU_MV_PJ4B */ @@ -1000,27 +886,6 @@ set_cpufuncs() return (0); } -/* - * Fixup routines for data and prefetch aborts. - * - * Several compile time symbols are used - * - * DEBUG_FAULT_CORRECTION - Print debugging information during the - * correction of registers after a fault. - */ - - -/* - * Null abort fixup routine. - * For use when no fixup is required. 
- */ -int -cpufunc_null_fixup(arg) - void *arg; -{ - return(ABORT_FIXUP_OK); -} - /* * CPU Setup code */ @@ -1146,7 +1011,7 @@ arm11x6_setup(void) uint32_t sbz=0; uint32_t cpuid; - cpuid = cpufunc_id(); + cpuid = cpu_ident(); cpuctrl = CPU_CONTROL_MMU_ENABLE | diff --git a/sys/arm/arm/cpufunc_asm.S b/sys/arm/arm/cpufunc_asm.S index 12f5840365d1..2f733f564cbf 100644 --- a/sys/arm/arm/cpufunc_asm.S +++ b/sys/arm/arm/cpufunc_asm.S @@ -62,15 +62,10 @@ END(cpufunc_nullop) * */ -ENTRY(cpufunc_id) +ENTRY(cpu_ident) mrc p15, 0, r0, c0, c0, 0 RET -END(cpufunc_id) - -ENTRY(cpufunc_cpuid) - mrc p15, 0, r0, c0, c0, 0 - RET -END(cpufunc_cpuid) +END(cpu_ident) ENTRY(cpu_get_control) mrc p15, 0, r0, c1, c0, 0 @@ -82,15 +77,15 @@ ENTRY(cpu_read_cache_config) RET END(cpu_read_cache_config) -ENTRY(cpufunc_faultstatus) +ENTRY(cpu_faultstatus) mrc p15, 0, r0, c5, c0, 0 RET -END(cpufunc_faultstatus) +END(cpu_faultstatus) -ENTRY(cpufunc_faultaddress) +ENTRY(cpu_faultaddress) mrc p15, 0, r0, c6, c0, 0 RET -END(cpufunc_faultaddress) +END(cpu_faultaddress) /* * Generic functions to write the internal coprocessor registers @@ -110,10 +105,10 @@ ENTRY(cpufunc_control) END(cpufunc_control) #endif -ENTRY(cpufunc_domains) +ENTRY(cpu_domains) mcr p15, 0, r0, c3, c0, 0 RET -END(cpufunc_domains) +END(cpu_domains) /* * Generic functions to read/modify/write the internal coprocessor registers diff --git a/sys/arm/arm/cpufunc_asm_arm10.S b/sys/arm/arm/cpufunc_asm_arm10.S deleted file mode 100644 index ab5f791976aa..000000000000 --- a/sys/arm/arm/cpufunc_asm_arm10.S +++ /dev/null @@ -1,76 +0,0 @@ -/* $NetBSD: cpufunc_asm_arm10.S,v 1.1 2003/09/06 09:12:29 rearnsha Exp $ */ - -/*- - * Copyright (c) 2002 ARM Limited - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the company may not be used to endorse or promote - * products derived from this software without specific prior written - * permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * ARM10 assembly functions for CPU / MMU / TLB specific operations - * - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * TLB functions - */ -ENTRY(arm10_tlb_flushID_SE) - mcr p15, 0, r0, c8, c6, 1 /* flush D tlb single entry */ - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - bx lr -END(arm10_tlb_flushID_SE) - -ENTRY(arm10_tlb_flushI_SE) - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - bx lr -END(arm10_tlb_flushI_SE) - - -/* - * Context switch. - * - * These is the CPU-specific parts of the context switcher cpu_switch() - * These functions actually perform the TTB reload. - * - * NOTE: Special calling convention - * r1, r4-r13 must be preserved - */ -ENTRY(arm10_context_switch) - /* - * We can assume that the caches will only contain kernel addresses - * at this point. So no need to flush them again. - */ - mcr p15, 0, r0, c7, c10, 4 /* drain the write buffer */ - mcr p15, 0, r0, c2, c0, 0 /* set the new TTB */ - mcr p15, 0, r0, c8, c7, 0 /* and flush the I+D tlbs */ - - /* Paranoia -- make sure the pipeline is empty. */ - nop - nop - nop - bx lr -END(arm10_context_switch) diff --git a/sys/arm/arm/cpufunc_asm_arm11.S b/sys/arm/arm/cpufunc_asm_arm11.S index a25decbf0fb1..f83f819d963d 100644 --- a/sys/arm/arm/cpufunc_asm_arm11.S +++ b/sys/arm/arm/cpufunc_asm_arm11.S @@ -47,12 +47,6 @@ ENTRY(arm11_tlb_flushID_SE) RET END(arm11_tlb_flushID_SE) -ENTRY(arm11_tlb_flushI_SE) - mcr p15, 0, r0, c8, c5, 1 /* flush I tlb single entry */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - RET -END(arm11_tlb_flushI_SE) - /* * Context switch. * @@ -87,12 +81,6 @@ ENTRY(arm11_tlb_flushID) mov pc, lr END(arm11_tlb_flushID) -ENTRY(arm11_tlb_flushI) - mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ - mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ - mov pc, lr -END(arm11_tlb_flushI) - ENTRY(arm11_tlb_flushD) mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */ mcr p15, 0, r0, c7, c10, 4 /* drain write buffer */ diff --git a/sys/arm/arm/cpufunc_asm_arm11x6.S b/sys/arm/arm/cpufunc_asm_arm11x6.S index b88c0fd67ee2..be6d5309d935 100644 --- a/sys/arm/arm/cpufunc_asm_arm11x6.S +++ b/sys/arm/arm/cpufunc_asm_arm11x6.S @@ -138,11 +138,6 @@ ENTRY_NP(arm11x6_icache_sync_all) RET END(arm11x6_icache_sync_all) -ENTRY_NP(arm11x6_flush_prefetchbuf) - mcr p15, 0, r0, c7, c5, 4 /* Flush Prefetch Buffer */ - RET -END(arm11x6_flush_prefetchbuf) - ENTRY_NP(arm11x6_icache_sync_range) add r1, r1, r0 sub r1, r1, #1 diff --git a/sys/arm/arm/cpufunc_asm_armv4.S b/sys/arm/arm/cpufunc_asm_armv4.S index a61a3dc2a306..6d665acf512e 100644 --- a/sys/arm/arm/cpufunc_asm_armv4.S +++ b/sys/arm/arm/cpufunc_asm_armv4.S @@ -48,11 +48,6 @@ ENTRY(armv4_tlb_flushID) RET END(armv4_tlb_flushID) -ENTRY(armv4_tlb_flushI) - mcr p15, 0, r0, c8, c5, 0 /* flush I tlb */ - RET -END(armv4_tlb_flushI) - ENTRY(armv4_tlb_flushD) mcr p15, 0, r0, c8, c6, 0 /* flush D tlb */ RET diff --git a/sys/arm/arm/cpufunc_asm_fa526.S b/sys/arm/arm/cpufunc_asm_fa526.S index 55c2f376f738..38cb11ad5323 100644 --- a/sys/arm/arm/cpufunc_asm_fa526.S +++ b/sys/arm/arm/cpufunc_asm_fa526.S @@ -64,14 +64,6 @@ ENTRY(fa526_tlb_flushID_SE) mov pc, lr END(fa526_tlb_flushID_SE) -/* - * TLB functions - */ -ENTRY(fa526_tlb_flushI_SE) - mcr p15, 0, r0, c8, c5, 1 /* flush Itlb single entry */ - mov pc, lr -END(fa526_tlb_flushI_SE) - ENTRY(fa526_cpu_sleep) mov r0, #0 /* nop @@ -80,12 +72,6 @@ ENTRY(fa526_cpu_sleep) mov pc, lr END(fa526_cpu_sleep) -ENTRY(fa526_flush_prefetchbuf) - mov r0, #0 - mcr p15, 0, r0, c7, c5, 4 /* Pre-fetch flush */ - mov pc, lr 
-END(fa526_flush_prefetchbuf) - /* * Cache functions */ @@ -200,12 +186,6 @@ ENTRY(fa526_icache_sync_range) mov pc, lr END(fa526_icache_sync_range) -ENTRY(fa526_flush_brnchtgt_E) - mov r0, #0 - mcr p15, 0, r0, c7, c5, 6 /* invalidate BTB cache */ - mov pc, lr -END(fa526_flush_brnchtgt_E) - ENTRY(fa526_context_switch) /* * CF_CACHE_PURGE_ID will *ALWAYS* be called prior to this. diff --git a/sys/arm/arm/db_interface.c b/sys/arm/arm/db_interface.c index 43831462d896..613dc08149cc 100644 --- a/sys/arm/arm/db_interface.c +++ b/sys/arm/arm/db_interface.c @@ -170,7 +170,7 @@ db_validate_address(vm_offset_t addr) addr >= VM_MIN_KERNEL_ADDRESS #endif ) - pmap = pmap_kernel(); + pmap = kernel_pmap; else pmap = p->p_vmspace->vm_map.pmap; diff --git a/sys/arm/arm/devmap.c b/sys/arm/arm/devmap.c index 823210f90604..380e12941dc2 100644 --- a/sys/arm/arm/devmap.c +++ b/sys/arm/arm/devmap.c @@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -52,6 +53,9 @@ static boolean_t devmap_bootstrap_done = false; #define PTE_DEVICE VM_MEMATTR_DEVICE #elif defined(__arm__) #define MAX_VADDR ARM_VECTORS_HIGH +#if __ARM_ARCH >= 6 +#define PTE_DEVICE VM_MEMATTR_DEVICE +#endif #endif /* @@ -204,8 +208,13 @@ arm_devmap_bootstrap(vm_offset_t l1pt, const struct arm_devmap_entry *table) for (pd = devmap_table; pd->pd_size != 0; ++pd) { #if defined(__arm__) +#if __ARM_ARCH >= 6 + pmap_preboot_map_attr(pd->pd_pa, pd->pd_va, pd->pd_size, + pd->pd_prot, pd->pd_cache); +#else pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size, - pd->pd_prot,pd->pd_cache); + pd->pd_prot, pd->pd_cache); +#endif #elif defined(__aarch64__) pmap_kenter_device(pd->pd_va, pd->pd_size, pd->pd_pa); #endif diff --git a/sys/arm/arm/elf_trampoline.c b/sys/arm/arm/elf_trampoline.c index c3a7eeddfc93..6c086ea6192c 100644 --- a/sys/arm/arm/elf_trampoline.c +++ b/sys/arm/arm/elf_trampoline.c @@ -49,7 +49,7 @@ void _start(void); void __start(void); void __startC(void); -extern unsigned int cpufunc_id(void); +extern unsigned int cpu_ident(void); extern void armv6_idcache_wbinv_all(void); extern void armv7_idcache_wbinv_all(void); extern void do_call(void *, void *, void *, int); @@ -248,7 +248,7 @@ _startC(void) #ifndef KZIP #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ - if ((cpufunc_id() & 0x0000f000) == 0x00009000) + if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif #endif @@ -266,7 +266,7 @@ get_cachetype_cp15() __asm __volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctype)); - cpuid = cpufunc_id(); + cpuid = cpu_ident(); /* * ...and thus spake the ARM ARM: * @@ -683,7 +683,7 @@ __start(void) #ifdef CPU_ARM9 /* So that idcache_wbinv works; */ - if ((cpufunc_id() & 0x0000f000) == 0x00009000) + if ((cpu_ident() & 0x0000f000) == 0x00009000) arm9_setup(); #endif setup_pagetables(pt_addr, (vm_paddr_t)curaddr, diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c index a5d2f6b9645f..eb4e51b81bba 100644 --- a/sys/arm/arm/genassym.c +++ b/sys/arm/arm/genassym.c @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/arm/arm/machdep.c b/sys/arm/arm/machdep.c index 52413b89109e..8d7c39c4d0b9 100644 --- a/sys/arm/arm/machdep.c +++ b/sys/arm/arm/machdep.c @@ -427,10 +427,8 @@ cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; -#ifdef ARM_TP_ADDRESS -#ifndef ARM_CACHE_LOCK_ENABLE +#if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE) vm_page_t m; -#endif #endif identify_arm_cpu(); 
@@ -455,12 +453,10 @@ cpu_startup(void *dummy) vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; - pmap_set_pcb_pagedir(pmap_kernel(), pcb); -#if __ARM_ARCH < 6 + pmap_set_pcb_pagedir(kernel_pmap, pcb); +#if __ARM_ARCH < 6 vector_page_setprot(VM_PROT_READ); pmap_postinit(); -#endif -#ifdef ARM_TP_ADDRESS #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c index 36618669ac61..6cedd46e2bb5 100644 --- a/sys/arm/arm/mp_machdep.c +++ b/sys/arm/arm/mp_machdep.c @@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include @@ -156,7 +155,6 @@ init_secondary(int cpu) #ifndef ARM_INTRNG int start = 0, end = 0; #endif -#if __ARM_ARCH >= 6 uint32_t actlr_mask, actlr_set; pmap_set_tex(); @@ -168,11 +166,6 @@ init_secondary(int cpu) set_stackptrs(cpu); enable_interrupts(PSR_A); -#else /* __ARM_ARCH >= 6 */ - cpu_setup(); - setttb(pmap_pa); - cpu_tlb_flushID(); -#endif /* __ARM_ARCH >= 6 */ pc = &__pcpu[cpu]; /* @@ -184,10 +177,6 @@ init_secondary(int cpu) pcpu_init(pc, cpu, sizeof(struct pcpu)); dpcpu_init(dpcpu[cpu - 1], cpu); -#if __ARM_ARCH < 6 - /* Provide stack pointers for other processor modes. */ - set_stackptrs(cpu); -#endif /* Signal our startup to BSP */ atomic_add_rel_32(&mp_naps, 1); @@ -351,13 +340,6 @@ ipi_hardclock(void *arg) critical_exit(); } -static void -ipi_tlb(void *dummy __unused) -{ - - CTR1(KTR_SMP, "%s: IPI_TLB", __func__); - cpufuncs.cf_tlb_flushID(); -} #else static int ipi_handler(void *arg) @@ -423,10 +405,6 @@ ipi_handler(void *arg) CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; - case IPI_TLB: - CTR1(KTR_SMP, "%s: IPI_TLB", __func__); - cpufuncs.cf_tlb_flushID(); - break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } @@ -456,7 +434,6 @@ release_aps(void *dummy __unused) intr_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0); intr_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0); intr_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0); - intr_ipi_set_handler(IPI_TLB, "tlb", ipi_tlb, NULL, 0); #else #ifdef IPI_IRQ_START @@ -548,10 +525,3 @@ ipi_selected(cpuset_t cpus, u_int ipi) platform_ipi_send(cpus, ipi); } -void -tlb_broadcast(int ipi) -{ - - if (smp_started) - ipi_all_but_self(ipi); -} diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index 2996898a4510..d4ab9305b39d 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -645,7 +645,7 @@ pt2map_pt2pg(vm_offset_t va) * vm_offset_t pmap_preboot_reserve_pages(u_int num); * vm_offset_t pmap_preboot_get_vpages(u_int num); * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, - * int prot, int attr); + * vm_prot_t prot, vm_memattr_t attr); * * (2) for all stages: * @@ -984,15 +984,16 @@ pmap_preboot_get_vpages(u_int num) * Pre-bootstrap epoch page mapping(s) with attributes. */ void -pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, int prot, - int attr) +pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, + vm_prot_t prot, vm_memattr_t attr) { u_int num; - u_int l1_attr, l1_prot; + u_int l1_attr, l1_prot, l2_prot; pt1_entry_t *pte1p; pt2_entry_t *pte2p; - l1_prot = ATTR_TO_L1(prot); + l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR; + l1_prot = ATTR_TO_L1(l2_prot); l1_attr = ATTR_TO_L1(attr); /* Map all the pages. 
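 * Where alignment and the remaining size allow, a chunk is mapped with a
 * single 1 MB PTE1 section; otherwise it falls back to individual 4 KB
 * PTE2 pages.  Both cases use the kernel access bits derived from
 * VM_PROT_WRITE above.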
*/ @@ -1006,13 +1007,12 @@ pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, int prot, num -= PTE1_SIZE; } else { pte2p = pmap_preboot_vtopte2(va); - pte2_store(pte2p, PTE2_KERN(pa, prot, attr)); + pte2_store(pte2p, PTE2_KERN(pa, l2_prot, attr)); va += PAGE_SIZE; pa += PAGE_SIZE; num -= PAGE_SIZE; } } - } /* @@ -1325,7 +1325,7 @@ pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x)," " prot = %d\n", __func__, *virt, start, end, end - start, prot)); - l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE1_AP_KR; + l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR; l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; l1prot = ATTR_TO_L1(l2prot); @@ -6278,11 +6278,6 @@ pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) } /* !!!! REMOVE !!!! */ -void -pmap_pte_init_mmu_v6(void) -{ -} - void vector_page_setprot(int p) { } diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c index 3dd998429dfc..99a39a0ee522 100644 --- a/sys/arm/arm/pmap.c +++ b/sys/arm/arm/pmap.c @@ -394,7 +394,7 @@ int pmap_needs_pte_sync; #define PMAP_SHPGPERPROC 200 #endif -#define pmap_is_current(pm) ((pm) == pmap_kernel() || \ +#define pmap_is_current(pm) ((pm) == kernel_pmap || \ curproc->p_vmspace->vm_map.pmap == (pm)) static uma_zone_t pvzone = NULL; uma_zone_t l2zone; @@ -437,10 +437,10 @@ pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) /* * Copy the kernel's L1 entries to each new L1. */ - if (l1pt != pmap_kernel()->pm_l1->l1_kva) - memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE); + if (l1pt != kernel_pmap->pm_l1->l1_kva) + memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE); - if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0) + if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0) panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); SLIST_INSERT_HEAD(&l1_list, l1, l1_link); TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); @@ -561,7 +561,7 @@ pmap_pte_init_xscale(void) { uint32_t id, type; - id = cpufunc_id(); + id = cpu_ident(); type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { @@ -932,7 +932,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) * to a performance win over time as we don't need to continually * alloc/free. */ - if (l2b->l2b_occupancy > 0 || pm == pmap_kernel()) + if (l2b->l2b_occupancy > 0 || pm == kernel_pmap) return; /* @@ -1002,7 +1002,7 @@ pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) * page tables, we simply fix up the cache-mode here if it's not * correct. 
*/ - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; @@ -1077,9 +1077,9 @@ pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) vm_size_t rest; CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x" - " len 0x%x ", pm, pm == pmap_kernel(), va, len); + " len 0x%x ", pm, pm == kernel_pmap, va, len); - if (pmap_is_current(pm) || pm == pmap_kernel()) { + if (pmap_is_current(pm) || pm == kernel_pmap) { rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); while (len > 0) { if (pmap_has_valid_mapping(pm, va)) { @@ -1100,7 +1100,7 @@ pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, vm_size_t rest; CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x " - "len 0x%x ", pm, pm == pmap_kernel(), va, len); + "len 0x%x ", pm, pm == kernel_pmap, va, len); CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only); if (pmap_is_current(pm)) { @@ -1230,13 +1230,13 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { /* generate a count of the pv_entry uses */ if (pv->pv_flags & PVF_WRITE) { - if (pv->pv_pmap == pmap_kernel()) + if (pv->pv_pmap == kernel_pmap) kwritable++; else if (pv->pv_pmap == pm) uwritable++; writable++; } - if (pv->pv_pmap == pmap_kernel()) + if (pv->pv_pmap == kernel_pmap) kentries++; else { if (pv->pv_pmap == pm) @@ -1248,19 +1248,19 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) * check if the user duplicate mapping has * been removed. */ - if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) || + if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) || (uwritable > 1))) pmwc = 1; TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { /* check for user uncachable conditions - order is important */ - if (pm != pmap_kernel() && - (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) { + if (pm != kernel_pmap && + (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) { if ((uentries > 1 && uwritable) || uwritable > 1) { /* user duplicate mapping */ - if (pv->pv_pmap != pmap_kernel()) + if (pv->pv_pmap != kernel_pmap) pv->pv_flags |= PVF_MWC; if (!(pv->pv_flags & PVF_NC)) { @@ -1279,7 +1279,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) if ((kwritable && (entries || kentries > 1)) || (kwritable > 1) || ((kwritable != writable) && kentries && - (pv->pv_pmap == pmap_kernel() || + (pv->pv_pmap == kernel_pmap || (pv->pv_flags & PVF_WRITE) || (pv->pv_flags & PVF_MWC)))) { @@ -1291,7 +1291,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) } /* kernel and user are cachable */ - if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) && + if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) && (pv->pv_flags & PVF_NC)) { pv->pv_flags &= ~PVF_NC; @@ -1300,8 +1300,8 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) continue; } /* user is no longer sharable and writable */ - if (pm != pmap_kernel() && - (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel()) && + if (pm != kernel_pmap && + (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) && !pmwc && (pv->pv_flags & PVF_NC)) { pv->pv_flags &= ~(PVF_NC | PVF_MWC); @@ -1565,7 +1565,7 @@ vector_page_setprot(int prot) struct l2_bucket *l2b; pt_entry_t *ptep; - l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); + l2b = pmap_get_l2_bucket(kernel_pmap, vector_page); ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; @@ -1603,7 +1603,7 @@ pmap_nuke_pv(struct vm_page *pg, 
pmap_t pm, struct pv_entry *pve) pg->md.pvh_attrs &= ~PVF_REF; else vm_page_aflag_set(pg, PGA_REFERENCED); - if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) || + if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) || (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC))) pmap_fix_cache(pg, pm, 0); else if (pve->pv_flags & PVF_WRITE) { @@ -1972,7 +1972,7 @@ pmap_postinit(void) pl1pt = (pd_entry_t *)va; while (va < eva) { - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); ptep = &l2b->l2b_kva[l2pte_index(va)]; pte = *ptep; pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; @@ -2122,7 +2122,7 @@ pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, struct l2_bucket *l2b; if (ptep) { - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); if (l2b == NULL) panic("pmap_alloc_specials: no l2b for 0x%x", va); @@ -2381,7 +2381,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) if (pap) *pap = pa; - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); ptep = &l2b->l2b_kva[l2pte_index(va)]; *ptep = L2_S_PROTO | pa | cache_mode | @@ -2494,7 +2494,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) void pmap_growkernel(vm_offset_t addr) { - pmap_t kpm = pmap_kernel(); + pmap_t kpm = kernel_pmap; if (addr <= pmap_curmaxkvaddr) return; /* we are OK */ @@ -2654,9 +2654,9 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) (uint32_t) va, (uint32_t) pa)); - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); if (l2b == NULL) - l2b = pmap_grow_l2_bucket(pmap_kernel(), va); + l2b = pmap_grow_l2_bucket(kernel_pmap, va); KASSERT(l2b != NULL, ("No L2 Bucket")); pte = &l2b->l2b_kva[l2pte_index(va)]; opte = *pte; @@ -2690,11 +2690,11 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { if ((pve = pmap_get_pv_entry()) == NULL) panic("pmap_kenter_internal: no pv entries"); - PMAP_LOCK(pmap_kernel()); - pmap_enter_pv(m, pve, pmap_kernel(), va, + PMAP_LOCK(kernel_pmap); + pmap_enter_pv(m, pve, kernel_pmap, va, PVF_WRITE | PVF_UNMAN); - pmap_fix_cache(m, pmap_kernel(), va); - PMAP_UNLOCK(pmap_kernel()); + pmap_fix_cache(m, kernel_pmap, va); + PMAP_UNLOCK(kernel_pmap); } else { m->md.pv_kva = va; } @@ -2758,7 +2758,7 @@ pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) * at the first use of the new address, or bad things will happen, * as we use one of these addresses in the exception handlers. */ - pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1); + pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); } vm_paddr_t @@ -2780,7 +2780,7 @@ pmap_kremove(vm_offset_t va) vm_page_t m; vm_offset_t pa; - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); if (!l2b) return; KASSERT(l2b != NULL, ("No L2 Bucket")); @@ -2796,11 +2796,11 @@ pmap_kremove(vm_offset_t va) * before the pvzone is initialized. 
*/ rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap_kernel()); + PMAP_LOCK(kernel_pmap); if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && - (pve = pmap_remove_pv(m, pmap_kernel(), va))) + (pve = pmap_remove_pv(m, kernel_pmap, va))) pmap_free_pv_entry(pve); - PMAP_UNLOCK(pmap_kernel()); + PMAP_UNLOCK(kernel_pmap); rw_wunlock(&pvh_global_lock); va = va & ~PAGE_MASK; cpu_dcache_wbinv_range(va, PAGE_SIZE); @@ -3027,7 +3027,7 @@ pmap_remove_all(vm_page_t m) curpm = vmspace_pmap(curproc->p_vmspace); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { if (flush == FALSE && (pv->pv_pmap == curpm || - pv->pv_pmap == pmap_kernel())) + pv->pv_pmap == kernel_pmap)) flush = TRUE; PMAP_LOCK(pv->pv_pmap); @@ -3239,7 +3239,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); - if (pmap == pmap_kernel()) { + if (pmap == kernel_pmap) { l2b = pmap_get_l2_bucket(pmap, va); if (l2b == NULL) l2b = pmap_grow_l2_bucket(pmap, va); @@ -3414,7 +3414,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, /* * Make sure userland mappings get the right permissions */ - if (pmap != pmap_kernel() && va != vector_page) { + if (pmap != kernel_pmap && va != vector_page) { npte |= L2_S_PROT_U; } @@ -3672,9 +3672,9 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) l1pd = pmap->pm_l1->l1_kva[l1idx]; if (l1pte_section_p(l1pd)) { /* - * These should only happen for pmap_kernel() + * These should only happen for kernel_pmap */ - KASSERT(pmap == pmap_kernel(), ("huh")); + KASSERT(pmap == kernel_pmap, ("huh")); /* XXX: what to do about the bits > 32 ? */ if (l1pd & L1_S_SUPERSEC) pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); @@ -4034,7 +4034,7 @@ pmap_use_minicache(vm_offset_t va, vm_size_t size) if (next_bucket > eva) next_bucket = eva; - l2b = pmap_get_l2_bucket(pmap_kernel(), va); + l2b = pmap_get_l2_bucket(kernel_pmap, va); sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; @@ -4137,10 +4137,10 @@ pmap_clean_page(struct pv_entry *pv, boolean_t is_src) if (curthread) pm = vmspace_pmap(curproc->p_vmspace); else - pm = pmap_kernel(); + pm = kernel_pmap; for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { - if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { + if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) { flags |= npv->pv_flags; /* * The page is mapped non-cacheable in diff --git a/sys/arm/arm/swtch-v4.S b/sys/arm/arm/swtch-v4.S new file mode 100644 index 000000000000..6fdbeed197c2 --- /dev/null +++ b/sys/arm/arm/swtch-v4.S @@ -0,0 +1,377 @@ +/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ + +/*- + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (c) 1994-1998 Mark Brinicombe. + * Copyright (c) 1994 Brini. + * All rights reserved. + * + * This code is derived from software written for Brini by Mark Brinicombe + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Brini. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RiscBSD kernel project + * + * cpuswitch.S + * + * cpu switching functions + * + * Created : 15/10/94 + * + */ + +#include "assym.s" +#include "opt_sched.h" + +#include +#include +#include +#include +#include + +__FBSDID("$FreeBSD$"); + + +#define GET_PCPU(tmp, tmp2) \ + ldr tmp, .Lcurpcpu + +#ifdef VFP + .fpu vfp /* allow VFP instructions */ +#endif + +.Lcurpcpu: + .word _C_LABEL(__pcpu) +.Lblocked_lock: + .word _C_LABEL(blocked_lock) + + +#define DOMAIN_CLIENT 0x01 + +.Lcpufuncs: + .word _C_LABEL(cpufuncs) + +/* + * cpu_throw(oldtd, newtd) + * + * Remove current thread state, then select the next thread to run + * and load its state. + * r0 = oldtd + * r1 = newtd + */ +ENTRY(cpu_throw) + mov r5, r1 + + /* + * r0 = oldtd + * r5 = newtd + */ + +#ifdef VFP /* This thread is dying, disable */ + bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ +#endif + + GET_PCPU(r7, r9) + ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */ + + /* Switch to lwp0 context */ + + ldr r9, .Lcpufuncs + mov lr, pc + ldr pc, [r9, #CF_IDCACHE_WBINV_ALL] + ldr r0, [r7, #(PCB_PL1VEC)] + ldr r1, [r7, #(PCB_DACR)] + /* + * r0 = Pointer to L1 slot for vector_page (or NULL) + * r1 = lwp0's DACR + * r5 = lwp0 + * r7 = lwp0's PCB + * r9 = cpufuncs + */ + + /* + * Ensure the vector table is accessible by fixing up lwp0's L1 + */ + cmp r0, #0 /* No need to fixup vector table? */ + ldrne r3, [r0] /* But if yes, fetch current value */ + ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */ + mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */ + cmpne r3, r2 /* Stuffing the same value? */ + strne r2, [r0] /* Store if not. */ + +#ifdef PMAP_INCLUDE_PTE_SYNC + /* + * Need to sync the cache to make sure that last store is + * visible to the MMU. + */ + movne r1, #4 + movne lr, pc + ldrne pc, [r9, #CF_DCACHE_WB_RANGE] +#endif /* PMAP_INCLUDE_PTE_SYNC */ + + /* + * Note: We don't do the same optimisation as cpu_switch() with + * respect to avoiding flushing the TLB if we're switching to + * the same L1 since this process' VM space may be about to go + * away, so we don't want *any* turds left in the TLB. + */ + + /* Switch the memory to the new process */ + ldr r0, [r7, #(PCB_PAGEDIR)] + mov lr, pc + ldr pc, [r9, #CF_CONTEXT_SWITCH] + + GET_PCPU(r6, r4) + /* Hook in a new pcb */ + str r7, [r6, #PC_CURPCB] + /* We have a new curthread now so make a note it */ + str r5, [r6, #PC_CURTHREAD] + + /* Set the new tp */ + ldr r6, [r5, #(TD_MD + MD_TP)] + ldr r4, =ARM_TP_ADDRESS + str r6, [r4] + ldr r6, [r5, #(TD_MD + MD_RAS_START)] + str r6, [r4, #4] /* ARM_RAS_START */ + ldr r6, [r5, #(TD_MD + MD_RAS_END)] + str r6, [r4, #8] /* ARM_RAS_END */ + + /* Restore all the saved registers and exit */ + add r3, r7, #PCB_R4 + ldmia r3, {r4-r12, sp, pc} +END(cpu_throw) + +/* + * cpu_switch(oldtd, newtd, lock) + * + * Save the current thread state, then select the next thread to run + * and load its state. + * r0 = oldtd + * r1 = newtd + * r2 = lock (new lock for old thread) + */ +ENTRY(cpu_switch) + /* Interrupts are disabled. */ + /* Save all the registers in the old thread's pcb. */ + ldr r3, [r0, #(TD_PCB)] + + /* Restore all the saved registers and exit */ + add r3, #(PCB_R4) + stmia r3, {r4-r12, sp, lr, pc} + + mov r6, r2 /* Save the mutex */ + + /* rem: r0 = old lwp */ + /* rem: interrupts are disabled */ + + /* Process is now on a processor. 
*/ + /* We have a new curthread now so make a note it */ + GET_PCPU(r7, r2) + str r1, [r7, #PC_CURTHREAD] + + /* Hook in a new pcb */ + ldr r2, [r1, #TD_PCB] + str r2, [r7, #PC_CURPCB] + + /* Stage two : Save old context */ + + /* Get the user structure for the old thread. */ + ldr r2, [r0, #(TD_PCB)] + mov r4, r0 /* Save the old thread. */ + + /* Store the old tp; userland can change it on armv4. */ + ldr r3, =ARM_TP_ADDRESS + ldr r9, [r3] + str r9, [r0, #(TD_MD + MD_TP)] + ldr r9, [r3, #4] + str r9, [r0, #(TD_MD + MD_RAS_START)] + ldr r9, [r3, #8] + str r9, [r0, #(TD_MD + MD_RAS_END)] + + /* Set the new tp */ + ldr r9, [r1, #(TD_MD + MD_TP)] + str r9, [r3] + ldr r9, [r1, #(TD_MD + MD_RAS_START)] + str r9, [r3, #4] + ldr r9, [r1, #(TD_MD + MD_RAS_END)] + str r9, [r3, #8] + + /* Get the user structure for the new process in r9 */ + ldr r9, [r1, #(TD_PCB)] + + /* rem: r2 = old PCB */ + /* rem: r9 = new PCB */ + /* rem: interrupts are enabled */ + +#ifdef VFP + fmrx r0, fpexc /* If the VFP is enabled */ + tst r0, #(VFPEXC_EN) /* the current thread has */ + movne r1, #1 /* used it, so go save */ + addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */ + blne _C_LABEL(vfp_store) /* and disable the VFP. */ +#endif + + /* r0-r3 now free! */ + + /* Third phase : restore saved context */ + + /* rem: r2 = old PCB */ + /* rem: r9 = new PCB */ + + ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */ + mov r2, #DOMAIN_CLIENT + cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */ + beq .Lcs_context_switched /* Yup. Don't flush cache */ + mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */ + /* + * Get the new L1 table pointer into r11. If we're switching to + * an LWP with the same address space as the outgoing one, we can + * skip the cache purge and the TTB load. + * + * To avoid data dep stalls that would happen anyway, we try + * and get some useful work done in the mean time. + */ + mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */ + ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */ + + teq r10, r11 /* Same L1? */ + cmpeq r0, r5 /* Same DACR? */ + beq .Lcs_context_switched /* yes! */ + + /* + * Definately need to flush the cache. + */ + + ldr r1, .Lcpufuncs + mov lr, pc + ldr pc, [r1, #CF_IDCACHE_WBINV_ALL] + +.Lcs_cache_purge_skipped: + /* rem: r6 = lock */ + /* rem: r9 = new PCB */ + /* rem: r10 = old L1 */ + /* rem: r11 = new L1 */ + + mov r2, #0x00000000 + ldr r7, [r9, #(PCB_PL1VEC)] + + /* + * Ensure the vector table is accessible by fixing up the L1 + */ + cmp r7, #0 /* No need to fixup vector table? */ + ldrne r2, [r7] /* But if yes, fetch current value */ + ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */ + mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */ + cmpne r2, r0 /* Stuffing the same value? */ +#ifndef PMAP_INCLUDE_PTE_SYNC + strne r0, [r7] /* Nope, update it */ +#else + beq .Lcs_same_vector + str r0, [r7] /* Otherwise, update it */ + + /* + * Need to sync the cache to make sure that last store is + * visible to the MMU. + */ + ldr r2, .Lcpufuncs + mov r0, r7 + mov r1, #4 + mov lr, pc + ldr pc, [r2, #CF_DCACHE_WB_RANGE] + +.Lcs_same_vector: +#endif /* PMAP_INCLUDE_PTE_SYNC */ + + cmp r10, r11 /* Switching to the same L1? */ + ldr r10, .Lcpufuncs + beq .Lcs_same_l1 /* Yup. */ + /* + * Do a full context switch, including full TLB flush. + */ + mov r0, r11 + mov lr, pc + ldr pc, [r10, #CF_CONTEXT_SWITCH] + + b .Lcs_context_switched + + /* + * We're switching to a different process in the same L1. 
+ * In this situation, we only need to flush the TLB for the + * vector_page mapping, and even then only if r7 is non-NULL. + */ +.Lcs_same_l1: + cmp r7, #0 + movne r0, #0 /* We *know* vector_page's VA is 0x0 */ + movne lr, pc + ldrne pc, [r10, #CF_TLB_FLUSHID_SE] + +.Lcs_context_switched: + + /* Release the old thread */ + str r6, [r4, #TD_LOCK] + + /* XXXSCW: Safe to re-enable FIQs here */ + + /* rem: r9 = new PCB */ + + /* Restore all the saved registers and exit */ + add r3, r9, #PCB_R4 + ldmia r3, {r4-r12, sp, pc} +END(cpu_switch) diff --git a/sys/arm/arm/swtch-v6.S b/sys/arm/arm/swtch-v6.S new file mode 100644 index 000000000000..ef2156a4f121 --- /dev/null +++ b/sys/arm/arm/swtch-v6.S @@ -0,0 +1,482 @@ +/* $NetBSD: cpuswitch.S,v 1.41 2003/11/15 08:44:18 scw Exp $ */ + +/*- + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (c) 1994-1998 Mark Brinicombe. + * Copyright (c) 1994 Brini. + * All rights reserved. + * + * This code is derived from software written for Brini by Mark Brinicombe + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Brini. + * 4. 
The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * RiscBSD kernel project + * + * cpuswitch.S + * + * cpu switching functions + * + * Created : 15/10/94 + * + */ + +#include "assym.s" +#include "opt_sched.h" + +#include +#include +#include +#include +#include +#include + +__FBSDID("$FreeBSD$"); + +#if defined(SMP) +#define GET_PCPU(tmp, tmp2) \ + mrc CP15_MPIDR(tmp); \ + and tmp, tmp, #0xf; \ + ldr tmp2, .Lcurpcpu+4; \ + mul tmp, tmp, tmp2; \ + ldr tmp2, .Lcurpcpu; \ + add tmp, tmp, tmp2; +#else + +#define GET_PCPU(tmp, tmp2) \ + ldr tmp, .Lcurpcpu +#endif + +#ifdef VFP + .fpu vfp /* allow VFP instructions */ +#endif + +.Lcurpcpu: + .word _C_LABEL(__pcpu) + .word PCPU_SIZE +.Lblocked_lock: + .word _C_LABEL(blocked_lock) + +ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */ + DSB + mcr CP15_TTBR0(r0) /* set the new TTB */ + ISB + mov r0, #(CPU_ASID_KERNEL) + mcr CP15_TLBIASID(r0) /* flush not global TLBs */ + /* + * Flush entire Branch Target Cache because of the branch predictor + * is not architecturally invisible. See ARM Architecture Reference + * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch + * predictors and Requirements for branch predictor maintenance + * operations sections. + * + * QQQ: The predictor is virtually addressed and holds virtual target + * addresses. Therefore, if mapping is changed, the predictor cache + * must be flushed.The flush is part of entire i-cache invalidation + * what is always called when code mapping is changed. So herein, + * it's the only place where standalone predictor flush must be + * executed in kernel (except self modifying code case). + */ + mcr CP15_BPIALL /* flush entire Branch Target Cache */ + DSB + mov pc, lr +END(cpu_context_switch) + +/* + * cpu_throw(oldtd, newtd) + * + * Remove current thread state, then select the next thread to run + * and load its state. + * r0 = oldtd + * r1 = newtd + */ +ENTRY(cpu_throw) + mov r10, r0 /* r10 = oldtd */ + mov r11, r1 /* r11 = newtd */ + +#ifdef VFP /* This thread is dying, disable */ + bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ +#endif + GET_PCPU(r8, r9) /* r8 = current pcpu */ + ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */ + + cmp r10, #0 /* old thread? */ + beq 2f /* no, skip */ + + /* Remove this CPU from the active list. */ + ldr r5, [r8, #PC_CURPMAP] + mov r0, #(PM_ACTIVE) + add r5, r0 /* r5 = old pm_active */ + + /* Compute position and mask. 
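+ * The cpu id selects one bit in the pmap's pm_active bitmap: with more
+ * than one word of CPUs, (cpuid >> 3) & ~3 is the byte offset of the
+ * 32-bit word that holds the bit, and 1 << (cpuid & 31) is the mask
+ * within that word; with a single word only the mask is needed.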
*/ +#if _NCPUWORDS > 1 + lsr r0, r4, #3 + bic r0, #3 + add r5, r0 /* r5 = position in old pm_active */ + mov r2, #1 + and r0, r4, #31 + lsl r2, r0 /* r2 = mask */ +#else + mov r2, #1 + lsl r2, r4 /* r2 = mask */ +#endif + /* Clear cpu from old active list. */ +#ifdef SMP +1: ldrex r0, [r5] + bic r0, r2 + strex r1, r0, [r5] + teq r1, #0 + bne 1b +#else + ldr r0, [r5] + bic r0, r2 + str r0, [r5] +#endif + +2: +#ifdef INVARIANTS + cmp r11, #0 /* new thread? */ + beq badsw1 /* no, panic */ +#endif + ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */ + + /* + * Registers at this point + * r4 = current cpu id + * r7 = new PCB + * r8 = current pcpu + * r11 = newtd + */ + + /* MMU switch to new thread. */ + ldr r0, [r7, #(PCB_PAGEDIR)] +#ifdef INVARIANTS + cmp r0, #0 /* new thread? */ + beq badsw4 /* no, panic */ +#endif + bl _C_LABEL(cpu_context_switch) + + /* + * Set new PMAP as current one. + * Insert cpu to new active list. + */ + + ldr r6, [r11, #(TD_PROC)] /* newtd->proc */ + ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */ + add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ + str r6, [r8, #PC_CURPMAP] /* store to curpmap */ + + mov r0, #PM_ACTIVE + add r6, r0 /* r6 = new pm_active */ + + /* compute position and mask */ +#if _NCPUWORDS > 1 + lsr r0, r4, #3 + bic r0, #3 + add r6, r0 /* r6 = position in new pm_active */ + mov r2, #1 + and r0, r4, #31 + lsl r2, r0 /* r2 = mask */ +#else + mov r2, #1 + lsl r2, r4 /* r2 = mask */ +#endif + /* Set cpu to new active list. */ +#ifdef SMP +1: ldrex r0, [r6] + orr r0, r2 + strex r1, r0, [r6] + teq r1, #0 + bne 1b +#else + ldr r0, [r6] + orr r0, r2 + str r0, [r6] +#endif + /* + * Registers at this point. + * r7 = new PCB + * r8 = current pcpu + * r11 = newtd + * They must match the ones in sw1 position !!! + */ + DMB + b sw1 /* share new thread init with cpu_switch() */ +END(cpu_throw) + +/* + * cpu_switch(oldtd, newtd, lock) + * + * Save the current thread state, then select the next thread to run + * and load its state. + * r0 = oldtd + * r1 = newtd + * r2 = lock (new lock for old thread) + */ +ENTRY(cpu_switch) + /* Interrupts are disabled. */ +#ifdef INVARIANTS + cmp r0, #0 /* old thread? */ + beq badsw2 /* no, panic */ +#endif + /* Save all the registers in the old thread's pcb. */ + ldr r3, [r0, #(TD_PCB)] + add r3, #(PCB_R4) + stmia r3, {r4-r12, sp, lr, pc} + +#ifdef INVARIANTS + cmp r1, #0 /* new thread? */ + beq badsw3 /* no, panic */ +#endif + /* + * Save arguments. Note that we can now use r0-r14 until + * it is time to restore them for the new thread. However, + * some registers are not safe over function call. + */ + mov r9, r2 /* r9 = lock */ + mov r10, r0 /* r10 = oldtd */ + mov r11, r1 /* r11 = newtd */ + + GET_PCPU(r8, r3) /* r8 = current PCPU */ + ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */ + + + +#ifdef VFP + ldr r3, [r10, #(TD_PCB)] + fmrx r0, fpexc /* If the VFP is enabled */ + tst r0, #(VFPEXC_EN) /* the current thread has */ + movne r1, #1 /* used it, so go save */ + addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */ + blne _C_LABEL(vfp_store) /* and disable the VFP. */ +#endif + + /* + * MMU switch. If we're switching to a thread with the same + * address space as the outgoing one, we can skip the MMU switch. + */ + mrc CP15_TTBR0(r1) /* r1 = old TTB */ + ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */ + cmp r0, r1 /* Switching to the TTB? */ + beq sw0 /* same TTB, skip */ + +#ifdef INVARIANTS + cmp r0, #0 /* new thread? 
*/ + beq badsw4 /* no, panic */ +#endif + + bl cpu_context_switch /* new TTB as argument */ + + /* + * Registers at this point + * r7 = new PCB + * r8 = current pcpu + * r9 = lock + * r10 = oldtd + * r11 = newtd + */ + + /* + * Set new PMAP as current one. + * Update active list on PMAPs. + */ + ldr r6, [r11, #TD_PROC] /* newtd->proc */ + ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */ + add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ + + ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */ + str r6, [r8, #PC_CURPMAP] /* and save new one */ + + mov r0, #PM_ACTIVE + add r5, r0 /* r5 = old pm_active */ + add r6, r0 /* r6 = new pm_active */ + + /* Compute position and mask. */ + ldr r4, [r8, #PC_CPUID] +#if _NCPUWORDS > 1 + lsr r0, r4, #3 + bic r0, #3 + add r5, r0 /* r5 = position in old pm_active */ + add r6, r0 /* r6 = position in new pm_active */ + mov r2, #1 + and r0, r4, #31 + lsl r2, r0 /* r2 = mask */ +#else + mov r2, #1 + lsl r2, r4 /* r2 = mask */ +#endif + /* Clear cpu from old active list. */ +#ifdef SMP +1: ldrex r0, [r5] + bic r0, r2 + strex r1, r0, [r5] + teq r1, #0 + bne 1b +#else + ldr r0, [r5] + bic r0, r2 + str r0, [r5] +#endif + /* Set cpu to new active list. */ +#ifdef SMP +1: ldrex r0, [r6] + orr r0, r2 + strex r1, r0, [r6] + teq r1, #0 + bne 1b +#else + ldr r0, [r6] + orr r0, r2 + str r0, [r6] +#endif + +sw0: + /* + * Registers at this point + * r7 = new PCB + * r8 = current pcpu + * r9 = lock + * r10 = oldtd + * r11 = newtd + */ + + /* Change the old thread lock. */ + add r5, r10, #TD_LOCK + DMB +1: ldrex r0, [r5] + strex r1, r9, [r5] + teq r1, #0 + bne 1b + DMB + +sw1: + clrex + /* + * Registers at this point + * r7 = new PCB + * r8 = current pcpu + * r11 = newtd + */ + +#if defined(SMP) && defined(SCHED_ULE) + /* + * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE + * QQQ: What does it mean in reality and why is it done? + */ + ldr r6, =blocked_lock +1: + ldr r3, [r11, #TD_LOCK] /* atomic write regular read */ + cmp r3, r6 + beq 1b +#endif + /* Set the new tls */ + ldr r0, [r11, #(TD_MD + MD_TP)] + mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */ + + /* We have a new curthread now so make a note it */ + str r11, [r8, #PC_CURTHREAD] + mcr CP15_TPIDRPRW(r11) + + /* store pcb in per cpu structure */ + str r7, [r8, #PC_CURPCB] + + /* + * Restore all saved registers and return. Note that some saved + * registers can be changed when either cpu_fork(), cpu_set_upcall(), + * cpu_set_fork_handler(), or makectx() was called. 
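+ * (The ldmia below reloads r4-r12 and sp from the new thread's PCB and
+ * loads pc last, so execution resumes in the context that was saved
+ * there.)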
+ */ + add r3, r7, #PCB_R4 + ldmia r3, {r4-r12, sp, pc} + +#ifdef INVARIANTS +badsw1: + ldr r0, =sw1_panic_str + bl _C_LABEL(panic) +1: nop + b 1b + +badsw2: + ldr r0, =sw2_panic_str + bl _C_LABEL(panic) +1: nop + b 1b + +badsw3: + ldr r0, =sw3_panic_str + bl _C_LABEL(panic) +1: nop + b 1b + +badsw4: + ldr r0, =sw4_panic_str + bl _C_LABEL(panic) +1: nop + b 1b + +sw1_panic_str: + .asciz "cpu_throw: no newthread supplied.\n" +sw2_panic_str: + .asciz "cpu_switch: no curthread supplied.\n" +sw3_panic_str: + .asciz "cpu_switch: no newthread supplied.\n" +sw4_panic_str: + .asciz "cpu_switch: new pagedir is NULL.\n" +#endif +END(cpu_switch) diff --git a/sys/arm/arm/swtch.S b/sys/arm/arm/swtch.S index d7571ec37071..e70532a447e2 100644 --- a/sys/arm/arm/swtch.S +++ b/sys/arm/arm/swtch.S @@ -79,9 +79,7 @@ */ #include "assym.s" -#include "opt_sched.h" -#include #include #include #include @@ -89,708 +87,10 @@ __FBSDID("$FreeBSD$"); -#if __ARM_ARCH >= 6 && defined(SMP) -#define GET_PCPU(tmp, tmp2) \ - mrc p15, 0, tmp, c0, c0, 5; \ - and tmp, tmp, #0xf; \ - ldr tmp2, .Lcurpcpu+4; \ - mul tmp, tmp, tmp2; \ - ldr tmp2, .Lcurpcpu; \ - add tmp, tmp, tmp2; -#else - -#define GET_PCPU(tmp, tmp2) \ - ldr tmp, .Lcurpcpu -#endif - #ifdef VFP .fpu vfp /* allow VFP instructions */ #endif -.Lcurpcpu: - .word _C_LABEL(__pcpu) - .word PCPU_SIZE -.Lblocked_lock: - .word _C_LABEL(blocked_lock) - - -#if __ARM_ARCH < 6 - -#define DOMAIN_CLIENT 0x01 - -.Lcpufuncs: - .word _C_LABEL(cpufuncs) - -/* - * cpu_throw(oldtd, newtd) - * - * Remove current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - */ -ENTRY(cpu_throw) - mov r5, r1 - - /* - * r0 = oldtd - * r5 = newtd - */ - -#ifdef VFP /* This thread is dying, disable */ - bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ -#endif - - GET_PCPU(r7, r9) - ldr r7, [r5, #(TD_PCB)] /* r7 = new thread's PCB */ - - /* Switch to lwp0 context */ - - ldr r9, .Lcpufuncs -#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT) - mov lr, pc - ldr pc, [r9, #CF_IDCACHE_WBINV_ALL] -#endif - ldr r0, [r7, #(PCB_PL1VEC)] - ldr r1, [r7, #(PCB_DACR)] - /* - * r0 = Pointer to L1 slot for vector_page (or NULL) - * r1 = lwp0's DACR - * r5 = lwp0 - * r7 = lwp0's PCB - * r9 = cpufuncs - */ - - /* - * Ensure the vector table is accessible by fixing up lwp0's L1 - */ - cmp r0, #0 /* No need to fixup vector table? */ - ldrne r3, [r0] /* But if yes, fetch current value */ - ldrne r2, [r7, #(PCB_L1VEC)] /* Fetch new vector_page value */ - mcr p15, 0, r1, c3, c0, 0 /* Update DACR for lwp0's context */ - cmpne r3, r2 /* Stuffing the same value? */ - strne r2, [r0] /* Store if not. */ - -#ifdef PMAP_INCLUDE_PTE_SYNC - /* - * Need to sync the cache to make sure that last store is - * visible to the MMU. - */ - movne r1, #4 - movne lr, pc - ldrne pc, [r9, #CF_DCACHE_WB_RANGE] -#endif /* PMAP_INCLUDE_PTE_SYNC */ - - /* - * Note: We don't do the same optimisation as cpu_switch() with - * respect to avoiding flushing the TLB if we're switching to - * the same L1 since this process' VM space may be about to go - * away, so we don't want *any* turds left in the TLB. 
- */ - - /* Switch the memory to the new process */ - ldr r0, [r7, #(PCB_PAGEDIR)] - mov lr, pc - ldr pc, [r9, #CF_CONTEXT_SWITCH] - - GET_PCPU(r6, r4) - /* Hook in a new pcb */ - str r7, [r6, #PC_CURPCB] - /* We have a new curthread now so make a note it */ - str r5, [r6, #PC_CURTHREAD] -#if __ARM_ARCH >= 6 - mcr p15, 0, r5, c13, c0, 4 -#endif - /* Set the new tp */ - ldr r6, [r5, #(TD_MD + MD_TP)] -#if __ARM_ARCH >= 6 - mcr p15, 0, r6, c13, c0, 3 -#else - ldr r4, =ARM_TP_ADDRESS - str r6, [r4] - ldr r6, [r5, #(TD_MD + MD_RAS_START)] - str r6, [r4, #4] /* ARM_RAS_START */ - ldr r6, [r5, #(TD_MD + MD_RAS_END)] - str r6, [r4, #8] /* ARM_RAS_END */ -#endif - /* Restore all the saved registers and exit */ - add r3, r7, #PCB_R4 - ldmia r3, {r4-r12, sp, pc} -END(cpu_throw) - -/* - * cpu_switch(oldtd, newtd, lock) - * - * Save the current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - * r2 = lock (new lock for old thread) - */ -ENTRY(cpu_switch) - /* Interrupts are disabled. */ - /* Save all the registers in the old thread's pcb. */ - ldr r3, [r0, #(TD_PCB)] - - /* Restore all the saved registers and exit */ - add r3, #(PCB_R4) - stmia r3, {r4-r12, sp, lr, pc} - - mov r6, r2 /* Save the mutex */ - - /* rem: r0 = old lwp */ - /* rem: interrupts are disabled */ - - /* Process is now on a processor. */ - /* We have a new curthread now so make a note it */ - GET_PCPU(r7, r2) - str r1, [r7, #PC_CURTHREAD] -#if __ARM_ARCH >= 6 - mcr p15, 0, r1, c13, c0, 4 -#endif - - /* Hook in a new pcb */ - ldr r2, [r1, #TD_PCB] - str r2, [r7, #PC_CURPCB] - - /* Stage two : Save old context */ - - /* Get the user structure for the old thread. */ - ldr r2, [r0, #(TD_PCB)] - mov r4, r0 /* Save the old thread. */ - -#if __ARM_ARCH >= 6 - /* - * Set new tp. No need to store the old one first, userland can't - * change it directly on armv6. - */ - ldr r9, [r1, #(TD_MD + MD_TP)] - mcr p15, 0, r9, c13, c0, 3 -#else - /* Store the old tp; userland can change it on armv4. */ - ldr r3, =ARM_TP_ADDRESS - ldr r9, [r3] - str r9, [r0, #(TD_MD + MD_TP)] - ldr r9, [r3, #4] - str r9, [r0, #(TD_MD + MD_RAS_START)] - ldr r9, [r3, #8] - str r9, [r0, #(TD_MD + MD_RAS_END)] - - /* Set the new tp */ - ldr r9, [r1, #(TD_MD + MD_TP)] - str r9, [r3] - ldr r9, [r1, #(TD_MD + MD_RAS_START)] - str r9, [r3, #4] - ldr r9, [r1, #(TD_MD + MD_RAS_END)] - str r9, [r3, #8] -#endif - - /* Get the user structure for the new process in r9 */ - ldr r9, [r1, #(TD_PCB)] - - /* rem: r2 = old PCB */ - /* rem: r9 = new PCB */ - /* rem: interrupts are enabled */ - -#ifdef VFP - fmrx r0, fpexc /* If the VFP is enabled */ - tst r0, #(VFPEXC_EN) /* the current thread has */ - movne r1, #1 /* used it, so go save */ - addne r0, r2, #(PCB_VFPSTATE) /* the state into the PCB */ - blne _C_LABEL(vfp_store) /* and disable the VFP. */ -#endif - - /* r0-r3 now free! */ - - /* Third phase : restore saved context */ - - /* rem: r2 = old PCB */ - /* rem: r9 = new PCB */ - - ldr r5, [r9, #(PCB_DACR)] /* r5 = new DACR */ - mov r2, #DOMAIN_CLIENT - cmp r5, r2, lsl #(PMAP_DOMAIN_KERNEL * 2) /* Sw to kernel thread? */ - beq .Lcs_context_switched /* Yup. Don't flush cache */ - mrc p15, 0, r0, c3, c0, 0 /* r0 = old DACR */ - /* - * Get the new L1 table pointer into r11. If we're switching to - * an LWP with the same address space as the outgoing one, we can - * skip the cache purge and the TTB load. - * - * To avoid data dep stalls that would happen anyway, we try - * and get some useful work done in the mean time. 
- */ - mrc p15, 0, r10, c2, c0, 0 /* r10 = old L1 */ - ldr r11, [r9, #(PCB_PAGEDIR)] /* r11 = new L1 */ - - teq r10, r11 /* Same L1? */ - cmpeq r0, r5 /* Same DACR? */ - beq .Lcs_context_switched /* yes! */ - -#if !defined(CPU_ARM11) && !defined(CPU_CORTEXA) && !defined(CPU_MV_PJ4B) && !defined(CPU_KRAIT) - /* - * Definately need to flush the cache. - */ - - ldr r1, .Lcpufuncs - mov lr, pc - ldr pc, [r1, #CF_IDCACHE_WBINV_ALL] -#endif -.Lcs_cache_purge_skipped: - /* rem: r6 = lock */ - /* rem: r9 = new PCB */ - /* rem: r10 = old L1 */ - /* rem: r11 = new L1 */ - - mov r2, #0x00000000 - ldr r7, [r9, #(PCB_PL1VEC)] - - /* - * Ensure the vector table is accessible by fixing up the L1 - */ - cmp r7, #0 /* No need to fixup vector table? */ - ldrne r2, [r7] /* But if yes, fetch current value */ - ldrne r0, [r9, #(PCB_L1VEC)] /* Fetch new vector_page value */ - mcr p15, 0, r5, c3, c0, 0 /* Update DACR for new context */ - cmpne r2, r0 /* Stuffing the same value? */ -#ifndef PMAP_INCLUDE_PTE_SYNC - strne r0, [r7] /* Nope, update it */ -#else - beq .Lcs_same_vector - str r0, [r7] /* Otherwise, update it */ - - /* - * Need to sync the cache to make sure that last store is - * visible to the MMU. - */ - ldr r2, .Lcpufuncs - mov r0, r7 - mov r1, #4 - mov lr, pc - ldr pc, [r2, #CF_DCACHE_WB_RANGE] - -.Lcs_same_vector: -#endif /* PMAP_INCLUDE_PTE_SYNC */ - - cmp r10, r11 /* Switching to the same L1? */ - ldr r10, .Lcpufuncs - beq .Lcs_same_l1 /* Yup. */ - /* - * Do a full context switch, including full TLB flush. - */ - mov r0, r11 - mov lr, pc - ldr pc, [r10, #CF_CONTEXT_SWITCH] - - b .Lcs_context_switched - - /* - * We're switching to a different process in the same L1. - * In this situation, we only need to flush the TLB for the - * vector_page mapping, and even then only if r7 is non-NULL. - */ -.Lcs_same_l1: - cmp r7, #0 - movne r0, #0 /* We *know* vector_page's VA is 0x0 */ - movne lr, pc - ldrne pc, [r10, #CF_TLB_FLUSHID_SE] - -.Lcs_context_switched: - - /* Release the old thread */ - str r6, [r4, #TD_LOCK] -#if defined(SCHED_ULE) && defined(SMP) - ldr r6, .Lblocked_lock - GET_CURTHREAD_PTR(r3) -1: - ldr r4, [r3, #TD_LOCK] - cmp r4, r6 - beq 1b -#endif - - /* XXXSCW: Safe to re-enable FIQs here */ - - /* rem: r9 = new PCB */ - - /* Restore all the saved registers and exit */ - add r3, r9, #PCB_R4 - ldmia r3, {r4-r12, sp, pc} -END(cpu_switch) - - -#else /* __ARM_ARCH < 6 */ -#include - -ENTRY(cpu_context_switch) /* QQQ: What about macro instead of function? */ - DSB - mcr CP15_TTBR0(r0) /* set the new TTB */ - ISB - mov r0, #(CPU_ASID_KERNEL) - mcr CP15_TLBIASID(r0) /* flush not global TLBs */ - /* - * Flush entire Branch Target Cache because of the branch predictor - * is not architecturally invisible. See ARM Architecture Reference - * Manual ARMv7-A and ARMv7-R edition, page B2-1264(65), Branch - * predictors and Requirements for branch predictor maintenance - * operations sections. - * - * QQQ: The predictor is virtually addressed and holds virtual target - * addresses. Therefore, if mapping is changed, the predictor cache - * must be flushed.The flush is part of entire i-cache invalidation - * what is always called when code mapping is changed. So herein, - * it's the only place where standalone predictor flush must be - * executed in kernel (except self modifying code case). 
- */ - mcr CP15_BPIALL /* and flush entire Branch Target Cache */ - DSB - mov pc, lr -END(cpu_context_switch) - -/* - * cpu_throw(oldtd, newtd) - * - * Remove current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - */ -ENTRY(cpu_throw) - mov r10, r0 /* r10 = oldtd */ - mov r11, r1 /* r11 = newtd */ - -#ifdef VFP /* This thread is dying, disable */ - bl _C_LABEL(vfp_discard) /* VFP without preserving state. */ -#endif - GET_PCPU(r8, r9) /* r8 = current pcpu */ - ldr r4, [r8, #PC_CPUID] /* r4 = current cpu id */ - - cmp r10, #0 /* old thread? */ - beq 2f /* no, skip */ - - /* Remove this CPU from the active list. */ - ldr r5, [r8, #PC_CURPMAP] - mov r0, #(PM_ACTIVE) - add r5, r0 /* r5 = old pm_active */ - - /* Compute position and mask. */ -#if _NCPUWORDS > 1 - lsr r0, r4, #3 - bic r0, #3 - add r5, r0 /* r5 = position in old pm_active */ - mov r2, #1 - and r0, r4, #31 - lsl r2, r0 /* r2 = mask */ -#else - mov r2, #1 - lsl r2, r4 /* r2 = mask */ -#endif - /* Clear cpu from old active list. */ -#ifdef SMP -1: ldrex r0, [r5] - bic r0, r2 - strex r1, r0, [r5] - teq r1, #0 - bne 1b -#else - ldr r0, [r5] - bic r0, r2 - str r0, [r5] -#endif - -2: -#ifdef INVARIANTS - cmp r11, #0 /* new thread? */ - beq badsw1 /* no, panic */ -#endif - ldr r7, [r11, #(TD_PCB)] /* r7 = new PCB */ - - /* - * Registers at this point - * r4 = current cpu id - * r7 = new PCB - * r8 = current pcpu - * r11 = newtd - */ - - /* MMU switch to new thread. */ - ldr r0, [r7, #(PCB_PAGEDIR)] -#ifdef INVARIANTS - cmp r0, #0 /* new thread? */ - beq badsw4 /* no, panic */ -#endif - bl _C_LABEL(cpu_context_switch) - - /* - * Set new PMAP as current one. - * Insert cpu to new active list. - */ - - ldr r6, [r11, #(TD_PROC)] /* newtd->proc */ - ldr r6, [r6, #(P_VMSPACE)] /* newtd->proc->vmspace */ - add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ - str r6, [r8, #PC_CURPMAP] /* store to curpmap */ - - mov r0, #PM_ACTIVE - add r6, r0 /* r6 = new pm_active */ - - /* compute position and mask */ -#if _NCPUWORDS > 1 - lsr r0, r4, #3 - bic r0, #3 - add r6, r0 /* r6 = position in new pm_active */ - mov r2, #1 - and r0, r4, #31 - lsl r2, r0 /* r2 = mask */ -#else - mov r2, #1 - lsl r2, r4 /* r2 = mask */ -#endif - /* Set cpu to new active list. */ -#ifdef SMP -1: ldrex r0, [r6] - orr r0, r2 - strex r1, r0, [r6] - teq r1, #0 - bne 1b -#else - ldr r0, [r6] - orr r0, r2 - str r0, [r6] -#endif - /* - * Registers at this point. - * r7 = new PCB - * r8 = current pcpu - * r11 = newtd - * They must match the ones in sw1 position !!! - */ - DMB - b sw1 /* share new thread init with cpu_switch() */ -END(cpu_throw) - -/* - * cpu_switch(oldtd, newtd, lock) - * - * Save the current thread state, then select the next thread to run - * and load its state. - * r0 = oldtd - * r1 = newtd - * r2 = lock (new lock for old thread) - */ -ENTRY(cpu_switch) - /* Interrupts are disabled. */ -#ifdef INVARIANTS - cmp r0, #0 /* old thread? */ - beq badsw2 /* no, panic */ -#endif - /* Save all the registers in the old thread's pcb. */ - ldr r3, [r0, #(TD_PCB)] - add r3, #(PCB_R4) - stmia r3, {r4-r12, sp, lr, pc} - -#ifdef INVARIANTS - cmp r1, #0 /* new thread? */ - beq badsw3 /* no, panic */ -#endif - /* - * Save arguments. Note that we can now use r0-r14 until - * it is time to restore them for the new thread. However, - * some registers are not safe over function call. 
- */ - mov r9, r2 /* r9 = lock */ - mov r10, r0 /* r10 = oldtd */ - mov r11, r1 /* r11 = newtd */ - - GET_PCPU(r8, r3) /* r8 = current PCPU */ - ldr r7, [r11, #(TD_PCB)] /* r7 = newtd->td_pcb */ - - - -#ifdef VFP - ldr r3, [r10, #(TD_PCB)] - fmrx r0, fpexc /* If the VFP is enabled */ - tst r0, #(VFPEXC_EN) /* the current thread has */ - movne r1, #1 /* used it, so go save */ - addne r0, r3, #(PCB_VFPSTATE) /* the state into the PCB */ - blne _C_LABEL(vfp_store) /* and disable the VFP. */ -#endif - - /* - * MMU switch. If we're switching to a thread with the same - * address space as the outgoing one, we can skip the MMU switch. - */ - mrc CP15_TTBR0(r1) /* r1 = old TTB */ - ldr r0, [r7, #(PCB_PAGEDIR)] /* r0 = new TTB */ - cmp r0, r1 /* Switching to the TTB? */ - beq sw0 /* same TTB, skip */ - -#ifdef INVARIANTS - cmp r0, #0 /* new thread? */ - beq badsw4 /* no, panic */ -#endif - - bl cpu_context_switch /* new TTB as argument */ - - /* - * Registers at this point - * r7 = new PCB - * r8 = current pcpu - * r9 = lock - * r10 = oldtd - * r11 = newtd - */ - - /* - * Set new PMAP as current one. - * Update active list on PMAPs. - */ - ldr r6, [r11, #TD_PROC] /* newtd->proc */ - ldr r6, [r6, #P_VMSPACE] /* newtd->proc->vmspace */ - add r6, #VM_PMAP /* newtd->proc->vmspace->pmap */ - - ldr r5, [r8, #PC_CURPMAP] /* get old curpmap */ - str r6, [r8, #PC_CURPMAP] /* and save new one */ - - mov r0, #PM_ACTIVE - add r5, r0 /* r5 = old pm_active */ - add r6, r0 /* r6 = new pm_active */ - - /* Compute position and mask. */ - ldr r4, [r8, #PC_CPUID] -#if _NCPUWORDS > 1 - lsr r0, r4, #3 - bic r0, #3 - add r5, r0 /* r5 = position in old pm_active */ - add r6, r0 /* r6 = position in new pm_active */ - mov r2, #1 - and r0, r4, #31 - lsl r2, r0 /* r2 = mask */ -#else - mov r2, #1 - lsl r2, r4 /* r2 = mask */ -#endif - /* Clear cpu from old active list. */ -#ifdef SMP -1: ldrex r0, [r5] - bic r0, r2 - strex r1, r0, [r5] - teq r1, #0 - bne 1b -#else - ldr r0, [r5] - bic r0, r2 - str r0, [r5] -#endif - /* Set cpu to new active list. */ -#ifdef SMP -1: ldrex r0, [r6] - orr r0, r2 - strex r1, r0, [r6] - teq r1, #0 - bne 1b -#else - ldr r0, [r6] - orr r0, r2 - str r0, [r6] -#endif - -sw0: - /* - * Registers at this point - * r7 = new PCB - * r8 = current pcpu - * r9 = lock - * r10 = oldtd - * r11 = newtd - */ - - /* Change the old thread lock. */ - add r5, r10, #TD_LOCK - DMB -1: ldrex r0, [r5] - strex r1, r9, [r5] - teq r1, #0 - bne 1b - DMB - -sw1: - clrex - /* - * Registers at this point - * r7 = new PCB - * r8 = current pcpu - * r11 = newtd - */ - -#if defined(SMP) && defined(SCHED_ULE) - /* - * 386 and amd64 do the blocked lock test only for SMP and SCHED_ULE - * QQQ: What does it mean in reality and why is it done? - */ - ldr r6, =blocked_lock -1: - ldr r3, [r11, #TD_LOCK] /* atomic write regular read */ - cmp r3, r6 - beq 1b -#endif - /* Set the new tls */ - ldr r0, [r11, #(TD_MD + MD_TP)] - mcr CP15_TPIDRURO(r0) /* write tls thread reg 2 */ - - /* We have a new curthread now so make a note it */ - str r11, [r8, #PC_CURTHREAD] - mcr CP15_TPIDRPRW(r11) - - /* store pcb in per cpu structure */ - str r7, [r8, #PC_CURPCB] - - /* - * Restore all saved registers and return. Note that some saved - * registers can be changed when either cpu_fork(), cpu_set_upcall(), - * cpu_set_fork_handler(), or makectx() was called. 
- */ - add r3, r7, #PCB_R4 - ldmia r3, {r4-r12, sp, pc} - -#ifdef INVARIANTS -badsw1: - ldr r0, =sw1_panic_str - bl _C_LABEL(panic) -1: nop - b 1b - -badsw2: - ldr r0, =sw2_panic_str - bl _C_LABEL(panic) -1: nop - b 1b - -badsw3: - ldr r0, =sw3_panic_str - bl _C_LABEL(panic) -1: nop - b 1b - -badsw4: - ldr r0, =sw4_panic_str - bl _C_LABEL(panic) -1: nop - b 1b - -sw1_panic_str: - .asciz "cpu_throw: no newthread supplied.\n" -sw2_panic_str: - .asciz "cpu_switch: no curthread supplied.\n" -sw3_panic_str: - .asciz "cpu_switch: no newthread supplied.\n" -sw4_panic_str: - .asciz "cpu_switch: new pagedir is NULL.\n" -#endif -END(cpu_switch) - - -#endif /* __ARM_ARCH < 6 */ - ENTRY(savectx) stmfd sp!, {lr} sub sp, sp, #4 diff --git a/sys/arm/include/cpufunc.h b/sys/arm/include/cpufunc.h index 8bbb1dd7491c..aad0febfa470 100644 --- a/sys/arm/include/cpufunc.h +++ b/sys/arm/include/cpufunc.h @@ -60,23 +60,17 @@ struct cpu_functions { /* CPU functions */ - u_int (*cf_id) (void); void (*cf_cpwait) (void); /* MMU functions */ u_int (*cf_control) (u_int bic, u_int eor); - void (*cf_domains) (u_int domains); void (*cf_setttb) (u_int ttb); - u_int (*cf_faultstatus) (void); - u_int (*cf_faultaddress) (void); /* TLB functions */ void (*cf_tlb_flushID) (void); void (*cf_tlb_flushID_SE) (u_int va); - void (*cf_tlb_flushI) (void); - void (*cf_tlb_flushI_SE) (u_int va); void (*cf_tlb_flushD) (void); void (*cf_tlb_flushD_SE) (u_int va); @@ -155,18 +149,12 @@ struct cpu_functions { /* Other functions */ - void (*cf_flush_prefetchbuf) (void); void (*cf_drain_writebuf) (void); - void (*cf_flush_brnchtgt_C) (void); - void (*cf_flush_brnchtgt_E) (u_int va); void (*cf_sleep) (int mode); /* Soft functions */ - int (*cf_dataabt_fixup) (void *arg); - int (*cf_prefetchabt_fixup) (void *arg); - void (*cf_context_switch) (void); void (*cf_setup) (void); @@ -175,69 +163,16 @@ struct cpu_functions { extern struct cpu_functions cpufuncs; extern u_int cputype; -#define cpu_ident() cpufuncs.cf_id() #define cpu_cpwait() cpufuncs.cf_cpwait() #define cpu_control(c, e) cpufuncs.cf_control(c, e) -#define cpu_domains(d) cpufuncs.cf_domains(d) #define cpu_setttb(t) cpufuncs.cf_setttb(t) -#define cpu_faultstatus() cpufuncs.cf_faultstatus() -#define cpu_faultaddress() cpufuncs.cf_faultaddress() - -#ifndef SMP #define cpu_tlb_flushID() cpufuncs.cf_tlb_flushID() #define cpu_tlb_flushID_SE(e) cpufuncs.cf_tlb_flushID_SE(e) -#define cpu_tlb_flushI() cpufuncs.cf_tlb_flushI() -#define cpu_tlb_flushI_SE(e) cpufuncs.cf_tlb_flushI_SE(e) #define cpu_tlb_flushD() cpufuncs.cf_tlb_flushD() #define cpu_tlb_flushD_SE(e) cpufuncs.cf_tlb_flushD_SE(e) -#else -void tlb_broadcast(int); - -#if defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT) -#define TLB_BROADCAST /* No need to explicitely send an IPI */ -#else -#define TLB_BROADCAST tlb_broadcast(7) -#endif - -#define cpu_tlb_flushID() do { \ - cpufuncs.cf_tlb_flushID(); \ - TLB_BROADCAST; \ -} while(0) - -#define cpu_tlb_flushID_SE(e) do { \ - cpufuncs.cf_tlb_flushID_SE(e); \ - TLB_BROADCAST; \ -} while(0) - - -#define cpu_tlb_flushI() do { \ - cpufuncs.cf_tlb_flushI(); \ - TLB_BROADCAST; \ -} while(0) - - -#define cpu_tlb_flushI_SE(e) do { \ - cpufuncs.cf_tlb_flushI_SE(e); \ - TLB_BROADCAST; \ -} while(0) - - -#define cpu_tlb_flushD() do { \ - cpufuncs.cf_tlb_flushD(); \ - TLB_BROADCAST; \ -} while(0) - - -#define cpu_tlb_flushD_SE(e) do { \ - cpufuncs.cf_tlb_flushD_SE(e); \ - TLB_BROADCAST; \ -} while(0) - -#endif - #define cpu_icache_sync_all() cpufuncs.cf_icache_sync_all() #define 
cpu_icache_sync_range(a, s) cpufuncs.cf_icache_sync_range((a), (s)) @@ -255,19 +190,9 @@ void tlb_broadcast(int); #define cpu_l2cache_wbinv_range(a, s) cpufuncs.cf_l2cache_wbinv_range((a), (s)) #define cpu_l2cache_drain_writebuf() cpufuncs.cf_l2cache_drain_writebuf() -#define cpu_flush_prefetchbuf() cpufuncs.cf_flush_prefetchbuf() #define cpu_drain_writebuf() cpufuncs.cf_drain_writebuf() -#define cpu_flush_brnchtgt_C() cpufuncs.cf_flush_brnchtgt_C() -#define cpu_flush_brnchtgt_E(e) cpufuncs.cf_flush_brnchtgt_E(e) - #define cpu_sleep(m) cpufuncs.cf_sleep(m) -#define cpu_dataabt_fixup(a) cpufuncs.cf_dataabt_fixup(a) -#define cpu_prefetchabt_fixup(a) cpufuncs.cf_prefetchabt_fixup(a) -#define ABORT_FIXUP_OK 0 /* fixup succeeded */ -#define ABORT_FIXUP_FAILED 1 /* fixup failed */ -#define ABORT_FIXUP_RETURN 2 /* abort handler should return */ - #define cpu_setup() cpufuncs.cf_setup() int set_cpufuncs (void); @@ -275,15 +200,11 @@ int set_cpufuncs (void); #define ARCHITECTURE_NOT_SUPPORTED 2 /* not known */ void cpufunc_nullop (void); -int cpufunc_null_fixup (void *); -int early_abort_fixup (void *); -int late_abort_fixup (void *); -u_int cpufunc_id (void); -u_int cpufunc_cpuid (void); +u_int cpu_ident (void); u_int cpufunc_control (u_int clear, u_int bic); -void cpufunc_domains (u_int domains); -u_int cpufunc_faultstatus (void); -u_int cpufunc_faultaddress (void); +void cpu_domains (u_int domains); +u_int cpu_faultstatus (void); +u_int cpu_faultaddress (void); u_int cpu_pfr (int); #if defined(CPU_FA526) @@ -291,10 +212,7 @@ void fa526_setup (void); void fa526_setttb (u_int ttb); void fa526_context_switch (void); void fa526_cpu_sleep (int); -void fa526_tlb_flushI_SE (u_int); void fa526_tlb_flushID_SE (u_int); -void fa526_flush_prefetchbuf (void); -void fa526_flush_brnchtgt_E (u_int); void fa526_icache_sync_all (void); void fa526_icache_sync_range(vm_offset_t start, vm_size_t end); @@ -307,11 +225,13 @@ void fa526_idcache_wbinv_range(vm_offset_t start, vm_size_t end); #endif -#ifdef CPU_ARM9 +#if defined(CPU_ARM9) || defined(CPU_ARM9E) void arm9_setttb (u_int); - void arm9_tlb_flushID_SE (u_int va); +void arm9_context_switch (void); +#endif +#if defined(CPU_ARM9) void arm9_icache_sync_all (void); void arm9_icache_sync_range (vm_offset_t, vm_size_t); @@ -323,8 +243,6 @@ void arm9_dcache_wb_range (vm_offset_t, vm_size_t); void arm9_idcache_wbinv_all (void); void arm9_idcache_wbinv_range (vm_offset_t, vm_size_t); -void arm9_context_switch (void); - void arm9_setup (void); extern unsigned arm9_dcache_sets_max; @@ -334,11 +252,6 @@ extern unsigned arm9_dcache_index_inc; #endif #if defined(CPU_ARM9E) -void arm10_tlb_flushID_SE (u_int); -void arm10_tlb_flushI_SE (u_int); - -void arm10_context_switch (void); - void arm10_setup (void); u_int sheeva_control_ext (u_int, u_int); @@ -390,8 +303,6 @@ void pj4bv7_setup (void); #if defined(CPU_ARM1176) void arm11_tlb_flushID (void); void arm11_tlb_flushID_SE (u_int); -void arm11_tlb_flushI (void); -void arm11_tlb_flushI_SE (u_int); void arm11_tlb_flushD (void); void arm11_tlb_flushD_SE (u_int va); @@ -409,7 +320,6 @@ void arm11x6_setttb (u_int); void arm11x6_idcache_wbinv_all (void); void arm11x6_dcache_wbinv_all (void); void arm11x6_icache_sync_all (void); -void arm11x6_flush_prefetchbuf (void); void arm11x6_icache_sync_range (vm_offset_t, vm_size_t); void arm11x6_idcache_wbinv_range (vm_offset_t, vm_size_t); void arm11x6_setup (void); @@ -438,7 +348,6 @@ void armv5_ec_idcache_wbinv_range(vm_offset_t, vm_size_t); defined(CPU_XSCALE_80219) || 
defined(CPU_XSCALE_81342) void armv4_tlb_flushID (void); -void armv4_tlb_flushI (void); void armv4_tlb_flushD (void); void armv4_tlb_flushD_SE (u_int va); diff --git a/sys/arm/include/param.h b/sys/arm/include/param.h index bbe9bcb08325..384891daf36a 100644 --- a/sys/arm/include/param.h +++ b/sys/arm/include/param.h @@ -110,7 +110,6 @@ #define PAGE_SHIFT 12 #define PAGE_SIZE (1 << PAGE_SHIFT) /* Page size */ #define PAGE_MASK (PAGE_SIZE - 1) -#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t))) #define PDR_SHIFT 20 /* log2(NBPDR) */ #define NBPDR (1 << PDR_SHIFT) diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h index beaf638a179b..d5223845fb02 100644 --- a/sys/arm/include/pmap-v6.h +++ b/sys/arm/include/pmap-v6.h @@ -216,28 +216,8 @@ vm_paddr_t pmap_preboot_get_pages(u_int ); void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int ); vm_offset_t pmap_preboot_reserve_pages(u_int ); vm_offset_t pmap_preboot_get_vpages(u_int ); -void pmap_preboot_map_attr(vm_paddr_t , vm_offset_t , vm_size_t , - int , int ); -static __inline void -pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, - vm_size_t size, int prot, int cache) -{ - pmap_preboot_map_attr(pa, va, size, prot, cache); -} - -/* - * This structure is used by machine-dependent code to describe - * static mappings of devices, created at bootstrap time. - */ -struct pmap_devmap { - vm_offset_t pd_va; /* virtual address */ - vm_paddr_t pd_pa; /* physical address */ - vm_size_t pd_size; /* size of region */ - vm_prot_t pd_prot; /* protection code */ - int pd_cache; /* cache attributes */ -}; - -void pmap_devmap_bootstrap(const struct pmap_devmap *); +void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t, + vm_memattr_t); #endif /* _KERNEL */ @@ -268,41 +248,8 @@ void pmap_devmap_bootstrap(const struct pmap_devmap *); /* * sys/arm/arm/cpufunc.c */ -void pmap_pte_init_mmu_v6(void); void vector_page_setprot(int); - -/* - * sys/arm/arm/db_interface.c - * sys/arm/arm/machdep.c - * sys/arm/arm/minidump_machdep.c - * sys/arm/arm/pmap.c - */ -#define pmap_kernel() kernel_pmap - -/* - * sys/arm/arm/bus_space_generic.c (just comment) - * sys/arm/arm/devmap.c - * sys/arm/arm/pmap.c (just comment) - * sys/arm/at91/at91_machdep.c - * sys/arm/cavium/cns11xx/econa_machdep.c - * sys/arm/freescale/imx/imx6_machdep.c (just comment) - * sys/arm/mv/orion/db88f5xxx.c - * sys/arm/mv/mv_localbus.c - * sys/arm/mv/mv_machdep.c - * sys/arm/mv/mv_pci.c - * sys/arm/s3c2xx0/s3c24x0_machdep.c - * sys/arm/versatile/versatile_machdep.c - * sys/arm/xscale/ixp425/avila_machdep.c - * sys/arm/xscale/i8134x/crb_machdep.c - * sys/arm/xscale/i80321/ep80219_machdep.c - * sys/arm/xscale/i80321/iq31244_machdep.c - * sys/arm/xscale/pxa/pxa_machdep.c - */ -#define PTE_DEVICE PTE2_ATTR_DEVICE - - - #endif /* _KERNEL */ // ----------------------------------------------------------------------------- diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h index dc7fd38a20eb..8222652084b4 100644 --- a/sys/arm/include/pmap.h +++ b/sys/arm/include/pmap.h @@ -60,21 +60,10 @@ /* * Pte related macros */ -#if ARM_ARCH_6 || ARM_ARCH_7A -#ifdef SMP -#define PTE_NOCACHE 2 -#else -#define PTE_NOCACHE 1 -#endif -#define PTE_CACHE 6 -#define PTE_DEVICE 2 -#define PTE_PAGETABLE 6 -#else #define PTE_NOCACHE 1 #define PTE_CACHE 2 #define PTE_DEVICE PTE_NOCACHE #define PTE_PAGETABLE 3 -#endif enum mem_type { STRONG_ORD = 0, @@ -104,11 +93,7 @@ enum mem_type { #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) #define pmap_page_is_write_mapped(m) 
(((m)->aflags & PGA_WRITEABLE) != 0) -#if (ARM_MMU_V6 + ARM_MMU_V7) > 0 -boolean_t pmap_page_is_mapped(vm_page_t); -#else #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) -#endif void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); /* @@ -131,9 +116,7 @@ struct pv_chunk; struct md_page { int pvh_attrs; vm_memattr_t pv_memattr; -#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 vm_offset_t pv_kva; /* first kernel VA mapping */ -#endif TAILQ_HEAD(,pv_entry) pv_list; }; @@ -164,11 +147,7 @@ struct pmap { struct l2_dtable *pm_l2[L2_SIZE]; cpuset_t pm_active; /* active on cpus */ struct pmap_statistics pm_stats; /* pmap statictics */ -#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 - TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */ -#else TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ -#endif }; typedef struct pmap *pmap_t; @@ -176,7 +155,6 @@ typedef struct pmap *pmap_t; #ifdef _KERNEL extern struct pmap kernel_pmap_store; #define kernel_pmap (&kernel_pmap_store) -#define pmap_kernel() kernel_pmap #define PMAP_ASSERT_LOCKED(pmap) \ mtx_assert(&(pmap)->pm_mtx, MA_OWNED) @@ -199,10 +177,8 @@ typedef struct pv_entry { vm_offset_t pv_va; /* virtual address for mapping */ TAILQ_ENTRY(pv_entry) pv_list; int pv_flags; /* flags (wired, etc...) */ -#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 pmap_t pv_pmap; /* pmap where mapping lies */ TAILQ_ENTRY(pv_entry) pv_plist; -#endif } *pv_entry_t; /* @@ -247,7 +223,7 @@ vtopte(vm_offset_t va) pd_entry_t *pdep; pt_entry_t *ptep; - if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE) + if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) return (NULL); return (ptep); } @@ -271,9 +247,7 @@ void *pmap_mapdev(vm_offset_t, vm_size_t); void pmap_unmapdev(vm_offset_t, vm_size_t); vm_page_t pmap_use_pt(pmap_t, vm_offset_t); void pmap_debug(int); -#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); -#endif void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); void @@ -341,119 +315,9 @@ extern int pmap_needs_pte_sync; /* * User-visible names for the ones that vary with MMU class. 
*/ -#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 -#define L2_AP(x) (L2_AP0(x)) -#else #define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) -#endif -#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 -/* - * AP[2:1] access permissions model: - * - * AP[2](APX) - Write Disable - * AP[1] - User Enable - * AP[0] - Reference Flag - * - * AP[2] AP[1] Kernel User - * 0 0 R/W N - * 0 1 R/W R/W - * 1 0 R N - * 1 1 R R - * - */ -#define L2_S_PROT_R (0) /* kernel read */ -#define L2_S_PROT_U (L2_AP0(2)) /* user read */ -#define L2_S_REF (L2_AP0(1)) /* reference flag */ - -#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R|L2_APX) -#define L2_S_EXECUTABLE(pte) (!(pte & L2_XN)) -#define L2_S_WRITABLE(pte) (!(pte & L2_APX)) -#define L2_S_REFERENCED(pte) (!!(pte & L2_S_REF)) - -#ifndef SMP -#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C) -#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C) -#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C) -#else -#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED) -#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED) -#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED) -#endif /* SMP */ - -#define L1_S_PROTO (L1_TYPE_S) -#define L1_C_PROTO (L1_TYPE_C) -#define L2_S_PROTO (L2_TYPE_S) - -/* - * Promotion to a 1MB (SECTION) mapping requires that the corresponding - * 4KB (SMALL) page mappings have identical settings for the following fields: - */ -#define L2_S_PROMOTE (L2_S_REF | L2_SHARED | L2_S_PROT_MASK | \ - L2_XN | L2_S_PROTO) - -/* - * In order to compare 1MB (SECTION) entry settings with the 4KB (SMALL) - * page mapping it is necessary to read and shift appropriate bits from - * L1 entry to positions of the corresponding bits in the L2 entry. - */ -#define L1_S_DEMOTE(l1pd) ((((l1pd) & L1_S_PROTO) >> 0) | \ - (((l1pd) & L1_SHARED) >> 6) | \ - (((l1pd) & L1_S_REF) >> 6) | \ - (((l1pd) & L1_S_PROT_MASK) >> 6) | \ - (((l1pd) & L1_S_XN) >> 4)) - -#ifndef SMP -#define ARM_L1S_STRONG_ORD (0) -#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2)) -#define ARM_L1S_DEVICE_SHARE (L1_S_B) -#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)) -#define ARM_L1S_NRML_IWT_OWT (L1_S_C) -#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B) -#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B) - -#define ARM_L2L_STRONG_ORD (0) -#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2)) -#define ARM_L2L_DEVICE_SHARE (L2_B) -#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)) -#define ARM_L2L_NRML_IWT_OWT (L2_C) -#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B) -#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B) - -#define ARM_L2S_STRONG_ORD (0) -#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2)) -#define ARM_L2S_DEVICE_SHARE (L2_B) -#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)) -#define ARM_L2S_NRML_IWT_OWT (L2_C) -#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B) -#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B) -#else -#define ARM_L1S_STRONG_ORD (0) -#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2)) -#define ARM_L1S_DEVICE_SHARE (L1_S_B) -#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED) -#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED) -#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED) -#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED) - -#define ARM_L2L_STRONG_ORD (0) -#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2)) -#define ARM_L2L_DEVICE_SHARE (L2_B) -#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED) -#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED) -#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED) -#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED) - -#define ARM_L2S_STRONG_ORD (0) -#define 
ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2)) -#define ARM_L2S_DEVICE_SHARE (L2_B) -#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED) -#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED) -#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED) -#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED) -#endif /* SMP */ - -#elif ARM_NMMUS > 1 +#if ARM_NMMUS > 1 /* More than one MMU class configured; use variables. */ #define L2_S_PROT_U pte_l2_s_prot_u #define L2_S_PROT_W pte_l2_s_prot_w @@ -495,7 +359,7 @@ extern int pmap_needs_pte_sync; #endif /* ARM_NMMUS > 1 */ -#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A +#if defined(CPU_XSCALE_81342) #define PMAP_NEEDS_PTE_SYNC 1 #define PMAP_INCLUDE_PTE_SYNC #else @@ -506,8 +370,6 @@ extern int pmap_needs_pte_sync; * These macros return various bits based on kernel/user and protection. * Note that the compiler will usually fold these at compile time. */ -#if (ARM_MMU_V6 + ARM_MMU_V7) == 0 - #define L1_S_PROT_U (L1_S_AP(AP_U)) #define L1_S_PROT_W (L1_S_AP(AP_W)) #define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) @@ -525,27 +387,6 @@ extern int pmap_needs_pte_sync; #define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) -#else -#define L1_S_PROT_U (L1_S_AP(AP_U)) -#define L1_S_PROT_W (L1_S_APX) /* Write disable */ -#define L1_S_PROT_MASK (L1_S_PROT_W|L1_S_PROT_U) -#define L1_S_REF (L1_S_AP(AP_REF)) /* Reference flag */ -#define L1_S_WRITABLE(pd) (!((pd) & L1_S_PROT_W)) -#define L1_S_EXECUTABLE(pd) (!((pd) & L1_S_XN)) -#define L1_S_REFERENCED(pd) ((pd) & L1_S_REF) - -#define L1_S_PROT(ku, pr) (((((ku) == PTE_KERNEL) ? 0 : L1_S_PROT_U) | \ - (((pr) & VM_PROT_WRITE) ? 0 : L1_S_PROT_W) | \ - (((pr) & VM_PROT_EXECUTE) ? 0 : L1_S_XN))) - -#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3)) -#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_APX : 0))) - -#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_APX : 0))) - -#endif /* * Macros to test if a mapping is mappable with an L1 Section mapping @@ -620,15 +461,12 @@ extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); -#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342) +#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); void pmap_zero_page_generic(vm_paddr_t, int, int); void pmap_pte_init_generic(void); -#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 -void pmap_pte_init_mmu_v6(void); -#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */ -#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */ +#endif /* ARM_MMU_GENERIC != 0 */ #if ARM_MMU_XSCALE == 1 void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); diff --git a/sys/arm/include/smp.h b/sys/arm/include/smp.h index c50b99db1236..a993cc0579c2 100644 --- a/sys/arm/include/smp.h +++ b/sys/arm/include/smp.h @@ -14,8 +14,8 @@ enum { IPI_STOP, IPI_STOP_HARD = IPI_STOP, /* These are synonyms on arm. */ IPI_HARDCLOCK, - IPI_TLB, - IPI_CACHE, + IPI_TLB, /* Not used now, but keep it reserved. */ + IPI_CACHE, /* Not used now, but keep it reserved. */ INTR_IPI_COUNT }; #else @@ -25,8 +25,8 @@ enum { #define IPI_STOP 4 #define IPI_STOP_HARD 4 #define IPI_HARDCLOCK 6 -#define IPI_TLB 7 -#define IPI_CACHE 8 +#define IPI_TLB 7 /* Not used now, but keep it reserved. 
*/ +#define IPI_CACHE 8 /* Not used now, but keep it reserved. */ #endif /* INTRNG */ void init_secondary(int cpu); diff --git a/sys/arm/include/vm.h b/sys/arm/include/vm.h index 70a4ab9700bc..552460e9f9a9 100644 --- a/sys/arm/include/vm.h +++ b/sys/arm/include/vm.h @@ -38,13 +38,13 @@ #define VM_MEMATTR_NOCACHE ((vm_memattr_t)PTE2_ATTR_NOCACHE) #define VM_MEMATTR_DEVICE ((vm_memattr_t)PTE2_ATTR_DEVICE) #define VM_MEMATTR_SO ((vm_memattr_t)PTE2_ATTR_SO) -#define VM_MEMATTR_WT ((vm_memattr_t)PTE2_ATTR_WT) +#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PTE2_ATTR_WT) #define VM_MEMATTR_DEFAULT VM_MEMATTR_WB_WA #define VM_MEMATTR_UNCACHEABLE VM_MEMATTR_SO /* misused by DMA */ #ifdef _KERNEL /* Don't export aliased VM_MEMATTR to userland */ -#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WT /* for DRM */ +#define VM_MEMATTR_WRITE_COMBINING VM_MEMATTR_WRITE_THROUGH /* for DRM */ #define VM_MEMATTR_WRITE_BACK VM_MEMATTR_WB_WA /* for DRM */ #endif #else diff --git a/sys/arm/mv/armadaxp/armadaxp.c b/sys/arm/mv/armadaxp/armadaxp.c index 693ae6a93e65..3cb03faeae62 100644 --- a/sys/arm/mv/armadaxp/armadaxp.c +++ b/sys/arm/mv/armadaxp/armadaxp.c @@ -128,7 +128,7 @@ get_tclk(void) { uint32_t cputype; - cputype = cpufunc_id(); + cputype = cpu_ident(); cputype &= CPU_ID_CPU_MASK; if (cputype == CPU_ID_MV88SV584X_V7) diff --git a/sys/arm/mv/armadaxp/armadaxp_mp.c b/sys/arm/mv/armadaxp/armadaxp_mp.c index 6685204b59ed..4ccf7e3ef360 100644 --- a/sys/arm/mv/armadaxp/armadaxp_mp.c +++ b/sys/arm/mv/armadaxp/armadaxp_mp.c @@ -111,7 +111,7 @@ platform_mp_start_ap(void) * Initialization procedure depends on core revision, * in this step CHIP ID is checked to choose proper procedure */ - cputype = cpufunc_id(); + cputype = cpu_ident(); cputype &= CPU_ID_CPU_MASK; /* diff --git a/sys/arm/mv/mv_common.c b/sys/arm/mv/mv_common.c index 8e9465512853..afefc7f28600 100644 --- a/sys/arm/mv/mv_common.c +++ b/sys/arm/mv/mv_common.c @@ -377,7 +377,7 @@ soc_id(uint32_t *dev, uint32_t *rev) * Notice: system identifiers are available in the registers range of * PCIE controller, so using this function is only allowed (and * possible) after the internal registers range has been mapped in via - * pmap_devmap_bootstrap(). + * arm_devmap_bootstrap(). */ *dev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 0) >> 16; *rev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 8) & 0xff; diff --git a/sys/arm/mv/mv_machdep.c b/sys/arm/mv/mv_machdep.c index 9b307730b387..8f0375bc1b05 100644 --- a/sys/arm/mv/mv_machdep.c +++ b/sys/arm/mv/mv_machdep.c @@ -337,7 +337,7 @@ __weak_reference(mv_default_fdt_pci_devmap, mv_pci_devmap); */ /* - * Construct pmap_devmap[] with DT-derived config data. + * Construct devmap table with DT-derived config data. */ int platform_devmap_init(void) diff --git a/sys/arm/mv/orion/db88f5xxx.c b/sys/arm/mv/orion/db88f5xxx.c index f59d511f4d09..15751cc990b7 100644 --- a/sys/arm/mv/orion/db88f5xxx.c +++ b/sys/arm/mv/orion/db88f5xxx.c @@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include @@ -74,7 +73,7 @@ __FBSDID("$FreeBSD$"); int platform_pci_get_irq(u_int bus, u_int slot, u_int func, u_int pin); /* Static device mappings. */ -const struct pmap_devmap pmap_devmap[] = { +const struct arm_devmap_entry db88f5xxx_devmap[] = { /* * Map the on-board devices VA == PA so that we can access them * with the MMU on or off. 
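For context on the cpufunc cleanup above: the cf_id entry is dropped from the cpufuncs dispatch table and cpufunc_id() becomes a plain cpu_ident(), with the Marvell armadaxp callers updated to match. A minimal sketch of the resulting call-site pattern follows; only the cpu_ident() prototype is taken from this change, while the helper name and the CPU_ID_* constant values are placeholders for illustration, not the tree's definitions.

/*
 * Hypothetical caller after the cpufunc_id() -> cpu_ident() rename.
 * The two CPU_ID_* constants below are placeholder values for this
 * sketch; the real ones live in the ARM ID-register headers.
 */
#include <sys/types.h>

#define CPU_ID_CPU_MASK         0x0000fff0      /* placeholder value */
#define CPU_ID_MV88SV584X_V7    0x00005840      /* placeholder value */

u_int   cpu_ident(void);        /* declared in cpufunc.h; was cpufunc_id() */

static int
soc_is_mv88sv584x(void)
{
        u_int cputype;

        cputype = cpu_ident();          /* read the CPU ID register */
        cputype &= CPU_ID_CPU_MASK;
        return (cputype == CPU_ID_MV88SV584X_V7);
}

With cf_id gone, this read no longer goes through the per-CPU function-pointer table, which is the same direction the rest of this header cleanup takes.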
diff --git a/sys/arm/ti/omap4/pandaboard/pandaboard.c b/sys/arm/ti/omap4/pandaboard/pandaboard.c index 20e5e8a6da87..f557f736c56d 100644 --- a/sys/arm/ti/omap4/pandaboard/pandaboard.c +++ b/sys/arm/ti/omap4/pandaboard/pandaboard.c @@ -38,7 +38,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/arm/ti/ti_cpuid.c b/sys/arm/ti/ti_cpuid.c index 38af285c1abd..b2f0f65ab9b6 100644 --- a/sys/arm/ti/ti_cpuid.c +++ b/sys/arm/ti/ti_cpuid.c @@ -120,7 +120,7 @@ omap4_get_revision(void) * the ARM cpuid to get the correct revision. */ if (revision == 0) { - id_code = cpufunc_id(); + id_code = cpu_ident(); revision = (id_code & 0xf) - 1; } diff --git a/sys/arm/versatile/versatile_machdep.c b/sys/arm/versatile/versatile_machdep.c index 2869f1b5d668..ac3ebd22ed40 100644 --- a/sys/arm/versatile/versatile_machdep.c +++ b/sys/arm/versatile/versatile_machdep.c @@ -88,7 +88,7 @@ static struct arm_devmap_entry fdt_devmap[FDT_DEVMAP_MAX] = { /* - * Construct pmap_devmap[] with DT-derived config data. + * Construct devmap table with DT-derived config data. */ int platform_devmap_init(void) diff --git a/sys/arm64/arm64/copystr.c b/sys/arm64/arm64/copystr.c index 008fdd1b5645..ebb4acddfbaf 100644 --- a/sys/arm64/arm64/copystr.c +++ b/sys/arm64/arm64/copystr.c @@ -56,6 +56,6 @@ copystr(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len, if (lencopied != NULL) *lencopied = pos; - return (0); + return (error); } diff --git a/sys/arm64/arm64/genassym.c b/sys/arm64/arm64/genassym.c index 214b99d727d6..67c295f2ed9a 100644 --- a/sys/arm64/arm64/genassym.c +++ b/sys/arm64/arm64/genassym.c @@ -49,10 +49,12 @@ ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); /* Size of pcb, rounded to keep stack alignment */ ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1)); +ASSYM(PCB_SINGLE_STEP_SHIFT, PCB_SINGLE_STEP_SHIFT); ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); +ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); diff --git a/sys/arm64/arm64/gic_fdt.c b/sys/arm64/arm64/gic_fdt.c index 924a08b1cf66..075f1d6b8523 100644 --- a/sys/arm64/arm64/gic_fdt.c +++ b/sys/arm64/arm64/gic_fdt.c @@ -158,12 +158,17 @@ arm_gic_fdt_attach(device_t dev) OF_getencprop(root, "#size-cells", &sc->sc_size_cells, sizeof(sc->sc_size_cells)); + /* If we have no children don't probe for them */ + child = OF_child(root); + if (child == 0) + return (0); + if (gic_fill_ranges(root, sc) < 0) { device_printf(dev, "could not get ranges\n"); return (ENXIO); } - for (child = OF_child(root); child != 0; child = OF_peer(child)) { + for (; child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_DEVBUF, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->obdinfo, child) != 0) { diff --git a/sys/arm64/arm64/intr_machdep.c b/sys/arm64/arm64/intr_machdep.c index e297ff9f2279..3389c69da942 100644 --- a/sys/arm64/arm64/intr_machdep.c +++ b/sys/arm64/arm64/intr_machdep.c @@ -472,9 +472,6 @@ ipi_all_but_self(u_int ipi) other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); - /* ARM64TODO: This will be fixed with arm_intrng */ - ipi += 16; - CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); PIC_IPI_SEND(root_pic, other_cpus, ipi); } diff --git a/sys/arm64/arm64/locore.S b/sys/arm64/arm64/locore.S index 5a61c4a1f7cf..9909a42472d9 100644 --- a/sys/arm64/arm64/locore.S +++ 
b/sys/arm64/arm64/locore.S @@ -369,8 +369,8 @@ create_pagetables: sub x8, x7, x6 /* Get the number of l2 pages to allocate, rounded down */ lsr x10, x8, #(L2_SHIFT) - /* Add 4 MiB for any rounding above and the module data */ - add x10, x10, #2 + /* Add 8 MiB for any rounding above and the module data */ + add x10, x10, #4 /* Create the kernel space L2 table */ mov x6, x26 diff --git a/sys/arm64/arm64/machdep.c b/sys/arm64/arm64/machdep.c index 409ecd316b27..f7552c46adae 100644 --- a/sys/arm64/arm64/machdep.c +++ b/sys/arm64/arm64/machdep.c @@ -233,7 +233,8 @@ int ptrace_single_step(struct thread *td) { - /* TODO; */ + td->td_frame->tf_spsr |= PSR_SS; + td->td_pcb->pcb_flags |= PCB_SINGLE_STEP; return (0); } @@ -241,7 +242,8 @@ int ptrace_clear_single_step(struct thread *td) { - /* TODO; */ + td->td_frame->tf_spsr &= ~PSR_SS; + td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; return (0); } diff --git a/sys/arm64/arm64/mp_machdep.c b/sys/arm64/arm64/mp_machdep.c index 19cee7788c3f..b89982d32e9c 100644 --- a/sys/arm64/arm64/mp_machdep.c +++ b/sys/arm64/arm64/mp_machdep.c @@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #ifdef VFP @@ -247,6 +248,8 @@ init_secondary(uint64_t cpu) vfp_init(); #endif + dbg_monitor_init(); + /* Enable interrupts */ intr_enable(); diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c index dcd6e5dc8021..37b9480b620c 100644 --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -596,7 +596,8 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) * up to the physical address KERNBASE points at. */ map_slot = avail_slot = 0; - for (; map_slot < (physmap_idx * 2); map_slot += 2) { + for (; map_slot < (physmap_idx * 2) && + avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) { if (physmap[map_slot] == physmap[map_slot + 1]) continue; @@ -612,7 +613,7 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) } /* Add the memory before the kernel */ - if (physmap[avail_slot] < pa) { + if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) { phys_avail[avail_slot] = physmap[map_slot]; phys_avail[avail_slot + 1] = pa; physmem += (phys_avail[avail_slot + 1] - diff --git a/sys/arm64/arm64/swtch.S b/sys/arm64/arm64/swtch.S index 1f20b3d63c9c..3175e879114d 100644 --- a/sys/arm64/arm64/swtch.S +++ b/sys/arm64/arm64/swtch.S @@ -37,10 +37,37 @@ __FBSDID("$FreeBSD$"); +.macro clear_step_flag pcbflags, tmp + tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f + mrs \tmp, mdscr_el1 + bic \tmp, \tmp, #1 + msr mdscr_el1, \tmp + isb +999: +.endm + +.macro set_step_flag pcbflags, tmp + tbz \pcbflags, #PCB_SINGLE_STEP_SHIFT, 999f + mrs \tmp, mdscr_el1 + orr \tmp, \tmp, #1 + msr mdscr_el1, \tmp + isb +999: +.endm + /* * void cpu_throw(struct thread *old, struct thread *new) */ ENTRY(cpu_throw) + /* If old == NULL, skip disabling stepping */ + cbz x0, 1f + + /* If we were single stepping, disable it */ + ldr x4, [x0, #TD_PCB] + ldr w5, [x4, #PCB_FLAGS] + clear_step_flag w5, x6 +1: + #ifdef VFP /* Backup the new thread pointer around a call to C code */ mov x19, x1 @@ -69,6 +96,10 @@ ENTRY(cpu_throw) dsb sy isb + /* If we are single stepping, enable it */ + ldr w5, [x4, #PCB_FLAGS] + set_step_flag w5, x6 + /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 @@ -127,6 +158,10 @@ ENTRY(cpu_switch) mrs x6, tpidr_el0 stp x5, x6, [x4, #PCB_SP] + /* If we were single stepping, disable it */ + ldr w5, [x4, #PCB_FLAGS] + clear_step_flag w5, x6 + #ifdef VFP mov x19, x0 mov x20, x1 @@ -174,6 +209,10
@@ ENTRY(cpu_switch) b.eq 1b #endif + /* If we are single stepping, enable it */ + ldr w5, [x4, #PCB_FLAGS] + set_step_flag w5, x6 + /* Restore the registers */ ldp x5, x6, [x4, #PCB_SP] mov sp, x5 diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c index ac810adad27c..89d6d0c89f84 100644 --- a/sys/arm64/arm64/trap.c +++ b/sys/arm64/arm64/trap.c @@ -138,7 +138,6 @@ svc_handler(struct trapframe *frame) int error; td = curthread; - td->td_frame = frame; error = syscallenter(td, &sa); syscallret(td, error, &sa); @@ -338,6 +337,9 @@ do_el0_sync(struct trapframe *frame) ("Invalid pcpu address from userland: %p (tpidr %lx)", get_pcpu(), READ_SPECIALREG(tpidr_el1))); + td = curthread; + td->td_frame = frame; + esr = READ_SPECIALREG(esr_el1); exception = ESR_ELx_EXCEPTION(esr); switch (exception) { @@ -373,15 +375,22 @@ do_el0_sync(struct trapframe *frame) el0_excp_unknown(frame); break; case EXCP_PC_ALIGN: - td = curthread; call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_elr); userret(td, frame); break; case EXCP_BRK: - td = curthread; call_trapsignal(td, SIGTRAP, TRAP_BRKPT, (void *)frame->tf_elr); userret(td, frame); break; + case EXCP_SOFTSTP_EL0: + td->td_frame->tf_spsr &= ~PSR_SS; + td->td_pcb->pcb_flags &= ~PCB_SINGLE_STEP; + WRITE_SPECIALREG(MDSCR_EL1, + READ_SPECIALREG(MDSCR_EL1) & ~DBG_MDSCR_SS); + call_trapsignal(td, SIGTRAP, TRAP_TRACE, + (void *)frame->tf_elr); + userret(td, frame); + break; default: print_registers(frame); panic("Unknown userland exception %x esr_el1 %lx\n", exception, diff --git a/sys/arm64/include/armreg.h b/sys/arm64/include/armreg.h index ffcf98982187..0c86462376fc 100644 --- a/sys/arm64/include/armreg.h +++ b/sys/arm64/include/armreg.h @@ -101,6 +101,7 @@ #define EXCP_SP_ALIGN 0x26 /* SP slignment fault */ #define EXCP_TRAP_FP 0x2c /* Trapped FP exception */ #define EXCP_SERROR 0x2f /* SError interrupt */ +#define EXCP_SOFTSTP_EL0 0x32 /* Software Step, from lower EL */ #define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */ #define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */ #define EXCP_BRK 0x3c /* Breakpoint */ diff --git a/sys/arm64/include/pcb.h b/sys/arm64/include/pcb.h index 027b60544c38..55dd6e92eb3d 100644 --- a/sys/arm64/include/pcb.h +++ b/sys/arm64/include/pcb.h @@ -45,6 +45,10 @@ struct pcb { /* Fault handler, the error value is passed in x0 */ vm_offset_t pcb_onfault; + u_int pcb_flags; +#define PCB_SINGLE_STEP_SHIFT 0 +#define PCB_SINGLE_STEP (1 << PCB_SINGLE_STEP_SHIFT) + /* Place last to simplify the asm to access the rest if the struct */ __uint128_t pcb_vfp[32]; uint32_t pcb_fpcr; diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h index e109de2816d8..0faf2e8fc9d9 100644 --- a/sys/arm64/include/pmap.h +++ b/sys/arm64/include/pmap.h @@ -121,7 +121,7 @@ extern struct pmap kernel_pmap_store; #define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) -#define PHYS_AVAIL_SIZE 10 +#define PHYS_AVAIL_SIZE 32 extern vm_paddr_t phys_avail[]; extern vm_paddr_t dump_avail[]; extern vm_offset_t virtual_avail; diff --git a/sys/boot/efi/libefi/efinet.c b/sys/boot/efi/libefi/efinet.c index f1e614331999..d9ecdcc45f1c 100644 --- a/sys/boot/efi/libefi/efinet.c +++ b/sys/boot/efi/libefi/efinet.c @@ -184,11 +184,16 @@ efinet_init(struct iodesc *desc, void *machdep_hint) EFI_HANDLE h; EFI_STATUS status; + if (nif->nif_driver->netif_ifs[nif->nif_unit].dif_unit < 0) { + printf("Invalid network interface %d\n", nif->nif_unit); + return; + } + h = 
nif->nif_driver->netif_ifs[nif->nif_unit].dif_private; status = BS->HandleProtocol(h, &sn_guid, (VOID **)&nif->nif_devdata); if (status != EFI_SUCCESS) { - printf("net%d: cannot start interface (status=%ld)\n", - nif->nif_unit, (long)status); + printf("net%d: cannot start interface (status=%lu)\n", + nif->nif_unit, EFI_ERROR_CODE(status)); return; } @@ -288,11 +293,30 @@ efinet_dev_init() stats = calloc(nifs, sizeof(struct netif_stats)); for (i = 0; i < nifs; i++) { + EFI_SIMPLE_NETWORK *net; + EFI_HANDLE h; + dif = &efinetif.netif_ifs[i]; + dif->dif_unit = -1; + + h = efi_find_handle(&efinet_dev, i); + + /* + * Open the network device in exclusive mode. Without this + * we will be racing with the UEFI network stack. It will + * pull packets off the network leading to lost packets. + */ + status = BS->OpenProtocol(h, &sn_guid, (void **)&net, + IH, 0, EFI_OPEN_PROTOCOL_EXCLUSIVE); + if (status != EFI_SUCCESS) { + printf("Unable to open network interface %d\n", i); + continue; + } + dif->dif_unit = i; dif->dif_nsel = 1; dif->dif_stats = &stats[i]; - dif->dif_private = efi_find_handle(&efinet_dev, i); + dif->dif_private = h; } return (0); diff --git a/sys/boot/fdt/dts/riscv/spike.dts b/sys/boot/fdt/dts/riscv/spike.dts new file mode 100644 index 000000000000..c5013340d538 --- /dev/null +++ b/sys/boot/fdt/dts/riscv/spike.dts @@ -0,0 +1,92 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +/dts-v1/; + +/ { + model = "UC Berkeley Spike Simulator RV64I"; + compatible = "riscv,rv64i"; + #address-cells = <1>; + #size-cells = <1>; + #interrupt-cells = <1>; + + aliases { + console0 = &console0; + }; + + memory { + device_type = "memory"; + reg = <0x0 0x8000000>; /* 128MB at 0x0 */ + }; + + soc { + #address-cells = <2>; + #size-cells = <2>; + #interrupt-cells = <1>; + + compatible = "simple-bus"; + ranges; + + pic0: pic@0 { + compatible = "riscv,pic"; + interrupt-controller; + }; + + timer0: timer@0 { + compatible = "riscv,timer"; + interrupts = < 1 >; + interrupt-parent = < &pic0 >; + clock-frequency = < 1000000 >; + }; + + htif0: htif@0 { + compatible = "riscv,htif"; + interrupts = < 0 >; + interrupt-parent = < &pic0 >; + + console0: console@0 { + compatible = "htif,console"; + status = "okay"; + }; + }; + }; + + chosen { + bootargs = "-v"; + stdin = "console0"; + stdout = "console0"; + }; +}; diff --git a/sys/boot/ficl/riscv/sysdep.c b/sys/boot/ficl/riscv/sysdep.c new file mode 100644 index 000000000000..ad38660843cd --- /dev/null +++ b/sys/boot/ficl/riscv/sysdep.c @@ -0,0 +1,99 @@ +/******************************************************************* +** s y s d e p . c +** Forth Inspired Command Language +** Author: John Sadler (john_sadler@alum.mit.edu) +** Created: 16 Oct 1997 +** Implementations of FICL external interface functions... +** +*******************************************************************/ + +/* $FreeBSD$ */ + +#ifdef TESTMAIN +#include +#include +#else +#include +#endif +#include "ficl.h" + +/* +******************* FreeBSD P O R T B E G I N S H E R E ******************** Michael Smith +*/ + +#if PORTABLE_LONGMULDIV == 0 +DPUNS ficlLongMul(FICL_UNS x, FICL_UNS y) +{ + DPUNS q; + u_int64_t qx; + + qx = (u_int64_t)x * (u_int64_t) y; + + q.hi = (u_int32_t)( qx >> 32 ); + q.lo = (u_int32_t)( qx & 0xFFFFFFFFL); + + return q; +} + +UNSQR ficlLongDiv(DPUNS q, FICL_UNS y) +{ + UNSQR result; + u_int64_t qx, qh; + + qh = q.hi; + qx = (qh << 32) | q.lo; + + result.quot = qx / y; + result.rem = qx % y; + + return result; +} +#endif + +void ficlTextOut(FICL_VM *pVM, char *msg, int fNewline) +{ + IGNORE(pVM); + + while(*msg != 0) + putchar(*(msg++)); + if (fNewline) + putchar('\n'); + + return; +} + +void *ficlMalloc (size_t size) +{ + return malloc(size); +} + +void *ficlRealloc (void *p, size_t size) +{ + return realloc(p, size); +} + +void ficlFree (void *p) +{ + free(p); +} + + +/* +** Stub function for dictionary access control - does nothing +** by default, user can redefine to guarantee exclusive dict +** access to a single thread for updates. All dict update code +** is guaranteed to be bracketed as follows: +** ficlLockDictionary(TRUE); +** +** ficlLockDictionary(FALSE); +** +** Returns zero if successful, nonzero if unable to acquire lock +** befor timeout (optional - could also block forever) +*/ +#if FICL_MULTITHREAD +int ficlLockDictionary(short fLock) +{ + IGNORE(fLock); + return 0; +} +#endif /* FICL_MULTITHREAD */ diff --git a/sys/boot/ficl/riscv/sysdep.h b/sys/boot/ficl/riscv/sysdep.h new file mode 100644 index 000000000000..3726b9ef838f --- /dev/null +++ b/sys/boot/ficl/riscv/sysdep.h @@ -0,0 +1,411 @@ +/******************************************************************* + s y s d e p . h +** Forth Inspired Command Language +** Author: John Sadler (john_sadler@alum.mit.edu) +** Created: 16 Oct 1997 +** Ficl system dependent types and prototypes... 
+** +** Note: Ficl also depends on the use of "assert" when +** FICL_ROBUST is enabled. This may require some consideration +** in firmware systems since assert often +** assumes stderr/stdout. +** $Id: sysdep.h,v 1.6 2001-04-26 21:41:55-07 jsadler Exp jsadler $ +*******************************************************************/ +/* +** Copyright (c) 1997-2001 John Sadler (john_sadler@alum.mit.edu) +** All rights reserved. +** +** Get the latest Ficl release at http://ficl.sourceforge.net +** +** L I C E N S E and D I S C L A I M E R +** +** Redistribution and use in source and binary forms, with or without +** modification, are permitted provided that the following conditions +** are met: +** 1. Redistributions of source code must retain the above copyright +** notice, this list of conditions and the following disclaimer. +** 2. Redistributions in binary form must reproduce the above copyright +** notice, this list of conditions and the following disclaimer in the +** documentation and/or other materials provided with the distribution. +** +** THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +** IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +** ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +** FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +** DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +** OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +** HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +** LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +** OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +** SUCH DAMAGE. +** +** I am interested in hearing from anyone who uses ficl. If you have +** a problem, a success story, a defect, an enhancement request, or +** if you would like to contribute to the ficl release, please send +** contact me by email at the address above. +** +** $Id: sysdep.h,v 1.6 2001-04-26 21:41:55-07 jsadler Exp jsadler $ +** $FreeBSD$ +*/ + +#if !defined (__SYSDEP_H__) +#define __SYSDEP_H__ + +#include + +#include /* size_t, NULL */ +#include +#include + +#if !defined IGNORE /* Macro to silence unused param warnings */ +#define IGNORE(x) (void)(x) +#endif + +/* +** TRUE and FALSE for C boolean operations, and +** portable 32 bit types for CELLs +** +*/ +#if !defined TRUE +#define TRUE 1 +#endif +#if !defined FALSE +#define FALSE 0 +#endif + + +/* +** System dependent data type declarations... +*/ +#if !defined INT32 +#define INT32 int +#endif + +#if !defined UNS32 +#define UNS32 unsigned int +#endif + +#if !defined UNS16 +#define UNS16 unsigned short +#endif + +#if !defined UNS8 +#define UNS8 unsigned char +#endif + +#if !defined NULL +#define NULL ((void *)0) +#endif + +/* +** FICL_UNS and FICL_INT must have the same size as a void* on +** the target system. A CELL is a union of void*, FICL_UNS, and +** FICL_INT. +** (11/2000: same for FICL_FLOAT) +*/ +#if !defined FICL_INT +#define FICL_INT long +#endif + +#if !defined FICL_UNS +#define FICL_UNS unsigned long +#endif + +#if !defined FICL_FLOAT +#define FICL_FLOAT float +#endif + +/* +** Ficl presently supports values of 32 and 64 for BITS_PER_CELL +*/ +#if !defined BITS_PER_CELL +#define BITS_PER_CELL 64 +#endif + +#if ((BITS_PER_CELL != 32) && (BITS_PER_CELL != 64)) + Error! 
+#endif + +typedef struct +{ + FICL_UNS hi; + FICL_UNS lo; +} DPUNS; + +typedef struct +{ + FICL_UNS quot; + FICL_UNS rem; +} UNSQR; + +typedef struct +{ + FICL_INT hi; + FICL_INT lo; +} DPINT; + +typedef struct +{ + FICL_INT quot; + FICL_INT rem; +} INTQR; + + +/* +** B U I L D C O N T R O L S +*/ + +#if !defined (FICL_MINIMAL) +#define FICL_MINIMAL 0 +#endif +#if (FICL_MINIMAL) +#define FICL_WANT_SOFTWORDS 0 +#define FICL_WANT_FLOAT 0 +#define FICL_WANT_USER 0 +#define FICL_WANT_LOCALS 0 +#define FICL_WANT_DEBUGGER 0 +#define FICL_WANT_OOP 0 +#define FICL_PLATFORM_EXTEND 0 +#define FICL_MULTITHREAD 0 +#define FICL_ROBUST 0 +#define FICL_EXTENDED_PREFIX 0 +#endif + +/* +** FICL_PLATFORM_EXTEND +** Includes words defined in ficlCompilePlatform +*/ +#if !defined (FICL_PLATFORM_EXTEND) +#define FICL_PLATFORM_EXTEND 1 +#endif + +/* +** FICL_WANT_FLOAT +** Includes a floating point stack for the VM, and words to do float operations. +** Contributed by Guy Carver +*/ +#if !defined (FICL_WANT_FLOAT) +#define FICL_WANT_FLOAT 0 +#endif + +/* +** FICL_WANT_DEBUGGER +** Inludes a simple source level debugger +*/ +#if !defined (FICL_WANT_DEBUGGER) +#define FICL_WANT_DEBUGGER 1 +#endif + +/* +** User variables: per-instance variables bound to the VM. +** Kinda like thread-local storage. Could be implemented in a +** VM private dictionary, but I've chosen the lower overhead +** approach of an array of CELLs instead. +*/ +#if !defined FICL_WANT_USER +#define FICL_WANT_USER 1 +#endif + +#if !defined FICL_USER_CELLS +#define FICL_USER_CELLS 16 +#endif + +/* +** FICL_WANT_LOCALS controls the creation of the LOCALS wordset and +** a private dictionary for local variable compilation. +*/ +#if !defined FICL_WANT_LOCALS +#define FICL_WANT_LOCALS 1 +#endif + +/* Max number of local variables per definition */ +#if !defined FICL_MAX_LOCALS +#define FICL_MAX_LOCALS 16 +#endif + +/* +** FICL_WANT_OOP +** Inludes object oriented programming support (in softwords) +** OOP support requires locals and user variables! +*/ +#if !(FICL_WANT_LOCALS) || !(FICL_WANT_USER) +#if !defined (FICL_WANT_OOP) +#define FICL_WANT_OOP 0 +#endif +#endif + +#if !defined (FICL_WANT_OOP) +#define FICL_WANT_OOP 1 +#endif + +/* +** FICL_WANT_SOFTWORDS +** Controls inclusion of all softwords in softcore.c +*/ +#if !defined (FICL_WANT_SOFTWORDS) +#define FICL_WANT_SOFTWORDS 1 +#endif + +/* +** FICL_MULTITHREAD enables dictionary mutual exclusion +** wia the ficlLockDictionary system dependent function. +** Note: this implementation is experimental and poorly +** tested. Further, it's unnecessary unless you really +** intend to have multiple SESSIONS (poor choice of name +** on my part) - that is, threads that modify the dictionary +** at the same time. +*/ +#if !defined FICL_MULTITHREAD +#define FICL_MULTITHREAD 0 +#endif + +/* +** PORTABLE_LONGMULDIV causes ficlLongMul and ficlLongDiv to be +** defined in C in sysdep.c. Use this if you cannot easily +** generate an inline asm definition +*/ +#if !defined (PORTABLE_LONGMULDIV) +#define PORTABLE_LONGMULDIV 0 +#endif + +/* +** INLINE_INNER_LOOP causes the inner interpreter to be inline code +** instead of a function call. This is mainly because MS VC++ 5 +** chokes with an internal compiler error on the function version. +** in release mode. Sheesh. +*/ +#if !defined INLINE_INNER_LOOP +#if defined _DEBUG +#define INLINE_INNER_LOOP 0 +#else +#define INLINE_INNER_LOOP 1 +#endif +#endif + +/* +** FICL_ROBUST enables bounds checking of stacks and the dictionary. 
+** This will detect stack over and underflows and dictionary overflows. +** Any exceptional condition will result in an assertion failure. +** (As generated by the ANSI assert macro) +** FICL_ROBUST == 1 --> stack checking in the outer interpreter +** FICL_ROBUST == 2 also enables checking in many primitives +*/ + +#if !defined FICL_ROBUST +#define FICL_ROBUST 2 +#endif + +/* +** FICL_DEFAULT_STACK Specifies the default size (in CELLs) of +** a new virtual machine's stacks, unless overridden at +** create time. +*/ +#if !defined FICL_DEFAULT_STACK +#define FICL_DEFAULT_STACK 128 +#endif + +/* +** FICL_DEFAULT_DICT specifies the number of CELLs to allocate +** for the system dictionary by default. The value +** can be overridden at startup time as well. +** FICL_DEFAULT_ENV specifies the number of cells to allot +** for the environment-query dictionary. +*/ +#if !defined FICL_DEFAULT_DICT +#define FICL_DEFAULT_DICT 12288 +#endif + +#if !defined FICL_DEFAULT_ENV +#define FICL_DEFAULT_ENV 260 +#endif + +/* +** FICL_DEFAULT_VOCS specifies the maximum number of wordlists in +** the dictionary search order. See Forth DPANS sec 16.3.3 +** (file://dpans16.htm#16.3.3) +*/ +#if !defined FICL_DEFAULT_VOCS +#define FICL_DEFAULT_VOCS 16 +#endif + +/* +** FICL_MAX_PARSE_STEPS controls the size of an array in the FICL_SYSTEM structure +** that stores pointers to parser extension functions. I would never expect to have +** more than 8 of these, so that's the default limit. Too many of these functions +** will probably exact a nasty performance penalty. +*/ +#if !defined FICL_MAX_PARSE_STEPS +#define FICL_MAX_PARSE_STEPS 8 +#endif + +/* +** FICL_EXTENDED_PREFIX enables a bunch of extra prefixes in prefix.c and prefix.fr (if +** included as part of softcore.c) +*/ +#if !defined FICL_EXTENDED_PREFIX +#define FICL_EXTENDED_PREFIX 0 +#endif + +/* +** FICL_ALIGN is the power of two to which the dictionary +** pointer address must be aligned. This value is usually +** either 1 or 2, depending on the memory architecture +** of the target system; 2 is safe on any 16 or 32 bit +** machine. 3 would be appropriate for a 64 bit machine. +*/ +#if !defined FICL_ALIGN +#define FICL_ALIGN 3 +#define FICL_ALIGN_ADD ((1 << FICL_ALIGN) - 1) +#endif + +/* +** System dependent routines -- +** edit the implementations in sysdep.c to be compatible +** with your runtime environment... +** ficlTextOut sends a NULL terminated string to the +** default output device - used for system error messages +** ficlMalloc and ficlFree have the same semantics as malloc and free +** in standard C +** ficlLongMul multiplies two UNS32s and returns a 64 bit unsigned +** product +** ficlLongDiv divides an UNS64 by an UNS32 and returns UNS32 quotient +** and remainder +*/ +struct vm; +void ficlTextOut(struct vm *pVM, char *msg, int fNewline); +void *ficlMalloc (size_t size); +void ficlFree (void *p); +void *ficlRealloc(void *p, size_t size); +/* +** Stub function for dictionary access control - does nothing +** by default, user can redefine to guarantee exclusive dict +** access to a single thread for updates. All dict update code +** must be bracketed as follows: +** ficlLockDictionary(TRUE); +** +** ficlLockDictionary(FALSE); +** +** Returns zero if successful, nonzero if unable to acquire lock +** before timeout (optional - could also block forever) +** +** NOTE: this function must be implemented with lock counting +** semantics: nested calls must behave properly. 
+*/ +#if FICL_MULTITHREAD +int ficlLockDictionary(short fLock); +#else +#define ficlLockDictionary(x) 0 /* ignore */ +#endif + +/* +** 64 bit integer math support routines: multiply two UNS32s +** to get a 64 bit product, & divide the product by an UNS32 +** to get an UNS32 quotient and remainder. Much easier in asm +** on a 32 bit CPU than in C, which usually doesn't support +** the double length result (but it should). +*/ +DPUNS ficlLongMul(FICL_UNS x, FICL_UNS y); +UNSQR ficlLongDiv(DPUNS q, FICL_UNS y); + +#endif /*__SYSDEP_H__*/ diff --git a/sys/cam/scsi/scsi_pass.c b/sys/cam/scsi/scsi_pass.c index c0c313e82bd8..b9612851d2a7 100644 --- a/sys/cam/scsi/scsi_pass.c +++ b/sys/cam/scsi/scsi_pass.c @@ -773,9 +773,6 @@ passclose(struct cdev *dev, int flag, int fmt, struct thread *td) if (softc->open_count == 0) { struct pass_io_req *io_req, *io_req2; - int need_unlock; - - need_unlock = 0; TAILQ_FOREACH_SAFE(io_req, &softc->done_queue, links, io_req2) { TAILQ_REMOVE(&softc->done_queue, io_req, links); diff --git a/sys/cam/scsi/scsi_sa.c b/sys/cam/scsi/scsi_sa.c index 149d06228455..78d43d81c92f 100644 --- a/sys/cam/scsi/scsi_sa.c +++ b/sys/cam/scsi/scsi_sa.c @@ -4961,10 +4961,6 @@ sasetpos(struct cam_periph *periph, int hard, struct mtlocate *locate_info) /*sense_len*/ SSD_FULL_SIZE, /*timeout*/ SPACE_TIMEOUT); } else { - uint32_t blk_pointer; - - blk_pointer = locate_info->logical_id; - scsi_locate_10(&ccb->csio, /*retries*/ 1, /*cbfcnp*/ sadone, diff --git a/sys/cddl/compat/opensolaris/sys/atomic.h b/sys/cddl/compat/opensolaris/sys/atomic.h index 363d55893532..81f75da80772 100644 --- a/sys/cddl/compat/opensolaris/sys/atomic.h +++ b/sys/cddl/compat/opensolaris/sys/atomic.h @@ -51,7 +51,7 @@ extern uint8_t atomic_or_8_nv(volatile uint8_t *target, uint8_t value); extern void membar_producer(void); #if defined(__sparc64__) || defined(__powerpc__) || defined(__arm__) || \ - defined(__mips__) || defined(__aarch64__) + defined(__mips__) || defined(__aarch64__) || defined(__riscv__) extern void atomic_or_8(volatile uint8_t *target, uint8_t value); #else static __inline void diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c index 9293e9a97e27..200369216947 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sa.c @@ -1652,7 +1652,7 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr, int spill_data_size = 0; int spill_attr_count = 0; int error; - uint16_t length; + uint16_t length, reg_length; int i, j, k, length_idx; sa_hdr_phys_t *hdr; sa_idx_tab_t *idx_tab; @@ -1712,34 +1712,50 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr, hdr = SA_GET_HDR(hdl, SA_BONUS); idx_tab = SA_IDX_TAB_GET(hdl, SA_BONUS); for (; k != 2; k++) { - /* iterate over each attribute in layout */ + /* + * Iterate over each attribute in layout. Fetch the + * size of variable-length attributes needing rewrite + * from sa_lengths[]. 
+ */ for (i = 0, length_idx = 0; i != count; i++) { sa_attr_type_t attr; attr = idx_tab->sa_layout->lot_attrs[i]; - if (attr == newattr) { - /* duplicate attributes are not allowed */ - ASSERT(action == SA_REPLACE || - action == SA_REMOVE); - /* must be variable-sized to be replaced here */ - if (action == SA_REPLACE) { - ASSERT(SA_REGISTERED_LEN(sa, attr) == 0); - SA_ADD_BULK_ATTR(attr_desc, j, attr, - locator, datastart, buflen); - } + reg_length = SA_REGISTERED_LEN(sa, attr); + if (reg_length == 0) { + length = hdr->sa_lengths[length_idx]; + length_idx++; } else { - length = SA_REGISTERED_LEN(sa, attr); - if (length == 0) { - length = hdr->sa_lengths[length_idx]; - } + length = reg_length; + } + if (attr == newattr) { + /* + * There is nothing to do for SA_REMOVE, + * so it is just skipped. + */ + if (action == SA_REMOVE) + continue; + /* + * Duplicate attributes are not allowed, so the + * action can not be SA_ADD here. + */ + ASSERT3S(action, ==, SA_REPLACE); + + /* + * Only a variable-sized attribute can be + * replaced here, and its size must be changing. + */ + ASSERT3U(reg_length, ==, 0); + ASSERT3U(length, !=, buflen); + SA_ADD_BULK_ATTR(attr_desc, j, attr, + locator, datastart, buflen); + } else { SA_ADD_BULK_ATTR(attr_desc, j, attr, NULL, (void *) (TOC_OFF(idx_tab->sa_idx_tab[attr]) + (uintptr_t)old_data[k]), length); } - if (SA_REGISTERED_LEN(sa, attr) == 0) - length_idx++; } if (k == 0 && hdl->sa_spill) { hdr = SA_GET_HDR(hdl, SA_SPILL); @@ -1750,10 +1766,8 @@ sa_modify_attrs(sa_handle_t *hdl, sa_attr_type_t newattr, } } if (action == SA_ADD) { - length = SA_REGISTERED_LEN(sa, newattr); - if (length == 0) { - length = buflen; - } + reg_length = SA_REGISTERED_LEN(sa, newattr); + IMPLY(reg_length != 0, reg_length == buflen); SA_ADD_BULK_ATTR(attr_desc, j, newattr, locator, datastart, buflen); } diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c index 99d313b55faf..5eb9df13d611 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zvol.c @@ -134,6 +134,9 @@ SYSCTL_NODE(_vfs_zfs, OID_AUTO, vol, CTLFLAG_RW, 0, "ZFS VOLUME"); static int volmode = ZFS_VOLMODE_GEOM; SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, mode, CTLFLAG_RWTUN, &volmode, 0, "Expose as GEOM providers (1), device files (2) or neither"); +static boolean_t zpool_on_zvol = B_FALSE; +SYSCTL_INT(_vfs_zfs_vol, OID_AUTO, recursive, CTLFLAG_RWTUN, &zpool_on_zvol, 0, + "Allow zpools to use zvols as vdevs (DANGEROUS)"); #endif typedef struct zvol_extent { @@ -1114,7 +1117,9 @@ zvol_open(struct g_provider *pp, int flag, int count) return (err); } #else /* !illumos */ - if (tsd_get(zfs_geom_probe_vdev_key) != NULL) { + boolean_t locked = B_FALSE; + + if (!zpool_on_zvol && tsd_get(zfs_geom_probe_vdev_key) != NULL) { /* * if zfs_geom_probe_vdev_key is set, that means that zfs is * attempting to probe geom providers while looking for a @@ -1125,19 +1130,34 @@ zvol_open(struct g_provider *pp, int flag, int count) */ return (EOPNOTSUPP); } - - mutex_enter(&zfsdev_state_lock); + /* + * Protect against recursively entering spa_namespace_lock + * when spa_open() is used for a pool on a (local) ZVOL(s). + * This is needed since we replaced upstream zfsdev_state_lock + * with spa_namespace_lock in the ZVOL code. + * We are using the same trick as spa_open(). 
+ * Note that calls in zvol_first_open which need to resolve + * pool name to a spa object will enter spa_open() + * recursively, but that function already has all the + * necessary protection. + */ + if (!MUTEX_HELD(&zfsdev_state_lock)) { + mutex_enter(&zfsdev_state_lock); + locked = B_TRUE; + } zv = pp->private; if (zv == NULL) { - mutex_exit(&zfsdev_state_lock); + if (locked) + mutex_exit(&zfsdev_state_lock); return (SET_ERROR(ENXIO)); } if (zv->zv_total_opens == 0) { err = zvol_first_open(zv); if (err) { - mutex_exit(&zfsdev_state_lock); + if (locked) + mutex_exit(&zfsdev_state_lock); return (err); } pp->mediasize = zv->zv_volsize; @@ -1171,7 +1191,8 @@ zvol_open(struct g_provider *pp, int flag, int count) mutex_exit(&zfsdev_state_lock); #else zv->zv_total_opens += count; - mutex_exit(&zfsdev_state_lock); + if (locked) + mutex_exit(&zfsdev_state_lock); #endif return (err); @@ -1181,7 +1202,8 @@ zvol_open(struct g_provider *pp, int flag, int count) #ifdef illumos mutex_exit(&zfsdev_state_lock); #else - mutex_exit(&zfsdev_state_lock); + if (locked) + mutex_exit(&zfsdev_state_lock); #endif return (err); } diff --git a/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h b/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h index 281abd755919..e46330c4ec4a 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h +++ b/sys/cddl/contrib/opensolaris/uts/common/sys/isa_defs.h @@ -388,6 +388,48 @@ extern "C" { #define _DONT_USE_1275_GENERIC_NAMES #define _HAVE_CPUID_INSN +#elif defined(__riscv__) + +/* + * Define the appropriate "processor characteristics" + */ +#define _STACK_GROWS_DOWNWARD +#define _LONG_LONG_LTOH +#define _BIT_FIELDS_LTOH +#define _IEEE_754 +#define _CHAR_IS_UNSIGNED +#define _BOOL_ALIGNMENT 1 +#define _CHAR_ALIGNMENT 1 +#define _SHORT_ALIGNMENT 2 +#define _INT_ALIGNMENT 4 +#define _FLOAT_ALIGNMENT 4 +#define _FLOAT_COMPLEX_ALIGNMENT 4 +#define _LONG_ALIGNMENT 8 +#define _LONG_LONG_ALIGNMENT 8 +#define _DOUBLE_ALIGNMENT 8 +#define _DOUBLE_COMPLEX_ALIGNMENT 8 +#define _LONG_DOUBLE_ALIGNMENT 16 +#define _LONG_DOUBLE_COMPLEX_ALIGNMENT 16 +#define _POINTER_ALIGNMENT 8 +#define _MAX_ALIGNMENT 16 +#define _ALIGNMENT_REQUIRED 1 + +#define _LONG_LONG_ALIGNMENT_32 _LONG_LONG_ALIGNMENT + +/* + * Define the appropriate "implementation choices" + */ +#if !defined(_LP64) +#define _LP64 +#endif +#define _SUNOS_VTOC_16 +#define _DMA_USES_PHYSADDR +#define _FIRMWARE_NEEDS_FDISK +#define _PSM_MODULES +#define _RTC_CONFIG +#define _DONT_USE_1275_GENERIC_NAMES +#define _HAVE_CPUID_INSN + #elif defined(__arm__) /* diff --git a/sys/conf/Makefile.arm b/sys/conf/Makefile.arm index af5f7da3ec76..a7df55b5fe3b 100644 --- a/sys/conf/Makefile.arm +++ b/sys/conf/Makefile.arm @@ -68,7 +68,6 @@ SYSTEM_LD_TAIL +=;sed s/" + SIZEOF_HEADERS"// ldscript.$M\ FILES_CPU_FUNC = \ $S/$M/$M/cpufunc_asm_arm9.S \ - $S/$M/$M/cpufunc_asm_arm10.S \ $S/$M/$M/cpufunc_asm_xscale.S $S/$M/$M/cpufunc_asm.S \ $S/$M/$M/cpufunc_asm_xscale_c3.S $S/$M/$M/cpufunc_asm_armv5_ec.S \ $S/$M/$M/cpufunc_asm_fa526.S $S/$M/$M/cpufunc_asm_sheeva.S \ diff --git a/sys/conf/Makefile.riscv b/sys/conf/Makefile.riscv new file mode 100644 index 000000000000..27338b470413 --- /dev/null +++ b/sys/conf/Makefile.riscv @@ -0,0 +1,49 @@ +# Makefile.riscv -- with config changes. +# Copyright 1990 W. 
Jolitz +# from: @(#)Makefile.i386 7.1 5/10/91 +# from FreeBSD: src/sys/conf/Makefile.i386,v 1.255 2002/02/20 23:35:49 +# $FreeBSD$ +# +# Makefile for FreeBSD +# +# RISCVTODO: copy pasted from aarch64, needs to be +# constructed from a machine description: +# config machineid +# Most changes should be made in the machine description +# /sys/riscv/conf/``machineid'' +# after which you should do +# config machineid +# Generic makefile changes should be made in +# /sys/conf/Makefile.riscv +# after which config should be rerun for all machines. +# + +# Which version of config(8) is required. +%VERSREQ= 600012 + +.if !defined(S) +S= ../../.. +.endif +.include "$S/conf/kern.pre.mk" + +INCLUDES+= -I$S/contrib/libfdt + +.if !empty(DDB_ENABLED) +CFLAGS += -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer +.endif + +%BEFORE_DEPEND + +%OBJS + +%FILES.c + +%FILES.s + +%FILES.m + +%CLEAN + +%RULES + +.include "$S/conf/kern.post.mk" diff --git a/sys/conf/NOTES b/sys/conf/NOTES index 7d06fe321703..67a1bb4e6866 100644 --- a/sys/conf/NOTES +++ b/sys/conf/NOTES @@ -1078,6 +1078,14 @@ options UFS_GJOURNAL # Make space in the kernel for a root filesystem on a md device. # Define to the number of kilobytes to reserve for the filesystem. +# This is now optional. +# If not defined, the root filesystem passed in as the MFS_IMAGE makeoption +# will be automatically embedded in the kernel during linking. Its exact size +# will be consumed within the kernel. +# If defined, the old way of embedding the filesystem in the kernel will be +# used. That is to say MD_ROOT_SIZE KB will be allocated in the kernel and +# later, the filesystem image passed in as the MFS_IMAGE makeoption will be +# dd'd into the reserved space if it fits. options MD_ROOT_SIZE=10 # Make the md device a potential root device, either with preloaded diff --git a/sys/conf/files.arm b/sys/conf/files.arm index e6b89a197be6..9f3a6bffedc0 100644 --- a/sys/conf/files.arm +++ b/sys/conf/files.arm @@ -11,8 +11,7 @@ arm/arm/busdma_machdep-v6.c optional armv6 arm/arm/copystr.S standard arm/arm/cpufunc.c standard arm/arm/cpufunc_asm.S standard -arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 -arm/arm/cpufunc_asm_arm10.S optional cpu_arm9e +arm/arm/cpufunc_asm_arm9.S optional cpu_arm9 | cpu_arm9e arm/arm/cpufunc_asm_arm11.S optional cpu_arm1176 arm/arm/cpufunc_asm_arm11x6.S optional cpu_arm1176 arm/arm/cpufunc_asm_armv4.S optional cpu_arm9 | cpu_arm9e | cpu_fa526 | cpu_xscale_80321 | cpu_xscale_pxa2x0 | cpu_xscale_ixp425 | cpu_xscale_80219 | cpu_xscale_81342 @@ -74,6 +73,8 @@ arm/arm/stdatomic.c standard \ compile-with "${NORMAL_C:N-Wmissing-prototypes}" arm/arm/support.S standard arm/arm/swtch.S standard +arm/arm/swtch-v4.S optional !armv6 +arm/arm/swtch-v6.S optional armv6 arm/arm/sys_machdep.c standard arm/arm/syscall.c standard arm/arm/trap.c optional !armv6 diff --git a/sys/conf/files.riscv b/sys/conf/files.riscv new file mode 100644 index 000000000000..a101b57cfe2f --- /dev/null +++ b/sys/conf/files.riscv @@ -0,0 +1,44 @@ +# $FreeBSD$ +crypto/blowfish/bf_enc.c optional crypto | ipsec +crypto/des/des_enc.c optional crypto | ipsec | netsmb +kern/kern_clocksource.c standard +kern/subr_dummy_vdso_tc.c standard +libkern/bcmp.c standard +libkern/ffs.c standard +libkern/ffsl.c standard +libkern/fls.c standard +libkern/flsl.c standard +libkern/flsll.c standard +libkern/memmove.c standard +libkern/memset.c standard +riscv/htif/htif.c standard +riscv/htif/htif_block.c standard +riscv/htif/htif_console.c standard +riscv/riscv/autoconf.c standard +riscv/riscv/bcopy.c 
standard +riscv/riscv/bus_machdep.c standard +riscv/riscv/busdma_machdep.c standard +riscv/riscv/clock.c standard +riscv/riscv/copyinout.S standard +riscv/riscv/copystr.c standard +riscv/riscv/cpufunc_asm.S standard +riscv/riscv/devmap.c standard +riscv/riscv/dump_machdep.c standard +riscv/riscv/elf_machdep.c standard +riscv/riscv/intr_machdep.c standard +riscv/riscv/in_cksum.c optional inet | inet6 +riscv/riscv/identcpu.c standard +riscv/riscv/locore.S standard no-obj +riscv/riscv/minidump_machdep.c standard +riscv/riscv/machdep.c standard +riscv/riscv/mem.c standard +riscv/riscv/nexus.c standard +riscv/riscv/pmap.c standard +riscv/riscv/sys_machdep.c standard +riscv/riscv/support.S standard +riscv/riscv/swtch.S standard +riscv/riscv/trap.c standard +riscv/riscv/timer.c standard +riscv/riscv/uio_machdep.c standard +riscv/riscv/uma_machdep.c standard +riscv/riscv/vm_machdep.c standard diff --git a/sys/conf/kern.mk b/sys/conf/kern.mk index 56ddbda4996e..fb72a9789482 100644 --- a/sys/conf/kern.mk +++ b/sys/conf/kern.mk @@ -104,6 +104,10 @@ CFLAGS += -mgeneral-regs-only CFLAGS += -ffixed-x18 .endif +.if ${MACHINE_CPUARCH} == "riscv" +INLINE_LIMIT?= 8000 +.endif + # # For sparc64 we want the medany code model so modules may be located # anywhere in the 64-bit address space. We also tell GCC to use floating diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk index 56f7c507f1fb..5bcc56eba733 100644 --- a/sys/conf/kern.post.mk +++ b/sys/conf/kern.post.mk @@ -130,6 +130,9 @@ ${FULLKERNEL}: ${SYSTEM_DEP} vers.o @rm -f ${.TARGET} @echo linking ${.TARGET} ${SYSTEM_LD} +.if !empty(MD_ROOT_SIZE_CONFIGURED) && defined(MFS_IMAGE) + @sh ${S}/tools/embed_mfs.sh ${.TARGET} ${MFS_IMAGE} +.endif .if ${MK_CTF} != "no" @echo ${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ... @${CTFMERGE} ${CTFFLAGS} -o ${.TARGET} ${SYSTEM_OBJS} vers.o @@ -353,6 +356,7 @@ vnode_if_typedef.h: ${AWK} -f $S/tools/vnode_if.awk $S/kern/vnode_if.src -q .if ${MFS_IMAGE:Uno} != "no" +.if empty(MD_ROOT_SIZE_CONFIGURED) # Generate an object file from the file system image to embed in the kernel # via linking. Make sure the contents are in the mfs section and rename the # start/end/size variables to __start_mfs, __stop_mfs, and mfs_size, @@ -372,6 +376,7 @@ embedfs_${MFS_IMAGE:T:R}.o: ${MFS_IMAGE} _binary_${MFS_IMAGE:C,[^[:alnum:]],_,g}_end=mfs_root_end \ ${.TARGET} .endif +.endif # XXX strictly, everything depends on Makefile because changes to ${PROF} # only appear there, but we don't handle that. 
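The NOTES text above describes two MFS embedding modes, and the kern.post.mk hunk wires the first one up by running tools/embed_mfs.sh over the linked kernel when MD_ROOT_SIZE is configured and MFS_IMAGE is set. On the C side the two modes reduce to roughly the following shapes (condensed from the sys/dev/md/md.c hunk later in this patch; the size computation in the second branch is an illustrative sketch, not the literal driver code):

#include <sys/types.h>

#if defined(MD_ROOT_SIZE)
/*
 * Fixed reservation: MD_ROOT_SIZE KB are set aside in the "oldmfs"
 * section and the image is dd'd into it after linking, if it fits.
 */
u_char mfs_root[MD_ROOT_SIZE * 1024] __attribute__((section("oldmfs")));
const int mfs_root_size = sizeof(mfs_root);
#else
/*
 * Link-time embedding: the image object provides start/end symbols,
 * so the filesystem consumes exactly its own size in the kernel.
 */
extern volatile u_char __weak_symbol mfs_root;
extern volatile u_char __weak_symbol mfs_root_end;
#define	mfs_root_size	((size_t)(&mfs_root_end - &mfs_root))
#endif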
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk index 7860701e8440..cf6ec1066f53 100644 --- a/sys/conf/kern.pre.mk +++ b/sys/conf/kern.pre.mk @@ -195,9 +195,13 @@ SYSTEM_DEP= Makefile ${SYSTEM_OBJS} SYSTEM_OBJS= locore.o ${MDOBJS} ${OBJS} SYSTEM_OBJS+= ${SYSTEM_CFILES:.c=.o} SYSTEM_OBJS+= hack.So + +MD_ROOT_SIZE_CONFIGURED!= grep MD_ROOT_SIZE opt_md.h || true ; echo .if ${MFS_IMAGE:Uno} != "no" +.if empty(MD_ROOT_SIZE_CONFIGURED) SYSTEM_OBJS+= embedfs_${MFS_IMAGE:T:R}.o .endif +.endif SYSTEM_LD= @${LD} -Bdynamic -T ${LDSCRIPT} ${_LDFLAGS} --no-warn-mismatch \ --warn-common --export-dynamic --dynamic-linker /red/herring \ -o ${.TARGET} -X ${SYSTEM_OBJS} vers.o @@ -230,8 +234,9 @@ MKMODULESENV+= __MPATH="${__MPATH}" # Architecture and output format arguments for objdump to convert image to # object file -.if ${MFS_IMAGE:Uno} != "no" +.if ${MFS_IMAGE:Uno} != "no" +.if empty(MD_ROOT_SIZE_CONFIGURED) .if !defined(EMBEDFS_FORMAT.${MACHINE_ARCH}) EMBEDFS_FORMAT.${MACHINE_ARCH}!= awk -F'"' '/OUTPUT_FORMAT/ {print $$2}' ${LDSCRIPT} .if empty(EMBEDFS_FORMAT.${MACHINE_ARCH}) @@ -252,6 +257,8 @@ EMBEDFS_FORMAT.mips?= elf32-tradbigmips EMBEDFS_FORMAT.mipsel?= elf32-tradlittlemips EMBEDFS_FORMAT.mips64?= elf64-tradbigmips EMBEDFS_FORMAT.mips64el?= elf64-tradlittlemips +EMBEDFS_FORMAT.riscv?= elf64-littleriscv +.endif .endif # Detect kernel config options that force stack frames to be turned on. diff --git a/sys/conf/ldscript.riscv b/sys/conf/ldscript.riscv new file mode 100644 index 000000000000..31fd5df9ce4e --- /dev/null +++ b/sys/conf/ldscript.riscv @@ -0,0 +1,136 @@ +/* $FreeBSD$ */ +OUTPUT_ARCH(riscv) +ENTRY(_start) + +SEARCH_DIR(/usr/lib); +SECTIONS +{ + /* Read-only sections, merged into text segment: */ + . = kernbase + 0x100; + .text : AT(ADDR(.text) - kernbase) + { + *(.text) + *(.stub) + /* .gnu.warning sections are handled specially by elf32.em. */ + *(.gnu.warning) + *(.gnu.linkonce.t*) + } =0x9090 + _etext = .; + PROVIDE (etext = .); + .fini : { *(.fini) } =0x9090 + .rodata : { *(.rodata) *(.gnu.linkonce.r*) } + .rodata1 : { *(.rodata1) } + .interp : { *(.interp) } + .hash : { *(.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + .rel.text : + { *(.rel.text) *(.rel.gnu.linkonce.t*) } + .rela.text : + { *(.rela.text) *(.rela.gnu.linkonce.t*) } + .rel.data : + { *(.rel.data) *(.rel.gnu.linkonce.d*) } + .rela.data : + { *(.rela.data) *(.rela.gnu.linkonce.d*) } + .rel.rodata : + { *(.rel.rodata) *(.rel.gnu.linkonce.r*) } + .rela.rodata : + { *(.rela.rodata) *(.rela.gnu.linkonce.r*) } + .rel.got : { *(.rel.got) } + .rela.got : { *(.rela.got) } + .rel.ctors : { *(.rel.ctors) } + .rela.ctors : { *(.rela.ctors) } + .rel.dtors : { *(.rel.dtors) } + .rela.dtors : { *(.rela.dtors) } + .rel.init : { *(.rel.init) } + .rela.init : { *(.rela.init) } + .rel.fini : { *(.rel.fini) } + .rela.fini : { *(.rela.fini) } + .rel.bss : { *(.rel.bss) } + .rela.bss : { *(.rela.bss) } + .rel.plt : { *(.rel.plt) } + .rela.plt : { *(.rela.plt) } + .init : { *(.init) } =0x9090 + .plt : { *(.plt) } + + /* Adjust the address for the data segment. We want to adjust up to + the same address within the page on the next page up. */ + . = ALIGN(0x1000) + (. & (0x1000 - 1)) ; + .data : + { + *(.data) + *(.gnu.linkonce.d*) + CONSTRUCTORS + } + .data1 : { *(.data1) } + . 
= ALIGN(32 / 8); + _start_ctors = .; + PROVIDE (start_ctors = .); + .ctors : + { + *(.ctors) + } + _stop_ctors = .; + PROVIDE (stop_ctors = .); + .dtors : + { + *(.dtors) + } + .got : { *(.got.plt) *(.got) } + .dynamic : { *(.dynamic) } + /* We want the small data sections together, so single-instruction offsets + can access them all, and initialized data all before uninitialized, so + we can shorten the on-disk segment size. */ + . = ALIGN(8); + .sdata : { *(.sdata) } + _edata = .; + PROVIDE (edata = .); + __bss_start = .; + .sbss : { *(.sbss) *(.scommon) } + .bss : + { + *(.dynbss) + *(.bss) + *(COMMON) + } + . = ALIGN(8); + _end = . ; + PROVIDE (end = .); + /* Stabs debugging sections. */ + .stab 0 : { *(.stab) } + .stabstr 0 : { *(.stabstr) } + .stab.excl 0 : { *(.stab.excl) } + .stab.exclstr 0 : { *(.stab.exclstr) } + .stab.index 0 : { *(.stab.index) } + .stab.indexstr 0 : { *(.stab.indexstr) } + .comment 0 : { *(.comment) } + /* DWARF debug sections. + Symbols in the DWARF debugging sections are relative to the beginning + of the section so we begin them at 0. */ + /* DWARF 1 */ + .debug 0 : { *(.debug) } + .line 0 : { *(.line) } + /* GNU DWARF 1 extensions */ + .debug_srcinfo 0 : { *(.debug_srcinfo) } + .debug_sfnames 0 : { *(.debug_sfnames) } + /* DWARF 1.1 and DWARF 2 */ + .debug_aranges 0 : { *(.debug_aranges) } + .debug_pubnames 0 : { *(.debug_pubnames) } + /* DWARF 2 */ + .debug_info 0 : { *(.debug_info) } + .debug_abbrev 0 : { *(.debug_abbrev) } + .debug_line 0 : { *(.debug_line) } + .debug_frame 0 : { *(.debug_frame) } + .debug_str 0 : { *(.debug_str) } + .debug_loc 0 : { *(.debug_loc) } + .debug_macinfo 0 : { *(.debug_macinfo) } + /* SGI/MIPS DWARF 2 extensions */ + .debug_weaknames 0 : { *(.debug_weaknames) } + .debug_funcnames 0 : { *(.debug_funcnames) } + .debug_typenames 0 : { *(.debug_typenames) } + .debug_varnames 0 : { *(.debug_varnames) } + /* These must appear regardless of . 
*/ +} diff --git a/sys/conf/options.mips b/sys/conf/options.mips index 47cff06ff7c2..e85f5b1327e2 100644 --- a/sys/conf/options.mips +++ b/sys/conf/options.mips @@ -29,9 +29,13 @@ # $FreeBSD$ CPU_MIPS4KC opt_global.h -CPU_MIPS24KC opt_global.h -CPU_MIPS74KC opt_global.h -CPU_MIPS1004KC opt_global.h +CPU_MIPS24K opt_global.h +CPU_MIPS34K opt_global.h +CPU_MIPS74K opt_global.h +CPU_MIPS1004K opt_global.h +CPU_MIPS1074K opt_global.h +CPU_INTERAPTIV opt_global.h +CPU_PROAPTIV opt_global.h CPU_MIPS32 opt_global.h CPU_MIPS64 opt_global.h CPU_SENTRY5 opt_global.h diff --git a/sys/conf/options.riscv b/sys/conf/options.riscv new file mode 100644 index 000000000000..c263bd860875 --- /dev/null +++ b/sys/conf/options.riscv @@ -0,0 +1,4 @@ +# $FreeBSD$ + +RISCV opt_global.h +VFP opt_global.h diff --git a/sys/contrib/ipfilter/netinet/ip_compat.h b/sys/contrib/ipfilter/netinet/ip_compat.h index 8aece74b53cc..bcb47e93464d 100644 --- a/sys/contrib/ipfilter/netinet/ip_compat.h +++ b/sys/contrib/ipfilter/netinet/ip_compat.h @@ -147,6 +147,7 @@ struct ether_addr { # include # include +# include # include # define KRWLOCK_FILL_SZ 56 # define KMUTEX_FILL_SZ 56 diff --git a/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c b/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c index 30ec46c11b9d..8a5a90d74073 100644 --- a/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c +++ b/sys/contrib/ipfilter/netinet/ip_fil_freebsd.c @@ -36,6 +36,7 @@ static const char rcsid[] = "@(#)$Id$"; #if defined(__FreeBSD_version) && (__FreeBSD_version >= 800000) #include #endif +# include # include # include #if !defined(__hpux) diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c index 93996e27beb8..e2228e6e3c59 100644 --- a/sys/dev/bwn/if_bwn.c +++ b/sys/dev/bwn/if_bwn.c @@ -36,8 +36,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include +#include #include #include #include diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c index c1a9ce8717df..b6c98b160e25 100644 --- a/sys/dev/e1000/if_em.c +++ b/sys/dev/e1000/if_em.c @@ -1371,8 +1371,15 @@ em_init_locked(struct adapter *adapter) if_clearhwassist(ifp); if (if_getcapenable(ifp) & IFCAP_TXCSUM) if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); - if (if_getcapenable(ifp) & IFCAP_TSO4) - if_sethwassistbits(ifp, CSUM_TSO, 0); + /* + ** There have proven to be problems with TSO when not + ** at full gigabit speed, so disable the assist automatically + ** when at lower speeds. 
-jfv + */ + if (if_getcapenable(ifp) & IFCAP_TSO4) { + if (adapter->link_speed == SPEED_1000) + if_sethwassistbits(ifp, CSUM_TSO, 0); + } /* Configure for OS presence */ em_init_manageability(adapter); diff --git a/sys/dev/ed/if_ed.c b/sys/dev/ed/if_ed.c index 00e785d91c35..95d80f690643 100644 --- a/sys/dev/ed/if_ed.c +++ b/sys/dev/ed/if_ed.c @@ -43,8 +43,9 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include +#include +#include #include #include #include diff --git a/sys/dev/ep/if_ep.c b/sys/dev/ep/if_ep.c index cd7afb530a46..9f6848a1de58 100644 --- a/sys/dev/ep/if_ep.c +++ b/sys/dev/ep/if_ep.c @@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/fe/if_fe.c b/sys/dev/fe/if_fe.c index 4598b59fcb97..1be74f9d2c8b 100644 --- a/sys/dev/fe/if_fe.c +++ b/sys/dev/fe/if_fe.c @@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include diff --git a/sys/dev/fxp/if_fxp.c b/sys/dev/fxp/if_fxp.c index cf0e6a249ff9..dc9181758a30 100644 --- a/sys/dev/fxp/if_fxp.c +++ b/sys/dev/fxp/if_fxp.c @@ -45,6 +45,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/hwpmc/hwpmc_riscv.h b/sys/dev/hwpmc/hwpmc_riscv.h new file mode 100644 index 000000000000..3f1f5999f6bc --- /dev/null +++ b/sys/dev/hwpmc/hwpmc_riscv.h @@ -0,0 +1,51 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * This software was developed by the University of Cambridge Computer + * Laboratory with support from ARM Ltd. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#ifndef _DEV_HWPMC_RISCV_H_ +#define _DEV_HWPMC_RISCV_H_ + +#define RISCV_PMC_CAPS (PMC_CAP_INTERRUPT | PMC_CAP_USER | \ + PMC_CAP_SYSTEM | PMC_CAP_EDGE | \ + PMC_CAP_THRESHOLD | PMC_CAP_READ | \ + PMC_CAP_WRITE | PMC_CAP_INVERT | \ + PMC_CAP_QUALIFIER) + +#define RISCV_RELOAD_COUNT_TO_PERFCTR_VALUE(R) (-(R)) +#define RISCV_PERFCTR_VALUE_TO_RELOAD_COUNT(P) (-(P)) +#define EVENT_ID_MASK 0xFF + +#ifdef _KERNEL +/* MD extension for 'struct pmc' */ +struct pmc_md_riscv_pmc { + uint32_t pm_riscv_evsel; +}; +#endif /* _KERNEL */ +#endif /* _DEV_HWPMC_RISCV_H_ */ diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c index 84e1314a8d19..c6dd53d610b5 100644 --- a/sys/dev/iicbus/iic.c +++ b/sys/dev/iicbus/iic.c @@ -293,7 +293,8 @@ iicrdwr(struct iic_cdevpriv *priv, struct iic_rdwr_data *d, int flags) struct iic_msg *buf, *m; void **usrbufs; device_t iicdev, parent; - int error, i; + int error; + uint32_t i; iicdev = priv->sc->sc_dev; parent = device_get_parent(iicdev); diff --git a/sys/dev/iscsi_initiator/isc_cam.c b/sys/dev/iscsi_initiator/isc_cam.c index 6089694364c6..e53a0fb66ced 100644 --- a/sys/dev/iscsi_initiator/isc_cam.c +++ b/sys/dev/iscsi_initiator/isc_cam.c @@ -46,6 +46,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/dev/iscsi_initiator/isc_sm.c b/sys/dev/iscsi_initiator/isc_sm.c index 6810047b77ac..097cdcca2153 100644 --- a/sys/dev/iscsi_initiator/isc_sm.c +++ b/sys/dev/iscsi_initiator/isc_sm.c @@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/dev/iscsi_initiator/isc_soc.c b/sys/dev/iscsi_initiator/isc_soc.c index e77f0700c69e..adb9914c00f7 100644 --- a/sys/dev/iscsi_initiator/isc_soc.c +++ b/sys/dev/iscsi_initiator/isc_soc.c @@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/dev/iscsi_initiator/isc_subr.c b/sys/dev/iscsi_initiator/isc_subr.c index 677c5f1674c8..d553cd1c3326 100644 --- a/sys/dev/iscsi_initiator/isc_subr.c +++ b/sys/dev/iscsi_initiator/isc_subr.c @@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/dev/iscsi_initiator/iscsi_subr.c b/sys/dev/iscsi_initiator/iscsi_subr.c index b22ed831207b..cf746cda65f7 100644 --- a/sys/dev/iscsi_initiator/iscsi_subr.c +++ b/sys/dev/iscsi_initiator/iscsi_subr.c @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c index 168fa1310f74..c6a50840d207 100644 --- a/sys/dev/ixgbe/if_ix.c +++ b/sys/dev/ixgbe/if_ix.c @@ -592,6 +592,12 @@ ixgbe_attach(device_t dev) if (error) goto err_late; + /* Enable the optics for 82599 SFP+ fiber */ + ixgbe_enable_tx_laser(hw); + + /* Enable power to the phy. */ + ixgbe_set_phy_power(hw, TRUE); + /* Setup OS specific network interface */ if (ixgbe_setup_interface(dev, adapter) != 0) goto err_late; @@ -1260,6 +1266,9 @@ ixgbe_init_locked(struct adapter *adapter) device_printf(dev, "Error setting up EEE: %d\n", err); } + /* Enable power to the phy. 
*/ + ixgbe_set_phy_power(hw, TRUE); + /* Config/Enable Link */ ixgbe_config_link(adapter); @@ -3980,6 +3989,9 @@ ixgbe_setup_low_power_mode(struct adapter *adapter) mtx_assert(&adapter->core_mtx, MA_OWNED); + if (!hw->wol_enabled) + ixgbe_set_phy_power(hw, FALSE); + /* Limit power management flow to X550EM baseT */ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T && hw->phy.ops.enter_lplu) { diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c index d67e6802b184..feb74f6f8b34 100644 --- a/sys/dev/ixgbe/ixgbe_common.c +++ b/sys/dev/ixgbe/ixgbe_common.c @@ -199,9 +199,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) break; } - ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, + if (!supported) { + ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, "Device %x does not support flow control autoneg", hw->device_id); + } + return supported; } diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c index 6bff17291f1f..f5d22b252e3c 100644 --- a/sys/dev/ixgbe/ixgbe_phy.c +++ b/sys/dev/ixgbe/ixgbe_phy.c @@ -2734,6 +2734,9 @@ s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) u32 status; u16 reg; + if (!on && ixgbe_mng_present(hw)) + return 0; + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h index d76cde26cddf..da03f79e5fc1 100644 --- a/sys/dev/ixgbe/ixgbe_type.h +++ b/sys/dev/ixgbe/ixgbe_type.h @@ -1479,7 +1479,10 @@ struct ixgbe_dmac_config { #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ #define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ #define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ +#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ #define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ #define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ #define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c index 96478eaf6a7b..fd52ebeb8c52 100644 --- a/sys/dev/ixgbe/ixgbe_x540.c +++ b/sys/dev/ixgbe/ixgbe_x540.c @@ -82,8 +82,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) /* PHY */ phy->ops.init = ixgbe_init_phy_ops_generic; phy->ops.reset = NULL; - if (!ixgbe_mng_present(hw)) - phy->ops.set_phy_power = ixgbe_set_copper_phy_power; + phy->ops.set_phy_power = ixgbe_set_copper_phy_power; /* MAC */ mac->ops.reset_hw = ixgbe_reset_hw_X540; diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c index d62035d44f9f..1199d38f9158 100644 --- a/sys/dev/ixgbe/ixgbe_x550.c +++ b/sys/dev/ixgbe/ixgbe_x550.c @@ -965,7 +965,7 @@ void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) num_qs = 4; /* 32 VFs / pools */ bitmask = 0x0000000F; break; - default: /* 64 VFs / pools */ + default: /* 64 VFs / pools */ num_qs = 2; bitmask = 0x00000003; break; @@ -1264,7 +1264,7 @@ static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) IXGBE_MDIO_GLOBAL_ALARM_1_INT))) return status; - /* High temperature failure alarm triggered */ + /* Global alarm triggered */ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ®); @@ -1277,6 +1277,21 @@ static s32 
ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) /* power down the PHY in case the PHY FW didn't already */ ixgbe_set_copper_phy_power(hw, FALSE); return IXGBE_ERR_OVERTEMP; + } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { + /* device fault alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + + if (status != IXGBE_SUCCESS) + return status; + + /* if device fault was due to high temp alarm handle and exit */ + if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { + /* power down the PHY in case the PHY FW didn't */ + ixgbe_set_copper_phy_power(hw, FALSE); + return IXGBE_ERR_OVERTEMP; + } } /* Vendor alarm 2 triggered */ diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index 505d4b4cbb12..d5ab42fb6025 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -674,9 +674,9 @@ ixl_attach(device_t dev) } /* Limit phy interrupts to link and modules failure */ - error = i40e_aq_set_phy_int_mask(hw, - I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); - if (error) + error = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); + if (error) device_printf(dev, "set phy mask failed: %d\n", error); /* Get the bus configuration and set the shared code */ diff --git a/sys/dev/le/lance.c b/sys/dev/le/lance.c index 2b4202be2cdd..f8a333b9fd5c 100644 --- a/sys/dev/le/lance.c +++ b/sys/dev/le/lance.c @@ -72,6 +72,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c index ff487082e708..6cd0f2969472 100644 --- a/sys/dev/malo/if_malo.c +++ b/sys/dev/malo/if_malo.c @@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index 2fa9f460edd9..222bc403e67d 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -130,18 +130,12 @@ SYSCTL_INT(_vm, OID_AUTO, md_malloc_wait, CTLFLAG_RW, &md_malloc_wait, 0, */ #if defined(MD_ROOT_SIZE) /* + * We put the mfs_root symbol into the oldmfs section of the kernel object file. * Applications that patch the object with the image can determine - * the size looking at the start and end markers (strings), - * so we want them contiguous. + * the size looking at the oldmfs section size within the kernel. 
*/ -static struct { - u_char start[MD_ROOT_SIZE*1024]; - u_char end[128]; -} mfs_root = { - .start = "MFS Filesystem goes here", - .end = "MFS Filesystem had better STOP here", -}; -const int mfs_root_size = sizeof(mfs_root.start); +u_char mfs_root[MD_ROOT_SIZE*1024] __attribute__ ((section ("oldmfs"))); +const int mfs_root_size = sizeof(mfs_root); #else extern volatile u_char __weak_symbol mfs_root; extern volatile u_char __weak_symbol mfs_root_end; diff --git a/sys/dev/mps/mps_mapping.c b/sys/dev/mps/mps_mapping.c index d0819efc26e9..d96f33cdd919 100644 --- a/sys/dev/mps/mps_mapping.c +++ b/sys/dev/mps/mps_mapping.c @@ -890,7 +890,7 @@ _mapping_get_dev_info(struct mps_softc *sc, u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; - u8 entry, enc_idx, phy_idx, sata_end_device; + u8 entry, enc_idx, phy_idx; u32 map_idx, index, device_info; struct _map_phy_change *phy_change, *tmp_phy_change; uint64_t sas_address; @@ -920,10 +920,8 @@ _mapping_get_dev_info(struct mps_softc *sc, sas_address = sas_device_pg0.SASAddress.High; sas_address = (sas_address << 32) | sas_device_pg0.SASAddress.Low; - sata_end_device = 0; if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) { - sata_end_device = 1; rc = mpssas_get_sas_address_for_sata_disk(sc, &sas_address, phy_change->dev_handle, device_info, &phy_change->is_SATA_SSD); diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c index 4fbdbb2d20c0..8cba1d72286c 100644 --- a/sys/dev/mps/mps_sas.c +++ b/sys/dev/mps/mps_sas.c @@ -2797,11 +2797,9 @@ mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr) uint8_t *request, *response; MPI2_SMP_PASSTHROUGH_REQUEST *req; struct mps_softc *sc; - struct sglist *sg; int error; sc = sassc->sc; - sg = NULL; error = 0; /* diff --git a/sys/dev/netmap/netmap_offloadings.c b/sys/dev/netmap/netmap_offloadings.c index 34eafab7c87e..4055c427cb56 100644 --- a/sys/dev/netmap/netmap_offloadings.c +++ b/sys/dev/netmap/netmap_offloadings.c @@ -31,6 +31,7 @@ #include #include #include /* defines used in kernel.h */ +#include /* types used in module initialization */ #include /* types used in module initialization */ #include #include /* struct socket */ diff --git a/sys/dev/nvme/nvme.h b/sys/dev/nvme/nvme.h index 227a89ef0daf..19a63e8d5d7d 100644 --- a/sys/dev/nvme/nvme.h +++ b/sys/dev/nvme/nvme.h @@ -392,6 +392,34 @@ enum nvme_activate_action { NVME_AA_ACTIVATE = 0x2, }; +struct nvme_power_state { + /** Maximum Power */ + uint16_t mp; /* Maximum Power */ + uint8_t ps_rsvd1; + uint8_t mps : 1; /* Max Power Scale */ + uint8_t nops : 1; /* Non-Operational State */ + uint8_t ps_rsvd2 : 6; + uint32_t enlat; /* Entry Latency */ + uint32_t exlat; /* Exit Latency */ + uint8_t rrt : 5; /* Relative Read Throughput */ + uint8_t ps_rsvd3 : 3; + uint8_t rrl : 5; /* Relative Read Latency */ + uint8_t ps_rsvd4 : 3; + uint8_t rwt : 5; /* Relative Write Throughput */ + uint8_t ps_rsvd5 : 3; + uint8_t rwl : 5; /* Relative Write Latency */ + uint8_t ps_rsvd6 : 3; + uint16_t idlp; /* Idle Power */ + uint8_t ps_rsvd7 : 6; + uint8_t ips : 2; /* Idle Power Scale */ + uint8_t ps_rsvd8; + uint16_t actp; /* Active Power */ + uint8_t apw : 3; /* Active Power Workload */ + uint8_t ps_rsvd9 : 3; + uint8_t aps : 2; /* Active Power Scale */ + uint8_t ps_rsvd10[9]; +} __packed; + #define NVME_SERIAL_NUMBER_LENGTH 20 #define NVME_MODEL_NUMBER_LENGTH 40 #define NVME_FIRMWARE_REVISION_LENGTH 8 @@ -532,7 +560,7 @@ struct 
nvme_controller_data { uint8_t reserved5[1344]; /* bytes 2048-3071: power state descriptors */ - uint8_t reserved6[1024]; + struct nvme_power_state power_state[32]; /* bytes 3072-4095: vendor specific */ uint8_t vs[1024]; diff --git a/sys/dev/oce/oce_if.h b/sys/dev/oce/oce_if.h index bb788413faa0..99707e496783 100644 --- a/sys/dev/oce/oce_if.h +++ b/sys/dev/oce/oce_if.h @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c index daa96266353a..03bfa73d1f52 100644 --- a/sys/dev/otus/if_otus.c +++ b/sys/dev/otus/if_otus.c @@ -31,6 +31,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c index 5ee946872867..d4e4fd431c1b 100644 --- a/sys/dev/sfxge/sfxge_rx.c +++ b/sys/dev/sfxge/sfxge_rx.c @@ -34,7 +34,8 @@ #include __FBSDID("$FreeBSD$"); -#include +#include +#include #include #include #include diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c index 9cf5c79a8213..e729360c6b63 100644 --- a/sys/dev/sfxge/sfxge_tx.c +++ b/sys/dev/sfxge/sfxge_tx.c @@ -49,7 +49,8 @@ #include __FBSDID("$FreeBSD$"); -#include +#include +#include #include #include #include diff --git a/sys/dev/sn/if_sn.c b/sys/dev/sn/if_sn.c index 26e60bc0445a..c1fbc9ad8ac7 100644 --- a/sys/dev/sn/if_sn.c +++ b/sys/dev/sn/if_sn.c @@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/tx/if_tx.c b/sys/dev/tx/if_tx.c index 7a12aee288d1..42b327fbdf26 100644 --- a/sys/dev/tx/if_tx.c +++ b/sys/dev/tx/if_tx.c @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/usb/controller/ehcireg.h b/sys/dev/usb/controller/ehcireg.h index 1bfda909cdd0..2394b2c17c9a 100644 --- a/sys/dev/usb/controller/ehcireg.h +++ b/sys/dev/usb/controller/ehcireg.h @@ -167,7 +167,7 @@ * bits are equal */ #define EHCI_USBMODE_NOLPM 0x68 /* RW USB Device mode reg (no LPM) */ -#define EHCI_USBMODE_LPM 0xA8 /* RW USB Device mode reg (LPM) */ +#define EHCI_USBMODE_LPM 0xC8 /* RW USB Device mode reg (LPM) */ #define EHCI_UM_CM 0x00000003 /* R/WO Controller Mode */ #define EHCI_UM_CM_IDLE 0x0 /* Idle */ #define EHCI_UM_CM_HOST 0x3 /* Host Controller */ diff --git a/sys/dev/usb/wlan/if_rsu.c b/sys/dev/usb/wlan/if_rsu.c index 0dcde19add44..5aa460ba82c1 100644 --- a/sys/dev/usb/wlan/if_rsu.c +++ b/sys/dev/usb/wlan/if_rsu.c @@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/wi/if_wi.c b/sys/dev/wi/if_wi.c index 832e6aa37c04..8f9d1a0e7525 100644 --- a/sys/dev/wi/if_wi.c +++ b/sys/dev/wi/if_wi.c @@ -74,6 +74,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/wi/if_wi_pci.c b/sys/dev/wi/if_wi_pci.c index b218c2479a69..a0e872af9c88 100644 --- a/sys/dev/wi/if_wi_pci.c +++ b/sys/dev/wi/if_wi_pci.c @@ -42,6 +42,7 @@ #include #include +#include #include #include #include diff --git a/sys/dev/xe/if_xe.c b/sys/dev/xe/if_xe.c index 4a7aa6e8eb69..4975fd53726b 100644 --- a/sys/dev/xe/if_xe.c +++ b/sys/dev/xe/if_xe.c @@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c index f7bd599a04ed..9568967d609b 100644 --- a/sys/dev/xl/if_xl.c +++ b/sys/dev/xl/if_xl.c @@ -106,8 +106,9 @@ __FBSDID("$FreeBSD$"); #include 
#include #include -#include #include +#include +#include #include #include #include diff --git a/sys/kern/kern_ktr.c b/sys/kern/kern_ktr.c index 6885706e4c06..1af3b94f55d4 100644 --- a/sys/kern/kern_ktr.c +++ b/sys/kern/kern_ktr.c @@ -419,7 +419,7 @@ DB_SHOW_COMMAND(ktr, db_ktr_all) db_ktr_verbose |= (strchr(modif, 'V') != NULL) ? 1 : 0; /* just timestap please */ if (strchr(modif, 'a') != NULL) { db_disable_pager(); - while (cncheckc() != -1) + while (cncheckc() == -1) if (db_mach_vtrace() == 0) break; } else { diff --git a/sys/kern/subr_mchain.c b/sys/kern/subr_mchain.c index e9d7d22129b4..233a78aac706 100644 --- a/sys/kern/subr_mchain.c +++ b/sys/kern/subr_mchain.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/kern/uipc_debug.c b/sys/kern/uipc_debug.c index caecad9bac84..7c8b93cdbd92 100644 --- a/sys/kern/uipc_debug.c +++ b/sys/kern/uipc_debug.c @@ -461,9 +461,9 @@ db_print_socket(struct socket *so, const char *socketname, int indent) db_print_indent(indent); /* so_list skipped */ - db_printf("so_qlen: %d ", so->so_qlen); - db_printf("so_incqlen: %d ", so->so_incqlen); - db_printf("so_qlimit: %d ", so->so_qlimit); + db_printf("so_qlen: %u ", so->so_qlen); + db_printf("so_incqlen: %u ", so->so_incqlen); + db_printf("so_qlimit: %u ", so->so_qlimit); db_printf("so_timeo: %d ", so->so_timeo); db_printf("so_error: %d\n", so->so_error); diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c index ba77fcaca92a..edf03a3b51fc 100644 --- a/sys/kern/uipc_sockbuf.c +++ b/sys/kern/uipc_sockbuf.c @@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$"); #include /* for aio_swake proto */ #include #include +#include #include #include #include diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c index 350ca3cdd0b0..5d2247fddb1d 100644 --- a/sys/kern/uipc_socket.c +++ b/sys/kern/uipc_socket.c @@ -196,7 +196,7 @@ VNET_DEFINE(struct hhook_head *, socket_hhh[HHOOK_SOCKET_LAST + 1]); * NB: The orginal sysctl somaxconn is still available but hidden * to prevent confusion about the actual purpose of this number. */ -static int somaxconn = SOMAXCONN; +static u_int somaxconn = SOMAXCONN; static int sysctl_somaxconn(SYSCTL_HANDLER_ARGS) @@ -209,7 +209,13 @@ sysctl_somaxconn(SYSCTL_HANDLER_ARGS) if (error || !req->newptr ) return (error); - if (val < 1 || val > USHRT_MAX) + /* + * The purpose of the UINT_MAX / 3 limit, is so that the formula + * 3 * so_qlimit / 2 + * below, will not overflow. 
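As a quick sanity check of the bound chosen in that handler (a standalone userland snippet, assuming only the standard headers shown): with so_qlimit capped at UINT_MAX / 3, the product 3 * so_qlimit is at most UINT_MAX, so the backlog expression 3 * so_qlimit / 2 cannot wrap in unsigned arithmetic.

#include <assert.h>
#include <limits.h>

int
main(void)
{
	unsigned int qlimit = UINT_MAX / 3;	/* largest value the handler accepts */

	/* 3 * qlimit is at most UINT_MAX, so no unsigned wraparound occurs. */
	assert((unsigned long long)3 * qlimit <= UINT_MAX);
	/* Hence the backlog formula is computed exactly in unsigned int. */
	assert(3U * qlimit / 2 == (unsigned long long)3 * qlimit / 2);
	return (0);
}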
+ */ + + if (val < 1 || val > UINT_MAX / 3) return (EINVAL); somaxconn = val; diff --git a/sys/kgssapi/krb5/kcrypto_aes.c b/sys/kgssapi/krb5/kcrypto_aes.c index d2dac2158f0a..fca963c5f1c6 100644 --- a/sys/kgssapi/krb5/kcrypto_aes.c +++ b/sys/kgssapi/krb5/kcrypto_aes.c @@ -43,7 +43,8 @@ __FBSDID("$FreeBSD$"); struct aes_state { struct mtx as_lock; - uint64_t as_session; + uint64_t as_session_aes; + uint64_t as_session_sha1; }; static void @@ -61,8 +62,10 @@ aes_destroy(struct krb5_key_state *ks) { struct aes_state *as = ks->ks_priv; - if (as->as_session) - crypto_freesession(as->as_session); + if (as->as_session_aes != 0) + crypto_freesession(as->as_session_aes); + if (as->as_session_sha1 != 0) + crypto_freesession(as->as_session_sha1); mtx_destroy(&as->as_lock); free(ks->ks_priv, M_GSSAPI); } @@ -72,32 +75,35 @@ aes_set_key(struct krb5_key_state *ks, const void *in) { void *kp = ks->ks_key; struct aes_state *as = ks->ks_priv; - struct cryptoini cri[2]; + struct cryptoini cri; if (kp != in) bcopy(in, kp, ks->ks_class->ec_keylen); - if (as->as_session) - crypto_freesession(as->as_session); - - bzero(cri, sizeof(cri)); + if (as->as_session_aes != 0) + crypto_freesession(as->as_session_aes); + if (as->as_session_sha1 != 0) + crypto_freesession(as->as_session_sha1); /* * We only want the first 96 bits of the HMAC. */ - cri[0].cri_alg = CRYPTO_SHA1_HMAC; - cri[0].cri_klen = ks->ks_class->ec_keybits; - cri[0].cri_mlen = 12; - cri[0].cri_key = ks->ks_key; - cri[0].cri_next = &cri[1]; + bzero(&cri, sizeof(cri)); + cri.cri_alg = CRYPTO_SHA1_HMAC; + cri.cri_klen = ks->ks_class->ec_keybits; + cri.cri_mlen = 12; + cri.cri_key = ks->ks_key; + cri.cri_next = NULL; + crypto_newsession(&as->as_session_sha1, &cri, + CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); - cri[1].cri_alg = CRYPTO_AES_CBC; - cri[1].cri_klen = ks->ks_class->ec_keybits; - cri[1].cri_mlen = 0; - cri[1].cri_key = ks->ks_key; - cri[1].cri_next = NULL; - - crypto_newsession(&as->as_session, cri, + bzero(&cri, sizeof(cri)); + cri.cri_alg = CRYPTO_AES_CBC; + cri.cri_klen = ks->ks_class->ec_keybits; + cri.cri_mlen = 0; + cri.cri_key = ks->ks_key; + cri.cri_next = NULL; + crypto_newsession(&as->as_session_aes, &cri, CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE); } @@ -114,7 +120,7 @@ aes_crypto_cb(struct cryptop *crp) int error; struct aes_state *as = (struct aes_state *) crp->crp_opaque; - if (CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC) + if (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC) return (0); error = crp->crp_etype; @@ -151,7 +157,7 @@ aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, crd->crd_next = NULL; crd->crd_alg = CRYPTO_AES_CBC; - crp->crp_sid = as->as_session; + crp->crp_sid = as->as_session_aes; crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC; crp->crp_buf = buf; crp->crp_opaque = (void *) as; @@ -159,7 +165,7 @@ aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf, error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC) == 0) { + if ((CRYPTO_SESID2CAPS(as->as_session_aes) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); @@ -326,7 +332,7 @@ aes_checksum(const struct krb5_key_state *ks, int usage, crd->crd_next = NULL; crd->crd_alg = CRYPTO_SHA1_HMAC; - crp->crp_sid = as->as_session; + crp->crp_sid = as->as_session_sha1; crp->crp_ilen = inlen; crp->crp_olen = 12; crp->crp_etype = 0; @@ -337,7 +343,7 @@ aes_checksum(const struct krb5_key_state 
*ks, int usage, error = crypto_dispatch(crp); - if ((CRYPTO_SESID2CAPS(as->as_session) & CRYPTOCAP_F_SYNC) == 0) { + if ((CRYPTO_SESID2CAPS(as->as_session_sha1) & CRYPTOCAP_F_SYNC) == 0) { mtx_lock(&as->as_lock); if (!error && !(crp->crp_flags & CRYPTO_F_DONE)) error = msleep(crp, &as->as_lock, 0, "gssaes", 0); diff --git a/sys/mips/conf/AR933X_BASE b/sys/mips/conf/AR933X_BASE index 38f847db12eb..c5619fe4ff4a 100644 --- a/sys/mips/conf/AR933X_BASE +++ b/sys/mips/conf/AR933X_BASE @@ -12,7 +12,7 @@ machine mips mips ident AR933X_BASE -cpu CPU_MIPS4KC +cpu CPU_MIPS24K makeoptions KERNLOADADDR=0x80050000 options HZ=1000 diff --git a/sys/mips/conf/AR934X_BASE b/sys/mips/conf/AR934X_BASE index 4faaf9e6be61..87bea17beac8 100644 --- a/sys/mips/conf/AR934X_BASE +++ b/sys/mips/conf/AR934X_BASE @@ -12,7 +12,7 @@ machine mips mips ident AR934X_BASE -cpu CPU_MIPS74KC +cpu CPU_MIPS74K makeoptions KERNLOADADDR=0x80050000 options HZ=1000 diff --git a/sys/mips/conf/QCA955X_BASE b/sys/mips/conf/QCA955X_BASE index 45dcbb7aa837..a7b5df73dc05 100644 --- a/sys/mips/conf/QCA955X_BASE +++ b/sys/mips/conf/QCA955X_BASE @@ -13,7 +13,7 @@ machine mips mips ident QCA955X_BASE -cpu CPU_MIPS74KC +cpu CPU_MIPS74K makeoptions KERNLOADADDR=0x80050000 options HZ=1000 diff --git a/sys/mips/include/asm.h b/sys/mips/include/asm.h index 9e7e7719076a..8a7d640f9e12 100644 --- a/sys/mips/include/asm.h +++ b/sys/mips/include/asm.h @@ -700,7 +700,7 @@ _C_LABEL(x): #elif defined(CPU_RMI) #define HAZARD_DELAY #define ITLBNOPFIX -#elif defined(CPU_MIPS74KC) +#elif defined(CPU_MIPS74K) #define HAZARD_DELAY sll $0,$0,3 #define ITLBNOPFIX sll $0,$0,3 #else diff --git a/sys/mips/include/cpufunc.h b/sys/mips/include/cpufunc.h index 6ffb0ba1c099..3ebb8c14b448 100644 --- a/sys/mips/include/cpufunc.h +++ b/sys/mips/include/cpufunc.h @@ -248,7 +248,7 @@ MIPS_RW32_COP0_SEL(config5, MIPS_COP_0_CONFIG, 5); #if defined(CPU_NLM) || defined(BERI_LARGE_TLB) MIPS_RW32_COP0_SEL(config6, MIPS_COP_0_CONFIG, 6); #endif -#if defined(CPU_NLM) || defined(CPU_MIPS1004KC) +#if defined(CPU_NLM) || defined(CPU_MIPS1004K) MIPS_RW32_COP0_SEL(config7, MIPS_COP_0_CONFIG, 7); #endif MIPS_RW32_COP0(count, MIPS_COP_0_COUNT); diff --git a/sys/mips/include/cpuregs.h b/sys/mips/include/cpuregs.h index 976321ab318b..a1d9bc0195da 100644 --- a/sys/mips/include/cpuregs.h +++ b/sys/mips/include/cpuregs.h @@ -110,6 +110,7 @@ * C: Cacheable, coherency unspecified. * CNC: Cacheable non-coherent. * CC: Cacheable coherent. + * CCS: Cacheable coherent, shared read. * CCE: Cacheable coherent, exclusive read. * CCEW: Cacheable coherent, exclusive write. * CCUOW: Cacheable coherent, update on write. @@ -149,14 +150,25 @@ #define MIPS_CCA_CC 0x05 /* Cacheable Coherent. 
*/ #endif -#if defined(CPU_MIPS74KC) +#if defined(CPU_MIPS74K) #define MIPS_CCA_UNCACHED 0x02 #define MIPS_CCA_CACHED 0x03 #endif -#if defined(CPU_MIPS1004KC) -#define MIPS_CCA_UNCACHED 0x02 -#define MIPS_CCA_CACHED 0x05 +/* + * 1004K and 1074K cores, as well as interAptiv and proAptiv cores, support + * Cacheable Coherent CCAs 0x04 and 0x05, as well as Cacheable non-Coherent + * CCA 0x03 and Uncached Accelerated CCA 0x07 + */ +#if defined(CPU_MIPS1004K) || defined(CPU_MIPS1074K) || \ + defined(CPU_INTERAPTIV) || defined(CPU_PROAPTIV) +#define MIPS_CCA_CNC 0x03 +#define MIPS_CCA_CCE 0x04 +#define MIPS_CCA_CCS 0x05 +#define MIPS_CCA_UA 0x07 + +/* We use shared read CCA for CACHED CCA */ +#define MIPS_CCA_CACHED MIPS_CCA_CCS #endif #ifndef MIPS_CCA_UNCACHED @@ -214,8 +226,18 @@ #define COP0_SYNC .word 0xc0 /* ehb */ #elif defined(CPU_SB1) #define COP0_SYNC ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop; ssnop -#elif defined(CPU_MIPS74KC) || defined(CPU_MIPS1004KC) -#define COP0_SYNC .word 0xc0 /* ehb */ +#elif defined(CPU_MIPS24K) || defined(CPU_MIPS34K) || \ + defined(CPU_MIPS74K) || defined(CPU_MIPS1004K) || \ + defined(CPU_MIPS1074K) || defined(CPU_INTERAPTIV) || \ + defined(CPU_PROAPTIV) +/* + * According to MIPS32tm Architecture for Programmers, Vol.II, rev. 2.00: + * "As EHB becomes standard in MIPS implementations, the previous SSNOPs can be + * removed, leaving only the EHB". + * Also, all MIPS32 Release 2 implementations have the EHB instruction, which + * resolves all execution hazards. The same goes for MIPS32 Release 3. + */ +#define COP0_SYNC .word 0xc0 /* ehb */ #else /* * Pick a reasonable default based on the "typical" spacing described in the diff --git a/sys/mips/rt305x/std.rt305x b/sys/mips/rt305x/std.rt305x index c7212a2c6d3e..d0e09b3b6b07 100644 --- a/sys/mips/rt305x/std.rt305x +++ b/sys/mips/rt305x/std.rt305x @@ -3,5 +3,5 @@ files "../rt305x/files.rt305x" -cpu CPU_MIPS4KC +cpu CPU_MIPS24K diff --git a/sys/net/bridgestp.c b/sys/net/bridgestp.c index bd5ef9f906e6..d6e118cb5891 100644 --- a/sys/net/bridgestp.c +++ b/sys/net/bridgestp.c @@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/net/if_epair.c b/sys/net/if_epair.c index fd7a757c9f86..6c14037f3d02 100644 --- a/sys/net/if_epair.c +++ b/sys/net/if_epair.c @@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include diff --git a/sys/net/if_mib.c b/sys/net/if_mib.c index a9840caa68c2..590664268667 100644 --- a/sys/net/if_mib.c +++ b/sys/net/if_mib.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/sys/net/netisr.c b/sys/net/netisr.c index 4b3576ded2ae..64efbd772b6e 100644 --- a/sys/net/netisr.c +++ b/sys/net/netisr.c @@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h index 2a7182e78b9f..ed23eb5b20a7 100644 --- a/sys/net/pfvar.h +++ b/sys/net/pfvar.h @@ -36,8 +36,10 @@ #include #include #include +#include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211.c b/sys/net80211/ieee80211.c index ca27cb13f249..a1c70401748d 100644 --- a/sys/net80211/ieee80211.c +++ b/sys/net80211/ieee80211.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_acl.c b/sys/net80211/ieee80211_acl.c index eeb7f0bc68da..58679b0b1765 100644 --- a/sys/net80211/ieee80211_acl.c +++ 
b/sys/net80211/ieee80211_acl.c @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/net80211/ieee80211_action.c b/sys/net80211/ieee80211_action.c index e37863ee12fb..9c7598932970 100644 --- a/sys/net80211/ieee80211_action.c +++ b/sys/net80211/ieee80211_action.c @@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_ageq.c b/sys/net80211/ieee80211_ageq.c index b650136c05be..d282fccf9bb4 100644 --- a/sys/net80211/ieee80211_ageq.c +++ b/sys/net80211/ieee80211_ageq.c @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include diff --git a/sys/net80211/ieee80211_amrr.c b/sys/net80211/ieee80211_amrr.c index b7c11fd2417b..f50334e9af1b 100644 --- a/sys/net80211/ieee80211_amrr.c +++ b/sys/net80211/ieee80211_amrr.c @@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include diff --git a/sys/net80211/ieee80211_crypto_none.c b/sys/net80211/ieee80211_crypto_none.c index 30f2fc38882a..fef4c0946b20 100644 --- a/sys/net80211/ieee80211_crypto_none.c +++ b/sys/net80211/ieee80211_crypto_none.c @@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include diff --git a/sys/net80211/ieee80211_ddb.c b/sys/net80211/ieee80211_ddb.c index ae4910c6ee0d..de7a243098de 100644 --- a/sys/net80211/ieee80211_ddb.c +++ b/sys/net80211/ieee80211_ddb.c @@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_freebsd.c b/sys/net80211/ieee80211_freebsd.c index 0ccf378fff6d..57cbbf57056e 100644 --- a/sys/net80211/ieee80211_freebsd.c +++ b/sys/net80211/ieee80211_freebsd.c @@ -32,10 +32,11 @@ __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include -#include #include #include +#include #include +#include #include #include #include diff --git a/sys/net80211/ieee80211_ht.c b/sys/net80211/ieee80211_ht.c index e6fe16e58fa5..1a217346f811 100644 --- a/sys/net80211/ieee80211_ht.c +++ b/sys/net80211/ieee80211_ht.c @@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_ioctl.c b/sys/net80211/ieee80211_ioctl.c index e78b86221121..bdaac4810df0 100644 --- a/sys/net80211/ieee80211_ioctl.c +++ b/sys/net80211/ieee80211_ioctl.c @@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/net80211/ieee80211_output.c b/sys/net80211/ieee80211_output.c index 322491d25a2c..42046807abaf 100644 --- a/sys/net80211/ieee80211_output.c +++ b/sys/net80211/ieee80211_output.c @@ -33,8 +33,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include +#include #include #include diff --git a/sys/net80211/ieee80211_power.c b/sys/net80211/ieee80211_power.c index 73c1382b1218..587e48f07d3f 100644 --- a/sys/net80211/ieee80211_power.c +++ b/sys/net80211/ieee80211_power.c @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include diff --git a/sys/net80211/ieee80211_proto.c b/sys/net80211/ieee80211_proto.c index 408bf935b92c..1b8b5249946a 100644 --- a/sys/net80211/ieee80211_proto.c +++ b/sys/net80211/ieee80211_proto.c @@ -35,8 +35,9 @@ __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include -#include #include +#include +#include #include #include diff --git a/sys/net80211/ieee80211_scan.c b/sys/net80211/ieee80211_scan.c index f28a9829d60d..8bc5d2eed0d1 100644 --- a/sys/net80211/ieee80211_scan.c +++ 
b/sys/net80211/ieee80211_scan.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_scan_sta.c b/sys/net80211/ieee80211_scan_sta.c index ea922d7e82a3..accf6c8cd299 100644 --- a/sys/net80211/ieee80211_scan_sta.c +++ b/sys/net80211/ieee80211_scan_sta.c @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_scan_sw.c b/sys/net80211/ieee80211_scan_sw.c index 53e45203f10c..52f6ea7210b8 100644 --- a/sys/net80211/ieee80211_scan_sw.c +++ b/sys/net80211/ieee80211_scan_sw.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/net80211/ieee80211_xauth.c b/sys/net80211/ieee80211_xauth.c index 2341ffb162e9..1e57e16af646 100644 --- a/sys/net80211/ieee80211_xauth.c +++ b/sys/net80211/ieee80211_xauth.c @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/netgraph/netflow/netflow.c b/sys/netgraph/netflow/netflow.c index 6adffc9e6072..84dee4767240 100644 --- a/sys/netgraph/netflow/netflow.c +++ b/sys/netgraph/netflow/netflow.c @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include diff --git a/sys/netgraph/netflow/netflow_v9.c b/sys/netgraph/netflow/netflow_v9.c index 2fc700d5d4f7..5124bbd48075 100644 --- a/sys/netgraph/netflow/netflow_v9.c +++ b/sys/netgraph/netflow/netflow_v9.c @@ -37,9 +37,11 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include +#include #include #include diff --git a/sys/netgraph/netflow/ng_netflow.c b/sys/netgraph/netflow/ng_netflow.c index b524ca51fcbd..4f9f953c6f91 100644 --- a/sys/netgraph/netflow/ng_netflow.c +++ b/sys/netgraph/netflow/ng_netflow.c @@ -40,10 +40,12 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include #include +#include #include #include diff --git a/sys/netgraph/ng_base.c b/sys/netgraph/ng_base.c index 673dc25045f6..0f48e12a8759 100644 --- a/sys/netgraph/ng_base.c +++ b/sys/netgraph/ng_base.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include diff --git a/sys/netinet/in_proto.c b/sys/netinet/in_proto.c index 0ac91c72bb88..e7569c45f740 100644 --- a/sys/netinet/in_proto.c +++ b/sys/netinet/in_proto.c @@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c index 9edfcf67808a..a6f625d13a11 100644 --- a/sys/netinet/sctp_input.c +++ b/sys/netinet/sctp_input.c @@ -85,7 +85,7 @@ static void sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *cp, struct sctp_inpcb *inp, - struct sctp_tcb *stcb, int *abort_no_unlock, + struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_no_unlock, uint8_t mflowtype, uint32_t mflowid, uint32_t vrf_id, uint16_t port) { @@ -198,8 +198,8 @@ sctp_handle_init(struct mbuf *m, int iphlen, int offset, sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); } else { SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); - sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, src, dst, - sh, cp, + sctp_send_initiate_ack(inp, stcb, net, m, iphlen, offset, + src, dst, sh, cp, mflowtype, mflowid, vrf_id, port, ((stcb == NULL) ? 
SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); @@ -4840,7 +4840,7 @@ __attribute__((noinline)) } sctp_handle_init(m, iphlen, *offset, src, dst, sh, (struct sctp_init_chunk *)ch, inp, - stcb, &abort_no_unlock, + stcb, *netp, &abort_no_unlock, mflowtype, mflowid, vrf_id, port); *offset = length; @@ -5684,9 +5684,18 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt stcb = sctp_findassociation_addr(m, offset, src, dst, sh, ch, &inp, &net, vrf_id); #if defined(INET) || defined(INET6) - if ((net != NULL) && (port != 0)) { + if ((ch->chunk_type != SCTP_INITIATION) && + (net != NULL) && (net->port != port)) { if (net->port == 0) { - sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); + /* UDP encapsulation turned on. */ + net->mtu -= sizeof(struct udphdr); + if (stcb->asoc.smallest_mtu > net->mtu) { + sctp_pathmtu_adjustment(stcb, net->mtu); + } + } else if (port == 0) { + /* UDP encapsulation turned off. */ + net->mtu += sizeof(struct udphdr); + /* XXX Update smallest_mtu */ } net->port = port; } @@ -5715,9 +5724,18 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt stcb = sctp_findassociation_addr(m, offset, src, dst, sh, ch, &inp, &net, vrf_id); #if defined(INET) || defined(INET6) - if ((net != NULL) && (port != 0)) { + if ((ch->chunk_type != SCTP_INITIATION) && + (net != NULL) && (net->port != port)) { if (net->port == 0) { - sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); + /* UDP encapsulation turned on. */ + net->mtu -= sizeof(struct udphdr); + if (stcb->asoc.smallest_mtu > net->mtu) { + sctp_pathmtu_adjustment(stcb, net->mtu); + } + } else if (port == 0) { + /* UDP encapsulation turned off. */ + net->mtu += sizeof(struct udphdr); + /* XXX Update smallest_mtu */ } net->port = port; } @@ -5827,9 +5845,18 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int lengt */ inp = stcb->sctp_ep; #if defined(INET) || defined(INET6) - if ((net != NULL) && (port != 0)) { + if ((ch->chunk_type != SCTP_INITIATION) && + (net != NULL) && (net->port != port)) { if (net->port == 0) { - sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); + /* UDP encapsulation turned on. */ + net->mtu -= sizeof(struct udphdr); + if (stcb->asoc.smallest_mtu > net->mtu) { + sctp_pathmtu_adjustment(stcb, net->mtu); + } + } else if (port == 0) { + /* UDP encapsulation turned off. */ + net->mtu += sizeof(struct udphdr); + /* XXX Update smallest_mtu */ } net->port = port; } diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c index 95ac97c903dd..017261000189 100644 --- a/sys/netinet/sctp_output.c +++ b/sys/netinet/sctp_output.c @@ -5307,6 +5307,7 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, uint16_t ptype, plen; uint8_t fnd; struct sctp_nets *net; + int check_src; #ifdef INET struct sockaddr_in sin4, *sa4; @@ -5328,39 +5329,61 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, sin6.sin6_len = sizeof(sin6); #endif /* First what about the src address of the pkt ? 
*/ - fnd = 0; - TAILQ_FOREACH(net, &asoc->nets, sctp_next) { - sa = (struct sockaddr *)&net->ro._l_addr; - if (sa->sa_family == src->sa_family) { + check_src = 0; + switch (src->sa_family) { #ifdef INET - if (sa->sa_family == AF_INET) { - struct sockaddr_in *src4; - - sa4 = (struct sockaddr_in *)sa; - src4 = (struct sockaddr_in *)src; - if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { - fnd = 1; - break; - } - } + case AF_INET: + if (asoc->scope.ipv4_addr_legal) { + check_src = 1; + } + break; #endif #ifdef INET6 - if (sa->sa_family == AF_INET6) { - struct sockaddr_in6 *src6; - - sa6 = (struct sockaddr_in6 *)sa; - src6 = (struct sockaddr_in6 *)src; - if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { - fnd = 1; - break; - } - } -#endif + case AF_INET6: + if (asoc->scope.ipv6_addr_legal) { + check_src = 1; } + break; +#endif + default: + /* TSNH */ + break; } - if (fnd == 0) { - /* New address added! no need to look futher. */ - return (1); + if (check_src) { + fnd = 0; + TAILQ_FOREACH(net, &asoc->nets, sctp_next) { + sa = (struct sockaddr *)&net->ro._l_addr; + if (sa->sa_family == src->sa_family) { +#ifdef INET + if (sa->sa_family == AF_INET) { + struct sockaddr_in *src4; + + sa4 = (struct sockaddr_in *)sa; + src4 = (struct sockaddr_in *)src; + if (sa4->sin_addr.s_addr == src4->sin_addr.s_addr) { + fnd = 1; + break; + } + } +#endif +#ifdef INET6 + if (sa->sa_family == AF_INET6) { + struct sockaddr_in6 *src6; + + sa6 = (struct sockaddr_in6 *)sa; + src6 = (struct sockaddr_in6 *)src; + if (SCTP6_ARE_ADDR_EQUAL(sa6, src6)) { + fnd = 1; + break; + } + } +#endif + } + } + if (fnd == 0) { + /* New address added! no need to look futher. */ + return (1); + } } /* Ok so far lets munge through the rest of the packet */ offset += sizeof(struct sctp_init_chunk); @@ -5381,9 +5404,11 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, phdr == NULL) { return (1); } - p4 = (struct sctp_ipv4addr_param *)phdr; - sin4.sin_addr.s_addr = p4->addr; - sa_touse = (struct sockaddr *)&sin4; + if (asoc->scope.ipv4_addr_legal) { + p4 = (struct sctp_ipv4addr_param *)phdr; + sin4.sin_addr.s_addr = p4->addr; + sa_touse = (struct sockaddr *)&sin4; + } break; } #endif @@ -5398,10 +5423,12 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, phdr == NULL) { return (1); } - p6 = (struct sctp_ipv6addr_param *)phdr; - memcpy((caddr_t)&sin6.sin6_addr, p6->addr, - sizeof(p6->addr)); - sa_touse = (struct sockaddr *)&sin6; + if (asoc->scope.ipv6_addr_legal) { + p6 = (struct sctp_ipv6addr_param *)phdr; + memcpy((caddr_t)&sin6.sin6_addr, p6->addr, + sizeof(p6->addr)); + sa_touse = (struct sockaddr *)&sin6; + } break; } #endif @@ -5457,7 +5484,8 @@ sctp_are_there_new_addresses(struct sctp_association *asoc, */ void sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, - struct mbuf *init_pkt, int iphlen, int offset, + struct sctp_nets *src_net, struct mbuf *init_pkt, + int iphlen, int offset, struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, struct sctp_init_chunk *init_chk, uint8_t mflowtype, uint32_t mflowid, @@ -5501,20 +5529,39 @@ sctp_send_initiate_ack(struct sctp_inpcb *inp, struct sctp_tcb *stcb, asoc = NULL; } if ((asoc != NULL) && - (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT) && - (sctp_are_there_new_addresses(asoc, init_pkt, offset, src))) { - /* new addresses, out of here in non-cookie-wait states */ - /* - * Send a ABORT, we don't add the new address error clause - * though we even set the T bit and copy in the 0 tag.. 
this - * looks no different than if no listener was present. - */ - op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), - "Address added"); - sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, - mflowtype, mflowid, inp->fibnum, - vrf_id, port); - return; + (SCTP_GET_STATE(asoc) != SCTP_STATE_COOKIE_WAIT)) { + if (sctp_are_there_new_addresses(asoc, init_pkt, offset, src)) { + /* + * new addresses, out of here in non-cookie-wait + * states + * + * Send an ABORT, without the new address error cause. + * This looks no different than if no listener was + * present. + */ + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Address added"); + sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, + mflowtype, mflowid, inp->fibnum, + vrf_id, port); + return; + } + if (src_net != NULL && (src_net->port != port)) { + /* + * change of remote encapsulation port, out of here + * in non-cookie-wait states + * + * Send an ABORT, without an specific error cause. This + * looks no different than if no listener was + * present. + */ + op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), + "Remote encapsulation port changed"); + sctp_send_abort(init_pkt, iphlen, src, dst, sh, 0, op_err, + mflowtype, mflowid, inp->fibnum, + vrf_id, port); + return; + } } abort_flag = 0; op_err = sctp_arethere_unrecognized_parameters(init_pkt, diff --git a/sys/netinet/sctp_output.h b/sys/netinet/sctp_output.h index d7222c44bf51..b2441a6fe0b6 100644 --- a/sys/netinet/sctp_output.h +++ b/sys/netinet/sctp_output.h @@ -80,7 +80,8 @@ sctp_send_initiate(struct sctp_inpcb *, struct sctp_tcb *, int ); void -sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *, struct mbuf *, +sctp_send_initiate_ack(struct sctp_inpcb *, struct sctp_tcb *, + struct sctp_nets *, struct mbuf *, int, int, struct sockaddr *, struct sockaddr *, struct sctphdr *, struct sctp_init_chunk *, diff --git a/sys/netinet/sctp_sysctl.c b/sys/netinet/sctp_sysctl.c index b72e896cd223..e19793679984 100644 --- a/sys/netinet/sctp_sysctl.c +++ b/sys/netinet/sctp_sysctl.c @@ -426,7 +426,11 @@ sctp_sysctl_handle_assoclist(SYSCTL_HANDLER_ARGS) xinpcb.maxqlen = 0; } else { xinpcb.qlen = so->so_qlen; + xinpcb.qlen_old = so->so_qlen > USHRT_MAX ? + USHRT_MAX : (uint16_t) so->so_qlen; xinpcb.maxqlen = so->so_qlimit; + xinpcb.maxqlen_old = so->so_qlimit > USHRT_MAX ? + USHRT_MAX : (uint16_t) so->so_qlimit; } SCTP_INP_INCR_REF(inp); SCTP_INP_RUNLOCK(inp); diff --git a/sys/netinet/sctp_uio.h b/sys/netinet/sctp_uio.h index cfc79788adda..2299f66fa51d 100644 --- a/sys/netinet/sctp_uio.h +++ b/sys/netinet/sctp_uio.h @@ -1170,13 +1170,15 @@ struct xsctp_inpcb { uint32_t total_nospaces; uint32_t fragmentation_point; uint16_t local_port; - uint16_t qlen; - uint16_t maxqlen; + uint16_t qlen_old; + uint16_t maxqlen_old; void *socket; + uint32_t qlen; + uint32_t maxqlen; #if defined(__LP64__) - uint32_t extra_padding[29]; /* future */ + uint32_t extra_padding[27]; /* future */ #else - uint32_t extra_padding[30]; /* future */ + uint32_t extra_padding[28]; /* future */ #endif }; diff --git a/sys/netinet/sctp_var.h b/sys/netinet/sctp_var.h index ae1b3eb3089d..7213b97684bd 100644 --- a/sys/netinet/sctp_var.h +++ b/sys/netinet/sctp_var.h @@ -86,7 +86,7 @@ extern struct pr_usrreqs sctp_usrreqs; #define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0)) -#define sctp_sbspace_sub(a,b) ((a > b) ? (a - b) : 0) +#define sctp_sbspace_sub(a,b) (((a) > (b)) ? 
((a) - (b)) : 0) /* * I tried to cache the readq entries at one point. But the reality diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c index 566334348810..d49071c50d28 100644 --- a/sys/netinet/tcp_lro.c +++ b/sys/netinet/tcp_lro.c @@ -38,8 +38,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include +#include #include #include diff --git a/sys/netinet/toecore.c b/sys/netinet/toecore.c index cfa77e76a128..4bca16c09120 100644 --- a/sys/netinet/toecore.c +++ b/sys/netinet/toecore.c @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/netinet6/in6_proto.c b/sys/netinet6/in6_proto.c index 4d328d282ec3..cad09316cf94 100644 --- a/sys/netinet6/in6_proto.c +++ b/sys/netinet6/in6_proto.c @@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/netinet6/send.c b/sys/netinet6/send.c index b946ded155d1..66b36db7827b 100644 --- a/sys/netinet6/send.c +++ b/sys/netinet6/send.c @@ -29,6 +29,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include diff --git a/sys/netipsec/ipsec_mbuf.c b/sys/netipsec/ipsec_mbuf.c index 8e68ffb41c6f..b3df0e01cc50 100644 --- a/sys/netipsec/ipsec_mbuf.c +++ b/sys/netipsec/ipsec_mbuf.c @@ -34,6 +34,7 @@ #include #include +#include #include #include diff --git a/sys/netipsec/key_debug.c b/sys/netipsec/key_debug.c index b5bdb0ed42ca..4aad7433ee7a 100644 --- a/sys/netipsec/key_debug.c +++ b/sys/netipsec/key_debug.c @@ -36,10 +36,10 @@ #include "opt_ipsec.h" #endif -#include #include #ifdef _KERNEL #include +#include #include #include #endif diff --git a/sys/netpfil/ipfw/ip_fw_log.c b/sys/netpfil/ipfw/ip_fw_log.c index 29374f949015..236c4f1f19d7 100644 --- a/sys/netpfil/ipfw/ip_fw_log.c +++ b/sys/netpfil/ipfw/ip_fw_log.c @@ -39,8 +39,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include +#include #include #include #include diff --git a/sys/powerpc/fpu/fpu_emu.c b/sys/powerpc/fpu/fpu_emu.c index 82074aab868f..9056dca8aa23 100644 --- a/sys/powerpc/fpu/fpu_emu.c +++ b/sys/powerpc/fpu/fpu_emu.c @@ -183,7 +183,7 @@ fpu_dumpfpn(struct fpn *fp) * (Typically: zero, SIGFPE, SIGILL, SIGSEGV) */ int -fpu_emulate(struct trapframe *frame, struct fpreg *fpf) +fpu_emulate(struct trapframe *frame, struct fpu *fpf) { static union instr insn; static struct fpemu fe; diff --git a/sys/powerpc/fpu/fpu_emu.h b/sys/powerpc/fpu/fpu_emu.h index 89ccc3611528..30e617e474ec 100644 --- a/sys/powerpc/fpu/fpu_emu.h +++ b/sys/powerpc/fpu/fpu_emu.h @@ -138,7 +138,7 @@ struct fpn { * Emulator state. 
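[Editor's note] The powerpc hunks in this area separate the ptrace/core-dump structure struct fpreg (now 32 plain doubles plus fpscr, see the reg.h hunk below) from the in-kernel save area struct fpu, which keeps each register in a wider per-register slot (note the pcb_fpu.fpr[i].fpr accesses in exec_machdep.c). Because the two layouts no longer match, a whole-struct memcpy() would copy the wrong bytes; fill_fpregs()/set_fpregs() therefore copy element by element, roughly:

	/* Export: kernel save area -> ptrace ABI structure (sketch). */
	memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double));
	for (i = 0; i < 32; i++)
		memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr,
		    sizeof(double));
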
*/ struct fpemu { - struct fpreg *fe_fpstate; /* registers, etc */ + struct fpu *fe_fpstate; /* registers, etc */ int fe_fpscr; /* fpscr copy (modified during op) */ int fe_cx; /* keep track of exceptions */ struct fpn fe_f1; /* operand 1 */ diff --git a/sys/powerpc/fpu/fpu_extern.h b/sys/powerpc/fpu/fpu_extern.h index 9c24f1dab038..9f4b0132dc18 100644 --- a/sys/powerpc/fpu/fpu_extern.h +++ b/sys/powerpc/fpu/fpu_extern.h @@ -31,14 +31,14 @@ */ struct proc; -struct fpreg; +struct fpu; struct trapframe; union instr; struct fpemu; struct fpn; /* fpu.c */ -int fpu_emulate(struct trapframe *, struct fpreg *); +int fpu_emulate(struct trapframe *, struct fpu *); int fpu_execute(struct trapframe *, struct fpemu *, union instr *); /* fpu_explode.c */ diff --git a/sys/powerpc/include/reg.h b/sys/powerpc/include/reg.h index e77625ab8ddf..0eff51e5451e 100644 --- a/sys/powerpc/include/reg.h +++ b/sys/powerpc/include/reg.h @@ -18,12 +18,8 @@ struct reg { register_t pc; }; -/* Must match pcb.pcb_fpu */ struct fpreg { - union { - double fpr; - uint64_t vsr[2]; - } fpreg[32]; + double fpreg[32]; double fpscr; }; diff --git a/sys/powerpc/powerpc/exec_machdep.c b/sys/powerpc/powerpc/exec_machdep.c index d657244ffe56..d8238a1b58f1 100644 --- a/sys/powerpc/powerpc/exec_machdep.c +++ b/sys/powerpc/powerpc/exec_machdep.c @@ -608,13 +608,18 @@ int fill_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; + int i; pcb = td->td_pcb; if ((pcb->pcb_flags & PCB_FPREGS) == 0) memset(fpregs, 0, sizeof(struct fpreg)); - else - memcpy(fpregs, &pcb->pcb_fpu, sizeof(struct fpreg)); + else { + memcpy(&fpregs->fpscr, &pcb->pcb_fpu.fpscr, sizeof(double)); + for (i = 0; i < 32; i++) + memcpy(&fpregs->fpreg[i], &pcb->pcb_fpu.fpr[i].fpr, + sizeof(double)); + } return (0); } @@ -641,10 +646,15 @@ int set_fpregs(struct thread *td, struct fpreg *fpregs) { struct pcb *pcb; + int i; pcb = td->td_pcb; pcb->pcb_flags |= PCB_FPREGS; - memcpy(&pcb->pcb_fpu, fpregs, sizeof(struct fpreg)); + memcpy(&pcb->pcb_fpu.fpscr, &fpregs->fpscr, sizeof(double)); + for (i = 0; i < 32; i++) { + memcpy(&pcb->pcb_fpu.fpr[i].fpr, &fpregs->fpreg[i], + sizeof(double)); + } return (0); } @@ -1060,7 +1070,7 @@ ppc_instr_emulate(struct trapframe *frame, struct pcb *pcb) bzero(&pcb->pcb_fpu, sizeof(pcb->pcb_fpu)); pcb->pcb_flags |= PCB_FPREGS; } - sig = fpu_emulate(frame, (struct fpreg *)&pcb->pcb_fpu); + sig = fpu_emulate(frame, &pcb->pcb_fpu); #endif return (sig); diff --git a/sys/powerpc/powerpc/nexus.c b/sys/powerpc/powerpc/nexus.c index 8a4d81583f67..dff21f804146 100644 --- a/sys/powerpc/powerpc/nexus.c +++ b/sys/powerpc/powerpc/nexus.c @@ -189,13 +189,13 @@ nexus_activate_resource(device_t bus __unused, device_t child __unused, { if (type == SYS_RES_MEMORY) { - vm_offset_t start; + vm_paddr_t start; void *p; - start = (vm_offset_t) rman_get_start(r); + start = (vm_paddr_t) rman_get_start(r); if (bootverbose) - printf("nexus mapdev: start %zx, len %ld\n", start, - rman_get_size(r)); + printf("nexus mapdev: start %jx, len %ld\n", + (uintmax_t)start, rman_get_size(r)); p = pmap_mapdev(start, (vm_size_t) rman_get_size(r)); if (p == NULL) diff --git a/sys/riscv/conf/DEFAULTS b/sys/riscv/conf/DEFAULTS new file mode 100644 index 000000000000..5451decf822d --- /dev/null +++ b/sys/riscv/conf/DEFAULTS @@ -0,0 +1,13 @@ +# +# DEFAULTS -- Default kernel configuration file for FreeBSD/RISC-V +# +# $FreeBSD$ + +machine riscv riscv64 + +# Pseudo devices. 
+device mem # Memory and kernel memory devices + +# Default partitioning schemes +options GEOM_PART_BSD +options GEOM_PART_MBR diff --git a/sys/riscv/conf/GENERIC b/sys/riscv/conf/GENERIC new file mode 100644 index 000000000000..a32a1f24ecfb --- /dev/null +++ b/sys/riscv/conf/GENERIC @@ -0,0 +1,104 @@ +# +# GENERIC -- Generic kernel configuration file for FreeBSD/RISC-V +# +# For more information on this file, please read the config(5) manual page, +# and/or the handbook section on Kernel Configuration Files: +# +# http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html +# +# The handbook is also available locally in /usr/share/doc/handbook +# if you've installed the doc distribution, otherwise always see the +# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the +# latest information. +# +# An exhaustive list of options and more detailed explanations of the +# device lines is also present in the ../../conf/NOTES and NOTES files. +# If you are in doubt as to the purpose or necessity of a line, check first +# in NOTES. +# +# $FreeBSD$ + +cpu RISCV +ident GENERIC + +makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols +# makeoptions WITH_CTF=1 # Run ctfconvert(1) for DTrace support +makeoptions NO_MODULES=1 # We don't yet support modules on RISC-V + +options SCHED_ULE # ULE scheduler +options PREEMPTION # Enable kernel thread preemption +options INET # InterNETworking +options INET6 # IPv6 communications protocols +options IPSEC # IP (v4/v6) security +options TCP_OFFLOAD # TCP offload +options SCTP # Stream Control Transmission Protocol +options FFS # Berkeley Fast Filesystem +options SOFTUPDATES # Enable FFS soft updates support +options UFS_ACL # Support for access control lists +options UFS_DIRHASH # Improve performance on big directories +options UFS_GJOURNAL # Enable gjournal-based UFS journaling +options QUOTA # Enable disk quotas for UFS +options MD_ROOT # MD is a potential root device +options NFSCL # Network Filesystem Client +options NFSD # Network Filesystem Server +options NFSLOCKD # Network Lock Manager +options NFS_ROOT # NFS usable as /, requires NFSCL +options MSDOSFS # MSDOS Filesystem +options CD9660 # ISO 9660 Filesystem +options PROCFS # Process filesystem (requires PSEUDOFS) +options PSEUDOFS # Pseudo-filesystem framework +options GEOM_PART_GPT # GUID Partition Tables. +# options GEOM_RAID # Soft RAID functionality. +options GEOM_LABEL # Provides labelization +options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI +options KTRACE # ktrace(1) support +# options STACK # stack(9) support +options SYSVSHM # SYSV-style shared memory +options SYSVMSG # SYSV-style message queues +options SYSVSEM # SYSV-style semaphores +options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions +options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed. +options KBD_INSTALL_CDEV # install a CDEV entry in /dev +# options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4) +options AUDIT # Security event auditing +options CAPABILITY_MODE # Capsicum capability mode +options CAPABILITIES # Capsicum capabilities +options MAC # TrustedBSD MAC Framework +# options KDTRACE_FRAME # Ensure frames are compiled in +# options KDTRACE_HOOKS # Kernel DTrace hooks +# options VFP # Floating-point support +options RACCT # Resource accounting framework +options RACCT_DEFAULT_TO_DISABLED # Set kern.racct.enable=0 by default +options RCTL # Resource limits +# options SMP + +# Debugging support. 
Always need this: +# options KDB # Enable kernel debugger support. +# options KDB_TRACE # Print a stack trace for a panic. +# For full debugger support use (turn off in stable branch): +# options DDB # Support DDB. +# options GDB # Support remote GDB. +options DEADLKRES # Enable the deadlock resolver +options INVARIANTS # Enable calls of extra sanity checking +options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS +# options WITNESS # Enable checks to detect deadlocks and cycles +# options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed +options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones + +options ROOTDEVNAME=\"ufs:/dev/htif_blk0\" +# options EARLY_PRINTF + +# Pseudo devices. +device loop # Network loopback +device random # Entropy device +device ether # Ethernet support +device vlan # 802.1Q VLAN support +device tun # Packet tunnel. +device md # Memory "disks" +device gif # IPv6 and IPv4 tunneling +device firmware # firmware assist module + +# RISCVTODO: This needs to be done via loader (when it's available). +options FDT +options FDT_DTB_STATIC +makeoptions FDT_DTS_FILE=spike.dts diff --git a/sys/riscv/htif/htif.c b/sys/riscv/htif/htif.c new file mode 100644 index 000000000000..08e6a43c2b94 --- /dev/null +++ b/sys/riscv/htif/htif.c @@ -0,0 +1,284 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "htif.h" + +static struct resource_spec htif_spec[] = { + { SYS_RES_IRQ, 0, RF_ACTIVE }, + { -1, 0 } +}; + +struct intr_entry { + void (*func) (void *, uint64_t); + void *arg; +}; + +struct intr_entry intrs[HTIF_NDEV]; + +uint64_t +htif_command(uint64_t arg) +{ + + return (machine_command(ECALL_HTIF_CMD, arg)); +} + +int +htif_setup_intr(int id, void *func, void *arg) +{ + + if (id >= HTIF_NDEV) + return (-1); + + intrs[id].func = func; + intrs[id].arg = arg; + + return (0); +} + +static void +htif_handle_entry(struct htif_softc *sc) +{ + uint64_t entry; + uint8_t devcmd; + uint8_t devid; + + entry = machine_command(ECALL_HTIF_GET_ENTRY, 0); + while (entry) { + devid = HTIF_DEV_ID(entry); + devcmd = HTIF_DEV_CMD(entry); + + if (devcmd == HTIF_CMD_IDENTIFY) { + /* Enumeration interrupt */ + if (devid == sc->identify_id) + sc->identify_done = 1; + } else { + /* Device interrupt */ + if (intrs[devid].func != NULL) + intrs[devid].func(intrs[devid].arg, entry); + } + + entry = machine_command(ECALL_HTIF_GET_ENTRY, 0); + } +} + +static int +htif_intr(void *arg) +{ + struct htif_softc *sc; + + sc = arg; + + htif_handle_entry(sc); + + csr_clear(sip, SIE_SSIE); + + return (FILTER_HANDLED); +} + +static int +htif_add_device(struct htif_softc *sc, int i, char *id, char *name) +{ + struct htif_dev_ivars *di; + + di = malloc(sizeof(struct htif_dev_ivars), M_DEVBUF, M_WAITOK | M_ZERO); + di->sc = sc; + di->index = i; + di->id = malloc(HTIF_ID_LEN, M_DEVBUF, M_WAITOK | M_ZERO); + memcpy(di->id, id, HTIF_ID_LEN); + + di->dev = device_add_child(sc->dev, name, -1); + device_set_ivars(di->dev, di); + + return (0); +} + +static int +htif_enumerate(struct htif_softc *sc) +{ + char id[HTIF_ID_LEN] __aligned(HTIF_ALIGN); + uint64_t paddr; + uint64_t data; + uint64_t cmd; + int len; + int i; + + device_printf(sc->dev, "Enumerating devices\n"); + + for (i = 0; i < HTIF_NDEV; i++) { + paddr = pmap_kextract((vm_offset_t)&id); + data = (paddr << IDENTIFY_PADDR_SHIFT); + data |= IDENTIFY_IDENT; + + sc->identify_id = i; + sc->identify_done = 0; + + cmd = i; + cmd <<= HTIF_DEV_ID_SHIFT; + cmd |= (HTIF_CMD_IDENTIFY << HTIF_CMD_SHIFT); + cmd |= data; + + htif_command(cmd); + + /* Do poll as interrupts are disabled yet */ + while (sc->identify_done == 0) { + htif_handle_entry(sc); + } + + len = strnlen(id, sizeof(id)); + if (len <= 0) { + continue; + } + + if (bootverbose) + printf(" %d %s\n", i, id); + + if (strncmp(id, "disk", 4) == 0) + htif_add_device(sc, i, id, "htif_blk"); + else if (strncmp(id, "bcd", 3) == 0) + htif_add_device(sc, i, id, "htif_console"); + else if (strncmp(id, "syscall_proxy", 13) == 0) + htif_add_device(sc, i, id, "htif_syscall_proxy"); + } + + return (bus_generic_attach(sc->dev)); +} + +int +htif_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) +{ + struct htif_dev_ivars *ivars; + + ivars = device_get_ivars(child); + + switch (which) { + case HTIF_IVAR_INDEX: + *result = ivars->index; + break; + case HTIF_IVAR_ID: + *result = (uintptr_t)ivars->id; + default: + return (EINVAL); + } + + return (0); +} + +static int +htif_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (!ofw_bus_is_compatible(dev, "riscv,htif")) + return (ENXIO); + + device_set_desc(dev, "HTIF bus device"); + return 
(BUS_PROBE_DEFAULT); +} + +static int +htif_attach(device_t dev) +{ + struct htif_softc *sc; + int error; + + sc = device_get_softc(dev); + sc->dev = dev; + + if (bus_alloc_resources(dev, htif_spec, sc->res)) { + device_printf(dev, "could not allocate resources\n"); + return (ENXIO); + } + + /* Setup IRQs handler */ + error = bus_setup_intr(dev, sc->res[0], INTR_TYPE_CLK, + htif_intr, NULL, sc, &sc->ihl[0]); + if (error) { + device_printf(dev, "Unable to alloc int resource.\n"); + return (ENXIO); + } + + csr_set(sie, SIE_SSIE); + + return (htif_enumerate(sc)); +} + +static device_method_t htif_methods[] = { + DEVMETHOD(device_probe, htif_probe), + DEVMETHOD(device_attach, htif_attach), + + /* Bus interface */ + DEVMETHOD(bus_read_ivar, htif_read_ivar), + + DEVMETHOD_END +}; + +static driver_t htif_driver = { + "htif", + htif_methods, + sizeof(struct htif_softc) +}; + +static devclass_t htif_devclass; + +DRIVER_MODULE(htif, simplebus, htif_driver, + htif_devclass, 0, 0); diff --git a/sys/riscv/htif/htif.h b/sys/riscv/htif/htif.h new file mode 100644 index 000000000000..a1183d97c08e --- /dev/null +++ b/sys/riscv/htif/htif.h @@ -0,0 +1,93 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#define HTIF_DEV_ID_SHIFT (56) +#define HTIF_DEV_ID_MASK (0xfful << HTIF_DEV_ID_SHIFT) +#define HTIF_CMD_SHIFT (48) +#define HTIF_CMD_MASK (0xfful << HTIF_CMD_SHIFT) +#define HTIF_DATA_SHIFT (0) +#define HTIF_DATA_MASK (0xffffffff << HTIF_DATA_SHIFT) + +#define HTIF_CMD_READ (0x00ul) +#define HTIF_CMD_WRITE (0x01ul) +#define HTIF_CMD_READ_CONTROL_REG (0x02ul) +#define HTIF_CMD_WRITE_CONTROL_REG (0x03ul) +#define HTIF_CMD_IDENTIFY (0xfful) +#define IDENTIFY_PADDR_SHIFT 8 +#define IDENTIFY_IDENT 0xff + +#define HTIF_NDEV (256) +#define HTIF_ID_LEN (64) +#define HTIF_ALIGN (64) + +#define HTIF_DEV_CMD(entry) ((entry & HTIF_CMD_MASK) >> HTIF_CMD_SHIFT) +#define HTIF_DEV_ID(entry) ((entry & HTIF_DEV_ID_MASK) >> HTIF_DEV_ID_SHIFT) +#define HTIF_DEV_DATA(entry) ((entry & HTIF_DATA_MASK) >> HTIF_DATA_SHIFT) + +/* bus softc */ +struct htif_softc { + struct resource *res[1]; + void *ihl[1]; + device_t dev; + uint64_t identify_id; + uint64_t identify_done; +}; + +/* device private data */ +struct htif_dev_ivars { + char *id; + int index; + device_t dev; + struct htif_softc *sc; +}; + +uint64_t htif_command(uint64_t); +int htif_setup_intr(int id, void *func, void *arg); +int htif_read_ivar(device_t dev, device_t child, int which, uintptr_t *result); + +enum htif_device_ivars { + HTIF_IVAR_INDEX, + HTIF_IVAR_ID, +}; + +/* + * Simplified accessors for HTIF devices + */ +#define HTIF_ACCESSOR(var, ivar, type) \ + __BUS_ACCESSOR(htif, var, HTIF, ivar, type) + +HTIF_ACCESSOR(index, INDEX, int); +HTIF_ACCESSOR(id, ID, char *); diff --git a/sys/riscv/htif/htif_block.c b/sys/riscv/htif/htif_block.c new file mode 100644 index 000000000000..58804d7aeb88 --- /dev/null +++ b/sys/riscv/htif/htif_block.c @@ -0,0 +1,289 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
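[Editor's note] For readers new to HTIF: the host-target interface is driven by single 64-bit command words, and the masks in htif.h above define their layout. The top byte selects the device, the next byte the command, and the low bits carry command-specific data (the drivers in this commit put a character or a physical address there). A small helper equivalent to what the drivers open-code might look like this (illustrative sketch, not part of the diff):

	static inline uint64_t
	htif_build_cmd(uint64_t devid, uint64_t cmd, uint64_t data)
	{

		return ((devid << HTIF_DEV_ID_SHIFT) |
		    (cmd << HTIF_CMD_SHIFT) | data);
	}

	/* e.g. what htif_putc() sends for console device 1: */
	/* htif_command(htif_build_cmd(1, HTIF_CMD_WRITE, c)); */
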
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#include "htif.h" + +#define SECTOR_SIZE_SHIFT (9) +#define SECTOR_SIZE (1 << SECTOR_SIZE_SHIFT) + +#define HTIF_BLK_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) +#define HTIF_BLK_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) +#define HTIF_BLK_LOCK_INIT(_sc) \ + mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \ + "htif_blk", MTX_DEF) +#define HTIF_BLK_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); +#define HTIF_BLK_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); +#define HTIF_BLK_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); + +static void htif_blk_task(void *arg); + +static disk_open_t htif_blk_open; +static disk_close_t htif_blk_close; +static disk_strategy_t htif_blk_strategy; + +struct htif_blk_softc { + device_t dev; + struct disk *disk; + struct mtx htif_io_mtx; + struct mtx sc_mtx; + struct proc *p; + struct bio_queue_head bio_queue; + int running; + int intr_chan; + int cmd_done; + int index; + uint16_t curtag; +}; + +struct htif_blk_request { + uint64_t addr; + uint64_t offset; /* offset in bytes */ + uint64_t size; /* length in bytes */ + uint64_t tag; +}; + +static void +htif_blk_intr(void *arg, uint64_t entry) +{ + struct htif_blk_softc *sc; + uint64_t devcmd; + uint64_t data; + + sc = arg; + + devcmd = HTIF_DEV_CMD(entry); + data = HTIF_DEV_DATA(entry); + + if (sc->curtag == data) { + sc->cmd_done = 1; + wakeup(&sc->intr_chan); + } +} + +static int +htif_blk_probe(device_t dev) +{ + + return (0); +} + +static int +htif_blk_attach(device_t dev) +{ + struct htif_blk_softc *sc; + char prefix[] = " size="; + char *str; + long size; + + sc = device_get_softc(dev); + sc->dev = dev; + + mtx_init(&sc->htif_io_mtx, device_get_nameunit(dev), "htif_blk", MTX_DEF); + HTIF_BLK_LOCK_INIT(sc); + + str = strstr(htif_get_id(dev), prefix); + + size = strtol((str + 6), NULL, 10); + if (size == 0) { + return (ENXIO); + } + + sc->index = htif_get_index(dev); + if (sc->index < 0) + return (EINVAL); + htif_setup_intr(sc->index, htif_blk_intr, sc); + + sc->disk = disk_alloc(); + sc->disk->d_drv1 = sc; + + sc->disk->d_maxsize = 4096; /* Max transfer */ + sc->disk->d_name = "htif_blk"; + sc->disk->d_open = htif_blk_open; + sc->disk->d_close = htif_blk_close; + sc->disk->d_strategy = htif_blk_strategy; + sc->disk->d_unit = 0; + sc->disk->d_sectorsize = SECTOR_SIZE; + sc->disk->d_mediasize = size; + disk_create(sc->disk, DISK_VERSION); + + bioq_init(&sc->bio_queue); + + sc->running = 1; + + kproc_create(&htif_blk_task, sc, &sc->p, 0, 0, "%s: transfer", + device_get_nameunit(dev)); + + return (0); +} + +static int +htif_blk_open(struct disk *dp) +{ + + return (0); +} + +static int +htif_blk_close(struct disk *dp) +{ + + return (0); +} + +static void +htif_blk_task(void *arg) +{ + struct htif_blk_request req __aligned(HTIF_ALIGN); + struct htif_blk_softc *sc; + struct bio *bp; + uint64_t paddr; + uint64_t cmd; + int i; + + sc = (struct htif_blk_softc *)arg; + + while (1) { + HTIF_BLK_LOCK(sc); + do { + bp = bioq_takefirst(&sc->bio_queue); + if (bp == NULL) + msleep(sc, &sc->sc_mtx, PRIBIO, "jobqueue", 0); + } while (bp == NULL); + HTIF_BLK_UNLOCK(sc); + + if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { + req.offset = (bp->bio_pblkno * sc->disk->d_sectorsize); + req.size = bp->bio_bcount; + paddr = vtophys(bp->bio_data); 
+ KASSERT(paddr != 0, ("paddr is 0")); + req.addr = paddr; + req.tag = sc->curtag; + + cmd = sc->index; + cmd <<= HTIF_DEV_ID_SHIFT; + if (bp->bio_cmd == BIO_READ) + cmd |= (HTIF_CMD_READ << HTIF_CMD_SHIFT); + else + cmd |= (HTIF_CMD_WRITE << HTIF_CMD_SHIFT); + paddr = vtophys(&req); + KASSERT(paddr != 0, ("paddr is 0")); + cmd |= paddr; + + sc->cmd_done = 0; + htif_command(cmd); + + /* Wait for interrupt */ + HTIF_BLK_LOCK(sc); + i = 0; + while (sc->cmd_done == 0) { + msleep(&sc->intr_chan, &sc->sc_mtx, PRIBIO, "intr", hz/2); + + if (i++ > 2) { + /* TODO: try to re-issue operation on timeout ? */ + bp->bio_error = EIO; + bp->bio_flags |= BIO_ERROR; + disk_err(bp, "hard error", -1, 1); + break; + } + } + HTIF_BLK_UNLOCK(sc); + + biodone(bp); + } else { + printf("unknown op %d\n", bp->bio_cmd); + } + } +} + +static void +htif_blk_strategy(struct bio *bp) +{ + struct htif_blk_softc *sc; + + sc = bp->bio_disk->d_drv1; + + HTIF_BLK_LOCK(sc); + if (sc->running > 0) { + bioq_disksort(&sc->bio_queue, bp); + HTIF_BLK_UNLOCK(sc); + wakeup(sc); + } else { + HTIF_BLK_UNLOCK(sc); + biofinish(bp, NULL, ENXIO); + } +} + +static device_method_t htif_blk_methods[] = { + DEVMETHOD(device_probe, htif_blk_probe), + DEVMETHOD(device_attach, htif_blk_attach), +}; + +static driver_t htif_blk_driver = { + "htif_blk", + htif_blk_methods, + sizeof(struct htif_blk_softc) +}; + +static devclass_t htif_blk_devclass; + +DRIVER_MODULE(htif_blk, htif, htif_blk_driver, htif_blk_devclass, 0, 0); diff --git a/sys/riscv/htif/htif_console.c b/sys/riscv/htif/htif_console.c new file mode 100644 index 000000000000..b4a46766949e --- /dev/null +++ b/sys/riscv/htif/htif_console.c @@ -0,0 +1,361 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "htif.h" + +#include + +#include + +extern uint64_t console_intr; + +static tsw_outwakeup_t riscvtty_outwakeup; + +static struct ttydevsw riscv_ttydevsw = { + .tsw_flags = TF_NOPREFIX, + .tsw_outwakeup = riscvtty_outwakeup, +}; + +static int polltime; +static struct callout riscv_callout; +static struct tty *tp = NULL; + +#if defined(KDB) +static int alt_break_state; +#endif + +static void riscv_timeout(void *); + +static cn_probe_t riscv_cnprobe; +static cn_init_t riscv_cninit; +static cn_term_t riscv_cnterm; +static cn_getc_t riscv_cngetc; +static cn_putc_t riscv_cnputc; +static cn_grab_t riscv_cngrab; +static cn_ungrab_t riscv_cnungrab; + +CONSOLE_DRIVER(riscv); + +#define MAX_BURST_LEN 1 +#define QUEUE_SIZE 256 +#define CONSOLE_DEFAULT_ID 1ul + +struct queue_entry { + uint64_t data; + uint64_t used; + struct queue_entry *next; +}; + +struct queue_entry cnqueue[QUEUE_SIZE]; +struct queue_entry *entry_last; +struct queue_entry *entry_served; + +static void +htif_putc(int c) +{ + uint64_t cmd; + + cmd = (HTIF_CMD_WRITE << HTIF_CMD_SHIFT); + cmd |= (CONSOLE_DEFAULT_ID << HTIF_DEV_ID_SHIFT); + cmd |= c; + + htif_command(cmd); +} + +static uint8_t +htif_getc(void) +{ + uint64_t cmd; + uint8_t res; + + cmd = (HTIF_CMD_READ << HTIF_CMD_SHIFT); + cmd |= (CONSOLE_DEFAULT_ID << HTIF_DEV_ID_SHIFT); + + res = htif_command(cmd); + + return (res); +} + +static void +riscv_putc(int c) +{ + uint64_t counter; + uint64_t *cc; + uint64_t val; + + val = 0; + counter = 0; + + cc = (uint64_t*)&console_intr; + *cc = 0; + + htif_putc(c); + + /* Wait for an interrupt */ + __asm __volatile( + "li %0, 1\n" /* counter = 1 */ + "slli %0, %0, 12\n" /* counter <<= 12 */ + "1:" + "addi %0, %0, -1\n" /* counter -= 1 */ + "beqz %0, 2f\n" /* counter == 0 ? finish */ + "ld %1, 0(%2)\n" /* val = *cc */ + "beqz %1, 1b\n" /* val == 0 ? 
repeat */ + "2:" + : "=&r"(counter), "=&r"(val) : "r"(cc) + ); +} + +#ifdef EARLY_PRINTF +early_putc_t *early_putc = riscv_putc; +#endif + +static void +cn_drvinit(void *unused) +{ + + if (riscv_consdev.cn_pri != CN_DEAD && + riscv_consdev.cn_name[0] != '\0') { + tp = tty_alloc(&riscv_ttydevsw, NULL); + tty_init_console(tp, 0); + tty_makedev(tp, NULL, "%s", "rcons"); + + polltime = 1; + + callout_init(&riscv_callout, 1); + callout_reset(&riscv_callout, polltime, riscv_timeout, NULL); + } +} + +SYSINIT(cndev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, cn_drvinit, NULL); + +static void +riscvtty_outwakeup(struct tty *tp) +{ + u_char buf[MAX_BURST_LEN]; + int len; + int i; + + for (;;) { + len = ttydisc_getc(tp, buf, sizeof(buf)); + if (len == 0) + break; + + KASSERT(len == 1, ("tty error")); + + for (i = 0; i < len; i++) + riscv_putc(buf[i]); + } +} + +static void +riscv_timeout(void *v) +{ + int c; + + tty_lock(tp); + while ((c = riscv_cngetc(NULL)) != -1) + ttydisc_rint(tp, c, 0); + ttydisc_rint_done(tp); + tty_unlock(tp); + + callout_reset(&riscv_callout, polltime, riscv_timeout, NULL); +} + +static void +riscv_cnprobe(struct consdev *cp) +{ + + cp->cn_pri = CN_NORMAL; +} + +static void +riscv_cninit(struct consdev *cp) +{ + int i; + + strcpy(cp->cn_name, "rcons"); + + for (i = 0; i < QUEUE_SIZE; i++) { + if (i == (QUEUE_SIZE - 1)) + cnqueue[i].next = &cnqueue[0]; + else + cnqueue[i].next = &cnqueue[i+1]; + cnqueue[i].data = 0; + cnqueue[i].used = 0; + } + + entry_last = &cnqueue[0]; + entry_served = &cnqueue[0]; +} + +static void +riscv_cnterm(struct consdev *cp) +{ + +} + +static void +riscv_cngrab(struct consdev *cp) +{ + +} + +static void +riscv_cnungrab(struct consdev *cp) +{ + +} + +static int +riscv_cngetc(struct consdev *cp) +{ + uint8_t data; + int ch; + + ch = htif_getc(); + + if (entry_served->used == 1) { + data = entry_served->data; + entry_served->used = 0; + entry_served = entry_served->next; + ch = (data & 0xff); + if (ch > 0 && ch < 0xff) { +#if defined(KDB) + kdb_alt_break(ch, &alt_break_state); +#endif + return (ch); + } + } + + return (-1); +} + +static void +riscv_cnputc(struct consdev *cp, int c) +{ + + riscv_putc(c); +} + +/* + * Bus interface. 
+ */ + +struct htif_console_softc { + device_t dev; + int running; + int intr_chan; + int cmd_done; + int curtag; + int index; +}; + +static void +htif_console_intr(void *arg, uint64_t entry) +{ + struct htif_console_softc *sc; + uint8_t devcmd; + uint64_t data; + + sc = arg; + + devcmd = HTIF_DEV_CMD(entry); + data = HTIF_DEV_DATA(entry); + + if (devcmd == 0) { + entry_last->data = data; + entry_last->used = 1; + entry_last = entry_last->next; + } +} + +static int +htif_console_probe(device_t dev) +{ + + return (0); +} + +static int +htif_console_attach(device_t dev) +{ + struct htif_console_softc *sc; + + sc = device_get_softc(dev); + sc->dev = dev; + + sc->index = htif_get_index(dev); + if (sc->index < 0) + return (EINVAL); + + htif_setup_intr(sc->index, htif_console_intr, sc); + + return (0); +} + +static device_method_t htif_console_methods[] = { + DEVMETHOD(device_probe, htif_console_probe), + DEVMETHOD(device_attach, htif_console_attach), + DEVMETHOD_END +}; + +static driver_t htif_console_driver = { + "htif_console", + htif_console_methods, + sizeof(struct htif_console_softc) +}; + +static devclass_t htif_console_devclass; + +DRIVER_MODULE(htif_console, htif, htif_console_driver, + htif_console_devclass, 0, 0); diff --git a/sys/riscv/riscv/autoconf.c b/sys/riscv/riscv/autoconf.c new file mode 100644 index 000000000000..d6afb42754d1 --- /dev/null +++ b/sys/riscv/riscv/autoconf.c @@ -0,0 +1,94 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Setup the system to run on the current machine. + * + * Configure() is called at boot time and initializes the vba + * device tables and the memory controller monitoring. 
Available + * devices are determined (from possibilities mentioned in ioconf.c), + * and the drivers are initialized. + */ + +#include +#include +#include +#include +#include + +static void configure_first(void *); +static void configure(void *); +static void configure_final(void *); + +SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL); +/* SI_ORDER_SECOND is hookable */ +SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL); +/* SI_ORDER_MIDDLE is hookable */ +SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL); + +/* + * Determine i/o configuration for a machine. + */ +static void +configure_first(void *dummy) +{ + + /* nexus0 is the top of the riscv device tree */ + device_add_child(root_bus, "nexus", 0); +} + +static void +configure(void *dummy) +{ + + /* initialize new bus architecture */ + root_bus_configure(); +} + +static void +configure_final(void *dummy) +{ + + intr_enable(); + + cninit_finish(); + + if (bootverbose) + printf("Device configuration finished.\n"); + + cold = 0; +} diff --git a/sys/riscv/riscv/bcopy.c b/sys/riscv/riscv/bcopy.c new file mode 100644 index 000000000000..613ca97e5430 --- /dev/null +++ b/sys/riscv/riscv/bcopy.c @@ -0,0 +1,139 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Chris Torek. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * From: sys/powerpc/powerpc/bcopy.c + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +/* + * sizeof(word) MUST BE A POWER OF TWO + * SO THAT wmask BELOW IS ALL ONES + */ +typedef long word; /* "word" used for optimal copy speed */ + +#define wsize sizeof(word) +#define wmask (wsize - 1) + +/* + * Copy a block of memory, handling overlap. + * This is the routine that actually implements + * (the portable versions of) bcopy, memcpy, and memmove. 
+ */ +void * +memcpy(void *dst0, const void *src0, size_t length) +{ + char *dst; + const char *src; + size_t t; + + dst = dst0; + src = src0; + + if (length == 0 || dst == src) { /* nothing to do */ + goto done; + } + + /* + * Macros: loop-t-times; and loop-t-times, t>0 + */ +#define TLOOP(s) if (t) TLOOP1(s) +#define TLOOP1(s) do { s; } while (--t) + + if ((unsigned long)dst < (unsigned long)src) { + /* + * Copy forward. + */ + t = (size_t)src; /* only need low bits */ + + if ((t | (uintptr_t)dst) & wmask) { + /* + * Try to align operands. This cannot be done + * unless the low bits match. + */ + if ((t ^ (uintptr_t)dst) & wmask || length < wsize) { + t = length; + } else { + t = wsize - (t & wmask); + } + + length -= t; + TLOOP1(*dst++ = *src++); + } + /* + * Copy whole words, then mop up any trailing bytes. + */ + t = length / wsize; + TLOOP(*(word *)dst = *(const word *)src; src += wsize; + dst += wsize); + t = length & wmask; + TLOOP(*dst++ = *src++); + } else { + /* + * Copy backwards. Otherwise essentially the same. + * Alignment works as before, except that it takes + * (t&wmask) bytes to align, not wsize-(t&wmask). + */ + src += length; + dst += length; + t = (uintptr_t)src; + + if ((t | (uintptr_t)dst) & wmask) { + if ((t ^ (uintptr_t)dst) & wmask || length <= wsize) { + t = length; + } else { + t &= wmask; + } + + length -= t; + TLOOP1(*--dst = *--src); + } + t = length / wsize; + TLOOP(src -= wsize; dst -= wsize; + *(word *)dst = *(const word *)src); + t = length & wmask; + TLOOP(*--dst = *--src); + } +done: + return (dst0); +} + +void +bcopy(const void *src0, void *dst0, size_t length) +{ + + memcpy(dst0, src0, length); +} + diff --git a/sys/riscv/riscv/bus_machdep.c b/sys/riscv/riscv/bus_machdep.c new file mode 100644 index 000000000000..54e3419be5ee --- /dev/null +++ b/sys/riscv/riscv/bus_machdep.c @@ -0,0 +1,144 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "opt_platform.h" + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +#include + +struct bus_space memmap_bus = { + /* cookie */ + .bs_cookie = NULL, + + /* mapping/unmapping */ + .bs_map = NULL, + .bs_unmap = NULL, + .bs_subregion = NULL, + + /* allocation/deallocation */ + .bs_alloc = NULL, + .bs_free = NULL, + + /* barrier */ + .bs_barrier = NULL, + + /* read single */ + .bs_r_1 = NULL, + .bs_r_2 = NULL, + .bs_r_4 = NULL, + .bs_r_8 = NULL, + + /* read multiple */ + .bs_rm_1 = NULL, + .bs_rm_2 = NULL, + .bs_rm_4 = NULL, + .bs_rm_8 = NULL, + + /* write single */ + .bs_w_1 = NULL, + .bs_w_2 = NULL, + .bs_w_4 = NULL, + .bs_w_8 = NULL, + + /* write multiple */ + .bs_wm_1 = NULL, + .bs_wm_2 = NULL, + .bs_wm_4 = NULL, + .bs_wm_8 = NULL, + + /* write region */ + .bs_wr_1 = NULL, + .bs_wr_2 = NULL, + .bs_wr_4 = NULL, + .bs_wr_8 = NULL, + + /* set multiple */ + .bs_sm_1 = NULL, + .bs_sm_2 = NULL, + .bs_sm_4 = NULL, + .bs_sm_8 = NULL, + + /* set region */ + .bs_sr_1 = NULL, + .bs_sr_2 = NULL, + .bs_sr_4 = NULL, + .bs_sr_8 = NULL, + + /* copy */ + .bs_c_1 = NULL, + .bs_c_2 = NULL, + .bs_c_4 = NULL, + .bs_c_8 = NULL, + + /* read single stream */ + .bs_r_1_s = NULL, + .bs_r_2_s = NULL, + .bs_r_4_s = NULL, + .bs_r_8_s = NULL, + + /* read multiple stream */ + .bs_rm_1_s = NULL, + .bs_rm_2_s = NULL, + .bs_rm_4_s = NULL, + .bs_rm_8_s = NULL, + + /* read region stream */ + .bs_rr_1_s = NULL, + .bs_rr_2_s = NULL, + .bs_rr_4_s = NULL, + .bs_rr_8_s = NULL, + + /* write single stream */ + .bs_w_1_s = NULL, + .bs_w_2_s = NULL, + .bs_w_4_s = NULL, + .bs_w_8_s = NULL, + + /* write multiple stream */ + .bs_wm_1_s = NULL, + .bs_wm_2_s = NULL, + .bs_wm_4_s = NULL, + .bs_wm_8_s = NULL, + + /* write region stream */ + .bs_wr_1_s = NULL, + .bs_wr_2_s = NULL, + .bs_wr_4_s = NULL, + .bs_wr_8_s = NULL, +}; diff --git a/sys/riscv/riscv/busdma_machdep.c b/sys/riscv/riscv/busdma_machdep.c new file mode 100644 index 000000000000..ec0fea91252f --- /dev/null +++ b/sys/riscv/riscv/busdma_machdep.c @@ -0,0 +1,102 @@ +/*- + * Copyright (c) 1997, 1998 Justin T. Gibbs. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) +{ + + panic("busdma"); +} + +int +_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, + bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, + int *segp) +{ + + panic("busdma"); +} + +int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, + int *segp) +{ + + panic("busdma"); +} + +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, + struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) +{ + + panic("busdma"); +} + +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + + panic("busdma"); +} + +/* + * Release the mapping held by map. + */ +void +_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + + panic("busdma"); +} + +void +_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) +{ + + panic("busdma"); +} diff --git a/sys/riscv/riscv/clock.c b/sys/riscv/riscv/clock.c new file mode 100644 index 000000000000..e0c2d4ba3c0b --- /dev/null +++ b/sys/riscv/riscv/clock.c @@ -0,0 +1,46 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +void +cpu_initclocks(void) +{ + + cpu_initclocks_bsp(); +} diff --git a/sys/riscv/riscv/copyinout.S b/sys/riscv/riscv/copyinout.S new file mode 100644 index 000000000000..44d68390d7b6 --- /dev/null +++ b/sys/riscv/riscv/copyinout.S @@ -0,0 +1,137 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include + +#include "assym.s" + +/* + * Fault handler for the copy{in,out} functions below. 
+ */ +ENTRY(copyio_fault) + SET_FAULT_HANDLER(x0, a1) /* Clear the handler */ +copyio_fault_nopcb: + li a0, EFAULT + ret +END(copyio_fault) + +/* + * Copies from a kernel to user address + * + * int copyout(const void *kaddr, void *udaddr, size_t len) + */ +ENTRY(copyout) + beqz a2, 2f /* If len == 0 then skip loop */ + add a3, a1, a2 + li a4, VM_MAXUSER_ADDRESS + bgt a3, a4, copyio_fault_nopcb + + la a6, copyio_fault /* Get the handler address */ + SET_FAULT_HANDLER(a6, a7) /* Set the handler */ + +1: lb a4, 0(a0) /* Load from kaddr */ + addi a0, a0, 1 + sb a4, 0(a1) /* Store in uaddr */ + addi a1, a1, 1 + addi a2, a2, -1 /* len-- */ + bnez a2, 1b + + SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ + +2: li a0, 0 /* return 0 */ + ret +END(copyout) + +/* + * Copies from a user to kernel address + * + * int copyin(const void *uaddr, void *kdaddr, size_t len) + */ +ENTRY(copyin) + beqz a2, 2f /* If len == 0 then skip loop */ + add a3, a0, a2 + li a4, VM_MAXUSER_ADDRESS + bgt a3, a4, copyio_fault_nopcb + + la a6, copyio_fault /* Get the handler address */ + SET_FAULT_HANDLER(a6, a7) /* Set the handler */ + +1: lb a4, 0(a0) /* Load from uaddr */ + addi a0, a0, 1 + sb a4, 0(a1) /* Store in kaddr */ + addi a1, a1, 1 + addi a2, a2, -1 /* len-- */ + bnez a2, 1b + + SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ + +2: li a0, 0 /* return 0 */ + ret +END(copyin) + +/* + * Copies a string from a user to kernel address + * + * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done) + */ +ENTRY(copyinstr) + mv a5, x0 /* count = 0 */ + beqz a2, 3f /* If len == 0 then skip loop */ + li a7, VM_MAXUSER_ADDRESS + + la a6, copyio_fault /* Get the handler address */ + SET_FAULT_HANDLER(a6, a7) /* Set the handler */ + +1: bgt a7, a0, copyio_fault + lb a4, 0(a0) /* Load from uaddr */ + addi a0, a0, 1 + sb a4, 0(a1) /* Store in kaddr */ + addi a1, a1, 1 + beqz a4, 2f + addi a2, a2, -1 /* len-- */ + addi a5, a5, 1 /* count++ */ + bnez a2, 1b + +2: SET_FAULT_HANDLER(x0, a7) /* Clear the handler */ + +3: beqz a3, 4f /* Check if done != NULL */ + addi a5, a5, 1 /* count++ */ + sd a5, 0(a3) /* done = count */ + +4: mv a0, x0 /* return 0 */ + ret +END(copyinstr) diff --git a/sys/riscv/riscv/copystr.c b/sys/riscv/riscv/copystr.c new file mode 100644 index 000000000000..261dbc81ef76 --- /dev/null +++ b/sys/riscv/riscv/copystr.c @@ -0,0 +1,59 @@ +/*- + * Copyright (c) 2014 Andrew Turner + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
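The copyio_fault handler above is what turns a page fault taken inside copyin(), copyout() or copyinstr() into an EFAULT return instead of a kernel panic, so callers only ever see the error code. A minimal sketch of how kernel code normally consumes these routines; the structure and function names here are invented for illustration and are not part of the patch:

	#include <sys/param.h>
	#include <sys/systm.h>		/* copyin(9) / copyout(9) prototypes */
	#include <sys/errno.h>

	struct my_request {		/* hypothetical request passed from user space */
		int	index;
		int	value;
	};

	static int
	my_handler(void *uptr)		/* uptr is an address in the calling process */
	{
		struct my_request req;
		int error;

		/* Copy the request in; returns EFAULT if uptr is unmapped. */
		error = copyin(uptr, &req, sizeof(req));
		if (error != 0)
			return (error);

		req.value++;		/* ...the real work would happen here... */

		/* Copy the (possibly updated) request back out. */
		return (copyout(&req, uptr, sizeof(req)));
	}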
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include + +int +copystr(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len, + size_t * __restrict lencopied) +{ + const char *src; + size_t pos; + char *dst; + int error; + + error = ENAMETOOLONG; + src = kfaddr; + dst = kdaddr; + for (pos = 0; pos < len; pos++) { + dst[pos] = src[pos]; + if (src[pos] == '\0') { + /* Increment pos to hold the number of bytes copied */ + pos++; + error = 0; + break; + } + } + + if (lencopied != NULL) + *lencopied = pos; + + return (error); +} diff --git a/sys/riscv/riscv/cpufunc_asm.S b/sys/riscv/riscv/cpufunc_asm.S new file mode 100644 index 000000000000..21bce533187a --- /dev/null +++ b/sys/riscv/riscv/cpufunc_asm.S @@ -0,0 +1,101 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
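copystr() above is the kernel-to-kernel counterpart: it copies a NUL-terminated string, stores the number of bytes copied (including the terminating NUL) through lencopied, and returns ENAMETOOLONG when len runs out first. A short, hedged usage sketch; kern_name and the buffer size are placeholders:

	char buf[MAXPATHLEN];
	size_t done;
	int error;

	error = copystr(kern_name, buf, sizeof(buf), &done);
	if (error == ENAMETOOLONG) {
		/* the string, including its NUL, did not fit in MAXPATHLEN bytes */
	} else {
		/* success: done == strlen(kern_name) + 1 */
	}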
+ */ + +#include +#include +__FBSDID("$FreeBSD$"); + + .text + .align 2 + +.Lpage_mask: + .word PAGE_MASK + +ENTRY(riscv_nullop) + ret +END(riscv_nullop) + +/* + * Generic functions to read/modify/write the internal coprocessor registers + */ + +ENTRY(riscv_tlb_flushID) + sfence.vm + ret +END(riscv_tlb_flushID) + +ENTRY(riscv_tlb_flushID_SE) + sfence.vm + ret +END(riscv_tlb_flushID_SE) + +/* + * void riscv_dcache_wb_range(vm_offset_t, vm_size_t) + */ +ENTRY(riscv_dcache_wb_range) + /* RISCVTODO */ + ret +END(riscv_dcache_wb_range) + +/* + * void riscv_dcache_wbinv_range(vm_offset_t, vm_size_t) + */ +ENTRY(riscv_dcache_wbinv_range) + /* RISCVTODO */ + ret +END(riscv_dcache_wbinv_range) + +/* + * void riscv_dcache_inv_range(vm_offset_t, vm_size_t) + */ +ENTRY(riscv_dcache_inv_range) + /* RISCVTODO */ + ret +END(riscv_dcache_inv_range) + +/* + * void riscv_idcache_wbinv_range(vm_offset_t, vm_size_t) + */ +ENTRY(riscv_idcache_wbinv_range) + /* RISCVTODO */ + ret +END(riscv_idcache_wbinv_range) + +/* + * void riscv_icache_sync_range(vm_offset_t, vm_size_t) + */ +ENTRY(riscv_icache_sync_range) + /* RISCVTODO */ + ret +END(riscv_icache_sync_range) diff --git a/sys/riscv/riscv/devmap.c b/sys/riscv/riscv/devmap.c new file mode 100644 index 000000000000..092532aa7bf4 --- /dev/null +++ b/sys/riscv/riscv/devmap.c @@ -0,0 +1,61 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +/* RISC-V doesn't provide memory-mapped devices yet */ + +#include "opt_ddb.h" + +#include +#include +#include +#include +#include +#include +#include + +void * +pmap_mapdev(vm_offset_t pa, vm_size_t size) +{ + + return (NULL); +} + +void +pmap_unmapdev(vm_offset_t va, vm_size_t size) +{ + +} diff --git a/sys/riscv/riscv/dump_machdep.c b/sys/riscv/riscv/dump_machdep.c new file mode 100644 index 000000000000..b8473314e27b --- /dev/null +++ b/sys/riscv/riscv/dump_machdep.c @@ -0,0 +1,57 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_watchdog.h" + +#include +#include +#include +#include +#include +#include +#include + +int do_minidump = 1; +SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0, + "Enable mini crash dumps"); + +void +dumpsys_map_chunk(vm_paddr_t pa, size_t chunk, void **va) +{ + + printf("dumpsys_map_chunk\n"); +} diff --git a/sys/riscv/riscv/elf_machdep.c b/sys/riscv/riscv/elf_machdep.c new file mode 100644 index 000000000000..1e4480121d1b --- /dev/null +++ b/sys/riscv/riscv/elf_machdep.c @@ -0,0 +1,169 @@ +/*- + * Copyright 1996-1998 John D. Polstra. + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +struct sysentvec elf64_freebsd_sysvec = { + .sv_size = SYS_MAXSYSCALL, + .sv_table = sysent, + .sv_mask = 0, + .sv_errsize = 0, + .sv_errtbl = NULL, + .sv_transtrap = NULL, + .sv_fixup = __elfN(freebsd_fixup), + .sv_sendsig = sendsig, + .sv_sigcode = sigcode, + .sv_szsigcode = &szsigcode, + .sv_name = "FreeBSD ELF64", + .sv_coredump = __elfN(coredump), + .sv_imgact_try = NULL, + .sv_minsigstksz = MINSIGSTKSZ, + .sv_pagesize = PAGE_SIZE, + .sv_minuser = VM_MIN_ADDRESS, + .sv_maxuser = VM_MAXUSER_ADDRESS, + .sv_usrstack = USRSTACK, + .sv_psstrings = PS_STRINGS, + .sv_stackprot = VM_PROT_ALL, + .sv_copyout_strings = exec_copyout_strings, + .sv_setregs = exec_setregs, + .sv_fixlimit = NULL, + .sv_maxssiz = NULL, + .sv_flags = SV_ABI_FREEBSD | SV_LP64, + .sv_set_syscall_retval = cpu_set_syscall_retval, + .sv_fetch_syscall_args = cpu_fetch_syscall_args, + .sv_syscallnames = syscallnames, + .sv_schedtail = NULL, + .sv_thread_detach = NULL, + .sv_trap = NULL, +}; +INIT_SYSENTVEC(elf64_sysvec, &elf64_freebsd_sysvec); + +static Elf64_Brandinfo freebsd_brand_info = { + .brand = ELFOSABI_FREEBSD, + .machine = EM_RISCV, + .compat_3_brand = "FreeBSD", + .emul_path = NULL, + .interp_path = "/libexec/ld-elf.so.1", + .sysvec = &elf64_freebsd_sysvec, + .interp_newpath = NULL, + .brand_note = &elf64_freebsd_brandnote, + .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE +}; + +SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST, + (sysinit_cfunc_t) elf64_insert_brand_entry, + &freebsd_brand_info); + +static Elf64_Brandinfo freebsd_brand_oinfo = { + .brand = ELFOSABI_FREEBSD, + .machine = EM_RISCV, + .compat_3_brand = "FreeBSD", + .emul_path = NULL, + .interp_path = "/usr/libexec/ld-elf.so.1", + .sysvec = &elf64_freebsd_sysvec, + .interp_newpath = NULL, + .brand_note = &elf64_freebsd_brandnote, + .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE +}; + +SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY, + (sysinit_cfunc_t) elf64_insert_brand_entry, + &freebsd_brand_oinfo); + +void +elf64_dump_thread(struct thread *td, void *dst, size_t *off) +{ + +} + +/* 
Process one elf relocation with addend. */ +static int +elf_reloc_internal(linker_file_t lf, Elf_Addr relocbase, const void *data, + int type, int local, elf_lookup_fn lookup) +{ + + panic("elf_reloc_internal"); +} + +int +elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type, + elf_lookup_fn lookup) +{ + + return (elf_reloc_internal(lf, relocbase, data, type, 0, lookup)); +} + +int +elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data, + int type, elf_lookup_fn lookup) +{ + + return (elf_reloc_internal(lf, relocbase, data, type, 1, lookup)); +} + +int +elf_cpu_load_file(linker_file_t lf __unused) +{ + + return (0); +} + +int +elf_cpu_unload_file(linker_file_t lf __unused) +{ + + return (0); +} diff --git a/sys/riscv/riscv/exception.S b/sys/riscv/riscv/exception.S new file mode 100644 index 000000000000..07fcfc52787a --- /dev/null +++ b/sys/riscv/riscv/exception.S @@ -0,0 +1,456 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include "assym.s" + +#include +#include + +.macro save_registers el + addi sp, sp, -280 + + sd ra, (TF_RA)(sp) + sd gp, (TF_GP)(sp) + sd tp, (TF_TP)(sp) + + sd t0, (TF_T + 0 * 8)(sp) + sd t1, (TF_T + 1 * 8)(sp) + sd t2, (TF_T + 2 * 8)(sp) + sd t3, (TF_T + 3 * 8)(sp) + sd t4, (TF_T + 4 * 8)(sp) + sd t5, (TF_T + 5 * 8)(sp) + sd t6, (TF_T + 6 * 8)(sp) + + sd s0, (TF_S + 0 * 8)(sp) + sd s1, (TF_S + 1 * 8)(sp) + sd s2, (TF_S + 2 * 8)(sp) + sd s3, (TF_S + 3 * 8)(sp) + sd s4, (TF_S + 4 * 8)(sp) + sd s5, (TF_S + 5 * 8)(sp) + sd s6, (TF_S + 6 * 8)(sp) + sd s7, (TF_S + 7 * 8)(sp) + sd s8, (TF_S + 8 * 8)(sp) + sd s9, (TF_S + 9 * 8)(sp) + sd s10, (TF_S + 10 * 8)(sp) + sd s11, (TF_S + 11 * 8)(sp) + + sd a0, (TF_A + 0 * 8)(sp) + sd a1, (TF_A + 1 * 8)(sp) + sd a2, (TF_A + 2 * 8)(sp) + sd a3, (TF_A + 3 * 8)(sp) + sd a4, (TF_A + 4 * 8)(sp) + sd a5, (TF_A + 5 * 8)(sp) + sd a6, (TF_A + 6 * 8)(sp) + sd a7, (TF_A + 7 * 8)(sp) + +#if 0 + /* XXX: temporary test: spin if stack is not kernel one */ +.if \el == 1 /* kernel */ + mv t0, sp + srli t0, t0, 63 +1: + beqz t0, 1b +.endif +#endif + +.if \el == 1 + /* Store kernel sp */ + sd sp, (TF_SP)(sp) +.else + /* Store user sp */ + csrr t0, sscratch + sd t0, (TF_SP)(sp) +.endif + li t0, 0 + csrw sscratch, t0 + csrr t0, sepc + sd t0, (TF_SEPC)(sp) + csrr t0, sstatus + sd t0, (TF_SSTATUS)(sp) + csrr t0, sbadaddr + sd t0, (TF_SBADADDR)(sp) + csrr t0, scause + sd t0, (TF_SCAUSE)(sp) +.endm + +.macro load_registers el + ld t0, (TF_SSTATUS)(sp) +.if \el == 0 + /* Ensure user interrupts will be enabled on eret. */ + ori t0, t0, SSTATUS_PIE +.else + /* + * Disable interrupts for supervisor mode exceptions. + * For user mode exceptions we have already done this + * in do_ast. + */ + li t1, ~SSTATUS_IE + and t0, t0, t1 +.endif + csrw sstatus, t0 + + ld t0, (TF_SEPC)(sp) + csrw sepc, t0 + +.if \el == 0 + /* Load user sp */ + ld t0, (TF_SP)(sp) + csrw sscratch, t0 +.endif + + ld ra, (TF_RA)(sp) + ld gp, (TF_GP)(sp) + ld tp, (TF_TP)(sp) + + ld t0, (TF_T + 0 * 8)(sp) + ld t1, (TF_T + 1 * 8)(sp) + ld t2, (TF_T + 2 * 8)(sp) + ld t3, (TF_T + 3 * 8)(sp) + ld t4, (TF_T + 4 * 8)(sp) + ld t5, (TF_T + 5 * 8)(sp) + ld t6, (TF_T + 6 * 8)(sp) + + ld s0, (TF_S + 0 * 8)(sp) + ld s1, (TF_S + 1 * 8)(sp) + ld s2, (TF_S + 2 * 8)(sp) + ld s3, (TF_S + 3 * 8)(sp) + ld s4, (TF_S + 4 * 8)(sp) + ld s5, (TF_S + 5 * 8)(sp) + ld s6, (TF_S + 6 * 8)(sp) + ld s7, (TF_S + 7 * 8)(sp) + ld s8, (TF_S + 8 * 8)(sp) + ld s9, (TF_S + 9 * 8)(sp) + ld s10, (TF_S + 10 * 8)(sp) + ld s11, (TF_S + 11 * 8)(sp) + + ld a0, (TF_A + 0 * 8)(sp) + ld a1, (TF_A + 1 * 8)(sp) + ld a2, (TF_A + 2 * 8)(sp) + ld a3, (TF_A + 3 * 8)(sp) + ld a4, (TF_A + 4 * 8)(sp) + ld a5, (TF_A + 5 * 8)(sp) + ld a6, (TF_A + 6 * 8)(sp) + ld a7, (TF_A + 7 * 8)(sp) + + addi sp, sp, 280 +.endm + +.macro do_ast + /* Disable interrupts */ + csrr a4, sstatus +1: + csrci sstatus, SSTATUS_IE + + la a1, pcpup + ld a1, 0(a1) + ld a1, PC_CURTHREAD(a1) + lw a2, TD_FLAGS(a1) + + li a3, (TDF_ASTPENDING|TDF_NEEDRESCHED) + and a2, a2, a3 + beqz a2, 2f + + /* Restore interrupts */ + andi a4, a4, SSTATUS_IE + csrs sstatus, a4 + + /* Handle the ast */ + mv a0, sp + call _C_LABEL(ast) + + /* Re-check for new ast scheduled */ + j 1b +2: +.endm + +ENTRY(cpu_exception_handler_supervisor) + save_registers 1 + mv a0, sp + call _C_LABEL(do_trap_supervisor) + load_registers 1 + eret +END(cpu_exception_handler_supervisor) + +ENTRY(cpu_exception_handler_user) + csrrw sp, sscratch, sp + save_registers 0 + mv a0, sp + call 
_C_LABEL(do_trap_user) + do_ast + load_registers 0 + csrrw sp, sscratch, sp + eret +END(cpu_exception_handler_user) + +/* + * Trap handlers + */ + .text +bad_trap: + j bad_trap + +user_trap: + csrrw sp, mscratch, sp + addi sp, sp, -64 + sd t0, (8 * 0)(sp) + sd t1, (8 * 1)(sp) + sd t2, (8 * 2)(sp) + sd t3, (8 * 3)(sp) + sd t4, (8 * 4)(sp) + sd t5, (8 * 5)(sp) + sd a0, (8 * 7)(sp) + + la t2, _C_LABEL(cpu_exception_handler_user) + + csrr t0, mcause + bltz t0, machine_interrupt + j exit_mrts + +supervisor_trap: + /* Save state */ + csrrw sp, mscratch, sp + addi sp, sp, -64 + sd t0, (8 * 0)(sp) + sd t1, (8 * 1)(sp) + sd t2, (8 * 2)(sp) + sd t3, (8 * 3)(sp) + sd t4, (8 * 4)(sp) + sd t5, (8 * 5)(sp) + sd a0, (8 * 7)(sp) + + la t2, _C_LABEL(cpu_exception_handler_supervisor) + + csrr t0, mcause + bltz t0, machine_interrupt + + li t1, EXCP_SMODE_ENV_CALL + beq t0, t1, supervisor_call + j exit_mrts + +machine_interrupt: + /* Type of interrupt ? */ + csrr t0, mcause + andi t0, t0, 3 + li t1, 0 + beq t1, t0, software_interrupt + li t1, 1 + beq t1, t0, timer_interrupt + li t1, 2 + beq t1, t0, htif_interrupt + + /* not reached */ +1: + j 1b + +software_interrupt: + /* Redirect to supervisor */ + j exit_mrts + +timer_interrupt: + /* Disable machine timer interrupts */ + li t0, MIE_MTIE + csrc mie, t0 + + /* Clear machine pending */ + li t0, MIP_MTIP + csrc mip, t0 + + /* Post supervisor software interrupt */ + li t0, MIP_STIP + csrs mip, t0 + + /* If PRV1 is PRV_U (user) then serve a trap */ + csrr t0, mstatus + li t1, (MSTATUS_PRV_M << MSTATUS_PRV1_SHIFT) + and t0, t0, t1 + beqz t0, 1f + + /* If PRV1 is supervisor and interrupts were enabled, then serve a trap */ + csrr t0, mstatus + li t1, (SR_IE1 | (MSTATUS_PRV_M << MSTATUS_PRV1_SHIFT)) + and t0, t0, t1 + li t1, (SR_IE1 | (MSTATUS_PRV_S << MSTATUS_PRV1_SHIFT)) + beq t0, t1, 1f + + j exit + +1: + /* Serve a trap in supervisor mode */ + j exit_mrts + +htif_interrupt: +1: + li t5, 0 + csrrw t5, mfromhost, t5 + beqz t5, 3f + + /* Console PUT intr ? */ + mv t1, t5 + li t0, 0x101 + srli t1, t1, 48 + bne t1, t0, 2f + /* Yes */ + la t0, console_intr + li t1, 1 + sd t1, 0(t0) + j 3f + +2: + /* Save entry */ + la t0, htif_ring_cursor + beqz t0, 3f /* not initialized */ + ld t0, 0(t0) /* load struct */ + sd t5, 0(t0) /* put entry */ + li t4, 1 + sd t4, 8(t0) /* mark used */ + ld t4, 16(t0) /* take next */ + /* Update cursor */ + la t0, htif_ring_cursor + sd t4, 0(t0) + + /* Post supervisor software interrupt */ + li t0, MIP_SSIP + csrs mip, t0 + +3: + j exit + +supervisor_call: + csrr t1, mepc + addi t1, t1, 4 /* Next instruction in t1 */ + li t4, ECALL_HTIF_CMD + beq t5, t4, htif_cmd + li t4, ECALL_HTIF_GET_ENTRY + beq t5, t4, htif_get_entry + li t4, ECALL_MTIMECMP + beq t5, t4, set_mtimecmp + li t4, ECALL_CLEAR_PENDING + beq t5, t4, clear_pending + li t4, ECALL_MCPUID_GET + beq t5, t4, mcpuid_get + li t4, ECALL_MIMPID_GET + beq t5, t4, mimpid_get + j exit_next_instr + +mcpuid_get: + csrr t6, mcpuid + j exit_next_instr + +mimpid_get: + csrr t6, mimpid + j exit_next_instr + +htif_get_entry: + li t6, 0 /* preset return value */ + la t0, htif_ring_last + ld t0, 0(t0) /* load struct */ + ld t4, 8(t0) /* get used */ + beqz t4, 1f + ld t6, 0(t0) /* get entry */ + li t4, 0 + sd t4, 8(t0) /* mark free */ + sd t4, 0(t0) /* free entry, just in case */ + ld t4, 16(t0) /* take next */ + la t0, htif_ring_last + sd t4, 0(t0) +1: + /* Exit. 
Result is stored in t6 */ + j exit_next_instr + +htif_cmd: + mv t0, t6 +1: + csrrw t0, mtohost, t0 + bnez t0, 1b + j exit_next_instr + +set_mtimecmp: + csrr t2, stime + add t6, t6, t2 + csrw mtimecmp, t6 + + /* Enable interrupts */ + li t0, (MIE_MTIE | MIE_STIE) + csrs mie, t0 + j exit_next_instr + +clear_pending: + li t0, MIP_STIP + csrc mip, t0 + j exit_next_instr + +/* + * Trap exit functions + */ +exit_next_instr: + /* Next instruction is in t1 */ + csrw mepc, t1 +exit: + /* Restore state */ + ld t0, (8 * 0)(sp) + ld t1, (8 * 1)(sp) + ld t2, (8 * 2)(sp) + ld t3, (8 * 3)(sp) + ld t4, (8 * 4)(sp) + ld t5, (8 * 5)(sp) + ld a0, (8 * 7)(sp) + addi sp, sp, 64 + csrrw sp, mscratch, sp + eret + +/* + * Redirect to supervisor + */ +exit_mrts: + /* Setup exception handler */ + li t1, KERNBASE + add t2, t2, t1 + csrw stvec, t2 + + /* Restore state */ + ld t0, (8 * 0)(sp) + ld t1, (8 * 1)(sp) + ld t2, (8 * 2)(sp) + ld t3, (8 * 3)(sp) + ld t4, (8 * 4)(sp) + ld t5, (8 * 5)(sp) + ld a0, (8 * 7)(sp) + addi sp, sp, 64 + csrrw sp, mscratch, sp + + /* Redirect to supervisor */ + mrts diff --git a/sys/riscv/riscv/genassym.c b/sys/riscv/riscv/genassym.c new file mode 100644 index 000000000000..f5c971d178a7 --- /dev/null +++ b/sys/riscv/riscv/genassym.c @@ -0,0 +1,98 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
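The machine-mode HTIF code above (htif_interrupt and htif_get_entry) hand-addresses each ring slot at byte offsets 0, 8 and 16, advancing by 24 bytes per entry. For readability, here is an equivalent C view of that layout; the structure and field names are invented, the patch only defines the layout implicitly in assembly:

	#include <stdint.h>

	struct htif_ring_slot {			/* 24 bytes, matching the offsets used in exception.S */
		uint64_t		data;	/* offset 0: raw word read from mfromhost */
		uint64_t		used;	/* offset 8: set to 1 while the slot holds an unconsumed entry */
		struct htif_ring_slot	*next;	/* offset 16: link to the next slot; the ring is circular */
	};

	/*
	 * htif_ring_cursor tracks the slot the machine-mode interrupt handler
	 * fills next; htif_ring_last tracks the slot htif_get_entry drains next.
	 */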
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +ASSYM(KERNBASE, KERNBASE); +ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); +ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); +ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); + +ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); +ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr)); +ASSYM(PCB_SIZE, sizeof(struct pcb)); +ASSYM(PCB_RA, offsetof(struct pcb, pcb_ra)); +ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); +ASSYM(PCB_GP, offsetof(struct pcb, pcb_gp)); +ASSYM(PCB_TP, offsetof(struct pcb, pcb_tp)); +ASSYM(PCB_T, offsetof(struct pcb, pcb_t)); +ASSYM(PCB_S, offsetof(struct pcb, pcb_s)); +ASSYM(PCB_A, offsetof(struct pcb, pcb_a)); + +ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); + +ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); +ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); + +ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); +ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); +ASSYM(TD_PROC, offsetof(struct thread, td_proc)); +ASSYM(TD_FRAME, offsetof(struct thread, td_frame)); +ASSYM(TD_MD, offsetof(struct thread, td_md)); +ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); + +ASSYM(TF_RA, offsetof(struct trapframe, tf_ra)); +ASSYM(TF_SP, offsetof(struct trapframe, tf_sp)); +ASSYM(TF_GP, offsetof(struct trapframe, tf_gp)); +ASSYM(TF_TP, offsetof(struct trapframe, tf_tp)); +ASSYM(TF_T, offsetof(struct trapframe, tf_t)); +ASSYM(TF_S, offsetof(struct trapframe, tf_s)); +ASSYM(TF_A, offsetof(struct trapframe, tf_a)); +ASSYM(TF_SEPC, offsetof(struct trapframe, tf_sepc)); +ASSYM(TF_SBADADDR, offsetof(struct trapframe, tf_sbadaddr)); +ASSYM(TF_SCAUSE, offsetof(struct trapframe, tf_scause)); +ASSYM(TF_SSTATUS, offsetof(struct trapframe, tf_sstatus)); diff --git a/sys/riscv/riscv/identcpu.c b/sys/riscv/riscv/identcpu.c new file mode 100644 index 000000000000..dd1f2ba13382 --- /dev/null +++ b/sys/riscv/riscv/identcpu.c @@ -0,0 +1,149 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
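genassym.c above is not linked into the kernel; the build compiles it and turns the ASSYM() entries into assym.s, the constants file the .S sources in this patch pull in with #include "assym.s". The result is, roughly, a list of definitions like the following; the numeric values shown are hypothetical, the real ones depend on the structure layouts:

	/* illustrative excerpt of a generated assym.s (values are made up) */
	#define	TF_RA		0x08	/* offsetof(struct trapframe, tf_ra) */
	#define	TF_SP		0x10	/* offsetof(struct trapframe, tf_sp) */
	#define	PCB_SIZE	0x118	/* sizeof(struct pcb) */

The assembly then uses these as plain immediates, for example sd ra, (TF_RA)(sp) in the save_registers macro.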
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include +#include + +char machine[] = "riscv"; + +SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, + "Machine class"); + +struct cpu_desc { + u_int cpu_impl; + u_int cpu_part_num; + const char *cpu_impl_name; + const char *cpu_part_name; +}; + +struct cpu_desc cpu_desc[MAXCPU]; + +struct cpu_parts { + u_int part_id; + const char *part_name; +}; +#define CPU_PART_NONE { -1, "Unknown Processor" } + +struct cpu_implementers { + u_int impl_id; + const char *impl_name; + /* + * Part number is implementation defined + * so each vendor will have its own set of values and names. + */ + const struct cpu_parts *cpu_parts; +}; +#define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none } + +/* + * Per-implementer table of (PartNum, CPU Name) pairs. + */ +/* UC Berkeley */ +static const struct cpu_parts cpu_parts_ucb[] = { + { CPU_PART_RV32I, "RV32I" }, + { CPU_PART_RV32E, "RV32E" }, + { CPU_PART_RV64I, "RV64I" }, + { CPU_PART_RV128I, "RV128I" }, + CPU_PART_NONE, +}; + +/* Unknown */ +static const struct cpu_parts cpu_parts_none[] = { + CPU_PART_NONE, +}; + +/* + * Implementers table. + */ +const struct cpu_implementers cpu_implementers[] = { + { CPU_IMPL_UCB_ROCKET, "UC Berkeley Rocket", cpu_parts_ucb }, + CPU_IMPLEMENTER_NONE, +}; + +void +identify_cpu(void) +{ + const struct cpu_parts *cpu_partsp; + uint32_t part_id; + uint32_t impl_id; + uint64_t mimpid; + uint64_t mcpuid; + u_int cpu; + size_t i; + + cpu_partsp = NULL; + + mimpid = machine_command(ECALL_MIMPID_GET, 0); + mcpuid = machine_command(ECALL_MCPUID_GET, 0); + + /* SMPTODO: use mhartid ? */ + cpu = PCPU_GET(cpuid); + + impl_id = CPU_IMPL(mimpid); + for (i = 0; i < nitems(cpu_implementers); i++) { + if (impl_id == cpu_implementers[i].impl_id || + cpu_implementers[i].impl_id == 0) { + cpu_desc[cpu].cpu_impl = impl_id; + cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name; + cpu_partsp = cpu_implementers[i].cpu_parts; + break; + } + } + + part_id = CPU_PART(mcpuid); + for (i = 0; &cpu_partsp[i] != NULL; i++) { + if (part_id == cpu_partsp[i].part_id || + cpu_partsp[i].part_id == -1) { + cpu_desc[cpu].cpu_part_num = part_id; + cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name; + break; + } + } + + /* Print details for boot CPU or if we want verbose output */ + if (cpu == 0 || bootverbose) { + printf("CPU(%d): %s %s\n", cpu, + cpu_desc[cpu].cpu_impl_name, + cpu_desc[cpu].cpu_part_name); + } +} diff --git a/sys/riscv/riscv/in_cksum.c b/sys/riscv/riscv/in_cksum.c new file mode 100644 index 000000000000..ae02e91d9203 --- /dev/null +++ b/sys/riscv/riscv/in_cksum.c @@ -0,0 +1,241 @@ +/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */ + +/*- + * Copyright (c) 1988, 1992, 1993 + * The Regents of the University of California. All rights reserved. 
+ * Copyright (c) 1996 + * Matt Thomas + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93 + */ + +#include /* RCS ID & Copyright macro defns */ +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +/* + * Checksum routine for Internet Protocol family headers + * (Portable Alpha version). + * + * This routine is very heavily used in the network + * code and should be modified for each CPU to be as fast as possible. + */ + +#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x) +#define REDUCE32 \ + { \ + q_util.q = sum; \ + sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \ + } +#define REDUCE16 \ + { \ + q_util.q = sum; \ + l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \ + sum = l_util.s[0] + l_util.s[1]; \ + ADDCARRY(sum); \ + } + +static const u_int32_t in_masks[] = { + /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/ + 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */ + 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */ + 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */ + 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */ +}; + +union l_util { + u_int16_t s[2]; + u_int32_t l; +}; +union q_util { + u_int16_t s[4]; + u_int32_t l[2]; + u_int64_t q; +}; + +static u_int64_t +in_cksumdata(const void *buf, int len) +{ + const u_int32_t *lw = (const u_int32_t *) buf; + u_int64_t sum = 0; + u_int64_t prefilled; + int offset; + union q_util q_util; + + if ((3 & (long) lw) == 0 && len == 20) { + sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4]; + REDUCE32; + return sum; + } + + if ((offset = 3 & (long) lw) != 0) { + const u_int32_t *masks = in_masks + (offset << 2); + lw = (u_int32_t *) (((long) lw) - offset); + sum = *lw++ & masks[len >= 3 ? 
3 : len]; + len -= 4 - offset; + if (len <= 0) { + REDUCE32; + return sum; + } + } +#if 0 + /* + * Force to cache line boundary. + */ + offset = 32 - (0x1f & (long) lw); + if (offset < 32 && len > offset) { + len -= offset; + if (4 & offset) { + sum += (u_int64_t) lw[0]; + lw += 1; + } + if (8 & offset) { + sum += (u_int64_t) lw[0] + lw[1]; + lw += 2; + } + if (16 & offset) { + sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3]; + lw += 4; + } + } +#endif + /* + * access prefilling to start load of next cache line. + * then add current cache line + * save result of prefilling for loop iteration. + */ + prefilled = lw[0]; + while ((len -= 32) >= 4) { + u_int64_t prefilling = lw[8]; + sum += prefilled + lw[1] + lw[2] + lw[3] + + lw[4] + lw[5] + lw[6] + lw[7]; + lw += 8; + prefilled = prefilling; + } + if (len >= 0) { + sum += prefilled + lw[1] + lw[2] + lw[3] + + lw[4] + lw[5] + lw[6] + lw[7]; + lw += 8; + } else { + len += 32; + } + while ((len -= 16) >= 0) { + sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3]; + lw += 4; + } + len += 16; + while ((len -= 4) >= 0) { + sum += (u_int64_t) *lw++; + } + len += 4; + if (len > 0) + sum += (u_int64_t) (in_masks[len] & *lw); + REDUCE32; + return sum; +} + +u_short +in_addword(u_short a, u_short b) +{ + u_int64_t sum = a + b; + + ADDCARRY(sum); + return (sum); +} + +u_short +in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c) +{ + u_int64_t sum; + union q_util q_util; + union l_util l_util; + + sum = (u_int64_t) a + b + c; + REDUCE16; + return (sum); +} + +u_short +in_cksum_skip(struct mbuf *m, int len, int skip) +{ + u_int64_t sum = 0; + int mlen = 0; + int clen = 0; + caddr_t addr; + union q_util q_util; + union l_util l_util; + + len -= skip; + for (; skip && m; m = m->m_next) { + if (m->m_len > skip) { + mlen = m->m_len - skip; + addr = mtod(m, caddr_t) + skip; + goto skip_start; + } else { + skip -= m->m_len; + } + } + + for (; m && len; m = m->m_next) { + if (m->m_len == 0) + continue; + mlen = m->m_len; + addr = mtod(m, caddr_t); +skip_start: + if (len < mlen) + mlen = len; + if ((clen ^ (long) addr) & 1) + sum += in_cksumdata(addr, mlen) << 8; + else + sum += in_cksumdata(addr, mlen); + + clen += mlen; + len -= mlen; + } + REDUCE16; + return (~sum & 0xffff); +} + +u_int in_cksum_hdr(const struct ip *ip) +{ + u_int64_t sum = in_cksumdata(ip, sizeof(struct ip)); + union q_util q_util; + union l_util l_util; + REDUCE16; + return (~sum & 0xffff); +} diff --git a/sys/riscv/riscv/intr_machdep.c b/sys/riscv/riscv/intr_machdep.c new file mode 100644 index 000000000000..c51075c8a24b --- /dev/null +++ b/sys/riscv/riscv/intr_machdep.c @@ -0,0 +1,223 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +enum { + IRQ_SOFTWARE, + IRQ_TIMER, + IRQ_HTIF, + NIRQS +}; + +u_long intrcnt[NIRQS]; +size_t sintrcnt = sizeof(intrcnt); + +char intrnames[NIRQS * (MAXCOMLEN + 1) * 2]; +size_t sintrnames = sizeof(intrnames); + +static struct intr_event *intr_events[NIRQS]; +static riscv_intrcnt_t riscv_intr_counters[NIRQS]; + +static int intrcnt_index; + +riscv_intrcnt_t +riscv_intrcnt_create(const char* name) +{ + riscv_intrcnt_t counter; + + counter = &intrcnt[intrcnt_index++]; + riscv_intrcnt_setname(counter, name); + + return (counter); +} + +void +riscv_intrcnt_setname(riscv_intrcnt_t counter, const char *name) +{ + int i; + + i = (counter - intrcnt); + + KASSERT(counter != NULL, ("riscv_intrcnt_setname: NULL counter")); + + snprintf(intrnames + (MAXCOMLEN + 1) * i, + MAXCOMLEN + 1, "%-*s", MAXCOMLEN, name); +} + +static void +riscv_mask_irq(void *source) +{ + uintptr_t irq; + + irq = (uintptr_t)source; + + switch (irq) { + case IRQ_TIMER: + csr_clear(sie, SIE_STIE); + break; + case IRQ_SOFTWARE: + csr_clear(sie, SIE_SSIE); + break; + default: + panic("Unknown irq %d\n", irq); + } +} + +static void +riscv_unmask_irq(void *source) +{ + uintptr_t irq; + + irq = (uintptr_t)source; + + switch (irq) { + case IRQ_TIMER: + csr_set(sie, SIE_STIE); + break; + case IRQ_SOFTWARE: + csr_set(sie, SIE_SSIE); + break; + default: + panic("Unknown irq %d\n", irq); + } +} + +void +riscv_init_interrupts(void) +{ + char name[MAXCOMLEN + 1]; + int i; + + for (i = 0; i < NIRQS; i++) { + snprintf(name, MAXCOMLEN + 1, "int%d:", i); + riscv_intr_counters[i] = riscv_intrcnt_create(name); + } +} + +int +riscv_setup_intr(const char *name, driver_filter_t *filt, + void (*handler)(void*), void *arg, int irq, int flags, void **cookiep) +{ + struct intr_event *event; + int error; + + if (irq < 0 || irq >= NIRQS) + panic("%s: unknown intr %d", __func__, irq); + + event = intr_events[irq]; + if (event == NULL) { + error = intr_event_create(&event, (void *)(uintptr_t)irq, 0, + irq, riscv_mask_irq, riscv_unmask_irq, + NULL, NULL, "int%d", irq); + if (error) + return (error); + intr_events[irq] = event; + riscv_unmask_irq((void*)(uintptr_t)irq); + } + + intr_event_add_handler(event, name, filt, handler, arg, + intr_priority(flags), flags, cookiep); + + riscv_intrcnt_setname(riscv_intr_counters[irq], + event->ie_fullname); + + return (0); +} + +int +riscv_teardown_intr(void *ih) +{ + + /* TODO */ + + return (0); +} + +int 
+riscv_config_intr(u_int irq, enum intr_trigger trig, enum intr_polarity pol) +{ + + /* There is no configuration for interrupts */ + + return (0); +} + +void +riscv_cpu_intr(struct trapframe *frame) +{ + struct intr_event *event; + int active_irq; + + critical_enter(); + + KASSERT(frame->tf_scause & EXCP_INTR, + ("riscv_cpu_intr: wrong frame passed")); + + active_irq = (frame->tf_scause & EXCP_MASK); + + switch (active_irq) { + case IRQ_SOFTWARE: + case IRQ_TIMER: + event = intr_events[active_irq]; + /* Update counters */ + atomic_add_long(riscv_intr_counters[active_irq], 1); + PCPU_INC(cnt.v_intr); + break; + case IRQ_HTIF: + /* HTIF interrupts are only handled in machine mode */ + panic("%s: HTIF interrupt", __func__); + break; + default: + event = NULL; + } + + if (!event || TAILQ_EMPTY(&event->ie_handlers) || + (intr_event_handle(event, frame) != 0)) + printf("stray interrupt %d\n", active_irq); + + critical_exit(); +} diff --git a/sys/riscv/riscv/locore.S b/sys/riscv/riscv/locore.S new file mode 100644 index 000000000000..8dc424e7fc4e --- /dev/null +++ b/sys/riscv/riscv/locore.S @@ -0,0 +1,274 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
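riscv_setup_intr() above creates the intr_event for an IRQ the first time it is used, unmasks the source and attaches the handler. A hedged sketch of how a clock driver might hook the timer interrupt; the softc, the filter body and the flag choice are placeholders, and it assumes the IRQ_* values are visible to the caller:

	struct my_timer_softc {		/* hypothetical driver state */
		void	*ih;		/* interrupt handler cookie */
	};

	static int
	my_timer_filter(void *arg)
	{
		struct my_timer_softc *sc __unused = arg;

		/* ...acknowledge the tick and run the event-timer callback... */
		return (FILTER_HANDLED);
	}

	/* in the driver attach routine (error is a local int): */
	error = riscv_setup_intr("timer", my_timer_filter, NULL, sc,
	    IRQ_TIMER, INTR_TYPE_CLK, &sc->ih);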
+ * + * $FreeBSD$ + */ + +#include "assym.s" + +#include +#include +#include +#include +#include +#include + +#define HTIF_RING_SIZE (64) +#define HTIF_RING_LAST (24 * (HTIF_RING_SIZE - 1)) + + .globl kernbase + .set kernbase, KERNBASE + + /* Trap entries */ + .text + +mentry: + /* User mode entry point (mtvec + 0x000) */ + .align 6 + j user_trap + + /* Supervisor mode entry point (mtvec + 0x040) */ + .align 6 + j supervisor_trap + + /* Hypervisor mode entry point (mtvec + 0x080) */ + .align 6 + j bad_trap + + /* Machine mode entry point (mtvec + 0x0C0) */ + .align 6 + j bad_trap + + /* Reset vector */ + .text + .align 8 + .globl _start +_start: + li s11, KERNBASE + + /* Build ring */ + la t0, htif_ring + li t1, 0 + sd t1, 0(t0) /* zero data */ + sd t1, 8(t0) /* zero used */ + mv t2, t0 + mv t3, t0 + li t5, HTIF_RING_LAST + li t6, 0 + add t4, t0, t5 +1: + addi t3, t3, 24 /* pointer to next */ + beq t3, t4, 2f /* finish */ + sd t3, 16(t2) /* store pointer */ + addi t2, t2, 24 /* next entry */ + addi t6, t6, 1 /* counter */ + j 1b +2: + sd t0, 16(t3) /* last -> first */ + la t1, htif_ring_cursor + sd t0, 0(t1) + la t1, htif_ring_last + sd t0, 0(t1) + /* finish building ring */ + + la t0, hardstack_end + sub t0, t0, s11 + csrw mscratch, t0 + + la t0, mentry + csrw mtvec, t0 + + li t0, 0 + csrw sscratch, t0 + + li s10, PAGE_SIZE + li s9, (PAGE_SIZE * KSTACK_PAGES) + + /* Page tables */ + + /* Level 0 */ + la s1, pagetable_l0 + la s2, pagetable_l1 /* Link to next level PN */ + srli s2, s2, PAGE_SHIFT + + li t4, (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)) + slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */ + or t6, t4, t5 + + /* Store single level0 PTE entry to position */ + li a5, 0x1ff + li a6, PTE_SIZE + mulw a5, a5, a6 + add t0, s1, a5 + sd t6, 0(t0) + + /* Level 1 */ + la s1, pagetable_l1 + la s2, pagetable_l2 /* Link to next level PN */ + srli s2, s2, PAGE_SHIFT + + li a5, KERNBASE + srli a5, a5, 0x1e /* >> 30 */ + andi a5, a5, 0x1ff /* & 0x1ff */ + li t4, (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)) + slli t5, s2, PTE_PPN0_S /* (s2 << PTE_PPN0_S) */ + or t6, t4, t5 + + /* Store single level1 PTE entry to position */ + li a6, PTE_SIZE + mulw a5, a5, a6 + add t0, s1, a5 + sd t6, (t0) + + /* Level 2 superpages (512 x 2MiB) */ + la s1, pagetable_l2 + li t3, 512 /* Build 512 entries */ + li t4, 0 /* Counter */ + li t5, 0 +2: + li t0, (PTE_VALID | (PTE_TYPE_SRWX << PTE_TYPE_S)) + slli t2, t4, PTE_PPN1_S /* << PTE_PPN1_S */ + or t5, t0, t2 + sd t5, (s1) /* Store PTE entry to position */ + addi s1, s1, PTE_SIZE + + addi t4, t4, 1 + bltu t4, t3, 2b + + /* Set page tables base register */ + la s1, pagetable_l0 + csrw sptbr, s1 + + /* Page tables END */ + + /* Enter supervisor mode */ + li s0, ((MSTATUS_VM_SV48 << MSTATUS_VM_SHIFT) | \ + (MSTATUS_PRV_M << MSTATUS_PRV_SHIFT) | \ + (MSTATUS_PRV_S << MSTATUS_PRV1_SHIFT) | \ + (MSTATUS_PRV_U << MSTATUS_PRV2_SHIFT)); + csrw mstatus, s0 + + /* Exit from machine mode */ + la t0, .Lmmu_on + add t0, t0, s11 + csrw mepc, t0 + eret + +.Lmmu_on: + /* Initialize stack pointer */ + la s3, initstack_end + mv sp, s3 + addi sp, sp, -PCB_SIZE + + /* Clear BSS */ + la a0, _C_LABEL(__bss_start) + la s1, _C_LABEL(_end) +1: + sd zero, 0(a0) + addi a0, a0, 8 + bltu a0, s1, 1b + + /* Fill riscv_bootparams */ + addi sp, sp, -16 + la t0, pagetable_l1 + sd t0, 0(sp) /* kern_l1pt */ + la t0, initstack_end + sd t0, 8(sp) /* kern_stack */ + + mv a0, sp + call _C_LABEL(initriscv) /* Off we go */ + call _C_LABEL(mi_startup) + + .align 4 +initstack: + .space (PAGE_SIZE * KSTACK_PAGES) 
+initstack_end: +hardstack: + .space (PAGE_SIZE) +hardstack_end: + + .globl htif_ring +htif_ring: + .space (24 * 1024) + + .globl htif_ring_cursor +htif_ring_cursor: + .space (8) + + .globl htif_ring_last +htif_ring_last: + .space (8) + + .globl console_intr +console_intr: + .space (8) + +ENTRY(sigcode) + mv a0, sp + addi a0, a0, SF_UC + +1: + li t0, SYS_sigreturn + ecall + + /* sigreturn failed, exit */ + li t0, SYS_exit + ecall + + j 1b +END(sigcode) + /* This may be copied to the stack, keep it 16-byte aligned */ + .align 3 +esigcode: + + .data + .align 3 + .global szsigcode +szsigcode: + .quad esigcode - sigcode + + .align 12 + .globl pagetable_l0 +pagetable_l0: + .space PAGE_SIZE +pagetable_l1: + .space PAGE_SIZE +pagetable_l2: + .space PAGE_SIZE +pagetable_end: + + .globl init_pt_va +init_pt_va: + .quad pagetable_l2 /* XXX: Keep page tables VA */ + +#include "exception.S" diff --git a/sys/riscv/riscv/machdep.c b/sys/riscv/riscv/machdep.c new file mode 100644 index 000000000000..5f9bd1f283f2 --- /dev/null +++ b/sys/riscv/riscv/machdep.c @@ -0,0 +1,795 @@ +/*- + * Copyright (c) 2014 Andrew Turner + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
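The bootstrap page tables built in _start above use three levels: a single root entry at index 0x1ff points at pagetable_l1, one L1 entry selected by (KERNBASE >> 30) & 0x1ff points at pagetable_l2, and pagetable_l2 is filled with 512 read/write/execute superpage entries. Going by the "512 x 2MiB" comment in that code, the arithmetic for the initial kernel mapping works out as follows; the macro names are invented for illustration:

	/* sizes implied by the bootstrap loop in locore.S (names are illustrative only) */
	#define	L2_SUPERPAGE_SIZE	(1UL << 21)	/* 2 MiB covered by each L2 entry */
	#define	L2_NENTRIES		512UL		/* entries written by the bootstrap loop */
	#define	BOOT_KERNEL_MAP_SIZE	(L2_NENTRIES * L2_SUPERPAGE_SIZE)	/* 1 GiB mapped at KERNBASE */

which matches the 1 GiB granularity of the level-1 index taken from KERNBASE >> 30.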
+ */ + +#include "opt_platform.h" + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#ifdef VFP +#include +#endif + +#ifdef FDT +#include +#include +#endif + +struct pcpu __pcpu[MAXCPU]; + +static struct trapframe proc0_tf; + +vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2]; +vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2]; + +int early_boot = 1; +int cold = 1; +long realmem = 0; +long Maxmem = 0; + +#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) +vm_paddr_t physmap[PHYSMAP_SIZE]; +u_int physmap_idx; + +struct kva_md_info kmi; + +int64_t dcache_line_size; /* The minimum D cache line size */ +int64_t icache_line_size; /* The minimum I cache line size */ +int64_t idcache_line_size; /* The minimum cache line size */ + +extern int *end; +extern int *initstack_end; + +struct pcpu *pcpup; + +uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs); + +uintptr_t +mcall_trap(uintptr_t mcause, uintptr_t* regs) +{ + + return (0); +} + +static void +cpu_startup(void *dummy) +{ + + identify_cpu(); + + vm_ksubmap_init(&kmi); + bufinit(); + vm_pager_bufferinit(); +} + +SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); + +int +cpu_idle_wakeup(int cpu) +{ + + return (0); +} + +void +bzero(void *buf, size_t len) +{ + uint8_t *p; + + p = buf; + while(len-- > 0) + *p++ = 0; +} + +int +fill_regs(struct thread *td, struct reg *regs) +{ + struct trapframe *frame; + + frame = td->td_frame; + regs->sepc = frame->tf_sepc; + regs->sstatus = frame->tf_sstatus; + regs->ra = frame->tf_ra; + regs->sp = frame->tf_sp; + regs->gp = frame->tf_gp; + regs->tp = frame->tf_tp; + + memcpy(regs->t, frame->tf_t, sizeof(regs->t)); + memcpy(regs->s, frame->tf_s, sizeof(regs->s)); + memcpy(regs->a, frame->tf_a, sizeof(regs->a)); + + return (0); +} + +int +set_regs(struct thread *td, struct reg *regs) +{ + struct trapframe *frame; + + frame = td->td_frame; + frame->tf_sepc = regs->sepc; + frame->tf_sstatus = regs->sstatus; + frame->tf_ra = regs->ra; + frame->tf_sp = regs->sp; + frame->tf_gp = regs->gp; + frame->tf_tp = regs->tp; + + memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t)); + memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s)); + memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a)); + + return (0); +} + +int +fill_fpregs(struct thread *td, struct fpreg *regs) +{ + + /* TODO */ + bzero(regs, sizeof(*regs)); + return (0); +} + +int +set_fpregs(struct thread *td, struct fpreg *regs) +{ + + /* TODO */ + return (0); +} + +int +fill_dbregs(struct thread *td, struct dbreg *regs) +{ + + panic("fill_dbregs"); +} + +int +set_dbregs(struct thread *td, struct dbreg *regs) +{ + + panic("set_dbregs"); +} + +int +ptrace_set_pc(struct thread *td, u_long addr) +{ + + panic("ptrace_set_pc"); + return (0); +} + +int +ptrace_single_step(struct thread *td) +{ + + /* TODO; */ + return (0); +} + +int +ptrace_clear_single_step(struct thread *td) +{ + + /* TODO; */ + return (0); +} + +void +exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) +{ + struct trapframe *tf = td->td_frame; + + memset(tf, 0, sizeof(struct trapframe)); + + /* + * We need to set a0 for init as it doesn't call + * cpu_set_syscall_retval to copy the value. 
We also + * need to set td_retval for the cases where we do. + */ + tf->tf_a[0] = td->td_retval[0] = stack; + tf->tf_sp = STACKALIGN(stack); + tf->tf_ra = imgp->entry_addr; + tf->tf_sepc = imgp->entry_addr; +} + +/* Sanity check these are the same size, they will be memcpy'd to and fro */ +CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == + sizeof((struct gpregs *)0)->gp_a); +CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == + sizeof((struct gpregs *)0)->gp_s); +CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == + sizeof((struct gpregs *)0)->gp_t); +CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == + sizeof((struct reg *)0)->a); +CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == + sizeof((struct reg *)0)->s); +CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == + sizeof((struct reg *)0)->t); + +int +get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) +{ + struct trapframe *tf = td->td_frame; + + memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t)); + memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s)); + memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a)); + + if (clear_ret & GET_MC_CLEAR_RET) { + mcp->mc_gpregs.gp_a[0] = 0; + mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */ + } + + mcp->mc_gpregs.gp_ra = tf->tf_ra; + mcp->mc_gpregs.gp_sp = tf->tf_sp; + mcp->mc_gpregs.gp_gp = tf->tf_gp; + mcp->mc_gpregs.gp_tp = tf->tf_tp; + mcp->mc_gpregs.gp_sepc = tf->tf_sepc; + mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus; + + return (0); +} + +int +set_mcontext(struct thread *td, mcontext_t *mcp) +{ + struct trapframe *tf; + + tf = td->td_frame; + + memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t)); + memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s)); + memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a)); + + tf->tf_ra = mcp->mc_gpregs.gp_ra; + tf->tf_sp = mcp->mc_gpregs.gp_sp; + tf->tf_gp = mcp->mc_gpregs.gp_gp; + tf->tf_tp = mcp->mc_gpregs.gp_tp; + tf->tf_sepc = mcp->mc_gpregs.gp_sepc; + tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus; + + return (0); +} + +static void +get_fpcontext(struct thread *td, mcontext_t *mcp) +{ + /* TODO */ +} + +static void +set_fpcontext(struct thread *td, mcontext_t *mcp) +{ + /* TODO */ +} + +void +cpu_idle(int busy) +{ + + spinlock_enter(); + if (!busy) + cpu_idleclock(); + if (!sched_runnable()) + __asm __volatile( + "fence \n" + "wfi \n"); + if (!busy) + cpu_activeclock(); + spinlock_exit(); +} + +void +cpu_halt(void) +{ + + panic("cpu_halt"); +} + +/* + * Flush the D-cache for non-DMA I/O so that the I-cache can + * be made coherent later. + */ +void +cpu_flush_dcache(void *ptr, size_t len) +{ + + /* TBD */ +} + +/* Get current clock frequency for the given CPU ID. 
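+ * Not implemented on this port yet; the function below simply panics if
+ * it is ever called.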
*/ +int +cpu_est_clockrate(int cpu_id, uint64_t *rate) +{ + + panic("cpu_est_clockrate"); +} + +void +cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) +{ + +} + +void +spinlock_enter(void) +{ + struct thread *td; + + td = curthread; + if (td->td_md.md_spinlock_count == 0) { + td->td_md.md_spinlock_count = 1; + td->td_md.md_saved_sstatus_ie = intr_disable(); + } else + td->td_md.md_spinlock_count++; + critical_enter(); +} + +void +spinlock_exit(void) +{ + struct thread *td; + register_t sstatus_ie; + + td = curthread; + critical_exit(); + sstatus_ie = td->td_md.md_saved_sstatus_ie; + td->td_md.md_spinlock_count--; + if (td->td_md.md_spinlock_count == 0) + intr_restore(sstatus_ie); +} + +#ifndef _SYS_SYSPROTO_H_ +struct sigreturn_args { + ucontext_t *ucp; +}; +#endif + +int +sys_sigreturn(struct thread *td, struct sigreturn_args *uap) +{ + uint64_t sstatus; + ucontext_t uc; + int error; + + if (uap == NULL) + return (EFAULT); + if (copyin(uap->sigcntxp, &uc, sizeof(uc))) + return (EFAULT); + + /* + * Make sure the processor mode has not been tampered with and + * interrupts have not been disabled. + */ + sstatus = uc.uc_mcontext.mc_gpregs.gp_sstatus; + if ((sstatus & SSTATUS_PS) != 0 || + (sstatus & SSTATUS_PIE) == 0) + return (EINVAL); + + error = set_mcontext(td, &uc.uc_mcontext); + if (error != 0) + return (error); + + set_fpcontext(td, &uc.uc_mcontext); + + /* Restore signal mask. */ + kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); + + return (EJUSTRETURN); +} + +/* + * Construct a PCB from a trapframe. This is called from kdb_trap() where + * we want to start a backtrace from the function that caused us to enter + * the debugger. We have the context in the trapframe, but base the trace + * on the PCB. The PCB doesn't have to be perfect, as long as it contains + * enough for a backtrace. + */ +void +makectx(struct trapframe *tf, struct pcb *pcb) +{ + + memcpy(pcb->pcb_t, tf->tf_t, sizeof(tf->tf_t)); + memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s)); + memcpy(pcb->pcb_a, tf->tf_a, sizeof(tf->tf_a)); + + pcb->pcb_ra = tf->tf_ra; + pcb->pcb_sp = tf->tf_sp; + pcb->pcb_gp = tf->tf_gp; + pcb->pcb_tp = tf->tf_tp; + pcb->pcb_sepc = tf->tf_sepc; +} + +void +sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) +{ + struct sigframe *fp, frame; + struct sysentvec *sysent; + struct trapframe *tf; + struct sigacts *psp; + struct thread *td; + struct proc *p; + int onstack; + int code; + int sig; + + td = curthread; + p = td->td_proc; + PROC_LOCK_ASSERT(p, MA_OWNED); + + sig = ksi->ksi_signo; + code = ksi->ksi_code; + psp = p->p_sigacts; + mtx_assert(&psp->ps_mtx, MA_OWNED); + + tf = td->td_frame; + onstack = sigonstack(tf->tf_sp); + + CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, + catcher, sig); + + /* Allocate and validate space for the signal handler context. */ + if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && + SIGISMEMBER(psp->ps_sigonstack, sig)) { + fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + + td->td_sigstk.ss_size); + } else { + fp = (struct sigframe *)td->td_frame->tf_sp; + } + + /* Make room, keeping the stack aligned */ + fp--; + fp = (struct sigframe *)STACKALIGN(fp); + + /* Fill in the frame to copy out */ + get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); + get_fpcontext(td, &frame.sf_uc.uc_mcontext); + frame.sf_si = ksi->ksi_info; + frame.sf_uc.uc_sigmask = *mask; + frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? + ((onstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; + frame.sf_uc.uc_stack = td->td_sigstk; + mtx_unlock(&psp->ps_mtx); + PROC_UNLOCK(td->td_proc); + + /* Copy the sigframe out to the user's stack. */ + if (copyout(&frame, fp, sizeof(*fp)) != 0) { + /* Process has trashed its stack. Kill it. */ + CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); + PROC_LOCK(p); + sigexit(td, SIGILL); + } + + tf->tf_a[0] = sig; + tf->tf_a[1] = (register_t)&fp->sf_si; + tf->tf_a[2] = (register_t)&fp->sf_uc; + + tf->tf_sepc = (register_t)catcher; + tf->tf_sp = (register_t)fp; + + sysent = p->p_sysent; + if (sysent->sv_sigcode_base != 0) + tf->tf_ra = (register_t)sysent->sv_sigcode_base; + else + tf->tf_ra = (register_t)(sysent->sv_psstrings - + *(sysent->sv_szsigcode)); + + CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr, + tf->tf_sp); + + PROC_LOCK(p); + mtx_lock(&psp->ps_mtx); +} + +static void +init_proc0(vm_offset_t kstack) +{ + pcpup = &__pcpu[0]; + + proc_linkup0(&proc0, &thread0); + thread0.td_kstack = kstack; + thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; + thread0.td_frame = &proc0_tf; + pcpup->pc_curpcb = thread0.td_pcb; +} + +static int +add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, + u_int *physmap_idxp) +{ + u_int i, insert_idx, _physmap_idx; + + _physmap_idx = *physmap_idxp; + + if (length == 0) + return (1); + + /* + * Find insertion point while checking for overlap. Start off by + * assuming the new entry will be added to the end. + */ + insert_idx = _physmap_idx; + for (i = 0; i <= _physmap_idx; i += 2) { + if (base < physmap[i + 1]) { + if (base + length <= physmap[i]) { + insert_idx = i; + break; + } + if (boothowto & RB_VERBOSE) + printf( + "Overlapping memory regions, ignoring second region\n"); + return (1); + } + } + + /* See if we can prepend to the next entry. */ + if (insert_idx <= _physmap_idx && + base + length == physmap[insert_idx]) { + physmap[insert_idx] = base; + return (1); + } + + /* See if we can append to the previous entry. */ + if (insert_idx > 0 && base == physmap[insert_idx - 1]) { + physmap[insert_idx - 1] += length; + return (1); + } + + _physmap_idx += 2; + *physmap_idxp = _physmap_idx; + if (_physmap_idx == PHYSMAP_SIZE) { + printf( + "Too many segments in the physical address map, giving up\n"); + return (0); + } + + /* + * Move the last 'N' entries down to make room for the new + * entry if needed. + */ + for (i = _physmap_idx; i > insert_idx; i -= 2) { + physmap[i] = physmap[i - 2]; + physmap[i + 1] = physmap[i - 1]; + } + + /* Insert the new entry. */ + physmap[insert_idx] = base; + physmap[insert_idx + 1] = base + length; + + printf("physmap[%d] = 0x%016lx\n", insert_idx, base); + printf("physmap[%d] = 0x%016lx\n", insert_idx + 1, base + length); + return (1); +} + +#ifdef FDT +static void +try_load_dtb(caddr_t kmdp) +{ + vm_offset_t dtbp; + + dtbp = (vm_offset_t)&fdt_static_dtb; + if (dtbp == (vm_offset_t)NULL) { + printf("ERROR loading DTB\n"); + return; + } + + if (OF_install(OFW_FDT, 0) == FALSE) + panic("Cannot install FDT"); + + if (OF_init((void *)dtbp) != 0) + panic("OF_init failed with the found device tree"); +} +#endif + +static void +cache_setup(void) +{ + + /* TODO */ +} + +/* + * Fake up a boot descriptor table. + * RISCVTODO: This needs to be done via loader (when it's available). 
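+ *
+ * A small static MODINFO list is built instead: the kernel name
+ * ("kernel"), its type ("elf64 kernel"), its load address
+ * (KERNBASE + KERNENTRY) and its size up to the linker-provided 'end'
+ * symbol, terminated by a pair of zero entries.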
+ */ +vm_offset_t +fake_preload_metadata(struct riscv_bootparams *rvbp __unused) +{ +#ifdef DDB + vm_offset_t zstart = 0, zend = 0; +#endif + vm_offset_t lastaddr; + int i = 0; + static uint32_t fake_preload[35]; + + fake_preload[i++] = MODINFO_NAME; + fake_preload[i++] = strlen("kernel") + 1; + strcpy((char*)&fake_preload[i++], "kernel"); + i += 1; + fake_preload[i++] = MODINFO_TYPE; + fake_preload[i++] = strlen("elf64 kernel") + 1; + strcpy((char*)&fake_preload[i++], "elf64 kernel"); + i += 3; + fake_preload[i++] = MODINFO_ADDR; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = (uint64_t)(KERNBASE + KERNENTRY); + i += 1; + fake_preload[i++] = MODINFO_SIZE; + fake_preload[i++] = sizeof(uint64_t); + printf("end is 0x%016lx\n", (uint64_t)&end); + fake_preload[i++] = (uint64_t)&end - (uint64_t)(KERNBASE + KERNENTRY); + i += 1; +#ifdef DDB +#if 0 + /* RISCVTODO */ + if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { + fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); + fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; + fake_preload[i++] = sizeof(vm_offset_t); + fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); + lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); + zend = lastaddr; + zstart = *(uint32_t *)(KERNVIRTADDR + 4); + db_fetch_ksymtab(zstart, zend); + } else +#endif +#endif + lastaddr = (vm_offset_t)&end; + fake_preload[i++] = 0; + fake_preload[i] = 0; + preload_metadata = (void *)fake_preload; + + return (lastaddr); +} + +void +initriscv(struct riscv_bootparams *rvbp) +{ + vm_offset_t lastaddr; + vm_size_t kernlen; + caddr_t kmdp; + + /* Set the module data location */ + lastaddr = fake_preload_metadata(rvbp); + + /* Find the kernel address */ + kmdp = preload_search_by_type("elf kernel"); + if (kmdp == NULL) + kmdp = preload_search_by_type("elf64 kernel"); + + boothowto = 0; + + kern_envp = NULL; + +#ifdef FDT + try_load_dtb(kmdp); +#endif + + /* Load the physical memory ranges */ + physmap_idx = 0; + + /* + * RISCVTODO: figure out whether platform provides ranges, + * or grab from FDT. + */ + add_physmap_entry(0, 0x8000000, physmap, &physmap_idx); + + /* Set the pcpu data, this is needed by pmap_bootstrap */ + pcpup = &__pcpu[0]; + pcpu_init(pcpup, 0, sizeof(struct pcpu)); + + /* Set the pcpu pointer */ +#if 0 + /* SMP TODO: try re-use gp for pcpu pointer */ + __asm __volatile( + "mv gp, %0" :: "r"(pcpup)); +#endif + + PCPU_SET(curthread, &thread0); + + /* Do basic tuning, hz etc */ + init_param1(); + + cache_setup(); + + /* Bootstrap enough of pmap to enter the kernel proper */ + kernlen = (lastaddr - KERNBASE); + pmap_bootstrap(rvbp->kern_l1pt, KERNENTRY, kernlen); + + cninit(); + + init_proc0(rvbp->kern_stack); + + /* set page table base register for thread0 */ + thread0.td_pcb->pcb_l1addr = (rvbp->kern_l1pt - KERNBASE); + + msgbufinit(msgbufp, msgbufsize); + mutex_init(); + init_param2(physmem); + kdb_init(); + + riscv_init_interrupts(); + + early_boot = 0; +} diff --git a/sys/riscv/riscv/mem.c b/sys/riscv/riscv/mem.c new file mode 100644 index 000000000000..38488a2c9a87 --- /dev/null +++ b/sys/riscv/riscv/mem.c @@ -0,0 +1,124 @@ +/*- + * Copyright (c) 2014 Andrew Turner + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +struct mem_range_softc mem_range_softc; + +int +memrw(struct cdev *dev, struct uio *uio, int flags) +{ + ssize_t orig_resid; + vm_offset_t off, v; + struct iovec *iov; + struct vm_page m; + vm_page_t marr; + u_int cnt; + int error; + + error = 0; + orig_resid = uio->uio_resid; + while (uio->uio_resid > 0 && error == 0) { + iov = uio->uio_iov; + if (iov->iov_len == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + if (uio->uio_iovcnt < 0) + panic("memrw"); + continue; + } + + v = uio->uio_offset; + off = v & PAGE_MASK; + cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off); + if (cnt == 0) + continue; + + switch(dev2unit(dev)) { + case CDEV_MINOR_KMEM: + /* If the address is in the DMAP just copy it */ + if (VIRT_IN_DMAP(v)) { + error = uiomove((void *)v, cnt, uio); + break; + } + + if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ? + VM_PROT_READ : VM_PROT_WRITE)) { + error = EFAULT; + break; + } + + /* Get the physical address to read */ + v = pmap_extract(kernel_pmap, v); + if (v == 0) { + error = EFAULT; + break; + } + + /* FALLTHROUGH */ + case CDEV_MINOR_MEM: + /* If within the DMAP use this to copy from */ + if (PHYS_IN_DMAP(v)) { + v = PHYS_TO_DMAP(v); + error = uiomove((void *)v, cnt, uio); + break; + } + + /* Have uiomove_fromphys handle the data */ + m.phys_addr = trunc_page(v); + marr = &m; + uiomove_fromphys(&marr, off, cnt, uio); + break; + } + } + + /* + * Don't return error if any byte was written. Read and write + * can return error only if no i/o was performed. + */ + if (uio->uio_resid != orig_resid) + error = 0; + + return (error); +} + diff --git a/sys/riscv/riscv/minidump_machdep.c b/sys/riscv/riscv/minidump_machdep.c new file mode 100644 index 000000000000..ca51cfc77e50 --- /dev/null +++ b/sys/riscv/riscv/minidump_machdep.c @@ -0,0 +1,59 @@ +/*- + * Copyright (c) 2006 Peter Wemm + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include "opt_watchdog.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +CTASSERT(sizeof(struct kerneldumpheader) == 512); +CTASSERT(sizeof(*vm_page_dump) == 8); + +int +minidumpsys(struct dumperinfo *di) +{ + + panic("minidumpsys"); +} diff --git a/sys/riscv/riscv/nexus.c b/sys/riscv/riscv/nexus.c new file mode 100644 index 000000000000..83b57959edab --- /dev/null +++ b/sys/riscv/riscv/nexus.c @@ -0,0 +1,387 @@ +/*- + * Copyright 1998 Massachusetts Institute of Technology + * + * Permission to use, copy, modify, and distribute this software and + * its documentation for any purpose and without fee is hereby + * granted, provided that both the above copyright notice and this + * permission notice appear in all copies, that both the above + * copyright notice and this permission notice appear in all + * supporting documentation, and that the name of M.I.T. not be used + * in advertising or publicity pertaining to distribution of the + * software without specific, written prior permission. M.I.T. makes + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied + * warranty. + * + * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS + * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT + * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + */ + +/* + * This code implements a `root nexus' for RISC-V Architecture + * machines. The function of the root nexus is to serve as an + * attachment point for both processors and buses, and to manage + * resources which are common to all of them. 
In particular, + * this code implements the core resource managers for interrupt + * requests, DMA requests (which rightfully should be a part of the + * ISA code but it's easier to do it here for now), I/O port addresses, + * and I/O memory address space. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include "opt_platform.h" + +#include +#include "ofw_bus_if.h" + +extern struct bus_space memmap_bus; + +static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); + +struct nexus_device { + struct resource_list nx_resources; +}; + +#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) + +static struct rman mem_rman; +static struct rman irq_rman; + +static device_probe_t nexus_fdt_probe; +static int nexus_attach(device_t); + +static int nexus_print_child(device_t, device_t); +static device_t nexus_add_child(device_t, u_int, const char *, int); +static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, + u_long, u_long, u_long, u_int); +static int nexus_activate_resource(device_t, device_t, int, int, + struct resource *); +static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, + enum intr_polarity pol); +static struct resource_list *nexus_get_reslist(device_t, device_t); +static int nexus_set_resource(device_t, device_t, int, int, u_long, u_long); +static int nexus_deactivate_resource(device_t, device_t, int, int, + struct resource *); + +static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, + int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); +static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); + +static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, + int icells, pcell_t *intr); + +static device_method_t nexus_methods[] = { + /* Device interface */ + DEVMETHOD(device_probe, nexus_fdt_probe), + DEVMETHOD(device_attach, nexus_attach), + + /* OFW interface */ + DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), + + /* Bus interface */ + DEVMETHOD(bus_print_child, nexus_print_child), + DEVMETHOD(bus_add_child, nexus_add_child), + DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), + DEVMETHOD(bus_activate_resource, nexus_activate_resource), + DEVMETHOD(bus_config_intr, nexus_config_intr), + DEVMETHOD(bus_get_resource_list, nexus_get_reslist), + DEVMETHOD(bus_set_resource, nexus_set_resource), + DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), + DEVMETHOD(bus_setup_intr, nexus_setup_intr), + DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), + + { 0, 0 } +}; + +static driver_t nexus_fdt_driver = { + "nexus", + nexus_methods, + 1 /* no softc */ +}; + +static int +nexus_fdt_probe(device_t dev) +{ + + device_quiet(dev); + return (BUS_PROBE_DEFAULT); +} + +static int +nexus_attach(device_t dev) +{ + + mem_rman.rm_start = 0; + mem_rman.rm_end = ~0ul; + mem_rman.rm_type = RMAN_ARRAY; + mem_rman.rm_descr = "I/O memory addresses"; + if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0)) + panic("nexus_attach mem_rman"); + irq_rman.rm_start = 0; + irq_rman.rm_end = ~0ul; + irq_rman.rm_type = RMAN_ARRAY; + irq_rman.rm_descr = "Interrupts"; + if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, ~0)) + panic("nexus_attach irq_rman"); + + nexus_add_child(dev, 10, "ofwbus", 0); + + bus_generic_probe(dev); + bus_generic_attach(dev); + + return (0); +} + +static int 
+nexus_print_child(device_t bus, device_t child) +{ + int retval = 0; + + retval += bus_print_child_header(bus, child); + retval += printf("\n"); + + return (retval); +} + +static device_t +nexus_add_child(device_t bus, u_int order, const char *name, int unit) +{ + device_t child; + struct nexus_device *ndev; + + ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); + if (!ndev) + return (0); + resource_list_init(&ndev->nx_resources); + + child = device_add_child_ordered(bus, order, name, unit); + + /* should we free this in nexus_child_detached? */ + device_set_ivars(child, ndev); + + return (child); +} + + +/* + * Allocate a resource on behalf of child. NB: child is usually going to be a + * child of one of our descendants, not a direct child of nexus0. + * (Exceptions include footbridge.) + */ +static struct resource * +nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, + u_long start, u_long end, u_long count, u_int flags) +{ + struct nexus_device *ndev = DEVTONX(child); + struct resource *rv; + struct resource_list_entry *rle; + struct rman *rm; + int needactivate = flags & RF_ACTIVE; + + /* + * If this is an allocation of the "default" range for a given + * RID, and we know what the resources for this device are + * (ie. they aren't maintained by a child bus), then work out + * the start/end values. + */ + if ((start == 0UL) && (end == ~0UL) && (count == 1)) { + if (device_get_parent(child) != bus || ndev == NULL) + return(NULL); + rle = resource_list_find(&ndev->nx_resources, type, *rid); + if (rle == NULL) + return(NULL); + start = rle->start; + end = rle->end; + count = rle->count; + } + + switch (type) { + case SYS_RES_IRQ: + rm = &irq_rman; + break; + + case SYS_RES_MEMORY: + case SYS_RES_IOPORT: + rm = &mem_rman; + break; + + default: + return (NULL); + } + + rv = rman_reserve_resource(rm, start, end, count, flags, child); + if (rv == 0) + return (NULL); + + rman_set_rid(rv, *rid); + rman_set_bushandle(rv, rman_get_start(rv)); + + if (needactivate) { + if (bus_activate_resource(child, type, *rid, rv)) { + rman_release_resource(rv); + return (NULL); + } + } + + return (rv); +} + +static int +nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, + enum intr_polarity pol) +{ + + return (riscv_config_intr(irq, trig, pol)); +} + +static int +nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, + driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) +{ + int error; + + if ((rman_get_flags(res) & RF_SHAREABLE) == 0) + flags |= INTR_EXCL; + + /* We depend here on rman_activate_resource() being idempotent. */ + error = rman_activate_resource(res); + if (error) + return (error); + + error = riscv_setup_intr(device_get_nameunit(child), filt, intr, + arg, rman_get_start(res), flags, cookiep); + + return (error); +} + +static int +nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) +{ + + return (riscv_teardown_intr(ih)); +} + +static int +nexus_activate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + int err; + bus_addr_t paddr; + bus_size_t psize; + bus_space_handle_t vaddr; + + if ((err = rman_activate_resource(r)) != 0) + return (err); + + /* + * If this is a memory resource, map it into the kernel. 
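+ * The mapping is established with bus_space_map() on the global
+ * memmap_bus tag, and the resulting handle is recorded as both the
+ * resource's virtual address and its bus handle.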
+ */ + if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { + paddr = (bus_addr_t)rman_get_start(r); + psize = (bus_size_t)rman_get_size(r); + err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr); + if (err != 0) { + rman_deactivate_resource(r); + return (err); + } + rman_set_bustag(r, &memmap_bus); + rman_set_virtual(r, (void *)vaddr); + rman_set_bushandle(r, vaddr); + } + return (0); +} + +static struct resource_list * +nexus_get_reslist(device_t dev, device_t child) +{ + struct nexus_device *ndev = DEVTONX(child); + + return (&ndev->nx_resources); +} + +static int +nexus_set_resource(device_t dev, device_t child, int type, int rid, + u_long start, u_long count) +{ + struct nexus_device *ndev = DEVTONX(child); + struct resource_list *rl = &ndev->nx_resources; + + /* XXX this should return a success/failure indicator */ + resource_list_add(rl, type, rid, start, start + count - 1, count); + + return(0); +} + + +static int +nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, + struct resource *r) +{ + bus_size_t psize; + bus_space_handle_t vaddr; + + psize = (bus_size_t)rman_get_size(r); + vaddr = rman_get_bushandle(r); + + if (vaddr != 0) { + bus_space_unmap(&memmap_bus, vaddr, psize); + rman_set_virtual(r, NULL); + rman_set_bushandle(r, 0); + } + + return (rman_deactivate_resource(r)); +} + +static devclass_t nexus_fdt_devclass; + +EARLY_DRIVER_MODULE(nexus_fdt, root, nexus_fdt_driver, nexus_fdt_devclass, + 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_FIRST); + +static int +nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, + pcell_t *intr) +{ + int irq; + + if (icells == 3) { + irq = intr[1]; + if (intr[0] == 0) + irq += 32; /* SPI */ + else + irq += 16; /* PPI */ + } else + irq = intr[0]; + + return (irq); +} diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c new file mode 100644 index 000000000000..e4ca19fff330 --- /dev/null +++ b/sys/riscv/riscv/pmap.c @@ -0,0 +1,3197 @@ +/*- + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * Copyright (c) 1994 John S. Dyson + * All rights reserved. + * Copyright (c) 1994 David Greenman + * All rights reserved. + * Copyright (c) 2003 Peter Wemm + * All rights reserved. + * Copyright (c) 2005-2010 Alan L. Cox + * All rights reserved. + * Copyright (c) 2014 Andrew Turner + * All rights reserved. + * Copyright (c) 2014 The FreeBSD Foundation + * All rights reserved. + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department and William Jolitz of UUNET Technologies Inc. + * + * Portions of this software were developed by Andrew Turner under + * sponsorship from The FreeBSD Foundation. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 + */ +/*- + * Copyright (c) 2003 Networks Associates Technology, Inc. + * All rights reserved. + * + * This software was developed for the FreeBSD Project by Jake Burkholder, + * Safeport Network Services, and Network Associates Laboratories, the + * Security Research Division of Network Associates, Inc. under + * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA + * CHATS research program. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * Manages physical address maps. 
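+ *
+ * This implementation uses a three-level page table (l1/l2/l3 in the
+ * code below); the direct map and the early kernel mapping are built
+ * from l2 superpages (see pmap_bootstrap_dmap() and pmap_bootstrap()).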
+ * + * Since the information managed by this module is + * also stored by the logical address mapping module, + * this module may throw away valid virtual-to-physical + * mappings at almost any time. However, invalidations + * of virtual-to-physical mappings must be done as + * requested. + * + * In order to cope with hardware architectures which + * make virtual-to-physical map invalidates expensive, + * this module may delay invalidate or reduced protection + * operations until such time as they are actually + * necessary. This module is given full information as + * to which processors are currently using which maps, + * and to when physical maps must be made correct. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t))) +#define NUPDE (NPDEPG * NPDEPG) +#define NUSERPGTBLS (NUPDE + NPDEPG) + +#if !defined(DIAGNOSTIC) +#ifdef __GNUC_GNU_INLINE__ +#define PMAP_INLINE __attribute__((__gnu_inline__)) inline +#else +#define PMAP_INLINE extern inline +#endif +#else +#define PMAP_INLINE +#endif + +#ifdef PV_STATS +#define PV_STAT(x) do { x ; } while (0) +#else +#define PV_STAT(x) do { } while (0) +#endif + +#define pmap_l2_pindex(v) ((v) >> L2_SHIFT) + +#define NPV_LIST_LOCKS MAXCPU + +#define PHYS_TO_PV_LIST_LOCK(pa) \ + (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS]) + +#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \ + struct rwlock **_lockp = (lockp); \ + struct rwlock *_new_lock; \ + \ + _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \ + if (_new_lock != *_lockp) { \ + if (*_lockp != NULL) \ + rw_wunlock(*_lockp); \ + *_lockp = _new_lock; \ + rw_wlock(*_lockp); \ + } \ +} while (0) + +#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \ + CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m)) + +#define RELEASE_PV_LIST_LOCK(lockp) do { \ + struct rwlock **_lockp = (lockp); \ + \ + if (*_lockp != NULL) { \ + rw_wunlock(*_lockp); \ + *_lockp = NULL; \ + } \ +} while (0) + +#define VM_PAGE_TO_PV_LIST_LOCK(m) \ + PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m)) + +struct pmap kernel_pmap_store; + +vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ +vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ +vm_offset_t kernel_vm_end = 0; + +struct msgbuf *msgbufp = NULL; + +static struct rwlock_padalign pvh_global_lock; + +extern uint64_t pagetable_l0; + +/* + * Data for the pv entry allocation mechanism + */ +static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); +static struct mtx pv_chunks_mutex; +static struct rwlock pv_list_locks[NPV_LIST_LOCKS]; + +static void free_pv_chunk(struct pv_chunk *pc); +static void free_pv_entry(pmap_t pmap, pv_entry_t pv); +static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp); +static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp); +static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); +static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, + vm_offset_t va); +static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, + vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); +static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva, + pd_entry_t ptepde, 
struct spglist *free, struct rwlock **lockp); +static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, + vm_page_t m, struct rwlock **lockp); + +static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, + struct rwlock **lockp); + +static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, + struct spglist *free); +static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *); + +/* + * These load the old table data and store the new value. + * They need to be atomic as the System MMU may write to the table at + * the same time as the CPU. + */ +#define pmap_load_store(table, entry) atomic_swap_64(table, entry) +#define pmap_set(table, mask) atomic_set_64(table, mask) +#define pmap_load_clear(table) atomic_swap_64(table, 0) +#define pmap_load(table) (*table) + +/********************/ +/* Inline functions */ +/********************/ + +static __inline void +pagecopy(void *s, void *d) +{ + + memcpy(d, s, PAGE_SIZE); +} + +static __inline void +pagezero(void *p) +{ + + bzero(p, PAGE_SIZE); +} + +#define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK) +#define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK) +#define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK) + +#define PTE_TO_PHYS(pte) ((pte >> PTE_PPN0_S) * PAGE_SIZE) + +static __inline pd_entry_t * +pmap_l1(pmap_t pmap, vm_offset_t va) +{ + + return (&pmap->pm_l1[pmap_l1_index(va)]); +} + +static __inline pd_entry_t * +pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va) +{ + vm_paddr_t phys; + pd_entry_t *l2; + + phys = PTE_TO_PHYS(pmap_load(l1)); + l2 = (pd_entry_t *)PHYS_TO_DMAP(phys); + + return (&l2[pmap_l2_index(va)]); +} + +static __inline pd_entry_t * +pmap_l2(pmap_t pmap, vm_offset_t va) +{ + pd_entry_t *l1; + + l1 = pmap_l1(pmap, va); + + if ((pmap_load(l1) & PTE_VALID) == 0) + return (NULL); + if ((pmap_load(l1) & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S)) + return (NULL); + + return (pmap_l1_to_l2(l1, va)); +} + +static __inline pt_entry_t * +pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va) +{ + vm_paddr_t phys; + pt_entry_t *l3; + + phys = PTE_TO_PHYS(pmap_load(l2)); + l3 = (pd_entry_t *)PHYS_TO_DMAP(phys); + + return (&l3[pmap_l3_index(va)]); +} + +static __inline pt_entry_t * +pmap_l3(pmap_t pmap, vm_offset_t va) +{ + pd_entry_t *l2; + + l2 = pmap_l2(pmap, va); + if (l2 == NULL) + return (NULL); + if ((pmap_load(l2) & PTE_VALID) == 0) + return (NULL); + if (l2 == NULL || (pmap_load(l2) & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S)) + return (NULL); + + return (pmap_l2_to_l3(l2, va)); +} + + +static __inline int +pmap_is_write(pt_entry_t entry) +{ + + if (entry & (1 << PTE_TYPE_S)) + return (1); + + return (0); +} + +static __inline int +pmap_is_current(pmap_t pmap) +{ + + return ((pmap == pmap_kernel()) || + (pmap == curthread->td_proc->p_vmspace->vm_map.pmap)); +} + +static __inline int +pmap_l3_valid(pt_entry_t l3) +{ + + return (l3 & PTE_VALID); +} + +static __inline int +pmap_l3_valid_cacheable(pt_entry_t l3) +{ + + /* TODO */ + + return (0); +} + +#define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte)) + +/* Checks if the page is dirty. 
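+ * A mapping is considered dirty when PTE_DIRTY is set in its leaf PTE,
+ * which happens on the first write (by hardware or by a page fault,
+ * depending on the implementation).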
*/ +static inline int +pmap_page_dirty(pt_entry_t pte) +{ + + return (pte & PTE_DIRTY); +} + +static __inline void +pmap_resident_count_inc(pmap_t pmap, int count) +{ + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + pmap->pm_stats.resident_count += count; +} + +static __inline void +pmap_resident_count_dec(pmap_t pmap, int count) +{ + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + KASSERT(pmap->pm_stats.resident_count >= count, + ("pmap %p resident count underflow %ld %d", pmap, + pmap->pm_stats.resident_count, count)); + pmap->pm_stats.resident_count -= count; +} + +static pt_entry_t * +pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot, + u_int *l2_slot) +{ + pt_entry_t *l2; + pd_entry_t *l1; + + l1 = (pd_entry_t *)l1pt; + *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK; + + /* Check locore has used a table L1 map */ + KASSERT((l1[*l1_slot] & PTE_TYPE_M) == (PTE_TYPE_PTR << PTE_TYPE_S), + ("Invalid bootstrap L1 table")); + + /* Find the address of the L2 table */ + l2 = (pt_entry_t *)init_pt_va; + *l2_slot = pmap_l2_index(va); + + return (l2); +} + +static vm_paddr_t +pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va) +{ + u_int l1_slot, l2_slot; + pt_entry_t *l2; + u_int ret; + + l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot); + + /* L2 is superpages */ + ret = (l2[l2_slot] >> PTE_PPN1_S) << L2_SHIFT; + ret += (va & L2_OFFSET); + + return (ret); +} + +static void +pmap_bootstrap_dmap(vm_offset_t l2pt) +{ + vm_offset_t va; + vm_paddr_t pa; + pd_entry_t *l2; + u_int l2_slot; + pt_entry_t entry; + u_int pn; + + va = DMAP_MIN_ADDRESS; + l2 = (pd_entry_t *)l2pt; + l2_slot = pmap_l2_index(DMAP_MIN_ADDRESS); + + for (pa = 0; va < DMAP_MAX_ADDRESS; pa += L2_SIZE, va += L2_SIZE, l2_slot++) { + KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); + + /* superpages */ + pn = ((pa >> L2_SHIFT) & Ln_ADDR_MASK); + entry = (PTE_VALID | (PTE_TYPE_SRWX << PTE_TYPE_S)); + entry |= (pn << PTE_PPN1_S); + + pmap_load_store(&l2[l2_slot], entry); + } + + cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE); + cpu_tlb_flushID(); +} + +/* + * Bootstrap the system enough to run with virtual memory. + */ +void +pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) +{ + u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot; + uint64_t kern_delta; + pt_entry_t *l2; + vm_offset_t va, freemempos; + vm_offset_t dpcpu, msgbufpv; + vm_paddr_t pa, min_pa; + vm_offset_t l2pt; + int i; + + kern_delta = KERNBASE - kernstart; + physmem = 0; + + printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen); + printf("%lx\n", l1pt); + printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK); + + /* Set this early so we can use the pagetable walking functions */ + kernel_pmap_store.pm_l1 = (pd_entry_t *)l1pt; + PMAP_LOCK_INIT(kernel_pmap); + + /* + * Initialize the global pv list lock. + */ + rw_init(&pvh_global_lock, "pmap pv global"); + + /* Assume the address we were loaded to is a valid physical address */ + min_pa = KERNBASE - kern_delta; + + /* + * Find the minimum physical address. physmap is sorted, + * but may contain empty ranges. + */ + for (i = 0; i < (physmap_idx * 2); i += 2) { + if (physmap[i] == physmap[i + 1]) + continue; + if (physmap[i] <= min_pa) + min_pa = physmap[i]; + break; + } + + /* Create a direct map region early so we can use it for pa -> va */ + l2pt = (l1pt + PAGE_SIZE); + pmap_bootstrap_dmap(l2pt); + + va = KERNBASE; + pa = KERNBASE - kern_delta; + + /* + * Start to initialize phys_avail by copying from physmap + * up to the physical address KERNBASE points at. 
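+ * Empty ranges are skipped and each copied range is accounted into
+ * physmem (in pages); any memory below the kernel's load address is
+ * then added as its own phys_avail entry.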
+ */ + map_slot = avail_slot = 0; + for (; map_slot < (physmap_idx * 2); map_slot += 2) { + if (physmap[map_slot] == physmap[map_slot + 1]) + continue; + + phys_avail[avail_slot] = physmap[map_slot]; + phys_avail[avail_slot + 1] = physmap[map_slot + 1]; + physmem += (phys_avail[avail_slot + 1] - + phys_avail[avail_slot]) >> PAGE_SHIFT; + avail_slot += 2; + } + + /* Add the memory before the kernel */ + if (physmap[avail_slot] < pa) { + phys_avail[avail_slot] = physmap[map_slot]; + phys_avail[avail_slot + 1] = pa; + physmem += (phys_avail[avail_slot + 1] - + phys_avail[avail_slot]) >> PAGE_SHIFT; + avail_slot += 2; + } + used_map_slot = map_slot; + + /* + * Read the page table to find out what is already mapped. + * This assumes we have mapped a block of memory from KERNBASE + * using a single L1 entry. + */ + l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot); + + /* Sanity check the index, KERNBASE should be the first VA */ + KASSERT(l2_slot == 0, ("The L2 index is non-zero")); + + /* Find how many pages we have mapped */ + for (; l2_slot < Ln_ENTRIES; l2_slot++) { + if ((l2[l2_slot] & PTE_VALID) == 0) + break; + + /* Check locore used L2 superpages */ + KASSERT((l2[l2_slot] & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S), + ("Invalid bootstrap L2 table")); + + va += L2_SIZE; + pa += L2_SIZE; + } + + va = roundup2(va, L2_SIZE); + + freemempos = KERNBASE + kernlen; + freemempos = roundup2(freemempos, PAGE_SIZE); + + cpu_tlb_flushID(); + +#define alloc_pages(var, np) \ + (var) = freemempos; \ + freemempos += (np * PAGE_SIZE); \ + memset((char *)(var), 0, ((np) * PAGE_SIZE)); + + /* Allocate dynamic per-cpu area. */ + alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); + dpcpu_init((void *)dpcpu, 0); + + /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */ + alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); + msgbufp = (void *)msgbufpv; + + virtual_avail = roundup2(freemempos, L2_SIZE); + virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE; + kernel_vm_end = virtual_avail; + + pa = pmap_early_vtophys(l1pt, freemempos); + + /* Finish initialising physmap */ + map_slot = used_map_slot; + for (; avail_slot < (PHYS_AVAIL_SIZE - 2) && + map_slot < (physmap_idx * 2); map_slot += 2) { + if (physmap[map_slot] == physmap[map_slot + 1]) + continue; + + /* Have we used the current range? */ + if (physmap[map_slot + 1] <= pa) + continue; + + /* Do we need to split the entry? */ + if (physmap[map_slot] < pa) { + phys_avail[avail_slot] = pa; + phys_avail[avail_slot + 1] = physmap[map_slot + 1]; + } else { + phys_avail[avail_slot] = physmap[map_slot]; + phys_avail[avail_slot + 1] = physmap[map_slot + 1]; + } + physmem += (phys_avail[avail_slot + 1] - + phys_avail[avail_slot]) >> PAGE_SHIFT; + + avail_slot += 2; + } + phys_avail[avail_slot] = 0; + phys_avail[avail_slot + 1] = 0; + + /* + * Maxmem isn't the "maximum memory", it's one larger than the + * highest page of the physical address space. It should be + * called something like "Maxphyspage". + */ + Maxmem = atop(phys_avail[avail_slot - 1]); + + cpu_tlb_flushID(); +} + +/* + * Initialize a vm_page's machine-dependent fields. + */ +void +pmap_page_init(vm_page_t m) +{ + + TAILQ_INIT(&m->md.pv_list); + m->md.pv_memattr = VM_MEMATTR_WRITE_BACK; +} + +/* + * Initialize the pmap module. + * Called by vm_init, to initialize any structures that the pmap + * system needs to map virtual memory. + */ +void +pmap_init(void) +{ + int i; + + /* + * Initialize the pv chunk list mutex. 
+ */ + mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF); + + /* + * Initialize the pool of pv list locks. + */ + for (i = 0; i < NPV_LIST_LOCKS; i++) + rw_init(&pv_list_locks[i], "pmap pv list"); +} + +/* + * Normal, non-SMP, invalidation functions. + * We inline these within pmap.c for speed. + */ +PMAP_INLINE void +pmap_invalidate_page(pmap_t pmap, vm_offset_t va) +{ + + /* TODO */ + + sched_pin(); + __asm __volatile("sfence.vm"); + sched_unpin(); +} + +PMAP_INLINE void +pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) +{ + + /* TODO */ + + sched_pin(); + __asm __volatile("sfence.vm"); + sched_unpin(); +} + +PMAP_INLINE void +pmap_invalidate_all(pmap_t pmap) +{ + + /* TODO */ + + sched_pin(); + __asm __volatile("sfence.vm"); + sched_unpin(); +} + +/* + * Routine: pmap_extract + * Function: + * Extract the physical page address associated + * with the given map/virtual_address pair. + */ +vm_paddr_t +pmap_extract(pmap_t pmap, vm_offset_t va) +{ + pd_entry_t *l2p, l2; + pt_entry_t *l3p, l3; + vm_paddr_t pa; + + pa = 0; + PMAP_LOCK(pmap); + /* + * Start with the l2 tabel. We are unable to allocate + * pages in the l1 table. + */ + l2p = pmap_l2(pmap, va); + if (l2p != NULL) { + l2 = pmap_load(l2p); + if ((l2 & PTE_TYPE_M) == (PTE_TYPE_PTR << PTE_TYPE_S)) { + l3p = pmap_l2_to_l3(l2p, va); + if (l3p != NULL) { + l3 = pmap_load(l3p); + pa = PTE_TO_PHYS(l3); + pa |= (va & L3_OFFSET); + } + } else { + /* L2 is superpages */ + pa = (l2 >> PTE_PPN1_S) << L2_SHIFT; + pa |= (va & L2_OFFSET); + } + } + PMAP_UNLOCK(pmap); + return (pa); +} + +/* + * Routine: pmap_extract_and_hold + * Function: + * Atomically extract and hold the physical page + * with the given pmap and virtual address pair + * if that mapping permits the given protection. + */ +vm_page_t +pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) +{ + pt_entry_t *l3p, l3; + vm_paddr_t phys; + vm_paddr_t pa; + vm_page_t m; + + pa = 0; + m = NULL; + PMAP_LOCK(pmap); +retry: + l3p = pmap_l3(pmap, va); + if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) { + if ((pmap_is_write(l3)) || ((prot & VM_PROT_WRITE) == 0)) { + phys = PTE_TO_PHYS(l3); + if (vm_page_pa_tryrelock(pmap, phys, &pa)) + goto retry; + m = PHYS_TO_VM_PAGE(phys); + vm_page_hold(m); + } + } + PA_UNLOCK_COND(pa); + PMAP_UNLOCK(pmap); + return (m); +} + +vm_paddr_t +pmap_kextract(vm_offset_t va) +{ + pd_entry_t *l2; + pt_entry_t *l3; + vm_paddr_t pa; + + if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) { + pa = DMAP_TO_PHYS(va); + } else { + l2 = pmap_l2(kernel_pmap, va); + if (l2 == NULL) + panic("pmap_kextract: No l2"); + if ((pmap_load(l2) & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S)) { + /* superpages */ + pa = (pmap_load(l2) >> PTE_PPN1_S) << L2_SHIFT; + pa |= (va & L2_OFFSET); + return (pa); + } + + l3 = pmap_l2_to_l3(l2, va); + if (l3 == NULL) + panic("pmap_kextract: No l3..."); + pa = PTE_TO_PHYS(pmap_load(l3)); + pa |= (va & PAGE_MASK); + } + return (pa); +} + +/*************************************************** + * Low level mapping routines..... 
+ ***************************************************/ + +void +pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa) +{ + pt_entry_t *l3; + vm_offset_t va; + + panic("%s: implement me\n", __func__); + + KASSERT((pa & L3_OFFSET) == 0, + ("pmap_kenter_device: Invalid physical address")); + KASSERT((sva & L3_OFFSET) == 0, + ("pmap_kenter_device: Invalid virtual address")); + KASSERT((size & PAGE_MASK) == 0, + ("pmap_kenter_device: Mapping is not page-sized")); + + va = sva; + while (size != 0) { + l3 = pmap_l3(kernel_pmap, va); + KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); + panic("%s: unimplemented", __func__); +#if 0 /* implement me */ + pmap_load_store(l3, (pa & ~L3_OFFSET) | ATTR_DEFAULT | + ATTR_IDX(DEVICE_MEMORY) | L3_PAGE); +#endif + PTE_SYNC(l3); + + va += PAGE_SIZE; + pa += PAGE_SIZE; + size -= PAGE_SIZE; + } + pmap_invalidate_range(kernel_pmap, sva, va); +} + +/* + * Remove a page from the kernel pagetables. + * Note: not SMP coherent. + */ +PMAP_INLINE void +pmap_kremove(vm_offset_t va) +{ + pt_entry_t *l3; + + l3 = pmap_l3(kernel_pmap, va); + KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); + + if (pmap_l3_valid_cacheable(pmap_load(l3))) + cpu_dcache_wb_range(va, L3_SIZE); + pmap_load_clear(l3); + PTE_SYNC(l3); + pmap_invalidate_page(kernel_pmap, va); +} + +void +pmap_kremove_device(vm_offset_t sva, vm_size_t size) +{ + pt_entry_t *l3; + vm_offset_t va; + + KASSERT((sva & L3_OFFSET) == 0, + ("pmap_kremove_device: Invalid virtual address")); + KASSERT((size & PAGE_MASK) == 0, + ("pmap_kremove_device: Mapping is not page-sized")); + + va = sva; + while (size != 0) { + l3 = pmap_l3(kernel_pmap, va); + KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va)); + pmap_load_clear(l3); + PTE_SYNC(l3); + + va += PAGE_SIZE; + size -= PAGE_SIZE; + } + pmap_invalidate_range(kernel_pmap, sva, va); +} + +/* + * Used to map a range of physical addresses into kernel + * virtual address space. + * + * The value passed in '*virt' is a suggested virtual address for + * the mapping. Architectures which can support a direct-mapped + * physical to virtual region can return the appropriate address + * within that region, leaving '*virt' unchanged. Other + * architectures should map the pages starting at '*virt' and + * update '*virt' with the first usable address after the mapped + * region. + */ +vm_offset_t +pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) +{ + + return PHYS_TO_DMAP(start); +} + + +/* + * Add a list of wired pages to the kva + * this routine is only used for temporary + * kernel mappings that do not need to have + * page modification or references recorded. + * Note that old mappings are simply written + * over. The page *must* be wired. + * Note: SMP coherent. Uses a ranged shootdown IPI. + */ +void +pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) +{ + pt_entry_t *l3, pa; + vm_offset_t va; + vm_page_t m; + pt_entry_t entry; + u_int pn; + int i; + + va = sva; + for (i = 0; i < count; i++) { + m = ma[i]; + pa = VM_PAGE_TO_PHYS(m); + pn = (pa / PAGE_SIZE); + l3 = pmap_l3(kernel_pmap, va); + + entry = (PTE_VALID | (PTE_TYPE_SRWX << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + pmap_load_store(l3, entry); + + PTE_SYNC(l3); + va += L3_SIZE; + } + pmap_invalidate_range(kernel_pmap, sva, va); +} + +/* + * This routine tears out page mappings from the + * kernel -- it is meant only for temporary mappings. + * Note: SMP coherent. Uses a ranged shootdown IPI. 
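+ * (On this port the invalidation is currently a local sfence.vm issued
+ * by pmap_invalidate_range() rather than a ranged shootdown IPI.)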
+ */ +void +pmap_qremove(vm_offset_t sva, int count) +{ + pt_entry_t *l3; + vm_offset_t va; + + KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva)); + + va = sva; + while (count-- > 0) { + l3 = pmap_l3(kernel_pmap, va); + KASSERT(l3 != NULL, ("pmap_kremove: Invalid address")); + + if (pmap_l3_valid_cacheable(pmap_load(l3))) + cpu_dcache_wb_range(va, L3_SIZE); + pmap_load_clear(l3); + PTE_SYNC(l3); + + va += PAGE_SIZE; + } + pmap_invalidate_range(kernel_pmap, sva, va); +} + +/*************************************************** + * Page table page management routines..... + ***************************************************/ +static __inline void +pmap_free_zero_pages(struct spglist *free) +{ + vm_page_t m; + + while ((m = SLIST_FIRST(free)) != NULL) { + SLIST_REMOVE_HEAD(free, plinks.s.ss); + /* Preserve the page's PG_ZERO setting. */ + vm_page_free_toq(m); + } +} + +/* + * Schedule the specified unused page table page to be freed. Specifically, + * add the page to the specified list of pages that will be released to the + * physical memory manager after the TLB has been updated. + */ +static __inline void +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, + boolean_t set_PG_ZERO) +{ + + if (set_PG_ZERO) + m->flags |= PG_ZERO; + else + m->flags &= ~PG_ZERO; + SLIST_INSERT_HEAD(free, m, plinks.s.ss); +} + +/* + * Decrements a page table page's wire count, which is used to record the + * number of valid page table entries within the page. If the wire count + * drops to zero, then the page table page is unmapped. Returns TRUE if the + * page table page was unmapped and FALSE otherwise. + */ +static inline boolean_t +pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) +{ + + --m->wire_count; + if (m->wire_count == 0) { + _pmap_unwire_l3(pmap, va, m, free); + return (TRUE); + } else { + return (FALSE); + } +} + +static void +_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) +{ + vm_paddr_t phys; + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + /* + * unmap the page table page + */ + if (m->pindex >= NUPDE) { + /* PD page */ + pd_entry_t *l1; + l1 = pmap_l1(pmap, va); + pmap_load_clear(l1); + PTE_SYNC(l1); + } else { + /* PTE page */ + pd_entry_t *l2; + l2 = pmap_l2(pmap, va); + pmap_load_clear(l2); + PTE_SYNC(l2); + } + pmap_resident_count_dec(pmap, 1); + if (m->pindex < NUPDE) { + pd_entry_t *l1; + /* We just released a PT, unhold the matching PD */ + vm_page_t pdpg; + + l1 = pmap_l1(pmap, va); + phys = PTE_TO_PHYS(pmap_load(l1)); + pdpg = PHYS_TO_VM_PAGE(phys); + pmap_unwire_l3(pmap, va, pdpg, free); + } + pmap_invalidate_page(pmap, va); + + /* + * This is a release store so that the ordinary store unmapping + * the page table page is globally performed before TLB shoot- + * down is begun. + */ + atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); + + /* + * Put page on a list so that it is released after + * *ALL* TLB shootdown is done + */ + pmap_add_delayed_free_list(m, free, TRUE); +} + +/* + * After removing an l3 entry, this routine is used to + * conditionally free the page, and manage the hold/wire counts. 
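+ * Kernel addresses (va >= VM_MAXUSER_ADDRESS) are never freed here; for
+ * user mappings the page table page named by ptepde has its wire count
+ * dropped via pmap_unwire_l3(), which unmaps and frees it once the
+ * count reaches zero.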
+ */ +static int +pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde, + struct spglist *free) +{ + vm_paddr_t phys; + vm_page_t mpte; + + if (va >= VM_MAXUSER_ADDRESS) + return (0); + KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0")); + + phys = PTE_TO_PHYS(ptepde); + + mpte = PHYS_TO_VM_PAGE(phys); + return (pmap_unwire_l3(pmap, va, mpte, free)); +} + +void +pmap_pinit0(pmap_t pmap) +{ + + PMAP_LOCK_INIT(pmap); + bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); + pmap->pm_l1 = kernel_pmap->pm_l1; +} + +int +pmap_pinit(pmap_t pmap) +{ + vm_paddr_t l1phys; + vm_page_t l1pt; + + /* + * allocate the l1 page + */ + while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL | + VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) + VM_WAIT; + + l1phys = VM_PAGE_TO_PHYS(l1pt); + pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys); + + if ((l1pt->flags & PG_ZERO) == 0) + pagezero(pmap->pm_l1); + + bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); + + return (1); +} + +/* + * This routine is called if the desired page table page does not exist. + * + * If page table page allocation fails, this routine may sleep before + * returning NULL. It sleeps only if a lock pointer was given. + * + * Note: If a page allocation fails at page table level two or three, + * one or two pages may be held during the wait, only to be released + * afterwards. This conservative approach is easily argued to avoid + * race conditions. + */ +static vm_page_t +_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) +{ + vm_page_t m, /*pdppg, */pdpg; + pt_entry_t entry; + vm_paddr_t phys; + int pn; + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + + /* + * Allocate a page table page. + */ + if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ | + VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) { + if (lockp != NULL) { + RELEASE_PV_LIST_LOCK(lockp); + PMAP_UNLOCK(pmap); + rw_runlock(&pvh_global_lock); + VM_WAIT; + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + } + + /* + * Indicate the need to retry. While waiting, the page table + * page may have been allocated. + */ + return (NULL); + } + + if ((m->flags & PG_ZERO) == 0) + pmap_zero_page(m); + + /* + * Map the pagetable page into the process address space, if + * it isn't already there. 
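+	 *
+	 * A pindex of NUPDE or above names an l2 (page directory) page
+	 * that is linked from the l1 table; smaller indices name l3
+	 * pages, whose parent l2 page is allocated recursively first if
+	 * it does not exist yet.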
+ */ + + if (ptepindex >= NUPDE) { + pd_entry_t *l1; + vm_pindex_t l1index; + + l1index = ptepindex - NUPDE; + l1 = &pmap->pm_l1[l1index]; + + pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + pmap_load_store(l1, entry); + + PTE_SYNC(l1); + + } else { + vm_pindex_t l1index; + pd_entry_t *l1, *l2; + + l1index = ptepindex >> (L1_SHIFT - L2_SHIFT); + l1 = &pmap->pm_l1[l1index]; + if (pmap_load(l1) == 0) { + /* recurse for allocating page dir */ + if (_pmap_alloc_l3(pmap, NUPDE + l1index, + lockp) == NULL) { + --m->wire_count; + atomic_subtract_int(&vm_cnt.v_wire_count, 1); + vm_page_free_zero(m); + return (NULL); + } + } else { + phys = PTE_TO_PHYS(pmap_load(l1)); + pdpg = PHYS_TO_VM_PAGE(phys); + pdpg->wire_count++; + } + + phys = PTE_TO_PHYS(pmap_load(l1)); + l2 = (pd_entry_t *)PHYS_TO_DMAP(phys); + l2 = &l2[ptepindex & Ln_ADDR_MASK]; + + pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + pmap_load_store(l2, entry); + + PTE_SYNC(l2); + } + + pmap_resident_count_inc(pmap, 1); + + return (m); +} + +static vm_page_t +pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp) +{ + vm_pindex_t ptepindex; + pd_entry_t *l2; + vm_paddr_t phys; + vm_page_t m; + + /* + * Calculate pagetable page index + */ + ptepindex = pmap_l2_pindex(va); +retry: + /* + * Get the page directory entry + */ + l2 = pmap_l2(pmap, va); + + /* + * If the page table page is mapped, we just increment the + * hold count, and activate it. + */ + if (l2 != NULL && pmap_load(l2) != 0) { + phys = PTE_TO_PHYS(pmap_load(l2)); + m = PHYS_TO_VM_PAGE(phys); + m->wire_count++; + } else { + /* + * Here if the pte page isn't mapped, or if it has been + * deallocated. + */ + m = _pmap_alloc_l3(pmap, ptepindex, lockp); + if (m == NULL && lockp != NULL) + goto retry; + } + return (m); +} + + +/*************************************************** + * Pmap allocation/deallocation routines. + ***************************************************/ + +/* + * Release any resources held by the given physical map. + * Called when a pmap initialized by pmap_pinit is being released. + * Should only be called if the map contains no valid mappings. 
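+ *
+ * The only resource to let go of here is the l1 page that pmap_pinit()
+ * allocated; it is unwired and returned to the VM system.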
+ */ +void +pmap_release(pmap_t pmap) +{ + vm_page_t m; + + KASSERT(pmap->pm_stats.resident_count == 0, + ("pmap_release: pmap resident count %ld != 0", + pmap->pm_stats.resident_count)); + + m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1)); + m->wire_count--; + atomic_subtract_int(&vm_cnt.v_wire_count, 1); + vm_page_free_zero(m); +} + +#if 0 +static int +kvm_size(SYSCTL_HANDLER_ARGS) +{ + unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS; + + return sysctl_handle_long(oidp, &ksize, 0, req); +} +SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, + 0, 0, kvm_size, "LU", "Size of KVM"); + +static int +kvm_free(SYSCTL_HANDLER_ARGS) +{ + unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end; + + return sysctl_handle_long(oidp, &kfree, 0, req); +} +SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, + 0, 0, kvm_free, "LU", "Amount of KVM free"); +#endif /* 0 */ + +/* + * grow the number of kernel page table entries, if needed + */ +void +pmap_growkernel(vm_offset_t addr) +{ + vm_paddr_t paddr; + vm_page_t nkpg; + pd_entry_t *l1, *l2; + pt_entry_t entry; + int pn; + + mtx_assert(&kernel_map->system_mtx, MA_OWNED); + + addr = roundup2(addr, L2_SIZE); + if (addr - 1 >= kernel_map->max_offset) + addr = kernel_map->max_offset; + while (kernel_vm_end < addr) { + l1 = pmap_l1(kernel_pmap, kernel_vm_end); + if (pmap_load(l1) == 0) { + /* We need a new PDP entry */ + nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT, + VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | + VM_ALLOC_WIRED | VM_ALLOC_ZERO); + if (nkpg == NULL) + panic("pmap_growkernel: no memory to grow kernel"); + if ((nkpg->flags & PG_ZERO) == 0) + pmap_zero_page(nkpg); + paddr = VM_PAGE_TO_PHYS(nkpg); + + panic("%s: implement grow l1\n", __func__); +#if 0 + pmap_load_store(l1, paddr | L1_TABLE); +#endif + PTE_SYNC(l1); + continue; /* try again */ + } + l2 = pmap_l1_to_l2(l1, kernel_vm_end); + if ((pmap_load(l2) & PTE_REF) != 0) { + kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; + if (kernel_vm_end - 1 >= kernel_map->max_offset) { + kernel_vm_end = kernel_map->max_offset; + break; + } + continue; + } + + nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT, + VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | + VM_ALLOC_ZERO); + if (nkpg == NULL) + panic("pmap_growkernel: no memory to grow kernel"); + if ((nkpg->flags & PG_ZERO) == 0) + pmap_zero_page(nkpg); + paddr = VM_PAGE_TO_PHYS(nkpg); + + pn = (paddr / PAGE_SIZE); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + pmap_load_store(l2, entry); + + PTE_SYNC(l2); + pmap_invalidate_page(kernel_pmap, kernel_vm_end); + + kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET; + if (kernel_vm_end - 1 >= kernel_map->max_offset) { + kernel_vm_end = kernel_map->max_offset; + break; + } + } +} + + +/*************************************************** + * page management routines. 
+ ***************************************************/ + +CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); +CTASSERT(_NPCM == 3); +CTASSERT(_NPCPV == 168); + +static __inline struct pv_chunk * +pv_to_chunk(pv_entry_t pv) +{ + + return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); +} + +#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) + +#define PC_FREE0 0xfffffffffffffffful +#define PC_FREE1 0xfffffffffffffffful +#define PC_FREE2 0x000000fffffffffful + +static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 }; + +#if 0 +#ifdef PV_STATS +static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; + +SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, + "Current number of pv entry chunks"); +SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, + "Current number of pv entry chunks allocated"); +SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, + "Current number of pv entry chunks frees"); +SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, + "Number of times tried to get a chunk page but failed."); + +static long pv_entry_frees, pv_entry_allocs, pv_entry_count; +static int pv_entry_spare; + +SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, + "Current number of pv entry frees"); +SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, + "Current number of pv entry allocs"); +SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, + "Current number of pv entries"); +SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, + "Current number of spare pv entries"); +#endif +#endif /* 0 */ + +/* + * We are in a serious low memory condition. Resort to + * drastic measures to free some pages so we can allocate + * another pv entry chunk. + * + * Returns NULL if PV entries were reclaimed from the specified pmap. + * + * We do not, however, unmap 2mpages because subsequent accesses will + * allocate per-page pv entries until repromotion occurs, thereby + * exacerbating the shortage of free pv entries. + */ +static vm_page_t +reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) +{ + + panic("RISCVTODO: reclaim_pv_chunk"); +} + +/* + * free the pv_entry back to the free list + */ +static void +free_pv_entry(pmap_t pmap, pv_entry_t pv) +{ + struct pv_chunk *pc; + int idx, field, bit; + + rw_assert(&pvh_global_lock, RA_LOCKED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + PV_STAT(atomic_add_long(&pv_entry_frees, 1)); + PV_STAT(atomic_add_int(&pv_entry_spare, 1)); + PV_STAT(atomic_subtract_long(&pv_entry_count, 1)); + pc = pv_to_chunk(pv); + idx = pv - &pc->pc_pventry[0]; + field = idx / 64; + bit = idx % 64; + pc->pc_map[field] |= 1ul << bit; + if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 || + pc->pc_map[2] != PC_FREE2) { + /* 98% of the time, pc is already at the head of the list. 
*/ + if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) { + TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); + TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); + } + return; + } + TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); + free_pv_chunk(pc); +} + +static void +free_pv_chunk(struct pv_chunk *pc) +{ + vm_page_t m; + + mtx_lock(&pv_chunks_mutex); + TAILQ_REMOVE(&pv_chunks, pc, pc_lru); + mtx_unlock(&pv_chunks_mutex); + PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV)); + PV_STAT(atomic_subtract_int(&pc_chunk_count, 1)); + PV_STAT(atomic_add_int(&pc_chunk_frees, 1)); + /* entire chunk is free, return it */ + m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc)); +#if 0 /* TODO: For minidump */ + dump_drop_page(m->phys_addr); +#endif + vm_page_unwire(m, PQ_INACTIVE); + vm_page_free(m); +} + +/* + * Returns a new PV entry, allocating a new PV chunk from the system when + * needed. If this PV chunk allocation fails and a PV list lock pointer was + * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is + * returned. + * + * The given PV list lock may be released. + */ +static pv_entry_t +get_pv_entry(pmap_t pmap, struct rwlock **lockp) +{ + int bit, field; + pv_entry_t pv; + struct pv_chunk *pc; + vm_page_t m; + + rw_assert(&pvh_global_lock, RA_LOCKED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + PV_STAT(atomic_add_long(&pv_entry_allocs, 1)); +retry: + pc = TAILQ_FIRST(&pmap->pm_pvchunk); + if (pc != NULL) { + for (field = 0; field < _NPCM; field++) { + if (pc->pc_map[field]) { + bit = ffsl(pc->pc_map[field]) - 1; + break; + } + } + if (field < _NPCM) { + pv = &pc->pc_pventry[field * 64 + bit]; + pc->pc_map[field] &= ~(1ul << bit); + /* If this was the last item, move it to tail */ + if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && + pc->pc_map[2] == 0) { + TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); + TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, + pc_list); + } + PV_STAT(atomic_add_long(&pv_entry_count, 1)); + PV_STAT(atomic_subtract_int(&pv_entry_spare, 1)); + return (pv); + } + } + /* No free items, allocate another chunk */ + m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | + VM_ALLOC_WIRED); + if (m == NULL) { + if (lockp == NULL) { + PV_STAT(pc_chunk_tryfail++); + return (NULL); + } + m = reclaim_pv_chunk(pmap, lockp); + if (m == NULL) + goto retry; + } + PV_STAT(atomic_add_int(&pc_chunk_count, 1)); + PV_STAT(atomic_add_int(&pc_chunk_allocs, 1)); +#if 0 /* TODO: This is for minidump */ + dump_add_page(m->phys_addr); +#endif + pc = (void *)PHYS_TO_DMAP(m->phys_addr); + pc->pc_pmap = pmap; + pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */ + pc->pc_map[1] = PC_FREE1; + pc->pc_map[2] = PC_FREE2; + mtx_lock(&pv_chunks_mutex); + TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); + mtx_unlock(&pv_chunks_mutex); + pv = &pc->pc_pventry[0]; + TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); + PV_STAT(atomic_add_long(&pv_entry_count, 1)); + PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1)); + return (pv); +} + +/* + * First find and then remove the pv entry for the specified pmap and virtual + * address from the specified pv list. Returns the pv entry if found and NULL + * otherwise. This operation can be performed on pv lists for either 4KB or + * 2MB page mappings. 
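+ *
+ * The entry is returned to the caller, which is responsible for
+ * releasing it; pmap_pvh_free() below simply hands it to
+ * free_pv_entry().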
+ */ +static __inline pv_entry_t +pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) +{ + pv_entry_t pv; + + rw_assert(&pvh_global_lock, RA_LOCKED); + TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { + if (pmap == PV_PMAP(pv) && va == pv->pv_va) { + TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); + pvh->pv_gen++; + break; + } + } + return (pv); +} + +/* + * First find and then destroy the pv entry for the specified pmap and virtual + * address. This operation can be performed on pv lists for either 4KB or 2MB + * page mappings. + */ +static void +pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) +{ + pv_entry_t pv; + + pv = pmap_pvh_remove(pvh, pmap, va); + + KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); + free_pv_entry(pmap, pv); +} + +/* + * Conditionally create the PV entry for a 4KB page mapping if the required + * memory can be allocated without resorting to reclamation. + */ +static boolean_t +pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, + struct rwlock **lockp) +{ + pv_entry_t pv; + + rw_assert(&pvh_global_lock, RA_LOCKED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + /* Pass NULL instead of the lock pointer to disable reclamation. */ + if ((pv = get_pv_entry(pmap, NULL)) != NULL) { + pv->pv_va = va; + CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); + m->md.pv_gen++; + return (TRUE); + } else + return (FALSE); +} + +/* + * pmap_remove_l3: do the things to unmap a page in a process + */ +static int +pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va, + pd_entry_t l2e, struct spglist *free, struct rwlock **lockp) +{ + pt_entry_t old_l3; + vm_paddr_t phys; + vm_page_t m; + + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3))) + cpu_dcache_wb_range(va, L3_SIZE); + old_l3 = pmap_load_clear(l3); + PTE_SYNC(l3); + pmap_invalidate_page(pmap, va); + if (old_l3 & PTE_SW_WIRED) + pmap->pm_stats.wired_count -= 1; + pmap_resident_count_dec(pmap, 1); + if (old_l3 & PTE_SW_MANAGED) { + phys = PTE_TO_PHYS(old_l3); + m = PHYS_TO_VM_PAGE(phys); + if (pmap_page_dirty(old_l3)) + vm_page_dirty(m); + if (old_l3 & PTE_REF) + vm_page_aflag_set(m, PGA_REFERENCED); + CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); + pmap_pvh_free(&m->md, pmap, va); + } + + return (pmap_unuse_l3(pmap, va, l2e, free)); +} + +/* + * Remove the given range of addresses from the specified map. + * + * It is assumed that the start and end are properly + * rounded to the page size. + */ +void +pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) +{ + struct rwlock *lock; + vm_offset_t va, va_next; + pd_entry_t *l1, *l2; + pt_entry_t l3_pte, *l3; + struct spglist free; + int anyvalid; + + /* + * Perform an unsynchronized read. This is, however, safe. + */ + if (pmap->pm_stats.resident_count == 0) + return; + + anyvalid = 0; + SLIST_INIT(&free); + + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + + lock = NULL; + for (; sva < eva; sva = va_next) { + if (pmap->pm_stats.resident_count == 0) + break; + + l1 = pmap_l1(pmap, sva); + if (pmap_load(l1) == 0) { + va_next = (sva + L1_SIZE) & ~L1_OFFSET; + if (va_next < sva) + va_next = eva; + continue; + } + + /* + * Calculate index for next page table. + */ + va_next = (sva + L2_SIZE) & ~L2_OFFSET; + if (va_next < sva) + va_next = eva; + + l2 = pmap_l1_to_l2(l1, sva); + if (l2 == NULL) + continue; + + l3_pte = pmap_load(l2); + + /* + * Weed out invalid mappings. 
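+		 *
+		 * An empty l2 entry maps nothing, and an l2 entry that is
+		 * not a pointer to an l3 table is skipped as well.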
+ */ + if (l3_pte == 0) + continue; + if ((pmap_load(l2) & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S)) + continue; + + /* + * Limit our scan to either the end of the va represented + * by the current page table page, or to the end of the + * range being removed. + */ + if (va_next > eva) + va_next = eva; + + va = va_next; + for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, + sva += L3_SIZE) { + if (l3 == NULL) + panic("l3 == NULL"); + if (pmap_load(l3) == 0) { + if (va != va_next) { + pmap_invalidate_range(pmap, va, sva); + va = va_next; + } + continue; + } + if (va == va_next) + va = sva; + if (pmap_remove_l3(pmap, l3, sva, l3_pte, &free, + &lock)) { + sva += L3_SIZE; + break; + } + } + if (va != va_next) + pmap_invalidate_range(pmap, va, sva); + } + if (lock != NULL) + rw_wunlock(lock); + if (anyvalid) + pmap_invalidate_all(pmap); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); + pmap_free_zero_pages(&free); +} + +/* + * Routine: pmap_remove_all + * Function: + * Removes this physical page from + * all physical maps in which it resides. + * Reflects back modify bits to the pager. + * + * Notes: + * Original versions of this routine were very + * inefficient because they iteratively called + * pmap_remove (slow...) + */ + +void +pmap_remove_all(vm_page_t m) +{ + pv_entry_t pv; + pmap_t pmap; + pt_entry_t *l3, tl3; + pd_entry_t *l2, tl2; + struct spglist free; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_remove_all: page %p is not managed", m)); + SLIST_INIT(&free); + rw_wlock(&pvh_global_lock); + while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { + pmap = PV_PMAP(pv); + PMAP_LOCK(pmap); + pmap_resident_count_dec(pmap, 1); + l2 = pmap_l2(pmap, pv->pv_va); + KASSERT(l2 != NULL, ("pmap_remove_all: no l2 table found")); + tl2 = pmap_load(l2); + + KASSERT((tl2 & PTE_TYPE_M) == (PTE_TYPE_PTR << PTE_TYPE_S), + ("pmap_remove_all: found a table when expecting " + "a block in %p's pv list", m)); + + l3 = pmap_l2_to_l3(l2, pv->pv_va); + if (pmap_is_current(pmap) && + pmap_l3_valid_cacheable(pmap_load(l3))) + cpu_dcache_wb_range(pv->pv_va, L3_SIZE); + tl3 = pmap_load_clear(l3); + PTE_SYNC(l3); + pmap_invalidate_page(pmap, pv->pv_va); + if (tl3 & PTE_SW_WIRED) + pmap->pm_stats.wired_count--; + if ((tl3 & PTE_REF) != 0) + vm_page_aflag_set(m, PGA_REFERENCED); + + /* + * Update the vm_page_t clean and reference bits. + */ + if (pmap_page_dirty(tl3)) + vm_page_dirty(m); + pmap_unuse_l3(pmap, pv->pv_va, pmap_load(l2), &free); + TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); + m->md.pv_gen++; + free_pv_entry(pmap, pv); + PMAP_UNLOCK(pmap); + } + vm_page_aflag_clear(m, PGA_WRITEABLE); + rw_wunlock(&pvh_global_lock); + pmap_free_zero_pages(&free); +} + +/* + * Set the physical protection on the + * specified range of this map as requested. 
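+ *
+ * Dropping VM_PROT_READ entirely is delegated to pmap_remove(), and a
+ * request that keeps VM_PROT_WRITE leaves the range untouched.
+ * Otherwise each valid l3 entry has write permission removed from its
+ * type field, the same transformation pmap_remove_write() applies.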
+ */ +void +pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) +{ + vm_offset_t va, va_next; + pd_entry_t *l1, *l2; + pt_entry_t *l3p, l3; + pt_entry_t entry; + + if ((prot & VM_PROT_READ) == VM_PROT_NONE) { + pmap_remove(pmap, sva, eva); + return; + } + + if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE) + return; + + PMAP_LOCK(pmap); + for (; sva < eva; sva = va_next) { + + l1 = pmap_l1(pmap, sva); + if (pmap_load(l1) == 0) { + va_next = (sva + L1_SIZE) & ~L1_OFFSET; + if (va_next < sva) + va_next = eva; + continue; + } + + va_next = (sva + L2_SIZE) & ~L2_OFFSET; + if (va_next < sva) + va_next = eva; + + l2 = pmap_l1_to_l2(l1, sva); + if (l2 == NULL) + continue; + if ((pmap_load(l2) & PTE_TYPE_M) != (PTE_TYPE_PTR << PTE_TYPE_S)) + continue; + + if (va_next > eva) + va_next = eva; + + va = va_next; + for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++, + sva += L3_SIZE) { + l3 = pmap_load(l3p); + if (pmap_l3_valid(l3)) { + entry = pmap_load(l3p); + entry &= ~(1 << PTE_TYPE_S); + pmap_load_store(l3p, entry); + PTE_SYNC(l3p); + /* XXX: Use pmap_invalidate_range */ + pmap_invalidate_page(pmap, va); + } + } + } + PMAP_UNLOCK(pmap); + + /* TODO: Only invalidate entries we are touching */ + pmap_invalidate_all(pmap); +} + +/* + * Insert the given physical page (p) at + * the specified virtual address (v) in the + * target physical map with the protection requested. + * + * If specified, the page will be wired down, meaning + * that the related pte can not be reclaimed. + * + * NB: This is the only routine which MAY NOT lazy-evaluate + * or lose information. That is, this routine must actually + * insert this page into the given map NOW. + */ +int +pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, + u_int flags, int8_t psind __unused) +{ + struct rwlock *lock; + pd_entry_t *l1, *l2; + pt_entry_t new_l3, orig_l3; + pt_entry_t *l3; + pv_entry_t pv; + vm_paddr_t opa, pa, l2_pa, l3_pa; + vm_page_t mpte, om, l2_m, l3_m; + boolean_t nosleep; + pt_entry_t entry; + int l2_pn; + int l3_pn; + int pn; + + va = trunc_page(va); + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) + VM_OBJECT_ASSERT_LOCKED(m->object); + pa = VM_PAGE_TO_PHYS(m); + pn = (pa / PAGE_SIZE); + + new_l3 = PTE_VALID; + + if ((prot & VM_PROT_WRITE) == 0) { /* Read-only */ + if ((va >> 63) == 0) /* USER */ + new_l3 |= (PTE_TYPE_SURX << PTE_TYPE_S); + else /* KERNEL */ + new_l3 |= (PTE_TYPE_SRX << PTE_TYPE_S); + } else { + if ((va >> 63) == 0) /* USER */ + new_l3 |= (PTE_TYPE_SURWX << PTE_TYPE_S); + else /* KERNEL */ + new_l3 |= (PTE_TYPE_SRWX << PTE_TYPE_S); + } + + new_l3 |= (pn << PTE_PPN0_S); + if ((flags & PMAP_ENTER_WIRED) != 0) + new_l3 |= PTE_SW_WIRED; + + CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa); + + mpte = NULL; + + lock = NULL; + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + + if (va < VM_MAXUSER_ADDRESS) { + nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0; + mpte = pmap_alloc_l3(pmap, va, nosleep ? 
NULL : &lock); + if (mpte == NULL && nosleep) { + CTR0(KTR_PMAP, "pmap_enter: mpte == NULL"); + if (lock != NULL) + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); + return (KERN_RESOURCE_SHORTAGE); + } + l3 = pmap_l3(pmap, va); + } else { + l3 = pmap_l3(pmap, va); + /* TODO: This is not optimal, but should mostly work */ + if (l3 == NULL) { + l2 = pmap_l2(pmap, va); + if (l2 == NULL) { + l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | + VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | + VM_ALLOC_ZERO); + if (l2_m == NULL) + panic("pmap_enter: l2 pte_m == NULL"); + if ((l2_m->flags & PG_ZERO) == 0) + pmap_zero_page(l2_m); + + l2_pa = VM_PAGE_TO_PHYS(l2_m); + l2_pn = (l2_pa / PAGE_SIZE); + + l1 = pmap_l1(pmap, va); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (l2_pn << PTE_PPN0_S); + pmap_load_store(l1, entry); + PTE_SYNC(l1); + + l2 = pmap_l1_to_l2(l1, va); + } + + KASSERT(l2 != NULL, + ("No l2 table after allocating one")); + + l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | + VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); + if (l3_m == NULL) + panic("pmap_enter: l3 pte_m == NULL"); + if ((l3_m->flags & PG_ZERO) == 0) + pmap_zero_page(l3_m); + + l3_pa = VM_PAGE_TO_PHYS(l3_m); + l3_pn = (l3_pa / PAGE_SIZE); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (l3_pn << PTE_PPN0_S); + pmap_load_store(l2, entry); + PTE_SYNC(l2); + l3 = pmap_l2_to_l3(l2, va); + } + pmap_invalidate_page(pmap, va); + } + + om = NULL; + orig_l3 = pmap_load(l3); + opa = PTE_TO_PHYS(orig_l3); + + /* + * Is the specified virtual address already mapped? + */ + if (pmap_l3_valid(orig_l3)) { + /* + * Wiring change, just update stats. We don't worry about + * wiring PT pages as they remain resident as long as there + * are valid mappings in them. Hence, if a user page is wired, + * the PT page will be also. + */ + if ((flags & PMAP_ENTER_WIRED) != 0 && + (orig_l3 & PTE_SW_WIRED) == 0) + pmap->pm_stats.wired_count++; + else if ((flags & PMAP_ENTER_WIRED) == 0 && + (orig_l3 & PTE_SW_WIRED) != 0) + pmap->pm_stats.wired_count--; + + /* + * Remove the extra PT page reference. + */ + if (mpte != NULL) { + mpte->wire_count--; + KASSERT(mpte->wire_count > 0, + ("pmap_enter: missing reference to page table page," + " va: 0x%lx", va)); + } + + /* + * Has the physical page changed? + */ + if (opa == pa) { + /* + * No, might be a protection or wiring change. + */ + if ((orig_l3 & PTE_SW_MANAGED) != 0) { + new_l3 |= PTE_SW_MANAGED; + if (pmap_is_write(new_l3)) + vm_page_aflag_set(m, PGA_WRITEABLE); + } + goto validate; + } + + /* Flush the cache, there might be uncommitted data in it */ + if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3)) + cpu_dcache_wb_range(va, L3_SIZE); + } else { + /* + * Increment the counters. + */ + if ((new_l3 & PTE_SW_WIRED) != 0) + pmap->pm_stats.wired_count++; + pmap_resident_count_inc(pmap, 1); + } + /* + * Enter on the PV list if part of our managed memory. + */ + if ((m->oflags & VPO_UNMANAGED) == 0) { + new_l3 |= PTE_SW_MANAGED; + pv = get_pv_entry(pmap, &lock); + pv->pv_va = va; + CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa); + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); + m->md.pv_gen++; + if (pmap_is_write(new_l3)) + vm_page_aflag_set(m, PGA_WRITEABLE); + } + + /* + * Update the L3 entry. 
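+	 *
+	 * If the l3 slot already holds an entry it is replaced and the
+	 * old page's dirty/referenced state is transferred to its
+	 * vm_page; otherwise the new entry is simply stored.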
+ */ + if (orig_l3 != 0) { +validate: + orig_l3 = pmap_load_store(l3, new_l3); + PTE_SYNC(l3); + opa = PTE_TO_PHYS(orig_l3); + + if (opa != pa) { + if ((orig_l3 & PTE_SW_MANAGED) != 0) { + om = PHYS_TO_VM_PAGE(opa); + if (pmap_page_dirty(orig_l3)) + vm_page_dirty(om); + if ((orig_l3 & PTE_REF) != 0) + vm_page_aflag_set(om, PGA_REFERENCED); + CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa); + pmap_pvh_free(&om->md, pmap, va); + } + } else if (pmap_page_dirty(orig_l3)) { + if ((orig_l3 & PTE_SW_MANAGED) != 0) + vm_page_dirty(m); + } + } else { + pmap_load_store(l3, new_l3); + PTE_SYNC(l3); + } + pmap_invalidate_page(pmap, va); + if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap)) + cpu_icache_sync_range(va, PAGE_SIZE); + + if (lock != NULL) + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); + return (KERN_SUCCESS); +} + +/* + * Maps a sequence of resident pages belonging to the same object. + * The sequence begins with the given page m_start. This page is + * mapped at the given virtual address start. Each subsequent page is + * mapped at a virtual address that is offset from start by the same + * amount as the page is offset from m_start within the object. The + * last page in the sequence is the page with the largest offset from + * m_start that can be mapped at a virtual address less than the given + * virtual address end. Not every virtual page between start and end + * is mapped; only those for which a resident page exists with the + * corresponding offset from m_start are mapped. + */ +void +pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, + vm_page_t m_start, vm_prot_t prot) +{ + struct rwlock *lock; + vm_offset_t va; + vm_page_t m, mpte; + vm_pindex_t diff, psize; + + VM_OBJECT_ASSERT_LOCKED(m_start->object); + + psize = atop(end - start); + mpte = NULL; + m = m_start; + lock = NULL; + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { + va = start + ptoa(diff); + mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock); + m = TAILQ_NEXT(m, listq); + } + if (lock != NULL) + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + +/* + * this code makes some *MAJOR* assumptions: + * 1. Current pmap & pmap exists. + * 2. Not wired. + * 3. Read access. + * 4. No page table pages. + * but is *MUCH* faster than pmap_enter... + */ + +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) +{ + struct rwlock *lock; + + lock = NULL; + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock); + if (lock != NULL) + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + +static vm_page_t +pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, + vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp) +{ + struct spglist free; + vm_paddr_t phys; + pd_entry_t *l2; + pt_entry_t *l3; + vm_paddr_t pa; + pt_entry_t entry; + int pn; + + KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || + (m->oflags & VPO_UNMANAGED) != 0, + ("pmap_enter_quick_locked: managed mapping within the clean submap")); + rw_assert(&pvh_global_lock, RA_LOCKED); + PMAP_LOCK_ASSERT(pmap, MA_OWNED); + + CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va); + /* + * In the case that a page table page is not + * resident, we are creating it here. 
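+	 *
+	 * The mpte argument lets pmap_enter_object() reuse the page
+	 * table page found on the previous iteration when consecutive
+	 * addresses fall under the same l2 entry, saving one lookup per
+	 * page.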
+ */ + if (va < VM_MAXUSER_ADDRESS) { + vm_pindex_t l2pindex; + + /* + * Calculate pagetable page index + */ + l2pindex = pmap_l2_pindex(va); + if (mpte && (mpte->pindex == l2pindex)) { + mpte->wire_count++; + } else { + /* + * Get the l2 entry + */ + l2 = pmap_l2(pmap, va); + + /* + * If the page table page is mapped, we just increment + * the hold count, and activate it. Otherwise, we + * attempt to allocate a page table page. If this + * attempt fails, we don't retry. Instead, we give up. + */ + if (l2 != NULL && pmap_load(l2) != 0) { + phys = PTE_TO_PHYS(pmap_load(l2)); + mpte = PHYS_TO_VM_PAGE(phys); + mpte->wire_count++; + } else { + /* + * Pass NULL instead of the PV list lock + * pointer, because we don't intend to sleep. + */ + mpte = _pmap_alloc_l3(pmap, l2pindex, NULL); + if (mpte == NULL) + return (mpte); + } + } + l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte)); + l3 = &l3[pmap_l3_index(va)]; + } else { + mpte = NULL; + l3 = pmap_l3(kernel_pmap, va); + } + if (l3 == NULL) + panic("pmap_enter_quick_locked: No l3"); + if (pmap_load(l3) != 0) { + if (mpte != NULL) { + mpte->wire_count--; + mpte = NULL; + } + return (mpte); + } + + /* + * Enter on the PV list if part of our managed memory. + */ + if ((m->oflags & VPO_UNMANAGED) == 0 && + !pmap_try_insert_pv_entry(pmap, va, m, lockp)) { + if (mpte != NULL) { + SLIST_INIT(&free); + if (pmap_unwire_l3(pmap, va, mpte, &free)) { + pmap_invalidate_page(pmap, va); + pmap_free_zero_pages(&free); + } + mpte = NULL; + } + return (mpte); + } + + /* + * Increment counters + */ + pmap_resident_count_inc(pmap, 1); + + pa = VM_PAGE_TO_PHYS(m); + pn = (pa / PAGE_SIZE); + + /* RISCVTODO: check permissions */ + entry = (PTE_VALID | (PTE_TYPE_SRWX << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + + /* + * Now validate mapping with RO protection + */ + if ((m->oflags & VPO_UNMANAGED) == 0) + entry |= PTE_SW_MANAGED; + pmap_load_store(l3, entry); + + PTE_SYNC(l3); + pmap_invalidate_page(pmap, va); + return (mpte); +} + +/* + * This code maps large physical mmap regions into the + * processor address space. Note that some shortcuts + * are taken, but the code works. + */ +void +pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, + vm_pindex_t pindex, vm_size_t size) +{ + + VM_OBJECT_ASSERT_WLOCKED(object); + KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, + ("pmap_object_init_pt: non-device object")); +} + +/* + * Clear the wired attribute from the mappings for the specified range of + * addresses in the given pmap. Every valid mapping within that range + * must have the wired attribute set. In contrast, invalid mappings + * cannot have the wired attribute set, so they are ignored. + * + * The wired attribute of the page table entry is not a hardware feature, + * so there is no need to invalidate any TLB entries. 
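+ *
+ * PTE_SW_WIRED lives in the software-defined bits of the entry, so it
+ * is cleared with a plain atomic op and only the pmap's wired_count
+ * statistic needs updating.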
+ */ +void +pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) +{ + vm_offset_t va_next; + pd_entry_t *l1, *l2; + pt_entry_t *l3; + boolean_t pv_lists_locked; + + pv_lists_locked = FALSE; + PMAP_LOCK(pmap); + for (; sva < eva; sva = va_next) { + l1 = pmap_l1(pmap, sva); + if (pmap_load(l1) == 0) { + va_next = (sva + L1_SIZE) & ~L1_OFFSET; + if (va_next < sva) + va_next = eva; + continue; + } + + va_next = (sva + L2_SIZE) & ~L2_OFFSET; + if (va_next < sva) + va_next = eva; + + l2 = pmap_l1_to_l2(l1, sva); + if (pmap_load(l2) == 0) + continue; + + if (va_next > eva) + va_next = eva; + for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++, + sva += L3_SIZE) { + if (pmap_load(l3) == 0) + continue; + if ((pmap_load(l3) & PTE_SW_WIRED) == 0) + panic("pmap_unwire: l3 %#jx is missing " + "PTE_SW_WIRED", (uintmax_t)*l3); + + /* + * PG_W must be cleared atomically. Although the pmap + * lock synchronizes access to PG_W, another processor + * could be setting PG_M and/or PG_A concurrently. + */ + atomic_clear_long(l3, PTE_SW_WIRED); + pmap->pm_stats.wired_count--; + } + } + if (pv_lists_locked) + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + +/* + * Copy the range specified by src_addr/len + * from the source map to the range dst_addr/len + * in the destination map. + * + * This routine is only advisory and need not do anything. + */ + +void +pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, + vm_offset_t src_addr) +{ + +} + +/* + * pmap_zero_page zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. + */ +void +pmap_zero_page(vm_page_t m) +{ + vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); + + pagezero((void *)va); +} + +/* + * pmap_zero_page_area zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. + * + * off and size may not cover an area beyond a single hardware page. + */ +void +pmap_zero_page_area(vm_page_t m, int off, int size) +{ + vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); + + if (off == 0 && size == PAGE_SIZE) + pagezero((void *)va); + else + bzero((char *)va + off, size); +} + +/* + * pmap_zero_page_idle zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. This + * is intended to be called from the vm_pagezero process only and + * outside of Giant. + */ +void +pmap_zero_page_idle(vm_page_t m) +{ + vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); + + pagezero((void *)va); +} + +/* + * pmap_copy_page copies the specified (machine independent) + * page by mapping the page into virtual memory and using + * bcopy to copy the page, one machine dependent page at a + * time. 
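+ *
+ * On this pmap both pages are already reachable through the direct
+ * map, so no temporary mapping is needed; the copy goes straight
+ * through the DMAP addresses.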
+ */ +void +pmap_copy_page(vm_page_t msrc, vm_page_t mdst) +{ + vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc)); + vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst)); + + pagecopy((void *)src, (void *)dst); +} + +int unmapped_buf_allowed = 1; + +void +pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], + vm_offset_t b_offset, int xfersize) +{ + void *a_cp, *b_cp; + vm_page_t m_a, m_b; + vm_paddr_t p_a, p_b; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + while (xfersize > 0) { + a_pg_offset = a_offset & PAGE_MASK; + m_a = ma[a_offset >> PAGE_SHIFT]; + p_a = m_a->phys_addr; + b_pg_offset = b_offset & PAGE_MASK; + m_b = mb[b_offset >> PAGE_SHIFT]; + p_b = m_b->phys_addr; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + if (__predict_false(!PHYS_IN_DMAP(p_a))) { + panic("!DMAP a %lx", p_a); + } else { + a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset; + } + if (__predict_false(!PHYS_IN_DMAP(p_b))) { + panic("!DMAP b %lx", p_b); + } else { + b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset; + } + bcopy(a_cp, b_cp, cnt); + a_offset += cnt; + b_offset += cnt; + xfersize -= cnt; + } +} + +vm_offset_t +pmap_quick_enter_page(vm_page_t m) +{ + + return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); +} + +void +pmap_quick_remove_page(vm_offset_t addr) +{ +} + +/* + * Returns true if the pmap's pv is one of the first + * 16 pvs linked to from this page. This count may + * be changed upwards or downwards in the future; it + * is only necessary that true be returned for a small + * subset of pmaps for proper page aging. + */ +boolean_t +pmap_page_exists_quick(pmap_t pmap, vm_page_t m) +{ + struct rwlock *lock; + pv_entry_t pv; + int loops = 0; + boolean_t rv; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_page_exists_quick: page %p is not managed", m)); + rv = FALSE; + rw_rlock(&pvh_global_lock); + lock = VM_PAGE_TO_PV_LIST_LOCK(m); + rw_rlock(lock); + TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { + if (PV_PMAP(pv) == pmap) { + rv = TRUE; + break; + } + loops++; + if (loops >= 16) + break; + } + rw_runlock(lock); + rw_runlock(&pvh_global_lock); + return (rv); +} + +/* + * pmap_page_wired_mappings: + * + * Return the number of managed mappings to the given physical page + * that are wired. + */ +int +pmap_page_wired_mappings(vm_page_t m) +{ + struct rwlock *lock; + pmap_t pmap; + pt_entry_t *l3; + pv_entry_t pv; + int count, md_gen; + + if ((m->oflags & VPO_UNMANAGED) != 0) + return (0); + rw_rlock(&pvh_global_lock); + lock = VM_PAGE_TO_PV_LIST_LOCK(m); + rw_rlock(lock); +restart: + count = 0; + TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { + pmap = PV_PMAP(pv); + if (!PMAP_TRYLOCK(pmap)) { + md_gen = m->md.pv_gen; + rw_runlock(lock); + PMAP_LOCK(pmap); + rw_rlock(lock); + if (md_gen != m->md.pv_gen) { + PMAP_UNLOCK(pmap); + goto restart; + } + } + l3 = pmap_l3(pmap, pv->pv_va); + if (l3 != NULL && (pmap_load(l3) & PTE_SW_WIRED) != 0) + count++; + PMAP_UNLOCK(pmap); + } + rw_runlock(lock); + rw_runlock(&pvh_global_lock); + return (count); +} + +/* + * Destroy all managed, non-wired mappings in the given user-space + * pmap. This pmap cannot be active on any processor besides the + * caller. + * + * This function cannot be applied to the kernel pmap. Moreover, it + * is not intended for general use. It is only to be used during + * process termination. Consequently, it can be implemented in ways + * that make it faster than pmap_remove(). 
First, it can more quickly + * destroy mappings by iterating over the pmap's collection of PV + * entries, rather than searching the page table. Second, it doesn't + * have to test and clear the page table entries atomically, because + * no processor is currently accessing the user address space. In + * particular, a page table entry's dirty bit won't change state once + * this function starts. + */ +void +pmap_remove_pages(pmap_t pmap) +{ + pd_entry_t ptepde, *l2; + pt_entry_t *l3, tl3; + struct spglist free; + vm_page_t m; + pv_entry_t pv; + struct pv_chunk *pc, *npc; + struct rwlock *lock; + int64_t bit; + uint64_t inuse, bitmask; + int allfree, field, freed, idx; + vm_paddr_t pa; + + lock = NULL; + + SLIST_INIT(&free); + rw_rlock(&pvh_global_lock); + PMAP_LOCK(pmap); + TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { + allfree = 1; + freed = 0; + for (field = 0; field < _NPCM; field++) { + inuse = ~pc->pc_map[field] & pc_freemask[field]; + while (inuse != 0) { + bit = ffsl(inuse) - 1; + bitmask = 1UL << bit; + idx = field * 64 + bit; + pv = &pc->pc_pventry[idx]; + inuse &= ~bitmask; + + l2 = pmap_l2(pmap, pv->pv_va); + ptepde = pmap_load(l2); + l3 = pmap_l2_to_l3(l2, pv->pv_va); + tl3 = pmap_load(l3); + +/* + * We cannot remove wired pages from a process' mapping at this time + */ + if (tl3 & PTE_SW_WIRED) { + allfree = 0; + continue; + } + + pa = PTE_TO_PHYS(tl3); + m = PHYS_TO_VM_PAGE(pa); + KASSERT(m->phys_addr == pa, + ("vm_page_t %p phys_addr mismatch %016jx %016jx", + m, (uintmax_t)m->phys_addr, + (uintmax_t)tl3)); + + KASSERT((m->flags & PG_FICTITIOUS) != 0 || + m < &vm_page_array[vm_page_array_size], + ("pmap_remove_pages: bad l3 %#jx", + (uintmax_t)tl3)); + + if (pmap_is_current(pmap) && + pmap_l3_valid_cacheable(pmap_load(l3))) + cpu_dcache_wb_range(pv->pv_va, L3_SIZE); + pmap_load_clear(l3); + PTE_SYNC(l3); + pmap_invalidate_page(pmap, pv->pv_va); + + /* + * Update the vm_page_t clean/reference bits. + */ + if (pmap_page_dirty(tl3)) + vm_page_dirty(m); + + CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m); + + /* Mark free */ + pc->pc_map[field] |= bitmask; + + pmap_resident_count_dec(pmap, 1); + TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); + m->md.pv_gen++; + + pmap_unuse_l3(pmap, pv->pv_va, ptepde, &free); + freed++; + } + } + PV_STAT(atomic_add_long(&pv_entry_frees, freed)); + PV_STAT(atomic_add_int(&pv_entry_spare, freed)); + PV_STAT(atomic_subtract_long(&pv_entry_count, freed)); + if (allfree) { + TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); + free_pv_chunk(pc); + } + } + pmap_invalidate_all(pmap); + if (lock != NULL) + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); + pmap_free_zero_pages(&free); +} + +/* + * This is used to check if a page has been accessed or modified. As we + * don't have a bit to see if it has been modified we have to assume it + * has been if the page is read/write. 
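+ *
+ * Concretely, the loop below checks PTE_DIRTY for the modified case
+ * and PTE_REF for the accessed case of each mapping of the page.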
+ */ +static boolean_t +pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) +{ + struct rwlock *lock; + pv_entry_t pv; + pt_entry_t *l3, mask, value; + pmap_t pmap; + int md_gen; + boolean_t rv; + + rv = FALSE; + rw_rlock(&pvh_global_lock); + lock = VM_PAGE_TO_PV_LIST_LOCK(m); + rw_rlock(lock); +restart: + TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { + pmap = PV_PMAP(pv); + if (!PMAP_TRYLOCK(pmap)) { + md_gen = m->md.pv_gen; + rw_runlock(lock); + PMAP_LOCK(pmap); + rw_rlock(lock); + if (md_gen != m->md.pv_gen) { + PMAP_UNLOCK(pmap); + goto restart; + } + } + l3 = pmap_l3(pmap, pv->pv_va); + mask = 0; + value = 0; + if (modified) { + mask |= PTE_DIRTY; + value |= PTE_DIRTY; + } + if (accessed) { + mask |= PTE_REF; + value |= PTE_REF; + } + +#if 0 + if (modified) { + mask |= ATTR_AP_RW_BIT; + value |= ATTR_AP(ATTR_AP_RW); + } + if (accessed) { + mask |= ATTR_AF | ATTR_DESCR_MASK; + value |= ATTR_AF | L3_PAGE; + } +#endif + + rv = (pmap_load(l3) & mask) == value; + PMAP_UNLOCK(pmap); + if (rv) + goto out; + } +out: + rw_runlock(lock); + rw_runlock(&pvh_global_lock); + return (rv); +} + +/* + * pmap_is_modified: + * + * Return whether or not the specified physical page was modified + * in any physical maps. + */ +boolean_t +pmap_is_modified(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_is_modified: page %p is not managed", m)); + + /* + * If the page is not exclusive busied, then PGA_WRITEABLE cannot be + * concurrently set while the object is locked. Thus, if PGA_WRITEABLE + * is clear, no PTEs can have PG_M set. + */ + VM_OBJECT_ASSERT_WLOCKED(m->object); + if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) + return (FALSE); + return (pmap_page_test_mappings(m, FALSE, TRUE)); +} + +/* + * pmap_is_prefaultable: + * + * Return whether or not the specified virtual address is eligible + * for prefault. + */ +boolean_t +pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) +{ + pt_entry_t *l3; + boolean_t rv; + + rv = FALSE; + PMAP_LOCK(pmap); + l3 = pmap_l3(pmap, addr); + if (l3 != NULL && pmap_load(l3) != 0) { + rv = TRUE; + } + PMAP_UNLOCK(pmap); + return (rv); +} + +/* + * pmap_is_referenced: + * + * Return whether or not the specified physical page was referenced + * in any physical maps. + */ +boolean_t +pmap_is_referenced(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_is_referenced: page %p is not managed", m)); + return (pmap_page_test_mappings(m, TRUE, FALSE)); +} + +/* + * Clear the write and modified bits in each of the given page's mappings. + */ +void +pmap_remove_write(vm_page_t m) +{ + pmap_t pmap; + struct rwlock *lock; + pv_entry_t pv; + pt_entry_t *l3, oldl3; + pt_entry_t newl3; + int md_gen; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_remove_write: page %p is not managed", m)); + + /* + * If the page is not exclusive busied, then PGA_WRITEABLE cannot be + * set by another thread while the object is locked. Thus, + * if PGA_WRITEABLE is clear, no page table entries need updating. 
+ */ + VM_OBJECT_ASSERT_WLOCKED(m->object); + if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) + return; + rw_rlock(&pvh_global_lock); + lock = VM_PAGE_TO_PV_LIST_LOCK(m); +retry_pv_loop: + rw_wlock(lock); + TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { + pmap = PV_PMAP(pv); + if (!PMAP_TRYLOCK(pmap)) { + md_gen = m->md.pv_gen; + rw_wunlock(lock); + PMAP_LOCK(pmap); + rw_wlock(lock); + if (md_gen != m->md.pv_gen) { + PMAP_UNLOCK(pmap); + rw_wunlock(lock); + goto retry_pv_loop; + } + } + l3 = pmap_l3(pmap, pv->pv_va); +retry: + oldl3 = pmap_load(l3); + + if (pmap_is_write(oldl3)) { + newl3 = oldl3 & ~(1 << PTE_TYPE_S); + if (!atomic_cmpset_long(l3, oldl3, newl3)) + goto retry; + /* TODO: use pmap_page_dirty(oldl3) ? */ + if ((oldl3 & PTE_REF) != 0) + vm_page_dirty(m); + pmap_invalidate_page(pmap, pv->pv_va); + } + PMAP_UNLOCK(pmap); + } + rw_wunlock(lock); + vm_page_aflag_clear(m, PGA_WRITEABLE); + rw_runlock(&pvh_global_lock); +} + +static __inline boolean_t +safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) +{ + + return (FALSE); +} + +#define PMAP_TS_REFERENCED_MAX 5 + +/* + * pmap_ts_referenced: + * + * Return a count of reference bits for a page, clearing those bits. + * It is not necessary for every reference bit to be cleared, but it + * is necessary that 0 only be returned when there are truly no + * reference bits set. + * + * XXX: The exact number of bits to check and clear is a matter that + * should be tested and standardized at some point in the future for + * optimal aging of shared pages. + */ +int +pmap_ts_referenced(vm_page_t m) +{ + pv_entry_t pv, pvf; + pmap_t pmap; + struct rwlock *lock; + pd_entry_t *l2; + pt_entry_t *l3; + vm_paddr_t pa; + int cleared, md_gen, not_cleared; + struct spglist free; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_ts_referenced: page %p is not managed", m)); + SLIST_INIT(&free); + cleared = 0; + pa = VM_PAGE_TO_PHYS(m); + lock = PHYS_TO_PV_LIST_LOCK(pa); + rw_rlock(&pvh_global_lock); + rw_wlock(lock); +retry: + not_cleared = 0; + if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) + goto out; + pv = pvf; + do { + if (pvf == NULL) + pvf = pv; + pmap = PV_PMAP(pv); + if (!PMAP_TRYLOCK(pmap)) { + md_gen = m->md.pv_gen; + rw_wunlock(lock); + PMAP_LOCK(pmap); + rw_wlock(lock); + if (md_gen != m->md.pv_gen) { + PMAP_UNLOCK(pmap); + goto retry; + } + } + l2 = pmap_l2(pmap, pv->pv_va); + + KASSERT((pmap_load(l2) & PTE_TYPE_M) == (PTE_TYPE_PTR << PTE_TYPE_S), + ("pmap_ts_referenced: found an invalid l2 table")); + + l3 = pmap_l2_to_l3(l2, pv->pv_va); + if ((pmap_load(l3) & PTE_REF) != 0) { + if (safe_to_clear_referenced(pmap, pmap_load(l3))) { + /* + * TODO: We don't handle the access flag + * at all. We need to be able to set it in + * the exception handler. + */ + panic("RISCVTODO: safe_to_clear_referenced\n"); + } else if ((pmap_load(l3) & PTE_SW_WIRED) == 0) { + /* + * Wired pages cannot be paged out so + * doing accessed bit emulation for + * them is wasted effort. We do the + * hard work for unwired pages only. + */ + pmap_remove_l3(pmap, l3, pv->pv_va, + pmap_load(l2), &free, &lock); + pmap_invalidate_page(pmap, pv->pv_va); + cleared++; + if (pvf == pv) + pvf = NULL; + pv = NULL; + KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m), + ("inconsistent pv lock %p %p for page %p", + lock, VM_PAGE_TO_PV_LIST_LOCK(m), m)); + } else + not_cleared++; + } + PMAP_UNLOCK(pmap); + /* Rotate the PV list if it has more than one entry. 
*/ + if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) { + TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); + m->md.pv_gen++; + } + } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared + + not_cleared < PMAP_TS_REFERENCED_MAX); +out: + rw_wunlock(lock); + rw_runlock(&pvh_global_lock); + pmap_free_zero_pages(&free); + return (cleared + not_cleared); +} + +/* + * Apply the given advice to the specified range of addresses within the + * given pmap. Depending on the advice, clear the referenced and/or + * modified flags in each mapping and set the mapped page's dirty field. + */ +void +pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) +{ +} + +/* + * Clear the modify bits on the specified physical page. + */ +void +pmap_clear_modify(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_clear_modify: page %p is not managed", m)); + VM_OBJECT_ASSERT_WLOCKED(m->object); + KASSERT(!vm_page_xbusied(m), + ("pmap_clear_modify: page %p is exclusive busied", m)); + + /* + * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set. + * If the object containing the page is locked and the page is not + * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. + */ + if ((m->aflags & PGA_WRITEABLE) == 0) + return; + + /* RISCVTODO: We lack support for tracking if a page is modified */ +} + +void * +pmap_mapbios(vm_paddr_t pa, vm_size_t size) +{ + + return ((void *)PHYS_TO_DMAP(pa)); +} + +void +pmap_unmapbios(vm_paddr_t pa, vm_size_t size) +{ +} + +/* + * Sets the memory attribute for the specified page. + */ +void +pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) +{ + + m->md.pv_memattr = ma; + + /* + * RISCVTODO: Implement the below (from the amd64 pmap) + * If "m" is a normal page, update its direct mapping. This update + * can be relied upon to perform any cache operations that are + * required for data coherence. + */ + if ((m->flags & PG_FICTITIOUS) == 0 && + PHYS_IN_DMAP(VM_PAGE_TO_PHYS(m))) + panic("RISCVTODO: pmap_page_set_memattr"); +} + +/* + * perform the pmap work for mincore + */ +int +pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) +{ + + panic("RISCVTODO: pmap_mincore"); +} + +void +pmap_activate(struct thread *td) +{ + uint64_t entry; + uint64_t pn; + pmap_t pmap; + + critical_enter(); + pmap = vmspace_pmap(td->td_proc->p_vmspace); + td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1); + + pn = (td->td_pcb->pcb_l1addr / PAGE_SIZE); + entry = (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)); + entry |= (pn << PTE_PPN0_S); + pmap_load_store(&pagetable_l0, entry); + + pmap_invalidate_all(pmap); + critical_exit(); +} + +void +pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) +{ + + panic("RISCVTODO: pmap_sync_icache"); +} + +/* + * Increase the starting virtual address of the given mapping if a + * different alignment might result in more superpage mappings. + */ +void +pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, + vm_offset_t *addr, vm_size_t size) +{ +} + +/** + * Get the kernel virtual address of a set of physical pages. If there are + * physical addresses not covered by the DMAP perform a transient mapping + * that will be removed when calling pmap_unmap_io_transient. + * + * \param page The pages the caller wishes to obtain the virtual + * address on the kernel memory map. + * \param vaddr On return contains the kernel virtual memory address + * of the pages passed in the page parameter. + * \param count Number of pages passed in. 
+ * \param can_fault TRUE if the thread using the mapped pages can take + * page faults, FALSE otherwise. + * + * \returns TRUE if the caller must call pmap_unmap_io_transient when + * finished or FALSE otherwise. + * + */ +boolean_t +pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, + boolean_t can_fault) +{ + vm_paddr_t paddr; + boolean_t needs_mapping; + int error, i; + + /* + * Allocate any KVA space that we need, this is done in a separate + * loop to prevent calling vmem_alloc while pinned. + */ + needs_mapping = FALSE; + for (i = 0; i < count; i++) { + paddr = VM_PAGE_TO_PHYS(page[i]); + if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) { + error = vmem_alloc(kernel_arena, PAGE_SIZE, + M_BESTFIT | M_WAITOK, &vaddr[i]); + KASSERT(error == 0, ("vmem_alloc failed: %d", error)); + needs_mapping = TRUE; + } else { + vaddr[i] = PHYS_TO_DMAP(paddr); + } + } + + /* Exit early if everything is covered by the DMAP */ + if (!needs_mapping) + return (FALSE); + + if (!can_fault) + sched_pin(); + for (i = 0; i < count; i++) { + paddr = VM_PAGE_TO_PHYS(page[i]); + if (paddr >= DMAP_MAX_PHYSADDR) { + panic( + "pmap_map_io_transient: TODO: Map out of DMAP data"); + } + } + + return (needs_mapping); +} + +void +pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count, + boolean_t can_fault) +{ + vm_paddr_t paddr; + int i; + + if (!can_fault) + sched_unpin(); + for (i = 0; i < count; i++) { + paddr = VM_PAGE_TO_PHYS(page[i]); + if (paddr >= DMAP_MAX_PHYSADDR) { + panic("RISCVTODO: pmap_unmap_io_transient: Unmap data"); + } + } +} diff --git a/sys/riscv/riscv/support.S b/sys/riscv/riscv/support.S new file mode 100644 index 000000000000..59e2680fa33e --- /dev/null +++ b/sys/riscv/riscv/support.S @@ -0,0 +1,295 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include + +#include "assym.s" + +/* + * One of the fu* or su* functions failed, return -1. + */ +ENTRY(fsu_fault) + SET_FAULT_HANDLER(x0, a1) /* Reset the handler function */ +fsu_fault_nopcb: + li a0, -1 + ret +END(fsu_fault) + +/* + * int casueword32(volatile uint32_t *, uint32_t, uint32_t *, uint32_t) + */ +ENTRY(casueword32) + li a4, (VM_MAXUSER_ADDRESS-3) + bgt a0, a4, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a4) /* And set it */ +1: lr.w a4, 0(a0) /* Load-exclusive the data */ + bne a4, a1, 2f /* If not equal then exit */ + sc.w a5, a3, 0(a0) /* Store the new data */ + bnez a5, 1b /* Retry on failure */ +2: SET_FAULT_HANDLER(x0, a5) /* Reset the fault handler */ + sw a4, 0(a2) /* Store the read data */ + li a0, 0 /* Success */ + ret /* Return */ +END(casueword32) + +/* + * int casueword(volatile u_long *, u_long, u_long *, u_long) + */ +ENTRY(casueword) + li a4, (VM_MAXUSER_ADDRESS-7) + bgt a0, a4, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a4) /* And set it */ +1: lr.d a4, 0(a0) /* Load-exclusive the data */ + bne a4, a1, 2f /* If not equal then exit */ + sc.d a5, a3, 0(a0) /* Store the new data */ + bnez a5, 1b /* Retry on failure */ +2: SET_FAULT_HANDLER(x0, a5) /* Reset the fault handler */ + sd a4, 0(a2) /* Store the read data */ + li a0, 0 /* Success */ + ret /* Return */ +END(casueword) + +/* + * int fubyte(volatile const void *) + */ +ENTRY(fubyte) + li a1, VM_MAXUSER_ADDRESS + bgt a0, a1, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a1) /* And set it */ + lb a0, 0(a0) /* Try loading the data */ + SET_FAULT_HANDLER(x0, a1) /* Reset the fault handler */ + ret /* Return */ +END(fubyte) + +/* + * int fuword(volatile const void *) + */ +ENTRY(fuword16) + li a1, (VM_MAXUSER_ADDRESS-1) + bgt a0, a1, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a1) /* And set it */ + lh a0, 0(a0) /* Try loading the data */ + SET_FAULT_HANDLER(x0, a1) /* Reset the fault handler */ + ret /* Return */ +END(fuword16) + +/* + * int32_t fueword32(volatile const void *, int32_t *) + */ +ENTRY(fueword32) + li a2, (VM_MAXUSER_ADDRESS-3) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + lw a0, 0(a0) /* Try loading the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + sw a0, 0(a1) /* Save the data in kernel space */ + li a0, 0 /* Success */ + ret /* Return */ +END(fueword32) + +/* + * long fueword(volatile const void *, int64_t *) + * int64_t fueword64(volatile const void *, int64_t *) + */ +ENTRY(fueword) +EENTRY(fueword64) + li a2, (VM_MAXUSER_ADDRESS-7) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + ld a0, 0(a0) /* Try loading the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + 
sd a0, 0(a1) /* Save the data in kernel space */ + li a0, 0 /* Success */ + ret /* Return */ +EEND(fueword64) +END(fueword) + +/* + * int subyte(volatile void *, int) + */ +ENTRY(subyte) + li a2, VM_MAXUSER_ADDRESS + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + sb a1, 0(a0) /* Try storing the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + li a0, 0 /* Success */ + ret /* Return */ +END(subyte) + +/* + * int suword16(volatile void *, int) + */ +ENTRY(suword16) + li a2, (VM_MAXUSER_ADDRESS-1) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + sh a1, 0(a0) /* Try storing the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + li a0, 0 /* Success */ + ret /* Return */ +END(suword16) + +/* + * int suword32(volatile void *, int) + */ +ENTRY(suword32) + li a2, (VM_MAXUSER_ADDRESS-3) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + sw a1, 0(a0) /* Try storing the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + li a0, 0 /* Success */ + ret /* Return */ +END(suword32) + +/* + * int suword(volatile void *, long) + */ +ENTRY(suword) +EENTRY(suword64) + li a2, (VM_MAXUSER_ADDRESS-7) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + sd a1, 0(a0) /* Try storing the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + li a0, 0 /* Success */ + ret /* Return */ +EEND(suword64) +END(suword) + +/* + * fuswintr and suswintr are just like fusword and susword except that if + * the page is not in memory or would cause a trap, then we return an error. + * The important thing is to prevent sleep() and switch(). + */ + +/* + * Special handler so the trap code knows not to sleep. 
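+ * The data abort handler compares pcb_onfault against this address and, on + * a match, resumes execution here (returning -1) rather than calling + * vm_fault(), which may sleep.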
+ */ +ENTRY(fsu_intr_fault) + SET_FAULT_HANDLER(x0, a1) /* Reset the handler function */ + li a0, -1 + ret +END(fsu_intr_fault) + +/* + * int fuswintr(void *) + */ +ENTRY(fuswintr) + li a1, (VM_MAXUSER_ADDRESS-3) + bgt a0, a1, fsu_fault_nopcb + la a6, fsu_intr_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a1) /* And set it */ + lw a0, 0(a0) /* Try loading the data */ + SET_FAULT_HANDLER(x0, a1) /* Reset the fault handler */ + ret /* Return */ +END(fuswintr) + +/* + * int suswintr(void *base, int word) + */ +ENTRY(suswintr) + li a2, (VM_MAXUSER_ADDRESS-3) + bgt a0, a2, fsu_fault_nopcb + la a6, fsu_intr_fault /* Load the fault handler */ + SET_FAULT_HANDLER(a6, a2) /* And set it */ + sw a1, 0(a0) /* Try storing the data */ + SET_FAULT_HANDLER(x0, a2) /* Reset the fault handler */ + li a0, 0 /* Success */ + ret /* Return */ +END(suswintr) + +ENTRY(setjmp) + /* Store the stack pointer */ + sd sp, 0(a0) + addi a0, a0, 8 + + /* Store the general purpose registers and ra */ + sd s0, (0 * 8)(a0) + sd s1, (1 * 8)(a0) + sd s2, (2 * 8)(a0) + sd s3, (3 * 8)(a0) + sd s4, (4 * 8)(a0) + sd s5, (5 * 8)(a0) + sd s6, (6 * 8)(a0) + sd s7, (7 * 8)(a0) + sd s8, (8 * 8)(a0) + sd s9, (9 * 8)(a0) + sd s10, (10 * 8)(a0) + sd s11, (11 * 8)(a0) + sd ra, (12 * 8)(a0) + + /* Return value */ + li a0, 0 + ret +END(setjmp) + +ENTRY(longjmp) + /* Restore the stack pointer */ + ld sp, 0(a0) + addi a0, a0, 8 + + /* Restore the general purpose registers and ra */ + ld s0, (0 * 8)(a0) + ld s1, (1 * 8)(a0) + ld s2, (2 * 8)(a0) + ld s3, (3 * 8)(a0) + ld s4, (4 * 8)(a0) + ld s5, (5 * 8)(a0) + ld s6, (6 * 8)(a0) + ld s7, (7 * 8)(a0) + ld s8, (8 * 8)(a0) + ld s9, (9 * 8)(a0) + ld s10, (10 * 8)(a0) + ld s11, (11 * 8)(a0) + ld ra, (12 * 8)(a0) + + /* Load the return value */ + mv a0, a1 + ret +END(longjmp) diff --git a/sys/riscv/riscv/swtch.S b/sys/riscv/riscv/swtch.S new file mode 100644 index 000000000000..945fce354972 --- /dev/null +++ b/sys/riscv/riscv/swtch.S @@ -0,0 +1,272 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "assym.s" +#include "opt_sched.h" + +#include +#include +#include +#include + +__FBSDID("$FreeBSD$"); + +/* + * void cpu_throw(struct thread *old, struct thread *new) + */ +ENTRY(cpu_throw) + /* Load pcpu */ + la x14, pcpup + ld x14, 0(x14) + /* Store the new curthread */ + sd a1, PC_CURTHREAD(x14) + /* And the new pcb */ + ld x13, TD_PCB(a1) + sd x13, PC_CURPCB(x14) + + sfence.vm + + /* Switch to the new pmap */ + la t0, pagetable_l0 + ld t1, PCB_L1ADDR(x13) /* Link to next level PN */ + srli t1, t1, PAGE_SHIFT /* PN no */ + li t2, (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)) + slli t3, t1, PTE_PPN0_S /* (t1 << PTE_PPN0_S) */ + or t4, t2, t3 + /* Store single level0 PTE entry to position */ + sd t4, 0(t0) + + /* TODO: Invalidate the TLB */ + + sfence.vm + + /* Load registers */ + ld ra, (PCB_RA)(x13) + ld sp, (PCB_SP)(x13) + ld gp, (PCB_GP)(x13) + ld tp, (PCB_TP)(x13) + + /* s[0-11] */ + ld s0, (PCB_S + 0 * 8)(x13) + ld s1, (PCB_S + 1 * 8)(x13) + ld s2, (PCB_S + 2 * 8)(x13) + ld s3, (PCB_S + 3 * 8)(x13) + ld s4, (PCB_S + 4 * 8)(x13) + ld s5, (PCB_S + 5 * 8)(x13) + ld s6, (PCB_S + 6 * 8)(x13) + ld s7, (PCB_S + 7 * 8)(x13) + ld s8, (PCB_S + 8 * 8)(x13) + ld s9, (PCB_S + 9 * 8)(x13) + ld s10, (PCB_S + 10 * 8)(x13) + ld s11, (PCB_S + 11 * 8)(x13) + ret + +.Lcpu_throw_panic_str: + .asciz "cpu_throw: %p\0" +END(cpu_throw) + +/* + * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx) + * + * a0 = old + * a1 = new + * a2 = mtx + * x3 to x7, x16 and x17 are caller saved + */ +ENTRY(cpu_switch) + /* Load pcpu */ + la x14, pcpup + ld x14, 0(x14) + /* Store the new curthread */ + sd a1, PC_CURTHREAD(x14) + /* And the new pcb */ + ld x13, TD_PCB(a1) + sd x13, PC_CURPCB(x14) + + /* Save the old context. */ + ld x13, TD_PCB(a0) + + /* Store the callee-saved registers */ + sd ra, (PCB_RA)(x13) + sd sp, (PCB_SP)(x13) + sd gp, (PCB_GP)(x13) + sd tp, (PCB_TP)(x13) + + /* We use these in fork_trampoline */ + sd t0, (PCB_T + 0 * 8)(x13) + sd t1, (PCB_T + 1 * 8)(x13) + + /* s[0-11] */ + sd s0, (PCB_S + 0 * 8)(x13) + sd s1, (PCB_S + 1 * 8)(x13) + sd s2, (PCB_S + 2 * 8)(x13) + sd s3, (PCB_S + 3 * 8)(x13) + sd s4, (PCB_S + 4 * 8)(x13) + sd s5, (PCB_S + 5 * 8)(x13) + sd s6, (PCB_S + 6 * 8)(x13) + sd s7, (PCB_S + 7 * 8)(x13) + sd s8, (PCB_S + 8 * 8)(x13) + sd s9, (PCB_S + 9 * 8)(x13) + sd s10, (PCB_S + 10 * 8)(x13) + sd s11, (PCB_S + 11 * 8)(x13) + + /* + * Restore the saved context. + */ + ld x13, TD_PCB(a1) + + /* + * TODO: We may need to flush the cache here if switching + * to a user process. 
+ */ + + sfence.vm + + /* Switch to the new pmap */ + la t0, pagetable_l0 + ld t1, PCB_L1ADDR(x13) /* Link to next level PN */ + srli t1, t1, PAGE_SHIFT /* PN no */ + li t2, (PTE_VALID | (PTE_TYPE_PTR << PTE_TYPE_S)) + slli t3, t1, PTE_PPN0_S /* (t1 << PTE_PPN0_S) */ + or t4, t2, t3 + /* Store single level0 PTE entry to position */ + sd t4, 0(t0) + + /* TODO: Invalidate the TLB */ + + sfence.vm + + /* Release the old thread */ + sd a2, TD_LOCK(a0) +#if defined(SCHED_ULE) && defined(SMP) + /* TODO */ +#endif + + /* Restore the registers */ + ld ra, (PCB_RA)(x13) + ld sp, (PCB_SP)(x13) + ld gp, (PCB_GP)(x13) + ld tp, (PCB_TP)(x13) + + /* We use these in fork_trampoline */ + ld t0, (PCB_T + 0 * 8)(x13) + ld t1, (PCB_T + 1 * 8)(x13) + + /* s[0-11] */ + ld s0, (PCB_S + 0 * 8)(x13) + ld s1, (PCB_S + 1 * 8)(x13) + ld s2, (PCB_S + 2 * 8)(x13) + ld s3, (PCB_S + 3 * 8)(x13) + ld s4, (PCB_S + 4 * 8)(x13) + ld s5, (PCB_S + 5 * 8)(x13) + ld s6, (PCB_S + 6 * 8)(x13) + ld s7, (PCB_S + 7 * 8)(x13) + ld s8, (PCB_S + 8 * 8)(x13) + ld s9, (PCB_S + 9 * 8)(x13) + ld s10, (PCB_S + 10 * 8)(x13) + ld s11, (PCB_S + 11 * 8)(x13) + ret +.Lcpu_switch_panic_str: + .asciz "cpu_switch: %p\0" +END(cpu_switch) + +/* + * fork_exit(void (*callout)(void *, struct trapframe *), void *arg, + * struct trapframe *frame) + */ + +ENTRY(fork_trampoline) + mv a0, x5 + mv a1, x6 + mv a2, sp + call _C_LABEL(fork_exit) + + /* Restore sstatus */ + ld t0, (TF_SSTATUS)(sp) + /* Ensure interrupts disabled */ + li t1, ~SSTATUS_IE + and t0, t0, t1 + csrw sstatus, t0 + + /* Restore exception program counter */ + ld t0, (TF_SEPC)(sp) + csrw sepc, t0 + + /* Restore the registers */ + ld t0, (TF_T + 0 * 8)(sp) + ld t1, (TF_T + 1 * 8)(sp) + ld t2, (TF_T + 2 * 8)(sp) + ld t3, (TF_T + 3 * 8)(sp) + ld t4, (TF_T + 4 * 8)(sp) + ld t5, (TF_T + 5 * 8)(sp) + ld t6, (TF_T + 6 * 8)(sp) + + ld s0, (TF_S + 0 * 8)(sp) + ld s1, (TF_S + 1 * 8)(sp) + ld s2, (TF_S + 2 * 8)(sp) + ld s3, (TF_S + 3 * 8)(sp) + ld s4, (TF_S + 4 * 8)(sp) + ld s5, (TF_S + 5 * 8)(sp) + ld s6, (TF_S + 6 * 8)(sp) + ld s7, (TF_S + 7 * 8)(sp) + ld s8, (TF_S + 8 * 8)(sp) + ld s9, (TF_S + 9 * 8)(sp) + ld s10, (TF_S + 10 * 8)(sp) + ld s11, (TF_S + 11 * 8)(sp) + + ld a0, (TF_A + 0 * 8)(sp) + ld a1, (TF_A + 1 * 8)(sp) + ld a2, (TF_A + 2 * 8)(sp) + ld a3, (TF_A + 3 * 8)(sp) + ld a4, (TF_A + 4 * 8)(sp) + ld a5, (TF_A + 5 * 8)(sp) + ld a6, (TF_A + 6 * 8)(sp) + ld a7, (TF_A + 7 * 8)(sp) + + /* Save kernel stack so we can use it doing a user trap */ + csrw sscratch, sp + + /* Load user ra and sp */ + ld ra, (TF_RA)(sp) + ld sp, (TF_SP)(sp) + + eret +END(fork_trampoline) + +ENTRY(savectx) + la a0, .Lsavectx_panic_str + call panic +.Lsavectx_panic_str: + .asciz "savectx_panic: %p\0" +END(savectx) diff --git a/sys/riscv/riscv/sys_machdep.c b/sys/riscv/riscv/sys_machdep.c new file mode 100644 index 000000000000..8fdc7dbe0aaf --- /dev/null +++ b/sys/riscv/riscv/sys_machdep.c @@ -0,0 +1,49 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include + +#include + +int +sysarch(struct thread *td, struct sysarch_args *uap) +{ + + return (ENOTSUP); +} diff --git a/sys/riscv/riscv/timer.c b/sys/riscv/riscv/timer.c new file mode 100644 index 000000000000..e3f3f22579a1 --- /dev/null +++ b/sys/riscv/riscv/timer.c @@ -0,0 +1,298 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +/* + * RISC-V Timer + */ + +#include "opt_platform.h" + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define DEFAULT_FREQ 1000000 + +struct riscv_tmr_softc { + struct resource *res[1]; + void *ihl[1]; + uint32_t clkfreq; + struct eventtimer et; +}; + +static struct riscv_tmr_softc *riscv_tmr_sc = NULL; + +static struct resource_spec timer_spec[] = { + { SYS_RES_IRQ, 0, RF_ACTIVE }, + { -1, 0 } +}; + +static timecounter_get_t riscv_tmr_get_timecount; + +static struct timecounter riscv_tmr_timecount = { + .tc_name = "RISC-V Timecounter", + .tc_get_timecount = riscv_tmr_get_timecount, + .tc_poll_pps = NULL, + .tc_counter_mask = ~0u, + .tc_frequency = 0, + .tc_quality = 1000, +}; + +static long +get_counts(void) +{ + + return (csr_read(stime)); +} + +static unsigned +riscv_tmr_get_timecount(struct timecounter *tc) +{ + + return (get_counts()); +} + +static int +riscv_tmr_start(struct eventtimer *et, sbintime_t first, sbintime_t period) +{ + struct riscv_tmr_softc *sc; + int counts; + + sc = (struct riscv_tmr_softc *)et->et_priv; + + if (first != 0) { + counts = ((uint32_t)et->et_frequency * first) >> 32; + machine_command(ECALL_MTIMECMP, counts); + return (0); + } + + return (EINVAL); + +} + +static int +riscv_tmr_stop(struct eventtimer *et) +{ + struct riscv_tmr_softc *sc; + + sc = (struct riscv_tmr_softc *)et->et_priv; + + /* TODO */ + + return (0); +} + +static int +riscv_tmr_intr(void *arg) +{ + struct riscv_tmr_softc *sc; + + sc = (struct riscv_tmr_softc *)arg; + + /* + * Clear interrupt pending bit. + * Note sip register is unimplemented in Spike simulator, + * so use machine command to clear in mip. 
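+ * (machine_command() hands the request to machine mode via an environment + * call; mip is a machine-level CSR that supervisor mode cannot write + * directly.)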
+ */ + machine_command(ECALL_CLEAR_PENDING, 0); + + if (sc->et.et_active) + sc->et.et_event_cb(&sc->et, sc->et.et_arg); + + return (FILTER_HANDLED); +} + +static int +riscv_tmr_fdt_probe(device_t dev) +{ + + if (!ofw_bus_status_okay(dev)) + return (ENXIO); + + if (ofw_bus_is_compatible(dev, "riscv,timer")) { + device_set_desc(dev, "RISC-V Timer"); + return (BUS_PROBE_DEFAULT); + } + + return (ENXIO); +} + +static int +riscv_tmr_attach(device_t dev) +{ + struct riscv_tmr_softc *sc; + phandle_t node; + pcell_t clock; + int error; + + sc = device_get_softc(dev); + if (riscv_tmr_sc) + return (ENXIO); + + /* Get the base clock frequency */ + node = ofw_bus_get_node(dev); + if (node > 0) { + error = OF_getprop(node, "clock-frequency", &clock, + sizeof(clock)); + if (error > 0) { + sc->clkfreq = fdt32_to_cpu(clock); + } + } + + if (sc->clkfreq == 0) + sc->clkfreq = DEFAULT_FREQ; + + if (sc->clkfreq == 0) { + device_printf(dev, "No clock frequency specified\n"); + return (ENXIO); + } + + if (bus_alloc_resources(dev, timer_spec, sc->res)) { + device_printf(dev, "could not allocate resources\n"); + return (ENXIO); + } + + riscv_tmr_sc = sc; + + /* Setup IRQs handler */ + error = bus_setup_intr(dev, sc->res[0], INTR_TYPE_CLK, + riscv_tmr_intr, NULL, sc, &sc->ihl[0]); + if (error) { + device_printf(dev, "Unable to alloc int resource.\n"); + return (ENXIO); + } + + riscv_tmr_timecount.tc_frequency = sc->clkfreq; + tc_init(&riscv_tmr_timecount); + + sc->et.et_name = "RISC-V Eventtimer"; + sc->et.et_flags = ET_FLAGS_ONESHOT | ET_FLAGS_PERCPU; + sc->et.et_quality = 1000; + + sc->et.et_frequency = sc->clkfreq; + sc->et.et_min_period = (0x00000002LLU << 32) / sc->et.et_frequency; + sc->et.et_max_period = (0xfffffffeLLU << 32) / sc->et.et_frequency; + sc->et.et_start = riscv_tmr_start; + sc->et.et_stop = riscv_tmr_stop; + sc->et.et_priv = sc; + et_register(&sc->et); + + return (0); +} + +static device_method_t riscv_tmr_fdt_methods[] = { + DEVMETHOD(device_probe, riscv_tmr_fdt_probe), + DEVMETHOD(device_attach, riscv_tmr_attach), + { 0, 0 } +}; + +static driver_t riscv_tmr_fdt_driver = { + "timer", + riscv_tmr_fdt_methods, + sizeof(struct riscv_tmr_softc), +}; + +static devclass_t riscv_tmr_fdt_devclass; + +EARLY_DRIVER_MODULE(timer, simplebus, riscv_tmr_fdt_driver, riscv_tmr_fdt_devclass, + 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); +EARLY_DRIVER_MODULE(timer, ofwbus, riscv_tmr_fdt_driver, riscv_tmr_fdt_devclass, + 0, 0, BUS_PASS_TIMER + BUS_PASS_ORDER_MIDDLE); + +void +DELAY(int usec) +{ + int32_t counts, counts_per_usec; + uint32_t first, last; + + /* + * Check the timers are setup, if not just + * use a for loop for the meantime + */ + if (riscv_tmr_sc == NULL) { + for (; usec > 0; usec--) + for (counts = 200; counts > 0; counts--) + /* + * Prevent the compiler from optimizing + * out the loop + */ + cpufunc_nullop(); + return; + } + + /* Get the number of times to count */ + counts_per_usec = ((riscv_tmr_timecount.tc_frequency / 1000000) + 1); + + /* + * Clamp the timeout at a maximum value (about 32 seconds with + * a 66MHz clock). *Nobody* should be delay()ing for anywhere + * near that length of time and if they are, they should be hung + * out to dry. 
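+ * (With a 66MHz clock, counts_per_usec is 67, so the clamp below triggers + * at 0x80000000 / 67, roughly 32 million microseconds.)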
+ */ + if (usec >= (0x80000000U / counts_per_usec)) + counts = (0x80000000U / counts_per_usec) - 1; + else + counts = usec * counts_per_usec; + + first = get_counts(); + + while (counts > 0) { + last = get_counts(); + counts -= (int32_t)(last - first); + first = last; + } +} diff --git a/sys/riscv/riscv/trap.c b/sys/riscv/riscv/trap.c new file mode 100644 index 000000000000..c192b3135feb --- /dev/null +++ b/sys/riscv/riscv/trap.c @@ -0,0 +1,311 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +extern register_t fsu_intr_fault; + +/* Called from exception.S */ +void do_trap_supervisor(struct trapframe *); +void do_trap_user(struct trapframe *); + +static __inline void +call_trapsignal(struct thread *td, int sig, int code, void *addr) +{ + ksiginfo_t ksi; + + ksiginfo_init_trap(&ksi); + ksi.ksi_signo = sig; + ksi.ksi_code = code; + ksi.ksi_addr = addr; + trapsignal(td, &ksi); +} + +int +cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) +{ + struct proc *p; + register_t *ap; + int nap; + + nap = 8; + p = td->td_proc; + ap = &td->td_frame->tf_a[0]; + + sa->code = td->td_frame->tf_t[0]; + + if (sa->code == SYS_syscall || sa->code == SYS___syscall) { + sa->code = *ap++; + nap--; + } + + if (p->p_sysent->sv_mask) + sa->code &= p->p_sysent->sv_mask; + if (sa->code >= p->p_sysent->sv_size) + sa->callp = &p->p_sysent->sv_table[0]; + else + sa->callp = &p->p_sysent->sv_table[sa->code]; + + sa->narg = sa->callp->sy_narg; + memcpy(sa->args, ap, nap * sizeof(register_t)); + if (sa->narg > nap) + panic("TODO: Could we have more then 8 args?"); + + td->td_retval[0] = 0; + td->td_retval[1] = 0; + + return (0); +} + +#include "../../kern/subr_syscall.c" + +static void +dump_regs(struct trapframe *frame) +{ + int n; + int i; + + n = (sizeof(frame->tf_t) / sizeof(frame->tf_t[0])); + for (i = 0; i < n; i++) + printf("t[%d] == 0x%016lx\n", i, frame->tf_t[i]); + + n = (sizeof(frame->tf_s) / sizeof(frame->tf_s[0])); + for (i = 0; i < n; i++) + printf("s[%d] == 0x%016lx\n", i, frame->tf_s[i]); + + n = (sizeof(frame->tf_a) / sizeof(frame->tf_a[0])); + for (i = 0; i < n; i++) + printf("a[%d] == 0x%016lx\n", i, frame->tf_a[i]); + + printf("sepc == 0x%016lx\n", frame->tf_sepc); + printf("sstatus == 0x%016lx\n", frame->tf_sstatus); +} + +static void +svc_handler(struct trapframe *frame) +{ + struct syscall_args sa; + struct thread *td; + int error; + + td = curthread; + td->td_frame = frame; + + error = syscallenter(td, &sa); + syscallret(td, error, &sa); +} + +static void +data_abort(struct trapframe *frame, int lower) +{ + struct vm_map *map; + uint64_t sbadaddr; + struct thread *td; + struct pcb *pcb; + vm_prot_t ftype; + vm_offset_t va; + struct proc *p; + int ucode; + int error; + int sig; + + td = curthread; + pcb = td->td_pcb; + + /* + * Special case for fuswintr and suswintr. These can't sleep so + * handle them early on in the trap handler. + */ + if (__predict_false(pcb->pcb_onfault == (vm_offset_t)&fsu_intr_fault)) { + frame->tf_sepc = pcb->pcb_onfault; + return; + } + + sbadaddr = frame->tf_sbadaddr; + + p = td->td_proc; + + if (lower) + map = &td->td_proc->p_vmspace->vm_map; + else { + /* The top bit tells us which range to use */ + if ((sbadaddr >> 63) == 1) + map = kernel_map; + else + map = &td->td_proc->p_vmspace->vm_map; + } + + va = trunc_page(sbadaddr); + + if (frame->tf_scause == EXCP_STORE_ACCESS_FAULT) { + ftype = (VM_PROT_READ | VM_PROT_WRITE); + } else { + ftype = (VM_PROT_READ); + } + + if (map != kernel_map) { + /* + * Keep swapout from messing with us during this + * critical time. 
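+ * (Raising p_lock places a hold on the process, preventing its pages and + * kernel stack from being swapped out while the fault is serviced.)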
+ */ + PROC_LOCK(p); + ++p->p_lock; + PROC_UNLOCK(p); + + /* Fault in the user page: */ + error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); + + PROC_LOCK(p); + --p->p_lock; + PROC_UNLOCK(p); + } else { + /* + * Don't have to worry about process locking or stacks in the + * kernel. + */ + error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); + } + + if (error != KERN_SUCCESS) { + if (lower) { + sig = SIGSEGV; + if (error == KERN_PROTECTION_FAILURE) + ucode = SEGV_ACCERR; + else + ucode = SEGV_MAPERR; + call_trapsignal(td, sig, ucode, (void *)sbadaddr); + } else { + if (td->td_intr_nesting_level == 0 && + pcb->pcb_onfault != 0) { + frame->tf_a[0] = error; + frame->tf_sepc = pcb->pcb_onfault; + return; + } + dump_regs(frame); + panic("vm_fault failed: %lx, va 0x%016lx", + frame->tf_sepc, sbadaddr); + } + } + + if (lower) + userret(td, frame); +} + +void +do_trap_supervisor(struct trapframe *frame) +{ + uint64_t exception; + + exception = (frame->tf_scause & EXCP_MASK); + if (frame->tf_scause & EXCP_INTR) { + /* Interrupt */ + riscv_cpu_intr(frame); + return; + } + + CTR3(KTR_TRAP, "do_trap_supervisor: curthread: %p, sepc: %lx, frame: %p", + curthread, frame->tf_sepc, frame); + + switch(exception) { + case EXCP_LOAD_ACCESS_FAULT: + case EXCP_STORE_ACCESS_FAULT: + case EXCP_INSTR_ACCESS_FAULT: + data_abort(frame, 0); + break; + default: + dump_regs(frame); + panic("Unknown kernel exception %x badaddr %lx\n", + exception, frame->tf_sbadaddr); + } +} + +void +do_trap_user(struct trapframe *frame) +{ + uint64_t exception; + + exception = (frame->tf_scause & EXCP_MASK); + if (frame->tf_scause & EXCP_INTR) { + /* Interrupt */ + riscv_cpu_intr(frame); + return; + } + + CTR3(KTR_TRAP, "do_trap_user: curthread: %p, sepc: %lx, frame: %p", + curthread, frame->tf_sepc, frame); + + switch(exception) { + case EXCP_LOAD_ACCESS_FAULT: + case EXCP_STORE_ACCESS_FAULT: + case EXCP_INSTR_ACCESS_FAULT: + data_abort(frame, 1); + break; + case EXCP_UMODE_ENV_CALL: + frame->tf_sepc += 4; /* Next instruction */ + svc_handler(frame); + break; + default: + dump_regs(frame); + panic("Unknown userland exception %x badaddr %lx\n", + exception, frame->tf_sbadaddr); + } +} diff --git a/sys/riscv/riscv/uio_machdep.c b/sys/riscv/riscv/uio_machdep.c new file mode 100644 index 000000000000..e6f6d39f02be --- /dev/null +++ b/sys/riscv/riscv/uio_machdep.c @@ -0,0 +1,134 @@ +/*- + * Copyright (c) 2004 Alan L. Cox + * Copyright (c) 1982, 1986, 1991, 1993 + * The Regents of the University of California. All rights reserved. + * (c) UNIX System Laboratories, Inc. + * All or some portions of this file are derived from material licensed + * to the University of California by American Telephone and Telegraph + * Co. or Unix System Laboratories, Inc. and are reproduced herein with + * the permission of UNIX System Laboratories, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94 + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +/* + * Implement uiomove(9) from physical memory using the direct map to + * avoid the creation and destruction of ephemeral mappings. + */ +int +uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio) +{ + struct thread *td = curthread; + struct iovec *iov; + void *cp; + vm_offset_t page_offset, vaddr; + size_t cnt; + int error = 0; + int save = 0; + boolean_t mapped; + + KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE, + ("uiomove_fromphys: mode")); + KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread, + ("uiomove_fromphys proc")); + save = td->td_pflags & TDP_DEADLKTREAT; + td->td_pflags |= TDP_DEADLKTREAT; + mapped = FALSE; + while (n > 0 && uio->uio_resid) { + iov = uio->uio_iov; + cnt = iov->iov_len; + if (cnt == 0) { + uio->uio_iov++; + uio->uio_iovcnt--; + continue; + } + if (cnt > n) + cnt = n; + page_offset = offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - page_offset); + if (uio->uio_segflg != UIO_NOCOPY) { + mapped = pmap_map_io_transient( + &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE); + cp = (char *)vaddr + page_offset; + } + switch (uio->uio_segflg) { + case UIO_USERSPACE: + maybe_yield(); + if (uio->uio_rw == UIO_READ) + error = copyout(cp, iov->iov_base, cnt); + else + error = copyin(iov->iov_base, cp, cnt); + if (error) + goto out; + break; + case UIO_SYSSPACE: + if (uio->uio_rw == UIO_READ) + bcopy(cp, iov->iov_base, cnt); + else + bcopy(iov->iov_base, cp, cnt); + break; + case UIO_NOCOPY: + break; + } + if (__predict_false(mapped)) { + pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], + &vaddr, 1, TRUE); + mapped = FALSE; + } + iov->iov_base = (char *)iov->iov_base + cnt; + iov->iov_len -= cnt; + uio->uio_resid -= cnt; + uio->uio_offset += cnt; + offset += cnt; + n -= cnt; + } +out: + if (__predict_false(mapped)) { + panic("TODO 3"); + pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1, + TRUE); + } + if (save == 0) + td->td_pflags &= ~TDP_DEADLKTREAT; + return (error); +} diff --git a/sys/riscv/riscv/uma_machdep.c b/sys/riscv/riscv/uma_machdep.c new file mode 100644 index 000000000000..ba480713fc75 --- /dev/null +++ b/sys/riscv/riscv/uma_machdep.c @@ -0,0 +1,55 @@ +/*- + * Copyright (c) 2003 Alan L. Cox + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +void * +uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait) +{ + + panic("uma_small_alloc"); +} + +void +uma_small_free(void *mem, vm_size_t size, u_int8_t flags) +{ + + panic("uma_small_free"); +} diff --git a/sys/riscv/riscv/vm_machdep.c b/sys/riscv/riscv/vm_machdep.c new file mode 100644 index 000000000000..62e466f9e19a --- /dev/null +++ b/sys/riscv/riscv/vm_machdep.c @@ -0,0 +1,259 @@ +/*- + * Copyright (c) 2015 Ruslan Bukin + * All rights reserved. + * + * Portions of this software were developed by SRI International and the + * University of Cambridge Computer Laboratory under DARPA/AFRL contract + * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. + * + * Portions of this software were developed by the University of Cambridge + * Computer Laboratory as part of the CTSRD Project, with support from the + * UK Higher Education Innovation Fund (HEIF). + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +/* + * Finish a fork operation, with process p2 nearly set up. + * Copy and update the pcb, set up the stack so that the child is + * ready to run and return to user mode. + */ +void +cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags) +{ + struct pcb *pcb2; + struct trapframe *tf; + uint64_t val; + + if ((flags & RFPROC) == 0) + return; + + if (td1 == curthread) { + __asm __volatile("mv %0, tp" : "=&r"(val)); + td1->td_pcb->pcb_tp = val; + } + + pcb2 = (struct pcb *)(td2->td_kstack + + td2->td_kstack_pages * PAGE_SIZE) - 1; + + td2->td_pcb = pcb2; + bcopy(td1->td_pcb, pcb2, sizeof(*pcb2)); + + td2->td_pcb->pcb_l1addr = + vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1); + + tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1); + bcopy(td1->td_frame, tf, sizeof(*tf)); + + /* Clear syscall error flag */ + tf->tf_t[0] = 0; + + /* Arguments for child */ + tf->tf_a[0] = 0; + tf->tf_a[1] = 0; + tf->tf_sstatus = SSTATUS_PIE; + + td2->td_frame = tf; + + /* Set the return value registers for fork() */ + td2->td_pcb->pcb_t[0] = (uintptr_t)fork_return; + td2->td_pcb->pcb_t[1] = (uintptr_t)td2; + td2->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; + td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame; + + /* Setup to release spin count in fork_exit(). */ + td2->td_md.md_spinlock_count = 1; + td2->td_md.md_saved_sstatus_ie = 1; +} + +void +cpu_reset(void) +{ + + printf("cpu_reset"); + while(1) + __asm volatile("wfi" ::: "memory"); +} + +void +cpu_thread_swapin(struct thread *td) +{ +} + +void +cpu_thread_swapout(struct thread *td) +{ +} + +void +cpu_set_syscall_retval(struct thread *td, int error) +{ + struct trapframe *frame; + + frame = td->td_frame; + + switch (error) { + case 0: + frame->tf_a[0] = td->td_retval[0]; + frame->tf_a[1] = td->td_retval[1]; + frame->tf_t[0] = 0; /* syscall succeeded */ + break; + case ERESTART: + frame->tf_sepc -= 4; /* prev instruction */ + break; + case EJUSTRETURN: + break; + default: + frame->tf_a[0] = error; + frame->tf_t[0] = 1; /* syscall error */ + break; + } +} + +/* + * Initialize machine state (pcb and trap frame) for a new thread about to + * upcall. Put enough state in the new thread's PCB to get it to go back + * to userret(), where we can intercept it again to set the return (upcall) + * address and stack, along with those from upcalls that are from other sources + * such as those generated in thread_userret() itself. + */ +void +cpu_set_upcall(struct thread *td, struct thread *td0) +{ + + bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe)); + bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb)); + + td->td_pcb->pcb_t[0] = (uintptr_t)fork_return; + td->td_pcb->pcb_t[1] = (uintptr_t)td; + td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; + td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; + + /* Setup to release spin count in fork_exit(). */ + td->td_md.md_spinlock_count = 1; + td->td_md.md_saved_sstatus_ie = 1; +} + +/* + * Set the machine state for performing an upcall that has to + * be done in thread_userret() so that those upcalls generated + * in thread_userret() itself can be done as well.
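+ * Concretely, the code below points the trapframe at the upcall entry point + * and its argument and switches to the supplied user stack, so the thread + * returns to user mode executing entry(arg).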
+ */ +void +cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg, + stack_t *stack) +{ + struct trapframe *tf = td->td_frame; + + tf->tf_sp = STACKALIGN((uintptr_t)stack->ss_sp + stack->ss_size); + tf->tf_sepc = (register_t)entry; + tf->tf_a[0] = (register_t)arg; +} + +int +cpu_set_user_tls(struct thread *td, void *tls_base) +{ + struct pcb *pcb; + + if ((uintptr_t)tls_base >= VM_MAXUSER_ADDRESS) + return (EINVAL); + + pcb = td->td_pcb; + pcb->pcb_tp = (register_t)tls_base; + + return (0); +} + +void +cpu_thread_exit(struct thread *td) +{ +} + +void +cpu_thread_alloc(struct thread *td) +{ + + td->td_pcb = (struct pcb *)(td->td_kstack + + td->td_kstack_pages * PAGE_SIZE) - 1; + td->td_frame = (struct trapframe *)STACKALIGN( + td->td_pcb - 1); +} + +void +cpu_thread_free(struct thread *td) +{ +} + +void +cpu_thread_clean(struct thread *td) +{ +} + +/* + * Intercept the return address from a freshly forked process that has NOT + * been scheduled yet. + * + * This is needed to make kernel threads stay in kernel mode. + */ +void +cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg) +{ + + td->td_pcb->pcb_t[0] = (uintptr_t)func; + td->td_pcb->pcb_t[1] = (uintptr_t)arg; + td->td_pcb->pcb_ra = (uintptr_t)fork_trampoline; + td->td_pcb->pcb_sp = (uintptr_t)td->td_frame; +} + +void +cpu_exit(struct thread *td) +{ +} + +void +swi_vm(void *v) +{ + + /* Nothing to do here - busdma bounce buffers are not implemented. */ +} diff --git a/sys/rpc/replay.c b/sys/rpc/replay.c index 1bd5378d9fbe..3900a84d0f14 100644 --- a/sys/rpc/replay.c +++ b/sys/rpc/replay.c @@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include diff --git a/sys/sys/cdefs.h b/sys/sys/cdefs.h index 59cc45f0e5fa..530359f6c1e4 100644 --- a/sys/sys/cdefs.h +++ b/sys/sys/cdefs.h @@ -776,7 +776,7 @@ #endif #endif -#if defined(__mips) || defined(__powerpc64__) +#if defined(__mips) || defined(__powerpc64__) || defined(__riscv__) #define __NO_TLS 1 #endif diff --git a/sys/sys/kerneldump.h b/sys/sys/kerneldump.h index 763b7cd1c5aa..fd010181eb1d 100644 --- a/sys/sys/kerneldump.h +++ b/sys/sys/kerneldump.h @@ -72,6 +72,7 @@ struct kerneldumpheader { #define KERNELDUMP_I386_VERSION 2 #define KERNELDUMP_MIPS_VERSION 1 #define KERNELDUMP_POWERPC_VERSION 1 +#define KERNELDUMP_RISCV_VERSION 1 #define KERNELDUMP_SPARC64_VERSION 1 #define KERNELDUMP_TEXT_VERSION 1 uint64_t dumplength; /* excl headers */ diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h index 26cf9a6721ed..55a7950cebb7 100644 --- a/sys/sys/socketvar.h +++ b/sys/sys/socketvar.h @@ -95,10 +95,10 @@ struct socket { TAILQ_HEAD(, socket) so_incomp; /* (e) queue of partial unaccepted connections */ TAILQ_HEAD(, socket) so_comp; /* (e) queue of complete unaccepted connections */ TAILQ_ENTRY(socket) so_list; /* (e) list of unaccepted connections */ - u_short so_qlen; /* (e) number of unaccepted connections */ - u_short so_incqlen; /* (e) number of unaccepted incomplete + u_int so_qlen; /* (e) number of unaccepted connections */ + u_int so_incqlen; /* (e) number of unaccepted incomplete connections */ - u_short so_qlimit; /* (e) max number queued connections */ + u_int so_qlimit; /* (e) max number queued connections */ short so_timeo; /* (g) connection timeout */ u_short so_error; /* (f) error affecting connection */ struct sigio *so_sigio; /* [sg] information for async I/O or @@ -172,9 +172,9 @@ struct xsocket { caddr_t so_pcb; /* another convenient handle */ int xso_protocol; int xso_family; - u_short 
so_qlen; - u_short so_incqlen; - u_short so_qlimit; + u_int so_qlen; + u_int so_incqlen; + u_int so_qlimit; short so_timeo; u_short so_error; pid_t so_pgid; diff --git a/sys/tools/embed_mfs.sh b/sys/tools/embed_mfs.sh index 785cf3f81bc0..3f20257b7a58 100644 --- a/sys/tools/embed_mfs.sh +++ b/sys/tools/embed_mfs.sh @@ -32,8 +32,20 @@ # $2: MFS image filename # -obs=`strings -at d $1 | grep "MFS Filesystem goes here" | awk '{print $1}'` -dd if=$2 ibs=8192 of=$1 obs=${obs} oseek=1 conv=notrunc 2> /dev/null +mfs_size=`stat -f '%z' $2 2> /dev/null` +# If we can't determine MFS image size - bail. +[ -z ${mfs_size} ] && echo "Can't determine MFS image size" && exit 1 -strings $1 | grep 'MFS Filesystem had better STOP here' > /dev/null || \ - (rm $1 && echo "MFS image too large" && false) +sec_info=`objdump -h $1 2> /dev/null | grep " oldmfs "` +# If we can't find the mfs section within the given kernel - bail. +[ -z "${sec_info}" ] && echo "Can't locate mfs section within kernel" && exit 1 + +sec_size=`echo ${sec_info} | awk '{printf("%d", "0x" $3)}' 2> /dev/null` +sec_start=`echo ${sec_info} | awk '{printf("%d", "0x" $6)}' 2> /dev/null` + +# If the mfs section size is smaller than the mfs image - bail. +[ ${sec_size} -lt ${mfs_size} ] && echo "MFS image too large" && exit 1 + +# Dump the mfs image into the mfs section +dd if=$2 ibs=8192 of=$1 obs=${sec_start} oseek=1 conv=notrunc 2> /dev/null && \ + echo "MFS image embedded into kernel" && exit 0 diff --git a/tests/sys/geom/class/gate/1_test.sh b/tests/sys/geom/class/gate/1_test.sh index 3e277349631f..ba573bb6e122 100644 --- a/tests/sys/geom/class/gate/1_test.sh +++ b/tests/sys/geom/class/gate/1_test.sh @@ -10,6 +10,7 @@ us=0 while [ -c /dev/ggate${us} ]; do : $(( us += 1 )) done +pidfile=ggated.$$.pid conf=`mktemp $base.XXXXXX` || exit 1 port=33080 @@ -19,7 +20,7 @@ src=$(attach_md -t malloc -s 1M) test_cleanup() { ggatec destroy -f -u $us - killall ggated + pkill -F $pidfile geom_test_cleanup } trap test_cleanup ABRT EXIT INT TERM @@ -30,7 +31,7 @@ src_checksum=$(md5 -q /dev/$src) echo "127.0.0.1 RW /dev/$work" > $conf -if ! ggated -p $port $conf; then +if ! ggated -p $port -F $pidfile $conf; then echo 'ggated failed to start' echo 'Bail out!' exit 1 @@ -41,6 +42,7 @@ if ! ggatec create -p $port -u $us 127.0.0.1 /dev/$work; then echo 'Bail out!' exit 1 fi +sleep 1 dd if=/dev/${src} of=/dev/ggate${us} bs=1m count=1 sleep 1 diff --git a/tools/tools/nanobsd/defaults.sh b/tools/tools/nanobsd/defaults.sh index 8084bfaf0fcb..a0e6be580b9b 100755 --- a/tools/tools/nanobsd/defaults.sh +++ b/tools/tools/nanobsd/defaults.sh @@ -156,6 +156,8 @@ NANO_SLICE_ROOT=s1 NANO_SLICE_ALTROOT=s2 NANO_SLICE_CFG=s3 NANO_SLICE_DATA=s4 +NANO_ROOT=s1a +NANO_ALTROOT=s2a # Default ownwership for nopriv build NANO_DEF_UNAME=root @@ -569,7 +571,7 @@ setup_nanobsd_etc ( ) ( # save config file for scripts echo "NANO_DRIVE=${NANO_DRIVE}" > etc/nanobsd.conf - echo "/dev/${NANO_DRIVE}${NANO_SLICE_ROOT}a / ufs ro 1 1" > etc/fstab + echo "/dev/${NANO_DRIVE}${NANO_ROOT} / ufs ro 1 1" > etc/fstab echo "/dev/${NANO_DRIVE}${NANO_SLICE_CFG} /cfg ufs rw,noauto 2 2" >> etc/fstab mkdir -p cfg ) @@ -731,8 +733,8 @@ create_diskimage ( ) ( bsdlabel ${MD}${NANO_SLICE_ROOT} # Create first image - populate_slice /dev/${MD}${NANO_SLICE_ROOT}a ${NANO_WORLDDIR} ${MNT} "${NANO_SLICE_ROOT}a" - mount /dev/${MD}${NANO_SLICE_ROOT}a ${MNT} + populate_slice /dev/${MD}${NANO_ROOT} ${NANO_WORLDDIR} ${MNT} "${NANO_ROOT}" + mount /dev/${MD}${NANO_ROOT} ${MNT} echo "Generating mtree..." 
( cd "${MNT}" && mtree -c ) > ${NANO_OBJ}/_.mtree ( cd "${MNT}" && du -k ) > ${NANO_OBJ}/_.du @@ -742,7 +744,7 @@ create_diskimage ( ) ( # Duplicate to second image (if present) echo "Duplicating to second image..." dd conv=sparse if=/dev/${MD}${NANO_SLICE_ROOT} of=/dev/${MD}${NANO_SLICE_ALTROOT} bs=64k - mount /dev/${MD}${NANO_SLICE_ALTROOT}a ${MNT} + mount /dev/${MD}${NANO_ALTROOT} ${MNT} for f in ${MNT}/etc/fstab ${MNT}/conf/base/etc/fstab do sed -i "" "s=${NANO_DRIVE}${NANO_SLICE_ROOT}=${NANO_DRIVE}${NANO_SLICE_ALTROOT}=g" $f @@ -751,7 +753,7 @@ create_diskimage ( ) ( # Override the label from the first partition so we # don't confuse glabel with duplicates. if [ -n "${NANO_LABEL}" ]; then - tunefs -L ${NANO_LABEL}"${NANO_SLICE_ALTROOT}a" /dev/${MD}${NANO_SLICE_ALTROOT}a + tunefs -L ${NANO_LABEL}"${NANO_ALTROOT}" /dev/${MD}${NANO_ALTROOT} fi fi diff --git a/tools/tools/nanobsd/embedded/README b/tools/tools/nanobsd/embedded/README index e8b1c8c6506e..ebaba4c7c97a 100644 --- a/tools/tools/nanobsd/embedded/README +++ b/tools/tools/nanobsd/embedded/README @@ -10,8 +10,6 @@ and DHCPd. This is a work in progress. Generally, to build this you should cd tools/tools/nanobsd/embedded sudo sh ../nanobsd.sh -c foo.cfg -but do be careful if things are interrupted. There may still be -bugs lurking that cause your entire FreeBSD tree to disappear. Some features: diff --git a/tools/tools/nanobsd/embedded/common b/tools/tools/nanobsd/embedded/common index a1d401a03782..bbac16205304 100644 --- a/tools/tools/nanobsd/embedded/common +++ b/tools/tools/nanobsd/embedded/common @@ -76,6 +76,8 @@ fi NANO_SLICE_FAT_SIZE=32m NANO_SLICE_CFG_SIZE=32m +NANO_BOOT2CFG="-D -h -S115200 comconsole_port=0x3e8" + NANO_RAM_ETCSIZE=8192 NANO_RAM_TMPVARSIZE=8192 NANO_IMAGES=2 @@ -292,7 +294,7 @@ create_diskimage_mbr ( ) ( bootbsd=${NANO_BOOT_BSD:+-b ${NANO_BOOT_BSD}} skiparg=${NANO_MBR_FIRST_SKIP:+-S ${NANO_MBR_FIRST_SKIP}} - for i in s1 s2 s3 s4 empty; do + for i in s1 s2 s3 s4 p1 p2 p3 p4 p5 empty; do rm -fr ${NANO_OBJ}/_.${i}* done @@ -317,10 +319,10 @@ create_diskimage_mbr ( ) ( # bsd label [ -z ${NANO_NOPRIV_BUILD} ] || extra="-F ${NANO_METALOG}" sz=${NANO_SLICE_ROOT_SIZE:+-s ${NANO_SLICE_ROOT_SIZE}} - eval "${NANO_MAKEFS_UFS}" ${extra} $sz "${NANO_OBJ}/_.${NANO_SLICE_ROOT}a" \ + eval "${NANO_MAKEFS_UFS}" ${extra} $sz "${NANO_OBJ}/_.${NANO_SLICE_ROOT}" \ "${NANO_WORLDDIR}" - mkimg -s bsd ${bootbsd} -p freebsd-ufs:=${NANO_OBJ}/_.${NANO_SLICE_ROOT}a \ - -o ${NANO_OBJ}/_.${NANO_SLICE_ROOT} +# mkimg -s bsd ${bootbsd} -p freebsd-ufs:=${NANO_OBJ}/_.${NANO_SLICE_ROOT} \ +# -o ${NANO_OBJ}/_.${NANO_SLICE_ROOT} # Populate the /cfg partition, empty if none given if [ -z "${NANO_CFGDIR}" ]; then @@ -339,8 +341,18 @@ create_diskimage_mbr ( ) ( if [ -n "$NANO_SLICE_FAT" ]; then eval $NANO_SLICE_FAT=fat16b fi - eval $NANO_SLICE_CFG=freebsd - eval $NANO_SLICE_ROOT=freebsd + case ${NANO_SLICE_CFG} in + s*) + echo slice + eval $NANO_SLICE_CFG=freebsd + eval $NANO_SLICE_ROOT=freebsd + ;; + p*) + echo part + eval $NANO_SLICE_CFG=freebsd-ufs + eval $NANO_SLICE_ROOT=freebsd-ufs + ;; + esac # below depends on https://reviews.freebsd.org/D4403 not yet in the tree # but there's problems: it marks all partitions as active, so you have to # boot off parittion 3 or 2 by hand if you're playing around with this WIP @@ -358,6 +370,25 @@ create_diskimage_mbr ( ) ( -p ${s2}:=${NANO_OBJ}/_.s2 \ -o ${NANO_OBJ}/_.disk.image.${NANO_NAME}${fmt} ;; + std-uefi) + # s1 is boot, s2 is cfg, s3 is /, not sure how to make that + # boot (marked as active) with 
mkimg yet + mkimg -a 2 ${fmtarg} ${bootmbr} -s mbr \ + -p efi:=${NANO_WORLDDIR}/boot/boot1.efifat \ + -p ${s2}:=${NANO_OBJ}/_.s2 \ + -p ${s3}:=${NANO_OBJ}/_.s3 \ + -o ${NANO_OBJ}/_.disk.image.${NANO_NAME}${fmt} + ;; + std-uefi-bios) + # p1 is boot for uefi, p2 is boot for gpt, p3 is cfg, p4 is / + # and p5 is alt-root (after resize) + mkimg -a 2 ${fmtarg} ${bootmbr} -s gpt \ + -p efi:=${NANO_WORLDDIR}/boot/boot1.efifat \ + -p freebsd-boot:=${NANO_WORLDDIR}/boot/gptboot \ + -p ${p3}:=${NANO_OBJ}/_.p3 \ + -p ${p4}:=${NANO_OBJ}/_.p4 \ + -o ${NANO_OBJ}/_.disk.image.${NANO_NAME}${fmt} + ;; powerpc64-ibm) # A lie to make the boot loader work, it boots the first BSD partition # it finds, regardless of the active flag. @@ -658,12 +689,30 @@ powerpc64-apple) echo Not yet exit 1 ;; +std-uefi) + NANO_SLICE_UEFI=s1 + NANO_SLICE_CFG=s2 + NANO_SLICE_ROOT=s3 + NANO_SLICE_ALTROOT=s4 + ;; +std-uefi-bios) + NANO_SLICE_UEFI=p1 + NANO_SLICE_BOOT=p2 + NANO_SLICE_CFG=p3 + NANO_SLICE_ROOT=p4 + NANO_SLICE_ALTROOT=p5 + ;; *) echo Unknown Layout ${NANO_LAYOUT} exit 1 ;; esac +# For this config, no BSD labels so NANO_ROOT and NANO_ALTROOT need to be +# adjusted +NANO_ROOT=${NANO_SLICE_ROOT} +NANO_ALTROOT=${NANO_SLICE_ALTROOT} + NANO_SLICE_DATA= # Not included # Each major disk scheme has its own routine. Generally diff --git a/tools/tools/nanobsd/embedded/qemu-amd64-uefi-bios.cfg b/tools/tools/nanobsd/embedded/qemu-amd64-uefi-bios.cfg new file mode 100644 index 000000000000..7f4f4276bb90 --- /dev/null +++ b/tools/tools/nanobsd/embedded/qemu-amd64-uefi-bios.cfg @@ -0,0 +1,43 @@ +# $FreeBSD$ + +#- +# Copyright (c) 2015 Warner Losh. All Rights Reserved. +# Copyright (c) 2010-2011 iXsystems, Inc., All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL iXsystems, Inc. OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# + +NANO_ARCH=amd64 +NANO_NAME=qemu-amd64-uefi-bios +NANO_LAYOUT=std-uefi-bios + +.
common # Pull in common definitions + +qemu_env + +# +# Run with +# qemu-system-x86_64 -serial stdio -bios OVMF.fd \ +# -hda _.disk.image.qemu-amd64-uefi.qcow2 +# OVMF.fd is from +# http://sourceforge.net/projects/edk2/files/OVMF/OVMF-X64-r15214.zip +# diff --git a/tools/tools/nanobsd/embedded/qemu-amd64-uefi.cfg b/tools/tools/nanobsd/embedded/qemu-amd64-uefi.cfg new file mode 100644 index 000000000000..a88e459a50c0 --- /dev/null +++ b/tools/tools/nanobsd/embedded/qemu-amd64-uefi.cfg @@ -0,0 +1,43 @@ +# $FreeBSD$ + +#- +# Copyright (c) 2015 Warner Losh. All Rights Reserved. +# Copyright (c) 2010-2011 iXsystems, Inc., All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL iXsystems, Inc. OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# + +NANO_ARCH=amd64 +NANO_NAME=qemu-amd64-uefi +NANO_LAYOUT=std-uefi + +. 
common # Pull in common definitions + +qemu_env + +# +# Run with +# qemu-system-x86_64 -serial stdio -bios OVMF.fd \ +# -hda _.disk.image.qemu-amd64-uefi.qcow2 +# OVMF.fd is from +# http://sourceforge.net/projects/edk2/files/OVMF/OVMF-X64-r15214.zip +# diff --git a/usr.bin/kdump/kdump.c b/usr.bin/kdump/kdump.c index d4406545fc4d..8fe03ea27766 100644 --- a/usr.bin/kdump/kdump.c +++ b/usr.bin/kdump/kdump.c @@ -690,20 +690,20 @@ syscallabi(u_int sv_flags) { if (sv_flags == 0) - return (FREEBSD); + return (SYSDECODE_ABI_FREEBSD); switch (sv_flags & SV_ABI_MASK) { case SV_ABI_FREEBSD: - return (FREEBSD); + return (SYSDECODE_ABI_FREEBSD); #if defined(__amd64__) || defined(__i386__) case SV_ABI_LINUX: #ifdef __amd64__ if (sv_flags & SV_ILP32) - return (LINUX32); + return (SYSDECODE_ABI_LINUX32); #endif - return (LINUX); + return (SYSDECODE_ABI_LINUX); #endif default: - return (UNKNOWN_ABI); + return (SYSDECODE_ABI_UNKNOWN); } } diff --git a/usr.bin/netstat/inet.c b/usr.bin/netstat/inet.c index 6a933145e7e8..b0a0a7fca050 100644 --- a/usr.bin/netstat/inet.c +++ b/usr.bin/netstat/inet.c @@ -486,11 +486,11 @@ protopr(u_long off, const char *name, int af1, int proto) else xo_emit("{:protocol/%-3.3s%-2.2s/%s%s} ", name, vchar); if (Lflag) { - char buf1[15]; + char buf1[33]; - snprintf(buf1, 15, "%d/%d/%d", so->so_qlen, + snprintf(buf1, sizeof buf1, "%u/%u/%u", so->so_qlen, so->so_incqlen, so->so_qlimit); - xo_emit("{:listen-queue-sizes/%-14.14s} ", buf1); + xo_emit("{:listen-queue-sizes/%-32.32s} ", buf1); } else if (Tflag) { if (istcp) xo_emit("{:sent-retransmit-packets/%6u} " diff --git a/usr.bin/netstat/sctp.c b/usr.bin/netstat/sctp.c index 5f609a077d24..6c766b2ad082 100644 --- a/usr.bin/netstat/sctp.c +++ b/usr.bin/netstat/sctp.c @@ -467,9 +467,10 @@ sctp_process_inpcb(struct xsctp_inpcb *xinpcb, tname = "????"; if (Lflag) { - char buf1[9]; + char buf1[22]; - snprintf(buf1, 9, "%hu/%hu", xinpcb->qlen, xinpcb->maxqlen); + snprintf(buf1, sizeof buf1, "%u/%u", + xinpcb->qlen, xinpcb->maxqlen); xo_emit("{:protocol/%-6.6s/%s} {:type/%-5.5s/%s} ", pname, tname); xo_emit("{d:queues/%-8.8s}{e:queue-len/%hu}" diff --git a/usr.bin/netstat/unix.c b/usr.bin/netstat/unix.c index a7503f5f4725..04e4ae5e0aa7 100644 --- a/usr.bin/netstat/unix.c +++ b/usr.bin/netstat/unix.c @@ -271,7 +271,7 @@ unixdomainpr(struct xunpcb *xunp, struct xsocket *so) struct unpcb *unp; struct sockaddr_un *sa; static int first = 1; - char buf1[15]; + char buf1[33]; static const char *titles[2] = { "{T:/%-8.8s} {T:/%-6.6s} {T:/%-6.6s} {T:/%-6.6s} {T:/%8.8s} " "{T:/%8.8s} {T:/%8.8s} {T:/%8.8s} {T:Addr}\n", @@ -310,10 +310,10 @@ unixdomainpr(struct xunpcb *xunp, struct xsocket *so) return; if (Lflag) { - snprintf(buf1, 15, "%d/%d/%d", so->so_qlen, + snprintf(buf1, sizeof buf1, "%u/%u/%u", so->so_qlen, so->so_incqlen, so->so_qlimit); - xo_emit("unix {d:socket/%-14.14s}{e:queue-length/%d}" - "{e:incomplete-queue-length/%d}{e:queue-limit/%d}", + xo_emit("unix {d:socket/%-32.32s}{e:queue-length/%u}" + "{e:incomplete-queue-length/%u}{e:queue-limit/%u}", buf1, so->so_qlen, so->so_incqlen, so->so_qlimit); } else { xo_emit(format[fmt], diff --git a/usr.bin/truss/aarch64-cloudabi64.c b/usr.bin/truss/aarch64-cloudabi64.c index 099cb1e7d746..f2891afe02a8 100644 --- a/usr.bin/truss/aarch64-cloudabi64.c +++ b/usr.bin/truss/aarch64-cloudabi64.c @@ -81,7 +81,7 @@ aarch64_cloudabi64_fetch_retval(struct trussinfo *trussinfo, long *retval, static struct procabi aarch64_cloudabi64 = { "CloudABI ELF64", - CLOUDABI64, + SYSDECODE_ABI_CLOUDABI64, 
aarch64_cloudabi64_fetch_args, aarch64_cloudabi64_fetch_retval }; diff --git a/usr.bin/truss/aarch64-freebsd.c b/usr.bin/truss/aarch64-freebsd.c index 454bba7d77ca..534441c60b8c 100644 --- a/usr.bin/truss/aarch64-freebsd.c +++ b/usr.bin/truss/aarch64-freebsd.c @@ -99,7 +99,7 @@ aarch64_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi aarch64_freebsd = { "FreeBSD ELF64", - FREEBSD, + SYSDECODE_ABI_FREEBSD, aarch64_fetch_args, aarch64_fetch_retval }; diff --git a/usr.bin/truss/amd64-cloudabi64.c b/usr.bin/truss/amd64-cloudabi64.c index 38768ff0074d..ce8b18918fd8 100644 --- a/usr.bin/truss/amd64-cloudabi64.c +++ b/usr.bin/truss/amd64-cloudabi64.c @@ -90,7 +90,7 @@ amd64_cloudabi64_fetch_retval(struct trussinfo *trussinfo, long *retval, static struct procabi amd64_cloudabi64 = { "CloudABI ELF64", - CLOUDABI64, + SYSDECODE_ABI_CLOUDABI64, amd64_cloudabi64_fetch_args, amd64_cloudabi64_fetch_retval }; diff --git a/usr.bin/truss/amd64-freebsd.c b/usr.bin/truss/amd64-freebsd.c index 8a211a0ce474..a2f378ca8ea5 100644 --- a/usr.bin/truss/amd64-freebsd.c +++ b/usr.bin/truss/amd64-freebsd.c @@ -121,7 +121,7 @@ amd64_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi amd64_freebsd = { "FreeBSD ELF64", - FREEBSD, + SYSDECODE_ABI_FREEBSD, amd64_fetch_args, amd64_fetch_retval }; diff --git a/usr.bin/truss/amd64-freebsd32.c b/usr.bin/truss/amd64-freebsd32.c index adce798b7c5c..cfcead2a6014 100644 --- a/usr.bin/truss/amd64-freebsd32.c +++ b/usr.bin/truss/amd64-freebsd32.c @@ -117,7 +117,7 @@ amd64_freebsd32_fetch_retval(struct trussinfo *trussinfo, long *retval, static struct procabi amd64_freebsd32 = { "FreeBSD ELF32", - FREEBSD32, + SYSDECODE_ABI_FREEBSD32, amd64_freebsd32_fetch_args, amd64_freebsd32_fetch_retval }; @@ -126,7 +126,7 @@ PROCABI(amd64_freebsd32); static struct procabi amd64_freebsd32_aout = { "FreeBSD a.out", - FREEBSD32, + SYSDECODE_ABI_FREEBSD32, amd64_freebsd32_fetch_args, amd64_freebsd32_fetch_retval }; diff --git a/usr.bin/truss/amd64-linux32.c b/usr.bin/truss/amd64-linux32.c index dd1d83361095..2ede61757b03 100644 --- a/usr.bin/truss/amd64-linux32.c +++ b/usr.bin/truss/amd64-linux32.c @@ -131,7 +131,7 @@ amd64_linux32_fetch_retval(struct trussinfo *trussinfo, long *retval, static struct procabi amd64_linux32 = { "Linux ELF32", - LINUX32, + SYSDECODE_ABI_LINUX32, amd64_linux32_fetch_args, amd64_linux32_fetch_retval }; diff --git a/usr.bin/truss/arm-freebsd.c b/usr.bin/truss/arm-freebsd.c index 5722c91442fb..a1b2b21313ba 100644 --- a/usr.bin/truss/arm-freebsd.c +++ b/usr.bin/truss/arm-freebsd.c @@ -128,7 +128,7 @@ arm_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi arm_freebsd = { "FreeBSD ELF32", - FREEBSD, + SYSDECODE_ABI_FREEBSD, arm_fetch_args, arm_fetch_retval }; diff --git a/usr.bin/truss/i386-freebsd.c b/usr.bin/truss/i386-freebsd.c index c166596dcb11..bf35af2f015b 100644 --- a/usr.bin/truss/i386-freebsd.c +++ b/usr.bin/truss/i386-freebsd.c @@ -110,7 +110,7 @@ i386_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi i386_freebsd = { "FreeBSD ELF32", - FREEBSD, + SYSDECODE_ABI_FREEBSD, i386_fetch_args, i386_fetch_retval }; @@ -119,7 +119,7 @@ PROCABI(i386_freebsd); static struct procabi i386_freebsd_aout = { "FreeBSD a.out", - FREEBSD, + SYSDECODE_ABI_FREEBSD, i386_fetch_args, i386_fetch_retval }; diff --git a/usr.bin/truss/i386-linux.c b/usr.bin/truss/i386-linux.c index 5fdae8ee958c..fbc54a096d4f 100644 --- 
a/usr.bin/truss/i386-linux.c +++ b/usr.bin/truss/i386-linux.c @@ -130,7 +130,7 @@ i386_linux_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi i386_linux = { "Linux ELF32", - LINUX, + SYSDECODE_ABI_LINUX, i386_linux_fetch_args, i386_linux_fetch_retval }; diff --git a/usr.bin/truss/mips-freebsd.c b/usr.bin/truss/mips-freebsd.c index f4b5a7eb2aa0..3a42a59d7323 100644 --- a/usr.bin/truss/mips-freebsd.c +++ b/usr.bin/truss/mips-freebsd.c @@ -131,7 +131,7 @@ static struct procabi mips_freebsd = { #else "FreeBSD ELF32", #endif - FREEBSD, + SYSDECODE_ABI_FREEBSD, mips_fetch_args, mips_fetch_retval }; diff --git a/usr.bin/truss/powerpc-freebsd.c b/usr.bin/truss/powerpc-freebsd.c index 6a245df76c6a..ee78d038297a 100644 --- a/usr.bin/truss/powerpc-freebsd.c +++ b/usr.bin/truss/powerpc-freebsd.c @@ -112,7 +112,7 @@ powerpc_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi powerpc_freebsd = { "FreeBSD ELF32", - FREEBSD, + SYSDECODE_ABI_FREEBSD, powerpc_fetch_args, powerpc_fetch_retval }; diff --git a/usr.bin/truss/powerpc64-freebsd.c b/usr.bin/truss/powerpc64-freebsd.c index 68a0b5d36abe..cc64cd685e7e 100644 --- a/usr.bin/truss/powerpc64-freebsd.c +++ b/usr.bin/truss/powerpc64-freebsd.c @@ -108,7 +108,7 @@ powerpc64_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi powerpc64_freebsd = { "FreeBSD ELF64", - FREEBSD, + SYSDECODE_ABI_FREEBSD, powerpc64_fetch_args, powerpc64_fetch_retval }; diff --git a/usr.bin/truss/powerpc64-freebsd32.c b/usr.bin/truss/powerpc64-freebsd32.c index ee37ead3cc11..c6f3b10a7092 100644 --- a/usr.bin/truss/powerpc64-freebsd32.c +++ b/usr.bin/truss/powerpc64-freebsd32.c @@ -117,7 +117,7 @@ powerpc64_freebsd32_fetch_retval(struct trussinfo *trussinfo, long *retval, int static struct procabi powerpc64_freebsd32 = { "FreeBSD ELF32", - FREEBSD32, + SYSDECODE_ABI_FREEBSD32, powerpc64_freebsd32_fetch_args, powerpc64_freebsd32_fetch_retval }; diff --git a/usr.bin/truss/sparc64-freebsd.c b/usr.bin/truss/sparc64-freebsd.c index 23486d76baa4..a8569c010e06 100644 --- a/usr.bin/truss/sparc64-freebsd.c +++ b/usr.bin/truss/sparc64-freebsd.c @@ -115,7 +115,7 @@ sparc64_fetch_retval(struct trussinfo *trussinfo, long *retval, int *errorp) static struct procabi sparc64_freebsd = { "FreeBSD ELF64", - FREEBSD, + SYSDECODE_ABI_FREEBSD, sparc64_fetch_args, sparc64_fetch_retval }; diff --git a/usr.sbin/bsdconfig/bsdconfig b/usr.sbin/bsdconfig/bsdconfig index 993865cacc8c..2ecdb30bc5a9 100755 --- a/usr.sbin/bsdconfig/bsdconfig +++ b/usr.sbin/bsdconfig/bsdconfig @@ -212,7 +212,7 @@ dialog_menu_main() *) menu_program="$menuitem/$menu_program" esac - tag=$( f_substr "$DIALOG_MENU_TAGS" $index 1 ) + f_substr -v tag "$DIALOG_MENU_TAGS" $index 1 setvar "menu_program$tag" "$menu_program" f_shell_escape "$menu_title" menu_title @@ -256,7 +256,7 @@ dialog_menu_main() *) menu_program="$BSDCFG_LOCAL_LIBE/$menuitem/$menu_program" esac - tag=$( f_substr "$DIALOG_MENU_TAGS" $index 1 ) + f_substr -v tag "$DIALOG_MENU_TAGS" $index 1 setvar "menu_program$tag" "$menu_program" f_shell_escape "$menu_title" menu_title diff --git a/usr.sbin/bsdconfig/networking/share/media.subr b/usr.sbin/bsdconfig/networking/share/media.subr index 1cb77f802948..028944b5eb6b 100644 --- a/usr.sbin/bsdconfig/networking/share/media.subr +++ b/usr.sbin/bsdconfig/networking/share/media.subr @@ -180,18 +180,18 @@ f_dialog_menu_media_options() f_ifconfig_media $interface | \ ( index=1 - echo "'$( f_substr "$DIALOG_MENU_TAGS" 
$index 1 )'" - echo "'$opt_none'" + f_substr -v tagn "$DIALOG_MENU_TAGS" $index 1 + echo "'$tagn' '$opt_none'" index=$(( $index + 1 )) - echo "'$( f_substr "$DIALOG_MENU_TAGS" $index 1 )'" - echo "'$opt_cust'" + f_substr -v tagn "$DIALOG_MENU_TAGS" $index 1 + echo "'$tagn' '$opt_cust'" index=$(( $index + 1 )) while read media_options; do [ $index -lt ${#DIALOG_MENU_TAGS} ] || break - echo "'$( f_substr "$DIALOG_MENU_TAGS" $index 1 )'" - echo "'$media_options'" + f_substr -v tagn "$DIALOG_MENU_TAGS" $index 1 + echo "'$tagn' '$media_options'" index=$(( $index + 1 )) done ) diff --git a/usr.sbin/bsdconfig/networking/share/resolv.subr b/usr.sbin/bsdconfig/networking/share/resolv.subr index fc42e1265c9c..779863c9981d 100644 --- a/usr.sbin/bsdconfig/networking/share/resolv.subr +++ b/usr.sbin/bsdconfig/networking/share/resolv.subr @@ -441,7 +441,7 @@ f_dialog_menu_nameservers() for ns in $nameservers; do [ $index -lt ${#DIALOG_MENU_TAGS} ] || break - tag=$( f_substr "$DIALOG_MENU_TAGS" $index 1 ) + f_substr -v tag "$DIALOG_MENU_TAGS" $index 1 echo "'$tag nameserver' '$ns'" index=$(( $index + 1 )) done diff --git a/usr.sbin/bsdconfig/share/device.subr b/usr.sbin/bsdconfig/share/device.subr index d95684dc702e..97c3ecd99586 100644 --- a/usr.sbin/bsdconfig/share/device.subr +++ b/usr.sbin/bsdconfig/share/device.subr @@ -1,6 +1,6 @@ if [ ! "$_DEVICE_SUBR" ]; then _DEVICE_SUBR=1 # -# Copyright (c) 2012-2014 Devin Teske +# Copyright (c) 2012-2016 Devin Teske # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -1032,7 +1032,7 @@ f_device_is_active() # f_device_find() { - local OPTIND OPTARG flag only_one= + local OPTIND=1 OPTARG flag only_one= while getopts 1 flag; do case "$flag" in 1) only_one=1 ;; diff --git a/usr.sbin/bsdconfig/share/strings.subr b/usr.sbin/bsdconfig/share/strings.subr index e0cd7e0a1d5a..1a4fe6ef081c 100644 --- a/usr.sbin/bsdconfig/share/strings.subr +++ b/usr.sbin/bsdconfig/share/strings.subr @@ -1,6 +1,6 @@ if [ ! "$_STRINGS_SUBR" ]; then _STRINGS_SUBR=1 # -# Copyright (c) 2006-2013 Devin Teske +# Copyright (c) 2006-2016 Devin Teske # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -52,44 +52,63 @@ VALID_VARNAME_CHARS="0-9ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_" ############################################################ FUNCTIONS -# f_substr "$string" $start [$length] +# f_isinteger $arg # -# Simple wrapper to awk(1)'s `substr' function. +# Returns true if argument is a positive/negative whole integer. +# +f_isinteger() +{ + local arg="${1#-}" + [ "${arg:-x}" = "${arg%[!0-9]*}" ] +} + +# f_substr [-v $var_to_set] $string $start [$length] +# +# Similar to awk(1)'s substr(), return length substring of string that begins +# at start position counted from 1. # f_substr() { - local string="$1" start="${2:-0}" len="${3:-0}" - echo "$string" | awk "{ print substr(\$0, $start, $len) }" -} + local OPTIND=1 OPTARG __flag __var_to_set= + while getopts v: __flag; do + case "$__flag" in + v) __var_to_set="$OPTARG" ;; + esac + done + shift $(( $OPTIND - 1 )) + + local __tmp="$1" __start="${2:-1}" __size="$3" + local __tbuf __tbuf_len __trim __trimq -# f_snprintf $var_to_set $size $format [$arguments ...] -# -# Similar to snprintf(3), write at most $size number of bytes into $var_to_set -# using printf(1) syntax (`$format [$arguments ...]'). The value of $var_to_set -# is NULL unless at-least one byte is stored from the output. 
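
The rewritten f_substr above sheds its awk(1) dependency by trimming characters with plain parameter expansion: a pattern of N `?' wildcards removed via ${var#pattern} discards exactly N leading characters, and a mirrored ${var%pattern} pass then trims the tail down to the requested length. A minimal standalone sketch of that idea follows (not the library function itself; the real f_substr builds the wildcard mask in larger chunks so a big offset does not cost one loop pass per character):

    # Drop the first $n characters of $string without forking awk(1).
    string="FreeBSD"
    n=4
    mask=
    i=0
    while [ $i -lt $n ]; do
            mask="${mask}?"          # one `?' wildcard per character to drop
            i=$(( $i + 1 ))
    done
    echo "${string#$mask}"           # prints `BSD'
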
-# -f_snprintf() -{ - local __funcname=f_snprintf - local __var_to_set="$1" __size="$2" - shift 2 # var_to_set size - - if [ "$__size" -eq 0 ] 2> /dev/null; then - setvar "$__var_to_set" "" + if [ ! "$__tmp" ]; then + [ "$__var_to_set" ] && setvar "$__var_to_set" "" return ${SUCCESS:-0} - elif [ $? -ge 2 ] || [ $__size -lt 0 ]; then - setvar "$__var_to_set" "" - echo "$__funcname: invalid size argument \`__size'" >&2 + fi + [ "$__start" -ge 1 ] 2> /dev/null || __start=1 + if ! [ "${__size:-1}" -ge 1 ] 2> /dev/null; then + [ "$__var_to_set" ] && setvar "$__var_to_set" "" return ${FAILURE:-1} fi - local __f_snprintf_tmp - f_sprintf __f_snprintf_tmp "$@" + __trim=$(( $__start - 1 )) + while [ $__trim -gt 0 ]; do + __tbuf="?" + __tbuf_len=1 + while [ $__tbuf_len -lt $(( $__trim / $__tbuf_len )) ]; do + __tbuf="$__tbuf?" + __tbuf_len=$(( $__tbuf_len + 1 )) + done + __trimq=$(( $__trim / $__tbuf_len )) + __trim=$(( $__trim - $__tbuf_len * $__trimq )) + while [ $__trimq -gt 0 ]; do + __tmp="${__tmp#$__tbuf}" + __trimq=$(( $__trimq - 1 )) + done + done - local __tmp_size=${#__f_snprintf_tmp} - local __trim=$(( $__tmp_size - $__size )) __trimq - local __tbuf __tbuf_len + local __tmp_size=${#__tmp} local __mask __mask_len + __trim=$(( $__tmp_size - ${__size:-$__tmp_size} )) while [ $__trim -gt 0 ]; do __tbuf="?" __tbuf_len=1 @@ -102,11 +121,11 @@ f_snprintf() __trimq=$(( $__trim / $__tbuf_len )) __trim=$(( $__trim - $__tbuf_len * $__trimq )) while [ $__trimq -gt 0 ]; do - __f_snprintf_tmp="${__f_snprintf_tmp%$__tbuf}" + __tmp="${__tmp%$__tbuf}" __trimq=$(( $__trimq - 1 )) done else - __mask="$__f_snprintf_tmp" + __mask="$__tmp" while [ $__tbuf_len -lt $(( $__size / $__tbuf_len )) ] do __tbuf="$__tbuf?" @@ -123,10 +142,11 @@ f_snprintf() __mask="${__mask#$__tbuf}" __trimq=$(( $__trimq - 1 )) done - __f_snprintf_tmp="${__f_snprintf_tmp%"$__mask"}" + __tmp="${__tmp%"$__mask"}" fi done - setvar "$__var_to_set" "$__f_snprintf_tmp" + + setvar "$__var_to_set" "$__tmp" } # f_sprintf $var_to_set $format [$arguments ...] @@ -138,7 +158,40 @@ f_sprintf() { local __var_to_set="$1" shift 1 # var_to_set - eval "$__var_to_set"=\$\( printf -- \"\$@\" \) + + case "$BASH_VERSION" in + 3.1*|4.*) + local __tmp + printf -v __tmp "$@" + eval "$__var_to_set"=\"\${__tmp%\$NL}\" + ;; + *) eval "$__var_to_set"=\$\( printf -- \"\$@\" \) + esac +} + +# f_vsprintf $var_to_set $format $format_args +# +# Similar to vsprintf(3), write a string into $var_to_set using printf(1) +# syntax (`$format $format_args'). +# +f_vsprintf() +{ + eval f_sprintf \"\$1\" \"\$2\" $3 +} + +# f_snprintf $var_to_set $size $format [$arguments ...] +# +# Similar to snprintf(3), write at most $size number of bytes into $var_to_set +# using printf(1) syntax (`$format [$arguments ...]'). +# +f_snprintf() +{ + local __var_to_set="$1" __size="$2" + shift 2 # var_to_set size + + local __f_snprintf_tmp + f_sprintf __f_snprintf_tmp "$@" + f_substr "$__var_to_set" "$__f_snprintf_tmp" 1 "$__size" } # f_vsnprintf $var_to_set $size $format $format_args @@ -176,140 +229,6 @@ f_vsnprintf() eval f_snprintf \"\$1\" \"\$2\" \"\$3\" $4 } -# f_vsprintf $var_to_set $format $format_args -# -# Similar to vsprintf(3), write a string into $var_to_set using printf(1) -# syntax (`$format $format_args'). -# -f_vsprintf() -{ - eval f_sprintf \"\$1\" \"\$2\" $3 -} - -# f_longest_line_length -# -# Simple wrapper to an awk(1) script to print the length of the longest line of -# input (read from stdin). 
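
The new f_sprintf special-cases bash so printf can write straight into the destination variable rather than going through a command substitution, saving a fork per call. A rough standalone sketch of that dispatch, mirroring the version test used above (assign_printf and `result' are illustrative names, not bsdconfig APIs):

    assign_printf()
    {
            local var="$1"
            shift
            case "$BASH_VERSION" in
            3.1*|4.*)
                    # bash: printf -v assigns directly, no subshell needed
                    printf -v "$var" "$@"
                    ;;
            *)
                    # portable sh: command substitution fallback (this also
                    # strips trailing newlines from the result)
                    eval "$var"=\$\( printf -- \"\$@\" \)
                    ;;
            esac
    }

    assign_printf result "%s-%04d" demo 7    # result=demo-0007

The library version additionally trims one trailing newline on the printf -v path so both branches agree when the format string ends in \n.
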
Supports the newline escape-sequence `\n' for -# splitting a single line into multiple lines. -# -f_longest_line_length_awk=' -BEGIN { longest = 0 } -{ - if (split($0, lines, /\\n/) > 1) - { - for (n in lines) - { - len = length(lines[n]) - longest = ( len > longest ? len : longest ) - } - } - else - { - len = length($0) - longest = ( len > longest ? len : longest ) - } -} -END { print longest } -' -f_longest_line_length() -{ - awk "$f_longest_line_length_awk" -} - -# f_number_of_lines -# -# Simple wrapper to an awk(1) script to print the number of lines read from -# stdin. Supports newline escape-sequence `\n' for splitting a single line into -# multiple lines. -# -f_number_of_lines_awk=' -BEGIN { num_lines = 0 } -{ - num_lines += split(" "$0, unused, /\\n/) -} -END { print num_lines } -' -f_number_of_lines() -{ - awk "$f_number_of_lines_awk" -} - -# f_isinteger $arg -# -# Returns true if argument is a positive/negative whole integer. -# -f_isinteger() -{ - local arg="${1#-}" - [ "${arg:-x}" = "${arg%[!0-9]*}" ] -} - -# f_uriencode [$text] -# -# Encode $text for the purpose of embedding safely into a URL. Non-alphanumeric -# characters are converted to `%XX' sequence where XX represents the hexa- -# decimal ordinal of the non-alphanumeric character. If $text is missing, data -# is instead read from standard input. -# -f_uriencode_awk=' -BEGIN { - output = "" - for (n = 0; n < 256; n++) pack[sprintf("%c", n)] = sprintf("%%%02x", n) -} -{ - sline = "" - slen = length($0) - for (n = 1; n <= slen; n++) { - char = substr($0, n, 1) - if ( char !~ /^[[:alnum:]_]$/ ) char = pack[char] - sline = sline char - } - output = output ( output ? "%0a" : "" ) sline -} -END { print output } -' -f_uriencode() -{ - if [ $# -gt 0 ]; then - echo "$1" | awk "$f_uriencode_awk" - else - awk "$f_uriencode_awk" - fi -} - -# f_uridecode [$text] -# -# Decode $text from a URI. Encoded characters are converted from their `%XX' -# sequence into original unencoded ASCII sequences. If $text is missing, data -# is instead read from standard input. -# -f_uridecode_awk=' -BEGIN { for (n = 0; n < 256; n++) chr[n] = sprintf("%c", n) } -{ - sline = "" - slen = length($0) - for (n = 1; n <= slen; n++) - { - seq = substr($0, n, 3) - if ( seq ~ /^%[[:xdigit:]][[:xdigit:]]$/ ) { - hex = substr(seq, 2, 2) - sline = sline chr[sprintf("%u", "0x"hex)] - n += 2 - } else - sline = sline substr(seq, 1, 1) - } - print sline -} -' -f_uridecode() -{ - if [ $# -gt 0 ]; then - echo "$1" | awk "$f_uridecode_awk" - else - awk "$f_uridecode_awk" - fi -} - # f_replaceall $string $find $replace [$var_to_set] # # Replace all occurrences of $find in $string with $replace. If $var_to_set is @@ -493,6 +412,120 @@ f_expand_number() fi } +# f_longest_line_length +# +# Simple wrapper to an awk(1) script to print the length of the longest line of +# input (read from stdin). Supports the newline escape-sequence `\n' for +# splitting a single line into multiple lines. +# +f_longest_line_length_awk=' +BEGIN { longest = 0 } +{ + if (split($0, lines, /\\n/) > 1) + { + for (n in lines) + { + len = length(lines[n]) + longest = ( len > longest ? len : longest ) + } + } + else + { + len = length($0) + longest = ( len > longest ? len : longest ) + } +} +END { print longest } +' +f_longest_line_length() +{ + awk "$f_longest_line_length_awk" +} + +# f_number_of_lines +# +# Simple wrapper to an awk(1) script to print the number of lines read from +# stdin. Supports newline escape-sequence `\n' for splitting a single line into +# multiple lines. 
+# +f_number_of_lines_awk=' +BEGIN { num_lines = 0 } +{ + num_lines += split(" "$0, unused, /\\n/) +} +END { print num_lines } +' +f_number_of_lines() +{ + awk "$f_number_of_lines_awk" +} + +# f_uriencode [$text] +# +# Encode $text for the purpose of embedding safely into a URL. Non-alphanumeric +# characters are converted to `%XX' sequence where XX represents the hexa- +# decimal ordinal of the non-alphanumeric character. If $text is missing, data +# is instead read from standard input. +# +f_uriencode_awk=' +BEGIN { + output = "" + for (n = 0; n < 256; n++) pack[sprintf("%c", n)] = sprintf("%%%02x", n) +} +{ + sline = "" + slen = length($0) + for (n = 1; n <= slen; n++) { + char = substr($0, n, 1) + if ( char !~ /^[[:alnum:]_]$/ ) char = pack[char] + sline = sline char + } + output = output ( output ? "%0a" : "" ) sline +} +END { print output } +' +f_uriencode() +{ + if [ $# -gt 0 ]; then + echo "$1" | awk "$f_uriencode_awk" + else + awk "$f_uriencode_awk" + fi +} + +# f_uridecode [$text] +# +# Decode $text from a URI. Encoded characters are converted from their `%XX' +# sequence into original unencoded ASCII sequences. If $text is missing, data +# is instead read from standard input. +# +f_uridecode_awk=' +BEGIN { for (n = 0; n < 256; n++) chr[n] = sprintf("%c", n) } +{ + sline = "" + slen = length($0) + for (n = 1; n <= slen; n++) + { + seq = substr($0, n, 3) + if ( seq ~ /^%[[:xdigit:]][[:xdigit:]]$/ ) { + hex = substr(seq, 2, 2) + sline = sline chr[sprintf("%u", "0x"hex)] + n += 2 + } else + sline = sline substr(seq, 1, 1) + } + print sline +} +' +f_uridecode() +{ + if [ $# -gt 0 ]; then + echo "$1" | awk "$f_uridecode_awk" + else + awk "$f_uridecode_awk" + fi +} + ############################################################ MAIN f_dprintf "%s: Successfully loaded." strings.subr diff --git a/usr.sbin/bsdinstall/scripts/zfsboot b/usr.sbin/bsdinstall/scripts/zfsboot index 5c3fc8011aa5..ed93c370bf2b 100755 --- a/usr.sbin/bsdinstall/scripts/zfsboot +++ b/usr.sbin/bsdinstall/scripts/zfsboot @@ -109,7 +109,12 @@ f_include $BSDCFG_SHARE/variable.subr # # Default partitioning scheme to use on disks # -: ${ZFSBOOT_PARTITION_SCHEME:=GPT} +: ${ZFSBOOT_PARTITION_SCHEME:=} + +# +# Default partitioning scheme to use on disks +# +: ${ZFSBOOT_BOOT_TYPE:=} # # How much swap to put on each block device in the boot zpool @@ -193,6 +198,7 @@ GPART_ADD_ALIGN_LABEL='gpart add %s -l %s -t %s "%s"' GPART_ADD_ALIGN_LABEL_WITH_SIZE='gpart add %s -l %s -t %s -s %s "%s"' GPART_BOOTCODE='gpart bootcode -b "%s" "%s"' GPART_BOOTCODE_PART='gpart bootcode -b "%s" -p "%s" -i %s "%s"' +GPART_BOOTCODE_PARTONLY='gpart bootcode -p "%s" -i %s "%s"' GPART_CREATE='gpart create -s %s "%s"' GPART_DESTROY_F='gpart destroy -F "%s"' GPART_SET_ACTIVE='gpart set -a active -i %s "%s"' @@ -297,7 +303,6 @@ msg_swap_size="Swap Size" msg_swap_size_help="Customize how much swap space is allocated to each selected disk" msg_swap_toosmall="The selected swap size (%s) is to small. Please enter a value greater than 100MB or enter 0 for no swap" msg_these_disks_are_too_small="These disks are too small given the amount of requested\nswap (%s) and/or geli(8) (%s) partitions, which would\ntake 50%% or more of each of the following selected disk\ndevices (not recommended):\n\n %s\n\nRecommend changing partition size(s) and/or selecting a\ndifferent set of devices." -msg_uefi_not_supported="The FreeBSD UEFI loader does not currently support booting root-on-ZFS. Your system will need to boot in legacy (CSM) mode.\nDo you want to continue?" 
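
A short usage sketch for the f_uriencode/f_uridecode helpers relocated to the end of strings.subr above; the include preamble is the usual bsdconfig boilerplate and the /usr/share/bsdconfig path is an assumption here, not part of this change:

    BSDCFG_SHARE="/usr/share/bsdconfig"
    . $BSDCFG_SHARE/common.subr || exit 1
    f_include $BSDCFG_SHARE/strings.subr

    f_uriencode "hello world"       # prints hello%20world
    f_uridecode "hello%20world"     # prints hello world
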
msg_unable_to_get_disk_capacity="Unable to get disk capacity of \`%s'" msg_unsupported_partition_scheme="%s is an unsupported partition scheme" msg_user_cancelled="User Cancelled." @@ -345,7 +350,7 @@ dialog_menu_main() 'E $msg_encrypt_disks' '$usegeli' '$msg_encrypt_disks_help' 'P $msg_partition_scheme' - '$ZFSBOOT_PARTITION_SCHEME' + '$ZFSBOOT_PARTITION_SCHEME ($ZFSBOOT_BOOT_TYPE)' '$msg_partition_scheme_help' 'S $msg_swap_size' '$ZFSBOOT_SWAP_SIZE' '$msg_swap_size_help' @@ -695,48 +700,6 @@ dialog_menu_layout() return $DIALOG_OK } -# dialog_uefi_prompt -# -# Confirm that the user wants to continue with the installation on a BIOS -# system when they have booted with UEFI -# -dialog_uefi_prompt() -{ - local title="$DIALOG_TITLE" - local btitle="$DIALOG_BACKTITLE" - local prompt # Calculated below - local hline="$hline_arrows_tab_enter" - - local height=8 width=50 prefix=" " - local plen=${#prefix} list= line= - local max_width=$(( $width - 3 - $plen )) - - local yes no defaultno extra_args format - if [ "$USE_XDIALOG" ]; then - yes=ok no=cancel defaultno=default-no - extra_args="--wrap --left" - format="$msg_uefi_not_supported" - else - yes=yes no=no defaultno=defaultno - extra_args="--cr-wrap" - format="$msg_uefi_not_supported" - fi - - # Add height for Xdialog(1) - [ "$USE_XDIALOG" ] && height=$(( $height + $height / 5 + 3 )) - - prompt=$( printf "$format" ) - f_dprintf "%s: UEFI prompt" "$0" - $DIALOG \ - --title "$title" \ - --backtitle "$btitle" \ - --hline "$hline" \ - --$yes-label "$msg_yes" \ - --$no-label "$msg_no" \ - $extra_args \ - --yesno "$prompt" $height $width -} - # zfs_create_diskpart $disk $index # # For each block device to be used in the zpool, rather than just create the @@ -848,14 +811,25 @@ zfs_create_diskpart() fi # - # 2. Add small freebsd-boot partition labeled `boot#' + # 2. Add small freebsd-boot or efi partition # - f_eval_catch $funcname gpart "$GPART_ADD_ALIGN_LABEL_WITH_SIZE" \ - "$align_small" gptboot$index freebsd-boot 512k $disk || - return $FAILURE - f_eval_catch $funcname gpart "$GPART_BOOTCODE_PART" \ - /boot/pmbr /boot/gptzfsboot 1 $disk || - return $FAILURE + if [ "$ZFSBOOT_BOOT_TYPE" = "UEFI" ]; then + f_eval_catch $funcname gpart \ + "$GPART_ADD_ALIGN_LABEL_WITH_SIZE" \ + "$align_small" efiboot$index efi 800k $disk || + return $FAILURE + f_eval_catch $funcname gpart "$GPART_BOOTCODE_PARTONLY" \ + /boot/boot1.efifat 1 $disk || + return $FAILURE + else + f_eval_catch $funcname gpart \ + "$GPART_ADD_ALIGN_LABEL_WITH_SIZE" \ + "$align_small" gptboot$index freebsd-boot \ + 512k $disk || return $FAILURE + f_eval_catch $funcname gpart "$GPART_BOOTCODE_PART" \ + /boot/pmbr /boot/gptzfsboot 1 $disk || + return $FAILURE + fi # NB: zpool will use the `zfs#' GPT labels bootpart=p2 swappart=p2 targetpart=p2 @@ -1463,18 +1437,16 @@ f_dprintf "BSDINSTALL_TMPETC=[%s]" "$BSDINSTALL_TMPETC" f_dprintf "FSTAB_FMT=[%s]" "$FSTAB_FMT" # -# If the system was booted with UEFI, warn the user that FreeBSD can't do -# ZFS with UEFI yet +# If the system was booted with UEFI, set the default boot type to UEFI # -if f_interactive; then - bootmethod=$( sysctl -n machdep.bootmethod ) - f_dprintf "machdep.bootmethod=[%s]" "$bootmethod" - if [ "$bootmethod" != "BIOS" ]; then - dialog_uefi_prompt - retval=$? 
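
Substituted by hand, the two zfs_create_diskpart branches above boil down to gpart(8) sequences along these lines; the disk name ada0, partition index 0, and the omission of the alignment arguments are illustrative assumptions:

    # ZFSBOOT_BOOT_TYPE=UEFI: small EFI system partition holding boot1.efifat
    gpart add -l efiboot0 -t efi -s 800k ada0
    gpart bootcode -p /boot/boot1.efifat -i 1 ada0

    # ZFSBOOT_BOOT_TYPE=BIOS: traditional freebsd-boot partition
    gpart add -l gptboot0 -t freebsd-boot -s 512k ada0
    gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ada0
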
- f_dprintf "uefi_prompt=[%s]" "$retval" - [ $retval -eq $DIALOG_OK ] || f_die - fi +bootmethod=$( sysctl -n machdep.bootmethod ) +f_dprintf "machdep.bootmethod=[%s]" "$bootmethod" +if [ "$bootmethod" = "UEFI" ]; then + : ${ZFSBOOT_BOOT_TYPE:=UEFI} + : ${ZFSBOOT_PARTITION_SCHEME:=GPT} +else + : ${ZFSBOOT_BOOT_TYPE:=BIOS} + : ${ZFSBOOT_PARTITION_SCHEME:=GPT} fi # @@ -1596,15 +1568,22 @@ while :; do fi ;; ?" $msg_partition_scheme") - # Toggle between GPT and MBR - if [ "$ZFSBOOT_PARTITION_SCHEME" = GPT ]; then - ZFSBOOT_PARTITION_SCHEME=MBR - elif [ "$ZFSBOOT_PARTITION_SCHEME" = MBR ]; then + # Toggle between GPT (BIOS), GPT (UEFI) and MBR + if [ "$ZFSBOOT_PARTITION_SCHEME" = "GPT" -a "$ZFSBOOT_BOOT_TYPE" = "BIOS" ]; then + ZFSBOOT_PARTITION_SCHEME="GPT" + ZFSBOOT_BOOT_TYPE="UEFI" + elif [ "$ZFSBOOT_PARTITION_SCHEME" = "GPT" ]; then + ZFSBOOT_PARTITION_SCHEME="MBR" + ZFSBOOT_BOOT_TYPE="BIOS" + elif [ "$ZFSBOOT_PARTITION_SCHEME" = "MBR" ]; then ZFSBOOT_PARTITION_SCHEME="GPT + Active" + ZFSBOOT_BOOT_TYPE="BIOS" elif [ "$ZFSBOOT_PARTITION_SCHEME" = "GPT + Active" ]; then ZFSBOOT_PARTITION_SCHEME="GPT + Lenovo Fix" + ZFSBOOT_BOOT_TYPE="BIOS" else - ZFSBOOT_PARTITION_SCHEME=GPT + ZFSBOOT_PARTITION_SCHEME="GPT" + ZFSBOOT_BOOT_TYPE="BIOS" fi ;; ?" $msg_swap_size") diff --git a/usr.sbin/sysrc/sysrc b/usr.sbin/sysrc/sysrc index e384dff93f6c..aa57f1beb7f2 100644 --- a/usr.sbin/sysrc/sysrc +++ b/usr.sbin/sysrc/sysrc @@ -790,7 +790,7 @@ while [ $# -gt 0 ]; do delim="${add%"${add#?}"}" # first character oldIFS="$IFS" case "$delim" in - ""|[$IFS]|[a-zA-Z0-9]) delim=" " ;; + ""|[$IFS]|[a-zA-Z0-9./]) delim=" " ;; *) IFS="$delim" esac new="$before" @@ -812,7 +812,7 @@ while [ $# -gt 0 ]; do delim="${remove%"${remove#?}"}" # first character oldIFS="$IFS" case "$delim" in - ""|[$IFS]|[a-zA-Z0-9]) delim=" " ;; + ""|[$IFS]|[a-zA-Z0-9./]) delim=" " ;; *) IFS="$delim" esac new= diff --git a/usr.sbin/sysrc/sysrc.8 b/usr.sbin/sysrc/sysrc.8 index dbd0e4b3c326..3996b1f9482c 100644 --- a/usr.sbin/sysrc/sysrc.8 +++ b/usr.sbin/sysrc/sysrc.8 @@ -1,4 +1,4 @@ -.\" Copyright (c) 2011-2015 Devin Teske +.\" Copyright (c) 2011-2016 Devin Teske .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without @@ -24,7 +24,7 @@ .\" .\" $FreeBSD$ .\" -.Dd September 12, 2015 +.Dd February 2, 2016 .Dt SYSRC 8 .Os .Sh NAME @@ -255,7 +255,7 @@ When using the .Ql key+=value syntax to add items to existing values, the first character of the value is taken as the delimiter separating items -.Pq usually Qo \ Qc or Qo , Qc . +.Pq usually Qo (space) Qc or Qo , Qc . For example, in the following statement: .Bl -item -offset indent .It @@ -275,6 +275,10 @@ it is added .Pp For convenience, if the first character is alpha-numeric .Pq letters A-Z, a-z, or numbers 0-9 , +dot +.Pq Li . , +or slash +.Pq Li / , .Nm uses the default setting of whitespace as separator. For example, the above and below statements are equivalent since @@ -329,6 +333,10 @@ it is removed .Pp For convenience, if the first character is alpha-numeric .Pq letters A-Z, a-z, or numbers 0-9 , +dot +.Pq Li . , +or slash +.Pq Li / , .Nm uses the default setting of whitespace as separator. For example, the above and below statements are equivalent since