Merge ^/head r325505 through r325662.

Hans Petter Selasky 2017-11-10 14:46:50 +00:00
commit f819030092
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/bsd_rdma_4_9/; revision=325663
164 changed files with 7826 additions and 3220 deletions

View File

@ -191,10 +191,11 @@ PATH= /sbin:/bin:/usr/sbin:/usr/bin
MAKEOBJDIRPREFIX?= /usr/obj
_MAKEOBJDIRPREFIX!= /usr/bin/env -i PATH=${PATH} ${MAKE} MK_AUTO_OBJ=no \
${.MAKEFLAGS:MMAKEOBJDIRPREFIX=*} __MAKE_CONF=${__MAKE_CONF} \
SRCCONF=${SRCCONF} \
-f /dev/null -V MAKEOBJDIRPREFIX dummy
.if !empty(_MAKEOBJDIRPREFIX)
.error MAKEOBJDIRPREFIX can only be set in environment, not as a global\
(in make.conf(5)) or command-line variable.
(in make.conf(5) or src.conf(5)) or command-line variable.
.endif
# We often need to use the tree's version of make to build it.
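A quick illustration of the rule the check above enforces (paths are illustrative): MAKEOBJDIRPREFIX is honored only when it arrives via the environment, so the first form works and the second trips the .error:

	env MAKEOBJDIRPREFIX=/usr/obj make buildworld	# accepted
	make buildworld MAKEOBJDIRPREFIX=/usr/obj	# rejected by the .error above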
@ -553,12 +554,13 @@ universe_${target}_kernels: universe_${target}_worlds .PHONY
universe_${target}_kernels: universe_${target}_prologue .MAKE .PHONY
.if exists(${KERNSRCDIR}/${target}/conf/NOTES)
@(cd ${KERNSRCDIR}/${target}/conf && env __MAKE_CONF=/dev/null \
${SUB_MAKE} LINT > ${.CURDIR}/_.${target}.makeLINT 2>&1 || \
${SUB_MAKE} -DNO_OBJ LINT \
> ${.CURDIR}/_.${target}.makeLINT 2>&1 || \
(echo "${target} 'make LINT' failed," \
"check _.${target}.makeLINT for details"| ${MAKEFAIL}))
.endif
@cd ${.CURDIR}; ${SUB_MAKE} ${.MAKEFLAGS} TARGET=${target} \
universe_kernels
universe_kernels MK_AUTO_OBJ=no
.endif # !MAKE_JUST_WORLDS
# Tell the user the worlds and kernels have completed

View File

@ -988,6 +988,14 @@ buildenvvars: .PHONY
.endif
.endif
BUILDENV_DIR?= ${.CURDIR}
#
# Note: make will report any errors the shell reports. This can
# be odd if the last command in an interactive shell generates an
# error or is terminated by SIGINT. These reported errors look bad,
# but are harmless. Allowing them also allows BUILDENV_SHELL to
# be a complex command whose status will be returned to the caller.
# Some scripts in tools rely on this behavior to report build errors.
#
buildenv: .PHONY
@echo Entering world for ${TARGET_ARCH}:${TARGET}
.if ${BUILDENV_SHELL:M*zsh*}
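Because the shell's exit status is returned to the caller, BUILDENV_SHELL can be a one-shot command whose failure a script can detect. A minimal sketch (the subdirectory is illustrative):

	make buildenv BUILDENV_SHELL='make -C bin/ls' || echo 'build of bin/ls failed'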
@ -2388,6 +2396,9 @@ lib/libcasper__L: lib/libnv__L
lib/liblzma__L: lib/libthr__L
_generic_libs= ${_cddl_lib} gnu/lib ${_kerberos5_lib} lib ${_secure_lib} usr.bin/lex/lib ${_ofed_lib}
.if ${MK_IPFILTER} != "no"
_generic_libs+= sbin/ipf/libipf
.endif
.for _DIR in ${LOCAL_LIB_DIRS}
.if exists(${.CURDIR}/${_DIR}/Makefile) && empty(_generic_libs:M${_DIR})
_generic_libs+= ${_DIR}
@ -2768,7 +2779,8 @@ BW_CANONICALOBJDIR:=${OBJROOT}
.endif
.endif
cleanworld cleanuniverse: .PHONY
.if !empty(BW_CANONICALOBJDIR) && exists(${BW_CANONICALOBJDIR})
.if !empty(BW_CANONICALOBJDIR) && exists(${BW_CANONICALOBJDIR}) && \
${.CURDIR:tA} != ${BW_CANONICALOBJDIR:tA}
-rm -rf ${BW_CANONICALOBJDIR}*
-chflags -R 0 ${BW_CANONICALOBJDIR}
rm -rf ${BW_CANONICALOBJDIR}*

View File

@ -43,8 +43,8 @@ OLD_FILES+=sbin/badsect
OLD_FILES+=rescue/badsect
OLD_FILES+=usr/share/man/man8/badsect.8.gz
# 20171105: fixing lib/libclang_rt CRTARCH for arm:armv[67].
.if ${MACHINE} == "arm"
.if ${MACHINE_ARCH:Marmv[67]*} != "" && ${CPUTYPE:M*soft*} == ""
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
(!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-preinit-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.a
OLD_LIBS+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.asan-arm.so
@ -56,7 +56,6 @@ OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.stats_client-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone-arm.a
OLD_FILES+=usr/lib/clang/5.0.0/lib/freebsd/libclang_rt.ubsan_standalone_cxx-arm.a
.endif
.endif
# 20171104: libcap_random should be in /lib not in /usr/lib
OLD_LIBS+=usr/lib/libcap_random.so.0
# 20171104: Casper can work only as shared library

View File

@ -122,7 +122,7 @@ ifn_vnet0()
#
ifconfig_up()
{
local _cfg _ipv6_opts ifconfig_args
local _cfg _ifconfig_descr _ipv6_opts ifconfig_args
_cfg=1
# Make sure lo0 always comes up.
@ -214,6 +214,11 @@ ifconfig_up()
ifalias $1 link alias
ifalias $1 ether alias
_ifconfig_descr=`get_if_var $1 ifconfig_IF_descr`
if [ -n "${_ifconfig_descr}" ]; then
${IFCONFIG_CMD} $1 description "${_ifconfig_descr}"
fi
if wpaif $1; then
/etc/rc.d/wpa_supplicant start $1
_cfg=0 # XXX: not sure this should count

View File

@ -356,7 +356,7 @@ getumask(void)
* security.bsd.unprivileged_proc_debug is set to 0.
*/
len = sizeof(smask);
if (sysctl((int[4]){ CTL_KERN, KERN_PROC, KERN_PROC_UMASK, getpid() },
if (sysctl((int[4]){ CTL_KERN, KERN_PROC, KERN_PROC_UMASK, 0 },
4, &smask, &len, NULL, 0) == 0)
return (smask);
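The same MIB can be exercised from a standalone program; a pid of 0 in the fourth element selects the calling process, which is what the change above relies on. A minimal sketch:

	#include <sys/param.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		u_short smask;
		size_t len = sizeof(smask);

		/* pid 0 selects the current process for KERN_PROC_UMASK. */
		if (sysctl((int[4]){ CTL_KERN, KERN_PROC, KERN_PROC_UMASK, 0 },
		    4, &smask, &len, NULL, 0) == 0)
			printf("umask: %03o\n", smask);
		return (0);
	}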

View File

@ -4,7 +4,7 @@ SHLIBDIR?= /lib
.include <src.opts.mk>
PACKAGE=${LIB}
PACKAGE=casper
.if ${MK_CASPER} != "no"
SHLIB= casper

View File

@ -2,12 +2,12 @@
.include <bsd.compiler.mk>
.if ${MACHINE} == "arm"
# armv[67] is a bit special since we allow a soft-floating version via
# CPUTYPE matching *soft*. This variant may not actually work though.
.if ${MACHINE_ARCH:Marmv[67]*} != "" && \
(defined(CPUTYPE) && ${CPUTYPE:M*soft*} == "")
(!defined(CPUTYPE) || ${CPUTYPE:M*soft*} == "")
CRTARCH= armhf
.endif
.endif
CRTARCH?= ${MACHINE_CPUARCH:C/amd64/x86_64/}
CRTSRC= ${SRCTOP}/contrib/compiler-rt

View File

@ -15,7 +15,7 @@ MLINKS+= netgraph.3 NgMkSockNode.3
MLINKS+= netgraph.3 NgNameNode.3
MLINKS+= netgraph.3 NgSendMsg.3
MLINKS+= netgraph.3 NgSendAsciiMsg.3
MLINKS+= netgraph.3 NgSendMsgReply.3
MLINKS+= netgraph.3 NgSendReplyMsg.3
MLINKS+= netgraph.3 NgRecvMsg.3
MLINKS+= netgraph.3 NgAllocRecvMsg.3
MLINKS+= netgraph.3 NgRecvAsciiMsg.3

View File

@ -43,7 +43,7 @@
.Nm NgNameNode ,
.Nm NgSendMsg ,
.Nm NgSendAsciiMsg ,
.Nm NgSendMsgReply ,
.Nm NgSendReplyMsg ,
.Nm NgRecvMsg ,
.Nm NgAllocRecvMsg ,
.Nm NgRecvAsciiMsg ,
@ -70,7 +70,7 @@
.Ft int
.Fn NgSendAsciiMsg "int cs" "const char *path" "const char *fmt" ...
.Ft int
.Fo NgSendMsgReply
.Fo NgSendReplyMsg
.Fa "int cs" "const char *path" "struct ng_mesg *msg" "const void *arg"
.Fa "size_t arglen"
.Fc
@ -162,7 +162,7 @@ header is returned.
This value is typically used to associate replies.
.Pp
Use
.Fn NgSendMsgReply
.Fn NgSendReplyMsg
to send a reply to a previously received control message.
The original message header should be pointed to by
.Fa msg .
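.Pp
A hedged sketch of replying to a received control message; the
socket and payload here are placeholders, and
.Fn NgAllocRecvMsg
from the same library is used to receive:
.Bd -literal -offset indent
#include <sys/types.h>
#include <stdlib.h>
#include <err.h>
#include <netgraph.h>

static void
reply_once(int cs)
{
	struct ng_mesg *msg;
	char path[NG_PATHSIZ];

	/* Receive a control message, remembering the sender's path. */
	if (NgAllocRecvMsg(cs, &msg, path) < 0)
		err(1, "NgAllocRecvMsg");
	/* Reply; the original header supplies the matching token. */
	if (NgSendReplyMsg(cs, path, msg, NULL, 0) < 0)
		err(1, "NgSendReplyMsg");
	free(msg);
}
.Ed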

View File

@ -647,9 +647,11 @@ sysdecode_reboot_howto(FILE *fp, int howto, int *rem)
/*
* RB_AUTOBOOT is special in that its value is zero, but it is
* also an implied argument if a different operation is not
* requested via RB_HALT, RB_POWERCYCLE, RB_POWEROFF, or RB_REROOT.
* requested via RB_HALT, RB_POWERCYCLE, RB_POWEROFF, or
* RB_REROOT.
*/
if (howto != 0 && (howto & (RB_HALT | RB_POWEROFF | RB_REROOT | RB_POWERCYCLE)) == 0) {
if (howto != 0 && (howto & (RB_HALT | RB_POWEROFF | RB_REROOT |
RB_POWERCYCLE)) == 0) {
fputs("RB_AUTOBOOT|", fp);
printed = true;
} else

View File

@ -23,7 +23,7 @@ NETBSD_ATF_TESTS_C+= sigmask_test
NETBSD_ATF_TESTS_C+= sigsuspend_test
NETBSD_ATF_TESTS_C+= siglongjmp_test
NETBSD_ATF_TESTS_C+= sleep_test
.if ${MACHINE} != "arm64" # ARM64TODO: Missing makecontext
.if ${MACHINE_CPUARCH} != "aarch64" # ARM64TODO: Missing makecontext
NETBSD_ATF_TESTS_C+= swapcontext_test
.endif
NETBSD_ATF_TESTS_C+= timedmutex_test

View File

@ -32,6 +32,8 @@
#include <machine/asm.h>
.cfi_sections .debug_frame
.globl _C_LABEL(_rtld_relocate_nonplt_self)
.globl _C_LABEL(_rtld)
@ -43,8 +45,7 @@
* a2 rtld object (filled in by dynamic loader)
* a3 ps_strings
*/
LEAF(rtld_start)
.frame sp, 4*PTR_SIZE, ra
NESTED(rtld_start, 4*PTR_SIZE, ra)
.mask 0x10090000,-PTR_SIZE
.set noreorder
SETUP_GP
@ -108,6 +109,7 @@ END(rtld_start)
_rtld_bind_start:
.frame sp, XCALLFRAME_SIZ, $15
.cfi_startproc simple
.cfi_def_cfa sp, 0
.cfi_register ra, $15
move v1, gp /* save old GP */
#if defined(__mips_o32) || defined(__mips_o64)
@ -115,7 +117,7 @@ _rtld_bind_start:
#endif
SETUP_GP
PTR_SUBU sp, XCALLFRAME_SIZ /* save arguments and sp value in stack */
.cfi_def_cfa sp, XCALLFRAME_SIZ
.cfi_def_cfa_offset XCALLFRAME_SIZ
SETUP_GP64(XCALLFRAME_GP, _rtld_bind_start)
SAVE_GP(XCALLFRAME_GP)
#if defined(__mips_n32) || defined(__mips_n64)
@ -201,6 +203,7 @@ END(_rtld_bind_start)
_rtld_pltbind_start:
.frame sp, XCALLFRAME_SIZ, $15
.cfi_startproc simple
.cfi_def_cfa sp, 0
.cfi_register ra, $15
#if defined(__mips_o32)
move v1, gp /* save pointer to .got.plt */
@ -212,7 +215,7 @@ _rtld_pltbind_start:
#endif
SETUP_GP
PTR_SUBU sp, XCALLFRAME_SIZ /* save arguments and sp value in stack */
.cfi_def_cfa sp, XCALLFRAME_SIZ
.cfi_def_cfa_offset XCALLFRAME_SIZ
SETUP_GP64(XCALLFRAME_GP, _rtld_pltbind_start)
SAVE_GP(XCALLFRAME_GP)
#if defined(__mips_n32) || defined(__mips_n64)

View File

@ -108,6 +108,7 @@ CRUNCH_PROGS_sbin+= rtsol
.if ${MK_IPFILTER} != "no"
CRUNCH_PROGS_sbin+= ipf
CRUNCH_LIBS_ipf+= ${LIBIPF}
.endif
.if ${MK_ROUTED} != "no"
@ -192,7 +193,7 @@ CRUNCH_ALIAS_xz= unxz lzma unlzma xzcat lzcat
CRUNCH_PROGS_usr.bin+= zstd
CRUNCH_ALIAS_zstd= unzstd zstdcat zstdmt
CRUNCH_LIBS+= -lprivatezstd
CRUNCH_LIBS+= ${LDADD_zstd}
CRUNCH_PROGS_usr.bin+= tar
CRUNCH_LIBS+= -larchive

View File

@ -1143,7 +1143,7 @@ unsetifdescr(const char *val, int value, int s, const struct afswtch *afp)
"\020\1RXCSUM\2TXCSUM\3NETCONS\4VLAN_MTU\5VLAN_HWTAGGING\6JUMBO_MTU\7POLLING" \
"\10VLAN_HWCSUM\11TSO4\12TSO6\13LRO\14WOL_UCAST\15WOL_MCAST\16WOL_MAGIC" \
"\17TOE4\20TOE6\21VLAN_HWFILTER\23VLAN_HWTSO\24LINKSTATE\25NETMAP" \
"\26RXCSUM_IPV6\27TXCSUM_IPV6\31TXRTLMT"
"\26RXCSUM_IPV6\27TXCSUM_IPV6\31TXRTLMT\32HWRXTSTMP"
/*
* Print the status of the interface. If an address family was
@ -1456,6 +1456,8 @@ static struct cmd basic_cmds[] = {
DEF_CMD("-wol_magic", -IFCAP_WOL_MAGIC, setifcap),
DEF_CMD("txrtlmt", IFCAP_TXRTLMT, setifcap),
DEF_CMD("-txrtlmt", -IFCAP_TXRTLMT, setifcap),
DEF_CMD("hwrxtsmp", IFCAP_HWRXTSTMP, setifcap),
DEF_CMD("-hwrxtsmp", -IFCAP_HWRXTSTMP, setifcap),
DEF_CMD("normal", -IFF_LINK0, setifflags),
DEF_CMD("compress", IFF_LINK0, setifflags),
DEF_CMD("noicmp", IFF_LINK1, setifflags),

View File

@ -30,10 +30,7 @@ ipf_l.h: lexer.h
sed -e 's/yy/ipf_yy/g' \
${.ALLSRC} > ${.TARGET}
.if defined(RESCUE)
LIBIPF_SRCS!= cd ${.CURDIR:H}/libipf && ${MAKE} -V SRCS
SRCS+= ${LIBIPF_SRCS}
.else
.if !defined(RESCUE)
LIBADD+= pcap
.endif

View File

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 14, 2017
.Dd November 8, 2017
.Dt ARCMSR 4
.Os
.Sh NAME
@ -104,6 +104,8 @@ ARC-1213
.It
ARC-1214
.It
ARC-1216
.It
ARC-1220
.It
ARC-1222
@ -112,6 +114,8 @@ ARC-1223
.It
ARC-1224
.It
ARC-1226
.It
ARC-1230
.It
ARC-1231

View File

@ -191,6 +191,17 @@ If there is no primary node (both nodes are secondary, or secondary node has
no connection to primary one), secondary node(s) report Transitioning state.
State with two primary nodes is illegal (split brain condition).
.El
.Sh TUNABLE VARIABLES
The following variables are available as
.Xr loader 8
tunables:
.Bl -tag -width indent
.It Va kern.cam.ctl.max_luns
Specifies the maximum number of LUNs supported; the value must be a power of 2.
The default value is 1024.
.It Va kern.cam.ctl.max_ports
Specifies the maximum number of ports supported; the value must be a power of 2.
The default value is 256.
.El
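For example, a
.Pa /boot/loader.conf
raising both limits (the values shown are illustrative powers of 2):
.Bd -literal -offset indent
kern.cam.ctl.max_luns="2048"
kern.cam.ctl.max_ports="512"
.Ed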
.Sh SEE ALSO
.Xr cfiscsi 4 ,
.Xr cfumass 4 ,

View File

@ -1616,6 +1616,23 @@ is set to 1.
.Pp
Default is
.Dq Li NO .
.It Va ifconfig_ Ns Ao Ar interface Ac Ns _descr
.Pq Vt str
This assigns an arbitrary description to an interface.
The
.Xr sysctl 8
variable
.Va net.ifdescr_maxlen
limits its length.
This static setting may be overridden by commands
started by dynamic interface configuration utilities
like
.Xr dhclient 8
hooks.
The description can be seen with the
.Xr ifconfig 8
command, and it may be exported by the
.Xr bsnmpd 1
daemon using its MIB-2 module.
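For example, a hypothetical
.Pa /etc/rc.conf
entry labeling an interface named em0:
.Bd -literal -offset indent
ifconfig_em0_descr="uplink to core switch"
.Ed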
.It Va ifconfig_ Ns Ao Ar interface Ac Ns _ipv6
.Pq Vt str
IPv6 functionality on an interface should be configured by

View File

@ -494,7 +494,7 @@ More information on these and other related variables may be found in
and the
.Fx
Porter's Handbook.
.Bl -tag -width ".Va OVERRIDE_LINUX_BASE_PORT"
.Bl -tag -width ".Va WITH_GHOSTSCRIPT_VER"
.It Va WITH_OPENSSL_PORT
.Pq Vt bool
If set, causes ports that make use of OpenSSL to use the OpenSSL from
@ -514,8 +514,6 @@ If set, enables
for most ports.
.It Va WITH_GHOSTSCRIPT_VER
If set, the version of ghostscript to be used by ports.
.It Va OVERRIDE_LINUX_BASE_PORT
The default linux base to use.
.It Va WITH_CCACHE_BUILD
.Pq Vt bool
If set, enables the use of

View File

@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\" $FreeBSD$
.\"
.Dd October 1, 2017
.Dd October 31, 2017
.Dt EVENTHANDLER 9
.Os
.Sh NAME
@ -38,6 +38,9 @@
.Fn EVENTHANDLER_REGISTER name func arg priority
.Fn EVENTHANDLER_DEREGISTER name tag
.Fn EVENTHANDLER_DEREGISTER_NOWAIT name tag
.Fn EVENTHANDLER_LIST_DECLARE name
.Fn EVENTHANDLER_LIST_DEFINE name
.Fn EVENTHANDLER_DIRECT_INVOKE name
.Ft eventhandler_tag
.Fo eventhandler_register
.Fa "struct eventhandler_list *list"
@ -82,8 +85,13 @@ may be used if the handler does not have a specific priority
associated with it.
.Pp
The normal way to use this subsystem is via the macro interface.
The macros that can be used for working with event handlers and callback
function lists are:
For high-frequency events, it is suggested that you additionally use
.Fn EVENTHANDLER_LIST_DEFINE
so that the event handlers can be invoked directly using
.Fn EVENTHANDLER_DIRECT_INVOKE
(see below).
This saves the invoker from having to do a locked traversal of a global
list of event handler lists.
.Bl -tag -width indent
.It Fn EVENTHANDLER_DECLARE
This macro declares an event handler named by argument
@ -148,6 +156,27 @@ Additional arguments to the macro after the
.Fa name
parameter are passed as the second and subsequent arguments to each
registered callback function.
.It Fn EVENTHANDLER_LIST_DEFINE
This macro defines a reference to an event handler list named by
argument
.Fa name .
It uses
.Xr SYSINIT 9
to initialize the reference and the eventhandler list.
.It Fn EVENTHANDLER_LIST_DECLARE
This macro declares an event handler list named by argument
.Fa name .
This is only needed for users of
.Fn EVENTHANDLER_DIRECT_INVOKE
which are not in the same compilation unit as that list's definition.
.It Fn EVENTHANDLER_DIRECT_INVOKE
This macro invokes the event handlers registered for the list named by
argument
.Fa name .
This macro can only be used if the list was defined with
.Fn EVENTHANDLER_LIST_DEFINE .
The macro is variadic with the same semantics as
.Fn EVENTHANDLER_INVOKE .
.El
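.Pp
A minimal sketch of the static-list flavor follows; the event name,
handler, and argument are hypothetical.
.Bd -literal -offset indent
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>

/*
 * Handlers receive the registered argument first, then the
 * arguments supplied at invocation time.
 */
typedef void (*frob_fn)(void *arg, int unit);
EVENTHANDLER_DECLARE(frob_event, frob_fn);

/* Define the list once, in exactly one compilation unit. */
EVENTHANDLER_LIST_DEFINE(frob_event);

static void
frob_handler(void *arg, int unit)
{
	printf("frob: unit %d\n", unit);
}

static void
frob_example(void)
{
	EVENTHANDLER_REGISTER(frob_event, frob_handler, NULL,
	    EVENTHANDLER_PRI_ANY);
	/* Skips the locked traversal of the global list of lists. */
	EVENTHANDLER_DIRECT_INVOKE(frob_event, 1);
}
.Ed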
.Pp
The macros are implemented using the following functions:
@ -315,7 +344,7 @@ This is never called.
.It Vt process_fork
Callbacks invoked when a process forks a child.
.It Vt process_init
Callback invoked when a process is initalized.
Callback invoked when a process is initialized.
.It Vt random_adaptor_attach
Callback invoked when a new random module has been loaded.
.It Vt register_framebuffer
@ -337,7 +366,7 @@ Callback invoked when a thread object is created.
.It Vt thread_dtor
Callback invoked when a thread object is destroyed.
.It Vt thread_init
Callback invoked when a thread object is initalized.
Callback invoked when a thread object is initialized.
.It Vt thread_fini
Callback invoked when a thread object is deinitialized.
.It Vt usb_dev_configured
@ -384,4 +413,6 @@ facility first appeared in
.Fx 4.0 .
.Sh AUTHORS
This manual page was written by
.An Joseph Koshy Aq Mt jkoshy@FreeBSD.org .
.An Joseph Koshy Aq Mt jkoshy@FreeBSD.org
and
.An Matt Joras Aq Mt mjoras@FreeBSD.org .

View File

@ -71,7 +71,7 @@ CLANG_ANALYZE_SRCS= \
${DPSRCS:M*.[cC]} ${DPSRCS:M*.cc} \
${DPSRCS:M*.cpp} ${DPSRCS:M*.cxx}
.if !empty(CLANG_ANALYZE_SRCS)
CLANG_ANALYZE_OBJS= ${CLANG_ANALYZE_SRCS:O:u:R:S,$,.clang-analyzer,}
CLANG_ANALYZE_OBJS= ${CLANG_ANALYZE_SRCS:O:u:${OBJS_SRCS_FILTER:ts:}:S,$,.clang-analyzer,}
.NOPATH: ${CLANG_ANALYZE_OBJS}
.endif

View File

@ -100,6 +100,9 @@ ${CONF}: Makefile
.else
echo special ${P} buildopts DIRPRFX=${DIRPRFX}${P}/ >>${.TARGET}
.endif
.ifdef CRUNCH_LIBS_${P}
echo special ${P} lib ${CRUNCH_LIBS_${P}} >>${.TARGET}
.endif
.for A in ${CRUNCH_ALIAS_${P}}
echo ln ${P} ${A} >>${.TARGET}
.endfor

View File

@ -233,10 +233,14 @@ afterdepend: beforedepend
# mimicking what bmake's meta_name() does and adding in the full path
# as well to ensure that the expected meta file is read.
.if ${__obj:M*/*}
.if ${MAKE_VERSION} < 20171028
_meta_obj= ${.OBJDIR:C,/,_,g}_${__obj:C,/,_,g}.meta
.else
_meta_obj= ${__obj:C,/,_,g}.meta
.endif # ${MAKE_VERSION} < 20171028
.else
_meta_obj= ${__obj}.meta
.endif
.endif # ${__obj:M*/*}
_dep_obj= ${DEPENDFILE}.${__obj:${DEPEND_FILTER}}
.if defined(_meta_filemon)
_depfile= ${.OBJDIR}/${_meta_obj}

View File

@ -13,7 +13,7 @@ __<bsd.init.mk>__:
.if ${MK_AUTO_OBJ} == "yes"
# This is also done in bsd.obj.mk
.if defined(NO_OBJ)
.if defined(NO_OBJ) && ${.OBJDIR} != ${.CURDIR}
.OBJDIR: ${.CURDIR}
.endif
.endif

View File

@ -44,11 +44,11 @@ __<bsd.obj.mk>__:
.if ${MK_AUTO_OBJ} == "yes"
# it is done by now
objwarn:
obj:
objwarn: .PHONY
obj: .PHONY
CANONICALOBJDIR= ${.OBJDIR}
# This is also done in bsd.init.mk
.if defined(NO_OBJ)
.if defined(NO_OBJ) && ${.OBJDIR} != ${.CURDIR}
# but this makefile does not want it!
.OBJDIR: ${.CURDIR}
.endif
@ -145,7 +145,7 @@ obj: .PHONY
.endif
.if !target(objlink)
objlink:
objlink: .PHONY
@if test -d ${CANONICALOBJDIR}/; then \
rm -f ${.CURDIR}/obj; \
ln -s ${CANONICALOBJDIR} ${.CURDIR}/obj; \
@ -159,17 +159,17 @@ objlink:
# where would that obj directory be?
#
.if !target(whereobj)
whereobj:
whereobj: .PHONY
@echo ${.OBJDIR}
.endif
# Same check in bsd.progs.mk
.if ${CANONICALOBJDIR} != ${.CURDIR} && exists(${CANONICALOBJDIR}/) && \
(${MK_AUTO_OBJ} == "no" || ${.TARGETS:Nclean*:N*clean:Ndestroy*} == "")
cleanobj:
cleanobj: .PHONY
-rm -rf ${CANONICALOBJDIR}
.else
cleanobj: clean cleandepend
cleanobj: .PHONY clean cleandepend
.endif
@if [ -L ${.CURDIR}/obj ]; then rm -f ${.CURDIR}/obj; fi
@ -180,7 +180,7 @@ NOPATH_FILES+= ${CLEANFILES}
.endif
.if !target(clean)
clean:
clean: .PHONY
.if defined(CLEANFILES) && !empty(CLEANFILES)
rm -f ${CLEANFILES}
.endif
@ -196,7 +196,7 @@ clean:
.include <bsd.subdir.mk>
cleandir: .WAIT cleanobj
cleandir: .PHONY .WAIT cleanobj
.if make(destroy*) && defined(OBJROOT)
# this (rm -rf objdir) is much faster and more reliable than cleaning.
@ -206,18 +206,18 @@ _OBJDIR?= ${.OBJDIR}
_CURDIR?= ${.CURDIR}
# destroy almost everything
destroy: destroy-all
destroy-all:
destroy: .PHONY destroy-all
destroy-all: .PHONY
# just remove our objdir
destroy-arch: .NOMETA
destroy-arch: .PHONY .NOMETA
.if ${_OBJDIR} != ${_CURDIR}
cd ${_CURDIR} && rm -rf ${_OBJDIR}
.endif
.if defined(HOST_OBJTOP)
destroy-host: destroy.host
destroy.host: .NOMETA
destroy.host: .PHONY .NOMETA
cd ${_CURDIR} && rm -rf ${HOST_OBJTOP}/${RELDIR:N.}
.endif
@ -226,7 +226,7 @@ destroy-all: destroy-stage
.endif
# remove the stage tree
destroy-stage: .NOMETA
destroy-stage: .PHONY .NOMETA
.if defined(STAGE_ROOT)
cd ${_CURDIR} && rm -rf ${STAGE_ROOT}
.endif
@ -236,7 +236,7 @@ _destroy_machine_list = common host ${ALL_MACHINE_LIST}
.for m in ${_destroy_machine_list:O:u}
destroy-all: destroy.$m
.if !target(destroy.$m)
destroy.$m: .NOMETA
destroy.$m: .PHONY .NOMETA
.if ${_OBJDIR} != ${_CURDIR}
cd ${_CURDIR} && rm -rf ${OBJROOT}$m*/${RELDIR:N.}
.endif

View File

@ -104,7 +104,7 @@ _PROGS_ALL_SRCS+= ${s}
.if !empty(_PROGS_COMMON_SRCS)
_PROGS_COMMON_OBJS= ${_PROGS_COMMON_SRCS:M*.[dhly]}
.if !empty(_PROGS_COMMON_SRCS:N*.[dhly])
_PROGS_COMMON_OBJS+= ${_PROGS_COMMON_SRCS:N*.[dhly]:R:S/$/.o/g}
_PROGS_COMMON_OBJS+= ${_PROGS_COMMON_SRCS:N*.[dhly]:${OBJS_SRCS_FILTER:ts:}:S/$/.o/g}
.endif
.endif

View File

@ -44,4 +44,8 @@ TIME_STAMP_END?= ${TIME_STAMP_DATE}
# error spam and show a proper error.
Mkdirs= Mkdirs() { mkdir -p $$* || :; }
.if !empty(.MAKEFLAGS:M-s)
ECHO_TRACE?= true
.endif
.include "src.sys.env.mk"

View File

@ -70,7 +70,10 @@ OBJROOT:= ${OBJROOT:H:tA}/${OBJROOT:T}
.export OBJROOT SRCTOP
.endif
.if ${MK_UNIFIED_OBJDIR} == "yes"
# SRCTOP == OBJROOT only happens with clever MAKEOBJDIRPREFIX=/. Don't
# append TARGET.TARGET_ARCH for that case since the user wants to build
# in the source tree.
.if ${MK_UNIFIED_OBJDIR} == "yes" && ${SRCTOP} != ${OBJROOT:tA}
OBJTOP:= ${OBJROOT}${TARGET:D${TARGET}.${TARGET_ARCH}:U${MACHINE}.${MACHINE_ARCH}}
.else
# TARGET.TARGET_ARCH handled in OBJROOT already.
@ -103,13 +106,12 @@ __objdir:= ${MAKEOBJDIR}
.endif
# Try to enable MK_AUTO_OBJ by default if we can write to the __objdir. Only
# do this if AUTO_OBJ is not disabled by the user, not cleaning, and this is
# the first make ran.
.if 0 && ${.MAKE.LEVEL} == 0 && \
# do this if AUTO_OBJ is not disabled by the user, and this is the first make
# ran.
.if ${.MAKE.LEVEL} == 0 && \
${MK_AUTO_OBJ} == "no" && empty(.MAKEOVERRIDES:MMK_AUTO_OBJ) && \
!defined(WITHOUT_AUTO_OBJ) && !make(showconfig) && !make(print-dir) && \
!defined(NO_OBJ) && \
(${.TARGETS} == "" || ${.TARGETS:Nclean*:N*clean:Ndestroy*} != "")
!defined(NO_OBJ)
# Find the last existing directory component and check if we can write to it.
# If the last component is a symlink then recurse on the new path.
CheckAutoObj= \
@ -147,9 +149,13 @@ CheckAutoObj() { \
fi; \
}
.if !empty(__objdir)
.if ${.CURDIR} == ${__objdir}
__objdir_writable?= yes
.else
__objdir_writable!= \
${CheckAutoObj}; CheckAutoObj "${__objdir}" || echo no
.endif
.endif
__objdir_writable?= no
# Export the decision to sub-makes.
MK_AUTO_OBJ:= ${__objdir_writable}
@ -179,3 +185,14 @@ MK_AUTO_OBJ:= ${__objdir_writable}
# auto.obj.mk or bsd.obj.mk will create the directory and fix .OBJDIR later.
.OBJDIR: ${.CURDIR}
.endif
# Ensure .OBJDIR=.CURDIR cases have a proper OBJTOP and .OBJDIR
.if defined(NO_OBJ) || ${__objdir_writable:Uunknown} == "no" || \
${__objdir} == ${.CURDIR}
OBJTOP= ${SRCTOP}
OBJROOT= ${SRCTOP}/
# Compare only to avoid an unneeded chdir(2), :tA purposely left out.
.if ${.OBJDIR} != ${.CURDIR}
.OBJDIR: ${.CURDIR}
.endif
.endif # defined(NO_OBJ)

View File

@ -2414,9 +2414,8 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
/*
* allocate the page directory page
*/
while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
VM_WAIT;
pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
pml4phys = VM_PAGE_TO_PHYS(pml4pg);
pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);

View File

@ -47,20 +47,12 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
vm_page_t m;
vm_paddr_t pa;
void *va;
int pflags;
*flags = UMA_SLAB_PRIV;
pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
for (;;) {
m = vm_page_alloc(NULL, 0, pflags);
if (m == NULL) {
if (wait & M_NOWAIT)
return (NULL);
else
VM_WAIT;
} else
break;
}
m = vm_page_alloc(NULL, 0,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);

View File

@ -1,3 +1,4 @@
# $FreeBSD$
arm/allwinner/a13/a13_padconf.c standard
arm/allwinner/clkng/ccu_a13.c standard

View File

@ -58,6 +58,10 @@ __FBSDID("$FreeBSD$");
#include "opt_soc.h"
#endif
#if defined(SOC_ALLWINNER_A13)
#include <arm/allwinner/clkng/ccu_a13.h>
#endif
#if defined(SOC_ALLWINNER_A31)
#include <arm/allwinner/clkng/ccu_a31.h>
#endif
@ -94,7 +98,14 @@ static struct resource_spec aw_ccung_spec[] = {
#define A64_R_CCU 5
#endif
#if defined(SOC_ALLWINNER_A13)
#define A13_CCU 6
#endif
static struct ofw_compat_data compat_data[] = {
#if defined(SOC_ALLWINNER_A13)
{ "allwinner,sun5i-a13-ccu", A13_CCU},
#endif
#if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5)
{ "allwinner,sun8i-h3-ccu", H3_CCU },
{ "allwinner,sun8i-h3-r-ccu", H3_R_CCU },
@ -322,6 +333,11 @@ aw_ccung_attach(device_t dev)
panic("Cannot create clkdom\n");
switch (sc->type) {
#if defined(SOC_ALLWINNER_A13)
case A13_CCU:
ccu_a13_register_clocks(sc);
break;
#endif
#if defined(SOC_ALLWINNER_H3) || defined(SOC_ALLWINNER_H5)
case H3_CCU:
ccu_h3_register_clocks(sc);

View File

@ -300,6 +300,7 @@ aw_clk_nkmp_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
val |= aw_clk_factor_get_value(&sc->p, best_p) << sc->p.shift;
WRITE4(clk, sc->offset, val);
DELAY(2000);
DEVICE_UNLOCK(clk);
if ((sc->flags & AW_CLK_HAS_UPDATE) != 0) {
DEVICE_LOCK(clk);
@ -307,6 +308,7 @@ aw_clk_nkmp_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
val |= 1 << sc->update_shift;
WRITE4(clk, sc->offset, val);
DELAY(2000);
DEVICE_UNLOCK(clk);
}
if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {

View File

@ -0,0 +1,566 @@
/*-
* Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/extres/clk/clk_div.h>
#include <dev/extres/clk/clk_fixed.h>
#include <dev/extres/clk/clk_mux.h>
#include <arm/allwinner/clkng/aw_ccung.h>
#include <arm/allwinner/clkng/aw_clk.h>
#include <arm/allwinner/clkng/aw_clk_nm.h>
#include <arm/allwinner/clkng/aw_clk_nkmp.h>
#include <arm/allwinner/clkng/aw_clk_prediv_mux.h>
#include <dt-bindings/clock/sun5i-ccu.h>
#include <dt-bindings/reset/sun5i-ccu.h>
#include "ccu_a13.h"
/* Non-exported clocks */
#define CLK_PLL_CORE 2
#define CLK_PLL_AUDIO_BASE 3
#define CLK_PLL_AUDIO 4
#define CLK_PLL_AUDIO_2X 5
#define CLK_PLL_AUDIO_4X 6
#define CLK_PLL_AUDIO_8X 7
#define CLK_PLL_VIDEO0 8
#define CLK_PLL_VE 10
#define CLK_PLL_DDR_BASE 11
#define CLK_PLL_DDR 12
#define CLK_PLL_DDR_OTHER 13
#define CLK_PLL_PERIPH 14
#define CLK_PLL_VIDEO1 15
#define CLK_AXI 18
#define CLK_AHB 19
#define CLK_APB0 20
#define CLK_APB1 21
#define CLK_DRAM_AXI 22
#define CLK_TCON_CH1_SCLK 91
#define CLK_MBUS 99
static struct aw_ccung_reset a13_ccu_resets[] = {
CCU_RESET(RST_USB_PHY0, 0xcc, 0)
CCU_RESET(RST_USB_PHY1, 0xcc, 1)
CCU_RESET(RST_GPS, 0xd0, 30)
CCU_RESET(RST_DE_BE, 0x104, 30)
CCU_RESET(RST_DE_FE, 0x10c, 30)
CCU_RESET(RST_TVE, 0x118, 29)
CCU_RESET(RST_LCD, 0x118, 30)
CCU_RESET(RST_CSI, 0x134, 30)
CCU_RESET(RST_VE, 0x13c, 0)
CCU_RESET(RST_GPU, 0x154, 30)
CCU_RESET(RST_IEP, 0x160, 30)
};
static struct aw_ccung_gate a13_ccu_gates[] = {
CCU_GATE(CLK_HOSC, "hosc", "osc24M", 0x50, 0)
CCU_GATE(CLK_DRAM_AXI, "axi-dram", "axi", 0x5c, 0)
CCU_GATE(CLK_AHB_OTG, "ahb-otg", "ahb", 0x60, 0)
CCU_GATE(CLK_AHB_EHCI, "ahb-ehci", "ahb", 0x60, 1)
CCU_GATE(CLK_AHB_OHCI, "ahb-ohci", "ahb", 0x60, 2)
CCU_GATE(CLK_AHB_SS, "ahb-ss", "ahb", 0x60, 5)
CCU_GATE(CLK_AHB_DMA, "ahb-dma", "ahb", 0x60, 6)
CCU_GATE(CLK_AHB_BIST, "ahb-bist", "ahb", 0x60, 7)
CCU_GATE(CLK_AHB_MMC0, "ahb-mmc0", "ahb", 0x60, 8)
CCU_GATE(CLK_AHB_MMC1, "ahb-mmc1", "ahb", 0x60, 9)
CCU_GATE(CLK_AHB_MMC2, "ahb-mmc2", "ahb", 0x60, 10)
CCU_GATE(CLK_AHB_NAND, "ahb-nand", "ahb", 0x60, 13)
CCU_GATE(CLK_AHB_SDRAM, "ahb-sdram", "ahb", 0x60, 14)
CCU_GATE(CLK_AHB_SPI0, "ahb-spi0", "ahb", 0x60, 20)
CCU_GATE(CLK_AHB_SPI1, "ahb-spi1", "ahb", 0x60, 21)
CCU_GATE(CLK_AHB_SPI2, "ahb-spi2", "ahb", 0x60, 22)
CCU_GATE(CLK_AHB_GPS, "ahb-gps", "ahb", 0x60, 26)
CCU_GATE(CLK_AHB_HSTIMER, "ahb-hstimer", "ahb", 0x60, 28)
CCU_GATE(CLK_AHB_VE, "ahb-ve", "ahb", 0x64, 0)
CCU_GATE(CLK_AHB_LCD, "ahb-lcd", "ahb", 0x64, 4)
CCU_GATE(CLK_AHB_CSI, "ahb-csi", "ahb", 0x64, 8)
CCU_GATE(CLK_AHB_DE_BE, "ahb-de-be", "ahb", 0x64, 12)
CCU_GATE(CLK_AHB_DE_FE, "ahb-de-fe", "ahb", 0x64, 14)
CCU_GATE(CLK_AHB_IEP, "ahb-iep", "ahb", 0x64, 19)
CCU_GATE(CLK_AHB_GPU, "ahb-gpu", "ahb", 0x64, 20)
CCU_GATE(CLK_APB0_CODEC, "apb0-codec", "apb0", 0x68, 0)
CCU_GATE(CLK_APB0_PIO, "apb0-pio", "apb0", 0x68, 5)
CCU_GATE(CLK_APB0_IR, "apb0-ir", "apb0", 0x68, 6)
CCU_GATE(CLK_APB1_I2C0, "apb1-i2c0", "apb1", 0x6c, 0)
CCU_GATE(CLK_APB1_I2C1, "apb1-i2c1", "apb1", 0x6c, 1)
CCU_GATE(CLK_APB1_I2C2, "apb1-i2c2", "apb1", 0x6c, 2)
CCU_GATE(CLK_APB1_UART1, "apb1-uart1", "apb1", 0x6c, 17)
CCU_GATE(CLK_APB1_UART3, "apb1-uart3", "apb1", 0x6c, 19)
CCU_GATE(CLK_DRAM_VE, "dram-ve", "pll-ddr", 0x100, 0)
CCU_GATE(CLK_DRAM_CSI, "dram-csi", "pll-ddr", 0x100, 1)
CCU_GATE(CLK_DRAM_DE_FE, "dram-de-fe", "pll-ddr", 0x100, 25)
CCU_GATE(CLK_DRAM_DE_BE, "dram-de-be", "pll-ddr", 0x100, 26)
CCU_GATE(CLK_DRAM_ACE, "dram-ace", "pll-ddr", 0x100, 29)
CCU_GATE(CLK_DRAM_IEP, "dram-iep", "pll-ddr", 0x100, 31)
CCU_GATE(CLK_CODEC, "codec", "pll-audio", 0x140, 31)
CCU_GATE(CLK_AVS, "avs", "hosc", 0x144, 31)
};
static const char *pll_parents[] = {"hosc"};
static struct aw_clk_nkmp_def pll_core = {
.clkdef = {
.id = CLK_PLL_CORE,
.name = "pll-core",
.parent_names = pll_parents,
.parent_cnt = nitems(pll_parents),
},
.offset = 0x00,
.n = {.shift = 8, .width = 5},
.k = {.shift = 4, .width = 2},
.m = {.shift = 0, .width = 2},
.p = {.shift = 16, .width = 2},
.gate_shift = 31,
.flags = AW_CLK_HAS_GATE,
};
/*
* We only implement pll-audio for now
* For pll-audio-2/4/8 x we need a way to change the frequency
* of the parent clocks
*/
static struct aw_clk_nkmp_def pll_audio = {
.clkdef = {
.id = CLK_PLL_AUDIO,
.name = "pll-audio",
.parent_names = pll_parents,
.parent_cnt = nitems(pll_parents),
},
.offset = 0x08,
.n = {.shift = 8, .width = 7},
.k = {.value = 1, .flags = AW_CLK_FACTOR_FIXED},
.m = {.shift = 0, .width = 5},
.p = {.shift = 26, .width = 4},
.gate_shift = 31,
.flags = AW_CLK_HAS_GATE,
};
/* Missing PLL3-Video */
/* Missing PLL4-VE */
static struct aw_clk_nkmp_def pll_ddr_base = {
.clkdef = {
.id = CLK_PLL_DDR_BASE,
.name = "pll-ddr-base",
.parent_names = pll_parents,
.parent_cnt = nitems(pll_parents),
},
.offset = 0x20,
.n = {.shift = 8, .width = 5},
.k = {.shift = 4, .width = 2},
.m = {.value = 1, .flags = AW_CLK_FACTOR_FIXED},
.p = {.value = 1, .flags = AW_CLK_FACTOR_FIXED},
.gate_shift = 31,
.flags = AW_CLK_HAS_GATE,
};
static const char *pll_ddr_parents[] = {"pll-ddr-base"};
static struct clk_div_def pll_ddr = {
.clkdef = {
.id = CLK_PLL_DDR,
.name = "pll-ddr",
.parent_names = pll_ddr_parents,
.parent_cnt = nitems(pll_ddr_parents),
},
.offset = 0x20,
.i_shift = 0,
.i_width = 2,
};
static const char *pll_ddr_other_parents[] = {"pll-ddr-base"};
static struct clk_div_def pll_ddr_other = {
.clkdef = {
.id = CLK_PLL_DDR_OTHER,
.name = "pll-ddr-other",
.parent_names = pll_ddr_other_parents,
.parent_cnt = nitems(pll_ddr_other_parents),
},
.offset = 0x20,
.i_shift = 16,
.i_width = 2,
};
static struct aw_clk_nkmp_def pll_periph = {
.clkdef = {
.id = CLK_PLL_PERIPH,
.name = "pll-periph",
.parent_names = pll_parents,
.parent_cnt = nitems(pll_parents),
},
.offset = 0x28,
.n = {.shift = 8, .width = 5},
.k = {.shift = 4, .width = 2},
.m = {.shift = 0, .width = 2},
.p = {.value = 2, .flags = AW_CLK_FACTOR_FIXED},
.gate_shift = 31,
.flags = AW_CLK_HAS_GATE,
};
/* Missing PLL7-VIDEO1 */
static const char *cpu_parents[] = {"osc32k", "hosc", "pll-core", "pll-periph"};
static struct aw_clk_prediv_mux_def cpu_clk = {
.clkdef = {
.id = CLK_CPU,
.name = "cpu",
.parent_names = cpu_parents,
.parent_cnt = nitems(cpu_parents),
},
.offset = 0x54,
.mux_shift = 16, .mux_width = 2,
.prediv = {
.value = 6,
.flags = AW_CLK_FACTOR_FIXED,
.cond_shift = 16,
.cond_width = 2,
.cond_value = 3,
},
};
static const char *axi_parents[] = {"cpu"};
static struct clk_div_def axi_clk = {
.clkdef = {
.id = CLK_AXI,
.name = "axi",
.parent_names = axi_parents,
.parent_cnt = nitems(axi_parents),
},
.offset = 0x50,
.i_shift = 0, .i_width = 2,
};
static const char *ahb_parents[] = {"axi", "cpu", "pll-periph"};
static struct aw_clk_prediv_mux_def ahb_clk = {
.clkdef = {
.id = CLK_AHB,
.name = "ahb",
.parent_names = ahb_parents,
.parent_cnt = nitems(ahb_parents),
},
.offset = 0x54,
.mux_shift = 6,
.mux_width = 2,
.div = {
.shift = 4,
.width = 2,
.flags = AW_CLK_FACTOR_POWER_OF_TWO
},
.prediv = {
.value = 2,
.flags = AW_CLK_FACTOR_FIXED,
.cond_shift = 6,
.cond_width = 2,
.cond_value = 2,
},
};
static const char *apb0_parents[] = {"ahb"};
static struct clk_div_table apb0_div_table[] = {
{ .value = 0, .divider = 2, },
{ .value = 1, .divider = 2, },
{ .value = 2, .divider = 4, },
{ .value = 3, .divider = 8, },
{ },
};
static struct clk_div_def apb0_clk = {
.clkdef = {
.id = CLK_APB0,
.name = "apb0",
.parent_names = apb0_parents,
.parent_cnt = nitems(apb0_parents),
},
.offset = 0x54,
.i_shift = 8, .i_width = 2,
.div_flags = CLK_DIV_WITH_TABLE,
.div_table = apb0_div_table,
};
static const char *apb1_parents[] = {"hosc", "pll-periph", "osc32k"};
static struct aw_clk_nm_def apb1_clk = {
.clkdef = {
.id = CLK_APB1,
.name = "apb1",
.parent_names = apb1_parents,
.parent_cnt = nitems(apb1_parents),
},
.offset = 0x58,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 5},
.mux_shift = 24,
.mux_width = 2,
.flags = AW_CLK_HAS_MUX,
};
static const char *mod_parents[] = {"hosc", "pll-periph", "pll-ddr-other"};
static struct aw_clk_nm_def nand_clk = {
.clkdef = {
.id = CLK_NAND,
.name = "nand",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0x80,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def mmc0_clk = {
.clkdef = {
.id = CLK_MMC0,
.name = "mmc0",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0x88,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def mmc1_clk = {
.clkdef = {
.id = CLK_MMC1,
.name = "mmc1",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0x8C,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def mmc2_clk = {
.clkdef = {
.id = CLK_MMC2,
.name = "mmc2",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0x90,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def ss_clk = {
.clkdef = {
.id = CLK_SS,
.name = "ss",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0x9C,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def spi0_clk = {
.clkdef = {
.id = CLK_SPI0,
.name = "spi0",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0xA0,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def spi1_clk = {
.clkdef = {
.id = CLK_SPI1,
.name = "spi1",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0xA4,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def spi2_clk = {
.clkdef = {
.id = CLK_SPI2,
.name = "spi2",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0xA8,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
static struct aw_clk_nm_def ir_clk = {
.clkdef = {
.id = CLK_IR,
.name = "ir",
.parent_names = mod_parents,
.parent_cnt = nitems(mod_parents),
},
.offset = 0xB0,
.n = {.shift = 16, .width = 2, .flags = AW_CLK_FACTOR_POWER_OF_TWO, },
.m = {.shift = 0, .width = 4},
.mux_shift = 24,
.mux_width = 2,
.gate_shift = 31,
.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE | AW_CLK_REPARENT
};
/* Missing DE-BE clock */
/* Missing DE-FE clock */
/* Missing LCD CH1 clock */
/* Missing CSI clock */
/* Missing VE clock */
/* Clocks list */
static struct aw_clk_nkmp_def *nkmp_clks[] = {
&pll_core,
&pll_audio,
&pll_ddr_base,
&pll_periph,
};
static struct aw_clk_nm_def *nm_clks[] = {
&apb1_clk,
&nand_clk,
&mmc0_clk,
&mmc1_clk,
&mmc2_clk,
&ss_clk,
&spi0_clk,
&spi1_clk,
&spi2_clk,
&ir_clk,
};
static struct aw_clk_prediv_mux_def *prediv_mux_clks[] = {
&cpu_clk,
&ahb_clk,
};
static struct clk_div_def *div_clks[] = {
&pll_ddr,
&pll_ddr_other,
&axi_clk,
&apb0_clk,
};
static struct clk_mux_def *mux_clks[] = {
};
static struct clk_fixed_def *fixed_factor_clks[] = {
};
static struct aw_clk_init init_clks[] = {
};
void
ccu_a13_register_clocks(struct aw_ccung_softc *sc)
{
int i;
sc->resets = a13_ccu_resets;
sc->nresets = nitems(a13_ccu_resets);
sc->gates = a13_ccu_gates;
sc->ngates = nitems(a13_ccu_gates);
sc->clk_init = init_clks;
sc->n_clk_init = nitems(init_clks);
for (i = 0; i < nitems(nkmp_clks); i++)
aw_clk_nkmp_register(sc->clkdom, nkmp_clks[i]);
for (i = 0; i < nitems(nm_clks); i++)
aw_clk_nm_register(sc->clkdom, nm_clks[i]);
for (i = 0; i < nitems(prediv_mux_clks); i++)
aw_clk_prediv_mux_register(sc->clkdom, prediv_mux_clks[i]);
for (i = 0; i < nitems(mux_clks); i++)
clknode_mux_register(sc->clkdom, mux_clks[i]);
for (i = 0; i < nitems(div_clks); i++)
clknode_div_register(sc->clkdom, div_clks[i]);
for (i = 0; i < nitems(fixed_factor_clks); i++)
clknode_fixed_register(sc->clkdom, fixed_factor_clks[i]);
}

View File

@ -0,0 +1,34 @@
/*-
* Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CCU_A13_H__
#define __CCU_A13_H__
void ccu_a13_register_clocks(struct aw_ccung_softc *sc);
#endif /* __CCU_A13_H__ */

View File

@ -47,20 +47,12 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
vm_page_t m;
vm_paddr_t pa;
void *va;
int pflags;
*flags = UMA_SLAB_PRIV;
pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
for (;;) {
m = vm_page_alloc(NULL, 0, pflags);
if (m == NULL) {
if (wait & M_NOWAIT)
return (NULL);
else
VM_WAIT;
} else
break;
}
m = vm_page_alloc(NULL, 0,
malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
if (m == NULL)
return (NULL);
pa = m->phys_addr;
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);

View File

@ -1,5 +1,16 @@
# $FreeBSD$
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
.include <bsd.init.mk>
FILES= ubldr ubldr.bin
@ -18,17 +29,6 @@ SRCS= start.S conf.c self_reloc.c vers.c
CWARNFLAGS.self_reloc.c+= -Wno-error=maybe-uninitialized
.endif
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
# Always add MI sources
.include "${BOOTSRC}/loader.mk"
CFLAGS+= -I.

View File

@ -82,7 +82,7 @@ EFI_HANDLE efi_devpath_handle(EFI_DEVICE_PATH *);
EFI_DEVICE_PATH *efi_devpath_last_node(EFI_DEVICE_PATH *);
EFI_DEVICE_PATH *efi_devpath_trim(EFI_DEVICE_PATH *);
bool efi_devpath_match(EFI_DEVICE_PATH *, EFI_DEVICE_PATH *);
int efi_devpath_is_prefix(EFI_DEVICE_PATH *, EFI_DEVICE_PATH *);
bool efi_devpath_is_prefix(EFI_DEVICE_PATH *, EFI_DEVICE_PATH *);
CHAR16 *efi_devpath_name(EFI_DEVICE_PATH *);
void efi_free_devpath_name(CHAR16 *);

View File

@ -167,13 +167,13 @@ efi_devpath_match(EFI_DEVICE_PATH *devpath1, EFI_DEVICE_PATH *devpath2)
return (true);
}
int
bool
efi_devpath_is_prefix(EFI_DEVICE_PATH *prefix, EFI_DEVICE_PATH *path)
{
int len;
size_t len;
if (prefix == NULL || path == NULL)
return (0);
return (false);
while (1) {
if (IsDevicePathEnd(prefix))
@ -181,17 +181,17 @@ efi_devpath_is_prefix(EFI_DEVICE_PATH *prefix, EFI_DEVICE_PATH *path)
if (DevicePathType(prefix) != DevicePathType(path) ||
DevicePathSubType(prefix) != DevicePathSubType(path))
return (0);
return (false);
len = DevicePathNodeLength(prefix);
if (len != DevicePathNodeLength(path))
return (0);
return (false);
if (memcmp(prefix, path, (size_t)len) != 0)
return (0);
if (memcmp(prefix, path, len) != 0)
return (false);
prefix = NextDevicePathNode(prefix);
path = NextDevicePathNode(path);
}
return (1);
return (true);
}

View File

@ -2,6 +2,12 @@
MAN=
LOADER_NET_SUPPORT?= yes
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
.include <bsd.init.mk>
MK_SSP= no
@ -9,12 +15,6 @@ MK_SSP= no
PROG= loader.sym
INTERNALPROG=
WARNS?= 3
LOADER_NET_SUPPORT?= yes
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
# architecture-specific loader code
SRCS= autoload.c \

View File

@ -1,5 +1,15 @@
# $FreeBSD$
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_UFS_SUPPORT?= yes
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= yes
.include <bsd.init.mk>
MK_SSP= no
@ -10,16 +20,6 @@ MAN=
INTERNALPROG=
NEWVERSWHAT?= "bootstrap loader" x86
VERSION_FILE= ${.CURDIR}/../loader/version
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_UFS_SUPPORT?= yes
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= yes
# architecture-specific loader code
SRCS= main.c conf.c vers.c chain.c

View File

@ -1,12 +1,13 @@
# $FreeBSD$
LOADER_ZFS_SUPPORT=yes
.include <bsd.init.mk>
.PATH: ${BOOTSRC}/i386/loader
LOADER= zfsloader
NEWVERSWHAT= "ZFS enabled bootstrap loader" x86
LOADER_ZFS_SUPPORT=yes
LOADER_ONLY= yes
MAN=

View File

@ -29,6 +29,13 @@
#
# $FreeBSD$
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= yes
.include <bsd.init.mk>
MK_SSP= no
@ -60,13 +67,6 @@ SRCS+= altera_jtag_uart.c \
# Since we don't have a backward compatibility issue, default to this on BERI.
CFLAGS+= -DBOOT_PROMPT_123
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= yes
# Always add MI sources
.include "${BOOTSRC}/loader.mk"

View File

@ -1,5 +1,16 @@
# $FreeBSD$
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
.include <bsd.init.mk>
FILES= ubldr
@ -14,17 +25,6 @@ UBLDR_LOADADDR?= 0xffffffff80800000
# Architecture-specific loader code
SRCS= start.S conf.c vers.c
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
# Always add MI sources
.include "${BOOTSRC}/loader.mk"
CFLAGS+= -I.

View File

@ -1,5 +1,16 @@
# $FreeBSD$
LOADER_CD9660_SUPPORT?= yes
LOADER_MSDOS_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_FDT_SUPPORT= yes
LOADER_BZIP2_SUPPORT?= no
.include <bsd.init.mk>
MK_SSP= no
MAN=
@ -13,17 +24,6 @@ SRCS= conf.c metadata.c vers.c main.c ppc64_elf_freebsd.c
SRCS+= host_syscall.S hostcons.c hostdisk.c kerneltramp.S kbootfdt.c
SRCS+= ucmpdi2.c
LOADER_CD9660_SUPPORT?= yes
LOADER_MSDOS_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_FDT_SUPPORT= yes
LOADER_BZIP2_SUPPORT?= no
.if ${LOADER_FDT_SUPPORT} == "yes"
CFLAGS+= -I${FDTSRC}
CFLAGS+= -I${SYSDIR}/contrib/libfdt

View File

@ -1,5 +1,16 @@
# $FreeBSD$
LOADER_CD9660_SUPPORT?= yes
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT?= yes
.include <bsd.init.mk>
MK_SSP= no
MAN=
@ -12,17 +23,6 @@ INSTALLFLAGS= -b
SRCS= conf.c metadata.c vers.c start.c
SRCS+= ucmpdi2.c
LOADER_CD9660_SUPPORT?= yes
LOADER_EXT2FS_SUPPORT?= no
LOADER_MSDOS_SUPPORT?= no
LOADER_UFS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT?= yes
.if ${LOADER_FDT_SUPPORT} == "yes"
SRCS+= ofwfdt.c
CFLAGS+= -I${FDTSRC}

View File

@ -1,5 +1,15 @@
# $FreeBSD$
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= yes
LOADER_EXT2FS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_FDT_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
.include <bsd.init.mk>
MK_SSP= no
MAN=
@ -14,16 +24,6 @@ SRCS+= lv1call.S ps3cons.c font.h ps3mmu.c ps3net.c ps3repo.c \
ps3stor.c ps3disk.c ps3cdrom.c
SRCS+= ucmpdi2.c
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= yes
LOADER_EXT2FS_SUPPORT?= yes
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= yes
LOADER_FDT_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
.if ${LOADER_FDT_SUPPORT} == "yes"
CFLAGS+= -I${FDTSRC}
CFLAGS+= -DLOADER_FDT_SUPPORT

View File

@ -1,5 +1,15 @@
# $FreeBSD$
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
.include <bsd.init.mk>
PROG= ubldr
@ -11,16 +21,6 @@ MAN=
SRCS= start.S conf.c vers.c
SRCS+= ucmpdi2.c
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
LOADER_NET_SUPPORT?= yes
LOADER_NFS_SUPPORT?= yes
LOADER_TFTP_SUPPORT?= no
LOADER_GZIP_SUPPORT?= no
LOADER_BZIP2_SUPPORT?= no
LOADER_FDT_SUPPORT= ${MK_FDT}
# Always add MI sources
.include "${BOOTSRC}/loader.mk"
.PATH: ${SYSDIR}/libkern

View File

@ -1,17 +1,5 @@
# $FreeBSD$
.include <bsd.init.mk>
MK_SSP= no
MAN=
PROG?= loader
NEWVERSWHAT?= "bootstrap loader" sparc64
VERSION_FILE= ${.CURDIR}/../loader/version
INSTALLFLAGS= -b
# Architecture-specific loader code
SRCS= locore.S main.c metadata.c vers.c
LOADER_DISK_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= yes
@ -25,6 +13,18 @@ LOADER_GZIP_SUPPORT?= yes
LOADER_BZIP2_SUPPORT?= no
LOADER_DEBUG?= no
.include <bsd.init.mk>
MK_SSP= no
MAN=
PROG?= loader
NEWVERSWHAT?= "bootstrap loader" sparc64
VERSION_FILE= ${.CURDIR}/../loader/version
INSTALLFLAGS= -b
# Architecture-specific loader code
SRCS= locore.S main.c metadata.c vers.c
.if ${LOADER_DEBUG} == "yes"
CFLAGS+= -DLOADER_DEBUG
.endif

View File

@ -2,15 +2,15 @@
MAN=
.include <bsd.init.mk>
MK_SSP= no
LOADER_MSDOS_SUPPORT?= yes
LOADER_UFS_SUPPORT?= yes
LOADER_CD9660_SUPPORT?= no
LOADER_EXT2FS_SUPPORT?= no
.include <bsd.init.mk>
MK_SSP= no
SHLIB_NAME= userboot.so
MK_CTF= no
STRIP=

View File

@ -414,6 +414,29 @@ SYSCTL_INT(_kern_cam_ctl, OID_AUTO, time_io_secs, CTLFLAG_RWTUN,
&ctl_time_io_secs, 0, "Log requests taking more seconds");
#endif
/*
* Maximum number of LUNs we support. MUST be a power of 2.
*/
#define CTL_DEFAULT_MAX_LUNS 1024
static int ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
TUNABLE_INT("kern.cam.ctl.max_luns", &ctl_max_luns);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_luns, CTLFLAG_RDTUN,
&ctl_max_luns, CTL_DEFAULT_MAX_LUNS, "Maximum number of LUNs");
/*
* Maximum number of ports registered at one time.
*/
#define CTL_DEFAULT_MAX_PORTS 256
static int ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
TUNABLE_INT("kern.cam.ctl.max_ports", &ctl_max_ports);
SYSCTL_INT(_kern_cam_ctl, OID_AUTO, max_ports, CTLFLAG_RDTUN,
&ctl_max_ports, CTL_DEFAULT_MAX_LUNS, "Maximum number of ports");
/*
* Maximum number of initiators we support.
*/
#define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * ctl_max_ports)
/*
* Supported pages (0x00), Serial number (0x80), Device ID (0x83),
* Extended INQUIRY Data (0x86), Mode Page Policy (0x87),
@ -1005,8 +1028,8 @@ ctl_isc_ha_link_up(struct ctl_softc *softc)
msg.login.version = CTL_HA_VERSION;
msg.login.ha_mode = softc->ha_mode;
msg.login.ha_id = softc->ha_id;
msg.login.max_luns = CTL_MAX_LUNS;
msg.login.max_ports = CTL_MAX_PORTS;
msg.login.max_luns = ctl_max_luns;
msg.login.max_ports = ctl_max_ports;
msg.login.max_init_per_port = CTL_MAX_INIT_PER_PORT;
ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg.login, sizeof(msg.login),
M_WAITOK);
@ -1069,7 +1092,7 @@ ctl_isc_ua(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
uint32_t iid = ctl_get_initindex(&msg->hdr.nexus);
mtx_lock(&softc->ctl_lock);
if (msg->hdr.nexus.targ_mapped_lun >= CTL_MAX_LUNS ||
if (msg->hdr.nexus.targ_mapped_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[msg->hdr.nexus.targ_mapped_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
@ -1103,7 +1126,7 @@ ctl_isc_lun_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
@ -1325,8 +1348,8 @@ ctl_isc_login(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
return;
}
if (msg->login.max_luns != CTL_MAX_LUNS ||
msg->login.max_ports != CTL_MAX_PORTS ||
if (msg->login.max_luns != ctl_max_luns ||
msg->login.max_ports != ctl_max_ports ||
msg->login.max_init_per_port != CTL_MAX_INIT_PER_PORT) {
printf("CTL HA peers have different limits\n");
ctl_ha_msg_abort(CTL_HA_CHAN_CTL);
@ -1343,7 +1366,7 @@ ctl_isc_mode_sync(struct ctl_softc *softc, union ctl_ha_msg *msg, int len)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
@ -1873,6 +1896,26 @@ ctl_init(void)
OID_AUTO, "ha_mode", CTLFLAG_RDTUN, (int *)&softc->ha_mode, 0,
"HA mode (0 - act/stby, 1 - serialize only, 2 - xfer)");
if (ctl_max_luns <= 0 || powerof2(ctl_max_luns) == 0) {
printf("Bad value %d for kern.cam.ctl.max_luns, must be a power of two, using %d\n",
ctl_max_luns, CTL_DEFAULT_MAX_LUNS);
ctl_max_luns = CTL_DEFAULT_MAX_LUNS;
}
softc->ctl_luns = malloc(sizeof(struct ctl_lun *) * ctl_max_luns,
M_DEVBUF, M_WAITOK | M_ZERO);
softc->ctl_lun_mask = malloc(sizeof(uint32_t) *
((ctl_max_luns + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO);
if (ctl_max_ports <= 0 || powerof2(ctl_max_ports) == 0) {
printf("Bad value %d for kern.cam.ctl.max_ports, must be a power of two, using %d\n",
ctl_max_ports, CTL_DEFAULT_MAX_PORTS);
ctl_max_ports = CTL_DEFAULT_MAX_PORTS;
}
softc->ctl_port_mask = malloc(sizeof(uint32_t) *
((ctl_max_ports + 31) / 32), M_DEVBUF, M_WAITOK | M_ZERO);
softc->ctl_ports = malloc(sizeof(struct ctl_port *) * ctl_max_ports,
M_DEVBUF, M_WAITOK | M_ZERO);
/*
* In Copan's HA scheme, the "master" and "slave" roles are
* figured out through the slot the controller is in. Although it
@ -1884,10 +1927,10 @@ ctl_init(void)
if (softc->ha_id == 0 || softc->ha_id > NUM_HA_SHELVES) {
softc->flags |= CTL_FLAG_ACTIVE_SHELF;
softc->is_single = 1;
softc->port_cnt = CTL_MAX_PORTS;
softc->port_cnt = ctl_max_ports;
softc->port_min = 0;
} else {
softc->port_cnt = CTL_MAX_PORTS / NUM_HA_SHELVES;
softc->port_cnt = ctl_max_ports / NUM_HA_SHELVES;
softc->port_min = (softc->ha_id - 1) * softc->port_cnt;
}
softc->port_max = softc->port_min + softc->port_cnt;
@ -1988,6 +2031,11 @@ ctl_shutdown(void)
uma_zdestroy(softc->io_zone);
mtx_destroy(&softc->ctl_lock);
free(softc->ctl_luns, M_DEVBUF);
free(softc->ctl_lun_mask, M_DEVBUF);
free(softc->ctl_port_mask, M_DEVBUF);
free(softc->ctl_ports, M_DEVBUF);
sysctl_ctx_free(&softc->sysctl_ctx);
free(softc, M_DEVBUF);
@ -2249,7 +2297,7 @@ ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio)
/* Make sure that we know about this LUN. */
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
@ -2717,7 +2765,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
mtx_lock(&softc->ctl_lock);
if ((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0 &&
(ooa_hdr->lun_num >= CTL_MAX_LUNS ||
(ooa_hdr->lun_num >= ctl_max_luns ||
softc->ctl_luns[ooa_hdr->lun_num] == NULL)) {
mtx_unlock(&softc->ctl_lock);
free(entries, M_CTL);
@ -2770,7 +2818,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
#ifdef CTL_IO_DELAY
mtx_lock(&softc->ctl_lock);
if (delay_info->lun_id >= CTL_MAX_LUNS ||
if (delay_info->lun_id >= ctl_max_luns ||
(lun = softc->ctl_luns[delay_info->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
delay_info->status = CTL_DELAY_STATUS_INVALID_LUN;
@ -2849,7 +2897,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
bcopy(err_desc, new_err_desc, sizeof(*new_err_desc));
mtx_lock(&softc->ctl_lock);
if (err_desc->lun_id >= CTL_MAX_LUNS ||
if (err_desc->lun_id >= ctl_max_luns ||
(lun = softc->ctl_luns[err_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
free(new_err_desc, M_CTL);
@ -2893,7 +2941,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
delete_done = 0;
mtx_lock(&softc->ctl_lock);
if (delete_desc->lun_id >= CTL_MAX_LUNS ||
if (delete_desc->lun_id >= ctl_max_luns ||
(lun = softc->ctl_luns[delete_desc->lun_id]) == NULL) {
mtx_unlock(&softc->ctl_lock);
printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n",
@ -2936,7 +2984,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
continue;
}
for (j = 0; j < CTL_MAX_PORTS; j++) {
for (j = 0; j < ctl_max_ports; j++) {
if (lun->pr_keys[j] == NULL)
continue;
for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){
@ -3411,7 +3459,7 @@ ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
if (lm->plun != UINT32_MAX) {
if (lm->lun == UINT32_MAX)
retval = ctl_lun_map_unset(port, lm->plun);
else if (lm->lun < CTL_MAX_LUNS &&
else if (lm->lun < ctl_max_luns &&
softc->ctl_luns[lm->lun] != NULL)
retval = ctl_lun_map_set(port, lm->plun, lm->lun);
else
@ -4519,6 +4567,13 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
if (lun_malloced)
lun->flags = CTL_LUN_MALLOCED;
lun->pending_sense = malloc(sizeof(struct scsi_sense_data *) *
ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO);
lun->pending_ua = malloc(sizeof(ctl_ua_type *) * ctl_max_ports,
M_DEVBUF, M_WAITOK | M_ZERO);
lun->pr_keys = malloc(sizeof(uint64_t *) * ctl_max_ports,
M_DEVBUF, M_WAITOK | M_ZERO);
/* Generate LUN ID. */
devidlen = max(CTL_DEVID_MIN_LEN,
strnlen(be_lun->device_id, CTL_DEVID_LEN));
@ -4605,13 +4660,13 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
* if it is available. Otherwise, allocate the first available LUN.
*/
if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) {
if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1))
if ((be_lun->req_lun_id > (ctl_max_luns - 1))
|| (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) {
mtx_unlock(&ctl_softc->ctl_lock);
if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) {
if (be_lun->req_lun_id > (ctl_max_luns - 1)) {
printf("ctl: requested LUN ID %d is higher "
"than CTL_MAX_LUNS - 1 (%d)\n",
be_lun->req_lun_id, CTL_MAX_LUNS - 1);
"than ctl_max_luns - 1 (%d)\n",
be_lun->req_lun_id, ctl_max_luns - 1);
} else {
/*
* XXX KDM return an error, or just assign
@ -4630,7 +4685,7 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
}
lun_number = be_lun->req_lun_id;
} else {
lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, CTL_MAX_LUNS);
lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, 0, ctl_max_luns);
if (lun_number == -1) {
mtx_unlock(&ctl_softc->ctl_lock);
printf("ctl: can't allocate LUN, out of LUNs\n");
@ -4697,7 +4752,9 @@ ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun,
lun->legacy_stats.blocksize = be_lun->blocksize;
if (be_lun->blocksize == 0)
lun->legacy_stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE;
for (len = 0; len < CTL_MAX_PORTS; len++)
lun->legacy_stats.ports = malloc(sizeof(struct ctl_lun_io_port_stats) *
ctl_max_ports, M_DEVBUF, M_WAITOK | M_ZERO);
for (len = 0; len < ctl_max_ports; len++)
lun->legacy_stats.ports[len].targ_port = len;
#endif /* CTL_LEGACY_STATS */
lun->stats.item = lun_number;
@ -4760,10 +4817,12 @@ ctl_free_lun(struct ctl_lun *lun)
ctl_tpc_lun_shutdown(lun);
mtx_destroy(&lun->lun_lock);
free(lun->lun_devid, M_CTL);
for (i = 0; i < CTL_MAX_PORTS; i++)
for (i = 0; i < ctl_max_ports; i++)
free(lun->pending_ua[i], M_CTL);
for (i = 0; i < CTL_MAX_PORTS; i++)
free(lun->pending_ua, M_DEVBUF);
for (i = 0; i < ctl_max_ports; i++)
free(lun->pr_keys[i], M_CTL);
free(lun->pr_keys, M_DEVBUF);
free(lun->write_buffer, M_CTL);
free(lun->prevent, M_CTL);
if (lun->flags & CTL_LUN_MALLOCED)
@ -8483,7 +8542,7 @@ ctl_hndl_per_res_out_on_other_sc(union ctl_io *io)
targ_lun = msg->hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
@ -9010,7 +9069,7 @@ ctl_report_luns(struct ctl_scsiio *ctsio)
CTL_DEBUG_PRINT(("ctl_report_luns\n"));
num_luns = 0;
num_port_luns = port->lun_map ? port->lun_map_size : CTL_MAX_LUNS;
num_port_luns = port->lun_map ? port->lun_map_size : ctl_max_luns;
mtx_lock(&softc->ctl_lock);
for (targ_lun_id = 0; targ_lun_id < num_port_luns; targ_lun_id++) {
if (ctl_lun_map_from_port(port, targ_lun_id) != UINT32_MAX)
@ -11200,7 +11259,7 @@ ctl_failover_lun(union ctl_io *rio)
/* Find and lock the LUN. */
mtx_lock(&softc->ctl_lock);
if (targ_lun > CTL_MAX_LUNS ||
if (targ_lun > ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
return;
@ -11282,7 +11341,7 @@ ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
lun = NULL;
targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
if (targ_lun < CTL_MAX_LUNS)
if (targ_lun < ctl_max_luns)
lun = softc->ctl_luns[targ_lun];
if (lun) {
/*
@ -11672,7 +11731,7 @@ ctl_do_lun_reset(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua_type)
xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
}
/* Clear CA. */
for (i = 0; i < CTL_MAX_PORTS; i++) {
for (i = 0; i < ctl_max_ports; i++) {
free(lun->pending_sense[i], M_CTL);
lun->pending_sense[i] = NULL;
}
@ -11705,7 +11764,7 @@ ctl_lun_reset(union ctl_io *io)
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
initidx = ctl_get_initindex(&io->io_hdr.nexus);
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@ -11784,7 +11843,7 @@ ctl_abort_task_set(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@ -11886,7 +11945,7 @@ ctl_abort_task(union ctl_io *io)
*/
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@ -12010,7 +12069,7 @@ ctl_query_task(union ctl_io *io, int task_set)
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@ -12049,7 +12108,7 @@ ctl_query_async_event(union ctl_io *io)
targ_lun = io->io_hdr.nexus.targ_mapped_lun;
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
io->taskio.task_status = CTL_TASK_LUN_DOES_NOT_EXIST;
@ -12141,7 +12200,7 @@ ctl_handle_isc(union ctl_io *io)
break;
case CTL_MSG_R2R: /* Only used in SER_ONLY mode. */
entry = ctl_get_cmd_entry(&io->scsiio, NULL);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
ctl_done(io);
break;
@ -12161,7 +12220,7 @@ ctl_handle_isc(union ctl_io *io)
ctl_done(io);
break;
}
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
ctl_free_io(io);
break;
@ -13130,7 +13189,7 @@ ctl_queue_sense(union ctl_io *io)
* If we don't have a LUN for this, just toss the sense information.
*/
mtx_lock(&softc->ctl_lock);
if (targ_lun >= CTL_MAX_LUNS ||
if (targ_lun >= ctl_max_luns ||
(lun = softc->ctl_luns[targ_lun]) == NULL) {
mtx_unlock(&softc->ctl_lock);
goto bailout;

View File

@ -58,26 +58,11 @@
*/
#define CTL_MAX_TARGID 15
/*
* Maximum number of LUNs we support at the moment. MUST be a power of 2.
*/
#define CTL_MAX_LUNS 1024
/*
* Maximum number of initiators per port.
*/
#define CTL_MAX_INIT_PER_PORT 2048
/*
* Maximum number of ports registered at one time.
*/
#define CTL_MAX_PORTS 256
/*
* Maximum number of initiators we support.
*/
#define CTL_MAX_INITIATORS (CTL_MAX_INIT_PER_PORT * CTL_MAX_PORTS)
/* Hopefully this won't conflict with new misc devices that pop up */
#define CTL_MINOR 225
@ -150,7 +135,7 @@ struct ctl_lun_io_stats {
uint64_t lun_number;
uint32_t blocksize;
ctl_lun_stats_flags flags;
struct ctl_lun_io_port_stats ports[CTL_MAX_PORTS];
struct ctl_lun_io_port_stats *ports;
};
struct ctl_stats {

View File

@ -390,8 +390,8 @@ struct ctl_lun {
TAILQ_HEAD(ctl_ooaq, ctl_io_hdr) ooa_queue;
TAILQ_HEAD(ctl_blockq,ctl_io_hdr) blocked_queue;
STAILQ_ENTRY(ctl_lun) links;
struct scsi_sense_data *pending_sense[CTL_MAX_PORTS];
ctl_ua_type *pending_ua[CTL_MAX_PORTS];
struct scsi_sense_data **pending_sense;
ctl_ua_type **pending_ua;
uint8_t ua_tpt_info[8];
time_t lasttpt;
uint8_t ie_asc; /* Informational exceptions */
@ -407,7 +407,7 @@ struct ctl_lun {
struct ctl_io_stats stats;
uint32_t res_idx;
uint32_t pr_generation;
uint64_t *pr_keys[CTL_MAX_PORTS];
uint64_t **pr_keys;
int pr_key_count;
uint32_t pr_res_idx;
uint8_t pr_res_type;
@ -453,16 +453,16 @@ struct ctl_softc {
struct sysctl_oid *sysctl_tree;
void *othersc_pool;
struct proc *ctl_proc;
uint32_t ctl_lun_mask[(CTL_MAX_LUNS + 31) / 32];
struct ctl_lun *ctl_luns[CTL_MAX_LUNS];
uint32_t ctl_port_mask[(CTL_MAX_PORTS + 31) / 32];
uint32_t *ctl_lun_mask;
struct ctl_lun **ctl_luns;
uint32_t *ctl_port_mask;
STAILQ_HEAD(, ctl_lun) lun_list;
STAILQ_HEAD(, ctl_be_lun) pending_lun_queue;
uint32_t num_frontends;
STAILQ_HEAD(, ctl_frontend) fe_list;
uint32_t num_ports;
STAILQ_HEAD(, ctl_port) port_list;
struct ctl_port *ctl_ports[CTL_MAX_PORTS];
struct ctl_port **ctl_ports;
uint32_t num_backends;
STAILQ_HEAD(, ctl_backend_driver) be_list;
struct uma_zone *io_zone;

View File

@ -44,6 +44,7 @@
#include <string.h>
#endif
#include <sys/dsl_dir.h>
#include <sys/param.h>
#include <sys/nvpair.h>
#include "zfs_namecheck.h"
@ -301,8 +302,14 @@ pool_namecheck(const char *pool, namecheck_err_t *why, char *what)
/*
* Make sure the name is not too long.
* If we're creating a pool with version >= SPA_VERSION_DSL_SCRUB (v11)
* we need to account for additional space needed by the origin ds which
* will also be snapshotted: "poolname"+"/"+"$ORIGIN"+"@"+"$ORIGIN".
* Play it safe and enforce this limit even if the pool version is < 11
* so it can be upgraded without issues.
*/
if (strlen(pool) >= ZFS_MAX_DATASET_NAME_LEN) {
if (strlen(pool) >= (ZFS_MAX_DATASET_NAME_LEN - 2 -
strlen(ORIGIN_DIR_NAME) * 2)) {
if (why)
*why = NAME_ERR_TOOLONG;
return (-1);
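For concreteness: assuming the usual ZFS_MAX_DATASET_NAME_LEN of 256 and ORIGIN_DIR_NAME of "$ORIGIN" (neither definition is shown here), the reserved space is two separators plus two copies of "$ORIGIN", so pool names of 240 or more characters are now rejected:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t max_name = 256;			/* assumed ZFS_MAX_DATASET_NAME_LEN */
	const char *origin = "$ORIGIN";		/* assumed ORIGIN_DIR_NAME */

	/* "pool" + "/" + "$ORIGIN" + "@" + "$ORIGIN" + NUL must fit. */
	size_t limit = max_name - 2 - strlen(origin) * 2;

	printf("names of %zu+ chars fail namecheck\n", limit);	/* 240 */
	return (0);
}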

View File

@ -2373,6 +2373,10 @@ dbuf_prefetch_indirect_done(zio_t *zio, arc_buf_t *abuf, void *private)
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
@ -2482,6 +2486,10 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
@ -2497,6 +2505,10 @@ dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,

View File

@ -487,7 +487,6 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
objset_t *os;
dsl_dataset_t *ds;
uint64_t obj;
@ -538,12 +537,15 @@ dsl_pool_create(spa_t *spa, nvlist_t *zplprops, uint64_t txg)
/* create the root objset */
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
#ifdef _KERNEL
zfs_create_fs(os, kcred, zplprops, tx);
{
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
zfs_create_fs(os, kcred, zplprops, tx);
}
#endif
dsl_dataset_rele(ds, FTAG);

View File

@ -347,6 +347,12 @@ boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
#define DNODE_LEVEL_IS_L2CACHEABLE(_dn, _level) \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_ALL || \
(((_level) > 0 || \
DMU_OT_IS_METADATA((_dn)->dn_handle->dnh_dnode->dn_type)) && \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
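Restated as a plain function: the new macro admits a block to L2ARC when the objset caches everything, or when it caches only metadata and the block is either indirect (level > 0) or belongs to a metadata dnode. A sketch with stand-in type and parameter names (not from the source):

#include <stdbool.h>

enum cache_mode { CACHE_NONE, CACHE_METADATA, CACHE_ALL };	/* stand-ins */

static bool
dnode_level_l2cacheable(enum cache_mode cache, int level, bool dn_is_metadata)
{
	if (cache == CACHE_ALL)
		return (true);
	if (cache == CACHE_METADATA)
		return (level > 0 || dn_is_metadata);
	return (false);
}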
#ifdef ZFS_DEBUG
/*

View File

@ -5827,10 +5827,6 @@ zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
dsl_dataset_t *tosnap;
int error;
char *fromname;
/* LINTED E_FUNC_SET_NOT_USED */
boolean_t largeblockok;
/* LINTED E_FUNC_SET_NOT_USED */
boolean_t embedok;
boolean_t compressok;
uint64_t space;
@ -5844,8 +5840,6 @@ zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
return (error);
}
largeblockok = nvlist_exists(innvl, "largeblockok");
embedok = nvlist_exists(innvl, "embedok");
compressok = nvlist_exists(innvl, "compressok");
error = nvlist_lookup_string(innvl, "from", &fromname);
@ -5886,7 +5880,9 @@ zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
goto out;
}
} else {
// If estimating the size of a full send, use dmu_send_estimate
/*
* If estimating the size of a full send, use dmu_send_estimate.
*/
error = dmu_send_estimate(tosnap, NULL, compressok, &space);
}

View File

@ -63,10 +63,10 @@ cloudabi32_copyout_strings(struct image_params *imgp)
int
cloudabi32_fixup(register_t **stack_base, struct image_params *imgp)
{
char canarybuf[64];
char canarybuf[64], pidbuf[16];
Elf32_Auxargs *args;
struct thread *td;
void *argdata, *canary;
void *argdata, *canary, *pid;
size_t argdatalen;
int error;
@ -79,8 +79,9 @@ cloudabi32_fixup(register_t **stack_base, struct image_params *imgp)
td = curthread;
td->td_proc->p_osrel = __FreeBSD_version;
/* Store canary for stack smashing protection. */
argdata = *stack_base;
/* Store canary for stack smashing protection. */
arc4rand(canarybuf, sizeof(canarybuf), 0);
*stack_base -= howmany(sizeof(canarybuf), sizeof(register_t));
canary = *stack_base;
@ -88,6 +89,20 @@ cloudabi32_fixup(register_t **stack_base, struct image_params *imgp)
if (error != 0)
return (error);
/*
* Generate a random UUID that identifies the process. Right now
* we don't store this UUID in the kernel. Ideally, it should be
* exposed through ps(1).
*/
arc4rand(pidbuf, sizeof(pidbuf), 0);
pidbuf[6] = (pidbuf[6] & 0x0f) | 0x40;
pidbuf[8] = (pidbuf[8] & 0x3f) | 0x80;
*stack_base -= howmany(sizeof(pidbuf), sizeof(register_t));
pid = *stack_base;
error = copyout(pidbuf, pid, sizeof(pidbuf));
if (error != 0)
return (error);
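The two masking lines follow the RFC 4122 layout for a version 4 (random) UUID: byte 6 keeps the version in its high nibble and byte 8 carries the 10xxxxxx variant bits. A userspace sketch, with arc4random_buf(3) standing in for the kernel's arc4rand():

#include <stdlib.h>

static void
make_v4_uuid(unsigned char uuid[16])
{
	arc4random_buf(uuid, 16);		/* random payload */
	uuid[6] = (uuid[6] & 0x0f) | 0x40;	/* version 4 */
	uuid[8] = (uuid[8] & 0x3f) | 0x80;	/* variant: RFC 4122 */
}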
/*
* Compute length of program arguments. As the argument data is
* binary safe, we had to add a trailing null byte in
@ -111,9 +126,10 @@ cloudabi32_fixup(register_t **stack_base, struct image_params *imgp)
VAL(CLOUDABI_AT_PAGESZ, args->pagesz),
PTR(CLOUDABI_AT_PHDR, args->phdr),
VAL(CLOUDABI_AT_PHNUM, args->phnum),
VAL(CLOUDABI_AT_TID, td->td_tid),
PTR(CLOUDABI_AT_PID, pid),
PTR(CLOUDABI_AT_SYSINFO_EHDR,
imgp->proc->p_sysent->sv_shared_page_base),
VAL(CLOUDABI_AT_TID, td->td_tid),
#undef VAL
#undef PTR
{ .a_type = CLOUDABI_AT_NULL },

View File

@ -78,7 +78,7 @@ convert_signal(int sig)
struct cloudabi32_kevent_args {
const cloudabi32_subscription_t *in;
cloudabi32_event_t *out;
cloudabi_event_t *out;
};
/* Converts CloudABI's subscription objects to FreeBSD's struct kevent. */
@ -145,7 +145,7 @@ cloudabi32_kevent_copyin(void *arg, struct kevent *kevp, int count)
static int
cloudabi32_kevent_copyout(void *arg, struct kevent *kevp, int count)
{
cloudabi32_event_t ev;
cloudabi_event_t ev;
struct cloudabi32_kevent_args *args;
int error;
@ -157,19 +157,15 @@ cloudabi32_kevent_copyout(void *arg, struct kevent *kevp, int count)
switch (kevp->filter) {
case EVFILT_TIMER:
ev.type = CLOUDABI_EVENTTYPE_CLOCK;
ev.clock.identifier = kevp->ident;
break;
case EVFILT_READ:
ev.type = CLOUDABI_EVENTTYPE_FD_READ;
ev.fd_readwrite.fd = kevp->ident;
break;
case EVFILT_WRITE:
ev.type = CLOUDABI_EVENTTYPE_FD_WRITE;
ev.fd_readwrite.fd = kevp->ident;
break;
case EVFILT_PROCDESC:
ev.type = CLOUDABI_EVENTTYPE_PROC_TERMINATE;
ev.proc_terminate.fd = kevp->ident;
break;
}
@ -231,7 +227,7 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
*/
if (uap->nsubscriptions == 1) {
cloudabi32_subscription_t sub;
cloudabi32_event_t ev = {};
cloudabi_event_t ev = {};
int error;
error = copyin(uap->in, &sub, sizeof(sub));
@ -241,7 +237,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
ev.type = sub.type;
if (sub.type == CLOUDABI_EVENTTYPE_CONDVAR) {
/* Wait on a condition variable. */
ev.condvar.condvar = sub.condvar.condvar;
ev.error = cloudabi_convert_errno(
cloudabi_futex_condvar_wait(
td, TO_PTR(sub.condvar.condvar),
@ -253,7 +248,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
return (copyout(&ev, uap->out, sizeof(ev)));
} else if (sub.type == CLOUDABI_EVENTTYPE_LOCK_RDLOCK) {
/* Acquire a read lock. */
ev.lock.lock = sub.lock.lock;
ev.error = cloudabi_convert_errno(
cloudabi_futex_lock_rdlock(
td, TO_PTR(sub.lock.lock),
@ -263,7 +257,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
return (copyout(&ev, uap->out, sizeof(ev)));
} else if (sub.type == CLOUDABI_EVENTTYPE_LOCK_WRLOCK) {
/* Acquire a write lock. */
ev.lock.lock = sub.lock.lock;
ev.error = cloudabi_convert_errno(
cloudabi_futex_lock_wrlock(
td, TO_PTR(sub.lock.lock),
@ -274,7 +267,7 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
}
} else if (uap->nsubscriptions == 2) {
cloudabi32_subscription_t sub[2];
cloudabi32_event_t ev[2] = {};
cloudabi_event_t ev[2] = {};
int error;
error = copyin(uap->in, &sub, sizeof(sub));
@ -288,8 +281,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Wait for a condition variable with timeout. */
ev[0].condvar.condvar = sub[0].condvar.condvar;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_condvar_wait(
td, TO_PTR(sub[0].condvar.condvar),
sub[0].condvar.condvar_scope,
@ -309,8 +300,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Acquire a read lock with a timeout. */
ev[0].lock.lock = sub[0].lock.lock;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_lock_rdlock(
td, TO_PTR(sub[0].lock.lock),
sub[0].lock.lock_scope, sub[1].clock.clock_id,
@ -328,8 +317,6 @@ cloudabi32_sys_poll(struct thread *td, struct cloudabi32_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Acquire a write lock with a timeout. */
ev[0].lock.lock = sub[0].lock.lock;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_lock_wrlock(
td, TO_PTR(sub[0].lock.lock),
sub[0].lock.lock_scope, sub[1].clock.clock_id,

View File

@ -224,7 +224,7 @@ struct cloudabi_sys_mem_unmap_args {
};
struct cloudabi32_sys_poll_args {
char in_l_[PADL_(const cloudabi32_subscription_t *)]; const cloudabi32_subscription_t * in; char in_r_[PADR_(const cloudabi32_subscription_t *)];
char out_l_[PADL_(cloudabi32_event_t *)]; cloudabi32_event_t * out; char out_r_[PADR_(cloudabi32_event_t *)];
char out_l_[PADL_(cloudabi_event_t *)]; cloudabi_event_t * out; char out_r_[PADR_(cloudabi_event_t *)];
char nsubscriptions_l_[PADL_(size_t)]; size_t nsubscriptions; char nsubscriptions_r_[PADR_(size_t)];
};
struct cloudabi_sys_proc_exec_args {

View File

@ -352,7 +352,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 37: {
struct cloudabi32_sys_poll_args *p = params;
uarg[0] = (intptr_t) p->in; /* const cloudabi32_subscription_t * */
uarg[1] = (intptr_t) p->out; /* cloudabi32_event_t * */
uarg[1] = (intptr_t) p->out; /* cloudabi_event_t * */
uarg[2] = p->nsubscriptions; /* size_t */
*n_args = 3;
break;
@ -1062,7 +1062,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "userland const cloudabi32_subscription_t *";
break;
case 1:
p = "userland cloudabi32_event_t *";
p = "userland cloudabi_event_t *";
break;
case 2:
p = "size_t";

View File

@ -63,10 +63,10 @@ cloudabi64_copyout_strings(struct image_params *imgp)
int
cloudabi64_fixup(register_t **stack_base, struct image_params *imgp)
{
char canarybuf[64];
char canarybuf[64], pidbuf[16];
Elf64_Auxargs *args;
struct thread *td;
void *argdata, *canary;
void *argdata, *canary, *pid;
size_t argdatalen;
int error;
@ -79,8 +79,9 @@ cloudabi64_fixup(register_t **stack_base, struct image_params *imgp)
td = curthread;
td->td_proc->p_osrel = __FreeBSD_version;
/* Store canary for stack smashing protection. */
argdata = *stack_base;
/* Store canary for stack smashing protection. */
arc4rand(canarybuf, sizeof(canarybuf), 0);
*stack_base -= howmany(sizeof(canarybuf), sizeof(register_t));
canary = *stack_base;
@ -88,6 +89,20 @@ cloudabi64_fixup(register_t **stack_base, struct image_params *imgp)
if (error != 0)
return (error);
/*
* Generate a random UUID that identifies the process. Right now
* we don't store this UUID in the kernel. Ideally, it should be
* exposed through ps(1).
*/
arc4rand(pidbuf, sizeof(pidbuf), 0);
pidbuf[6] = (pidbuf[6] & 0x0f) | 0x40;
pidbuf[8] = (pidbuf[8] & 0x3f) | 0x80;
*stack_base -= howmany(sizeof(pidbuf), sizeof(register_t));
pid = *stack_base;
error = copyout(pidbuf, pid, sizeof(pidbuf));
if (error != 0)
return (error);
/*
* Compute length of program arguments. As the argument data is
* binary safe, we had to add a trailing null byte in
@ -111,9 +126,10 @@ cloudabi64_fixup(register_t **stack_base, struct image_params *imgp)
VAL(CLOUDABI_AT_PAGESZ, args->pagesz),
PTR(CLOUDABI_AT_PHDR, args->phdr),
VAL(CLOUDABI_AT_PHNUM, args->phnum),
VAL(CLOUDABI_AT_TID, td->td_tid),
PTR(CLOUDABI_AT_PID, pid),
PTR(CLOUDABI_AT_SYSINFO_EHDR,
imgp->proc->p_sysent->sv_shared_page_base),
VAL(CLOUDABI_AT_TID, td->td_tid),
#undef VAL
#undef PTR
{ .a_type = CLOUDABI_AT_NULL },

View File

@ -78,7 +78,7 @@ convert_signal(int sig)
struct cloudabi64_kevent_args {
const cloudabi64_subscription_t *in;
cloudabi64_event_t *out;
cloudabi_event_t *out;
};
/* Converts CloudABI's subscription objects to FreeBSD's struct kevent. */
@ -145,7 +145,7 @@ cloudabi64_kevent_copyin(void *arg, struct kevent *kevp, int count)
static int
cloudabi64_kevent_copyout(void *arg, struct kevent *kevp, int count)
{
cloudabi64_event_t ev;
cloudabi_event_t ev;
struct cloudabi64_kevent_args *args;
int error;
@ -157,19 +157,15 @@ cloudabi64_kevent_copyout(void *arg, struct kevent *kevp, int count)
switch (kevp->filter) {
case EVFILT_TIMER:
ev.type = CLOUDABI_EVENTTYPE_CLOCK;
ev.clock.identifier = kevp->ident;
break;
case EVFILT_READ:
ev.type = CLOUDABI_EVENTTYPE_FD_READ;
ev.fd_readwrite.fd = kevp->ident;
break;
case EVFILT_WRITE:
ev.type = CLOUDABI_EVENTTYPE_FD_WRITE;
ev.fd_readwrite.fd = kevp->ident;
break;
case EVFILT_PROCDESC:
ev.type = CLOUDABI_EVENTTYPE_PROC_TERMINATE;
ev.proc_terminate.fd = kevp->ident;
break;
}
@ -231,7 +227,7 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
*/
if (uap->nsubscriptions == 1) {
cloudabi64_subscription_t sub;
cloudabi64_event_t ev = {};
cloudabi_event_t ev = {};
int error;
error = copyin(uap->in, &sub, sizeof(sub));
@ -241,7 +237,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
ev.type = sub.type;
if (sub.type == CLOUDABI_EVENTTYPE_CONDVAR) {
/* Wait on a condition variable. */
ev.condvar.condvar = sub.condvar.condvar;
ev.error = cloudabi_convert_errno(
cloudabi_futex_condvar_wait(
td, TO_PTR(sub.condvar.condvar),
@ -253,7 +248,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
return (copyout(&ev, uap->out, sizeof(ev)));
} else if (sub.type == CLOUDABI_EVENTTYPE_LOCK_RDLOCK) {
/* Acquire a read lock. */
ev.lock.lock = sub.lock.lock;
ev.error = cloudabi_convert_errno(
cloudabi_futex_lock_rdlock(
td, TO_PTR(sub.lock.lock),
@ -263,7 +257,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
return (copyout(&ev, uap->out, sizeof(ev)));
} else if (sub.type == CLOUDABI_EVENTTYPE_LOCK_WRLOCK) {
/* Acquire a write lock. */
ev.lock.lock = sub.lock.lock;
ev.error = cloudabi_convert_errno(
cloudabi_futex_lock_wrlock(
td, TO_PTR(sub.lock.lock),
@ -274,7 +267,7 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
}
} else if (uap->nsubscriptions == 2) {
cloudabi64_subscription_t sub[2];
cloudabi64_event_t ev[2] = {};
cloudabi_event_t ev[2] = {};
int error;
error = copyin(uap->in, &sub, sizeof(sub));
@ -288,8 +281,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Wait for a condition variable with timeout. */
ev[0].condvar.condvar = sub[0].condvar.condvar;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_condvar_wait(
td, TO_PTR(sub[0].condvar.condvar),
sub[0].condvar.condvar_scope,
@ -309,8 +300,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Acquire a read lock with a timeout. */
ev[0].lock.lock = sub[0].lock.lock;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_lock_rdlock(
td, TO_PTR(sub[0].lock.lock),
sub[0].lock.lock_scope, sub[1].clock.clock_id,
@ -328,8 +317,6 @@ cloudabi64_sys_poll(struct thread *td, struct cloudabi64_sys_poll_args *uap)
sub[1].type == CLOUDABI_EVENTTYPE_CLOCK &&
sub[1].clock.flags == CLOUDABI_SUBSCRIPTION_CLOCK_ABSTIME) {
/* Acquire a write lock with a timeout. */
ev[0].lock.lock = sub[0].lock.lock;
ev[1].clock.identifier = sub[1].clock.identifier;
error = cloudabi_futex_lock_wrlock(
td, TO_PTR(sub[0].lock.lock),
sub[0].lock.lock_scope, sub[1].clock.clock_id,

View File

@ -224,7 +224,7 @@ struct cloudabi_sys_mem_unmap_args {
};
struct cloudabi64_sys_poll_args {
char in_l_[PADL_(const cloudabi64_subscription_t *)]; const cloudabi64_subscription_t * in; char in_r_[PADR_(const cloudabi64_subscription_t *)];
char out_l_[PADL_(cloudabi64_event_t *)]; cloudabi64_event_t * out; char out_r_[PADR_(cloudabi64_event_t *)];
char out_l_[PADL_(cloudabi_event_t *)]; cloudabi_event_t * out; char out_r_[PADR_(cloudabi_event_t *)];
char nsubscriptions_l_[PADL_(size_t)]; size_t nsubscriptions; char nsubscriptions_r_[PADR_(size_t)];
};
struct cloudabi_sys_proc_exec_args {

View File

@ -352,7 +352,7 @@ systrace_args(int sysnum, void *params, uint64_t *uarg, int *n_args)
case 37: {
struct cloudabi64_sys_poll_args *p = params;
uarg[0] = (intptr_t) p->in; /* const cloudabi64_subscription_t * */
uarg[1] = (intptr_t) p->out; /* cloudabi64_event_t * */
uarg[1] = (intptr_t) p->out; /* cloudabi_event_t * */
uarg[2] = p->nsubscriptions; /* size_t */
*n_args = 3;
break;
@ -1062,7 +1062,7 @@ systrace_entry_setargdesc(int sysnum, int ndx, char *desc, size_t descsz)
p = "userland const cloudabi64_subscription_t *";
break;
case 1:
p = "userland cloudabi64_event_t *";
p = "userland cloudabi_event_t *";
break;
case 2:
p = "size_t";

View File

@ -391,10 +391,6 @@ hlist_move_list(struct hlist_head *old, struct hlist_head *new)
old->first = NULL;
}
/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
static inline int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
@ -412,20 +408,6 @@ static inline void __list_cut_position(struct list_head *list,
new_first->prev = head;
}
/**
* list_cut_position - cut a list into two
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
* and if so we won't cut the list
*
* This helper moves the initial part of @head, up to and
* including @entry, from @head to @list. You should
* pass on @entry an element you know is on @head. @list
* should be an empty list or a list you do not care about
* losing its data.
*
*/
static inline void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
@ -440,11 +422,6 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_last(const struct list_head *list,
const struct list_head *head)
{

View File

@ -4648,7 +4648,9 @@ dev/mlx5/mlx5_core/mlx5_diagnostics.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_eq.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_flow_table.c optional mlx5 pci \
dev/mlx5/mlx5_core/mlx5_fs_cmd.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fs_tree.c optional mlx5 pci \
compile-with "${OFED_C}"
dev/mlx5/mlx5_core/mlx5_fw.c optional mlx5 pci \
compile-with "${OFED_C}"

View File

@ -24,6 +24,9 @@ _srcconf_included_:
.MAKE.MODE+= curdirOk=yes
.endif
# The kernel build always expects .OBJDIR=.CURDIR.
.OBJDIR: ${.CURDIR}
.if defined(NO_OBJWALK) || ${MK_AUTO_OBJ} == "yes"
NO_OBJWALK= t
NO_MODULES_OBJ= t

View File

@ -52,47 +52,6 @@ _Static_assert(offsetof(cloudabi32_ciovec_t, buf_len) == 4, "Incorrect layout");
_Static_assert(sizeof(cloudabi32_ciovec_t) == 8, "Incorrect layout");
_Static_assert(_Alignof(cloudabi32_ciovec_t) == 4, "Incorrect layout");
typedef struct {
_Alignas(8) cloudabi_userdata_t userdata;
_Alignas(2) cloudabi_errno_t error;
_Alignas(1) cloudabi_eventtype_t type;
union {
struct {
_Alignas(8) cloudabi_userdata_t identifier;
} clock;
struct {
_Alignas(4) uint32_t condvar;
} condvar;
struct {
_Alignas(8) cloudabi_filesize_t nbytes;
_Alignas(4) cloudabi_fd_t fd;
_Alignas(2) cloudabi_eventrwflags_t flags;
} fd_readwrite;
struct {
_Alignas(4) uint32_t lock;
} lock;
struct {
_Alignas(4) cloudabi_fd_t fd;
_Alignas(1) cloudabi_signal_t signal;
_Alignas(4) cloudabi_exitcode_t exitcode;
} proc_terminate;
};
} cloudabi32_event_t;
_Static_assert(offsetof(cloudabi32_event_t, userdata) == 0, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, error) == 8, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, type) == 10, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, clock.identifier) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, condvar.condvar) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, fd_readwrite.nbytes) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, fd_readwrite.fd) == 24, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, fd_readwrite.flags) == 28, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, lock.lock) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, proc_terminate.fd) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, proc_terminate.signal) == 20, "Incorrect layout");
_Static_assert(offsetof(cloudabi32_event_t, proc_terminate.exitcode) == 24, "Incorrect layout");
_Static_assert(sizeof(cloudabi32_event_t) == 32, "Incorrect layout");
_Static_assert(_Alignof(cloudabi32_event_t) == 8, "Incorrect layout");
typedef struct {
_Alignas(4) uint32_t buf;
_Alignas(4) uint32_t buf_len;

View File

@ -52,47 +52,6 @@ _Static_assert(offsetof(cloudabi64_ciovec_t, buf_len) == 8, "Incorrect layout");
_Static_assert(sizeof(cloudabi64_ciovec_t) == 16, "Incorrect layout");
_Static_assert(_Alignof(cloudabi64_ciovec_t) == 8, "Incorrect layout");
typedef struct {
_Alignas(8) cloudabi_userdata_t userdata;
_Alignas(2) cloudabi_errno_t error;
_Alignas(1) cloudabi_eventtype_t type;
union {
struct {
_Alignas(8) cloudabi_userdata_t identifier;
} clock;
struct {
_Alignas(8) uint64_t condvar;
} condvar;
struct {
_Alignas(8) cloudabi_filesize_t nbytes;
_Alignas(4) cloudabi_fd_t fd;
_Alignas(2) cloudabi_eventrwflags_t flags;
} fd_readwrite;
struct {
_Alignas(8) uint64_t lock;
} lock;
struct {
_Alignas(4) cloudabi_fd_t fd;
_Alignas(1) cloudabi_signal_t signal;
_Alignas(4) cloudabi_exitcode_t exitcode;
} proc_terminate;
};
} cloudabi64_event_t;
_Static_assert(offsetof(cloudabi64_event_t, userdata) == 0, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, error) == 8, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, type) == 10, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, clock.identifier) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, condvar.condvar) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, fd_readwrite.nbytes) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, fd_readwrite.fd) == 24, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, fd_readwrite.flags) == 28, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, lock.lock) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, proc_terminate.fd) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, proc_terminate.signal) == 20, "Incorrect layout");
_Static_assert(offsetof(cloudabi64_event_t, proc_terminate.exitcode) == 24, "Incorrect layout");
_Static_assert(sizeof(cloudabi64_event_t) == 32, "Incorrect layout");
_Static_assert(_Alignof(cloudabi64_event_t) == 8, "Incorrect layout");
typedef struct {
_Alignas(8) uint64_t buf;
_Alignas(8) uint64_t buf_len;

View File

@ -56,6 +56,7 @@ typedef uint32_t cloudabi_auxtype_t;
#define CLOUDABI_AT_PAGESZ 6
#define CLOUDABI_AT_PHDR 3
#define CLOUDABI_AT_PHNUM 4
#define CLOUDABI_AT_PID 263
#define CLOUDABI_AT_SYSINFO_EHDR 262
#define CLOUDABI_AT_TID 261
@ -352,6 +353,35 @@ _Static_assert(offsetof(cloudabi_dirent_t, d_type) == 20, "Incorrect layout");
_Static_assert(sizeof(cloudabi_dirent_t) == 24, "Incorrect layout");
_Static_assert(_Alignof(cloudabi_dirent_t) == 8, "Incorrect layout");
typedef struct {
_Alignas(8) cloudabi_userdata_t userdata;
_Alignas(2) cloudabi_errno_t error;
_Alignas(1) cloudabi_eventtype_t type;
union {
struct {
_Alignas(8) cloudabi_filesize_t nbytes;
_Alignas(1) char unused[4];
_Alignas(2) cloudabi_eventrwflags_t flags;
} fd_readwrite;
struct {
_Alignas(1) char unused[4];
_Alignas(1) cloudabi_signal_t signal;
_Alignas(4) cloudabi_exitcode_t exitcode;
} proc_terminate;
};
} cloudabi_event_t;
_Static_assert(offsetof(cloudabi_event_t, userdata) == 0, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, error) == 8, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, type) == 10, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, fd_readwrite.nbytes) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, fd_readwrite.unused) == 24, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, fd_readwrite.flags) == 28, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, proc_terminate.unused) == 16, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, proc_terminate.signal) == 20, "Incorrect layout");
_Static_assert(offsetof(cloudabi_event_t, proc_terminate.exitcode) == 24, "Incorrect layout");
_Static_assert(sizeof(cloudabi_event_t) == 32, "Incorrect layout");
_Static_assert(_Alignof(cloudabi_event_t) == 8, "Incorrect layout");
typedef struct {
_Alignas(1) cloudabi_filetype_t fs_filetype;
_Alignas(2) cloudabi_fdflags_t fs_flags;

View File

@ -228,7 +228,7 @@
37 AUE_NULL STD { size_t cloudabi32_sys_poll( \
const cloudabi32_subscription_t *in, \
cloudabi32_event_t *out, \
cloudabi_event_t *out, \
size_t nsubscriptions); }
38 AUE_NULL STD { void cloudabi_sys_proc_exec( \

View File

@ -228,7 +228,7 @@
37 AUE_NULL STD { size_t cloudabi64_sys_poll( \
const cloudabi64_subscription_t *in, \
cloudabi64_event_t *out, \
cloudabi_event_t *out, \
size_t nsubscriptions); }
38 AUE_NULL STD { void cloudabi_sys_proc_exec( \

View File

@ -104,14 +104,11 @@ extern struct ena_bus_space ebs;
#define ENA_IOQ (1 << 7) /* Detailed info about IO queues. */
#define ENA_ADMQ (1 << 8) /* Detailed info about admin queue. */
#ifndef ENA_DEBUG_LEVEL
#define ENA_DEBUG_LEVEL (ENA_ALERT | ENA_WARNING)
#endif
extern int ena_log_level;
#ifdef ENA_TRACE
#define ena_trace_raw(level, fmt, args...) \
do { \
if (((level) & ENA_DEBUG_LEVEL) != (level)) \
if (((level) & ena_log_level) != (level)) \
break; \
printf(fmt, ##args); \
} while (0)
@ -120,10 +117,6 @@ extern struct ena_bus_space ebs;
ena_trace_raw(level, "%s() [TID:%d]: " \
fmt " \n", __func__, curthread->td_tid, ##args)
#else /* ENA_TRACE */
#define ena_trace_raw(...)
#define ena_trace(...)
#endif /* ENA_TRACE */
#define ena_trc_dbg(format, arg...) ena_trace(ENA_DBG, format, ##arg)
#define ena_trc_info(format, arg...) ena_trace(ENA_INFO, format, ##arg)

View File

@ -398,7 +398,7 @@ db_command(struct command **last_cmdp, struct command_table *cmd_table,
case CMD_HELP:
if (cmd_table == &db_cmd_table) {
db_printf("This is ddb(4), the kernel debugger; "
"see http://man.freebsd.org/ddb/4 for help.\n");
"see https://man.FreeBSD.org/ddb/4 for help.\n");
db_printf("Use \"bt\" for backtrace, \"dump\" for "
"kernel core dump, \"reset\" to reboot.\n");
db_printf("Available commands:\n");

View File

@ -73,6 +73,11 @@ static const struct {
{0x78021022, 0x00, "AMD Hudson-2", 0},
{0x78031022, 0x00, "AMD Hudson-2", 0},
{0x78041022, 0x00, "AMD Hudson-2", 0},
{0x79001022, 0x00, "AMD KERNCZ", 0},
{0x79011022, 0x00, "AMD KERNCZ", 0},
{0x79021022, 0x00, "AMD KERNCZ", 0},
{0x79031022, 0x00, "AMD KERNCZ", 0},
{0x79041022, 0x00, "AMD KERNCZ", 0},
{0x06011b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS|AHCI_Q_NOAUX},
{0x06021b21, 0x00, "ASMedia ASM1060", AHCI_Q_NOCCS|AHCI_Q_NOAUX},
{0x06111b21, 0x00, "ASMedia ASM1061", AHCI_Q_NOCCS|AHCI_Q_NOAUX},

View File

@ -78,6 +78,7 @@
** 1.20.00.29 12/18/2013 Ching Huang Change simq allocation number, support ARC1883
** 1.30.00.00 11/30/2015 Ching Huang Added support ARC1203
** 1.40.00.00 07/11/2017 Ching Huang Added support ARC1884
** 1.40.00.01 10/30/2017 Ching Huang Fixed release memory resource
******************************************************************************************
*/
@ -149,7 +150,7 @@ __FBSDID("$FreeBSD$");
#define arcmsr_callout_init(a) callout_init(a);
#endif
#define ARCMSR_DRIVER_VERSION "arcmsr version 1.40.00.00 2017-07-11"
#define ARCMSR_DRIVER_VERSION "arcmsr version 1.40.00.01 2017-10-30"
#include <dev/arcmsr/arcmsr.h>
/*
**************************************************************************
@ -187,7 +188,7 @@ static void arcmsr_polling_devmap(void *arg);
static void arcmsr_srb_timeout(void *arg);
static void arcmsr_hbd_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbe_postqueue_isr(struct AdapterControlBlock *acb);
void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb);
static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb);
#ifdef ARCMSR_DEBUG1
static void arcmsr_dump_data(struct AdapterControlBlock *acb);
#endif
@ -910,6 +911,10 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t fl
/* check if command done with no error*/
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A:
case ACB_ADAPTER_TYPE_B:
srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
break;
case ACB_ADAPTER_TYPE_C:
case ACB_ADAPTER_TYPE_D:
srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb & 0xFFFFFFE0)); /*frame must be 32 bytes aligned*/
@ -917,8 +922,6 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, u_int32_t fl
case ACB_ADAPTER_TYPE_E:
srb = acb->psrb_pool[flag_srb];
break;
case ACB_ADAPTER_TYPE_A:
case ACB_ADAPTER_TYPE_B:
default:
srb = (struct CommandControlBlock *)(acb->vir2phy_offset+(flag_srb << 5));/*frame must be 32 bytes aligned*/
break;
@ -1585,8 +1588,7 @@ static u_int32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
u_int8_t *iop_data;
u_int32_t iop_len;
if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D) ||
(acb->adapter_type == ACB_ADAPTER_TYPE_E)) {
if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
return(arcmsr_Read_iop_rqbuffer_data_D(acb, prbuffer));
}
iop_data = (u_int8_t *)prbuffer->data;
@ -1681,8 +1683,7 @@ static void arcmsr_Write_data_2iop_wqbuffer(struct AdapterControlBlock *acb)
u_int8_t *iop_data;
int32_t allxfer_len=0;
if((acb->adapter_type == ACB_ADAPTER_TYPE_C) || (acb->adapter_type == ACB_ADAPTER_TYPE_D) ||
(acb->adapter_type == ACB_ADAPTER_TYPE_E)) {
if(acb->adapter_type >= ACB_ADAPTER_TYPE_B) {
arcmsr_Write_data_2iop_wqbuffer_D(acb);
return;
}
@ -2495,7 +2496,7 @@ static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
**
************************************************************************
*/
u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
static u_int32_t arcmsr_iop_ioctlcmd(struct AdapterControlBlock *acb, u_int32_t ioctl_cmd, caddr_t arg)
{
struct CMD_MESSAGE_FIELD *pcmdmessagefld;
u_int32_t retvalue = EINVAL;
@ -2683,7 +2684,7 @@ static void arcmsr_free_srb(struct CommandControlBlock *srb)
**************************************************************************
**************************************************************************
*/
struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
static struct CommandControlBlock *arcmsr_get_freesrb(struct AdapterControlBlock *acb)
{
struct CommandControlBlock *srb = NULL;
u_int32_t workingsrb_startindex, workingsrb_doneindex;
@ -4115,12 +4116,11 @@ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
outbound_doorbell = CHIP_REG_READ32(HBA_MessageUnit, 0, outbound_doorbell);
CHIP_REG_WRITE32(HBA_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
CHIP_REG_WRITE32(HBA_MessageUnit, 0, inbound_doorbell, ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
}
break;
case ACB_ADAPTER_TYPE_B: {
struct HBB_MessageUnit *phbbmu = (struct HBB_MessageUnit *)acb->pmu;
WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_MESSAGE_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
WRITE_CHIP_REG32(0, phbbmu->iop2drv_doorbell, ARCMSR_DOORBELL_INT_CLEAR_PATTERN);/*clear interrupt and message state*/
WRITE_CHIP_REG32(0, phbbmu->drv2iop_doorbell, ARCMSR_DRV2IOP_DATA_READ_OK);
/* let IOP know data has been read */
}
@ -4139,7 +4139,6 @@ static void arcmsr_clear_doorbell_queue_buffer( struct AdapterControlBlock *acb)
outbound_doorbell = CHIP_REG_READ32(HBD_MessageUnit, 0, outbound_doorbell);
CHIP_REG_WRITE32(HBD_MessageUnit, 0, outbound_doorbell, outbound_doorbell); /*clear doorbell interrupt */
CHIP_REG_WRITE32(HBD_MessageUnit, 0, inbound_doorbell, ARCMSR_HBDMU_DRV2IOP_DATA_OUT_READ);
}
break;
case ACB_ADAPTER_TYPE_E: {
@ -4353,7 +4352,8 @@ static void arcmsr_map_free_srb(void *arg, bus_dma_segment_t *segs, int nseg, in
srb_phyaddr = srb_phyaddr + SRB_SIZE;
srb_tmp = (struct CommandControlBlock *)((unsigned long)srb_tmp + SRB_SIZE);
}
acb->pCompletionQ = (pCompletion_Q)srb_tmp;
if (acb->adapter_type == ACB_ADAPTER_TYPE_E)
acb->pCompletionQ = (pCompletion_Q)srb_tmp;
acb->vir2phy_offset = (unsigned long)srb_tmp - (unsigned long)srb_phyaddr;
}
/*
@ -4414,7 +4414,9 @@ static u_int32_t arcmsr_initialize(device_t dev)
case PCIDevVenIDARC1213:
case PCIDevVenIDARC1223: {
acb->adapter_type = ACB_ADAPTER_TYPE_C;
if (acb->sub_device_id == ARECA_SUB_DEV_ID_1883)
if ((acb->sub_device_id == ARECA_SUB_DEV_ID_1883) ||
(acb->sub_device_id == ARECA_SUB_DEV_ID_1216) ||
(acb->sub_device_id == ARECA_SUB_DEV_ID_1226))
acb->adapter_bus_speed = ACB_BUS_SPEED_12G;
else
acb->adapter_bus_speed = ACB_BUS_SPEED_6G;
@ -4598,7 +4600,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
acb->pmu = (struct MessageUnit_UNION *)mem_base0;
acb->rid = 0;
acb->rid[0] = rid0;
}
break;
case ACB_ADAPTER_TYPE_B: {
@ -4606,19 +4608,8 @@ static u_int32_t arcmsr_initialize(device_t dev)
struct CommandControlBlock *freesrb;
u_int32_t rid[]={ PCIR_BAR(0), PCIR_BAR(2) };
vm_offset_t mem_base[]={0,0};
u_long size;
if (vendor_dev_id == PCIDevVenIDARC1203)
size = sizeof(struct HBB_DOORBELL_1203);
else
size = sizeof(struct HBB_DOORBELL);
for(i=0; i < 2; i++) {
if(i == 0) {
acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev,SYS_RES_MEMORY, &rid[i],
RF_ACTIVE);
} else {
acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i],
RF_ACTIVE);
}
acb->sys_res_arcmsr[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid[i], RF_ACTIVE);
if(acb->sys_res_arcmsr[i] == NULL) {
arcmsr_free_resource(acb);
printf("arcmsr%d: bus_alloc_resource %d failure!\n", device_get_unit(dev), i);
@ -4654,7 +4645,8 @@ static u_int32_t arcmsr_initialize(device_t dev)
phbbmu->iop2drv_doorbell = offsetof(struct HBB_DOORBELL, iop2drv_doorbell);
phbbmu->iop2drv_doorbell_mask = offsetof(struct HBB_DOORBELL, iop2drv_doorbell_mask);
}
acb->rid = 0;
acb->rid[0] = rid[0];
acb->rid[1] = rid[1];
}
break;
case ACB_ADAPTER_TYPE_C: {
@ -4681,7 +4673,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
acb->btag[0] = rman_get_bustag(acb->sys_res_arcmsr[0]);
acb->bhandle[0] = rman_get_bushandle(acb->sys_res_arcmsr[0]);
acb->pmu = (struct MessageUnit_UNION *)mem_base0;
acb->rid = 1;
acb->rid[0] = rid0;
}
break;
case ACB_ADAPTER_TYPE_D: {
@ -4711,14 +4703,14 @@ static u_int32_t arcmsr_initialize(device_t dev)
acb->pmu = (struct MessageUnit_UNION *)((unsigned long)acb->uncacheptr+ARCMSR_SRBS_POOL_SIZE);
phbdmu = (struct HBD_MessageUnit0 *)acb->pmu;
phbdmu->phbdmu = (struct HBD_MessageUnit *)mem_base0;
acb->rid = 0;
acb->rid[0] = rid0;
}
break;
case ACB_ADAPTER_TYPE_E: {
u_int32_t rid0 = PCIR_BAR(1);
vm_offset_t mem_base0;
acb->sys_res_arcmsr[0] = bus_alloc_resource(dev,SYS_RES_MEMORY, &rid0, 0ul, ~0ul, sizeof(struct HBE_MessageUnit), RF_ACTIVE);
acb->sys_res_arcmsr[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid0, RF_ACTIVE);
if(acb->sys_res_arcmsr[0] == NULL) {
arcmsr_free_resource(acb);
printf("arcmsr%d: bus_alloc_resource failure!\n", device_get_unit(dev));
@ -4741,7 +4733,7 @@ static u_int32_t arcmsr_initialize(device_t dev)
acb->doneq_index = 0;
acb->in_doorbell = 0;
acb->out_doorbell = 0;
acb->rid = 1;
acb->rid[0] = rid0;
CHIP_REG_WRITE32(HBE_MessageUnit, 0, host_int_status, 0); /*clear interrupt*/
CHIP_REG_WRITE32(HBE_MessageUnit, 0, iobound_doorbell, ARCMSR_HBEMU_DOORBELL_SYNC); /* synchronize doorbell to 0 */
}
@ -4773,7 +4765,7 @@ static int arcmsr_setup_msix(struct AdapterControlBlock *acb)
int i;
for (i = 0; i < acb->msix_vectors; i++) {
acb->irq_id[i] = acb->rid + i;
acb->irq_id[i] = 1 + i;
acb->irqres[i] = bus_alloc_resource_any(acb->pci_dev,
SYS_RES_IRQ, &acb->irq_id[i], RF_ACTIVE);
if (acb->irqres[i] == NULL) {
@ -4825,7 +4817,7 @@ static int arcmsr_attach(device_t dev)
if (arcmsr_setup_msix(acb) == TRUE)
goto irqx;
}
acb->irq_id[0] = acb->rid;
acb->irq_id[0] = 0;
irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &acb->irq_id[0], RF_SHAREABLE | RF_ACTIVE);
if(irqres == NULL ||
#if __FreeBSD_version >= 700025
@ -4957,7 +4949,9 @@ static int arcmsr_probe(device_t dev)
case PCIDevVenIDARC1882:
case PCIDevVenIDARC1213:
case PCIDevVenIDARC1223:
if (sub_device_id == ARECA_SUB_DEV_ID_1883)
if ((sub_device_id == ARECA_SUB_DEV_ID_1883) ||
(sub_device_id == ARECA_SUB_DEV_ID_1216) ||
(sub_device_id == ARECA_SUB_DEV_ID_1226))
type = "SAS 12G";
else
type = "SAS 6G";
@ -5027,7 +5021,7 @@ static int arcmsr_shutdown(device_t dev)
************************************************************************
************************************************************************
*/
void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb)
static void arcmsr_teardown_intr(device_t dev, struct AdapterControlBlock *acb)
{
int i;
@ -5066,7 +5060,7 @@ static int arcmsr_detach(device_t dev)
arcmsr_shutdown(dev);
arcmsr_free_resource(acb);
for(i=0; (acb->sys_res_arcmsr[i]!=NULL) && (i<2); i++) {
bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(i), acb->sys_res_arcmsr[i]);
bus_release_resource(dev, SYS_RES_MEMORY, acb->rid[i], acb->sys_res_arcmsr[i]);
}
ARCMSR_LOCK_ACQUIRE(&acb->isr_lock);
xpt_async(AC_LOST_DEVICE, acb->ppath, NULL);
@ -5089,4 +5083,3 @@ static void arcmsr_dump_data(struct AdapterControlBlock *acb)
printf("Queued Command Count =0x%x\n",acb->srboutstandingcount);
}
#endif

View File

@ -125,8 +125,10 @@
#define ARECA_SUB_DEV_ID_1884 0x1884 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1212 0x1212 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1213 0x1213 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1216 0x1216 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1222 0x1222 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1223 0x1223 /* Subsystem Device ID */
#define ARECA_SUB_DEV_ID_1226 0x1226 /* Subsystem Device ID */
#define PCIDevVenIDARC1110 0x111017D3 /* Vendor Device ID */
#define PCIDevVenIDARC1120 0x112017D3 /* Vendor Device ID */
@ -1326,7 +1328,7 @@ struct AdapterControlBlock {
u_int32_t completionQ_entry;
pCompletion_Q pCompletionQ;
int msix_vectors;
int rid;
int rid[2];
};/* HW_DEVICE_EXTENSION */
/* acb_flags */
#define ACB_F_SCSISTOPADAPTER 0x0001

View File

@ -501,9 +501,11 @@ bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
if (!(errors & RX_PKT_CMPL_ERRORS_IP_CS_ERROR))
ri->iri_csum_flags |= CSUM_IP_VALID;
}
if (flags2 & RX_PKT_CMPL_FLAGS2_L4_CS_CALC) {
if (flags2 & (RX_PKT_CMPL_FLAGS2_L4_CS_CALC |
RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC)) {
ri->iri_csum_flags |= CSUM_L4_CALC;
if (!(errors & RX_PKT_CMPL_ERRORS_L4_CS_ERROR)) {
if (!(errors & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR |
RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR))) {
ri->iri_csum_flags |= CSUM_L4_VALID;
ri->iri_csum_data = 0xffff;
}
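The widened masks also accept the tunnel ("T_") completion bits, so an inner L4 checksum verified by hardware on an encapsulated packet now counts. Reduced to a self-contained sketch (bit positions are illustrative; the real values live in the bnxt completion-ring headers):

#include <stdbool.h>
#include <stdint.h>

#define L4_CS_CALC	(1u << 0)	/* illustrative stand-ins */
#define T_L4_CS_CALC	(1u << 1)
#define L4_CS_ERROR	(1u << 0)
#define T_L4_CS_ERROR	(1u << 1)

static bool
l4_csum_ok(uint32_t flags2, uint32_t errors)
{
	/* Either the outer or the tunnel-inner checksum was computed... */
	if ((flags2 & (L4_CS_CALC | T_L4_CS_CALC)) == 0)
		return (false);
	/* ...and neither reported an error. */
	return ((errors & (L4_CS_ERROR | T_L4_CS_ERROR)) == 0);
}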

View File

@ -3682,7 +3682,7 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
{
struct fw_port_cmd c;
unsigned int mdi = V_FW_PORT_CAP_MDI(FW_PORT_CAP_MDI_AUTO);
unsigned int aneg, fc, fec, speed;
unsigned int aneg, fc, fec, speed, rcap;
fc = 0;
if (lc->requested_fc & PAUSE_RX)
@ -3727,6 +3727,13 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
V_FW_PORT_CAP_SPEED(M_FW_PORT_CAP_SPEED);
}
rcap = aneg | speed | fc | fec | mdi;
if ((rcap | lc->supported) != lc->supported) {
CH_WARN(adap, "rcap 0x%08x, pcap 0x%08x\n", rcap,
lc->supported);
rcap &= lc->supported;
}
memset(&c, 0, sizeof(c));
c.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_EXEC |
@ -3734,7 +3741,7 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
c.action_to_len16 =
cpu_to_be32(V_FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
FW_LEN16(c));
c.u.l1cfg.rcap = cpu_to_be32(aneg | speed | fc | fec | mdi);
c.u.l1cfg.rcap = cpu_to_be32(rcap);
return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
}
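The clamp added above uses a standard bitmask containment test: OR-ing the requested capabilities into the supported mask leaves it unchanged exactly when every requested bit is already supported. With made-up masks:

#include <stdio.h>

int
main(void)
{
	unsigned int supported = 0x00f3;	/* hypothetical port caps */
	unsigned int rcap = 0x0102;		/* 0x0100 is unsupported */

	if ((rcap | supported) != supported) {
		printf("rcap 0x%08x, pcap 0x%08x\n", rcap, supported);
		rcap &= supported;		/* leaves 0x0002 */
	}
	return (0);
}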

File diff suppressed because it is too large

View File

@ -58,7 +58,8 @@
#define ENA_DMA_BIT_MASK(x) ((1ULL << (x)) - 1ULL)
/* 1 for AENQ + ADMIN */
#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues))
#define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
#define ENA_REG_BAR 0
#define ENA_MEM_BAR 2
@ -66,29 +67,20 @@
#define ENA_BUS_DMA_SEGS 32
#define ENA_DEFAULT_RING_SIZE 1024
#define ENA_DEFAULT_SMALL_PACKET_LEN 128
#define ENA_DEFAULT_MAX_RX_BUFF_ALLOC_SIZE 1536
#define ENA_RX_REFILL_THRESH_DEVIDER 8
#define ENA_RX_REFILL_THRESH_DIVIDER 8
#define ENA_MAX_PUSH_PKT_SIZE 128
#define ENA_NAME_MAX_LEN 20
#define ENA_IRQNAME_SIZE 40
#define ENA_PKT_MAX_BUFS 19
#define ENA_STALL_TIMEOUT 100
#define ENA_RX_RSS_TABLE_LOG_SIZE 7
#define ENA_RX_RSS_TABLE_SIZE (1 << ENA_RX_RSS_TABLE_LOG_SIZE)
#define ENA_HASH_KEY_SIZE 40
#define ENA_DMA_BITS_MASK 40
#define ENA_MAX_FRAME_LEN 10000
#define ENA_MIN_FRAME_LEN 60
#define ENA_RX_HASH_KEY_NUM 10
#define ENA_RX_THASH_TABLE_SIZE (1 << 8)
#define ENA_TX_CLEANUP_THRESHOLD 128
@ -111,18 +103,15 @@
#define RX_IRQ_INTERVAL 20
#define TX_IRQ_INTERVAL 50
#define ENA_MAX_MTU 9216
#define ENA_MIN_MTU 128
#define ENA_TSO_MAXSIZE 65536
#define ENA_TSO_NSEGS ENA_PKT_MAX_BUFS
#define ENA_RX_OFFSET NET_SKB_PAD + NET_IP_ALIGN
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
#define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
#define ENA_RX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
#define ENA_RX_RING_IDX_ADD(idx, n, ring_size) \
(((idx) + (n)) & ((ring_size) - 1))
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
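These two macros interleave each queue pair over the device's IO queue index space: pair q submits TX on even index 2q and RX on the odd index right after it. For example:

#define ENA_IO_TXQ_IDX(q)	(2 * (q))
#define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)

/* Queue pair 3 uses device IO queues 6 (TX) and 7 (RX). */
int txq = ENA_IO_TXQ_IDX(3);	/* 6 */
int rxq = ENA_IO_RXQ_IDX(3);	/* 7 */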
@ -209,20 +198,11 @@ struct ena_rx_buffer {
struct ena_com_buf ena_buf;
} __aligned(CACHE_LINE_SIZE);
struct ena_stats_tx {
counter_u64_t cnt;
counter_u64_t bytes;
counter_u64_t queue_stop;
counter_u64_t prepare_ctx_err;
counter_u64_t queue_wakeup;
counter_u64_t dma_mapping_err;
/* Not counted */
counter_u64_t unsupported_desc_num;
/* Not counted */
counter_u64_t napi_comp;
/* Not counted */
counter_u64_t tx_poll;
counter_u64_t doorbells;
counter_u64_t missing_tx_comp;
counter_u64_t bad_req_id;
@ -235,32 +215,38 @@ struct ena_stats_rx {
counter_u64_t bytes;
counter_u64_t refil_partial;
counter_u64_t bad_csum;
/* Not counted */
counter_u64_t page_alloc_fail;
counter_u64_t mjum_alloc_fail;
counter_u64_t mbuf_alloc_fail;
counter_u64_t dma_mapping_err;
counter_u64_t bad_desc_num;
/* Not counted */
counter_u64_t small_copy_len_pkt;
counter_u64_t bad_req_id;
counter_u64_t empty_rx_ring;
};
struct ena_ring {
/* Holds the empty requests for TX out of order completions */
uint16_t *free_tx_ids;
/* Holds the empty requests for TX/RX out of order completions */
union {
uint16_t *free_tx_ids;
uint16_t *free_rx_ids;
};
struct ena_com_dev *ena_dev;
struct ena_adapter *adapter;
struct ena_com_io_cq *ena_com_io_cq;
struct ena_com_io_sq *ena_com_io_sq;
/* The maximum length the driver can push to the device (For LLQ) */
enum ena_admin_placement_policy_type tx_mem_queue_type;
uint16_t rx_small_copy_len;
uint16_t qid;
uint16_t mtu;
/* Determines if device will use LLQ or normal mode for TX */
enum ena_admin_placement_policy_type tx_mem_queue_type;
/* The maximum length the driver can push to the device (For LLQ) */
uint8_t tx_max_header_size;
struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
/*
* Fields used for Adaptive Interrupt Modulation - to be implemented
* in future releases
*/
uint32_t smoothed_interval;
enum ena_intr_moder_level moder_tbl_idx;
@ -277,32 +263,33 @@ struct ena_ring {
int ring_size; /* number of tx/rx_buffer_info's entries */
struct buf_ring *br; /* only for TX */
struct mtx ring_mtx;
char mtx_name[16];
struct task enqueue_task;
struct taskqueue *enqueue_tq;
struct task cmpl_task;
struct taskqueue *cmpl_tq;
union {
struct {
struct task enqueue_task;
struct taskqueue *enqueue_tq;
};
struct {
struct task cmpl_task;
struct taskqueue *cmpl_tq;
};
};
union {
struct ena_stats_tx tx_stats;
struct ena_stats_rx rx_stats;
};
int empty_rx_queue;
} __aligned(CACHE_LINE_SIZE);
struct ena_stats_dev {
/* Not counted */
counter_u64_t tx_timeout;
/* Not counted */
counter_u64_t io_suspend;
/* Not counted */
counter_u64_t io_resume;
/* Not counted */
counter_u64_t wd_expired;
counter_u64_t interface_up;
counter_u64_t interface_down;
/* Not counted */
counter_u64_t admin_q_pause;
};
@ -326,8 +313,8 @@ struct ena_adapter {
struct ifmedia media;
/* OS resources */
struct resource * memory;
struct resource * registers;
struct resource *memory;
struct resource *registers;
struct mtx global_mtx;
struct sx ioctl_sx;
@ -341,11 +328,8 @@ struct ena_adapter {
bus_dma_tag_t tx_buf_tag;
bus_dma_tag_t rx_buf_tag;
int dma_width;
/*
* RX packets that shorter that this len will be copied to the skb
* header
*/
unsigned int small_copy_len;
uint32_t max_mtu;
uint16_t max_tx_sgl_size;
uint16_t max_rx_sgl_size;
@ -355,28 +339,21 @@ struct ena_adapter {
/* Tx fast path data */
int num_queues;
unsigned int tx_usecs, rx_usecs; /* Interrupt coalescing */
unsigned int tx_ring_size;
unsigned int rx_ring_size;
/* RSS*/
uint8_t rss_ind_tbl[ENA_RX_RSS_TABLE_SIZE];
uint8_t rss_ind_tbl[ENA_RX_RSS_TABLE_SIZE];
bool rss_support;
uint32_t msg_enable;
uint8_t mac_addr[ETHER_ADDR_LEN];
/* mdio and phy*/
char name[ENA_NAME_MAX_LEN];
bool link_status;
bool trigger_reset;
bool up;
bool running;
uint32_t wol;
/* Queue will represent one TX and one RX ring */
struct ena_que que[ENA_MAX_NUM_IO_QUEUES]
__aligned(CACHE_LINE_SIZE);
@ -410,19 +387,10 @@ struct ena_adapter {
enum ena_regs_reset_reason_types reset_reason;
};
#define ENA_DEV_LOCK mtx_lock(&adapter->global_mtx)
#define ENA_DEV_UNLOCK mtx_unlock(&adapter->global_mtx)
#define ENA_RING_MTX_LOCK(_ring) mtx_lock(&(_ring)->ring_mtx)
#define ENA_RING_MTX_TRYLOCK(_ring) mtx_trylock(&(_ring)->ring_mtx)
#define ENA_RING_MTX_UNLOCK(_ring) mtx_unlock(&(_ring)->ring_mtx)
struct ena_dev *ena_efa_enadev_get(device_t pdev);
int ena_register_adapter(struct ena_adapter *adapter);
void ena_unregister_adapter(struct ena_adapter *adapter);
static inline int ena_mbuf_count(struct mbuf *mbuf)
{
int count = 1;


@@ -32,14 +32,53 @@ __FBSDID("$FreeBSD$");
#include "ena_sysctl.h"
static void ena_sysctl_add_wd(struct ena_adapter *);
static void ena_sysctl_add_stats(struct ena_adapter *);
void
ena_sysctl_add_nodes(struct ena_adapter *adapter)
{
ena_sysctl_add_wd(adapter);
ena_sysctl_add_stats(adapter);
}
static void
ena_sysctl_add_wd(struct ena_adapter *adapter)
{
device_t dev;
struct sysctl_ctx_list *ctx;
struct sysctl_oid *tree;
struct sysctl_oid_list *child;
dev = adapter->pdev;
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
child = SYSCTL_CHILDREN(tree);
/* Sysctl calls for Watchdog service */
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "wd_active",
CTLFLAG_RWTUN, &adapter->wd_active, 0,
"Watchdog is active");
SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "keep_alive_timeout",
CTLFLAG_RWTUN, &adapter->keep_alive_timeout,
"Timeout for Keep Alive messages");
SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, "missing_tx_timeout",
CTLFLAG_RWTUN, &adapter->missing_tx_timeout,
"Timeout for TX completion");
SYSCTL_ADD_U32(ctx, child, OID_AUTO, "missing_tx_max_queues",
CTLFLAG_RWTUN, &adapter->missing_tx_max_queues, 0,
"Number of TX queues to check per run");
SYSCTL_ADD_U32(ctx, child, OID_AUTO, "missing_tx_threshold",
CTLFLAG_RWTUN, &adapter->missing_tx_threshold, 0,
"Max number of timeouted packets");
}
static void
ena_sysctl_add_stats(struct ena_adapter *adapter)
{
@@ -80,15 +119,6 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
dev_stats = &adapter->dev_stats;
admin_stats = &adapter->ena_dev->admin_queue.stats;
SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "tx_timeout",
CTLFLAG_RD, &dev_stats->tx_timeout,
"Driver TX timeouts");
SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "io_suspend",
CTLFLAG_RD, &dev_stats->io_suspend,
"IO queue suspends");
SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "io_resume",
CTLFLAG_RD, &dev_stats->io_resume,
"IO queue resumes");
SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "wd_expired",
CTLFLAG_RD, &dev_stats->wd_expired,
"Watchdog expiry count");
@@ -126,22 +156,9 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
"prepare_ctx_err", CTLFLAG_RD,
&tx_stats->prepare_ctx_err,
"TX buffer preparation failures");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"queue_wakeup", CTLFLAG_RD,
&tx_stats->queue_wakeup, "Queue wakeups");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"dma_mapping_err", CTLFLAG_RD,
&tx_stats->dma_mapping_err, "DMA mapping failures");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"unsupported_desc_num", CTLFLAG_RD,
&tx_stats->unsupported_desc_num,
"Excessive descriptor packet discards");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"napi_comp", CTLFLAG_RD,
&tx_stats->napi_comp, "Napi completions");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"tx_poll", CTLFLAG_RD,
&tx_stats->tx_poll, "TX poll count");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"doorbells", CTLFLAG_RD,
&tx_stats->doorbells, "Queue doorbells");
@@ -151,9 +168,6 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"bad_req_id", CTLFLAG_RD,
&tx_stats->bad_req_id, "Bad request id count");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"stops", CTLFLAG_RD,
&tx_stats->queue_stop, "Queue stops");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"mbuf_collapses", CTLFLAG_RD,
&tx_stats->collapse,
@@ -182,12 +196,12 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"bad_csum", CTLFLAG_RD,
&rx_stats->bad_csum, "Bad RX checksum");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"page_alloc_fail", CTLFLAG_RD,
&rx_stats->page_alloc_fail, "Failed page allocs");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"mbuf_alloc_fail", CTLFLAG_RD,
&rx_stats->mbuf_alloc_fail, "Failed mbuf allocs");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"mjum_alloc_fail", CTLFLAG_RD,
&rx_stats->mjum_alloc_fail, "Failed jumbo mbuf allocs");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"dma_mapping_err", CTLFLAG_RD,
&rx_stats->dma_mapping_err, "DMA mapping errors");
@@ -195,8 +209,11 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
"bad_desc_num", CTLFLAG_RD,
&rx_stats->bad_desc_num, "Bad descriptor count");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"small_copy_len_pkt", CTLFLAG_RD,
&rx_stats->small_copy_len_pkt, "Small copy packet count");
"bad_req_id", CTLFLAG_RD,
&rx_stats->bad_req_id, "Bad request id count");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"empty_rx_ring", CTLFLAG_RD,
&rx_stats->empty_rx_ring, "RX descriptors depletion count");
}
/* Stats read from device */


@@ -272,6 +272,7 @@ ds1307_start(void *xdev)
struct sysctl_oid *tree_node;
struct sysctl_oid_list *tree;
uint8_t secs;
uint8_t osc_en;
dev = (device_t)xdev;
sc = device_get_softc(dev);
@@ -286,7 +287,12 @@ ds1307_start(void *xdev)
device_printf(sc->sc_dev, "cannot read from RTC.\n");
return;
}
if ((secs & DS1307_SECS_CH) != 0) {
if (sc->sc_mcp7941x)
osc_en = 0x80;
else
osc_en = 0x00;
if (((secs & DS1307_SECS_CH) ^ osc_en) != 0) {
device_printf(sc->sc_dev,
"WARNING: RTC clock stopped, check the battery.\n");
}
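The check above works for both supported parts because the MCP7941X inverts the sense of bit 7 of the seconds register: on the DS1307 a set CH bit means the oscillator is halted, while on the MCP7941X that bit is ST and a clear bit means the oscillator is stopped. XOR-ing the register with the chip-specific expected value (0x80 for the MCP7941X, 0x00 for the DS1307) normalizes the test. A minimal standalone sketch of the idiom (hypothetical helper, not part of the driver):

#include <stdint.h>
#include <stdio.h>

/* Returns nonzero when the RTC oscillator is stopped. */
static int
rtc_osc_stopped(uint8_t secs_reg, int is_mcp7941x)
{
	/* Bit 7 while running: ST=1 on MCP7941X, CH=0 on DS1307. */
	uint8_t osc_en = is_mcp7941x ? 0x80 : 0x00;

	return (((secs_reg & 0x80) ^ osc_en) != 0);
}

int
main(void)
{
	printf("%d\n", rtc_osc_stopped(0x00, 0));	/* DS1307 running: 0 */
	printf("%d\n", rtc_osc_stopped(0x80, 1));	/* MCP7941X running: 0 */
	printf("%d\n", rtc_osc_stopped(0x00, 1));	/* MCP7941X stopped: 1 */
	return (0);
}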
@@ -318,7 +324,7 @@ ds1307_gettime(device_t dev, struct timespec *ts)
int error;
struct clocktime ct;
struct ds1307_softc *sc;
uint8_t data[7], hourmask;
uint8_t data[7], hourmask, st_mask;
sc = device_get_softc(dev);
error = iicdev_readfrom(sc->sc_dev, DS1307_SECS, data, sizeof(data),
@@ -329,7 +335,12 @@ ds1307_gettime(device_t dev, struct timespec *ts)
}
/* If the clock halted, we don't have good data. */
if (data[DS1307_SECS] & DS1307_SECS_CH)
if (sc->sc_mcp7941x)
st_mask = 0x80;
else
st_mask = 0x00;
if (((data[DS1307_SECS] & DS1307_SECS_CH) ^ st_mask) != 0)
return (EINVAL);
/* If chip is in AM/PM mode remember that. */
@@ -394,6 +405,13 @@ ds1307_settime(device_t dev, struct timespec *ts)
data[DS1307_WEEKDAY] = ct.dow;
data[DS1307_MONTH] = TOBCD(ct.mon);
data[DS1307_YEAR] = TOBCD(ct.year % 100);
if (sc->sc_mcp7941x) {
data[DS1307_SECS] |= MCP7941X_SECS_ST;
data[DS1307_WEEKDAY] |= MCP7941X_WEEKDAY_VBATEN;
if ((ct.year % 4 == 0 && ct.year % 100 != 0) ||
ct.year % 400 == 0)
data[DS1307_MONTH] |= MCP7941X_MONTH_LPYR;
}
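The LPYR bit above is derived from the full Gregorian leap-year rule even though the chip only stores a two-digit year. A quick standalone check of the condition used in the hunk (hypothetical helper):

#include <stdio.h>

/* Gregorian leap-year rule, as used to set MCP7941X_MONTH_LPYR above. */
static int
is_leap(int year)
{
	return ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
}

int
main(void)
{
	/* 2000 and 2016 are leap years; 1900 is not: prints 1 0 1 */
	printf("%d %d %d\n", is_leap(2000), is_leap(1900), is_leap(2016));
	return (0);
}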
/* Write the time back to RTC. */
error = iicdev_writeto(sc->sc_dev, DS1307_SECS, data, sizeof(data),
IIC_INTRWAIT);


@@ -36,6 +36,7 @@
#define DS1307_SECS 0x00
#define DS1307_SECS_MASK 0x7f
#define DS1307_SECS_CH 0x80
#define MCP7941X_SECS_ST 0x80
#define DS1307_MINS 0x01
#define DS1307_MINS_MASK 0x7f
#define DS1307_HOUR 0x02
@@ -44,10 +45,12 @@
#define DS1307_HOUR_IS_PM 0x20
#define DS1307_HOUR_USE_AMPM 0x40
#define DS1307_WEEKDAY 0x03
#define MCP7941X_WEEKDAY_VBATEN 0x08
#define DS1307_WEEKDAY_MASK 0x07
#define DS1307_DATE 0x04
#define DS1307_DATE_MASK 0x3f
#define DS1307_MONTH 0x05
#define MCP7941X_MONTH_LPYR 0x20
#define DS1307_MONTH_MASK 0x1f
#define DS1307_YEAR 0x06
#define DS1307_YEAR_MASK 0xff


@@ -100,6 +100,35 @@ __mlx5_mask(typ, fld))
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
__mlx5_64_off(typ, fld)))
#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
type_t tmp; \
switch (sizeof(tmp)) { \
case sizeof(u8): \
tmp = (__force type_t)MLX5_GET(typ, p, fld); \
break; \
case sizeof(u16): \
tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
break; \
case sizeof(u32): \
tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
break; \
case sizeof(u64): \
tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
break; \
} \
tmp; \
})
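MLX5_GET_BE relies on the GCC statement-expression extension and a compile-time sizeof switch to select the byte-swap width matching the destination type. A self-contained analog of the idiom, reduced to the 8- and 16-bit cases (assumed names; this is not the mlx5 API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for cpu_to_be16() on a little-endian host. */
#define BSWAP16(x)	((uint16_t)(((x) >> 8) | ((x) << 8)))

/* Width-dispatching getter in the style of MLX5_GET_BE. */
#define GET_BE(type_t, v) ({					\
	type_t tmp;						\
	switch (sizeof(tmp)) {					\
	case sizeof(uint8_t):					\
		tmp = (type_t)(v);				\
		break;						\
	case sizeof(uint16_t):					\
		tmp = (type_t)BSWAP16((uint16_t)(v));		\
		break;						\
	}							\
	tmp;							\
})

int
main(void)
{
	/* 0x1234 byte-swapped for a 16-bit destination: prints 0x3412. */
	printf("0x%04x\n", (unsigned)GET_BE(uint16_t, 0x1234));
	return (0);
}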
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
enum {
MLX5_MAX_COMMANDS = 32,
MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -326,6 +355,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
enum {
/*
* Max wqe size for rdma read is 512 bytes, so this
* limits our max_sge_rd as the wqe needs to fit:
* - ctrl segment (16 bytes)
* - rdma segment (16 bytes)
* - scatter elements (16 bytes each)
*/
MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
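As a sanity check on the arithmetic in the comment above: one 16-byte control segment and one 16-byte RDMA segment leave 480 of the 512 bytes for scatter elements, i.e. 30 SGEs. A hypothetical standalone snippet, not part of the header:

/* Requires C11 for _Static_assert. */
_Static_assert((512 - 16 - 16) / 16 == 30, "MLX5_MAX_SGE_RD is 30");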
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
@@ -486,6 +526,11 @@ struct mlx5_eqe_port_module_event {
u8 error_type;
};
struct mlx5_eqe_general_notification_event {
u32 rq_user_index_delay_drop;
u32 rsvd0[6];
};
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -499,6 +544,7 @@ union ev_data {
struct mlx5_eqe_page_req req_pages;
struct mlx5_eqe_port_module_event port_module_event;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_general_notification_event general_notifications;
} __packed;
struct mlx5_eqe {
@@ -642,9 +688,9 @@ enum {
};
enum {
CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
};
enum {
@@ -1085,6 +1131,7 @@ enum {
MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 5,
MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 6,
MLX5_FLOW_TABLE_TYPE_NIC_RX_RDMA = 7,
};
enum {
@@ -1281,6 +1328,7 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
@@ -1386,6 +1434,10 @@ static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
}
enum {
MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
};
/* 8 regular priorities + 1 for multicast */
#define MLX5_NUM_BYPASS_FTS 9


@@ -306,6 +306,11 @@ struct cmd_msg_cache {
};
struct mlx5_traffic_counter {
u64 packets;
u64 octets;
};
struct mlx5_cmd_stats {
u64 sum;
u64 n;
@@ -582,6 +587,7 @@ struct mlx5_special_contexts {
int resd_lkey;
};
struct mlx5_flow_root_namespace;
struct mlx5_core_dev {
struct pci_dev *pdev;
char board_id[MLX5_BOARD_ID_LEN];
@@ -589,6 +595,7 @@ struct mlx5_core_dev {
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
void (*event) (struct mlx5_core_dev *dev,
@@ -600,6 +607,12 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5_special_contexts special_contexts;
unsigned int module_status[MLX5_MAX_PORTS];
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
};
@@ -735,6 +748,13 @@ struct mlx5_pas {
u8 log_sz;
};
enum port_state_policy {
MLX5_POLICY_DOWN = 0,
MLX5_POLICY_UP = 1,
MLX5_POLICY_FOLLOW = 2,
MLX5_POLICY_INVALID = 0xffffffff
};
static inline void *
mlx5_buf_offset(struct mlx5_buf *buf, int offset)
{
@@ -803,6 +823,11 @@ static inline void *mlx5_vmalloc(unsigned long size)
return rtn;
}
static inline u32 mlx5_base_mkey(const u32 key)
{
return key & 0xffffff00u;
}
void mlx5_enter_error_state(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
@@ -855,7 +880,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
void mlx5_fwp_flush(struct mlx5_fw_page *fwp);
void mlx5_fwp_invalidate(struct mlx5_fw_page *fwp);
@@ -959,6 +984,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
u8 mlx5_is_wol_supported(struct mlx5_core_dev *dev);
int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode);
int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout);
int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout);
int mlx5_query_wol(struct mlx5_core_dev *dev, u8 *wol_mode);
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
struct mlx5_pvlc_reg *pvlc, int write);


@@ -1,46 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_ESWITCH_VACL_TABLE_H
#define MLX5_ESWITCH_VACL_TABLE_H
#include <dev/mlx5/driver.h>
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress);
void mlx5_vacl_table_cleanup(void *acl_t);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t);
int mlx5_vacl_table_drop_untagged(void *acl_t);
int mlx5_vacl_table_allow_untagged(void *acl_t);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac);
#endif /* MLX5_ESWITCH_VACL_TABLE_H */


@@ -1,56 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef MLX5_FLOW_TABLE_H
#define MLX5_FLOW_TABLE_H
#include <dev/mlx5/driver.h>
#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET 0x0
#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET 0x1
struct mlx5_flow_table_group {
u8 log_sz;
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
u16 vport,
u16 num_groups,
struct mlx5_flow_table_group *group);
void mlx5_destroy_flow_table(void *flow_table);
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index);
int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
u32 mlx5_get_flow_table_id(void *flow_table);
int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
u8 vport_num, u8 table_type, u32 table_id,
u32 underlay_qpn);
#endif /* MLX5_FLOW_TABLE_H */

sys/dev/mlx5/fs.h (new file)

@@ -0,0 +1,232 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MLX5_FS_
#define _MLX5_FS_
#include <linux/list.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
enum {
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
};
/*Flow tag*/
enum {
MLX5_FS_DEFAULT_FLOW_TAG = 0xFFFFFF,
MLX5_FS_ETH_FLOW_TAG = 0xFFFFFE,
MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};
enum {
MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};
#define FS_MAX_TYPES 10
#define FS_MAX_ENTRIES 32000U
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
};
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
};
struct mlx5_flow_destination {
u32 type;
union {
u32 tir_num;
struct mlx5_flow_table *ft;
u32 vport_num;
};
};
#define FT_NAME_STR_SZ 20
#define LEFTOVERS_RULE_NUM 2
static inline void build_leftovers_ft_param(char *name,
unsigned int *priority,
int *n_ent,
int *n_grp)
{
snprintf(name, FT_NAME_STR_SZ, "leftovers");
*priority = 0; /* priority of leftovers_prio-0 */
*n_ent = LEFTOVERS_RULE_NUM + 1; /* +1 for the star (catch-all) rule */
*n_grp = LEFTOVERS_RULE_NUM;
}
static inline bool outer_header_zero(u32 *match_criteria)
{
int size = MLX5_ST_SZ_BYTES(fte_match_param);
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
outer_headers_c + 1,
size - 1);
}
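outer_header_zero() uses a classic all-zeros test: if the first byte is zero and the buffer compares equal to itself shifted by one byte, every byte must be zero. A small standalone demo of the idiom (hypothetical buffers, independent of the mlx5 structures):

#include <stdio.h>
#include <string.h>

/* True when every byte of buf[0..len-1] is zero: buf[0] == 0 and each
 * byte equals its successor, verified with one overlapping memcmp. */
static int
all_zero(const char *buf, size_t len)
{
	return (buf[0] == 0 && memcmp(buf, buf + 1, len - 1) == 0);
}

int
main(void)
{
	char a[8] = { 0 };
	char b[8] = { 0, 0, 0, 1, 0, 0, 0, 0 };

	/* Prints: 1 0 */
	printf("%d %d\n", all_zero(a, sizeof(a)), all_zero(b, sizeof(b)));
	return (0);
}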
struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
/* The underlying implementation creates two more entries for
 * chaining flow tables. Be aware that passing max_num_ftes as a
 * power of two (2^N) therefore results in a flow table of twice
 * that size; e.g. asking for 4096 entries yields an 8192-entry table.
 */
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries,
int max_num_groups);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
u16 vport,
int prio,
const char *name,
int num_flow_table_entries);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
int prio,
const char *name,
int num_flow_table_entries);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
* start_flow_index
* end_flow_index
* match_criteria_enable
* match_criteria
*/
struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
u32 *match_criteria,
u32 *match_value,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest);
void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
/* The following API is for the sniffer */
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
bool ctx_changed,
void *client_data,
void *context);
struct mlx5_flow_handler;
struct flow_client_priv_data;
void mlx5e_sniffer_roce_mode_notify(
struct mlx5_core_dev *mdev,
int action);
int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
    struct mlx5_flow_handler *handler, void *client_data);
struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type ns_type,
rule_event_fn add_cb,
rule_event_fn del_cb,
void *context);
void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler);
void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
rule_event_fn cb,
void *context);
void mlx5_get_match_criteria(u32 *match_criteria,
struct mlx5_flow_rule *rule);
void mlx5_get_match_value(u32 *match_value,
struct mlx5_flow_rule *rule);
u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule);
struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode);
void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list);
struct mlx5_flow_rules_list {
struct list_head head;
};
struct mlx5_flow_rule_node {
struct list_head list;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
u8 match_criteria_enable;
};
struct mlx5_core_fs_mask {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
};
bool fs_match_exact_val(
struct mlx5_core_fs_mask *mask,
void *val1,
void *val2);
bool fs_match_exact_mask(
u8 match_criteria_enable1,
u8 match_criteria_enable2,
void *mask1,
void *mask2);
/**********end API for sniffer**********/
#endif


@@ -0,0 +1,300 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
#include <asm/atomic.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <dev/mlx5/fs.h>
enum fs_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
FS_TYPE_FLOW_DEST
};
enum fs_ft_type {
FS_FT_NIC_RX = 0x0,
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0x5,
FS_FT_SNIFFER_TX = 0x6
};
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
/* Should always be the first variable in the struct */
struct fs_base {
struct list_head list;
struct fs_base *parent;
enum fs_type type;
struct kref refcount;
/* lock the node for writing and traversing */
struct mutex lock;
struct completion complete;
atomic_t users_refcount;
const char *name;
};
struct mlx5_flow_rule {
struct fs_base base;
struct mlx5_flow_destination dest_attr;
struct list_head clients_data;
/* protects the clients_data list */
struct mutex clients_lock;
};
struct fs_fte {
struct fs_base base;
u32 val[MLX5_ST_SZ_DW(fte_match_param)];
uint32_t dests_size;
uint32_t flow_tag;
struct list_head dests;
uint32_t index; /* index in ft */
u8 action; /* MLX5_FLOW_CONTEXT_ACTION */
enum fs_fte_status status;
};
struct fs_star_rule {
struct mlx5_flow_group *fg;
struct fs_fte *fte;
};
struct mlx5_flow_table {
struct fs_base base;
/* sorted list by start_index */
struct list_head fgs;
struct {
bool active;
unsigned int max_types;
unsigned int num_types;
} autogroup;
unsigned int max_fte;
unsigned int level;
uint32_t id;
u16 vport;
enum fs_ft_type type;
struct fs_star_rule star_rule;
unsigned int shared_refcount;
};
enum fs_prio_flags {
MLX5_CORE_FS_PRIO_SHARED = 1
};
struct fs_prio {
struct fs_base base;
struct list_head objs; /* each object is a namespace or ft */
unsigned int max_ft;
unsigned int num_ft;
unsigned int max_ns;
unsigned int prio;
/* When creating a shared flow table, this lock should be taken. */
struct mutex shared_lock;
u8 flags;
};
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
struct fs_base base;
/* sorted by priority number */
struct list_head prios; /* list of fs_prios */
struct list_head list_notifiers;
struct rw_semaphore notifiers_rw_sem;
struct rw_semaphore dests_rw_sem;
};
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
struct mlx5_flow_table *ft_level_0;
enum fs_ft_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
/* When chaining flow-tables, this lock should be taken */
struct mutex fs_chain_lock;
};
struct mlx5_flow_group {
struct fs_base base;
struct list_head ftes;
struct mlx5_core_fs_mask mask;
uint32_t start_index;
uint32_t max_ftes;
uint32_t num_ftes;
uint32_t id;
};
struct mlx5_flow_handler {
struct list_head list;
rule_event_fn add_dst_cb;
rule_event_fn del_dst_cb;
void *client_context;
struct mlx5_flow_namespace *ns;
};
struct fs_client_priv_data {
struct mlx5_flow_handler *fs_handler;
struct list_head list;
void *client_dst_data;
};
void _fs_remove_node(struct kref *kref);
#define fs_get_obj(v, _base) {v = container_of((_base), typeof(*v), base); }
#define fs_get_parent(v, child) {v = (child)->base.parent ? \
container_of((child)->base.parent, \
typeof(*v), base) : NULL; }
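fs_get_obj() and fs_get_parent() work because struct fs_base is embedded as the first member of every flow-steering node type, so container_of() can recover the enclosing object from a pointer to its base. A standalone sketch of the pattern (demo types invented for illustration):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_demo {
	int type;
};

struct table_demo {
	struct base_demo base;	/* must come first, like struct fs_base */
	int id;
};

int
main(void)
{
	struct table_demo ft = { .base = { .type = 1 }, .id = 42 };
	struct base_demo *b = &ft.base;

	/* Recover the enclosing table from its base pointer: prints 42. */
	printf("%d\n", container_of(b, struct table_demo, base)->id);
	return (0);
}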
#define fs_list_for_each_entry(pos, cond, root) \
list_for_each_entry(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue(pos, cond, root) \
list_for_each_entry_continue(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_reverse(pos, cond, root) \
list_for_each_entry_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_list_for_each_entry_continue_reverse(pos, cond, root) \
list_for_each_entry_continue_reverse(pos, root, base.list) \
if (!(cond)) {} else
#define fs_for_each_ft(pos, prio) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ft_reverse(pos, prio) \
fs_list_for_each_entry_reverse(pos, \
(pos)->base.type == FS_TYPE_FLOW_TABLE, \
&(prio)->objs)
#define fs_for_each_ns(pos, prio) \
fs_list_for_each_entry(pos, \
(pos)->base.type == FS_TYPE_NAMESPACE, \
&(prio)->objs)
#define fs_for_each_ns_or_ft_reverse(pos, prio) \
list_for_each_entry_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft(pos, prio) \
list_for_each_entry(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue_reverse(pos, prio) \
list_for_each_entry_continue_reverse(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_ns_or_ft_continue(pos, prio) \
list_for_each_entry_continue(pos, &(prio)->objs, list) \
if (!((pos)->type == FS_TYPE_NAMESPACE || \
(pos)->type == FS_TYPE_FLOW_TABLE)) {} else
#define fs_for_each_prio(pos, ns) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_reverse(pos, ns) \
fs_list_for_each_entry_reverse(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue(pos, ns) \
fs_list_for_each_entry_continue(pos, (pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_prio_continue_reverse(pos, ns) \
fs_list_for_each_entry_continue_reverse(pos, \
(pos)->base.type == FS_TYPE_PRIO, \
&(ns)->prios)
#define fs_for_each_fg(pos, ft) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_GROUP, \
&(ft)->fgs)
#define fs_for_each_fte(pos, fg) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_ENTRY, \
&(fg)->ftes)
#define fs_for_each_dst(pos, fte) \
fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_DEST, \
&(fte)->dests)
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id);
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id);
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
u32 *in,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id);
int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id);
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
u32 *match_val,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
unsigned int flow_tag,
unsigned short action, int dest_size,
struct list_head *dests); /* mlx5_flow_destination */
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
enum fs_ft_type type, unsigned int table_id,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id);
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
#endif


@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -80,6 +80,8 @@ struct cre_des_eq {
/*Function prototype*/
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
struct mlx5_eqe *eqe);
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
@@ -157,6 +159,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
default:
return "Unrecognized event";
}
@@ -296,6 +300,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
}
break;
case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
mlx5_port_general_notification_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_CQ_ERROR:
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
@@ -502,7 +510,7 @@ void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
if (MLX5_CAP_GEN(dev, port_module_event))
@@ -667,3 +675,24 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
dev->module_status[module_num] = module_status;
}
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
struct mlx5_eqe *eqe)
{
u8 port = (eqe->data.port.port >> 4) & 0xf;
u32 rqn = 0;
struct mlx5_eqe_general_notification_event *general_event = NULL;
switch (eqe->sub_type) {
case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
general_event = &eqe->data.general_notifications;
rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) &
0xffffff;
break;
default:
mlx5_core_warn(dev,
"general event with unrecognized subtype: port %d, sub_type %d\n",
port, eqe->sub_type);
break;
}
}


@@ -1,803 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/flow_table.h>
#include <dev/mlx5/eswitch_vacl.h>
#include "mlx5_core.h"
enum {
MLX5_ACL_LOOPBACK_GROUP_IDX = 0,
MLX5_ACL_UNTAGGED_GROUP_IDX = 1,
MLX5_ACL_VLAN_GROUP_IDX = 2,
MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX = 3,
MLX5_ACL_DEFAULT_GROUP_IDX = 4,
MLX5_ACL_GROUPS_NUM,
};
struct mlx_vacl_fr {
bool applied;
u32 fi;
u16 action;
};
struct mlx5_vacl_table {
struct mlx5_core_dev *dev;
u16 vport;
void *ft;
int max_ft_size;
int acl_type;
struct mlx_vacl_fr loopback_fr;
struct mlx_vacl_fr untagged_fr;
struct mlx_vacl_fr unknown_vlan_fr;
struct mlx_vacl_fr default_fr;
bool vlan_filter_enabled;
bool vlan_filter_applied;
unsigned long *vlan_allowed_bitmap;
u32 vlan_fi_table[4096];
bool spoofchk_enabled;
u8 smac[ETH_ALEN];
};
static int mlx5_vacl_table_allow_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int vlan_mc_enable = MLX5_MATCH_OUTER_HEADERS;
int err = 0;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
/* Apply vlan rule */
MLX5_SET(flow_context, flow_context, action,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
vlan);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
0xfff);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, vlan_mc_enable,
in_match_criteria, flow_context,
&acl_table->vlan_fi_table[vlan]);
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_loopback_filter(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 loopback_mc_enable = MLX5_MATCH_MISC_PARAMETERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
void *mv_misc = NULL;
void *mc_misc = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->loopback_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
/* Apply new loopback rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
mv_misc = MLX5_ADDR_OF(fte_match_param, in_match_value,
misc_parameters);
mc_misc = MLX5_ADDR_OF(fte_match_param, in_match_criteria,
misc_parameters);
MLX5_SET(fte_match_set_misc, mv_misc, source_port, acl_table->vport);
MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
err = mlx5_add_flow_table_entry(acl_table->ft, loopback_mc_enable,
in_match_criteria, flow_context,
&acl_table->loopback_fr.fi);
if (err) {
acl_table->loopback_fr.applied = false;
} else {
acl_table->loopback_fr.applied = true;
acl_table->loopback_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_default(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = 0;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
int err = 0;
if (!acl_table->spoofchk_enabled)
return -EINVAL;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->default_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
/* Apply new default rule */
MLX5_SET(flow_context, flow_context, action, new_action);
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->default_fr.fi);
if (err) {
acl_table->default_fr.applied = false;
} else {
acl_table->default_fr.applied = true;
acl_table->default_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_untagged(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 untagged_mc_enable = MLX5_MATCH_OUTER_HEADERS;
u8 *smac;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->untagged_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
/* Apply new untagged rule */
MLX5_SET(flow_context, flow_context, action, new_action);
in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 0);
MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
if (acl_table->spoofchk_enabled) {
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, untagged_mc_enable,
in_match_criteria, flow_context,
&acl_table->untagged_fr.fi);
if (err) {
acl_table->untagged_fr.applied = false;
} else {
acl_table->untagged_fr.applied = true;
acl_table->untagged_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_unknown_vlan(void *acl_t, u16 new_action)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
u8 default_mc_enable = (!acl_table->spoofchk_enabled) ? 0 :
MLX5_MATCH_OUTER_HEADERS;
u32 *flow_context = NULL;
void *in_match_criteria = NULL;
void *in_match_value = NULL;
u8 *smac;
int err = 0;
flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context));
if (!flow_context) {
err = -ENOMEM;
goto out;
}
in_match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!in_match_criteria) {
err = -ENOMEM;
goto out;
}
if (acl_table->unknown_vlan_fr.applied)
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
/* Apply new unknown vlan rule */
MLX5_SET(flow_context, flow_context, action, new_action);
if (acl_table->spoofchk_enabled) {
in_match_value = MLX5_ADDR_OF(flow_context, flow_context,
match_value);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_value,
outer_headers.smac_47_16);
ether_addr_copy(smac, acl_table->smac);
smac = MLX5_ADDR_OF(fte_match_param,
in_match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
err = mlx5_add_flow_table_entry(acl_table->ft, default_mc_enable,
in_match_criteria, flow_context,
&acl_table->unknown_vlan_fr.fi);
if (err) {
acl_table->unknown_vlan_fr.applied = false;
} else {
acl_table->unknown_vlan_fr.applied = true;
acl_table->unknown_vlan_fr.action = new_action;
}
out:
if (flow_context)
vfree(flow_context);
if (in_match_criteria)
vfree(in_match_criteria);
return err;
}
static int mlx5_vacl_table_apply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
int err_index = 0;
int err = 0;
if (acl_table->vlan_filter_applied)
return 0;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap,
4096, ++index)) {
err = mlx5_vacl_table_allow_vlan(acl_t, index);
if (err)
goto err_disable_vlans;
}
acl_table->vlan_filter_applied = true;
return 0;
err_disable_vlans:
for (err_index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
err_index < index;
err_index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++err_index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[err_index]);
}
return err;
}
static void mlx5_vacl_table_disapply_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int index = 0;
if (!acl_table->vlan_filter_applied)
return;
for (index = find_first_bit(acl_table->vlan_allowed_bitmap, 4096);
index < 4096;
index = find_next_bit(acl_table->vlan_allowed_bitmap, 4096,
++index)) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[index]);
}
acl_table->vlan_filter_applied = false;
}
static void mlx5_vacl_table_disapply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (acl_table->default_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->default_fr.fi);
acl_table->default_fr.applied = false;
}
if (acl_table->unknown_vlan_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->unknown_vlan_fr.fi);
acl_table->unknown_vlan_fr.applied = false;
}
if (acl_table->loopback_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->loopback_fr.fi);
acl_table->loopback_fr.applied = false;
}
if (acl_table->untagged_fr.applied) {
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->untagged_fr.fi);
acl_table->untagged_fr.applied = false;
}
if (acl_table->vlan_filter_applied) {
mlx5_vacl_table_disapply_vlan_filter(acl_t);
acl_table->vlan_filter_applied = false;
}
}
static int mlx5_vacl_table_apply_all_filters(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (!acl_table->default_fr.applied && acl_table->spoofchk_enabled) {
err = mlx5_vacl_table_apply_default(acl_table,
acl_table->default_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->unknown_vlan_fr.applied) {
err = mlx5_vacl_table_apply_unknown_vlan(acl_table,
acl_table->unknown_vlan_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->loopback_fr.applied &&
acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
err = mlx5_vacl_table_apply_loopback_filter(
acl_table,
acl_table->loopback_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->untagged_fr.applied) {
err = mlx5_vacl_table_apply_untagged(acl_table,
acl_table->untagged_fr.action);
if (err)
goto err_disapply_all;
}
if (!acl_table->vlan_filter_applied && acl_table->vlan_filter_enabled) {
err = mlx5_vacl_table_apply_vlan_filter(acl_t);
if (err)
goto err_disapply_all;
}
goto out;
err_disapply_all:
mlx5_vacl_table_disapply_all_filters(acl_t);
out:
return err;
}
static void mlx5_vacl_table_destroy_ft(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_disapply_all_filters(acl_t);
if (acl_table->ft)
mlx5_destroy_flow_table(acl_table->ft);
acl_table->ft = NULL;
}
static int mlx5_vacl_table_create_ft(void *acl_t, bool spoofchk)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int log_acl_ft_size;
int err = 0;
int groups_num = MLX5_ACL_GROUPS_NUM - 1;
int shift_idx = MLX5_ACL_UNTAGGED_GROUP_IDX;
u8 *smac;
struct mlx5_flow_table_group *g;
if (acl_table->ft)
return -EINVAL;
g = kcalloc(MLX5_ACL_GROUPS_NUM, sizeof(*g), GFP_KERNEL);
if (!g)
goto out;
acl_table->spoofchk_enabled = spoofchk;
/*
* for vlan group
*/
log_acl_ft_size = 4096;
/*
* for loopback filter rule
*/
log_acl_ft_size += 1;
/*
* for untagged rule
*/
log_acl_ft_size += 1;
/*
* for unknown vlan rule
*/
log_acl_ft_size += 1;
/*
* for default rule
*/
log_acl_ft_size += 1;
log_acl_ft_size = order_base_2(log_acl_ft_size);
log_acl_ft_size = min_t(int, log_acl_ft_size, acl_table->max_ft_size);
if (log_acl_ft_size < 2)
goto out;
if (acl_table->acl_type == MLX5_FLOW_TABLE_TYPE_EGRESS_ACL) {
/* Loopback filter group */
g[MLX5_ACL_LOOPBACK_GROUP_IDX].log_sz = 0;
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria_enable =
MLX5_MATCH_MISC_PARAMETERS;
MLX5_SET_TO_ONES(fte_match_param,
g[MLX5_ACL_LOOPBACK_GROUP_IDX].match_criteria,
misc_parameters.source_port);
groups_num++;
shift_idx = MLX5_ACL_LOOPBACK_GROUP_IDX;
}
/* Untagged traffic group */
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
outer_headers.cvlan_tag, 1);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Allowed vlans group */
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].log_sz = log_acl_ft_size - 1;
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
MLX5_MATCH_OUTER_HEADERS;
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.cvlan_tag, 1);
MLX5_SET(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
outer_headers.first_vid, 0xfff);
if (spoofchk) {
smac = MLX5_ADDR_OF(fte_match_param,
g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/* Unknown vlan traffic group */
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx].match_criteria_enable =
(spoofchk ? MLX5_MATCH_OUTER_HEADERS : 0);
if (spoofchk) {
smac = MLX5_ADDR_OF(
fte_match_param,
g[MLX5_ACL_UNKNOWN_VLAN_GROUP_IDX - shift_idx]
.match_criteria,
outer_headers.smac_47_16);
memset(smac, 0xff, ETH_ALEN);
}
/*
* Default group - for spoofchk only.
*/
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].log_sz = 0;
g[MLX5_ACL_DEFAULT_GROUP_IDX - shift_idx].match_criteria_enable = 0;
acl_table->ft = mlx5_create_flow_table(acl_table->dev,
0,
acl_table->acl_type,
acl_table->vport,
groups_num,
g);
if (!acl_table->ft) {
err = -ENOMEM;
goto out;
}
err = mlx5_vacl_table_apply_all_filters(acl_t);
if (err)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_table->ft);
acl_table->ft = NULL;
out:
kfree(g);
return err;
}
void *mlx5_vacl_table_create(struct mlx5_core_dev *dev,
u16 vport, bool is_egress)
{
struct mlx5_vacl_table *acl_table;
int err = 0;
if (is_egress && !MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
return NULL;
if (!is_egress && !MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
return NULL;
acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
if (!acl_table)
return NULL;
acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
acl_table->max_ft_size = (is_egress ?
MLX5_CAP_ESW_EGRESS_ACL(dev,
log_max_ft_size) :
MLX5_CAP_ESW_INGRESS_ACL(dev,
log_max_ft_size));
acl_table->dev = dev;
acl_table->vport = vport;
/*
* default behavior: allow; if spoofchk is enabled, drop by default
*/
acl_table->default_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->loopback_fr.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
acl_table->unknown_vlan_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
acl_table->untagged_fr.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
err = mlx5_vacl_table_create_ft(acl_table, false);
if (err)
goto err_free_acl_table;
acl_table->vlan_allowed_bitmap = kcalloc(BITS_TO_LONGS(4096),
sizeof(uintptr_t),
GFP_KERNEL);
if (!acl_table->vlan_allowed_bitmap)
goto err_destroy_ft;
goto out;
err_destroy_ft:
mlx5_vacl_table_destroy_ft(acl_table->ft);
acl_table->ft = NULL;
err_free_acl_table:
kfree(acl_table);
acl_table = NULL;
out:
return (void *)acl_table;
}
EXPORT_SYMBOL(mlx5_vacl_table_create);
void mlx5_vacl_table_cleanup(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
mlx5_vacl_table_destroy_ft(acl_t);
kfree(acl_table->vlan_allowed_bitmap);
kfree(acl_table);
}
EXPORT_SYMBOL(mlx5_vacl_table_cleanup);
int mlx5_vacl_table_add_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (test_bit(vlan, acl_table->vlan_allowed_bitmap))
return 0;
__set_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return 0;
err = mlx5_vacl_table_allow_vlan(acl_t, vlan);
if (err)
goto err_clear_vbit;
goto out;
err_clear_vbit:
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
out:
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_add_vlan);
void mlx5_vacl_table_del_vlan(void *acl_t, u16 vlan)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
if (!test_bit(vlan, acl_table->vlan_allowed_bitmap))
return;
__clear_bit(vlan, acl_table->vlan_allowed_bitmap);
if (!acl_table->vlan_filter_applied)
return;
mlx5_del_flow_table_entry(acl_table->ft,
acl_table->vlan_fi_table[vlan]);
}
EXPORT_SYMBOL(mlx5_vacl_table_del_vlan);
int mlx5_vacl_table_enable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = true;
return mlx5_vacl_table_apply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_enable_vlan_filter);
void mlx5_vacl_table_disable_vlan_filter(void *acl_t)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
acl_table->vlan_filter_enabled = false;
mlx5_vacl_table_disapply_vlan_filter(acl_t);
}
EXPORT_SYMBOL(mlx5_vacl_table_disable_vlan_filter);
int mlx5_vacl_table_drop_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_untagged);
int mlx5_vacl_table_allow_untagged(void *acl_t)
{
return mlx5_vacl_table_apply_untagged(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_untagged);
int mlx5_vacl_table_drop_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_DROP);
}
EXPORT_SYMBOL(mlx5_vacl_table_drop_unknown_vlan);
int mlx5_vacl_table_allow_unknown_vlan(void *acl_t)
{
return mlx5_vacl_table_apply_unknown_vlan(acl_t,
MLX5_FLOW_CONTEXT_ACTION_ALLOW);
}
EXPORT_SYMBOL(mlx5_vacl_table_allow_unknown_vlan);
int mlx5_vacl_table_set_spoofchk(void *acl_t, bool spoofchk, u8 *vport_mac)
{
struct mlx5_vacl_table *acl_table = (struct mlx5_vacl_table *)acl_t;
int err = 0;
if (spoofchk == acl_table->spoofchk_enabled) {
if (!spoofchk ||
(spoofchk && !memcmp(acl_table->smac, vport_mac, ETH_ALEN)))
return 0;
}
ether_addr_copy(acl_table->smac, vport_mac);
if (spoofchk != acl_table->spoofchk_enabled) {
mlx5_vacl_table_destroy_ft(acl_t);
err = mlx5_vacl_table_create_ft(acl_t, spoofchk);
} else {
mlx5_vacl_table_disapply_all_filters(acl_t);
err = mlx5_vacl_table_apply_all_filters(acl_t);
}
return err;
}
EXPORT_SYMBOL(mlx5_vacl_table_set_spoofchk);


@@ -1,479 +0,0 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <dev/mlx5/driver.h>
#include <dev/mlx5/flow_table.h>
#include "mlx5_core.h"
struct mlx5_ftg {
struct mlx5_flow_table_group g;
u32 id;
u32 start_ix;
};
struct mlx5_flow_table {
struct mlx5_core_dev *dev;
u8 level;
u8 type;
u32 id;
u16 vport;
struct mutex mutex; /* sync bitmap alloc */
u16 num_groups;
struct mlx5_ftg *group;
unsigned long *bitmap;
u32 size;
};
static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
u32 flow_index, void *flow_context)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 *in;
void *in_flow_context;
int fcdls =
MLX5_GET(flow_context, flow_context, destination_list_size) *
MLX5_ST_SZ_BYTES(dest_format_struct);
int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport, !!ft->vport);
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, flow_index);
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
memcpy(in_flow_context, flow_context,
MLX5_ST_SZ_BYTES(flow_context) + fcdls);
MLX5_SET(flow_context, in_flow_context, group_id, ft->group[group_ix].id);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
kvfree(in);
return err;
}
static int mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
MLX5_SET_DFTEI(in, vport_number, ft->vport);
MLX5_SET_DFTEI(in, other_vport, !!ft->vport);
MLX5_SET_DFTEI(in, table_type, ft->type);
MLX5_SET_DFTEI(in, table_id, ft->id);
MLX5_SET_DFTEI(in, flow_index, flow_index);
MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
return mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
sizeof(out));
}
static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
MLX5_SET_DFGI(in, vport_number, ft->vport);
MLX5_SET_DFGI(in, other_vport, !!ft->vport);
MLX5_SET_DFGI(in, table_type, ft->type);
MLX5_SET_DFGI(in, table_id, ft->id);
MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET_DFGI(in, group_id, ft->group[i].id);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
u32 *in;
void *in_match_criteria;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_group *g = &ft->group[i].g;
u32 start_ix = ft->group[i].start_ix;
u32 end_ix = start_ix + (1 << g->log_sz) - 1;
int err;
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
return -ENOMEM;
}
in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria);
memset(out, 0, sizeof(out));
#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
MLX5_SET_CFGI(in, vport_number, ft->vport);
MLX5_SET_CFGI(in, other_vport, !!ft->vport);
MLX5_SET_CFGI(in, table_type, ft->type);
MLX5_SET_CFGI(in, table_id, ft->id);
MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET_CFGI(in, start_flow_index, start_ix);
MLX5_SET_CFGI(in, end_flow_index, end_ix);
MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
memcpy(in_match_criteria, g->match_criteria,
MLX5_ST_SZ_BYTES(fte_match_param));
err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
sizeof(out));
if (!err)
ft->group[i].id = MLX5_GET(create_flow_group_out, out,
group_id);
kvfree(in);
return err;
}
static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
{
int i;
for (i = 0; i < ft->num_groups; i++)
mlx5_destroy_flow_group_cmd(ft, i);
}
static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
{
int err;
int i;
for (i = 0; i < ft->num_groups; i++) {
err = mlx5_create_flow_group_cmd(ft, i);
if (err)
goto err_destroy_flow_table_groups;
}
return 0;
err_destroy_flow_table_groups:
for (i--; i >= 0; i--)
mlx5_destroy_flow_group_cmd(ft, i);
return err;
}
static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
int err;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, !!ft->vport);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, level, ft->level);
MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
ft->id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
}
static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
MLX5_SET_DFTI(in, vport_number, ft->vport);
MLX5_SET_DFTI(in, other_vport, !!ft->vport);
MLX5_SET_DFTI(in, table_type, ft->type);
MLX5_SET_DFTI(in, table_id, ft->id);
MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
}
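/*
 * New entries are steered into an existing group: the enable mask and
 * every enabled match-criteria block must equal the group's exactly.
 */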
static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
u32 *match_criteria, int *group_ix)
{
void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
outer_headers);
void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
misc_parameters);
void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
inner_headers);
int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
int i;
for (i = 0; i < ft->num_groups; i++) {
struct mlx5_flow_table_group *g = &ft->group[i].g;
void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
outer_headers);
void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
misc_parameters);
void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
g->match_criteria,
inner_headers);
if (g->match_criteria_enable != match_criteria_enable)
continue;
if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
continue;
if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
continue;
*group_ix = i;
return 0;
}
return -EINVAL;
}
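/* Reserve the first free index in the group's slice of the table bitmap. */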
static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
{
struct mlx5_ftg *g = &ft->group[group_ix];
int err = 0;
mutex_lock(&ft->mutex);
*ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
err = -ENOSPC;
else
__set_bit(*ix, ft->bitmap);
mutex_unlock(&ft->mutex);
return err;
}
static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
{
__clear_bit(ix, ft->bitmap);
}
int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
void *match_criteria, void *flow_context,
u32 *flow_index)
{
struct mlx5_flow_table *ft = flow_table;
int group_ix;
int err;
err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
&group_ix);
if (err) {
mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
return err;
}
err = alloc_flow_index(ft, group_ix, flow_index);
if (err) {
mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
return err;
}
err = mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
if (err)
mlx5_free_flow_index(ft, *flow_index);
return err;
}
EXPORT_SYMBOL(mlx5_add_flow_table_entry);
int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
{
struct mlx5_flow_table *ft = flow_table;
int ret;
ret = mlx5_del_flow_entry_cmd(ft, flow_index);
if (!ret)
mlx5_free_flow_index(ft, flow_index);
return ret;
}
EXPORT_SYMBOL(mlx5_del_flow_table_entry);
void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
u16 vport,
u16 num_groups,
struct mlx5_flow_table_group *group)
{
struct mlx5_flow_table *ft;
u32 start_ix = 0;
u32 ft_size = 0;
void *gr;
void *bm;
int err;
int i;
for (i = 0; i < num_groups; i++)
ft_size += (1 << group[i].log_sz);
	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
	bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
	/* kfree(NULL) is safe, so one error path can free all three. */
	if (!ft || !gr || !bm)
		goto err_free_ft;
	ft->group = gr;
ft->bitmap = bm;
ft->num_groups = num_groups;
ft->level = level;
ft->vport = vport;
ft->type = table_type;
ft->size = ft_size;
ft->dev = dev;
mutex_init(&ft->mutex);
for (i = 0; i < ft->num_groups; i++) {
memcpy(&ft->group[i].g, &group[i], sizeof(*group));
ft->group[i].start_ix = start_ix;
start_ix += 1 << group[i].log_sz;
}
err = mlx5_create_flow_table_cmd(ft);
if (err)
goto err_free_ft;
err = mlx5_create_flow_table_groups(ft);
if (err)
goto err_destroy_flow_table_cmd;
return ft;
err_destroy_flow_table_cmd:
mlx5_destroy_flow_table_cmd(ft);
err_free_ft:
mlx5_core_warn(dev, "failed to alloc flow table\n");
kfree(bm);
kfree(gr);
kfree(ft);
return NULL;
}
EXPORT_SYMBOL(mlx5_create_flow_table);
void mlx5_destroy_flow_table(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
mlx5_destroy_flow_table_groups(ft);
mlx5_destroy_flow_table_cmd(ft);
kfree(ft->bitmap);
kfree(ft->group);
kfree(ft);
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);
u32 mlx5_get_flow_table_id(void *flow_table)
{
struct mlx5_flow_table *ft = flow_table;
return ft->id;
}
EXPORT_SYMBOL(mlx5_get_flow_table_id);
int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
u8 vport_num, u8 table_type, u32 table_id,
u32 underlay_qpn)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
int err;
int is_group_manager;
is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
memset(in, 0, sizeof(in));
MLX5_SET(set_flow_table_root_in, in, op_mod, op_mod);
MLX5_SET(set_flow_table_root_in, in, table_type, table_type);
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (op_mod == MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET)
MLX5_SET(set_flow_table_root_in, in, table_id, table_id);
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
if (vport_num) {
if (is_group_manager) {
MLX5_SET(set_flow_table_root_in, in, other_vport,
1);
MLX5_SET(set_flow_table_root_in, in, vport_number,
vport_num);
} else {
return -EPERM;
}
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
return 0;
}
EXPORT_SYMBOL(mlx5_set_flow_table_root);
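/*
 * For reference, an illustrative sketch (not part of the merged diff) of
 * the calling pattern the API removed above supported. The level, table
 * type, group sizing, and buffers are placeholders supplied by the caller.
 */
static void
example_old_flow_table_api(struct mlx5_core_dev *dev,
    struct mlx5_flow_table_group *group, void *match_criteria,
    void *flow_context)
{
	void *ft;
	u32 flow_index;

	/* One group of 2^log_sz entries on the local vport (vport 0). */
	ft = mlx5_create_flow_table(dev, 0 /* level */, 0 /* table_type */,
	    0 /* vport */, 1 /* num_groups */, group);
	if (ft == NULL)
		return;

	/* An entry's match criteria select the group it lands in. */
	if (mlx5_add_flow_table_entry(ft, MLX5_MATCH_OUTER_HEADERS,
	    match_criteria, flow_context, &flow_index) == 0)
		(void)mlx5_del_flow_table_entry(ft, flow_index);

	mlx5_destroy_flow_table(ft);
}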

View File

@@ -0,0 +1,302 @@
/*-
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <linux/types.h>
#include <linux/module.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/device.h>
#include <dev/mlx5/fs.h>
#include "fs_core.h"
#include "mlx5_core.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
enum fs_ft_type type,
unsigned int id)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, type);
MLX5_SET(set_flow_table_root_in, in, table_id, id);
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
}
int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int level,
unsigned int log_size, unsigned int *table_id)
{
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
int err;
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size,
log_size);
if (vport) {
MLX5_SET(create_flow_table_in, in, vport_number, vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
if (err)
return err;
*table_id = MLX5_GET(create_flow_table_out, out, table_id);
return 0;
}
int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, type);
MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
if (vport) {
MLX5_SET(destroy_flow_table_in, in, vport_number, vport);
MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
u32 *in,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int *group_id)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
int err;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
if (!dev)
return -EINVAL;
memset(out, 0, sizeof(out));
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, type);
MLX5_SET(create_flow_group_in, in, table_id, table_id);
if (vport) {
MLX5_SET(create_flow_group_in, in, vport_number, vport);
MLX5_SET(create_flow_group_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
sizeof(out));
if (!err)
*group_id = MLX5_GET(create_flow_group_out, out, group_id);
return err;
}
int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
u16 vport,
enum fs_ft_type type, unsigned int table_id,
unsigned int group_id)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, type);
MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
if (vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
}
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
u32 *match_val,
enum fs_ft_type type, unsigned int table_id,
unsigned int index, unsigned int group_id,
unsigned int flow_tag,
unsigned short action, int dest_size,
struct list_head *dests) /* mlx5_flow_destination */
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)];
u32 *in;
unsigned int inlen;
struct mlx5_flow_rule *dst;
void *in_flow_context;
void *in_match_value;
void *in_dests;
int err;
int opmod = 0;
int modify_mask = 0;
int atomic_mod_cap;
if (action != MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)
dest_size = 0;
inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
dest_size * MLX5_ST_SZ_BYTES(dest_format_struct);
if (!dev)
return -EINVAL;
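	/* Rewriting an existing FTE needs the firmware's atomic-modify capability. */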
if (*fte_status & FS_FTE_STATUS_EXISTING) {
atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
modify_mask = 1 <<
MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
}
in = mlx5_vzalloc(inlen);
if (!in) {
mlx5_core_warn(dev, "failed to allocate inbox\n");
return -ENOMEM;
}
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
MLX5_SET(set_fte_in, in, table_type, type);
MLX5_SET(set_fte_in, in, table_id, table_id);
MLX5_SET(set_fte_in, in, flow_index, index);
if (vport) {
MLX5_SET(set_fte_in, in, vport_number, vport);
MLX5_SET(set_fte_in, in, other_vport, 1);
}
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, flow_tag);
MLX5_SET(flow_context, in_flow_context, action, action);
MLX5_SET(flow_context, in_flow_context, destination_list_size,
dest_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, match_val, MLX5_ST_SZ_BYTES(fte_match_param));
if (dest_size) {
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
list_for_each_entry(dst, dests, base.list) {
unsigned int id;
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
if (dst->dest_attr.type ==
MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE)
id = dst->dest_attr.ft->id;
else
id = dst->dest_attr.tir_num;
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
}
}
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
if (!err)
*fte_status |= FS_FTE_STATUS_EXISTING;
kvfree(in);
return err;
}
int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
u16 vport,
enum fs_fte_status *fte_status,
enum fs_ft_type type, unsigned int table_id,
unsigned int index)
{
u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
int err;
if (!(*fte_status & FS_FTE_STATUS_EXISTING))
return 0;
if (!dev)
return -EINVAL;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, type);
MLX5_SET(delete_fte_in, in, table_id, table_id);
MLX5_SET(delete_fte_in, in, flow_index, index);
if (vport) {
MLX5_SET(delete_fte_in, in, vport_number, vport);
MLX5_SET(delete_fte_in, in, other_vport, 1);
}
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
if (!err)
*fte_status = 0;
return err;
}
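/*
 * The fte_status flag threads state between the two commands above: a
 * successful set marks the entry FS_FTE_STATUS_EXISTING (turning a later
 * set into an atomic modify), and a successful delete clears it again.
 * Below is an illustrative sketch, not part of the merged diff; the
 * table type (assumed FS_FT_NIC_RX from fs_core.h), ids, and index are
 * placeholders.
 */
static int
example_fte_lifecycle(struct mlx5_core_dev *dev, unsigned int table_id,
    unsigned int group_id)
{
	u32 match_val[MLX5_ST_SZ_DW(fte_match_param)];
	enum fs_fte_status fte_status = 0;
	struct list_head dests;
	int err;

	memset(match_val, 0, sizeof(match_val));
	INIT_LIST_HEAD(&dests);

	/* DROP carries no destinations; dest_size is forced to zero anyway. */
	err = mlx5_cmd_fs_set_fte(dev, 0 /* vport */, &fte_status, match_val,
	    FS_FT_NIC_RX, table_id, 0 /* index */, group_id, 0 /* flow_tag */,
	    MLX5_FLOW_CONTEXT_ACTION_DROP, 0, &dests);
	if (err != 0)
		return (err);

	/* A no-op unless FS_FTE_STATUS_EXISTING is set. */
	return (mlx5_cmd_fs_delete_fte(dev, 0, &fte_status, FS_FT_NIC_RX,
	    table_id, 0));
}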

File diff suppressed because it is too large

View File

@@ -30,7 +30,7 @@
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port)
{
struct mlx5_mad_ifc_mbox_in *in = NULL;

View File

@@ -1,5 +1,5 @@
/*-
* Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
* Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
#include "fs_core.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -73,6 +74,11 @@ struct mlx5_device_context {
void *context;
};
enum {
MLX5_ATOMIC_REQ_MODE_BE = 0x0,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profiles[] = {
[0] = {
.mask = 0,
@@ -392,6 +398,53 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
return err;
}
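/*
 * Switch the 8-byte atomic requestor to host endianness when the HCA
 * advertises support for that mode; otherwise leave the default alone.
 */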
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
void *set_ctx;
void *set_hca_cap;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
int req_endianness;
int err;
if (MLX5_CAP_GEN(dev, atomic)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_MAX);
if (err)
return err;
err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
HCA_CAP_OPMOD_GET_CUR);
if (err)
return err;
} else {
return 0;
}
req_endianness =
MLX5_CAP_ATOMIC(dev,
supported_atomic_req_8B_endianess_mode_1);
if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
return 0;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
return -ENOMEM;
MLX5_SET(set_hca_cap_in, set_ctx, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
/* Set requestor to host endianness */
MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
err = set_caps(dev, set_ctx, set_sz);
kfree(set_ctx);
return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
struct mlx5_reg_host_endianess he_in;
@@ -663,8 +716,8 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_clr_master;
}
dev->iseg = ioremap(pci_resource_start(dev->pdev, 0),
sizeof(*dev->iseg));
dev->iseg_base = pci_resource_start(dev->pdev, 0);
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
err = -ENOMEM;
device_printf((&pdev->dev)->bsddev, "ERR: ""Failed mapping initialization segment, aborting\n");
@@ -716,15 +769,21 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_stop;
}
err = set_hca_ctrl(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
goto reclaim_boot_pages;
}
err = handle_hca_cap(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap failed\n");
goto reclaim_boot_pages;
}
err = set_hca_ctrl(dev);
err = handle_hca_cap_atomic(dev);
if (err) {
device_printf((&pdev->dev)->bsddev, "ERR: ""set_hca_ctrl failed\n");
device_printf((&pdev->dev)->bsddev, "ERR: ""handle_hca_cap_atomic failed\n");
goto reclaim_boot_pages;
}
@@ -794,8 +853,21 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_init_srq_table(dev);
mlx5_init_mr_table(dev);
err = mlx5_init_fs(dev);
if (err) {
mlx5_core_err(dev, "flow steering init %d\n", err);
goto err_init_tables;
}
return 0;
err_init_tables:
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
unmap_bf_area(dev);
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -848,6 +920,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
mlx5_cleanup_fs(dev);
mlx5_cleanup_mr_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
@@ -1061,6 +1134,12 @@ static void remove_one(struct pci_dev *pdev)
kfree(dev);
}
static void shutdown_one(struct pci_dev *pdev)
{
/* prevent device from accessing host memory after shutdown */
pci_clear_master(pdev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */
@@ -1102,6 +1181,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
static struct pci_driver mlx5_core_driver = {
.name = DRIVER_NAME,
.id_table = mlx5_core_pci_table,
.shutdown = shutdown_one,
.probe = init_one,
.remove = remove_one
};

View File

@@ -459,6 +459,46 @@ int mlx5_set_wol(struct mlx5_core_dev *dev, u8 wol_mode)
}
EXPORT_SYMBOL_GPL(mlx5_set_wol);
int mlx5_query_dropless_mode(struct mlx5_core_dev *dev, u16 *timeout)
{
u32 in[MLX5_ST_SZ_DW(query_delay_drop_params_in)];
u32 out[MLX5_ST_SZ_DW(query_delay_drop_params_out)];
int err = 0;
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(query_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_QUERY_DELAY_DROP_PARAMS);
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
if (err)
return err;
*timeout = MLX5_GET(query_delay_drop_params_out, out,
delay_drop_timeout);
return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_dropless_mode);
int mlx5_set_dropless_mode(struct mlx5_core_dev *dev, u16 timeout)
{
u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)];
u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)];
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_SET(set_delay_drop_params_in, in, opcode,
MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout, timeout);
return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
out, sizeof(out));
}
EXPORT_SYMBOL_GPL(mlx5_set_dropless_mode);
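/*
 * Illustrative sketch, not part of the merged diff: the pair above can
 * be used read-modify-write style. The timeout units are device-defined.
 */
static int
example_bump_delay_drop(struct mlx5_core_dev *dev)
{
	u16 timeout;
	int err;

	err = mlx5_query_dropless_mode(dev, &timeout);
	if (err != 0)
		return (err);

	/* Double the current delay-drop timeout. */
	return (mlx5_set_dropless_mode(dev, timeout * 2));
}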
int mlx5_core_access_pvlc(struct mlx5_core_dev *dev,
struct mlx5_pvlc_reg *pvlc, int write)
{
