Merge ^/head r275262 through r275363.
commit 60c317af72
@@ -38,7 +38,7 @@
# xargs -n1 | sort | uniq -d;
# done

# 20141128: new clang import which bumps version from 3.4.1 to 3.5.0.
# 20141201: new clang import which bumps version from 3.4.1 to 3.5.0.
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.4.1/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.4.1/altivec.h
@@ -74,6 +74,8 @@ OLD_FILES+=usr/include/clang/3.4.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.4.1/xmmintrin.h
OLD_FILES+=usr/include/clang/3.4.1/xopintrin.h
OLD_DIRS+=usr/include/clang/3.4.1
# 20141129: mrouted rc.d scripts removed from base
OLD_FILES+=etc/rc.d/mrouted
# 20141126: convert sbin/mdconfig/tests to ATF format tests
OLD_FILES+=usr/tests/sbin/mdconfig/legacy_test
OLD_FILES+=usr/tests/sbin/mdconfig/mdconfig.test

@@ -774,14 +774,6 @@ xtracecommand(struct arglist *varlist, struct arglist *arglist)
for (sp = arglist->list ; sp ; sp = sp->next) {
if (sep != 0)
out2c(' ');
/* Disambiguate command looking like assignment. */
if (sp == arglist->list &&
strchr(sp->text, '=') != NULL &&
strchr(sp->text, '\'') == NULL) {
out2c('\'');
out2str(sp->text);
out2c('\'');
} else
out2qstr(sp->text);
sep = ' ';
}

@@ -122,8 +122,7 @@ outqstr(const char *p, struct output *file)
outstr("''", file);
return;
}
/* Caller will handle '=' if necessary */
if (p[strcspn(p, "|&;<>()$`\\\"' \t\n*?[~#")] == '\0' ||
if (p[strcspn(p, "|&;<>()$`\\\"' \t\n*?[~#=")] == '\0' ||
strcmp(p, "[") == 0) {
outstr(p, file);
return;

@ -4960,7 +4960,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
+ input_section->output_offset
|
||||
+ rel->r_offset);
|
||||
|
||||
value = abs (relocation);
|
||||
value = llabs (relocation);
|
||||
|
||||
if (value >= 0x1000)
|
||||
return bfd_reloc_overflow;
|
||||
@ -4998,7 +4998,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
+ input_section->output_offset
|
||||
+ rel->r_offset);
|
||||
|
||||
value = abs (relocation);
|
||||
value = llabs (relocation);
|
||||
|
||||
if (value >= 0x1000)
|
||||
return bfd_reloc_overflow;
|
||||
@ -5984,7 +5984,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
|
||||
/* Calculate the value of the relevant G_n, in encoded
|
||||
constant-with-rotation format. */
|
||||
g_n = calculate_group_reloc_mask (abs (signed_value), group,
|
||||
g_n = calculate_group_reloc_mask (llabs (signed_value), group,
|
||||
&residual);
|
||||
|
||||
/* Check for overflow if required. */
|
||||
@ -5998,7 +5998,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
(*_bfd_error_handler)
|
||||
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
||||
input_bfd, input_section,
|
||||
(long) rel->r_offset, abs (signed_value), howto->name);
|
||||
(long) rel->r_offset, llabs (signed_value), howto->name);
|
||||
return bfd_reloc_overflow;
|
||||
}
|
||||
|
||||
@ -6077,7 +6077,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
|
||||
/* Calculate the value of the relevant G_{n-1} to obtain
|
||||
the residual at that stage. */
|
||||
calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
||||
calculate_group_reloc_mask (llabs (signed_value), group - 1, &residual);
|
||||
|
||||
/* Check for overflow. */
|
||||
if (residual >= 0x1000)
|
||||
@ -6085,7 +6085,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
(*_bfd_error_handler)
|
||||
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
||||
input_bfd, input_section,
|
||||
(long) rel->r_offset, abs (signed_value), howto->name);
|
||||
(long) rel->r_offset, llabs (signed_value), howto->name);
|
||||
return bfd_reloc_overflow;
|
||||
}
|
||||
|
||||
@ -6160,7 +6160,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
|
||||
/* Calculate the value of the relevant G_{n-1} to obtain
|
||||
the residual at that stage. */
|
||||
calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
||||
calculate_group_reloc_mask (llabs (signed_value), group - 1, &residual);
|
||||
|
||||
/* Check for overflow. */
|
||||
if (residual >= 0x100)
|
||||
@ -6168,7 +6168,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
(*_bfd_error_handler)
|
||||
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
||||
input_bfd, input_section,
|
||||
(long) rel->r_offset, abs (signed_value), howto->name);
|
||||
(long) rel->r_offset, llabs (signed_value), howto->name);
|
||||
return bfd_reloc_overflow;
|
||||
}
|
||||
|
||||
@ -6243,7 +6243,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
|
||||
/* Calculate the value of the relevant G_{n-1} to obtain
|
||||
the residual at that stage. */
|
||||
calculate_group_reloc_mask (abs (signed_value), group - 1, &residual);
|
||||
calculate_group_reloc_mask (llabs (signed_value), group - 1, &residual);
|
||||
|
||||
/* Check for overflow. (The absolute value to go in the place must be
|
||||
divisible by four and, after having been divided by four, must
|
||||
@ -6253,7 +6253,7 @@ elf32_arm_final_link_relocate (reloc_howto_type * howto,
|
||||
(*_bfd_error_handler)
|
||||
(_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
|
||||
input_bfd, input_section,
|
||||
(long) rel->r_offset, abs (signed_value), howto->name);
|
||||
(long) rel->r_offset, llabs (signed_value), howto->name);
|
||||
return bfd_reloc_overflow;
|
||||
}
|
||||
|
||||
|
@@ -980,8 +980,6 @@ LSYM(Lover12):

RET

FUNC_END aeabi_ldiv0
FUNC_END aeabi_idiv0
FUNC_END div0

#endif /* L_divmodsi_tools */

@@ -6,5 +6,6 @@ PROG= cmatose
MAN=
SRCS= cmatose.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4

.include <bsd.prog.mk>

@@ -6,5 +6,6 @@ PROG= mckey
MAN=
SRCS= mckey.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4

.include <bsd.prog.mk>

@@ -6,5 +6,6 @@ PROG= rping
MAN=
SRCS= rping.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4

.include <bsd.prog.mk>

@@ -6,5 +6,6 @@ PROG= udaddy
MAN=
SRCS= udaddy.c
LDADD+= -libverbs -lrdmacm -lpthread
LDADD+= -lmlx4

.include <bsd.prog.mk>

@@ -50,7 +50,7 @@
#include <infiniband/common.h>
#include <infiniband/umad.h>
#include <infiniband/mad.h>
#include <infiniband/complib/cl_nodenamemap.h>
#include <complib/cl_nodenamemap.h>

#include "ibnetdiscover.h"
#include "grouping.h"

@@ -49,7 +49,7 @@
#include <infiniband/common.h>
#include <infiniband/umad.h>
#include <infiniband/mad.h>
#include <infiniband/complib/cl_nodenamemap.h>
#include <complib/cl_nodenamemap.h>

#include "ibdiag_common.h"

@@ -43,7 +43,7 @@
#include <getopt.h>

#include <infiniband/mad.h>
#include <infiniband/iba/ib_types.h>
#include <iba/ib_types.h>

#include "ibdiag_common.h"

@@ -49,7 +49,7 @@
#include <infiniband/common.h>
#include <infiniband/umad.h>
#include <infiniband/mad.h>
#include <infiniband/complib/cl_nodenamemap.h>
#include <complib/cl_nodenamemap.h>

#include "ibdiag_common.h"

@@ -50,12 +50,12 @@
#include <getopt.h>

#include <infiniband/mad.h>
#include <infiniband/opensm/osm_log.h>
#include <infiniband/vendor/osm_vendor_api.h>
#include <infiniband/vendor/osm_vendor_sa_api.h>
#include <infiniband/opensm/osm_mad_pool.h>
#include <infiniband/complib/cl_debug.h>
#include <infiniband/complib/cl_nodenamemap.h>
#include <opensm/osm_log.h>
#include <vendor/osm_vendor_api.h>
#include <vendor/osm_vendor_sa_api.h>
#include <opensm/osm_mad_pool.h>
#include <complib/cl_debug.h>
#include <complib/cl_nodenamemap.h>

#include <netinet/in.h>

@@ -50,7 +50,7 @@
#include <infiniband/common.h>
#include <infiniband/umad.h>
#include <infiniband/mad.h>
#include <infiniband/complib/cl_nodenamemap.h>
#include <complib/cl_nodenamemap.h>

#include "ibdiag_common.h"

@@ -1,4 +1,9 @@
DIAGPATH= ${.CURDIR}/../../management/infiniband-diags
BINDIR?= /usr/bin
CFLAGS+= -I${.CURDIR}/../../include/infiniband
CFLAGS+= -I${.CURDIR}/../../include
CFLAGS+= -I${.CURDIR}/../../management/opensm/include/
CFLAGS+= -I${.CURDIR}/../../management/opensm
CFLAGS+= -I${.CURDIR}/../../management/libibcommon/include
CFLAGS+= -I${.CURDIR}/../../management/libibumad/include
CFLAGS+= -I${.CURDIR}/../../management/libibmad/include

@@ -390,12 +390,6 @@ gateway_enable="NO" # Set to YES if this host will be a gateway.
routed_enable="NO" # Set to YES to enable a routing daemon.
routed_program="/sbin/routed" # Name of routing daemon to use if enabled.
routed_flags="-q" # Flags for routing daemon.
mrouted_enable="NO" # Do IPv4 multicast routing.
mrouted_program="/usr/local/sbin/mrouted" # Name of IPv4 multicast
# routing daemon. You need to
# install it from package or
# port.
mrouted_flags="" # Flags for multicast routing daemon.
arpproxy_all="NO" # replaces obsolete kernel option ARP_PROXYALL.
forward_sourceroute="NO" # do source routing (only if gateway_enable is set to "YES")
accept_sourceroute="NO" # accept source routed packets to us

etc/rc
@@ -69,19 +69,16 @@ fi
# and to make the configuration file variables available to rc itself.
#
. /etc/rc.subr
load_rc_config 'XXX'
load_rc_config

# If we receive a SIGALRM, re-source /etc/rc.conf; this allows rc.d
# scripts to perform "boot-time configuration" including enabling and
# disabling rc.d scripts which appear later in the boot order.
trap "_rc_conf_loaded=false; load_rc_config 'XXX'" ALRM
trap "_rc_conf_loaded=false; load_rc_config" ALRM

skip="-s nostart"
if [ `/sbin/sysctl -n security.jail.jailed` -eq 1 ]; then
skip="$skip -s nojail"
if [ "$early_late_divider" = "FILESYSTEMS" ]; then
early_late_divider=NETWORKING
fi
if [ `/sbin/sysctl -n security.jail.vnet` -ne 1 ]; then
skip="$skip -s nojailvnet"
fi

@@ -90,7 +90,6 @@ FILES= DAEMON \
mountd \
moused \
mroute6d \
mrouted \
msgs \
natd \
netif \

@@ -5,7 +5,7 @@

# PROVIDE: NETWORKING NETWORK
# REQUIRE: netif netoptions routing ppp ipfw stf
# REQUIRE: defaultroute routed mrouted route6d mroute6d resolv bridge
# REQUIRE: defaultroute routed route6d mroute6d resolv bridge
# REQUIRE: static_arp static_ndp local_unbound

# This is a dummy dependency, for services which require networking

@@ -4,7 +4,7 @@
#

# PROVIDE: ipmon
# REQUIRE: FILESYSTEMS hostname sysctl FILESYSTEMS ipfilter
# REQUIRE: FILESYSTEMS hostname sysctl ipfilter
# BEFORE: SERVERS
# KEYWORD: nojail

@@ -1,20 +0,0 @@
#!/bin/sh
#
# $FreeBSD$
#

# PROVIDE: mrouted
# REQUIRE: netif routing FILESYSTEMS
# KEYWORD: nojail

. /etc/rc.subr

name="mrouted"
rcvar="mrouted_enable"
command="/usr/local/sbin/${name}"
pidfile="/var/run/${name}.pid"
required_files="/etc/${name}.conf"
extra_commands="reload"

load_rc_config $name
run_rc_command "$1"
@@ -4,7 +4,7 @@
#

# PROVIDE: pflog
# REQUIRE: FILESYSTEMS netif FILESYSTEMS
# REQUIRE: FILESYSTEMS netif
# KEYWORD: nojail

. /etc/rc.subr

@@ -1315,9 +1315,6 @@ load_rc_config()
{
local _name _rcvar_val _var _defval _v _msg _new _d
_name=$1
if [ -z "$_name" ]; then
err 3 'USAGE: load_rc_config name'
fi

if ${_rc_conf_loaded:-false}; then
:
@@ -1333,6 +1330,9 @@ load_rc_config()
_rc_conf_loaded=true
fi

# If a service name was specified, attempt to load
# service-specific configuration
if [ -n "$_name" ] ; then
for _d in /etc ${local_startup%*/rc.d}; do
if [ -f ${_d}/rc.conf.d/"$_name" ]; then
debug "Sourcing ${_d}/rc.conf.d/$_name"
@@ -1347,6 +1347,7 @@ load_rc_config()
done
fi
done
fi

# Set defaults if defined.
for _var in $rcvar $rcvars; do

@@ -30,7 +30,7 @@ SRCS+= ifmac.c # MAC support
SRCS+= ifmedia.c # SIOC[GS]IFMEDIA support
SRCS+= iffib.c # non-default FIB support
SRCS+= ifvlan.c # SIOC[GS]ETVLAN support
SRCS+= ifvxlan.c # VXLAN support
#SRCS+= ifvxlan.c # VXLAN support
SRCS+= ifgre.c # GRE keys etc
SRCS+= ifgif.c # GIF reversed header workaround

@@ -1,7 +1,7 @@
.\" $FreeBSD$
.\" Based on PR#2411
.\"
.Dd November 4, 2014
.Dd November 30, 2014
.Dt TAP 4
.Os
.Sh NAME
@@ -34,6 +34,17 @@ or a terminal for
and a character-special device
.Dq control
interface.
A client program transfers Ethernet frames to or from the
.Nm
.Dq control
interface.
The
.Xr tun 4
interface provides similar functionality at the network layer:
a client will transfer IP (by default) packets to or from a
.Xr tun 4
.Dq control
interface.
.Pp
The network interfaces are named
.Dq Li tap0 ,
@@ -314,4 +325,5 @@ VMware
.El
.Sh SEE ALSO
.Xr inet 4 ,
.Xr intro 4
.Xr intro 4 ,
.Xr tun 4

@@ -2,7 +2,7 @@
.\" $FreeBSD$
.\" Based on PR#2411
.\"
.Dd February 4, 2007
.Dd November 30, 2014
.Dt TUN 4
.Os
.Sh NAME
@@ -35,6 +35,17 @@ or a terminal for
and a character-special device
.Dq control
interface.
A client program transfers IP (by default) packets to or from the
.Nm
.Dq control
interface.
The
.Xr tap 4
interface provides similar functionality at the Ethernet layer:
a client will transfer Ethernet frames to or from a
.Xr tap 4
.Dq control
interface.
.Pp
The network interfaces are named
.Dq Li tun0 ,
@@ -307,6 +318,7 @@ them pile up.
.Xr inet 4 ,
.Xr intro 4 ,
.Xr pty 4 ,
.Xr tap 4 ,
.Xr ifconfig 8
.Sh AUTHORS
This manual page was originally obtained from

@ -194,7 +194,6 @@ ENTRY(armv5_idcache_wbinv_range)
|
||||
END(armv5_idcache_wbinv_range)
|
||||
|
||||
ENTRY_NP(armv5_idcache_wbinv_all)
|
||||
armv5_idcache_wbinv_all:
|
||||
.Larmv5_idcache_wbinv_all:
|
||||
/*
|
||||
* We assume that the code here can never be out of sync with the
|
||||
|
@ -144,7 +144,6 @@ __FBSDID("$FreeBSD$");
|
||||
|
||||
|
||||
ENTRY_NP(xscalec3_cache_syncI)
|
||||
xscalec3_cache_purgeID:
|
||||
EENTRY_NP(xscalec3_cache_purgeID)
|
||||
mcr p15, 0, r0, c7, c5, 0 /* flush I cache (D cleaned below) */
|
||||
EENTRY_NP(xscalec3_cache_cleanID)
|
||||
|
@ -129,7 +129,7 @@ EENTRY_NP(fuword32)
|
||||
str r1, [r2, #PCB_ONFAULT]
|
||||
mov r0, r3
|
||||
RET
|
||||
END(fuword32)
|
||||
EEND(fuword32)
|
||||
END(fuword)
|
||||
|
||||
/*
|
||||
@ -295,7 +295,7 @@ EENTRY_NP(suword32)
|
||||
mov r0, #0x00000000
|
||||
str r0, [r2, #PCB_ONFAULT]
|
||||
RET
|
||||
END(suword32)
|
||||
EEND(suword32)
|
||||
END(suword)
|
||||
|
||||
/*
|
||||
|
@ -130,7 +130,7 @@ ENTRY(bzero)
|
||||
.Lnormal0:
|
||||
mov r3, #0x00
|
||||
b do_memset
|
||||
EEND(bzero)
|
||||
END(bzero)
|
||||
/* LINTSTUB: Func: void *memset(void *, int, size_t) */
|
||||
ENTRY(memset)
|
||||
and r3, r1, #0xff /* We deal with bytes */
|
||||
|
@ -57,19 +57,6 @@
|
||||
#define _FNEND
|
||||
#endif
|
||||
|
||||
/*
|
||||
* gas/arm uses @ as a single comment character and thus cannot be used here
|
||||
* Instead it recognised the # instead of an @ symbols in .type directives
|
||||
* We define a couple of macros so that assembly code will not be dependent
|
||||
* on one or the other.
|
||||
*/
|
||||
#define _ASM_TYPE_FUNCTION #function
|
||||
#define _ASM_TYPE_OBJECT #object
|
||||
#define GLOBAL(X) .globl x
|
||||
#define _ENTRY(x) \
|
||||
.text; _ALIGN_TEXT; .globl x; .type x,_ASM_TYPE_FUNCTION; x: _FNSTART
|
||||
#define _END(x) .size x, . - x; _FNEND
|
||||
|
||||
/*
|
||||
* EENTRY()/EEND() mark "extra" entry/exit points from a function.
|
||||
* The unwind info cannot handle the concept of a nested function, or a function
|
||||
@ -82,6 +69,19 @@
|
||||
#define _EENTRY(x) .globl x; .type x,_ASM_TYPE_FUNCTION; x:
|
||||
#define _EEND(x) /* nothing */
|
||||
|
||||
/*
|
||||
* gas/arm uses @ as a single comment character and thus cannot be used here
|
||||
* Instead it recognised the # instead of an @ symbols in .type directives
|
||||
* We define a couple of macros so that assembly code will not be dependent
|
||||
* on one or the other.
|
||||
*/
|
||||
#define _ASM_TYPE_FUNCTION #function
|
||||
#define _ASM_TYPE_OBJECT #object
|
||||
#define GLOBAL(X) .globl x
|
||||
#define _ENTRY(x) \
|
||||
.text; _ALIGN_TEXT; _EENTRY(x) _FNSTART
|
||||
#define _END(x) .size x, . - x; _FNEND
|
||||
|
||||
#ifdef GPROF
|
||||
# define _PROF_PROLOGUE \
|
||||
mov ip, lr; bl __mcount
|
||||
|
@ -418,7 +418,7 @@ parse()
|
||||
#if SERIAL
|
||||
} else if (c == 'S') {
|
||||
j = 0;
|
||||
while ((i = *arg++ - '0') <= 9)
|
||||
while ((unsigned int)(i = *arg++ - '0') <= 9)
|
||||
j = j * 10 + i;
|
||||
if (j > 0 && i == -'0') {
|
||||
comspeed = j;
|
||||
|
@ -3219,7 +3219,7 @@ bxe_tpa_stop(struct bxe_softc *sc,
|
||||
#if __FreeBSD_version >= 800000
|
||||
/* specify what RSS queue was used for this flow */
|
||||
m->m_pkthdr.flowid = fp->index;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
|
||||
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
|
||||
@ -3454,7 +3454,7 @@ bxe_rxeof(struct bxe_softc *sc,
|
||||
#if __FreeBSD_version >= 800000
|
||||
/* specify what RSS queue was used for this flow */
|
||||
m->m_pkthdr.flowid = fp->index;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
|
||||
next_rx:
|
||||
@ -6037,10 +6037,9 @@ bxe_tx_mq_start(struct ifnet *ifp,
|
||||
|
||||
fp_index = 0; /* default is the first queue */
|
||||
|
||||
/* change the queue if using flow ID */
|
||||
if ((m->m_flags & M_FLOWID) != 0) {
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
fp_index = (m->m_pkthdr.flowid % sc->num_queues);
|
||||
}
|
||||
|
||||
fp = &sc->fp[fp_index];
|
||||
|
||||
|
@ -1734,7 +1734,8 @@ cxgb_transmit(struct ifnet *ifp, struct mbuf *m)
|
||||
return (0);
|
||||
}
|
||||
|
||||
if (m->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
|
||||
|
||||
qs = &pi->adapter->sge.qs[qidx];
|
||||
@ -2899,10 +2900,11 @@ process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
|
||||
|
||||
eop = get_packet(adap, drop_thresh, qs, mh, r);
|
||||
if (eop) {
|
||||
if (r->rss_hdr.hash_type && !adap->timestamp)
|
||||
mh->mh_head->m_flags |= M_FLOWID;
|
||||
if (r->rss_hdr.hash_type && !adap->timestamp) {
|
||||
M_HASHTYPE_SET(mh->mh_head, M_HASHTYPE_OPAQUE);
|
||||
mh->mh_head->m_pkthdr.flowid = rss_hash;
|
||||
}
|
||||
}
|
||||
|
||||
ethpad = 2;
|
||||
} else {
|
||||
|
@ -1199,7 +1199,7 @@ do_rx_data(struct sge_qset *qs, struct rsp_desc *r, struct mbuf *m)
|
||||
}
|
||||
|
||||
toep->tp_enqueued += m->m_pkthdr.len;
|
||||
sbappendstream_locked(so_rcv, m);
|
||||
sbappendstream_locked(so_rcv, m, 0);
|
||||
sorwakeup_locked(so);
|
||||
SOCKBUF_UNLOCK_ASSERT(so_rcv);
|
||||
|
||||
|
@ -1440,7 +1440,8 @@ cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
|
||||
return (ENETDOWN);
|
||||
}
|
||||
|
||||
if (m->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
txq += ((m->m_pkthdr.flowid % (pi->ntxq - pi->rsrv_noflowq))
|
||||
+ pi->rsrv_noflowq);
|
||||
br = txq->br;
|
||||
|
@ -1734,7 +1734,7 @@ t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0)
|
||||
m0->m_data += fl_pktshift;
|
||||
|
||||
m0->m_pkthdr.rcvif = ifp;
|
||||
m0->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE);
|
||||
m0->m_pkthdr.flowid = be32toh(rss->hash_val);
|
||||
|
||||
if (cpl->csum_calc && !cpl->err_vec) {
|
||||
|
@ -1086,7 +1086,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= m->m_len; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
sbappendstream_locked(sb, m, 0);
|
||||
toep->sb_cc = sbused(sb);
|
||||
}
|
||||
socantrcvmore_locked(so); /* unlocks the sockbuf */
|
||||
@ -1586,7 +1586,7 @@ do_rx_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
|
||||
("%s: sb %p has more data (%d) than last time (%d).",
|
||||
__func__, sb, sbused(sb), toep->sb_cc));
|
||||
toep->rx_credits += toep->sb_cc - sbused(sb);
|
||||
sbappendstream_locked(sb, m);
|
||||
sbappendstream_locked(sb, m, 0);
|
||||
toep->sb_cc = sbused(sb);
|
||||
sorwakeup_locked(so);
|
||||
SOCKBUF_UNLOCK_ASSERT(sb);
|
||||
|
@ -231,7 +231,7 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= n; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
sbappendstream_locked(sb, m, 0);
|
||||
toep->sb_cc = sbused(sb);
|
||||
}
|
||||
|
||||
@ -466,7 +466,7 @@ handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
|
||||
#ifdef USE_DDP_RX_FLOW_CONTROL
|
||||
toep->rx_credits -= len; /* adjust for F_RX_FC_DDP */
|
||||
#endif
|
||||
sbappendstream_locked(sb, m);
|
||||
sbappendstream_locked(sb, m, 0);
|
||||
toep->sb_cc = sbused(sb);
|
||||
wakeup:
|
||||
KASSERT(toep->ddp_flags & db_flag,
|
||||
@ -971,8 +971,9 @@ handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
|
||||
*/
|
||||
rc = sbwait(sb);
|
||||
while (toep->ddp_flags & buf_flag) {
|
||||
/* XXXGL: shouldn't here be sbwait() call? */
|
||||
sb->sb_flags |= SB_WAIT;
|
||||
msleep(&sb->sb_cc, &sb->sb_mtx, PSOCK , "sbwait", 0);
|
||||
msleep(&sb->sb_acc, &sb->sb_mtx, PSOCK , "sbwait", 0);
|
||||
}
|
||||
unwire_ddp_buffer(db);
|
||||
return (rc);
|
||||
|
@ -990,7 +990,7 @@ igb_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
* If everything is setup correctly, it should be the
|
||||
* same bucket that the current CPU we're on is.
|
||||
*/
|
||||
if ((m->m_flags & M_FLOWID) != 0) {
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
|
||||
#ifdef RSS
|
||||
if (rss_hash2bucket(m->m_pkthdr.flowid,
|
||||
M_HASHTYPE_GET(m), &bucket_id) == 0) {
|
||||
@ -5166,7 +5166,6 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
|
||||
/* XXX set flowtype once this works right */
|
||||
rxr->fmp->m_pkthdr.flowid =
|
||||
le32toh(cur->wb.lower.hi_dword.rss);
|
||||
rxr->fmp->m_flags |= M_FLOWID;
|
||||
switch (pkt_info & E1000_RXDADV_RSSTYPE_MASK) {
|
||||
case E1000_RXDADV_RSSTYPE_IPV4_TCP:
|
||||
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_RSS_TCP_IPV4);
|
||||
@ -5196,11 +5195,11 @@ igb_rxeof(struct igb_queue *que, int count, int *done)
|
||||
|
||||
default:
|
||||
/* XXX fallthrough */
|
||||
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_NONE);
|
||||
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
#elif !defined(IGB_LEGACY_TX)
|
||||
rxr->fmp->m_pkthdr.flowid = que->msix;
|
||||
rxr->fmp->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(rxr->fmp, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
sendmp = rxr->fmp;
|
||||
/* Make sure to set M_PKTHDR. */
|
||||
|
@ -833,7 +833,7 @@ ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
* If everything is setup correctly, it should be the
|
||||
* same bucket that the current CPU we're on is.
|
||||
*/
|
||||
if ((m->m_flags & M_FLOWID) != 0) {
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
|
||||
#ifdef RSS
|
||||
if (rss_hash2bucket(m->m_pkthdr.flowid,
|
||||
M_HASHTYPE_GET(m), &bucket_id) == 0) {
|
||||
@ -4764,7 +4764,6 @@ ixgbe_rxeof(struct ix_queue *que)
|
||||
#ifdef RSS
|
||||
sendmp->m_pkthdr.flowid =
|
||||
le32toh(cur->wb.lower.hi_dword.rss);
|
||||
sendmp->m_flags |= M_FLOWID;
|
||||
switch (pkt_info & IXGBE_RXDADV_RSSTYPE_MASK) {
|
||||
case IXGBE_RXDADV_RSSTYPE_IPV4_TCP:
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_RSS_TCP_IPV4);
|
||||
@ -4795,11 +4794,12 @@ ixgbe_rxeof(struct ix_queue *que)
|
||||
break;
|
||||
default:
|
||||
/* XXX fallthrough */
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_NONE);
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
|
||||
break;
|
||||
}
|
||||
#else /* RSS */
|
||||
sendmp->m_pkthdr.flowid = que->msix;
|
||||
sendmp->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
|
||||
#endif /* RSS */
|
||||
#endif /* FreeBSD_version */
|
||||
}
|
||||
|
@ -580,7 +580,7 @@ ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
int i = 0, err = 0;
|
||||
|
||||
/* Which queue to use */
|
||||
if ((m->m_flags & M_FLOWID) != 0)
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
i = m->m_pkthdr.flowid % adapter->num_queues;
|
||||
|
||||
txr = &adapter->tx_rings[i];
|
||||
@ -3464,7 +3464,7 @@ ixv_rxeof(struct ix_queue *que, int count)
|
||||
ixv_rx_checksum(staterr, sendmp, ptype);
|
||||
#if __FreeBSD_version >= 800000
|
||||
sendmp->m_pkthdr.flowid = que->msix;
|
||||
sendmp->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
}
|
||||
next_desc:
|
||||
|
@ -66,8 +66,8 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
struct tx_ring *txr;
|
||||
int err, i;
|
||||
|
||||
/* Which queue to use */
|
||||
if ((m->m_flags & M_FLOWID) != 0)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
i = m->m_pkthdr.flowid % vsi->num_queues;
|
||||
else
|
||||
i = curcpu % vsi->num_queues;
|
||||
@ -1543,7 +1543,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
|
||||
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
|
||||
ixl_rx_checksum(sendmp, status, error, ptype);
|
||||
sendmp->m_pkthdr.flowid = que->msix;
|
||||
sendmp->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(sendmp, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
next_desc:
|
||||
bus_dmamap_sync(rxr->dma.tag, rxr->dma.map,
|
||||
|
@ -2719,7 +2719,7 @@ mxge_rx_done_big(struct mxge_slice_state *ss, uint32_t len,
|
||||
/* flowid only valid if RSS hashing is enabled */
|
||||
if (sc->num_slices > 1) {
|
||||
m->m_pkthdr.flowid = (ss - sc->ss);
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
/* pass the frame up the stack */
|
||||
(*ifp->if_input)(ifp, m);
|
||||
@ -2787,7 +2787,7 @@ mxge_rx_done_small(struct mxge_slice_state *ss, uint32_t len,
|
||||
/* flowid only valid if RSS hashing is enabled */
|
||||
if (sc->num_slices > 1) {
|
||||
m->m_pkthdr.flowid = (ss - sc->ss);
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
/* pass the frame up the stack */
|
||||
(*ifp->if_input)(ifp, m);
|
||||
|
@ -204,7 +204,7 @@ netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
|
||||
* of the transmission does not consume resources.
|
||||
*
|
||||
* On FreeBSD, and on multiqueue cards, we can force the queue using
|
||||
* if ((m->m_flags & M_FLOWID) != 0)
|
||||
* if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
* i = m->m_pkthdr.flowid % adapter->num_queues;
|
||||
* else
|
||||
* i = curcpu % adapter->num_queues;
|
||||
@ -240,7 +240,7 @@ generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
|
||||
m->m_len = m->m_pkthdr.len = len;
|
||||
// inc refcount. All ours, we could skip the atomic
|
||||
atomic_fetchadd_int(PNT_MBUF_REFCNT(m), 1);
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
m->m_pkthdr.flowid = ring_nr;
|
||||
m->m_pkthdr.rcvif = ifp; /* used for tx notification */
|
||||
ret = NA(ifp)->if_transmit(ifp, m);
|
||||
|
@ -563,7 +563,7 @@ oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
int queue_index = 0;
|
||||
int status = 0;
|
||||
|
||||
if ((m->m_flags & M_FLOWID) != 0)
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
queue_index = m->m_pkthdr.flowid % sc->nwqs;
|
||||
|
||||
wq = sc->wq[queue_index];
|
||||
@ -1374,7 +1374,7 @@ oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
|
||||
m->m_pkthdr.flowid = (rq->queue_index - 1);
|
||||
else
|
||||
m->m_pkthdr.flowid = rq->queue_index;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
/* This deternies if vlan tag is Valid */
|
||||
if (oce_cqe_vtp_valid(sc, cqe)) {
|
||||
|
@ -159,7 +159,7 @@ qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
|
||||
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
|
||||
|
||||
mpf->m_pkthdr.flowid = sgc->rss_hash;
|
||||
mpf->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
|
||||
|
||||
(*ifp->if_input)(ifp, mpf);
|
||||
|
||||
@ -324,7 +324,7 @@ qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
|
||||
mpf->m_pkthdr.csum_data = 0xFFFF;
|
||||
|
||||
mpf->m_pkthdr.flowid = sgc->rss_hash;
|
||||
mpf->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);
|
||||
|
||||
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
|
||||
|
||||
|
@ -1140,7 +1140,8 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp)
|
||||
|
||||
QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__));
|
||||
|
||||
if (m_head->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
|
||||
txr_idx = m_head->m_pkthdr.flowid & (ha->hw.num_tx_rings - 1);
|
||||
|
||||
tx_idx = ha->hw.tx_cntxt[txr_idx].txr_next;
|
||||
|
@ -190,7 +190,7 @@ qls_rx_comp(qla_host_t *ha, uint32_t rxr_idx, uint32_t cq_idx, q81_rx_t *cq_e)
|
||||
if ((cq_e->flags1 & Q81_RX_FLAGS1_RSS_MATCH_MASK)) {
|
||||
rxr->rss_int++;
|
||||
mp->m_pkthdr.flowid = cq_e->rss;
|
||||
mp->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
if (cq_e->flags0 & (Q81_RX_FLAGS0_TE |
|
||||
Q81_RX_FLAGS0_NU | Q81_RX_FLAGS0_IE)) {
|
||||
|
@ -1136,7 +1136,8 @@ qls_send(qla_host_t *ha, struct mbuf **m_headp)
|
||||
|
||||
QL_DPRINT8((ha->pci_dev, "%s: enter\n", __func__));
|
||||
|
||||
if (m_head->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE)
|
||||
txr_idx = m_head->m_pkthdr.flowid & (ha->num_tx_rings - 1);
|
||||
|
||||
tx_idx = ha->tx_ring[txr_idx].txr_next;
|
||||
|
@ -302,7 +302,7 @@ sfxge_rx_deliver(struct sfxge_softc *sc, struct sfxge_rx_sw_desc *rx_desc)
|
||||
if (rx_desc->flags & EFX_PKT_TCP) {
|
||||
m->m_pkthdr.flowid = EFX_RX_HASH_VALUE(EFX_RX_HASHALG_TOEPLITZ,
|
||||
mtod(m, uint8_t *));
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
}
|
||||
#endif
|
||||
m->m_data += sc->rx_prefix_size;
|
||||
@ -353,7 +353,7 @@ sfxge_lro_deliver(struct sfxge_lro_state *st, struct sfxge_lro_conn *c)
|
||||
|
||||
#ifdef SFXGE_HAVE_MQ
|
||||
m->m_pkthdr.flowid = c->conn_hash;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
m->m_pkthdr.csum_flags = csum_flags;
|
||||
__sfxge_rx_deliver(sc, m);
|
||||
|
@ -631,7 +631,8 @@ sfxge_if_transmit(struct ifnet *ifp, struct mbuf *m)
|
||||
if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_TSO)) {
|
||||
int index = 0;
|
||||
|
||||
if (m->m_flags & M_FLOWID) {
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
|
||||
uint32_t hash = m->m_pkthdr.flowid;
|
||||
|
||||
index = sc->rx_indir_table[hash % SFXGE_RX_SCALE_MAX];
|
||||
|
File diff suppressed because it is too large
@ -1701,7 +1701,7 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
|
||||
}
|
||||
|
||||
m->m_pkthdr.flowid = rxq->vtnrx_id;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
|
||||
/*
|
||||
* BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum
|
||||
@ -2347,7 +2347,8 @@ vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
sc = ifp->if_softc;
|
||||
npairs = sc->vtnet_act_vq_pairs;
|
||||
|
||||
if (m->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
i = m->m_pkthdr.flowid % npairs;
|
||||
else
|
||||
i = curcpu % npairs;
|
||||
|
@ -2059,7 +2059,7 @@ vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq,
|
||||
}
|
||||
#else
|
||||
m->m_pkthdr.flowid = rxq->vxrxq_id;
|
||||
m->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
#endif
|
||||
|
||||
if (!rxcd->no_csum)
|
||||
@ -3002,7 +3002,8 @@ vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
|
||||
sc = ifp->if_softc;
|
||||
ntxq = sc->vmx_ntxqueues;
|
||||
|
||||
if (m->m_flags & M_FLOWID)
|
||||
/* check if flowid is set */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
i = m->m_pkthdr.flowid % ntxq;
|
||||
else
|
||||
i = curcpu % ntxq;
|
||||
|
@ -660,7 +660,7 @@ vxge_mq_send(ifnet_t ifp, mbuf_t m_head)
|
||||
|
||||
if (vdev->config.tx_steering) {
|
||||
i = vxge_vpath_get(vdev, m_head);
|
||||
} else if ((m_head->m_flags & M_FLOWID) != 0) {
|
||||
} else if (M_HASHTYPE_GET(m_head) != M_HASHTYPE_NONE) {
|
||||
i = m_head->m_pkthdr.flowid % vdev->no_of_vpath;
|
||||
}
|
||||
|
||||
@ -1070,7 +1070,7 @@ vxge_rx_compl(vxge_hal_vpath_h vpath_handle, vxge_hal_rxd_h rxdh,
|
||||
vxge_rx_checksum(ext_info, mbuf_up);
|
||||
|
||||
#if __FreeBSD_version >= 800000
|
||||
mbuf_up->m_flags |= M_FLOWID;
|
||||
M_HASHTYPE_SET(mbuf_up, M_HASHTYPE_OPAQUE);
|
||||
mbuf_up->m_pkthdr.flowid = vpath->vp_index;
|
||||
#endif
|
||||
/* Post-Read sync for buffers */
|
||||
|
@ -401,7 +401,8 @@ db_print_sockbuf(struct sockbuf *sb, const char *sockbufname, int indent)
|
||||
db_printf("sb_sndptroff: %u\n", sb->sb_sndptroff);
|
||||
|
||||
db_print_indent(indent);
|
||||
db_printf("sb_cc: %u ", sb->sb_cc);
|
||||
db_printf("sb_acc: %u ", sb->sb_acc);
|
||||
db_printf("sb_ccc: %u ", sb->sb_ccc);
|
||||
db_printf("sb_hiwat: %u ", sb->sb_hiwat);
|
||||
db_printf("sb_mbcnt: %u ", sb->sb_mbcnt);
|
||||
db_printf("sb_mbmax: %u\n", sb->sb_mbmax);
|
||||
|
@ -152,6 +152,7 @@ protosw_init(struct protosw *pr)
|
||||
DEFAULT(pu->pru_sosend, sosend_generic);
|
||||
DEFAULT(pu->pru_soreceive, soreceive_generic);
|
||||
DEFAULT(pu->pru_sopoll, sopoll_generic);
|
||||
DEFAULT(pu->pru_ready, pru_ready_notsupp);
|
||||
#undef DEFAULT
|
||||
if (pr->pr_init)
|
||||
(*pr->pr_init)();
|
||||
|
@ -388,7 +388,7 @@ mb_dupcl(struct mbuf *n, struct mbuf *m)
|
||||
* cleaned too.
|
||||
*/
|
||||
void
|
||||
m_demote(struct mbuf *m0, int all)
|
||||
m_demote(struct mbuf *m0, int all, int flags)
|
||||
{
|
||||
struct mbuf *m;
|
||||
|
||||
@ -400,7 +400,7 @@ m_demote(struct mbuf *m0, int all)
|
||||
m->m_flags &= ~M_PKTHDR;
|
||||
bzero(&m->m_pkthdr, sizeof(struct pkthdr));
|
||||
}
|
||||
m->m_flags = m->m_flags & (M_EXT|M_RDONLY|M_NOFREE);
|
||||
m->m_flags = m->m_flags & (M_EXT | M_RDONLY | M_NOFREE | flags);
|
||||
}
|
||||
}
|
||||
|
||||
@ -997,7 +997,7 @@ m_catpkt(struct mbuf *m, struct mbuf *n)
|
||||
M_ASSERTPKTHDR(n);
|
||||
|
||||
m->m_pkthdr.len += n->m_pkthdr.len;
|
||||
m_demote(n, 1);
|
||||
m_demote(n, 1, 0);
|
||||
|
||||
m_cat(m, n);
|
||||
}
|
||||
|
@ -68,6 +68,123 @@ static u_long sb_efficiency = 8; /* parameter for sbreserve() */
|
||||
static struct mbuf *sbcut_internal(struct sockbuf *sb, int len);
|
||||
static void sbflush_internal(struct sockbuf *sb);
|
||||
|
||||
/*
|
||||
* Mark ready "count" mbufs starting with "m".
|
||||
*/
|
||||
int
|
||||
sbready(struct sockbuf *sb, struct mbuf *m, int count)
|
||||
{
|
||||
u_int blocker;
|
||||
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
|
||||
|
||||
blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;
|
||||
|
||||
for (int i = 0; i < count; i++, m = m->m_next) {
|
||||
KASSERT(m->m_flags & M_NOTREADY,
|
||||
("%s: m %p !M_NOTREADY", __func__, m));
|
||||
m->m_flags &= ~(M_NOTREADY | blocker);
|
||||
if (blocker)
|
||||
sb->sb_acc += m->m_len;
|
||||
}
|
||||
|
||||
if (!blocker)
|
||||
return (EINPROGRESS);
|
||||
|
||||
/* This one was blocking all the queue. */
|
||||
for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
|
||||
KASSERT(m->m_flags & M_BLOCKED,
|
||||
("%s: m %p !M_BLOCKED", __func__, m));
|
||||
m->m_flags &= ~M_BLOCKED;
|
||||
sb->sb_acc += m->m_len;
|
||||
}
|
||||
|
||||
sb->sb_fnrdy = m;
|
||||
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Adjust sockbuf state reflecting allocation of m.
|
||||
*/
|
||||
void
|
||||
sballoc(struct sockbuf *sb, struct mbuf *m)
|
||||
{
|
||||
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
|
||||
sb->sb_ccc += m->m_len;
|
||||
|
||||
if (sb->sb_fnrdy == NULL) {
|
||||
if (m->m_flags & M_NOTREADY)
|
||||
sb->sb_fnrdy = m;
|
||||
else
|
||||
sb->sb_acc += m->m_len;
|
||||
} else
|
||||
m->m_flags |= M_BLOCKED;
|
||||
|
||||
if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
|
||||
sb->sb_ctl += m->m_len;
|
||||
|
||||
sb->sb_mbcnt += MSIZE;
|
||||
sb->sb_mcnt += 1;
|
||||
|
||||
if (m->m_flags & M_EXT) {
|
||||
sb->sb_mbcnt += m->m_ext.ext_size;
|
||||
sb->sb_ccnt += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Adjust sockbuf state reflecting freeing of m.
|
||||
*/
|
||||
void
|
||||
sbfree(struct sockbuf *sb, struct mbuf *m)
|
||||
{
|
||||
|
||||
#if 0 /* XXX: not yet: soclose() call path comes here w/o lock. */
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
#endif
|
||||
|
||||
sb->sb_ccc -= m->m_len;
|
||||
|
||||
if (!(m->m_flags & M_NOTAVAIL))
|
||||
sb->sb_acc -= m->m_len;
|
||||
|
||||
if (m == sb->sb_fnrdy) {
|
||||
struct mbuf *n;
|
||||
|
||||
KASSERT(m->m_flags & M_NOTREADY,
|
||||
("%s: m %p !M_NOTREADY", __func__, m));
|
||||
|
||||
n = m->m_next;
|
||||
while (n != NULL && !(n->m_flags & M_NOTREADY)) {
|
||||
n->m_flags &= ~M_BLOCKED;
|
||||
sb->sb_acc += n->m_len;
|
||||
n = n->m_next;
|
||||
}
|
||||
sb->sb_fnrdy = n;
|
||||
}
|
||||
|
||||
if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
|
||||
sb->sb_ctl -= m->m_len;
|
||||
|
||||
sb->sb_mbcnt -= MSIZE;
|
||||
sb->sb_mcnt -= 1;
|
||||
if (m->m_flags & M_EXT) {
|
||||
sb->sb_mbcnt -= m->m_ext.ext_size;
|
||||
sb->sb_ccnt -= 1;
|
||||
}
|
||||
|
||||
if (sb->sb_sndptr == m) {
|
||||
sb->sb_sndptr = NULL;
|
||||
sb->sb_sndptroff = 0;
|
||||
}
|
||||
if (sb->sb_sndptroff != 0)
|
||||
sb->sb_sndptroff -= m->m_len;
|
||||
}
|
||||
|
||||
/*
|
||||
* Socantsendmore indicates that no more data will be sent on the socket; it
|
||||
* would normally be applied to a socket when the user informs the system
|
||||
@ -127,7 +244,7 @@ sbwait(struct sockbuf *sb)
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
|
||||
sb->sb_flags |= SB_WAIT;
|
||||
return (msleep_sbt(&sb->sb_cc, &sb->sb_mtx,
|
||||
return (msleep_sbt(&sb->sb_acc, &sb->sb_mtx,
|
||||
(sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
|
||||
sb->sb_timeo, 0, 0));
|
||||
}
|
||||
@ -184,7 +301,7 @@ sowakeup(struct socket *so, struct sockbuf *sb)
|
||||
sb->sb_flags &= ~SB_SEL;
|
||||
if (sb->sb_flags & SB_WAIT) {
|
||||
sb->sb_flags &= ~SB_WAIT;
|
||||
wakeup(&sb->sb_cc);
|
||||
wakeup(&sb->sb_acc);
|
||||
}
|
||||
KNOTE_LOCKED(&sb->sb_sel.si_note, 0);
|
||||
if (sb->sb_upcall != NULL) {
|
||||
@ -519,7 +636,7 @@ sbappend(struct sockbuf *sb, struct mbuf *m)
|
||||
* that is, a stream protocol (such as TCP).
|
||||
*/
|
||||
void
|
||||
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
|
||||
sbappendstream_locked(struct sockbuf *sb, struct mbuf *m, int flags)
|
||||
{
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
|
||||
@ -529,7 +646,7 @@ sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
|
||||
SBLASTMBUFCHK(sb);
|
||||
|
||||
/* Remove all packet headers and mbuf tags to get a pure data chain. */
|
||||
m_demote(m, 1);
|
||||
m_demote(m, 1, flags & PRUS_NOTREADY ? M_NOTREADY : 0);
|
||||
|
||||
sbcompress(sb, m, sb->sb_mbtail);
|
||||
|
||||
@ -543,38 +660,63 @@ sbappendstream_locked(struct sockbuf *sb, struct mbuf *m)
|
||||
* that is, a stream protocol (such as TCP).
|
||||
*/
|
||||
void
|
||||
sbappendstream(struct sockbuf *sb, struct mbuf *m)
|
||||
sbappendstream(struct sockbuf *sb, struct mbuf *m, int flags)
|
||||
{
|
||||
|
||||
SOCKBUF_LOCK(sb);
|
||||
sbappendstream_locked(sb, m);
|
||||
sbappendstream_locked(sb, m, flags);
|
||||
SOCKBUF_UNLOCK(sb);
|
||||
}
|
||||
|
||||
#ifdef SOCKBUF_DEBUG
|
||||
void
|
||||
sbcheck(struct sockbuf *sb)
|
||||
sbcheck(struct sockbuf *sb, const char *file, int line)
|
||||
{
|
||||
struct mbuf *m;
|
||||
struct mbuf *n = 0;
|
||||
u_long len = 0, mbcnt = 0;
|
||||
struct mbuf *m, *n, *fnrdy;
|
||||
u_long acc, ccc, mbcnt;
|
||||
|
||||
SOCKBUF_LOCK_ASSERT(sb);
|
||||
|
||||
acc = ccc = mbcnt = 0;
|
||||
fnrdy = NULL;
|
||||
|
||||
for (m = sb->sb_mb; m; m = n) {
|
||||
n = m->m_nextpkt;
|
||||
for (; m; m = m->m_next) {
|
||||
len += m->m_len;
|
||||
if (m->m_len == 0) {
|
||||
printf("sb %p empty mbuf %p\n", sb, m);
|
||||
goto fail;
|
||||
}
|
||||
if ((m->m_flags & M_NOTREADY) && fnrdy == NULL) {
|
||||
if (m != sb->sb_fnrdy) {
|
||||
printf("sb %p: fnrdy %p != m %p\n",
|
||||
sb, sb->sb_fnrdy, m);
|
||||
goto fail;
|
||||
}
|
||||
fnrdy = m;
|
||||
}
|
||||
if (fnrdy) {
|
||||
if (!(m->m_flags & M_NOTAVAIL)) {
|
||||
printf("sb %p: fnrdy %p, m %p is avail\n",
|
||||
sb, sb->sb_fnrdy, m);
|
||||
goto fail;
|
||||
}
|
||||
} else
|
||||
acc += m->m_len;
|
||||
ccc += m->m_len;
|
||||
mbcnt += MSIZE;
|
||||
if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
|
||||
mbcnt += m->m_ext.ext_size;
|
||||
}
|
||||
}
|
||||
if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
|
||||
printf("cc %ld != %u || mbcnt %ld != %u\n", len, sb->sb_cc,
|
||||
mbcnt, sb->sb_mbcnt);
|
||||
panic("sbcheck");
|
||||
if (acc != sb->sb_acc || ccc != sb->sb_ccc || mbcnt != sb->sb_mbcnt) {
|
||||
printf("acc %ld/%u ccc %ld/%u mbcnt %ld/%u\n",
|
||||
acc, sb->sb_acc, ccc, sb->sb_ccc, mbcnt, sb->sb_mbcnt);
|
||||
goto fail;
|
||||
}
|
||||
return;
|
||||
fail:
|
||||
panic("%s from %s:%u", __func__, file, line);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -770,8 +912,8 @@ sbappendcontrol(struct sockbuf *sb, struct mbuf *m0, struct mbuf *control)
|
||||
*
|
||||
* (2) The mbuf may be coalesced -- i.e., data in the mbuf may be copied into
|
||||
* an mbuf already in the socket buffer. This can occur if an
|
||||
* appropriate mbuf exists, there is room, and no merging of data types
|
||||
* will occur.
|
||||
* appropriate mbuf exists, there is room, both mbufs are not marked as
|
||||
* not ready, and no merging of data types will occur.
|
||||
*
|
||||
* (3) The mbuf may be appended to the end of the existing mbuf chain.
|
||||
*
|
||||
@ -800,13 +942,17 @@ sbcompress(struct sockbuf *sb, struct mbuf *m, struct mbuf *n)
|
||||
if (n && (n->m_flags & M_EOR) == 0 &&
|
||||
M_WRITABLE(n) &&
|
||||
((sb->sb_flags & SB_NOCOALESCE) == 0) &&
|
||||
!(m->m_flags & M_NOTREADY) &&
|
||||
!(n->m_flags & M_NOTREADY) &&
|
||||
m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
|
||||
m->m_len <= M_TRAILINGSPACE(n) &&
|
||||
n->m_type == m->m_type) {
|
||||
bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
|
||||
(unsigned)m->m_len);
|
||||
n->m_len += m->m_len;
|
||||
sb->sb_cc += m->m_len;
|
||||
sb->sb_ccc += m->m_len;
|
||||
if (sb->sb_fnrdy == NULL)
|
||||
sb->sb_acc += m->m_len;
|
||||
if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
|
||||
/* XXX: Probably don't need.*/
|
||||
sb->sb_ctl += m->m_len;
|
||||
@ -843,13 +989,13 @@ sbflush_internal(struct sockbuf *sb)
|
||||
* Don't call sbcut(sb, 0) if the leading mbuf is non-empty:
|
||||
* we would loop forever. Panic instead.
|
||||
*/
|
||||
if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
|
||||
if (sb->sb_ccc == 0 && (sb->sb_mb == NULL || sb->sb_mb->m_len))
|
||||
break;
|
||||
m_freem(sbcut_internal(sb, (int)sb->sb_cc));
|
||||
m_freem(sbcut_internal(sb, (int)sb->sb_ccc));
|
||||
}
|
||||
if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
|
||||
panic("sbflush_internal: cc %u || mb %p || mbcnt %u",
|
||||
sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
|
||||
KASSERT(sb->sb_ccc == 0 && sb->sb_mb == 0 && sb->sb_mbcnt == 0,
|
||||
("%s: ccc %u mb %p mbcnt %u", __func__,
|
||||
sb->sb_ccc, (void *)sb->sb_mb, sb->sb_mbcnt));
|
||||
}
|
||||
|
||||
void
|
||||
@ -875,7 +1021,7 @@ sbflush(struct sockbuf *sb)
|
||||
static struct mbuf *
|
||||
sbcut_internal(struct sockbuf *sb, int len)
|
||||
{
|
||||
struct mbuf *m, *n, *next, *mfree;
|
||||
struct mbuf *m, *next, *mfree;
|
||||
|
||||
next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
|
||||
mfree = NULL;
|
||||
@ -887,9 +1033,12 @@ sbcut_internal(struct sockbuf *sb, int len)
|
||||
next = m->m_nextpkt;
|
||||
}
|
||||
if (m->m_len > len) {
|
||||
KASSERT(!(m->m_flags & M_NOTAVAIL),
|
||||
("%s: m %p M_NOTAVAIL", __func__, m));
|
||||
m->m_len -= len;
|
||||
m->m_data += len;
|
||||
sb->sb_cc -= len;
|
||||
sb->sb_ccc -= len;
|
||||
sb->sb_acc -= len;
|
||||
if (sb->sb_sndptroff != 0)
|
||||
sb->sb_sndptroff -= len;
|
||||
if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
|
||||
@ -898,11 +1047,21 @@ sbcut_internal(struct sockbuf *sb, int len)
|
||||
}
|
||||
len -= m->m_len;
|
||||
sbfree(sb, m);
|
||||
/*
|
||||
* Do not put M_NOTREADY buffers to the free list, they
|
||||
* are referenced from outside.
|
||||
*/
|
||||
if (m->m_flags & M_NOTREADY)
|
||||
m = m->m_next;
|
||||
else {
|
||||
struct mbuf *n;
|
||||
|
||||
n = m->m_next;
|
||||
m->m_next = mfree;
|
||||
mfree = m;
|
||||
m = n;
|
||||
}
|
||||
}
|
||||
if (m) {
|
||||
sb->sb_mb = m;
|
||||
m->m_nextpkt = next;
|
||||
@ -968,8 +1127,8 @@ sbsndptr(struct sockbuf *sb, u_int off, u_int len, u_int *moff)
|
||||
struct mbuf *m, *ret;
|
||||
|
||||
KASSERT(sb->sb_mb != NULL, ("%s: sb_mb is NULL", __func__));
|
||||
KASSERT(off + len <= sb->sb_cc, ("%s: beyond sb", __func__));
|
||||
KASSERT(sb->sb_sndptroff <= sb->sb_cc, ("%s: sndptroff broken", __func__));
|
||||
KASSERT(off + len <= sb->sb_acc, ("%s: beyond sb", __func__));
|
||||
KASSERT(sb->sb_sndptroff <= sb->sb_acc, ("%s: sndptroff broken", __func__));
|
||||
|
||||
/*
|
||||
* Is off below stored offset? Happens on retransmits.
|
||||
@ -1118,7 +1277,7 @@ void
|
||||
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
|
||||
{
|
||||
|
||||
xsb->sb_cc = sb->sb_cc;
|
||||
xsb->sb_cc = sb->sb_ccc;
|
||||
xsb->sb_hiwat = sb->sb_hiwat;
|
||||
xsb->sb_mbcnt = sb->sb_mbcnt;
|
||||
xsb->sb_mcnt = sb->sb_mcnt;
|
||||
|
@ -1706,7 +1706,8 @@ soreceive_generic(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
*/
|
||||
moff = 0;
|
||||
offset = 0;
|
||||
while (m != NULL && uio->uio_resid > 0 && error == 0) {
|
||||
while (m != NULL && !(m->m_flags & M_NOTAVAIL) && uio->uio_resid > 0
|
||||
&& error == 0) {
|
||||
/*
|
||||
* If the type of mbuf has changed since the last mbuf
|
||||
* examined ('type'), end the receive operation.
|
||||
@ -2044,6 +2045,8 @@ soreceive_stream(struct socket *so, struct sockaddr **psa, struct uio *uio,
|
||||
for (m = sb->sb_mb;
|
||||
m != NULL && m->m_len <= len;
|
||||
m = m->m_next) {
|
||||
KASSERT(!(m->m_flags & M_NOTAVAIL),
|
||||
("%s: m %p not available", __func__, m));
|
||||
len -= m->m_len;
|
||||
uio->uio_resid -= m->m_len;
|
||||
sbfree(sb, m);
|
||||
@ -3175,6 +3178,13 @@ pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
|
||||
return EOPNOTSUPP;
|
||||
}
|
||||
|
||||
int
|
||||
pru_ready_notsupp(struct socket *so, struct mbuf *m, int count)
|
||||
{
|
||||
|
||||
return (EOPNOTSUPP);
|
||||
}
|
||||
|
||||
/*
|
||||
* This isn't really a ``null'' operation, but it's the default one and
|
||||
* doesn't do anything destructive.
|
||||
|
@ -1047,6 +1047,32 @@ uipc_send(struct socket *so, int flags, struct mbuf *m, struct sockaddr *nam,
|
||||
return (error);
|
||||
}
|
||||
|
||||
static int
|
||||
uipc_ready(struct socket *so, struct mbuf *m, int count)
|
||||
{
|
||||
struct unpcb *unp, *unp2;
|
||||
struct socket *so2;
|
||||
int error;
|
||||
|
||||
unp = sotounpcb(so);
|
||||
|
||||
UNP_LINK_RLOCK();
|
||||
unp2 = unp->unp_conn;
|
||||
UNP_PCB_LOCK(unp2);
|
||||
so2 = unp2->unp_socket;
|
||||
|
||||
SOCKBUF_LOCK(&so2->so_rcv);
|
||||
if ((error = sbready(&so2->so_rcv, m, count)) == 0)
|
||||
sorwakeup_locked(so2);
|
||||
else
|
||||
SOCKBUF_UNLOCK(&so2->so_rcv);
|
||||
|
||||
UNP_PCB_UNLOCK(unp2);
|
||||
UNP_LINK_RUNLOCK();
|
||||
|
||||
return (error);
|
||||
}
|
||||
|
||||
static int
|
||||
uipc_sense(struct socket *so, struct stat *sb)
|
||||
{
|
||||
@ -1161,6 +1187,7 @@ static struct pr_usrreqs uipc_usrreqs_stream = {
|
||||
.pru_peeraddr = uipc_peeraddr,
|
||||
.pru_rcvd = uipc_rcvd,
|
||||
.pru_send = uipc_send,
|
||||
.pru_ready = uipc_ready,
|
||||
.pru_sense = uipc_sense,
|
||||
.pru_shutdown = uipc_shutdown,
|
||||
.pru_sockaddr = uipc_sockaddr,
|
||||
|
@ -688,8 +688,8 @@ flowtable_lookup(sa_family_t sa, struct mbuf *m, struct route *ro)
|
||||
if (fle == NULL)
|
||||
return (EHOSTUNREACH);
|
||||
|
||||
if (!(m->m_flags & M_FLOWID)) {
|
||||
m->m_flags |= M_FLOWID;
|
||||
if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE) {
|
||||
M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
|
||||
m->m_pkthdr.flowid = fle->f_hash;
|
||||
}
|
||||
|
||||
|
@ -835,7 +835,8 @@ lacp_select_tx_port(struct lagg_softc *sc, struct mbuf *m)
|
||||
return (NULL);
|
||||
}
|
||||
|
||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && (m->m_flags & M_FLOWID))
|
||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
|
||||
M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
hash = m->m_pkthdr.flowid >> sc->flowid_shift;
|
||||
else
|
||||
hash = lagg_hashmbuf(sc, m, lsc->lsc_hashkey);
|
||||
|
@@ -247,14 +247,14 @@ SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
&VNET_NAME(lagg_failover_rx_all), 0,
"Accept input from any interface in a failover lagg");

/* Default value for using M_FLOWID */
/* Default value for using flowid */
static VNET_DEFINE(int, def_use_flowid) = 1;
#define V_def_use_flowid VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
&VNET_NAME(def_use_flowid), 0,
"Default setting for using flow id for load sharing");

/* Default value for using M_FLOWID */
/* Default value for flowid shift */
static VNET_DEFINE(int, def_flowid_shift) = 16;
#define V_def_flowid_shift VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,

@ -2148,7 +2148,8 @@ lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
|
||||
struct lagg_port *lp = NULL;
|
||||
uint32_t p = 0;
|
||||
|
||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) && (m->m_flags & M_FLOWID))
|
||||
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
|
||||
M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
||||
p = m->m_pkthdr.flowid >> sc->flowid_shift;
|
||||
else
|
||||
p = lagg_hashmbuf(sc, m, lb->lb_key);
|
||||
|
@@ -143,9 +143,9 @@ struct lagg_reqopts {

int ro_opts; /* Option bitmap */
#define LAGG_OPT_NONE 0x00
#define LAGG_OPT_USE_FLOWID 0x01 /* use M_FLOWID */
#define LAGG_OPT_USE_FLOWID 0x01 /* enable use of flowid */
/* Pseudo flags which are used in ro_opts but not stored into sc_opts. */
#define LAGG_OPT_FLOWIDSHIFT 0x02 /* Set flowid */
#define LAGG_OPT_FLOWIDSHIFT 0x02 /* set flowid shift */
#define LAGG_OPT_FLOWIDSHIFT_MASK 0x1f /* flowid is uint32_t */
#define LAGG_OPT_LACP_STRICT 0x10 /* LACP strict mode */
#define LAGG_OPT_LACP_TXTEST 0x20 /* LACP debug: txtest */

@ -2236,6 +2236,7 @@ vxlan_pick_source_port(struct vxlan_softc *sc, struct mbuf *m)
|
||||
|
||||
range = sc->vxl_max_port - sc->vxl_min_port + 1;
|
||||
|
||||
/* check if flowid is set and not opaque */
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE &&
|
||||
M_HASHTYPE_GET(m) != M_HASHTYPE_OPAQUE)
|
||||
hash = m->m_pkthdr.flowid;
|
||||
|
@ -682,12 +682,13 @@ netisr_select_cpuid(struct netisr_proto *npp, u_int dispatch_policy,
|
||||
}
|
||||
|
||||
if (policy == NETISR_POLICY_FLOW) {
|
||||
if (!(m->m_flags & M_FLOWID) && npp->np_m2flow != NULL) {
|
||||
if (M_HASHTYPE_GET(m) == M_HASHTYPE_NONE &&
|
||||
npp->np_m2flow != NULL) {
|
||||
m = npp->np_m2flow(m, source);
|
||||
if (m == NULL)
|
||||
return (NULL);
|
||||
}
|
||||
if (m->m_flags & M_FLOWID) {
|
||||
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
|
||||
*cpuidp =
|
||||
netisr_default_flow2cpu(m->m_pkthdr.flowid);
|
||||
return (m);
|
||||
|
@@ -530,8 +530,8 @@ short inp_so_options(const struct inpcb *inp);
#define INP_ONESBCAST 0x02000000 /* send all-ones broadcast */
#define INP_DROPPED 0x04000000 /* protocol drop flag */
#define INP_SOCKREF 0x08000000 /* strong socket reference */
#define INP_SW_FLOWID 0x10000000 /* software generated flow id */
#define INP_HW_FLOWID 0x20000000 /* hardware generated flow id */
#define INP_RESERVED_0 0x10000000 /* reserved field */
#define INP_RESERVED_1 0x20000000 /* reserved field */
#define IN6P_RFC2292 0x40000000 /* used RFC2292 API on the socket */
#define IN6P_MTU 0x80000000 /* receive path MTU */

@ -568,6 +568,8 @@ rss_mbuf_software_hash_v4(const struct mbuf *m, int dir, uint32_t *hashval,
|
||||
const struct ip *ip;
|
||||
const struct tcphdr *th;
|
||||
const struct udphdr *uh;
|
||||
uint32_t flowid;
|
||||
uint32_t flowtype;
|
||||
uint8_t proto;
|
||||
int iphlen;
|
||||
int is_frag = 0;
|
||||
@ -617,12 +619,10 @@ rss_mbuf_software_hash_v4(const struct mbuf *m, int dir, uint32_t *hashval,
|
||||
* then we shouldn't just "trust" the 2-tuple hash. We need
|
||||
* a 4-tuple hash.
|
||||
*/
|
||||
if (m->m_flags & M_FLOWID) {
|
||||
uint32_t flowid, flowtype;
|
||||
|
||||
flowid = m->m_pkthdr.flowid;
|
||||
flowtype = M_HASHTYPE_GET(m);
|
||||
|
||||
if (flowtype != M_HASHTYPE_NONE) {
|
||||
switch (proto) {
|
||||
case IPPROTO_UDP:
|
||||
if ((rss_gethashconfig_local() & RSS_HASHTYPE_RSS_UDP_IPV4) &&
|
||||
@ -743,7 +743,6 @@ rss_soft_m2cpuid(struct mbuf *m, uintptr_t source, u_int *cpuid)
|
||||
/* hash was done; update */
|
||||
m->m_pkthdr.flowid = hash_val;
|
||||
M_HASHTYPE_SET(m, hash_type);
|
||||
m->m_flags |= M_FLOWID;
|
||||
*cpuid = rss_hash2cpuid(m->m_pkthdr.flowid, M_HASHTYPE_GET(m));
|
||||
} else { /* ret < 0 */
|
||||
/* no hash was done */
|
||||
|
@ -1196,7 +1196,6 @@ ip_reass(struct mbuf *m)
|
||||
if (rss_mbuf_software_hash_v4(m, 0, &rss_hash, &rss_type) == 0) {
|
||||
m->m_pkthdr.flowid = rss_hash;
|
||||
M_HASHTYPE_SET(m, rss_type);
|
||||
m->m_flags |= M_FLOWID;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -147,11 +147,9 @@ ip_output(struct mbuf *m, struct mbuf *opt, struct route *ro, int flags,
|
||||
if (inp != NULL) {
|
||||
INP_LOCK_ASSERT(inp);
|
||||
M_SETFIB(m, inp->inp_inc.inc_fibnum);
|
||||
if (((flags & IP_NODEFAULTFLOWID) == 0) &&
|
||||
inp->inp_flags & (INP_HW_FLOWID|INP_SW_FLOWID)) {
|
||||
if ((flags & IP_NODEFAULTFLOWID) == 0) {
|
||||
m->m_pkthdr.flowid = inp->inp_flowid;
|
||||
M_HASHTYPE_SET(m, inp->inp_flowtype);
|
||||
m->m_flags |= M_FLOWID;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -70,14 +70,14 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)

/*
* This is really set wrong with respect to a 1-2-m socket. Since
* the sb_cc is the count that everyone as put up. When we re-write
* the sb_ccc is the count that everyone as put up. When we re-write
* sctp_soreceive then we will fix this so that ONLY this
* associations data is taken into account.
*/
if (stcb->sctp_socket == NULL)
return (calc);

if (stcb->asoc.sb_cc == 0 &&
if (stcb->asoc.sb_ccc == 0 &&
asoc->size_on_reasm_queue == 0 &&
asoc->size_on_all_streams == 0) {
/* Full rwnd granted */
@@ -1363,7 +1363,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
* When we have NO room in the rwnd we check to make sure
* the reader is doing its job...
*/
if (stcb->sctp_socket->so_rcv.sb_cc) {
if (stcb->sctp_socket->so_rcv.sb_ccc) {
/* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
struct socket *so;
@@ -1032,7 +1032,7 @@ sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
if (stcb->sctp_socket) {
if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
stcb->sctp_socket->so_snd.sb_cc = 0;
stcb->sctp_socket->so_snd.sb_ccc = 0;
}
sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
}
@@ -399,7 +399,7 @@ typedef struct callout sctp_os_timer_t;
#define SCTP_SOWAKEUP(so) wakeup(&(so)->so_timeo)
/* clear the socket buffer state */
#define SCTP_SB_CLEAR(sb) \
(sb).sb_cc = 0; \
(sb).sb_ccc = 0; \
(sb).sb_mb = NULL; \
(sb).sb_mbcnt = 0;
@@ -7250,7 +7250,7 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb,
if ((stcb->sctp_socket != NULL) && \
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc, sp->length);
atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_ccc, sp->length);
}
if (sp->data) {
sctp_m_freem(sp->data);
@@ -11532,7 +11532,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
drp->current_onq = htonl(asoc->size_on_reasm_queue +
asoc->size_on_all_streams +
asoc->my_rwnd_control_len +
stcb->sctp_socket->so_rcv.sb_cc);
stcb->sctp_socket->so_rcv.sb_ccc);
} else {
/*-
* If my rwnd is 0, possibly from mbuf depletion as well as
@@ -3397,7 +3397,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
if ((asoc->asoc.size_on_reasm_queue > 0) ||
(asoc->asoc.control_pdapi) ||
(asoc->asoc.size_on_all_streams > 0) ||
(so && (so->so_rcv.sb_cc > 0))) {
(so && (so->so_rcv.sb_ccc > 0))) {
/* Left with Data unread */
struct mbuf *op_err;

@@ -3625,7 +3625,7 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
TAILQ_REMOVE(&inp->read_queue, sq, next);
sctp_free_remote_addr(sq->whoFrom);
if (so)
so->so_rcv.sb_cc -= sq->length;
so->so_rcv.sb_ccc -= sq->length;
if (sq->data) {
sctp_m_freem(sq->data);
sq->data = NULL;
@@ -4853,7 +4853,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
if (so) {
SOCK_LOCK(so);
if (so->so_rcv.sb_cc == 0) {
if (so->so_rcv.sb_ccc == 0) {
so->so_state &= ~(SS_ISCONNECTING |
SS_ISDISCONNECTING |
SS_ISCONFIRMING |
@@ -369,7 +369,7 @@ struct sctp_inpcb {
} ip_inp;

/* Socket buffer lock protects read_queue and of course sb_cc */
/* Socket buffer lock protects read_queue and of course sb_ccc */
struct sctp_readhead read_queue;

LIST_ENTRY(sctp_inpcb) sctp_list; /* lists all endpoints */
@@ -990,7 +990,7 @@ struct sctp_association {

uint32_t total_output_queue_size;

uint32_t sb_cc; /* shadow of sb_cc */
uint32_t sb_ccc; /* shadow of sb_ccc */
uint32_t sb_send_resv; /* amount reserved on a send */
uint32_t my_rwnd_control_len; /* shadow of sb_mbcnt used for rwnd
* control */
@@ -586,7 +586,7 @@ sctp_close(struct socket *so)
if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
(atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
(so->so_rcv.sb_cc > 0)) {
(so->so_rcv.sb_ccc > 0)) {
#ifdef SCTP_LOG_CLOSING
sctp_log_closing(inp, NULL, 13);
#endif
@@ -751,7 +751,7 @@ sctp_disconnect(struct socket *so)
}
if (((so->so_options & SO_LINGER) &&
(so->so_linger == 0)) ||
(so->so_rcv.sb_cc > 0)) {
(so->so_rcv.sb_ccc > 0)) {
if (SCTP_GET_STATE(asoc) !=
SCTP_STATE_COOKIE_WAIT) {
/* Left with Data unread */
@@ -916,7 +916,7 @@ sctp_flush(struct socket *so, int how)
inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
SCTP_INP_READ_UNLOCK(inp);
SCTP_INP_WUNLOCK(inp);
so->so_rcv.sb_cc = 0;
so->so_rcv.sb_ccc = 0;
so->so_rcv.sb_mbcnt = 0;
so->so_rcv.sb_mb = NULL;
}
@@ -925,7 +925,7 @@ sctp_flush(struct socket *so, int how)
* First make sure the sb will be happy, we don't use these
* except maybe the count
*/
so->so_snd.sb_cc = 0;
so->so_snd.sb_ccc = 0;
so->so_snd.sb_mbcnt = 0;
so->so_snd.sb_mb = NULL;

@@ -82,9 +82,9 @@ extern struct pr_usrreqs sctp_usrreqs;

#define sctp_maxspace(sb) (max((sb)->sb_hiwat,SCTP_MINIMAL_RWND))

#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_cc) ? (sctp_maxspace(sb) - (asoc)->sb_cc) : 0))
#define sctp_sbspace(asoc, sb) ((long) ((sctp_maxspace(sb) > (asoc)->sb_ccc) ? (sctp_maxspace(sb) - (asoc)->sb_ccc) : 0))

#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_cc) ? (sctp_maxspace(sb) - (sb)->sb_cc) : 0))
#define sctp_sbspace_failedmsgs(sb) ((long) ((sctp_maxspace(sb) > (sb)->sb_ccc) ? (sctp_maxspace(sb) - (sb)->sb_ccc) : 0))

#define sctp_sbspace_sub(a,b) ((a > b) ? (a - b) : 0)

@@ -195,10 +195,10 @@ extern struct pr_usrreqs sctp_usrreqs;
}

#define sctp_sbfree(ctl, stcb, sb, m) { \
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_cc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_ccc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(sb)->sb_mbcnt, MSIZE); \
if (((ctl)->do_not_ref_stcb == 0) && stcb) {\
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_cc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.sb_ccc, SCTP_BUF_LEN((m))); \
SCTP_SAVE_ATOMIC_DECREMENT(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
} \
if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
@@ -207,10 +207,10 @@ extern struct pr_usrreqs sctp_usrreqs;
}

#define sctp_sballoc(stcb, sb, m) { \
atomic_add_int(&(sb)->sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(sb)->sb_ccc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(sb)->sb_mbcnt, MSIZE); \
if (stcb) { \
atomic_add_int(&(stcb)->asoc.sb_cc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(stcb)->asoc.sb_ccc,SCTP_BUF_LEN((m))); \
atomic_add_int(&(stcb)->asoc.my_rwnd_control_len, MSIZE); \
} \
if (SCTP_BUF_TYPE(m) != MT_DATA && SCTP_BUF_TYPE(m) != MT_HEADER && \
@@ -67,9 +67,9 @@ sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
struct sctp_cwnd_log sctp_clog;

sctp_clog.x.sb.stcb = stcb;
sctp_clog.x.sb.so_sbcc = sb->sb_cc;
sctp_clog.x.sb.so_sbcc = sb->sb_ccc;
if (stcb)
sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_ccc;
else
sctp_clog.x.sb.stcb_sbcc = 0;
sctp_clog.x.sb.incr = incr;
@@ -4363,7 +4363,7 @@ sctp_add_to_readq(struct sctp_inpcb *inp,
{
/*
* Here we must place the control on the end of the socket read
* queue AND increment sb_cc so that select will work properly on
* queue AND increment sb_ccc so that select will work properly on
* read.
*/
struct mbuf *m, *prev = NULL;
@@ -4489,7 +4489,7 @@ sctp_append_to_readq(struct sctp_inpcb *inp,
* the reassembly queue.
*
* If PDAPI this means we need to add m to the end of the data.
* Increase the length in the control AND increment the sb_cc.
* Increase the length in the control AND increment the sb_ccc.
* Otherwise sb is NULL and all we need to do is put it at the end
* of the mbuf chain.
*/
@@ -4701,10 +4701,10 @@ sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,

if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
if (stcb->sctp_socket->so_snd.sb_ccc >= tp1->book_size) {
stcb->sctp_socket->so_snd.sb_ccc -= tp1->book_size;
} else {
stcb->sctp_socket->so_snd.sb_cc = 0;
stcb->sctp_socket->so_snd.sb_ccc = 0;

}
}
@@ -5254,11 +5254,11 @@ sctp_sorecvmsg(struct socket *so,
in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SORECV_ENTER,
rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid);
rwnd_req, in_eeor_mode, so->so_rcv.sb_ccc, uio->uio_resid);
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_SORECV_ENTERPL,
rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid);
rwnd_req, block_allowed, so->so_rcv.sb_ccc, uio->uio_resid);
}
error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
if (error) {
@@ -5277,23 +5277,23 @@ sctp_sorecvmsg(struct socket *so,
(inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
goto out;
}
if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) {
if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_ccc == 0)) {
if (so->so_error) {
error = so->so_error;
if ((in_flags & MSG_PEEK) == 0)
so->so_error = 0;
goto out;
} else {
if (so->so_rcv.sb_cc == 0) {
if (so->so_rcv.sb_ccc == 0) {
/* indicate EOF */
error = 0;
goto out;
}
}
}
if ((so->so_rcv.sb_cc <= held_length) && block_allowed) {
if ((so->so_rcv.sb_ccc <= held_length) && block_allowed) {
/* we need to wait for data */
if ((so->so_rcv.sb_cc == 0) &&
if ((so->so_rcv.sb_ccc == 0) &&
((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
(inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) {
if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) {
@@ -5329,7 +5329,7 @@ sctp_sorecvmsg(struct socket *so,
}
held_length = 0;
goto restart_nosblocks;
} else if (so->so_rcv.sb_cc == 0) {
} else if (so->so_rcv.sb_ccc == 0) {
if (so->so_error) {
error = so->so_error;
if ((in_flags & MSG_PEEK) == 0)
@@ -5386,11 +5386,11 @@ sctp_sorecvmsg(struct socket *so,
SCTP_INP_READ_LOCK(inp);
}
control = TAILQ_FIRST(&inp->read_queue);
if ((control == NULL) && (so->so_rcv.sb_cc != 0)) {
if ((control == NULL) && (so->so_rcv.sb_ccc != 0)) {
#ifdef INVARIANTS
panic("Huh, its non zero and nothing on control?");
#endif
so->so_rcv.sb_cc = 0;
so->so_rcv.sb_ccc = 0;
}
SCTP_INP_READ_UNLOCK(inp);
hold_rlock = 0;
@@ -5511,11 +5511,11 @@ sctp_sorecvmsg(struct socket *so,
}
/*
* if we reach here, not suitable replacement is available
* <or> fragment interleave is NOT on. So stuff the sb_cc
* <or> fragment interleave is NOT on. So stuff the sb_ccc
* into the our held count, and its time to sleep again.
*/
held_length = so->so_rcv.sb_cc;
control->held_length = so->so_rcv.sb_cc;
held_length = so->so_rcv.sb_ccc;
control->held_length = so->so_rcv.sb_ccc;
goto restart;
}
/* Clear the held length since there is something to read */
@@ -5812,10 +5812,10 @@ sctp_sorecvmsg(struct socket *so,
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, cp_len);
}
atomic_subtract_int(&so->so_rcv.sb_cc, cp_len);
atomic_subtract_int(&so->so_rcv.sb_ccc, cp_len);
if ((control->do_not_ref_stcb == 0) &&
stcb) {
atomic_subtract_int(&stcb->asoc.sb_cc, cp_len);
atomic_subtract_int(&stcb->asoc.sb_ccc, cp_len);
}
copied_so_far += cp_len;
freed_so_far += cp_len;
@@ -5960,7 +5960,7 @@ sctp_sorecvmsg(struct socket *so,
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
goto release;
}
if (so->so_rcv.sb_cc <= control->held_length) {
if (so->so_rcv.sb_ccc <= control->held_length) {
error = sbwait(&so->so_rcv);
if (error) {
goto release;
@@ -5987,8 +5987,8 @@ sctp_sorecvmsg(struct socket *so,
}
goto done_with_control;
}
if (so->so_rcv.sb_cc > held_length) {
control->held_length = so->so_rcv.sb_cc;
if (so->so_rcv.sb_ccc > held_length) {
control->held_length = so->so_rcv.sb_ccc;
held_length = 0;
}
goto wait_some_more;
@@ -6135,13 +6135,13 @@ sctp_sorecvmsg(struct socket *so,
freed_so_far,
((uio) ? (slen - uio->uio_resid) : slen),
stcb->asoc.my_rwnd,
so->so_rcv.sb_cc);
so->so_rcv.sb_ccc);
} else {
sctp_misc_ints(SCTP_SORECV_DONE,
freed_so_far,
((uio) ? (slen - uio->uio_resid) : slen),
0,
so->so_rcv.sb_cc);
so->so_rcv.sb_ccc);
}
}
stage_left:
@@ -286,10 +286,10 @@ do { \
} \
if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) { \
atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_cc), tp1->book_size); \
if (stcb->sctp_socket->so_snd.sb_ccc >= tp1->book_size) { \
atomic_subtract_int(&((stcb)->sctp_socket->so_snd.sb_ccc), tp1->book_size); \
} else { \
stcb->sctp_socket->so_snd.sb_cc = 0; \
stcb->sctp_socket->so_snd.sb_ccc = 0; \
} \
} \
} \
@@ -307,10 +307,10 @@ do { \
} \
if (stcb->sctp_socket && ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
if (stcb->sctp_socket->so_snd.sb_cc >= sp->length) { \
atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_cc,sp->length); \
if (stcb->sctp_socket->so_snd.sb_ccc >= sp->length) { \
atomic_subtract_int(&stcb->sctp_socket->so_snd.sb_ccc,sp->length); \
} else { \
stcb->sctp_socket->so_snd.sb_cc = 0; \
stcb->sctp_socket->so_snd.sb_ccc = 0; \
} \
} \
} \
@@ -322,7 +322,7 @@ do { \
if ((stcb->sctp_socket != NULL) && \
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || \
(stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { \
atomic_add_int(&stcb->sctp_socket->so_snd.sb_cc,sz); \
atomic_add_int(&stcb->sctp_socket->so_snd.sb_ccc,sz); \
} \
} while (0)
@@ -884,12 +884,10 @@ tcp_input(struct mbuf **mp, int *offp, int proto)
goto dropwithreset;
}
INP_WLOCK_ASSERT(inp);
if (!(inp->inp_flags & INP_HW_FLOWID)
&& (m->m_flags & M_FLOWID)
&& ((inp->inp_socket == NULL)
|| !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
inp->inp_flags |= INP_HW_FLOWID;
inp->inp_flags &= ~INP_SW_FLOWID;
if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
(M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
((inp->inp_socket == NULL) ||
(inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
inp->inp_flowid = m->m_pkthdr.flowid;
inp->inp_flowtype = M_HASHTYPE_GET(m);
}
@@ -1855,7 +1853,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
newsize, so, NULL))
so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
m_adj(m, drop_hdrlen); /* delayed header drop */
sbappendstream_locked(&so->so_rcv, m);
sbappendstream_locked(&so->so_rcv, m, 0);
}
/* NB: sorwakeup_locked() does an implicit unlock. */
sorwakeup_locked(so);
@@ -2882,7 +2880,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
m_freem(m);
else
sbappendstream_locked(&so->so_rcv, m);
sbappendstream_locked(&so->so_rcv, m, 0);
/* NB: sorwakeup_locked() does an implicit unlock. */
sorwakeup_locked(so);
} else {
@@ -262,7 +262,7 @@ tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
m_freem(mq);
else {
mq->m_nextpkt = NULL;
sbappendstream_locked(&so->so_rcv, mq);
sbappendstream_locked(&so->so_rcv, mq, 0);
wakeup = 1;
}
}
@@ -713,9 +713,7 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
* If there's an mbuf and it has a flowid, then let's initialise the
* inp with that particular flowid.
*/
if (m != NULL && m->m_flags & M_FLOWID) {
inp->inp_flags |= INP_HW_FLOWID;
inp->inp_flags &= ~INP_SW_FLOWID;
if (m != NULL && M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
inp->inp_flowid = m->m_pkthdr.flowid;
inp->inp_flowtype = M_HASHTYPE_GET(m);
}
@@ -821,7 +821,11 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
if (control)
m_freem(control);
if (m)
/*
* In case of PRUS_NOTREADY, tcp_usr_ready() is responsible
* for freeing memory.
*/
if (m && (flags & PRUS_NOTREADY) == 0)
m_freem(m);
error = ECONNRESET;
goto out;
@@ -843,7 +847,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
m_freem(control); /* empty control, just free it */
}
if (!(flags & PRUS_OOB)) {
sbappendstream(&so->so_snd, m);
sbappendstream(&so->so_snd, m, flags);
if (nam && tp->t_state < TCPS_SYN_SENT) {
/*
* Do implied connect if not yet connected,
@@ -875,7 +879,8 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
socantsendmore(so);
tcp_usrclosed(tp);
}
if (!(inp->inp_flags & INP_DROPPED)) {
if (!(inp->inp_flags & INP_DROPPED) &&
!(flags & PRUS_NOTREADY)) {
if (flags & PRUS_MORETOCOME)
tp->t_flags |= TF_MORETOCOME;
error = tcp_output(tp);
@@ -901,7 +906,7 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
* of data past the urgent section.
* Otherwise, snd_up should be one lower.
*/
sbappendstream_locked(&so->so_snd, m);
sbappendstream_locked(&so->so_snd, m, flags);
SOCKBUF_UNLOCK(&so->so_snd);
if (nam && tp->t_state < TCPS_SYN_SENT) {
/*
@@ -925,11 +930,13 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
tp->snd_wnd = TTCP_CLIENT_SND_WND;
tcp_mss(tp, -1);
}
tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
tp->snd_up = tp->snd_una + sbavail(&so->so_snd);
if (!(flags & PRUS_NOTREADY)) {
tp->t_flags |= TF_FORCEDATA;
error = tcp_output(tp);
tp->t_flags &= ~TF_FORCEDATA;
}
}
out:
TCPDEBUG2((flags & PRUS_OOB) ? PRU_SENDOOB :
((flags & PRUS_EOF) ? PRU_SEND_EOF : PRU_SEND));
@@ -939,6 +946,33 @@ tcp_usr_send(struct socket *so, int flags, struct mbuf *m,
return (error);
}

static int
tcp_usr_ready(struct socket *so, struct mbuf *m, int count)
{
struct inpcb *inp;
struct tcpcb *tp;
int error;

inp = sotoinpcb(so);
INP_WLOCK(inp);
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
INP_WUNLOCK(inp);
for (int i = 0; i < count; i++)
m = m_free(m);
return (ECONNRESET);
}
tp = intotcpcb(inp);

SOCKBUF_LOCK(&so->so_snd);
error = sbready(&so->so_snd, m, count);
SOCKBUF_UNLOCK(&so->so_snd);
if (error == 0)
error = tcp_output(tp);
INP_WUNLOCK(inp);

return (error);
}

/*
* Abort the TCP. Drop the connection abruptly.
*/
@@ -1073,6 +1107,7 @@ struct pr_usrreqs tcp_usrreqs = {
.pru_rcvd = tcp_usr_rcvd,
.pru_rcvoob = tcp_usr_rcvoob,
.pru_send = tcp_usr_send,
.pru_ready = tcp_usr_ready,
.pru_shutdown = tcp_usr_shutdown,
.pru_sockaddr = in_getsockaddr,
.pru_sosetlabel = in_pcbsosetlabel,
@@ -1095,6 +1130,7 @@ struct pr_usrreqs tcp6_usrreqs = {
.pru_rcvd = tcp_usr_rcvd,
.pru_rcvoob = tcp_usr_rcvoob,
.pru_send = tcp_usr_send,
.pru_ready = tcp_usr_ready,
.pru_shutdown = tcp_usr_shutdown,
.pru_sockaddr = in6_mapped_sockaddr,
.pru_sosetlabel = in_pcbsosetlabel,
@@ -1106,8 +1106,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
uint8_t pr;
uint16_t cscov = 0;
uint32_t flowid = 0;
int flowid_type = 0;
int use_flowid = 0;
uint8_t flowtype = M_HASHTYPE_NONE;

/*
* udp_output() may need to temporarily bind or connect the current
@@ -1184,8 +1183,7 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
error = EINVAL;
break;
}
flowid_type = *(uint32_t *) CMSG_DATA(cm);
use_flowid = 1;
flowtype = *(uint32_t *) CMSG_DATA(cm);
break;

#ifdef RSS
@@ -1451,10 +1449,9 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
* Once the UDP code decides to set a flowid some other way,
* this allows the flowid to be overridden by userland.
*/
if (use_flowid) {
m->m_flags |= M_FLOWID;
if (flowtype != M_HASHTYPE_NONE) {
m->m_pkthdr.flowid = flowid;
M_HASHTYPE_SET(m, flowid_type);
M_HASHTYPE_SET(m, flowtype);
#ifdef RSS
} else {
uint32_t hash_val, hash_type;
@@ -1477,7 +1474,6 @@ udp_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr,
if (rss_proto_software_hash_v4(faddr, laddr, fport, lport,
pr, &hash_val, &hash_type) == 0) {
m->m_pkthdr.flowid = hash_val;
m->m_flags |= M_FLOWID;
M_HASHTYPE_SET(m, hash_type);
}
#endif
@@ -1252,7 +1252,7 @@ in6_pcblookup_mbuf(struct inpcbinfo *pcbinfo, struct in6_addr *faddr,
* XXXRW: As above, that policy belongs in the pcbgroup code.
*/
if (in_pcbgroup_enabled(pcbinfo) &&
!(M_HASHTYPE_TEST(m, M_HASHTYPE_NONE))) {
M_HASHTYPE_TEST(m, M_HASHTYPE_NONE) == 0) {
pcbgroup = in6_pcbgroup_byhash(pcbinfo, M_HASHTYPE_GET(m),
m->m_pkthdr.flowid);
if (pcbgroup != NULL)
@@ -267,10 +267,10 @@ ip6_output(struct mbuf *m0, struct ip6_pktopts *opt,

if (inp != NULL) {
M_SETFIB(m, inp->inp_inc.inc_fibnum);
if (((flags & IP_NODEFAULTFLOWID) == 0) &&
(inp->inp_flags & (INP_HW_FLOWID|INP_SW_FLOWID))) {
if ((flags & IP_NODEFAULTFLOWID) == 0) {
/* unconditionally set flowid */
m->m_pkthdr.flowid = inp->inp_flowid;
m->m_flags |= M_FLOWID;
M_HASHTYPE_SET(m, inp->inp_flowtype);
}
}

@@ -843,7 +843,6 @@ udp6_output(struct inpcb *inp, struct mbuf *m, struct sockaddr *addr6,
*/
#ifdef RSS
m->m_pkthdr.flowid = rss_hash_ip6_2tuple(*faddr, *laddr);
m->m_flags |= M_FLOWID;
M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6);
#endif
flags = 0;
@@ -889,7 +889,7 @@ sdp_append(struct sdp_sock *ssk, struct sockbuf *sb, struct mbuf *mb, int cnt)
m_adj(mb, SDP_HEAD_SIZE);
n->m_pkthdr.len += mb->m_pkthdr.len;
n->m_flags |= mb->m_flags & (M_PUSH | M_URG);
m_demote(mb, 1);
m_demote(mb, 1, 0);
sbcompress(sb, mb, sb->sb_mbtail);
return;
}
@@ -604,7 +604,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
}

mb->m_pkthdr.flowid = cq->ring;
mb->m_flags |= M_FLOWID;
M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
mb->m_pkthdr.rcvif = dev;
if (be32_to_cpu(cqe->vlan_my_qpn) &
MLX4_CQE_VLAN_PRESENT_MASK) {
@@ -720,7 +720,10 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb)
up = (vlan_tag >> 13);
}

/* hash mbuf */
/* check if flowid is set */
if (M_HASHTYPE_GET(mb) != M_HASHTYPE_NONE)
queue_index = mb->m_pkthdr.flowid;
else
queue_index = mlx4_en_hashmbuf(MLX4_F_HASHL3 | MLX4_F_HASHL4, mb, hashrandom);

return ((queue_index % rings_p_up) + (up * rings_p_up));
@@ -1066,15 +1069,11 @@ mlx4_en_transmit(struct ifnet *dev, struct mbuf *m)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_tx_ring *ring;
struct mlx4_en_cq *cq;
int i = 0, err = 0;
int i, err = 0;

/* Which queue to use */
if ((m->m_flags & (M_FLOWID | M_VLANTAG)) == M_FLOWID) {
i = m->m_pkthdr.flowid % (priv->tx_ring_num - 1);
}
else {
/* Compute which queue to use */
i = mlx4_en_select_queue(dev, m);
}

ring = priv->tx_ring[i];

if (spin_trylock(&ring->tx_lock)) {
@@ -757,6 +757,10 @@ db_trap_glue(struct trapframe *frame)
|| frame->exc == EXC_BPT
|| frame->exc == EXC_DSI)) {
int type = frame->exc;

/* Ignore DTrace traps. */
if (*(uint32_t *)frame->srr0 == EXC_DTRACE)
return (0);
if (type == EXC_PGM && (frame->srr1 & 0x20000)) {
type = T_BREAKPOINT;
}
@@ -258,7 +258,8 @@ trap(struct trapframe *frame)
if (frame->srr1 & EXC_PGM_TRAP) {
#ifdef KDTRACE_HOOKS
inst = fuword32((const void *)frame->srr0);
if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) {
if (inst == 0x0FFFDDDD &&
dtrace_pid_probe_ptr != NULL) {
struct reg regs;
fill_regs(td, &regs);
(*dtrace_pid_probe_ptr)(&regs);
@@ -301,7 +302,7 @@ trap(struct trapframe *frame)
#ifdef KDTRACE_HOOKS
case EXC_PGM:
if (frame->srr1 & EXC_PGM_TRAP) {
if (*(uint32_t *)frame->srr0 == 0x7c810808) {
if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
if (dtrace_invop_jump_addr != NULL) {
dtrace_invop_jump_addr(frame);
return;
@@ -890,8 +890,7 @@ CNAME(dblow):
mfcr %r29 /* save CR in r29 */
mfsrr1 %r1
mtcr %r1
bf 17,2f /* branch if privileged */
1:
bf 17,1f /* branch if privileged */
/* Unprivileged case */
mtcr %r29 /* put the condition register back */
mfsprg2 %r29 /* ... and r29 */
@@ -900,19 +899,7 @@ CNAME(dblow):
li %r1, 0 /* How to get the vector from LR */

bla generictrap /* and we look like a generic trap */
2:
#ifdef KDTRACE_HOOKS
/* Privileged, so drop to KDB */
mfsrr0 %r1
mtsprg3 %r3
lwz %r1,0(%r1)
/* Check if it's a DTrace trap. */
li %r3,0x0808
addis %r3,%r3,0x7c81
cmplw %cr0,%r3,%r1
mfsprg3 %r3
beq %cr0,1b
#endif
1:
/* Privileged, so drop to KDB */
GET_CPUINFO(%r1)
stw %r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */

@@ -799,9 +799,8 @@ CNAME(dblow):
mfcr %r29 /* save CR in r29 */
mfsrr1 %r1
mtcr %r1
bf 17,2f /* branch if privileged */
bf 17,1f /* branch if privileged */

1:
/* Unprivileged case */
mtcr %r29 /* put the condition register back */
mfsprg2 %r29 /* ... and r29 */
@@ -810,19 +809,7 @@ CNAME(dblow):
li %r1, 0 /* How to get the vector from LR */

bla generictrap /* and we look like a generic trap */
2:
#ifdef KDTRACE_HOOKS
/* Privileged, so drop to KDB */
mfsrr0 %r1
mtsprg3 %r3
lwz %r1,0(%r1)
/* Check if it's a DTrace trap. */
li %r3,0x0808
addis %r3,%r3,0x7c81
cmplw %cr0,%r3,%r1
mfsprg3 %r3
beq %cr0,1b
#endif
1:
GET_CPUINFO(%r1)
std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r1) /* free r27 */
std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r1) /* free r28 */
@@ -120,6 +120,9 @@
#define EXC_PGM_PRIV (1UL << 18)
#define EXC_PGM_TRAP (1UL << 17)

/* DTrace trap opcode. */
#define EXC_DTRACE 0x7c810808

#ifndef LOCORE
struct trapframe;
struct pcb;
Some files were not shown because too many files have changed in this diff.