freebsd-dev/sys/netinet6/ip6_input.c

/*-
* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the project nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $KAME: ip6_input.c,v 1.259 2002/01/21 04:58:09 jinmei Exp $
*/
/*-
* Copyright (c) 1982, 1986, 1988, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)ip_input.c 8.2 (Berkeley) 1/4/94
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_route.h"
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/netisr.h>
#include <net/rss_config.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/ip_var.h>
#include <netinet/in_systm.h>
#include <net/if_llatbl.h>
#ifdef INET
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#endif /* INET */
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet/in_pcb.h>
#include <netinet/icmp6.h>
#include <netinet6/scope6_var.h>
#include <netinet6/in6_ifattach.h>
#include <netinet6/mld6_var.h>
#include <netinet6/nd6.h>
#include <netinet6/in6_rss.h>
#include <netipsec/ipsec_support.h>
#include <netinet6/ip6protosw.h>
extern struct domain inet6domain;
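/*
* ip6_protox[] maps an IPv6 protocol number to an index into inet6sw[].
* Every slot is initialized to the raw IPv6 handler in ip6_init() and
* then overridden for protocols that provide their own input routine.
*/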
u_char ip6_protox[IPPROTO_MAX];
VNET_DEFINE(struct in6_ifaddrhead, in6_ifaddrhead);
VNET_DEFINE(struct in6_ifaddrlisthead *, in6_ifaddrhashtbl);
VNET_DEFINE(u_long, in6_ifaddrhmask);
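/*
* netisr(9) registration for IPv6. With RSS, the packet's software
* RSS hash selects the CPU for hybrid dispatch; otherwise packets
* are ordered per flow.
*/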
static struct netisr_handler ip6_nh = {
.nh_name = "ip6",
.nh_handler = ip6_input,
.nh_proto = NETISR_IPV6,
#ifdef RSS
.nh_m2cpuid = rss_soft_m2cpuid_v6,
.nh_policy = NETISR_POLICY_CPU,
.nh_dispatch = NETISR_DISPATCH_HYBRID,
#else
.nh_policy = NETISR_POLICY_FLOW,
#endif
};
static int
sysctl_netinet6_intr_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
int error, qlimit;
netisr_getqlimit(&ip6_nh, &qlimit);
error = sysctl_handle_int(oidp, &qlimit, 0, req);
if (error || !req->newptr)
return (error);
if (qlimit < 1)
return (EINVAL);
return (netisr_setqlimit(&ip6_nh, qlimit));
}
SYSCTL_DECL(_net_inet6_ip6);
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_INTRQMAXLEN, intr_queue_maxlen,
CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet6_intr_queue_maxlen, "I",
"Maximum size of the IPv6 input queue");
#ifdef RSS
static struct netisr_handler ip6_direct_nh = {
.nh_name = "ip6_direct",
.nh_handler = ip6_direct_input,
.nh_proto = NETISR_IPV6_DIRECT,
.nh_m2cpuid = rss_soft_m2cpuid_v6,
.nh_policy = NETISR_POLICY_CPU,
.nh_dispatch = NETISR_DISPATCH_HYBRID,
};
static int
sysctl_netinet6_intr_direct_queue_maxlen(SYSCTL_HANDLER_ARGS)
{
int error, qlimit;
netisr_getqlimit(&ip6_direct_nh, &qlimit);
error = sysctl_handle_int(oidp, &qlimit, 0, req);
if (error || !req->newptr)
return (error);
if (qlimit < 1)
return (EINVAL);
return (netisr_setqlimit(&ip6_direct_nh, qlimit));
}
SYSCTL_PROC(_net_inet6_ip6, IPV6CTL_INTRDQMAXLEN, intr_direct_queue_maxlen,
CTLTYPE_INT|CTLFLAG_RW, 0, 0, sysctl_netinet6_intr_direct_queue_maxlen,
"I", "Maximum size of the IPv6 direct input queue");
#endif
VNET_DEFINE(struct pfil_head, inet6_pfil_hook);
VNET_PCPUSTAT_DEFINE(struct ip6stat, ip6stat);
VNET_PCPUSTAT_SYSINIT(ip6stat);
#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(ip6stat);
#endif /* VIMAGE */
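/*
* Read-mostly lock protecting the IPv6 address list and hash table
* defined above.
*/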
struct rmlock in6_ifaddr_lock;
RM_SYSINIT(in6_ifaddr_lock, &in6_ifaddr_lock, "in6_ifaddr_lock");
static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
#ifdef PULLDOWN_TEST
static struct mbuf *ip6_pullexthdr(struct mbuf *, size_t, int);
#endif
/*
* IP6 initialization: fill in the IP6 protocol switch table.
* All protocols not implemented in the kernel go to the raw IP6 protocol
* handler.
*/
void
ip6_init(void)
{
struct protosw *pr;
int i;
TUNABLE_INT_FETCH("net.inet6.ip6.auto_linklocal",
&V_ip6_auto_linklocal);
TUNABLE_INT_FETCH("net.inet6.ip6.accept_rtadv", &V_ip6_accept_rtadv);
TUNABLE_INT_FETCH("net.inet6.ip6.no_radr", &V_ip6_no_radr);
CK_STAILQ_INIT(&V_in6_ifaddrhead);
V_in6_ifaddrhashtbl = hashinit(IN6ADDR_NHASH, M_IFADDR,
&V_in6_ifaddrhmask);
/* Initialize packet filter hooks. */
V_inet6_pfil_hook.ph_type = PFIL_TYPE_AF;
V_inet6_pfil_hook.ph_af = AF_INET6;
if ((i = pfil_head_register(&V_inet6_pfil_hook)) != 0)
printf("%s: WARNING: unable to register pfil hook, "
"error %d\n", __func__, i);
if (hhook_head_register(HHOOK_TYPE_IPSEC_IN, AF_INET6,
&V_ipsec_hhh_in[HHOOK_IPSEC_INET6],
HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0)
printf("%s: WARNING: unable to register input helper hook\n",
__func__);
if (hhook_head_register(HHOOK_TYPE_IPSEC_OUT, AF_INET6,
&V_ipsec_hhh_out[HHOOK_IPSEC_INET6],
HHOOK_WAITOK | HHOOK_HEADISINVNET) != 0)
printf("%s: WARNING: unable to register output helper hook\n",
__func__);
scope6_init();
addrsel_policy_init();
nd6_init();
frag6_init();
V_ip6_desync_factor = arc4random() % MAX_TEMP_DESYNC_FACTOR;
/* Skip global initialization stuff for non-default instances. */
#ifdef VIMAGE
if (!IS_DEFAULT_VNET(curvnet)) {
netisr_register_vnet(&ip6_nh);
#ifdef RSS
netisr_register_vnet(&ip6_direct_nh);
#endif
return;
}
#endif
pr = pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
panic("ip6_init");
/* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
for (i = 0; i < IPPROTO_MAX; i++)
ip6_protox[i] = pr - inet6sw;
/*
* Cycle through IP protocols and put them into the appropriate place
* in ip6_protox[].
*/
for (pr = inet6domain.dom_protosw;
pr < inet6domain.dom_protoswNPROTOSW; pr++)
if (pr->pr_domain->dom_family == PF_INET6 &&
pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) {
/* Be careful to only index valid IP protocols. */
if (pr->pr_protocol < IPPROTO_MAX)
ip6_protox[pr->pr_protocol] = pr - inet6sw;
}
netisr_register(&ip6_nh);
#ifdef RSS
netisr_register(&ip6_direct_nh);
#endif
}
/*
* The protocol to be inserted into ip6_protox[] must already be registered
* in inet6sw[], either statically or through pf_proto_register().
*/
int
ip6proto_register(short ip6proto)
{
struct protosw *pr;
/* Sanity checks. */
if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
return (EPROTONOSUPPORT);
/*
* The protocol slot must not already be occupied by another
* protocol. An index pointing to IPPROTO_RAW marks an unused slot.
*/
pr = pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
return (EPFNOSUPPORT);
if (ip6_protox[ip6proto] != pr - inet6sw) /* IPPROTO_RAW */
return (EEXIST);
/*
* Find the protocol position in inet6sw[] and set the index.
*/
for (pr = inet6domain.dom_protosw;
pr < inet6domain.dom_protoswNPROTOSW; pr++) {
if (pr->pr_domain->dom_family == PF_INET6 &&
pr->pr_protocol && pr->pr_protocol == ip6proto) {
ip6_protox[pr->pr_protocol] = pr - inet6sw;
return (0);
}
}
return (EPROTONOSUPPORT);
}
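/*
* Illustrative (hypothetical) use: a protocol module whose protosw
* entry is already present in inet6sw[] could claim its slot with
*
*	error = ip6proto_register(IPPROTO_FOO);
*
* and give it back with ip6proto_unregister(IPPROTO_FOO) on unload.
* IPPROTO_FOO is a placeholder, not a real protocol number.
*/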
int
ip6proto_unregister(short ip6proto)
{
struct protosw *pr;
/* Sanity checks. */
if (ip6proto <= 0 || ip6proto >= IPPROTO_MAX)
return (EPROTONOSUPPORT);
/* Check if the protocol was indeed registered. */
pr = pffindproto(PF_INET6, IPPROTO_RAW, SOCK_RAW);
if (pr == NULL)
return (EPFNOSUPPORT);
if (ip6_protox[ip6proto] == pr - inet6sw) /* IPPROTO_RAW */
return (ENOENT);
/* Reset the protocol slot to IPPROTO_RAW. */
ip6_protox[ip6proto] = pr - inet6sw;
return (0);
}
#ifdef VIMAGE
static void
ip6_destroy(void *unused __unused)
{
struct ifaddr *ifa, *nifa;
struct ifnet *ifp;
int error;
#ifdef RSS
netisr_unregister_vnet(&ip6_direct_nh);
#endif
netisr_unregister_vnet(&ip6_nh);
if ((error = pfil_head_unregister(&V_inet6_pfil_hook)) != 0)
printf("%s: WARNING: unable to unregister pfil hook, "
"error %d\n", __func__, error);
error = hhook_head_deregister(V_ipsec_hhh_in[HHOOK_IPSEC_INET6]);
if (error != 0) {
printf("%s: WARNING: unable to deregister input helper hook "
"type HHOOK_TYPE_IPSEC_IN, id HHOOK_IPSEC_INET6: "
"error %d returned\n", __func__, error);
}
error = hhook_head_deregister(V_ipsec_hhh_out[HHOOK_IPSEC_INET6]);
if (error != 0) {
printf("%s: WARNING: unable to deregister output helper hook "
"type HHOOK_TYPE_IPSEC_OUT, id HHOOK_IPSEC_INET6: "
"error %d returned\n", __func__, error);
}
/* Cleanup addresses. */
IFNET_RLOCK();
CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
/* Cannot lock here - lock recursion. */
/* IF_ADDR_LOCK(ifp); */
CK_STAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, nifa) {
if (ifa->ifa_addr->sa_family != AF_INET6)
continue;
in6_purgeaddr(ifa);
}
/* IF_ADDR_UNLOCK(ifp); */
in6_ifdetach_destroy(ifp);
mld_domifdetach(ifp);
/* Make sure any routes are gone as well. */
rt_flushifroutes_af(ifp, AF_INET6);
}
IFNET_RUNLOCK();
nd6_destroy();
in6_ifattach_destroy();
hashdestroy(V_in6_ifaddrhashtbl, M_IFADDR, V_in6_ifaddrhmask);
}
VNET_SYSUNINIT(inet6, SI_SUB_PROTO_DOMAIN, SI_ORDER_THIRD, ip6_destroy, NULL);
#endif
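/*
* Process the Hop-by-Hop Options header that immediately follows the
* IPv6 header. On success return 0 with *plen, *rtalert, *off and
* *nxt updated (*ours is set when a router alert for MLD is seen and
* we are forwarding). Return 1 if the packet was consumed and freed.
*/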
static int
ip6_input_hbh(struct mbuf *m, uint32_t *plen, uint32_t *rtalert, int *off,
int *nxt, int *ours)
{
struct ip6_hdr *ip6;
struct ip6_hbh *hbh;
if (ip6_hopopts_input(plen, rtalert, &m, off)) {
#if 0 /* touches NULL pointer */
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
#endif
goto out; /* m has already been freed */
}
/* adjust pointer */
ip6 = mtod(m, struct ip6_hdr *);
/*
* If the payload length field is 0 and the next header field
* indicates a Hop-by-Hop Options header, then a Jumbo Payload
* option MUST be included.
*/
if (ip6->ip6_plen == 0 && *plen == 0) {
/*
* Note that if a valid Jumbo Payload option is
* present, ip6_hopopts_input() must have stored a valid
* (non-zero) payload length in the variable plen.
*/
IP6STAT_INC(ip6s_badoptions);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_discard);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
(caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
goto out;
}
#ifndef PULLDOWN_TEST
/* ip6_hopopts_input() ensures that mbuf is contiguous */
hbh = (struct ip6_hbh *)(ip6 + 1);
#else
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
sizeof(struct ip6_hbh));
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
goto out;
}
#endif
*nxt = hbh->ip6h_nxt;
/*
* If we are acting as a router and the packet contains a
* router alert option, see if we know the option value.
* Currently, we only support the option value for MLD, in which
* case we should pass the packet to the multicast routing
* daemon.
*/
if (*rtalert != ~0) {
switch (*rtalert) {
case IP6OPT_RTALERT_MLD:
if (V_ip6_forwarding)
*ours = 1;
break;
default:
/*
* RFC 2711 requires that unrecognized values be
* silently ignored.
*/
break;
}
}
return (0);
out:
return (1);
}
#ifdef RSS
/*
* IPv6 direct input routine.
*
* This is called when reinjecting completed fragments where
* all of the previous checking and book-keeping has been done.
*/
void
ip6_direct_input(struct mbuf *m)
{
int off, nxt;
int nest;
struct m_tag *mtag;
struct ip6_direct_ctx *ip6dc;
mtag = m_tag_locate(m, MTAG_ABI_IPV6, IPV6_TAG_DIRECT, NULL);
KASSERT(mtag != NULL, ("Reinjected packet w/o direct ctx tag!"));
ip6dc = (struct ip6_direct_ctx *)(mtag + 1);
nxt = ip6dc->ip6dc_nxt;
off = ip6dc->ip6dc_off;
nest = 0;
m_tag_delete(m, mtag);
while (nxt != IPPROTO_DONE) {
if (V_ip6_hdrnestlimit && (++nest > V_ip6_hdrnestlimit)) {
IP6STAT_INC(ip6s_toomanyhdr);
goto bad;
}
/*
* Protection against faulty packets; there should be
* more sanity checks in header chain processing.
*/
if (m->m_pkthdr.len < off) {
IP6STAT_INC(ip6s_tooshort);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_truncated);
goto bad;
}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
if (IPSEC_ENABLED(ipv6)) {
if (IPSEC_INPUT(ipv6, m, off, nxt) != 0)
return;
}
#endif /* IPSEC */
nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt);
}
return;
bad:
m_freem(m);
}
#endif
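/*
* IPv6 input routine, registered above as the NETISR_IPV6 handler.
* Performs mbuf accounting and basic header and address-scope
* validation before the packet is processed any further.
*/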
void
ip6_input(struct mbuf *m)
{
struct in6_addr odst;
struct ip6_hdr *ip6;
struct in6_ifaddr *ia;
struct ifnet *rcvif;
u_int32_t plen;
u_int32_t rtalert = ~0;
int off = sizeof(struct ip6_hdr), nest;
int nxt, ours = 0;
int srcrt = 0;
/*
* Drop the packet if IPv6 operation is disabled on the interface.
*/
rcvif = m->m_pkthdr.rcvif;
if ((ND_IFINFO(rcvif)->flags & ND6_IFF_IFDISABLED))
goto bad;
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
/*
* should the inner packet be considered authentic?
* see comment in ah4_input().
* NB: m cannot be NULL when passed to the input routine
*/
m->m_flags &= ~M_AUTHIPHDR;
m->m_flags &= ~M_AUTHIPDGM;
#endif /* IPSEC */
if (m->m_flags & M_FASTFWD_OURS) {
/*
* Firewall changed destination to local.
*/
ip6 = mtod(m, struct ip6_hdr *);
goto passin;
}
/*
* mbuf statistics
*/
if (m->m_flags & M_EXT) {
if (m->m_next)
IP6STAT_INC(ip6s_mext2m);
else
IP6STAT_INC(ip6s_mext1);
} else {
if (m->m_next) {
if (m->m_flags & M_LOOP) {
IP6STAT_INC(ip6s_m2m[V_loif->if_index]);
} else if (rcvif->if_index < IP6S_M2MMAX)
IP6STAT_INC(ip6s_m2m[rcvif->if_index]);
else
IP6STAT_INC(ip6s_m2m[0]);
} else
IP6STAT_INC(ip6s_m1);
}
in6_ifstat_inc(rcvif, ifs6_in_receive);
IP6STAT_INC(ip6s_total);
#ifndef PULLDOWN_TEST
/*
 * The L2 bridge code and some other code can return an mbuf chain
 * that does not conform to the KAME requirement.  Too bad.
* XXX: fails to join if interface MTU > MCLBYTES. jumbogram?
*/
if (m && m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
struct mbuf *n;
if (m->m_pkthdr.len > MHLEN)
n = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
else
n = m_gethdr(M_NOWAIT, MT_DATA);
if (n == NULL)
goto bad;
m_move_pkthdr(n, m);
m_copydata(m, 0, n->m_pkthdr.len, mtod(n, caddr_t));
n->m_len = n->m_pkthdr.len;
m_freem(m);
m = n;
}
IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), /* nothing */);
#endif
if (m->m_len < sizeof(struct ip6_hdr)) {
if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
IP6STAT_INC(ip6s_toosmall);
in6_ifstat_inc(rcvif, ifs6_in_hdrerr);
goto bad;
}
}
ip6 = mtod(m, struct ip6_hdr *);
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
IP6STAT_INC(ip6s_badvers);
in6_ifstat_inc(rcvif, ifs6_in_hdrerr);
goto bad;
}
IP6STAT_INC(ip6s_nxthist[ip6->ip6_nxt]);
IP_PROBE(receive, NULL, NULL, ip6, rcvif, NULL, ip6);
/*
* Check against address spoofing/corruption.
*/
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) ||
IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) {
/*
* XXX: "badscope" is not very suitable for a multicast source.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
!(m->m_flags & M_LOOP)) {
/*
* In this case, the packet should come from the loopback
* interface. However, we cannot just check the if_flags,
* because ip6_mloopback() passes the "actual" interface
* as the outgoing/incoming interface.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) &&
IPV6_ADDR_MC_SCOPE(&ip6->ip6_dst) == 0) {
/*
* RFC4291 2.7:
* Nodes must not originate a packet to a multicast address
* whose scop field contains the reserved value 0; if such
* a packet is received, it must be silently dropped.
*/
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
#ifdef ALTQ
if (altq_input != NULL && (*altq_input)(m, AF_INET6) == 0) {
/* packet is dropped by traffic conditioner */
return;
}
#endif
/*
 * The following check is not documented in the specs.  A malicious
 * party may be able to use an IPv4-mapped address to confuse the TCP/UDP
 * stack and bypass security checks (acting as if the packet came from
 * 127.0.0.1 by using the IPv6 source ::ffff:127.0.0.1).  Be cautious.
 *
 * This check chokes if we are in an SIIT cloud.  As none of the BSDs
 * support IPv4-less kernel compilation, we cannot support an SIIT
 * environment at all.  It therefore makes more sense for us to reject any
 * malicious packets in a non-SIIT environment than to attempt partial
 * support for an SIIT environment.
*/
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
#if 0
/*
* Reject packets with IPv4 compatible addresses (auto tunnel).
*
* The code forbids auto tunnel relay case in RFC1933 (the check is
* stronger than RFC1933). We may want to re-enable it if mech-xx
* is revised to forbid relaying case.
*/
if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) ||
IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_addrerr);
goto bad;
}
#endif
/*
 * Try to forward the packet, and continue with local input processing
 * if that fails.  ip6_tryforward() does inbound and outbound packet
 * firewall processing.  If the firewall decides that the destination
 * has become our local address, it sets the M_FASTFWD_OURS flag.  In
 * that case skip another round of inbound firewall processing and
 * update the ip6 pointer.
*/
if (V_ip6_forwarding != 0
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
&& (!IPSEC_ENABLED(ipv6) ||
IPSEC_CAPS(ipv6, m, IPSEC_CAP_OPERABLE) == 0)
#endif
) {
if ((m = ip6_tryforward(m)) == NULL)
return;
if (m->m_flags & M_FASTFWD_OURS) {
ip6 = mtod(m, struct ip6_hdr *);
goto passin;
}
}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
/*
* Bypass packet filtering for packets previously handled by IPsec.
*/
if (IPSEC_ENABLED(ipv6) &&
IPSEC_CAPS(ipv6, m, IPSEC_CAP_BYPASS_FILTER) != 0)
goto passin;
#endif
/*
* Run through list of hooks for input packets.
*
* NB: Beware of the destination address changing
* (e.g. by NAT rewriting). When this happens,
* tell ip6_forward to do the right thing.
*/
/* Jump over all PFIL processing if hooks are not active. */
if (!PFIL_HOOKED(&V_inet6_pfil_hook))
goto passin;
odst = ip6->ip6_dst;
if (pfil_run_hooks(&V_inet6_pfil_hook, &m,
m->m_pkthdr.rcvif, PFIL_IN, 0, NULL))
return;
if (m == NULL) /* consumed by filter */
return;
ip6 = mtod(m, struct ip6_hdr *);
srcrt = !IN6_ARE_ADDR_EQUAL(&odst, &ip6->ip6_dst);
if ((m->m_flags & (M_IP6_NEXTHOP | M_FASTFWD_OURS)) == M_IP6_NEXTHOP &&
m_tag_find(m, PACKET_TAG_IPFORWARD, NULL) != NULL) {
/*
* Directly ship the packet on. This allows forwarding
* packets originally destined to us to some other directly
* connected host.
*/
ip6_forward(m, 1);
return;
}
passin:
/*
* Disambiguate address scope zones (if there is ambiguity).
* We first make sure that the original source or destination address
* is not in our internal form for scoped addresses. Such addresses
* are not necessarily invalid spec-wise, but we cannot accept them due
* to the usage conflict.
* in6_setscope() then also checks and rejects the cases where src or
* dst are the loopback address and the receiving interface
* is not loopback.
*/
if (in6_clearscope(&ip6->ip6_src) || in6_clearscope(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope); /* XXX */
goto bad;
}
if (in6_setscope(&ip6->ip6_src, rcvif, NULL) ||
in6_setscope(&ip6->ip6_dst, rcvif, NULL)) {
IP6STAT_INC(ip6s_badscope);
goto bad;
}
if (m->m_flags & M_FASTFWD_OURS) {
m->m_flags &= ~M_FASTFWD_OURS;
ours = 1;
goto hbhcheck;
}
/*
* Multicast check. Assume packet is for us to avoid
* prematurely taking locks.
*/
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
ours = 1;
in6_ifstat_inc(rcvif, ifs6_in_mcast);
goto hbhcheck;
}
/*
* Unicast check
* XXX: For now we keep link-local IPv6 addresses with embedded
* scope zone id, therefore we use zero zoneid here.
*/
ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
if (ia != NULL) {
if (ia->ia6_flags & IN6_IFF_NOTREADY) {
char ip6bufs[INET6_ADDRSTRLEN];
char ip6bufd[INET6_ADDRSTRLEN];
/* address is not ready, so discard the packet. */
nd6log((LOG_INFO,
"ip6_input: packet to an unready address %s->%s\n",
ip6_sprintf(ip6bufs, &ip6->ip6_src),
ip6_sprintf(ip6bufd, &ip6->ip6_dst)));
ifa_free(&ia->ia_ifa);
goto bad;
}
/* Count the packet in the ip address stats */
counter_u64_add(ia->ia_ifa.ifa_ipackets, 1);
counter_u64_add(ia->ia_ifa.ifa_ibytes, m->m_pkthdr.len);
ifa_free(&ia->ia_ifa);
ours = 1;
goto hbhcheck;
}
/*
* Now there is no reason to process the packet if it's not our own
* and we're not a router.
*/
if (!V_ip6_forwarding) {
IP6STAT_INC(ip6s_cantforward);
goto bad;
}
hbhcheck:
/*
* Process Hop-by-Hop options header if it's contained.
* m may be modified in ip6_hopopts_input().
* If a JumboPayload option is included, plen will also be modified.
*/
plen = (u_int32_t)ntohs(ip6->ip6_plen);
if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
if (ip6_input_hbh(m, &plen, &rtalert, &off, &nxt, &ours) != 0)
return;
} else
nxt = ip6->ip6_nxt;
/*
* Use mbuf flags to propagate Router Alert option to
* ICMPv6 layer, as hop-by-hop options have been stripped.
*/
if (rtalert != ~0)
m->m_flags |= M_RTALERT_MLD;
/*
* Check that the amount of data in the buffers
 * is at least as much as the IPv6 header would have us expect.
* Trim mbufs if longer than we expect.
* Drop packet if shorter than we expect.
*/
if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) {
IP6STAT_INC(ip6s_tooshort);
in6_ifstat_inc(rcvif, ifs6_in_truncated);
goto bad;
}
if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) {
if (m->m_len == m->m_pkthdr.len) {
m->m_len = sizeof(struct ip6_hdr) + plen;
m->m_pkthdr.len = sizeof(struct ip6_hdr) + plen;
} else
m_adj(m, sizeof(struct ip6_hdr) + plen - m->m_pkthdr.len);
}
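/*
 * The trim above matters, for instance, for a 40-byte IPv6 packet with
 * no payload (plen == 0, ip6_nxt == IPPROTO_NONE): Ethernet pads the
 * frame to its 46-byte minimum payload, so m->m_pkthdr.len arrives as
 * 46 and the 6 bytes of link-layer padding are dropped here before the
 * header chain is walked.
 */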
/*
* Forward if desirable.
*/
if (V_ip6_mrouter &&
IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
/*
* If we are acting as a multicast router, all
* incoming multicast packets are passed to the
* kernel-level multicast forwarding function.
* The packet is returned (relatively) intact; if
* ip6_mforward() returns a non-zero value, the packet
* must be discarded, else it may be accepted below.
*
* XXX TODO: Check hlim and multicast scope here to avoid
* unnecessarily calling into ip6_mforward().
*/
if (ip6_mforward && ip6_mforward(ip6, rcvif, m)) {
IP6STAT_INC(ip6s_cantforward);
goto bad;
}
} else if (!ours) {
ip6_forward(m, srcrt);
return;
}
ip6 = mtod(m, struct ip6_hdr *);
/*
 * A malicious party may be able to use an IPv4-mapped address to
 * confuse the TCP/UDP stack and bypass security checks (acting as if
 * the packet came from 127.0.0.1 by using the IPv6 source
 * ::ffff:127.0.0.1).  Be cautious.
 *
 * For SIIT end node behavior, you may want to disable the check.
 * However, you will then become vulnerable to attacks using an
 * IPv4-mapped source address.
*/
if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
IP6STAT_INC(ip6s_badscope);
in6_ifstat_inc(rcvif, ifs6_in_addrerr);
goto bad;
}
/*
* Tell launch routine the next header
*/
IP6STAT_INC(ip6s_delivered);
in6_ifstat_inc(rcvif, ifs6_in_deliver);
nest = 0;
while (nxt != IPPROTO_DONE) {
if (V_ip6_hdrnestlimit && (++nest > V_ip6_hdrnestlimit)) {
IP6STAT_INC(ip6s_toomanyhdr);
goto bad;
}
/*
* protection against faulty packet - there should be
* more sanity checks in header chain processing.
*/
if (m->m_pkthdr.len < off) {
IP6STAT_INC(ip6s_tooshort);
in6_ifstat_inc(rcvif, ifs6_in_truncated);
goto bad;
}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
if (IPSEC_ENABLED(ipv6)) {
if (IPSEC_INPUT(ipv6, m, off, nxt) != 0)
return;
}
#endif /* IPSEC */
nxt = (*inet6sw[ip6_protox[nxt]].pr_input)(&m, &off, nxt);
}
return;
bad:
in6_ifstat_inc(rcvif, ifs6_in_discard);
if (m != NULL)
m_freem(m);
}
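/*
 * Illustrative sketch of the calling convention used by the dispatch
 * loop above: each entry reached through inet6sw[ip6_protox[nxt]] is a
 * pr_input routine taking (struct mbuf **, int *, int) and returning
 * either the next protocol to dispatch or IPPROTO_DONE.  The names below
 * are hypothetical, shown only to document the contract:
 *
 *	static int
 *	example6_input(struct mbuf **mp, int *offp, int proto)
 *	{
 *		struct mbuf *m = *mp;
 *
 *		// ... parse the header that starts at *offp ...
 *		m_freem(m);
 *		*mp = NULL;		// mbuf consumed; caller must not touch it
 *		return (IPPROTO_DONE);	// terminate the ip6_input() loop
 *	}
 */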
/*
* Hop-by-Hop options header processing. If a valid jumbo payload option is
* included, the real payload length will be stored in plenp.
*
 * rtalertp - XXX: should be stored in a smarter way
*/
static int
ip6_hopopts_input(u_int32_t *plenp, u_int32_t *rtalertp,
struct mbuf **mp, int *offp)
{
struct mbuf *m = *mp;
int off = *offp, hbhlen;
struct ip6_hbh *hbh;
/* validation of the length of the header */
#ifndef PULLDOWN_TEST
IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), -1);
hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
hbhlen = (hbh->ip6h_len + 1) << 3;
IP6_EXTHDR_CHECK(m, off, hbhlen, -1);
hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
#else
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m,
sizeof(struct ip6_hdr), sizeof(struct ip6_hbh));
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
return -1;
}
hbhlen = (hbh->ip6h_len + 1) << 3;
IP6_EXTHDR_GET(hbh, struct ip6_hbh *, m, sizeof(struct ip6_hdr),
hbhlen);
if (hbh == NULL) {
IP6STAT_INC(ip6s_tooshort);
return -1;
}
#endif
off += hbhlen;
hbhlen -= sizeof(struct ip6_hbh);
if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh),
hbhlen, rtalertp, plenp) < 0)
return (-1);
*offp = off;
*mp = m;
return (0);
}
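/*
 * Worked example for the length arithmetic above: ip6h_len counts the
 * header length in 8-octet units, excluding the first 8 octets
 * (RFC 2460).  A minimal Hop-by-Hop header therefore has ip6h_len == 0,
 * giving hbhlen = (0 + 1) << 3 = 8 bytes on the wire; after subtracting
 * sizeof(struct ip6_hbh) (the 2-byte next-header/length prefix), 6 bytes
 * of TLV-encoded options remain for ip6_process_hopopts() to walk.
 */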
/*
* Search header for all Hop-by-hop options and process each option.
* This function is separate from ip6_hopopts_input() in order to
 * handle the case where the sending node itself processes its hop-by-hop
 * options header.  In such a case, the function is called from ip6_output().
 *
 * The function assumes that the hbh header is located right after the IPv6
 * header (RFC2460 p7), that opthead points into the data content of m, and
 * that the region from opthead to opthead + hbhlen is contiguous in memory.
*/
int
ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
u_int32_t *rtalertp, u_int32_t *plenp)
{
struct ip6_hdr *ip6;
int optlen = 0;
u_int8_t *opt = opthead;
u_int16_t rtalert_val;
u_int32_t jumboplen;
const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh);
for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
switch (*opt) {
case IP6OPT_PAD1:
optlen = 1;
break;
case IP6OPT_PADN:
if (hbhlen < IP6OPT_MINLEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
optlen = *(opt + 1) + 2;
break;
case IP6OPT_ROUTER_ALERT:
/* XXX may need check for alignment */
if (hbhlen < IP6OPT_RTALERT_LEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) {
/* XXX stat */
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 1 - opthead);
return (-1);
}
optlen = IP6OPT_RTALERT_LEN;
bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2);
*rtalertp = ntohs(rtalert_val);
break;
case IP6OPT_JUMBO:
/* XXX may need check for alignment */
if (hbhlen < IP6OPT_JUMBO_LEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
/* XXX stat */
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 1 - opthead);
return (-1);
}
optlen = IP6OPT_JUMBO_LEN;
/*
 * IPv6 packets that have a non-zero payload length
 * must not contain a jumbo payload option.
*/
ip6 = mtod(m, struct ip6_hdr *);
if (ip6->ip6_plen) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt - opthead);
return (-1);
}
/*
* We may see jumbolen in unaligned location, so
* we'd need to perform bcopy().
*/
bcopy(opt + 2, &jumboplen, sizeof(jumboplen));
jumboplen = (u_int32_t)htonl(jumboplen);
#if 1
/*
 * If there are multiple jumbo payload options,
 * *plenp will be non-zero and the packet will be
 * rejected.
 * The behavior may need some debate in ipngwg -
 * multiple options do not make sense; however,
 * there is no explicit mention in the specification.
*/
if (*plenp != 0) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 2 - opthead);
return (-1);
}
#endif
/*
* jumbo payload length must be larger than 65535.
*/
if (jumboplen <= IPV6_MAXPACKET) {
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_HEADER,
erroff + opt + 2 - opthead);
return (-1);
}
*plenp = jumboplen;
break;
default: /* unknown option */
if (hbhlen < IP6OPT_MINLEN) {
IP6STAT_INC(ip6s_toosmall);
goto bad;
}
optlen = ip6_unknown_opt(opt, m,
erroff + opt - opthead);
if (optlen == -1)
return (-1);
optlen += 2;
break;
}
}
return (0);
bad:
m_freem(m);
return (-1);
}
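/*
 * For reference, a sketch of the Hop-by-Hop header this routine commonly
 * sees for MLD traffic: an RFC 2711 Router Alert option (value 0, i.e.
 * "datagram contains an MLD message") padded to the 8-octet boundary
 * with PadN.  Byte layout:
 *
 *	0x3a 0x00	next header = ICMPv6, ip6h_len = 0 (8 bytes total)
 *	0x05 0x02	IP6OPT_ROUTER_ALERT, option data length 2
 *	0x00 0x00	rtalert_val = 0 (MLD)
 *	0x01 0x00	IP6OPT_PADN with zero bytes of padding data
 *
 * The 16-bit value lands in *rtalertp, and ip6_input() then sets
 * M_RTALERT_MLD so that icmp6_input() can verify the alert was present.
 */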
/*
* Unknown option processing.
* The third argument `off' is the offset from the IPv6 header to the option,
 * which is needed to return an ICMPv6 error even when the IPv6 header
 * and the option header are not contiguous.
*/
int
ip6_unknown_opt(u_int8_t *optp, struct mbuf *m, int off)
{
struct ip6_hdr *ip6;
switch (IP6OPT_TYPE(*optp)) {
case IP6OPT_TYPE_SKIP: /* ignore the option */
return ((int)*(optp + 1));
case IP6OPT_TYPE_DISCARD: /* silently discard */
m_freem(m);
return (-1);
case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */
IP6STAT_INC(ip6s_badoptions);
icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off);
return (-1);
case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */
IP6STAT_INC(ip6s_badoptions);
ip6 = mtod(m, struct ip6_hdr *);
if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
(m->m_flags & (M_BCAST|M_MCAST)))
m_freem(m);
else
icmp6_error(m, ICMP6_PARAM_PROB,
ICMP6_PARAMPROB_OPTION, off);
return (-1);
}
m_freem(m); /* XXX: NOTREACHED */
return (-1);
}
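/*
 * The switch above follows the unrecognized-option rules of RFC 2460
 * section 4.2: the two high-order bits of the option type, extracted by
 * IP6OPT_TYPE(), encode the required action:
 *
 *	00 (IP6OPT_TYPE_SKIP)		skip over the option and continue
 *	01 (IP6OPT_TYPE_DISCARD)	silently discard the packet
 *	10 (IP6OPT_TYPE_FORCEICMP)	discard and always send ICMPv6
 *	11 (IP6OPT_TYPE_ICMP)		discard, send ICMPv6 only if the
 *					destination was not multicast
 */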
/*
* Create the "control" list for this pcb.
 * These functions do not modify the mbuf chain at all.
 *
 * With KAME mbuf chain restriction:
 * The routine will be called from upper layer handlers like tcp6_input().
 * Thus the routine assumes that the caller (tcp6_input) has already
 * called IP6_EXTHDR_CHECK() and that all the extension headers are located
 * in the very first mbuf on the mbuf chain.
 *
 * ip6_savecontrol_v4 handles those options that can be set on a
 * v4-mapped socket.
 * ip6_savecontrol calls ip6_savecontrol_v4 directly to handle those
 * options and handles the v6-only ones itself.
*/
struct mbuf **
ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
int *v4only)
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
#ifdef SO_TIMESTAMP
if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
union {
struct timeval tv;
struct bintime bt;
struct timespec ts;
} t;
struct bintime boottimebin, bt1;
struct timespec ts1;
bool stamped;
stamped = false;
switch (inp->inp_socket->so_ts_clock) {
case SO_TS_REALTIME_MICRO:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
mbuf_tstmp2timespec(m, &ts1);
timespec2bintime(&ts1, &bt1);
getboottimebin(&boottimebin);
bintime_add(&bt1, &boottimebin);
bintime2timeval(&bt1, &t.tv);
} else {
microtime(&t.tv);
}
*mp = sbcreatecontrol((caddr_t) &t.tv, sizeof(t.tv),
SCM_TIMESTAMP, SOL_SOCKET);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
break;
case SO_TS_BINTIME:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
mbuf_tstmp2timespec(m, &ts1);
timespec2bintime(&ts1, &t.bt);
getboottimebin(&boottimebin);
bintime_add(&t.bt, &boottimebin);
} else {
bintime(&t.bt);
}
*mp = sbcreatecontrol((caddr_t)&t.bt, sizeof(t.bt),
SCM_BINTIME, SOL_SOCKET);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
break;
case SO_TS_REALTIME:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP)) {
mbuf_tstmp2timespec(m, &t.ts);
getboottimebin(&boottimebin);
bintime2timespec(&boottimebin, &ts1);
timespecadd(&t.ts, &ts1);
} else {
nanotime(&t.ts);
}
*mp = sbcreatecontrol((caddr_t)&t.ts, sizeof(t.ts),
SCM_REALTIME, SOL_SOCKET);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
break;
case SO_TS_MONOTONIC:
if ((m->m_flags & (M_PKTHDR | M_TSTMP)) == (M_PKTHDR |
M_TSTMP))
mbuf_tstmp2timespec(m, &t.ts);
else
nanouptime(&t.ts);
*mp = sbcreatecontrol((caddr_t)&t.ts, sizeof(t.ts),
SCM_MONOTONIC, SOL_SOCKET);
if (*mp != NULL) {
mp = &(*mp)->m_next;
stamped = true;
}
break;
default:
panic("unknown (corrupted) so_ts_clock");
}
if (stamped && (m->m_flags & (M_PKTHDR | M_TSTMP)) ==
(M_PKTHDR | M_TSTMP)) {
struct sock_timestamp_info sti;
bzero(&sti, sizeof(sti));
sti.st_info_flags = ST_INFO_HW;
if ((m->m_flags & M_TSTMP_HPREC) != 0)
sti.st_info_flags |= ST_INFO_HW_HPREC;
*mp = sbcreatecontrol((caddr_t)&sti, sizeof(sti),
SCM_TIME_INFO, SOL_SOCKET);
if (*mp != NULL)
mp = &(*mp)->m_next;
}
}
#endif
#define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y))
/* RFC 2292 sec. 5 */
if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
struct in6_pktinfo pi6;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
pi6.ipi6_addr.s6_addr32[0] = 0;
pi6.ipi6_addr.s6_addr32[1] = 0;
pi6.ipi6_addr.s6_addr32[2] = IPV6_ADDR_INT32_SMP;
pi6.ipi6_addr.s6_addr32[3] = ip->ip_dst.s_addr;
#else
/* We won't hit this code */
bzero(&pi6.ipi6_addr, sizeof(struct in6_addr));
#endif
} else {
bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr));
in6_clearscope(&pi6.ipi6_addr); /* XXX */
}
pi6.ipi6_ifindex =
(m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0;
*mp = sbcreatecontrol((caddr_t) &pi6,
sizeof(struct in6_pktinfo),
IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO), IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) {
int hlim;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
hlim = ip->ip_ttl;
#else
/* We won't hit this code */
hlim = 0;
#endif
} else {
hlim = ip6->ip6_hlim & 0xff;
}
*mp = sbcreatecontrol((caddr_t) &hlim, sizeof(int),
IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if ((inp->inp_flags & IN6P_TCLASS) != 0) {
int tclass;
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
#ifdef INET
struct ip *ip;
ip = mtod(m, struct ip *);
tclass = ip->ip_tos;
#else
/* We won't hit this code */
tclass = 0;
#endif
} else {
u_int32_t flowinfo;
flowinfo = (u_int32_t)ntohl(ip6->ip6_flow & IPV6_FLOWINFO_MASK);
flowinfo >>= 20;
tclass = flowinfo & 0xff;
}
*mp = sbcreatecontrol((caddr_t) &tclass, sizeof(int),
IPV6_TCLASS, IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
if (v4only != NULL) {
if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
*v4only = 1;
} else {
*v4only = 0;
}
}
return (mp);
}
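/*
 * The control mbufs built here surface as ancillary data on recvmsg(2).
 * A minimal userland sketch (variable names are illustrative) that
 * consumes the IPV6_PKTINFO message generated above:
 *
 *	int on = 1;
 *	struct cmsghdr *cm;
 *	struct in6_pktinfo *pi;
 *
 *	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof(on));
 *	// ... recvmsg(s, &msg, 0) ...
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *	    cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == IPPROTO_IPV6 &&
 *		    cm->cmsg_type == IPV6_PKTINFO) {
 *			pi = (struct in6_pktinfo *)CMSG_DATA(cm);
 *			// pi->ipi6_addr is the destination address,
 *			// pi->ipi6_ifindex the receiving interface.
 *		}
 *	}
 */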
void
ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp)
{
struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
int v4only = 0;
mp = ip6_savecontrol_v4(in6p, m, mp, &v4only);
if (v4only)
return;
/*
* IPV6_HOPOPTS socket option. Recall that we required super-user
* privilege for the option (see ip6_ctloutput), but it might be too
* strict, since there might be some hop-by-hop options which can be
* returned to normal user.
* See also RFC 2292 section 6 (or RFC 3542 section 8).
*/
if ((in6p->inp_flags & IN6P_HOPOPTS) != 0) {
/*
 * Check if a hop-by-hop options header is contained in the
* received packet, and if so, store the options as ancillary
* data. Note that a hop-by-hop options header must be
* just after the IPv6 header, which is assured through the
* IPv6 input processing.
*/
if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
struct ip6_hbh *hbh;
int hbhlen = 0;
#ifdef PULLDOWN_TEST
struct mbuf *ext;
#endif
#ifndef PULLDOWN_TEST
hbh = (struct ip6_hbh *)(ip6 + 1);
hbhlen = (hbh->ip6h_len + 1) << 3;
#else
ext = ip6_pullexthdr(m, sizeof(struct ip6_hdr),
ip6->ip6_nxt);
if (ext == NULL) {
IP6STAT_INC(ip6s_tooshort);
return;
}
hbh = mtod(ext, struct ip6_hbh *);
hbhlen = (hbh->ip6h_len + 1) << 3;
if (hbhlen != ext->m_len) {
m_freem(ext);
IP6STAT_INC(ip6s_tooshort);
return;
}
#endif
/*
 * XXX: We copy the whole header even if a
 * jumbo payload option is included, although that
 * option is supposed to be removed before returning
 * according to RFC2292.
 * Note: this constraint was removed in RFC3542.
*/
*mp = sbcreatecontrol((caddr_t)hbh, hbhlen,
IS2292(in6p, IPV6_2292HOPOPTS, IPV6_HOPOPTS),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
#ifdef PULLDOWN_TEST
m_freem(ext);
#endif
}
}
if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) {
int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr);
/*
 * Search for destination options headers or routing
 * header(s) through the header chain, and store each
 * header as ancillary data.
 * Note that the order of the headers is preserved in
 * the chain of ancillary data.
*/
while (1) { /* is explicit loop prevention necessary? */
struct ip6_ext *ip6e = NULL;
int elen;
#ifdef PULLDOWN_TEST
struct mbuf *ext = NULL;
#endif
/*
* if it is not an extension header, don't try to
* pull it from the chain.
*/
switch (nxt) {
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
goto loopend;
}
#ifndef PULLDOWN_TEST
if (off + sizeof(*ip6e) > m->m_len)
goto loopend;
ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off);
if (nxt == IPPROTO_AH)
elen = (ip6e->ip6e_len + 2) << 2;
else
elen = (ip6e->ip6e_len + 1) << 3;
if (off + elen > m->m_len)
goto loopend;
#else
ext = ip6_pullexthdr(m, off, nxt);
if (ext == NULL) {
IP6STAT_INC(ip6s_tooshort);
return;
}
ip6e = mtod(ext, struct ip6_ext *);
if (nxt == IPPROTO_AH)
elen = (ip6e->ip6e_len + 2) << 2;
else
elen = (ip6e->ip6e_len + 1) << 3;
if (elen != ext->m_len) {
m_freem(ext);
IP6STAT_INC(ip6s_tooshort);
return;
}
#endif
switch (nxt) {
case IPPROTO_DSTOPTS:
if (!(in6p->inp_flags & IN6P_DSTOPTS))
break;
*mp = sbcreatecontrol((caddr_t)ip6e, elen,
IS2292(in6p,
IPV6_2292DSTOPTS, IPV6_DSTOPTS),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
break;
case IPPROTO_ROUTING:
if (!(in6p->inp_flags & IN6P_RTHDR))
break;
*mp = sbcreatecontrol((caddr_t)ip6e, elen,
IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR),
IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
break;
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
/*
 * Other cases have been filtered out above, so
 * none should reach this point.  The code is here
 * just in case (nxt overwritten or other
 * unexpected cases).
*/
#ifdef PULLDOWN_TEST
m_freem(ext);
#endif
goto loopend;
}
/* proceed with the next header. */
off += elen;
nxt = ip6e->ip6e_nxt;
ip6e = NULL;
#ifdef PULLDOWN_TEST
m_freem(ext);
ext = NULL;
#endif
}
loopend:
;
}
if (in6p->inp_flags2 & INP_RECVFLOWID) {
uint32_t flowid, flow_type;
flowid = m->m_pkthdr.flowid;
flow_type = M_HASHTYPE_GET(m);
/*
* XXX should handle the failure of one or the
* other - don't populate both?
*/
*mp = sbcreatecontrol((caddr_t) &flowid,
sizeof(uint32_t), IPV6_FLOWID, IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
*mp = sbcreatecontrol((caddr_t) &flow_type,
sizeof(uint32_t), IPV6_FLOWTYPE, IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
#ifdef RSS
if (in6p->inp_flags2 & INP_RECVRSSBUCKETID) {
uint32_t flowid, flow_type;
uint32_t rss_bucketid;
flowid = m->m_pkthdr.flowid;
flow_type = M_HASHTYPE_GET(m);
if (rss_hash2bucket(flowid, flow_type, &rss_bucketid) == 0) {
*mp = sbcreatecontrol((caddr_t) &rss_bucketid,
sizeof(uint32_t), IPV6_RSSBUCKETID, IPPROTO_IPV6);
if (*mp)
mp = &(*mp)->m_next;
}
}
#endif
}
#undef IS2292
void
ip6_notify_pmtu(struct inpcb *inp, struct sockaddr_in6 *dst, u_int32_t mtu)
{
struct socket *so;
struct mbuf *m_mtu;
struct ip6_mtuinfo mtuctl;
KASSERT(inp != NULL, ("%s: inp == NULL", __func__));
/*
 * Notify the error by sending IPV6_PATHMTU ancillary data if the
 * application wanted to know the MTU value.
 * NOTE: we notify disconnected sockets, because some UDP
 * applications keep sending on disconnected sockets.
 * NOTE: our implementation doesn't notify connected sockets whose
 * foreign address differs from the given destination address
 * (this is permitted by RFC 3542).
*/
if ((inp->inp_flags & IN6P_MTU) == 0 || (
!IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr) &&
!IN6_ARE_ADDR_EQUAL(&inp->in6p_faddr, &dst->sin6_addr)))
return;
mtuctl.ip6m_mtu = mtu;
mtuctl.ip6m_addr = *dst;
if (sa6_recoverscope(&mtuctl.ip6m_addr))
return;
if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof(mtuctl),
IPV6_PATHMTU, IPPROTO_IPV6)) == NULL)
return;
so = inp->inp_socket;
if (sbappendaddr(&so->so_rcv, (struct sockaddr *)dst, NULL, m_mtu)
== 0) {
m_freem(m_mtu);
/* XXX: should count statistics */
} else
sorwakeup(so);
}
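/*
 * A userland sketch (illustrative, per RFC 3542) of how the IPV6_PATHMTU
 * ancillary data queued above is consumed; cm is a struct cmsghdr taken
 * from a recvmsg(2) call on a socket with IPV6_RECVPATHMTU enabled:
 *
 *	struct ip6_mtuinfo *mtuinfo;
 *
 *	if (cm->cmsg_level == IPPROTO_IPV6 &&
 *	    cm->cmsg_type == IPV6_PATHMTU) {
 *		mtuinfo = (struct ip6_mtuinfo *)CMSG_DATA(cm);
 *		// mtuinfo->ip6m_addr is the destination,
 *		// mtuinfo->ip6m_mtu the new path MTU.
 *	}
 */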
#ifdef PULLDOWN_TEST
/*
 * Pull a single extension header from the mbuf chain.  Returns a single
 * mbuf that contains the result, or NULL on error.
*/
static struct mbuf *
ip6_pullexthdr(struct mbuf *m, size_t off, int nxt)
{
struct ip6_ext ip6e;
size_t elen;
struct mbuf *n;
#ifdef DIAGNOSTIC
switch (nxt) {
case IPPROTO_DSTOPTS:
case IPPROTO_ROUTING:
case IPPROTO_HOPOPTS:
case IPPROTO_AH: /* is it possible? */
break;
default:
printf("ip6_pullexthdr: invalid nxt=%d\n", nxt);
}
#endif
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxt == IPPROTO_AH)
elen = (ip6e.ip6e_len + 2) << 2;
else
elen = (ip6e.ip6e_len + 1) << 3;
if (elen > MLEN)
n = m_getcl(M_NOWAIT, MT_DATA, 0);
else
n = m_get(M_NOWAIT, MT_DATA);
if (n == NULL)
return NULL;
m_copydata(m, off, elen, mtod(n, caddr_t));
n->m_len = elen;
return n;
}
#endif
/*
 * Get the offset of the next-header field of the header that immediately
 * precedes the header currently being processed.
*/
int
ip6_get_prevhdr(const struct mbuf *m, int off)
{
struct ip6_ext ip6e;
struct ip6_hdr *ip6;
int len, nlen, nxt;
if (off == sizeof(struct ip6_hdr))
return (offsetof(struct ip6_hdr, ip6_nxt));
if (off < sizeof(struct ip6_hdr))
panic("%s: off < sizeof(struct ip6_hdr)", __func__);
ip6 = mtod(m, struct ip6_hdr *);
nxt = ip6->ip6_nxt;
len = sizeof(struct ip6_hdr);
nlen = 0;
while (len < off) {
m_copydata(m, len, sizeof(ip6e), (caddr_t)&ip6e);
switch (nxt) {
case IPPROTO_FRAGMENT:
nlen = sizeof(struct ip6_frag);
break;
case IPPROTO_AH:
nlen = (ip6e.ip6e_len + 2) << 2;
break;
default:
nlen = (ip6e.ip6e_len + 1) << 3;
}
len += nlen;
nxt = ip6e.ip6e_nxt;
}
return (len - nlen);
}
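/*
 * Worked example of the extension header length arithmetic used here and
 * in ip6_nexthdr()/ip6_pullexthdr(): generic extension headers encode
 * their length in 8-octet units excluding the first 8 octets, so a
 * routing header with ip6e_len == 2 spans (2 + 1) << 3 = 24 bytes.  AH
 * is the exception: its payload length is in 4-octet units minus two,
 * so an AH header with ip6e_len == 4 spans (4 + 2) << 2 = 24 bytes.
 */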
/*
 * Get the next header offset.  m will be retained.
*/
int
ip6_nexthdr(const struct mbuf *m, int off, int proto, int *nxtp)
{
struct ip6_hdr ip6;
struct ip6_ext ip6e;
struct ip6_frag fh;
/* just in case */
if (m == NULL)
panic("ip6_nexthdr: m == NULL");
if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off)
return -1;
switch (proto) {
case IPPROTO_IPV6:
if (m->m_pkthdr.len < off + sizeof(ip6))
return -1;
m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6);
if (nxtp)
*nxtp = ip6.ip6_nxt;
off += sizeof(ip6);
return off;
case IPPROTO_FRAGMENT:
/*
 * Terminate parsing if it is not the first fragment;
 * it does not make sense to parse through it.
*/
if (m->m_pkthdr.len < off + sizeof(fh))
return -1;
m_copydata(m, off, sizeof(fh), (caddr_t)&fh);
/* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */
if (fh.ip6f_offlg & IP6F_OFF_MASK)
return -1;
if (nxtp)
*nxtp = fh.ip6f_nxt;
off += sizeof(struct ip6_frag);
return off;
case IPPROTO_AH:
if (m->m_pkthdr.len < off + sizeof(ip6e))
return -1;
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxtp)
*nxtp = ip6e.ip6e_nxt;
off += (ip6e.ip6e_len + 2) << 2;
return off;
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
case IPPROTO_DSTOPTS:
if (m->m_pkthdr.len < off + sizeof(ip6e))
return -1;
m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
if (nxtp)
*nxtp = ip6e.ip6e_nxt;
off += (ip6e.ip6e_len + 1) << 3;
return off;
case IPPROTO_NONE:
case IPPROTO_ESP:
case IPPROTO_IPCOMP:
/* give up */
return -1;
default:
return -1;
}
/* NOTREACHED */
}
/*
 * Get the offset of the last header in the chain.  m will be left untouched.
*/
int
ip6_lasthdr(const struct mbuf *m, int off, int proto, int *nxtp)
{
int newoff;
int nxt;
if (!nxtp) {
nxt = -1;
nxtp = &nxt;
}
while (1) {
newoff = ip6_nexthdr(m, off, proto, nxtp);
if (newoff < 0)
return off;
else if (newoff < off)
return -1; /* invalid */
else if (newoff == off)
return newoff;
off = newoff;
proto = *nxtp;
}
}
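/*
 * A minimal usage sketch (hypothetical caller) for locating the
 * upper-layer header with these helpers:
 *
 *	int nxt = -1, off;
 *
 *	off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
 *	if (off < 0)
 *		return;		// malformed or unparseable chain
 *	// off is now the offset of the last header that could be located
 *	// (typically the upper-layer header) and nxt its protocol number.
 */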
/*
* System control for IP6
*/
u_char inet6ctlerrmap[PRC_NCMDS] = {
0, 0, 0, 0,
0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
EMSGSIZE, EHOSTUNREACH, 0, 0,
0, 0, EHOSTUNREACH, 0,
ENOPROTOOPT, ECONNREFUSED
};
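/*
 * inet6ctlerrmap[] is indexed by the PRC_* command delivered to the
 * per-protocol *_ctlinput() routines; a typical consumer pattern
 * (sketched here, not a definitive quote of any one caller) is:
 *
 *	if ((unsigned)cmd >= PRC_NCMDS)
 *		return;
 *	if (inet6ctlerrmap[cmd] == 0)
 *		return;			// no errno mapped; ignore
 *	error = inet6ctlerrmap[cmd];
 */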