/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1980, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if.c	8.5 (Berkeley) 1/9/95
 * $FreeBSD$
 */

#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/domainset.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/epoch.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/refcount.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/taskqueue.h>
#include <sys/domain.h>
#include <sys/jail.h>

#include <machine/stdarg.h>
#include <vm/uma.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/radix.h>
#include <net/route.h>
#include <net/vnet.h>

#if defined(INET) || defined(INET6)
#include <net/ethernet.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_carp.h>
#ifdef INET
#include <netinet/if_ether.h>
#include <netinet/netdump/netdump.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif /* INET6 */
#endif /* INET || INET6 */

#include <security/mac/mac_framework.h>

/*
 * Consumers of struct ifreq such as tcpdump assume no pad between ifr_name
 * and ifr_ifru when it is used in SIOCGIFCONF.
 */
_Static_assert(sizeof(((struct ifreq *)0)->ifr_name) ==
    offsetof(struct ifreq, ifr_ifru), "gap between ifr_name and ifr_ifru");

__read_mostly epoch_t net_epoch_preempt;
__read_mostly epoch_t net_epoch;
#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>

struct ifreq_buffer32 {
	uint32_t	length;		/* (size_t) */
	uint32_t	buffer;		/* (void *) */
};

/*
 * Interface request structure used for socket
 * ioctl's.  All interface ioctl's must have parameter
 * definitions which begin with ifr_name.  The
 * remainder may be interface specific.
 */
struct ifreq32 {
	char	ifr_name[IFNAMSIZ];	/* if name, e.g. "en0" */
	union {
		struct sockaddr	ifru_addr;
		struct sockaddr	ifru_dstaddr;
		struct sockaddr	ifru_broadaddr;
		struct ifreq_buffer32 ifru_buffer;
		short		ifru_flags[2];
		short		ifru_index;
		int		ifru_jid;
		int		ifru_metric;
		int		ifru_mtu;
		int		ifru_phys;
		int		ifru_media;
		uint32_t	ifru_data;
		int		ifru_cap[2];
		u_int		ifru_fib;
		u_char		ifru_vlan_pcp;
	} ifr_ifru;
};
CTASSERT(sizeof(struct ifreq) == sizeof(struct ifreq32));
CTASSERT(__offsetof(struct ifreq, ifr_ifru) ==
    __offsetof(struct ifreq32, ifr_ifru));

struct ifgroupreq32 {
	char	ifgr_name[IFNAMSIZ];
	u_int	ifgr_len;
	union {
		char		ifgru_group[IFNAMSIZ];
		uint32_t	ifgru_groups;
	} ifgr_ifgru;
};

struct ifmediareq32 {
	char		ifm_name[IFNAMSIZ];
	int		ifm_current;
	int		ifm_mask;
	int		ifm_status;
	int		ifm_active;
	int		ifm_count;
	uint32_t	ifm_ulist;	/* (int *) */
};
#define	SIOCGIFMEDIA32	_IOC_NEWTYPE(SIOCGIFMEDIA, struct ifmediareq32)
#define	SIOCGIFXMEDIA32	_IOC_NEWTYPE(SIOCGIFXMEDIA, struct ifmediareq32)

#define	_CASE_IOC_IFGROUPREQ_32(cmd)				\
    _IOC_NEWTYPE((cmd), struct ifgroupreq32): case
#else /* !COMPAT_FREEBSD32 */
#define _CASE_IOC_IFGROUPREQ_32(cmd)
#endif /* !COMPAT_FREEBSD32 */

#define CASE_IOC_IFGROUPREQ(cmd)	\
    _CASE_IOC_IFGROUPREQ_32(cmd)	\
    (cmd)
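
/*
 * Illustrative sketch (not part of the original source): the macro is
 * written to follow the "case" keyword directly, e.g.
 *
 *	switch (cmd) {
 *	case CASE_IOC_IFGROUPREQ(SIOCGIFGROUP):
 *		...
 *	}
 *
 * With COMPAT_FREEBSD32 defined, this expands to
 *
 *	case _IOC_NEWTYPE((SIOCGIFGROUP), struct ifgroupreq32):
 *	case (SIOCGIFGROUP):
 *
 * so both the 32-bit and the native command values are matched; without
 * COMPAT_FREEBSD32, only the native value remains.
 */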

union ifreq_union {
	struct ifreq	ifr;
#ifdef COMPAT_FREEBSD32
	struct ifreq32	ifr32;
#endif
};

union ifgroupreq_union {
	struct ifgroupreq ifgr;
#ifdef COMPAT_FREEBSD32
	struct ifgroupreq32 ifgr32;
#endif
};

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

SYSCTL_INT(_net_link, OID_AUTO, ifqmaxlen, CTLFLAG_RDTUN,
    &ifqmaxlen, 0, "max send queue size");

/* Log link state change events */
static int log_link_state_change = 1;

SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
	&log_link_state_change, 0,
	"log interface link state change events");

/* Log promiscuous mode change events */
static int log_promisc_mode_change = 1;

SYSCTL_INT(_net_link, OID_AUTO, log_promisc_mode_change, CTLFLAG_RDTUN,
	&log_promisc_mode_change, 1,
	"log promiscuous mode change events");

/* Interface description */
static unsigned int ifdescr_maxlen = 1024;
SYSCTL_UINT(_net, OID_AUTO, ifdescr_maxlen, CTLFLAG_RW,
	&ifdescr_maxlen, 0,
	"administrative maximum length for interface description");

static MALLOC_DEFINE(M_IFDESCR, "ifdescr", "ifnet descriptions");

/* global sx for non-critical path ifdescr */
static struct sx ifdescr_sx;
SX_SYSINIT(ifdescr_sx, &ifdescr_sx, "ifnet descr");

void	(*ng_ether_link_state_p)(struct ifnet *ifp, int state);
void	(*lagg_linkstate_p)(struct ifnet *ifp, int state);
/* These are external hooks for CARP. */
void	(*carp_linkstate_p)(struct ifnet *ifp);
void	(*carp_demote_adj_p)(int, char *);
int	(*carp_master_p)(struct ifaddr *);
#if defined(INET) || defined(INET6)
int	(*carp_forus_p)(struct ifnet *ifp, u_char *dhost);
int	(*carp_output_p)(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *sa);
int	(*carp_ioctl_p)(struct ifreq *, u_long, struct thread *);
int	(*carp_attach_p)(struct ifaddr *, int);
void	(*carp_detach_p)(struct ifaddr *, bool);
#endif
#ifdef INET
int	(*carp_iamatch_p)(struct ifaddr *, uint8_t **);
#endif
#ifdef INET6
struct ifaddr *(*carp_iamatch6_p)(struct ifnet *ifp, struct in6_addr *taddr6);
caddr_t	(*carp_macmatch6_p)(struct ifnet *ifp, struct mbuf *m,
    const struct in6_addr *taddr);
#endif

struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;

/*
 * XXX: Style; these should be sorted alphabetically, and unprototyped
 * static functions should be prototyped.  Currently they are sorted by
 * declaration order.
 */
static void	if_attachdomain(void *);
static void	if_attachdomain1(struct ifnet *);
static int	ifconf(u_long, caddr_t);
static void	*if_grow(void);
static void	if_input_default(struct ifnet *, struct mbuf *);
static int	if_requestencap_default(struct ifnet *, struct if_encap_req *);
static void	if_route(struct ifnet *, int flag, int fam);
static int	if_setflag(struct ifnet *, int, int, int *, int);
static int	if_transmit(struct ifnet *ifp, struct mbuf *m);
static void	if_unroute(struct ifnet *, int flag, int fam);
static int	if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
static void	do_link_state_change(void *, int);
static int	if_getgroup(struct ifgroupreq *, struct ifnet *);
static int	if_getgroupmembers(struct ifgroupreq *);
static void	if_delgroups(struct ifnet *);
static void	if_attach_internal(struct ifnet *, int, struct if_clone *);
static int	if_detach_internal(struct ifnet *, int, struct if_clone **);
#ifdef VIMAGE
static void	if_vmove(struct ifnet *, struct vnet *);
#endif

#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files;
 * this should probably be generalized.
 */
extern void	nd6_setmtu(struct ifnet *);
#endif

/* ipsec helper hooks */
VNET_DEFINE(struct hhook_head *, ipsec_hhh_in[HHOOK_IPSEC_COUNT]);
VNET_DEFINE(struct hhook_head *, ipsec_hhh_out[HHOOK_IPSEC_COUNT]);

VNET_DEFINE(int, if_index);
int	ifqmaxlen = IFQ_MAXLEN;
VNET_DEFINE(struct ifnethead, ifnet);	/* depend on static init XXX */
VNET_DEFINE(struct ifgrouphead, ifg_head);

VNET_DEFINE_STATIC(int, if_indexlim) = 8;

/* Table of ifnet by index. */
VNET_DEFINE(struct ifnet **, ifindex_table);

#define	V_if_indexlim		VNET(if_indexlim)
#define	V_ifindex_table		VNET(ifindex_table)

/*
 * The global network interface list (V_ifnet) and related state (such as
 * if_index, if_indexlim, and ifindex_table) are protected by an sxlock and
 * an rwlock.  Either may be acquired shared to stabilize the list, but both
 * must be acquired writable to modify the list.  This model allows us both
 * to stabilize the interface list during interrupt thread processing and to
 * stabilize it over long-running ioctls, without introducing priority
 * inversions and deadlocks.
 */
struct rwlock ifnet_rwlock;
RW_SYSINIT_FLAGS(ifnet_rw, &ifnet_rwlock, "ifnet_rw", RW_RECURSE);
struct sx ifnet_sxlock;
SX_SYSINIT_FLAGS(ifnet_sx, &ifnet_sxlock, "ifnet_sx", SX_RECURSE);
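
/*
 * Sketch of the intended discipline, assuming the IFNET_*LOCK() wrappers
 * from net/if_var.h (illustrative, not part of the original source):
 *
 *	IFNET_RLOCK();			shared; stabilizes the list
 *	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
 *		...
 *	IFNET_RUNLOCK();
 *
 *	IFNET_WLOCK();			exclusive on both locks; may modify
 *	...
 *	IFNET_WUNLOCK();
 */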

/*
 * The allocation of network interfaces is a rather non-atomic affair; we
 * need to select an index before we are ready to expose the interface for
 * use, so we will use this pointer value to indicate reservation.
 */
#define	IFNET_HOLD	(void *)(uintptr_t)(-1)

static	if_com_alloc_t *if_com_alloc[256];
static	if_com_free_t *if_com_free[256];

static MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");

struct ifnet *
ifnet_byindex(u_short idx)
{
	struct ifnet *ifp;

	if (__predict_false(idx > V_if_index))
		return (NULL);

	ifp = *(struct ifnet * const volatile *)(V_ifindex_table + idx);
	return (__predict_false(ifp == IFNET_HOLD) ? NULL : ifp);
}

struct ifnet *
ifnet_byindex_ref(u_short idx)
{
	struct ifnet *ifp;

	NET_EPOCH_ASSERT();

	ifp = ifnet_byindex(idx);
	if (ifp == NULL || (ifp->if_flags & IFF_DYING))
		return (NULL);
	if_ref(ifp);
	return (ifp);
}
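
/*
 * Typical caller pattern (illustrative sketch, not part of the original
 * source): the lookup must run inside the network epoch; the returned
 * reference may then be held across the epoch exit and is dropped with
 * if_rele().
 *
 *	struct epoch_tracker et;
 *	struct ifnet *ifp;
 *
 *	NET_EPOCH_ENTER(et);
 *	ifp = ifnet_byindex_ref(idx);
 *	NET_EPOCH_EXIT(et);
 *	if (ifp != NULL) {
 *		... use ifp, possibly sleeping ...
 *		if_rele(ifp);
 *	}
 */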

/*
 * Allocate an ifindex array entry; return the allocated index on success,
 * or USHRT_MAX if the table had to be grown first.  In the latter case the
 * caller must drop the lock, wait out current readers, free *old and retry.
 */
static u_short
ifindex_alloc(void **old)
{
	u_short idx;

	IFNET_WLOCK_ASSERT();
	/*
	 * Try to find an empty slot below V_if_index.  If we fail, take the
	 * next slot.
	 */
	for (idx = 1; idx <= V_if_index; idx++) {
		if (V_ifindex_table[idx] == NULL)
			break;
	}

	/* Catch if_index overflow. */
	if (idx >= V_if_indexlim) {
		*old = if_grow();
		return (USHRT_MAX);
	}
	if (idx > V_if_index)
		V_if_index = idx;
	return (idx);
}
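
/*
 * Sketch of the retry pattern a caller is expected to follow (illustrative;
 * the actual allocation path lives in if_alloc(), which is not part of this
 * excerpt): on a USHRT_MAX return, drop the write lock, drain current epoch
 * readers, free the superseded table, and try again.
 *
 *	restart:
 *		IFNET_WLOCK();
 *		idx = ifindex_alloc(&old);
 *		if (__predict_false(idx == USHRT_MAX)) {
 *			IFNET_WUNLOCK();
 *			epoch_wait_preempt(net_epoch_preempt);
 *			free(old, M_IFNET);
 *			goto restart;
 *		}
 */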

static void
ifindex_free_locked(u_short idx)
{

	IFNET_WLOCK_ASSERT();

	V_ifindex_table[idx] = NULL;
	while (V_if_index > 0 &&
	    V_ifindex_table[V_if_index] == NULL)
		V_if_index--;
}

static void
ifindex_free(u_short idx)
{

	IFNET_WLOCK();
	ifindex_free_locked(idx);
	IFNET_WUNLOCK();
}

static void
ifnet_setbyindex(u_short idx, struct ifnet *ifp)
{

	V_ifindex_table[idx] = ifp;
}

struct ifaddr *
ifaddr_byindex(u_short idx)
{
	struct ifnet *ifp;
	struct ifaddr *ifa = NULL;

	NET_EPOCH_ASSERT();

	ifp = ifnet_byindex(idx);
	if (ifp != NULL && (ifa = ifp->if_addr) != NULL)
		ifa_ref(ifa);
	return (ifa);
}

/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
Introduce and use a sysinit-based initialization scheme for virtual
network stacks, VNET_SYSINIT:
- Add VNET_SYSINIT and VNET_SYSUNINIT macros to declare events that will
occur each time a network stack is instantiated and destroyed. In the
!VIMAGE case, these are simply mapped into regular SYSINIT/SYSUNINIT.
For the VIMAGE case, we instead use SYSINIT's to track their order and
properties on registration, using them for each vnet when created/
destroyed, or immediately on module load for already-started vnets.
- Remove vnet_modinfo mechanism that existed to serve this purpose
previously, as well as its dependency scheme: we now just use the
SYSINIT ordering scheme.
- Implement VNET_DOMAIN_SET() to allow protocol domains to declare that
they want init functions to be called for each virtual network stack
rather than just once at boot, compiling down to DOMAIN_SET() in the
non-VIMAGE case.
- Walk all virtualized kernel subsystems and make use of these instead
of modinfo or DOMAIN_SET() for init/uninit events. In some cases,
convert modular components from using modevent to using sysinit (where
appropriate). In some cases, do minor rejuggling of SYSINIT ordering
to make room for or better manage events.
Portions submitted by: jhb (VNET_SYSINIT), bz (cleanup)
Discussed with: jhb, bz, julian, zec
Reviewed by: bz
Approved by: re (VIMAGE blanket)
2009-07-23 20:46:49 +00:00
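(Sketch of the registration pattern for a hypothetical "foo" module; in a
non-VIMAGE kernel VNET_SYSINIT compiles down to a plain SYSINIT, while in a
VIMAGE kernel the handler runs once per vnet.)

static void
vnet_foo_init(const void *unused __unused)
{
	/* allocate and initialize this module's per-vnet state */
}
VNET_SYSINIT(vnet_foo_init, SI_SUB_PROTO_DOMAIN, SI_ORDER_ANY,
    vnet_foo_init, NULL);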
|
|
|
static void
|
|
|
|
vnet_if_init(const void *unused __unused)
|
|
|
|
{
|
2018-05-23 21:02:14 +00:00
|
|
|
void *old;
|
2009-07-23 20:46:49 +00:00
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_INIT(&V_ifnet);
|
|
|
|
CK_STAILQ_INIT(&V_ifg_head);
|
2011-01-24 22:21:58 +00:00
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
old = if_grow(); /* create initial table */
|
2011-01-24 22:21:58 +00:00
|
|
|
IFNET_WUNLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
epoch_wait_preempt(net_epoch_preempt);
|
|
|
|
free(old, M_IFNET);
|
2009-07-23 20:46:49 +00:00
|
|
|
vnet_if_clone_init();
|
|
|
|
}
|
2011-01-24 22:21:58 +00:00
|
|
|
VNET_SYSINIT(vnet_if_init, SI_SUB_INIT_IF, SI_ORDER_SECOND, vnet_if_init,
|
2009-07-23 20:46:49 +00:00
|
|
|
NULL);
|
|
|
|
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many such issues are
already known and will be incrementally fixed over the next few weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
#ifdef VIMAGE
|
2009-07-23 20:46:49 +00:00
|
|
|
static void
|
|
|
|
vnet_if_uninit(const void *unused __unused)
|
2009-06-08 17:15:40 +00:00
|
|
|
{
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
VNET_ASSERT(CK_STAILQ_EMPTY(&V_ifnet), ("%s:%d tailq &V_ifnet=%p "
|
2011-02-11 13:27:00 +00:00
|
|
|
"not empty", __func__, __LINE__, &V_ifnet));
|
2018-05-23 21:02:14 +00:00
|
|
|
VNET_ASSERT(CK_STAILQ_EMPTY(&V_ifg_head), ("%s:%d tailq &V_ifg_head=%p "
|
2011-02-11 13:27:00 +00:00
|
|
|
"not empty", __func__, __LINE__, &V_ifg_head));
|
2009-06-08 17:15:40 +00:00
|
|
|
|
|
|
|
free((caddr_t)V_ifindex_table, M_IFNET);
|
|
|
|
}
|
2009-07-23 20:46:49 +00:00
|
|
|
VNET_SYSUNINIT(vnet_if_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
|
|
|
|
vnet_if_uninit, NULL);
|
2016-05-18 20:06:45 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
vnet_if_return(const void *unused __unused)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp, *nifp;
|
|
|
|
|
|
|
|
/* Return all inherited interfaces to their parent vnets. */
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH_SAFE(ifp, &V_ifnet, if_link, nifp) {
|
2016-05-18 20:06:45 +00:00
|
|
|
if (ifp->if_home_vnet != ifp->if_vnet)
|
|
|
|
if_vmove(ifp, ifp->if_home_vnet);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
VNET_SYSUNINIT(vnet_if_return, SI_SUB_VNET_DONE, SI_ORDER_ANY,
|
|
|
|
vnet_if_return, NULL);
|
2009-06-08 17:15:40 +00:00
|
|
|
#endif
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
|
|
|
|
static void *
|
2001-09-06 02:40:43 +00:00
|
|
|
if_grow(void)
|
|
|
|
{
|
2011-01-24 22:21:58 +00:00
|
|
|
int oldlim;
|
2001-09-06 02:40:43 +00:00
|
|
|
u_int n;
|
2014-11-07 09:15:39 +00:00
|
|
|
struct ifnet **e;
|
2018-05-23 21:02:14 +00:00
|
|
|
void *old;
|
2001-09-06 02:40:43 +00:00
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
old = NULL;
|
2011-01-24 22:21:58 +00:00
|
|
|
IFNET_WLOCK_ASSERT();
|
|
|
|
oldlim = V_if_indexlim;
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
n = (oldlim << 1) * sizeof(*e);
|
2005-06-10 16:49:24 +00:00
|
|
|
e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
|
2011-01-24 22:21:58 +00:00
|
|
|
IFNET_WLOCK();
|
|
|
|
if (V_if_indexlim != oldlim) {
|
|
|
|
free(e, M_IFNET);
|
2018-05-23 21:02:14 +00:00
|
|
|
return (NULL);
|
2011-01-24 22:21:58 +00:00
|
|
|
}
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
if (V_ifindex_table != NULL) {
|
|
|
|
memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2);
|
2018-05-23 21:02:14 +00:00
|
|
|
old = V_ifindex_table;
|
2001-09-06 02:40:43 +00:00
|
|
|
}
|
2011-01-24 22:21:58 +00:00
|
|
|
V_if_indexlim <<= 1;
|
2008-08-17 23:27:27 +00:00
|
|
|
V_ifindex_table = e;
|
2018-05-23 21:02:14 +00:00
|
|
|
return (old);
|
2001-09-06 02:40:43 +00:00
|
|
|
}
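(Caller-side view of the contract, as used in vnet_if_init() above and
if_alloc_domain() below: lockless epoch readers may still hold pointers
into the old table, so it is freed only after the preemptible net epoch
has drained.)

	IFNET_WLOCK();
	old = if_grow();			/* may drop and retake the lock */
	IFNET_WUNLOCK();
	epoch_wait_preempt(net_epoch_preempt);	/* wait out current readers */
	free(old, M_IFNET);			/* free(NULL, ...) is a no-op */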
|
|
|
|
|
2005-06-10 16:49:24 +00:00
|
|
|
/*
|
2007-05-16 19:59:01 +00:00
|
|
|
* Allocate a struct ifnet and an index for an interface. A layer 2
|
|
|
|
* common structure will also be allocated if an allocation routine is
|
|
|
|
* registered for the passed type.
|
2005-06-10 16:49:24 +00:00
|
|
|
*/
|
2009-04-23 10:59:40 +00:00
|
|
|
struct ifnet *
|
2019-04-22 19:24:21 +00:00
|
|
|
if_alloc_domain(u_char type, int numa_domain)
|
2005-06-10 16:49:24 +00:00
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
2009-08-25 20:21:16 +00:00
|
|
|
u_short idx;
|
2018-05-23 21:02:14 +00:00
|
|
|
void *old;
|
2005-06-10 16:49:24 +00:00
|
|
|
|
2019-04-22 19:24:21 +00:00
|
|
|
KASSERT(numa_domain <= IF_NODOM, ("numa_domain too large"));
|
|
|
|
if (numa_domain == IF_NODOM)
|
|
|
|
ifp = malloc(sizeof(struct ifnet), M_IFNET,
|
|
|
|
M_WAITOK | M_ZERO);
|
|
|
|
else
|
|
|
|
ifp = malloc_domainset(sizeof(struct ifnet), M_IFNET,
|
|
|
|
DOMAINSET_PREF(numa_domain), M_WAITOK | M_ZERO);
|
2018-05-23 21:02:14 +00:00
|
|
|
restart:
|
2009-08-25 20:21:16 +00:00
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
idx = ifindex_alloc(&old);
|
|
|
|
if (__predict_false(idx == USHRT_MAX)) {
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
epoch_wait_preempt(net_epoch_preempt);
|
|
|
|
free(old, M_IFNET);
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
ifnet_setbyindex(idx, IFNET_HOLD);
|
2009-08-25 20:21:16 +00:00
|
|
|
IFNET_WUNLOCK();
|
|
|
|
ifp->if_index = idx;
|
2005-06-10 16:49:24 +00:00
|
|
|
ifp->if_type = type;
|
Start to address a number of races relating to use of ifnet pointers
after the corresponding interface has been destroyed:
(1) Add an ifnet refcount, ifp->if_refcount. Initialize it to 1 in
if_alloc(), and modify if_free_type() to decrement and check the
refcount.
(2) Add new if_ref() and if_rele() interfaces to allow kernel code
walking global interface lists to release IFNET_[RW]LOCK() yet
keep the ifnet stable. Currently, if_rele() is a no-op wrapper
around if_free(), but this may change in the future.
(3) Add new ifnet field, if_alloctype, which caches the type passed
to if_alloc(), but unlike if_type, won't be changed by drivers.
This allows asynchronous frees of the interface after the
driver has released it to still use the right type. Use that
instead of the type passed to if_free_type(), but assert that
they are the same (might have to rethink this if that doesn't
work out).
(4) Add a new ifnet_byindex_ref(), which looks up an interface by
index and returns a reference rather than a pointer to it.
(5) Fix if_alloc() to fully initialize the if_addr_mtx before hooking
up the ifnet to global lists.
(6) Modify sysctls in if_mib.c to use ifnet_byindex_ref() and release
the ifnet when done.
When this change is MFC'd, it will need to replace if_ispare fields
rather than adding new fields in order to avoid breaking the binary
interface. Once this change is MFC'd, if_free_type() should be
removed, as its 'type' argument is now optional.
This refcount is not appropriate for counting mbuf pkthdr references,
and also not for counting entry into the device driver via ifnet
function pointers. An rmlock may be appropriate for the latter.
Rather, this is about ensuring data structure stability when reaching
an ifnet via global ifnet lists and tables followed by copy in or out
of userspace.
MFC after: 3 weeks
Reported by: mdtancsa
Reviewed by: brooks
2009-04-21 22:43:32 +00:00
|
|
|
ifp->if_alloctype = type;
|
2019-04-22 19:24:21 +00:00
|
|
|
ifp->if_numa_domain = numa_domain;
|
2016-06-29 05:21:25 +00:00
|
|
|
#ifdef VIMAGE
|
|
|
|
ifp->if_vnet = curvnet;
|
|
|
|
#endif
|
2005-06-10 16:49:24 +00:00
|
|
|
if (if_com_alloc[type] != NULL) {
|
|
|
|
ifp->if_l2com = if_com_alloc[type](type, ifp);
|
2005-06-12 00:53:03 +00:00
|
|
|
if (ifp->if_l2com == NULL) {
|
2005-06-10 16:49:24 +00:00
|
|
|
free(ifp, M_IFNET);
|
2009-08-26 11:13:10 +00:00
|
|
|
ifindex_free(idx);
|
2005-06-12 00:53:03 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
2005-06-10 16:49:24 +00:00
|
|
|
}
|
2009-04-21 22:43:32 +00:00
|
|
|
|
|
|
|
IF_ADDR_LOCK_INIT(ifp);
|
2009-04-23 10:59:40 +00:00
|
|
|
TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
|
|
|
|
ifp->if_afdata_initialized = 0;
|
Introduce the if_vmove() function, which will be used in the future
for reassigning ifnets from one vnet to another.
if_vmove() works by calling a restricted subset of actions normally
executed by if_detach() on an ifnet in the current vnet, and then
switches to the target vnet and executes an appropriate subset of
if_attach() actions there.
if_attach() and if_detach() have become wrapper functions around
if_attach_internal() and if_detach_internal(), where the latter
variants have an additional argument, a flag indicating whether a
full attach or detach sequence is to be executed, or only a
restricted subset suitable for moving an ifnet from one vnet to
another. Hence, if_vmove() will not call if_detach() and if_attach()
directly, but will call the if_detach_internal() and
if_attach_internal() variants instead, with the vmove flag set.
While here, staticize ifnet_setbyindex() since it is not referenced
from outside of sys/net/if.c.
Also rename ifccnt field in struct vimage to ifcnt, and do some minor
whitespace garbage collection where appropriate.
This change should have no functional impact on nooptions VIMAGE kernel
builds.
Reviewed by: bz, rwatson, brooks?
Approved by: julian (mentor)
2009-05-22 22:09:00 +00:00
|
|
|
IF_AFDATA_LOCK_INIT(ifp);
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_INIT(&ifp->if_addrhead);
|
|
|
|
CK_STAILQ_INIT(&ifp->if_multiaddrs);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_INIT(&ifp->if_groups);
|
2009-04-23 10:59:40 +00:00
|
|
|
#ifdef MAC
|
|
|
|
mac_ifnet_init(ifp);
|
|
|
|
#endif
|
2009-06-15 19:50:03 +00:00
|
|
|
ifq_init(&ifp->if_snd, ifp);
|
2009-04-23 10:59:40 +00:00
|
|
|
|
2009-04-21 22:43:32 +00:00
|
|
|
refcount_init(&ifp->if_refcount, 1); /* Index reference. */
|
2014-09-28 08:57:07 +00:00
|
|
|
for (int i = 0; i < IFCOUNTERS; i++)
|
|
|
|
ifp->if_counters[i] = counter_u64_alloc(M_WAITOK);
|
2014-10-23 14:29:52 +00:00
|
|
|
ifp->if_get_counter = if_get_counter_default;
|
2018-03-27 15:29:32 +00:00
|
|
|
ifp->if_pcp = IFNET_PCP_NONE;
|
2014-10-23 14:29:52 +00:00
|
|
|
ifnet_setbyindex(ifp->if_index, ifp);
|
2005-06-10 16:49:24 +00:00
|
|
|
return (ifp);
|
|
|
|
}
|
|
|
|
|
2019-04-22 19:24:21 +00:00
|
|
|
struct ifnet *
|
|
|
|
if_alloc_dev(u_char type, device_t dev)
|
|
|
|
{
|
|
|
|
int numa_domain;
|
|
|
|
|
|
|
|
if (dev == NULL || bus_get_domain(dev, &numa_domain) != 0)
|
|
|
|
return (if_alloc_domain(type, IF_NODOM));
|
|
|
|
return (if_alloc_domain(type, numa_domain));
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ifnet *
|
|
|
|
if_alloc(u_char type)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (if_alloc_domain(type, IF_NODOM));
|
|
|
|
}
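(Typical driver-side usage, sketched; sc, dev and the hardware address are
assumed driver state, and "example" is a hypothetical interface name.)

	struct ifnet *ifp;

	ifp = if_alloc_dev(IFT_ETHER, dev);	/* NUMA-aware when dev is known */
	ifp->if_softc = sc;
	if_initname(ifp, "example", 0);
	ether_ifattach(ifp, sc->hwaddr);
	/* ... and on detach: */
	ether_ifdetach(ifp);
	if_free(ifp);				/* drops the index reference */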
|
2007-05-16 19:59:01 +00:00
|
|
|
/*
|
2011-04-04 07:45:08 +00:00
|
|
|
* Do the actual work of freeing a struct ifnet, and layer 2 common
|
|
|
|
* structure. This call is made when the last reference to an
|
2009-04-23 09:32:30 +00:00
|
|
|
* interface is released.
|
2007-05-16 19:59:01 +00:00
|
|
|
*/
|
2009-04-23 09:32:30 +00:00
|
|
|
static void
|
|
|
|
if_free_internal(struct ifnet *ifp)
|
2005-06-10 16:49:24 +00:00
|
|
|
{
|
|
|
|
|
2009-04-23 09:32:30 +00:00
|
|
|
KASSERT((ifp->if_flags & IFF_DYING),
|
|
|
|
("if_free_internal: interface not dying"));
|
|
|
|
|
|
|
|
if (if_com_free[ifp->if_alloctype] != NULL)
|
|
|
|
if_com_free[ifp->if_alloctype](ifp->if_l2com,
|
|
|
|
ifp->if_alloctype);
|
|
|
|
|
2009-04-23 10:59:40 +00:00
|
|
|
#ifdef MAC
|
|
|
|
mac_ifnet_destroy(ifp);
|
|
|
|
#endif /* MAC */
|
|
|
|
IF_AFDATA_DESTROY(ifp);
|
2009-04-23 09:32:30 +00:00
|
|
|
IF_ADDR_LOCK_DESTROY(ifp);
|
2009-06-15 19:50:03 +00:00
|
|
|
ifq_delete(&ifp->if_snd);
|
2014-09-28 08:57:07 +00:00
|
|
|
|
|
|
|
for (int i = 0; i < IFCOUNTERS; i++)
|
|
|
|
counter_u64_free(ifp->if_counters[i]);
|
|
|
|
|
2018-11-30 10:36:14 +00:00
|
|
|
free(ifp->if_description, M_IFDESCR);
|
|
|
|
free(ifp->if_hw_addr, M_IFADDR);
|
2019-04-22 19:24:21 +00:00
|
|
|
if (ifp->if_numa_domain == IF_NODOM)
|
|
|
|
free(ifp, M_IFNET);
|
|
|
|
else
|
|
|
|
free_domain(ifp, M_IFNET);
|
2005-06-10 16:49:24 +00:00
|
|
|
}
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
static void
|
|
|
|
if_destroy(epoch_context_t ctx)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
|
|
|
|
ifp = __containerof(ctx, struct ifnet, if_epoch_ctx);
|
|
|
|
if_free_internal(ifp);
|
|
|
|
}
|
|
|
|
|
2007-05-16 19:59:01 +00:00
|
|
|
/*
|
2011-12-09 23:26:28 +00:00
|
|
|
* Deregister an interface and free the associated storage.
|
2007-05-16 19:59:01 +00:00
|
|
|
*/
|
2005-06-10 16:49:24 +00:00
|
|
|
void
|
2011-12-09 23:26:28 +00:00
|
|
|
if_free(struct ifnet *ifp)
|
2005-06-10 16:49:24 +00:00
|
|
|
{
|
|
|
|
|
2009-04-23 09:32:30 +00:00
|
|
|
ifp->if_flags |= IFF_DYING; /* XXX: Locking */
|
2011-04-04 07:45:08 +00:00
|
|
|
|
2013-07-15 01:32:55 +00:00
|
|
|
CURVNET_SET_QUIET(ifp->if_vnet);
|
2011-04-04 07:45:08 +00:00
|
|
|
IFNET_WLOCK();
|
2019-10-15 12:08:09 +00:00
|
|
|
KASSERT(ifp == ifnet_byindex(ifp->if_index),
|
2011-04-04 07:45:08 +00:00
|
|
|
("%s: freeing unallocated ifnet", ifp->if_xname));
|
|
|
|
|
|
|
|
ifindex_free_locked(ifp->if_index);
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
|
2013-07-15 01:32:55 +00:00
|
|
|
if (refcount_release(&ifp->if_refcount))
|
2018-05-23 21:02:14 +00:00
|
|
|
epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy);
|
2013-07-15 01:32:55 +00:00
|
|
|
CURVNET_RESTORE();
|
2009-04-23 09:32:30 +00:00
|
|
|
}
|
2005-06-10 16:49:24 +00:00
|
|
|
|
2009-04-23 09:32:30 +00:00
|
|
|
/*
|
|
|
|
* Interfaces to keep an ifnet type-stable despite the possibility of the
|
|
|
|
* driver calling if_free(). If there are additional references, we defer
|
|
|
|
* freeing the underlying data structure.
|
|
|
|
*/
|
2009-04-21 22:43:32 +00:00
|
|
|
void
|
|
|
|
if_ref(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
|
|
|
|
/* We don't assert the ifnet list lock here, but arguably should. */
|
|
|
|
refcount_acquire(&ifp->if_refcount);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_rele(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
|
2009-04-23 09:32:30 +00:00
|
|
|
if (!refcount_release(&ifp->if_refcount))
|
|
|
|
return;
|
2018-05-23 21:02:14 +00:00
|
|
|
epoch_call(net_epoch_preempt, &ifp->if_epoch_ctx, if_destroy);
|
2009-04-21 22:43:32 +00:00
|
|
|
}
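(Sketch of a consumer that needs an ifnet to outlive its lookup context,
e.g. across a sleep or a copyout; the epoch section protects the lookup,
and the reference pins the structure afterwards.)

	struct epoch_tracker et;
	struct ifnet *ifp;

	NET_EPOCH_ENTER(et);
	ifp = ifnet_byindex(idx);
	if (ifp != NULL)
		if_ref(ifp);		/* pin beyond the epoch section */
	NET_EPOCH_EXIT(et);
	if (ifp != NULL) {
		/* ... may sleep or copy data out to userspace ... */
		if_rele(ifp);		/* last release defers the free */
	}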
|
|
|
|
|
2008-11-22 05:55:56 +00:00
|
|
|
void
|
2009-06-15 19:50:03 +00:00
|
|
|
ifq_init(struct ifaltq *ifq, struct ifnet *ifp)
|
2008-11-22 05:55:56 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);
|
|
|
|
|
|
|
|
if (ifq->ifq_maxlen == 0)
|
|
|
|
ifq->ifq_maxlen = ifqmaxlen;
|
|
|
|
|
|
|
|
ifq->altq_type = 0;
|
|
|
|
ifq->altq_disc = NULL;
|
|
|
|
ifq->altq_flags &= ALTQF_CANTCHANGE;
|
|
|
|
ifq->altq_tbr = NULL;
|
|
|
|
ifq->altq_ifp = ifp;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2009-06-15 19:50:03 +00:00
|
|
|
ifq_delete(struct ifaltq *ifq)
|
2008-11-22 05:55:56 +00:00
|
|
|
{
|
|
|
|
mtx_destroy(&ifq->ifq_mtx);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2016-05-03 18:05:43 +00:00
|
|
|
* Perform generic interface initialization tasks and attach the interface
|
2009-05-22 22:09:00 +00:00
|
|
|
* to the list of "active" interfaces. If vmove flag is set on entry
|
|
|
|
* to if_attach_internal(), perform only a limited subset of initialization
|
|
|
|
* tasks, given that we are moving from one vnet to another an ifnet which
|
|
|
|
* has already been fully initialized.
|
2007-05-16 19:59:01 +00:00
|
|
|
*
|
2015-03-02 20:00:03 +00:00
|
|
|
* Note that if_detach_internal() removes group membership unconditionally
|
|
|
|
* even when vmove flag is set, and if_attach_internal() adds only IFG_ALL.
|
|
|
|
* Thus, when if_vmove() is applied to a cloned interface, group membership
|
|
|
|
* is lost while a cloned one always joins a group whose name is
|
|
|
|
* ifc->ifc_name. To recover this after if_detach_internal() and
|
|
|
|
* if_attach_internal(), the cloner should be specified to
|
|
|
|
* if_attach_internal() via ifc. If it is non-NULL, if_attach_internal()
|
|
|
|
* attempts to join a group whose name is ifc->ifc_name.
|
|
|
|
*
|
2007-05-16 19:59:01 +00:00
|
|
|
* XXX:
|
|
|
|
* - The decision to return void and thus require this function to
|
|
|
|
* succeed is questionable.
|
|
|
|
* - We should probably do more sanity checking. For instance we don't
|
|
|
|
* do anything to ensure if_xname is unique or non-empty.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
void
|
2003-10-23 13:49:10 +00:00
|
|
|
if_attach(struct ifnet *ifp)
|
2009-05-22 22:09:00 +00:00
|
|
|
{
|
|
|
|
|
2015-03-02 20:00:03 +00:00
|
|
|
if_attach_internal(ifp, 0, NULL);
|
2009-05-22 22:09:00 +00:00
|
|
|
}
|
|
|
|
|
2014-09-22 08:27:27 +00:00
|
|
|
/*
|
|
|
|
* Compute the least common TSO limit.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
if_hw_tsomax_common(if_t ifp, struct ifnet_hw_tsomax *pmax)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* 1) If there is no limit currently, take the limit from
|
|
|
|
* the network adapter.
|
|
|
|
*
|
|
|
|
* 2) If the network adapter has a limit below the current
|
|
|
|
* limit, apply it.
|
|
|
|
*/
|
|
|
|
if (pmax->tsomaxbytes == 0 || (ifp->if_hw_tsomax != 0 &&
|
|
|
|
ifp->if_hw_tsomax < pmax->tsomaxbytes)) {
|
|
|
|
pmax->tsomaxbytes = ifp->if_hw_tsomax;
|
|
|
|
}
|
|
|
|
if (pmax->tsomaxsegcount == 0 || (ifp->if_hw_tsomaxsegcount != 0 &&
|
|
|
|
ifp->if_hw_tsomaxsegcount < pmax->tsomaxsegcount)) {
|
|
|
|
pmax->tsomaxsegcount = ifp->if_hw_tsomaxsegcount;
|
|
|
|
}
|
|
|
|
if (pmax->tsomaxsegsize == 0 || (ifp->if_hw_tsomaxsegsize != 0 &&
|
|
|
|
ifp->if_hw_tsomaxsegsize < pmax->tsomaxsegsize)) {
|
|
|
|
pmax->tsomaxsegsize = ifp->if_hw_tsomaxsegsize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update TSO limit of a network adapter.
|
|
|
|
*
|
|
|
|
* Returns zero if no change. Else non-zero.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_hw_tsomax_update(if_t ifp, struct ifnet_hw_tsomax *pmax)
|
|
|
|
{
|
|
|
|
int retval = 0;
|
|
|
|
if (ifp->if_hw_tsomax != pmax->tsomaxbytes) {
|
|
|
|
ifp->if_hw_tsomax = pmax->tsomaxbytes;
|
|
|
|
retval++;
|
|
|
|
}
|
|
|
|
if (ifp->if_hw_tsomaxsegsize != pmax->tsomaxsegsize) {
|
|
|
|
ifp->if_hw_tsomaxsegsize = pmax->tsomaxsegsize;
|
|
|
|
retval++;
|
|
|
|
}
|
|
|
|
if (ifp->if_hw_tsomaxsegcount != pmax->tsomaxsegcount) {
|
|
|
|
ifp->if_hw_tsomaxsegcount = pmax->tsomaxsegcount;
|
|
|
|
retval++;
|
|
|
|
}
|
|
|
|
return (retval);
|
|
|
|
}
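(How an aggregation-style driver might combine the limits of its member
ports, sketched; the port list and field names are assumed rather than
taken from this file.)

	struct ifnet_hw_tsomax hw;
	struct member_port *p;

	memset(&hw, 0, sizeof(hw));
	SLIST_FOREACH(p, &sc->ports, next)		/* assumed member list */
		if_hw_tsomax_common(p->ifp, &hw);	/* keep least common limits */
	if (if_hw_tsomax_update(sc->ifp, &hw) != 0)
		if_printf(sc->ifp, "TSO limits updated\n");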
|
|
|
|
|
2009-05-22 22:09:00 +00:00
|
|
|
static void
|
2015-03-02 20:00:03 +00:00
|
|
|
if_attach_internal(struct ifnet *ifp, int vmove, struct if_clone *ifc)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
unsigned socksize, ifasize;
|
1996-01-24 21:12:23 +00:00
|
|
|
int namelen, masklen;
|
2003-10-23 13:49:10 +00:00
|
|
|
struct sockaddr_dl *sdl;
|
|
|
|
struct ifaddr *ifa;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-06-10 16:49:24 +00:00
|
|
|
if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
|
|
|
|
panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
|
|
|
|
ifp->if_xname);
|
|
|
|
|
Permit building kernels with options VIMAGE, restricted to only a single
active network stack instance. Turning on options VIMAGE at compile
time yields the following changes relative to a default kernel build:
1) V_ accessor macros for virtualized variables resolve to structure
fields via base pointers, instead of being resolved as fields in global
structs or plain global variables. As an example, V_ifnet becomes:
options VIMAGE: ((struct vnet_net *) vnet_net)->_ifnet
default build: vnet_net_0._ifnet
options VIMAGE_GLOBALS: ifnet
2) INIT_VNET_* macros will declare and set up base pointers to be used
by V_ accessor macros, instead of resolving to whitespace:
INIT_VNET_NET(ifp->if_vnet); becomes
struct vnet_net *vnet_net = (ifp->if_vnet)->mod_data[VNET_MOD_NET];
3) Memory for vnet modules registered via vnet_mod_register() is now
allocated at run time in sys/kern/kern_vimage.c, instead of per vnet
module structs being declared as globals. If required, vnet modules
can now request the framework to provide them with allocated bzeroed
memory by filling in the vmi_size field in their vmi_modinfo structures.
4) structs socket, ifnet, inpcbinfo, tcpcb and syncache_head are
extended to hold a pointer to the parent vnet. options VIMAGE builds
will fill in those fields as required.
5) curvnet is introduced as a new global variable in options VIMAGE
builds, always pointing to the default and only struct vnet.
6) struct sysctl_oid has been extended with two additional fields to
store major and minor virtualization module identifiers, oid_v_subs and
oid_v_mod. SYSCTL_V_* family of macros will fill in those fields
accordingly, and store the offset in the appropriate vnet container
struct in oid_arg1.
In sysctl handlers dealing with virtualized sysctls, the
SYSCTL_RESOLVE_V_ARG1() macro will compute the address of the target
variable and make it available in the arg1 variable for further processing.
Unused fields in structs vnet_inet, vnet_inet6 and vnet_ipfw have
been deleted.
Reviewed by: bz, rwatson
Approved by: julian (mentor)
2009-04-30 13:36:26 +00:00
|
|
|
#ifdef VIMAGE
|
|
|
|
ifp->if_vnet = curvnet;
|
2009-06-08 17:15:40 +00:00
|
|
|
if (ifp->if_home_vnet == NULL)
|
|
|
|
ifp->if_home_vnet = curvnet;
|
2009-04-30 13:36:26 +00:00
|
|
|
#endif
|
|
|
|
|
2006-06-19 22:20:45 +00:00
|
|
|
if_addgroup(ifp, IFG_ALL);
|
|
|
|
|
2015-03-02 20:00:03 +00:00
|
|
|
/* Restore group membership for cloned interfaces. */
|
|
|
|
if (vmove && ifc != NULL)
|
|
|
|
if_clone_addgroup(ifp, ifc);
|
|
|
|
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2014-08-31 06:46:21 +00:00
|
|
|
ifp->if_epoch = time_uptime;
|
2009-04-23 10:59:40 +00:00
|
|
|
|
2009-04-16 23:05:10 +00:00
|
|
|
KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) ||
|
|
|
|
(ifp->if_transmit != NULL && ifp->if_qflush != NULL),
|
|
|
|
("transmit and qflush must both either be set or both be NULL"));
|
|
|
|
if (ifp->if_transmit == NULL) {
|
|
|
|
ifp->if_transmit = if_transmit;
|
|
|
|
ifp->if_qflush = if_qflush;
|
|
|
|
}
|
2015-03-12 14:55:33 +00:00
|
|
|
if (ifp->if_input == NULL)
|
|
|
|
ifp->if_input = if_input_default;
|
2014-08-31 06:46:21 +00:00
|
|
|
|
2015-12-31 05:03:27 +00:00
|
|
|
if (ifp->if_requestencap == NULL)
|
|
|
|
ifp->if_requestencap = if_requestencap_default;
|
|
|
|
|
2009-05-22 22:09:00 +00:00
|
|
|
if (!vmove) {
|
2002-07-31 16:16:03 +00:00
|
|
|
#ifdef MAC
|
2009-05-22 22:09:00 +00:00
|
|
|
mac_ifnet_create(ifp);
|
2002-07-31 16:16:03 +00:00
|
|
|
#endif
|
|
|
|
|
2009-05-22 22:09:00 +00:00
|
|
|
/*
|
|
|
|
* Create a Link Level name for this device.
|
|
|
|
*/
|
|
|
|
namelen = strlen(ifp->if_xname);
|
|
|
|
/*
|
|
|
|
* Always save enough space for any possible name so we
|
|
|
|
* can do a rename in place later.
|
|
|
|
*/
|
|
|
|
masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
|
|
|
|
socksize = masklen + ifp->if_addrlen;
|
|
|
|
if (socksize < sizeof(*sdl))
|
|
|
|
socksize = sizeof(*sdl);
|
|
|
|
socksize = roundup2(socksize, sizeof(long));
|
|
|
|
ifasize = sizeof(*ifa) + 2 * socksize;
|
2013-10-15 10:31:42 +00:00
|
|
|
ifa = ifa_alloc(ifasize, M_WAITOK);
|
2009-05-22 22:09:00 +00:00
|
|
|
		sdl = (struct sockaddr_dl *)(ifa + 1);
		sdl->sdl_len = socksize;
		sdl->sdl_family = AF_LINK;
		bcopy(ifp->if_xname, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		sdl->sdl_index = ifp->if_index;
		sdl->sdl_type = ifp->if_type;
		ifp->if_addr = ifa;
		ifa->ifa_ifp = ifp;
		ifa->ifa_addr = (struct sockaddr *)sdl;
		sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
		ifa->ifa_netmask = (struct sockaddr *)sdl;
		sdl->sdl_len = masklen;
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		CK_STAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
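		/*
		 * Added commentary: the 0xff fill above turns the second
		 * sockaddr_dl into a netmask covering exactly the interface
		 * name bytes, so link-level routing entries match on the
		 * name portion of the address just inserted at the head of
		 * if_addrhead.
		 */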
		/* Reliably crash if used uninitialized. */
		ifp->if_broadcastaddr = NULL;

		if (ifp->if_type == IFT_ETHER) {
			ifp->if_hw_addr = malloc(ifp->if_addrlen, M_IFADDR,
			    M_WAITOK | M_ZERO);
		}

#if defined(INET) || defined(INET6)
		/* Use defaults for TSO, if nothing is set */
		if (ifp->if_hw_tsomax == 0 &&
		    ifp->if_hw_tsomaxsegcount == 0 &&
		    ifp->if_hw_tsomaxsegsize == 0) {
			/*
			 * The TSO defaults need to be such that an
			 * NFS mbuf list of 35 mbufs totalling just
			 * below 64K works and that a chain of mbufs
			 * can be defragged into at most 32 segments:
			 */
			ifp->if_hw_tsomax = min(IP_MAXPACKET, (32 * MCLBYTES) -
			    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
			ifp->if_hw_tsomaxsegcount = 35;
			ifp->if_hw_tsomaxsegsize = 2048;	/* 2K */

			/* XXX some drivers set IFCAP_TSO after ethernet attach */
			if (ifp->if_capabilities & IFCAP_TSO) {
				if_printf(ifp, "Using defaults for TSO: %u/%u/%u\n",
				    ifp->if_hw_tsomax,
				    ifp->if_hw_tsomaxsegcount,
				    ifp->if_hw_tsomaxsegsize);
			}
		}
#endif
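		/*
		 * Added commentary (assuming the common MCLBYTES of 2048):
		 * the default if_hw_tsomax works out to
		 * min(65535, 65536 - 18) = 65518 bytes, i.e. just under 64K
		 * once the Ethernet and VLAN headers are accounted for,
		 * matching the NFS 35-mbuf case described above.
		 */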
	}
#ifdef VIMAGE
	else {
		/*
		 * Update the interface index in the link layer address
		 * of the interface.
		 */
		for (ifa = ifp->if_addr; ifa != NULL;
		    ifa = CK_STAILQ_NEXT(ifa, ifa_link)) {
			if (ifa->ifa_addr->sa_family == AF_LINK) {
				sdl = (struct sockaddr_dl *)ifa->ifa_addr;
				sdl->sdl_index = ifp->if_index;
			}
		}
	}
#endif

	IFNET_WLOCK();
	CK_STAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link);

Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The curvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to the previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at an mbuf's
m->m_pkthdr.rcvif->if_vnet, a socket's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, with an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking-only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typical cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by:	julian (mentor)
2009-05-05 10:56:12 +00:00
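
An illustrative sketch of the macro usage described above (foo_ioctl() and
foo_handle() are hypothetical names, not functions in this file):

	static int
	foo_ioctl(struct socket *so, u_long cmd, caddr_t data)
	{
		int error;

		CURVNET_SET(so->so_vnet);	/* enter the socket's vnet */
		error = foo_handle(cmd, data);
		CURVNET_RESTORE();		/* revert to the previous vnet */
		return (error);
	}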
#ifdef VIMAGE
	curvnet->vnet_ifcnt++;
#endif
	IFNET_WUNLOCK();

	if (domain_init_status >= 2)
		if_attachdomain1(ifp);

	EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

	/* Announce the interface. */
	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}

static void
if_epochalloc(void *dummy __unused)
{

	net_epoch_preempt = epoch_alloc("Net preemptible", EPOCH_PREEMPT);
	net_epoch = epoch_alloc("Net", 0);
}
SYSINIT(ifepochalloc, SI_SUB_TASKQ + 1, SI_ORDER_ANY,
    if_epochalloc, NULL);
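
/*
 * Added commentary: the SYSINIT above runs at SI_SUB_TASKQ + 1, early in
 * boot, presumably so that both epochs exist before the first interface
 * attach or detach needs epoch protection.
 */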

static void
if_attachdomain(void *dummy)
{
	struct ifnet *ifp;

	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		if_attachdomain1(ifp);
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
    if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
	struct domain *dp;

	/*
	 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
	 * cannot lock ifp->if_afdata initialization entirely.
	 */
	IF_AFDATA_LOCK(ifp);
	if (ifp->if_afdata_initialized >= domain_init_status) {
		IF_AFDATA_UNLOCK(ifp);
		log(LOG_WARNING, "%s called more than once on %s\n",
		    __func__, ifp->if_xname);
		return;
	}
	ifp->if_afdata_initialized = domain_init_status;
	IF_AFDATA_UNLOCK(ifp);

	/* address family dependent data region */
	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
	for (dp = domains; dp; dp = dp->dom_next) {
		if (dp->dom_ifattach)
			ifp->if_afdata[dp->dom_family] =
			    (*dp->dom_ifattach)(ifp);
	}
}
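
/*
 * Added commentary (example for illustration): a domain's dom_ifattach
 * hook returns a per-interface blob that is stored in if_afdata[family];
 * the INET6 domain, for instance, hangs its per-ifnet state off
 * if_afdata[AF_INET6] here, and the matching dom_ifdetach frees it again
 * when the interface goes away.
 */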

/*
 * Remove any unicast or broadcast network addresses from an interface.
 */
void
if_purgeaddrs(struct ifnet *ifp)
{
	struct ifaddr *ifa;

	while (1) {
		struct epoch_tracker et;

		NET_EPOCH_ENTER(et);
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_LINK)
				break;
		}
		NET_EPOCH_EXIT(et);

		if (ifa == NULL)
			break;
#ifdef INET
		/* XXX: Ugly!! ad hoc just for INET */
		if (ifa->ifa_addr->sa_family == AF_INET) {
			struct ifaliasreq ifr;

			bzero(&ifr, sizeof(ifr));
			ifr.ifra_addr = *ifa->ifa_addr;
			if (ifa->ifa_dstaddr)
				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
			    NULL) == 0)
				continue;
		}
#endif /* INET */
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6) {
			in6_purgeaddr(ifa);
			/* ifp_addrhead is already updated */
			continue;
		}
#endif /* INET6 */
		IF_ADDR_WLOCK(ifp);
		CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr, ifa_link);
		IF_ADDR_WUNLOCK(ifp);
		ifa_free(ifa);
	}
}
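
/*
 * Added commentary: the epoch section in if_purgeaddrs() above only guards
 * the list walk that picks the next non-AF_LINK address; the actual removal
 * (in_control(), in6_purgeaddr(), ifa_free()) runs outside of it, since
 * those paths may sleep while epoch sections must not.
 */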

/*
 * Remove any multicast network addresses from an interface when an ifnet
 * is going away.
 */
static void
if_purgemaddrs(struct ifnet *ifp)
{
	struct ifmultiaddr *ifma;

	IF_ADDR_WLOCK(ifp);
	while (!CK_STAILQ_EMPTY(&ifp->if_multiaddrs)) {
		ifma = CK_STAILQ_FIRST(&ifp->if_multiaddrs);
		CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr,
		    ifma_link);
		if_delmulti_locked(ifp, ifma, 1);
	}
	IF_ADDR_WUNLOCK(ifp);
}

/*
 * Detach an interface, removing it from the list of "active" interfaces.
 * If the vmove flag is set on entry to if_detach_internal(), perform only a
 * limited subset of cleanup tasks, given that we are moving an ifnet from
 * one vnet to another, where it must be fully operational.
 *
 * XXXRW: There are some significant questions about event ordering, and
 * how to prevent things from starting to use the interface during detach.
 */
void
if_detach(struct ifnet *ifp)
{

	CURVNET_SET_QUIET(ifp->if_vnet);
	if_detach_internal(ifp, 0, NULL);
	CURVNET_RESTORE();
}
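
/*
 * Added usage note (illustrative): a typical Ethernet driver does not call
 * if_detach() directly; its device detach routine calls ether_ifdetach(),
 * which in turn calls if_detach() before the driver finally frees its
 * ifnet with if_free().
 */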

Get closer to a VIMAGE network stack teardown from top to bottom rather
than removing the network interfaces first. This change is rather large
and convoluted, as the ordering requirements cannot be separated.
Move the pfil(9) framework to SI_SUB_PROTO_PFIL, move firewalls and
related modules to their own SI_SUB_PROTO_FIREWALL.
Move initialization of "physical" interfaces to SI_SUB_DRIVERS,
move virtual (cloned) interfaces to SI_SUB_PSEUDO.
Move multicast to SI_SUB_PROTO_MC.
Re-work parts of multicast initialisation and teardown, not yet taking
the huge amount of memory into account if used as a module.
For interface teardown we try to do as many of them as we can on
SI_SUB_INIT_IF, but for some this makes no sense, e.g., when tunnelling
over a higher layer protocol such as IP. In that case the interface
has to go when (or before) the higher layer protocol is shut down.
Kernel hhooks need to go last on teardown as they may be used at various
higher layers and we cannot remove them before we have cleaned up the
higher layers.
For interface teardown there are multiple paths:
(a) a cloned interface is destroyed (inside a VIMAGE or in the base system),
(b) any interface is moved from a virtual network stack to a different
network stack ("vmove"), or (c) a virtual network stack is being shut down.
All code paths go through if_detach_internal() where we, depending on the
vmove flag or the vnet state, make a decision on how much to shut down;
in case we are destroying a VNET the individual protocol layers will
clean up their own parts; thus we cannot do so again for each interface,
as we would end up with, e.g., double frees, destroying locks twice or
acquiring already destroyed locks.
When calling into protocol cleanups we equally have to tell them
whether they need to detach upper layer protocols ("ulp") or not
(e.g., in6_ifdetach()).
Provide or enhance helper functions to do proper cleanup at a protocol
level rather than at an interface level.
Approved by:	re (hrs)
Obtained from:	projects/vnet
Reviewed by:	gnn, jhb
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D6747
2016-06-21 13:48:49 +00:00

/*
 * The vmove flag, if set, indicates that we are called from a callpath
 * that is moving an interface to a different vnet instance.
 *
 * The shutdown flag, if set, indicates that we are called in the
 * process of shutting down a vnet instance.  Currently only the
 * vnet_if_return SYSUNINIT function sets it.  Note: we can be called
 * on a vnet instance shutdown without this flag being set, e.g., when
 * the cloned interfaces are destroyed as the first step of teardown.
 */
static int
if_detach_internal(struct ifnet *ifp, int vmove, struct if_clone **ifcp)
{
	struct ifaddr *ifa;
	int i;
	struct domain *dp;
	struct ifnet *iter;
	int found = 0;
#ifdef VIMAGE
	bool shutdown;

	shutdown = ifp->if_vnet->vnet_shutdown;
#endif
	IFNET_WLOCK();
	CK_STAILQ_FOREACH(iter, &V_ifnet, if_link)
		if (iter == ifp) {
			CK_STAILQ_REMOVE(&V_ifnet, ifp, ifnet, if_link);
			if (!vmove)
				ifp->if_flags |= IFF_DYING;
			found = 1;
			break;
		}
	IFNET_WUNLOCK();
	if (!found) {
		/*
		 * While we would want to panic here, we cannot
		 * guarantee that the interface is indeed still on
		 * the list given we don't hold locks all the way.
		 */
		return (ENOENT);
#if 0
		if (vmove)
			panic("%s: ifp=%p not on the ifnet tailq %p",
			    __func__, ifp, &V_ifnet);
		else
			return; /* XXX this should panic as well? */
#endif
	}

	/*
	 * At this point we know the interface still was on the ifnet list
	 * and we removed it, so we are in a stable state.
	 */
#ifdef VIMAGE
	curvnet->vnet_ifcnt--;
#endif
	epoch_wait_preempt(net_epoch_preempt);

	/*
	 * Ensure all pending EPOCH(9) callbacks have been executed.  This
	 * fixes issues with late destruction of multicast options, which
	 * lead to leave-group calls that in turn access the ifnet structure
	 * the options belong to:
	 */
	epoch_drain_callbacks(net_epoch_preempt);
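
	/*
	 * Added commentary: epoch_wait_preempt() above only waits for
	 * current readers to leave their epoch sections, while
	 * epoch_drain_callbacks() additionally waits until every deferred
	 * callback already queued via epoch_call() has run.
	 */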

	/*
	 * In any case (destroy or vmove) detach us from the groups
	 * and remove/wait for pending events on the taskq.
	 * XXX-BZ in theory an interface could still enqueue a taskq change?
	 */
	if_delgroups(ifp);

	taskqueue_drain(taskqueue_swi, &ifp->if_linktask);

	/*
	 * Check if this is a cloned interface or not.  This must be done
	 * even if we are shutting down, as an if_vmove_reclaim() would move
	 * the ifp and if_clone_addgroup() would otherwise read a corrupted
	 * string from a gibberish pointer.
	 */
	if (vmove && ifcp != NULL)
		*ifcp = if_clone_findifc(ifp);

	if_down(ifp);

#ifdef VIMAGE
	/*
	 * On VNET shutdown abort here as the stack teardown will do all
	 * the work top-down for us.
	 */
	if (shutdown) {
		/* Give interface users the chance to clean up. */
		EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);

		/*
		 * In case of a vmove we are done here without error.
		 * If we signalled an error it would lead to the same
		 * abort as if we had not found the ifnet anymore.
		 * if_detach() calls us in void context and does not care
		 * about an early abort notification, so life is splendid :)
		 */
		goto finish_vnet_shutdown;
	}
#endif

	/*
	 * At this point we are not tearing down a VNET and are either
	 * going to destroy or vmove the interface and have to clean up
	 * accordingly.
	 */

	/*
	 * Remove routes and flush queues.
	 */
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(&ifp->if_snd))
		altq_disable(&ifp->if_snd);
	if (ALTQ_IS_ATTACHED(&ifp->if_snd))
		altq_detach(&ifp->if_snd);
#endif

	if_purgeaddrs(ifp);

#ifdef INET
	in_ifdetach(ifp);
#endif

#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect some inconsistency and bark
	 * about it.
	 */
	in6_ifdetach(ifp);
#endif
	if_purgemaddrs(ifp);

	/* Announce that the interface is gone. */
	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
	EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

	if (!vmove) {
		/*
		 * Prevent further calls into the device driver via ifnet.
		 */
		if_dead(ifp);

		/*
		 * Clean up all addresses.
		 */
		IF_ADDR_WLOCK(ifp);
		if (!CK_STAILQ_EMPTY(&ifp->if_addrhead)) {
			ifa = CK_STAILQ_FIRST(&ifp->if_addrhead);
			CK_STAILQ_REMOVE(&ifp->if_addrhead, ifa, ifaddr,
			    ifa_link);
			IF_ADDR_WUNLOCK(ifp);
			ifa_free(ifa);
		} else
			IF_ADDR_WUNLOCK(ifp);
	}
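	/*
	 * Added commentary: after if_purgeaddrs() earlier, only the AF_LINK
	 * address installed at attach time should remain on if_addrhead,
	 * which is why a single CK_STAILQ_FIRST()/ifa_free() above suffices.
	 */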

	rt_flushifroutes(ifp);

#ifdef VIMAGE
finish_vnet_shutdown:
#endif
	/*
	 * We cannot hold the lock over dom_ifdetach calls as they might
	 * sleep, for example trying to drain a callout, and thus open up
	 * a theoretical race with re-attaching.
	 */
	IF_AFDATA_LOCK(ifp);
	i = ifp->if_afdata_initialized;
	ifp->if_afdata_initialized = 0;
	IF_AFDATA_UNLOCK(ifp);
	for (dp = domains; i > 0 && dp; dp = dp->dom_next) {
		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) {
			(*dp->dom_ifdetach)(ifp,
			    ifp->if_afdata[dp->dom_family]);
			ifp->if_afdata[dp->dom_family] = NULL;
		}
	}
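	/*
	 * Added commentary: 'i' snapshots if_afdata_initialized under the
	 * lock; the loop above only walks the domains when the per-family
	 * data had actually been set up, so dom_ifdetach is never invoked
	 * twice for the same interface.
	 */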

	return (0);
}

Permit building kernels with options VIMAGE, restricted to only a single
active network stack instance. Turning on options VIMAGE at compile
time yields the following changes relative to the default kernel build:
1) V_ accessor macros for virtualized variables resolve to structure
fields via base pointers, instead of being resolved as fields in global
structs or plain global variables. As an example, V_ifnet becomes:
options VIMAGE: ((struct vnet_net *) vnet_net)->_ifnet
default build: vnet_net_0._ifnet
options VIMAGE_GLOBALS: ifnet
2) INIT_VNET_* macros will declare and set up base pointers to be used
by V_ accessor macros, instead of resolving to whitespace:
INIT_VNET_NET(ifp->if_vnet); becomes
struct vnet_net *vnet_net = (ifp->if_vnet)->mod_data[VNET_MOD_NET];
3) Memory for vnet modules registered via vnet_mod_register() is now
allocated at run time in sys/kern/kern_vimage.c, instead of per-vnet
module structs being declared as globals. If required, vnet modules
can now request the framework to provide them with allocated bzeroed
memory by filling in the vmi_size field in their vmi_modinfo structures.
4) structs socket, ifnet, inpcbinfo, tcpcb and syncache_head are
extended to hold a pointer to the parent vnet. options VIMAGE builds
will fill in those fields as required.
5) curvnet is introduced as a new global variable in options VIMAGE
builds, always pointing to the default and only struct vnet.
6) struct sysctl_oid has been extended with two additional fields to
store major and minor virtualization module identifiers, oid_v_subs and
oid_v_mod. The SYSCTL_V_* family of macros will fill in those fields
accordingly, and store the offset in the appropriate vnet container
struct in oid_arg1.
In sysctl handlers dealing with virtualized sysctls, the
SYSCTL_RESOLVE_V_ARG1() macro will compute the address of the target
variable and make it available in the arg1 variable for further
processing.
Unused fields in structs vnet_inet, vnet_inet6 and vnet_ipfw have
been deleted.
Reviewed by:	bz, rwatson
Approved by:	julian (mentor)
2009-04-30 13:36:26 +00:00
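
A minimal sketch of the accessor indirection described above, built from the
expansion examples given in the text (simplified; the real macros carry more
decoration):

	#ifdef VIMAGE
	#define	V_ifnet	(((struct vnet_net *)vnet_net)->_ifnet)
	#else
	#define	V_ifnet	(vnet_net_0._ifnet)
	#endif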
#ifdef VIMAGE
|
Introduce the if_vmove() function, which will be used in the future
for reassigning ifnets from one vnet to another.
if_vmove() works by calling a restricted subset of actions normally
executed by if_detach() on an ifnet in the current vnet, and then
switches to the target vnet and executes an appropriate subset of
if_attach() actions there.
if_attach() and if_detach() have become wrapper functions around
if_attach_internal() and if_detach_internal(), where the later
variants have an additional argument, a flag indicating whether a
full attach or detach sequence is to be executed, or only a
restricted subset suitable for moving an ifnet from one vnet to
another. Hence, if_vmove() will not call if_detach() and if_attach()
directly, but will call the if_detach_internal() and
if_attach_internal() variants instead, with the vmove flag set.
While here, staticize ifnet_setbyindex() since it is not referenced
from outside of sys/net/if.c.
Also rename ifccnt field in struct vimage to ifcnt, and do some minor
whitespace garbage collection where appropriate.
This change should have no functional impact on nooptions VIMAGE kernel
builds.
Reviewed by: bz, rwatson, brooks?
Approved by: julian (mentor)
2009-05-22 22:09:00 +00:00
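A minimal sketch of the wrapper pattern described above (simplified;
the exact signature is an assumption here, though the vmove flag value
matches the _internal calls visible later in this function):
/*
 * if_attach() remains the public entry point and requests the full
 * sequence; if_vmove() instead calls the _internal variants with the
 * vmove flag set.
 */
void
if_attach(struct ifnet *ifp)
{

	if_attach_internal(ifp, 0, NULL);	/* 0 == full attach */
}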
|
|
|
/*
|
|
|
|
* if_vmove() performs a limited version of if_detach() in current
|
|
|
|
* vnet and if_attach()es the ifnet to the vnet specified as 2nd arg.
|
|
|
|
* An attempt is made to shrink if_index in current vnet, find an
|
|
|
|
* unused if_index in target vnet, calling if_grow() if necessary,
|
|
|
|
* and finally find an unused if_xname for the target vnet.
|
|
|
|
*/
|
Get closer to a VIMAGE network stack teardown from top to bottom rather
than removing the network interfaces first. This change is rather large
and convoluted as the ordering requirements cannot be separated.
Move the pfil(9) framework to SI_SUB_PROTO_PFIL, move firewalls and
related modules to their own SI_SUB_PROTO_FIREWALL.
Move initialization of "physical" interfaces to SI_SUB_DRIVERS,
move virtual (cloned) interfaces to SI_SUB_PSEUDO.
Move multicast to SI_SUB_PROTO_MC.
Re-work parts of multicast initialisation and teardown; the large
amount of memory used when built as a module is not yet taken into
account.
For interface teardown we try to do as many of them as we can on
SI_SUB_INIT_IF, but for some this makes no sense, e.g., when tunnelling
over a higher-layer protocol such as IP. In that case the interface
has to go along with (or before) the higher-layer protocol when it is
shut down.
Kernel hhooks need to go last on teardown as they may be used at various
higher layers, and we cannot remove them before we have cleaned up the
higher layers.
For interface teardown there are multiple paths:
(a) a cloned interface is destroyed (inside a VIMAGE or in the base system),
(b) any interface is moved from a virtual network stack to a different
network stack ("vmove"), or (c) a virtual network stack is being shut down.
All code paths go through if_detach_internal(), where, depending on the
vmove flag or the vnet state, we decide how much to shut down;
in case we are destroying a VNET, the individual protocol layers will
clean up their own parts, so we cannot do so again for each interface or
we end up with, e.g., double-frees, destroying locks twice, or acquiring
already destroyed locks.
When calling into protocol cleanups we equally have to tell them
whether they need to detach upper-layer protocols ("ulp") or not
(e.g., in6_ifdetach()).
Provide or enhance helper functions to do proper cleanup at the protocol
rather than the interface level.
Approved by: re (hrs)
Obtained from: projects/vnet
Reviewed by: gnn, jhb
Sponsored by: The FreeBSD Foundation
MFC after: 2 weeks
Differential Revision: https://reviews.freebsd.org/D6747
2016-06-21 13:48:49 +00:00
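As a hedged illustration of the SYSINIT ordering above (foo and its
setup function are hypothetical):
#include <sys/param.h>
#include <sys/kernel.h>

static void
foo_clone_setup(const void *unused __unused)
{
	/* A cloned-interface module would register its cloner here. */
}
/* Virtual (cloned) interfaces now initialize at SI_SUB_PSEUDO. */
SYSINIT(foo_clone_setup, SI_SUB_PSEUDO, SI_ORDER_ANY, foo_clone_setup, NULL);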
|
|
|
static void
|
2009-05-22 22:09:00 +00:00
|
|
|
if_vmove(struct ifnet *ifp, struct vnet *new_vnet)
|
|
|
|
{
|
2015-03-02 20:00:03 +00:00
|
|
|
struct if_clone *ifc;
|
2016-04-11 10:00:38 +00:00
|
|
|
u_int bif_dlt, bif_hdrlen;
|
2018-05-23 21:02:14 +00:00
|
|
|
void *old;
|
2015-12-22 15:03:45 +00:00
|
|
|
int rc;
|
2009-05-22 22:09:00 +00:00
|
|
|
|
2016-04-11 10:00:38 +00:00
|
|
|
/*
|
|
|
|
* if_detach_internal() will call the eventhandler to notify
|
|
|
|
* interface departure. That will detach if_bpf. We need to
|
|
|
|
* save the dlt and hdrlen so we can re-attach them later.
|
|
|
|
*/
|
|
|
|
bpf_get_bp_params(ifp->if_bpf, &bif_dlt, &bif_hdrlen);
|
|
|
|
|
2009-05-22 22:09:00 +00:00
|
|
|
/*
|
|
|
|
* Detach from current vnet, but preserve LLADDR info, do not
|
|
|
|
* mark as dead etc. so that the ifnet can be reattached later.
|
2015-12-22 15:03:45 +00:00
|
|
|
* If we cannot find it, we lost the race to someone else.
|
2009-05-22 22:09:00 +00:00
|
|
|
*/
|
2015-12-22 15:03:45 +00:00
|
|
|
rc = if_detach_internal(ifp, 1, &ifc);
|
|
|
|
if (rc != 0)
|
|
|
|
return;
|
2009-05-22 22:09:00 +00:00
|
|
|
|
|
|
|
/*
|
2009-08-23 20:40:19 +00:00
|
|
|
* Unlink the ifnet from ifindex_table[] in current vnet, and shrink
|
|
|
|
* the if_index for that vnet if possible.
|
|
|
|
*
|
|
|
|
* NOTE: IFNET_WLOCK/IFNET_WUNLOCK() are assumed to be unvirtualized,
|
|
|
|
* or we'd lock on one vnet and unlock on another.
|
2009-05-22 22:09:00 +00:00
|
|
|
*/
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing them in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
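The resulting programming model, sketched with a hypothetical variable
(VNET_DEFINE()/VNET() being the accessor macros this change adds in
sys/net/vnet.h):
VNET_DEFINE(int, foo_enable) = 1;	/* reference copy, statically initialized */
#define	V_foo_enable	VNET(foo_enable)	/* per-vnet access under VIMAGE */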
|
|
|
IFNET_WLOCK();
|
2009-08-26 11:13:10 +00:00
|
|
|
ifindex_free_locked(ifp->if_index);
|
2010-08-13 18:17:32 +00:00
|
|
|
IFNET_WUNLOCK();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform interface-specific reassignment tasks, if provided by
|
|
|
|
* the driver.
|
|
|
|
*/
|
|
|
|
if (ifp->if_reassign != NULL)
|
|
|
|
ifp->if_reassign(ifp, new_vnet, NULL);
|
2009-05-22 22:09:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Switch to the context of the target vnet.
|
|
|
|
*/
|
|
|
|
CURVNET_SET_QUIET(new_vnet);
|
2018-05-23 21:02:14 +00:00
|
|
|
restart:
|
2010-08-13 18:17:32 +00:00
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
ifp->if_index = ifindex_alloc(&old);
|
|
|
|
if (__predict_false(ifp->if_index == USHRT_MAX)) {
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
epoch_wait_preempt(net_epoch_preempt);
|
|
|
|
free(old, M_IFNET);
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
ifnet_setbyindex(ifp->if_index, ifp);
|
2009-05-22 22:09:00 +00:00
|
|
|
IFNET_WUNLOCK();
|
|
|
|
|
2015-03-02 20:00:03 +00:00
|
|
|
if_attach_internal(ifp, 1, ifc);
|
2009-05-22 22:09:00 +00:00
|
|
|
|
2016-04-11 10:00:38 +00:00
|
|
|
if (ifp->if_bpf == NULL)
|
|
|
|
bpfattach(ifp, bif_dlt, bif_hdrlen);
|
|
|
|
|
2009-05-22 22:09:00 +00:00
|
|
|
CURVNET_RESTORE();
|
1999-12-17 06:46:07 +00:00
|
|
|
}
|
2009-07-26 11:29:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Move an ifnet to or from another child prison/vnet, specified by the jail id.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
if_vmove_loan(struct thread *td, struct ifnet *ifp, char *ifname, int jid)
|
|
|
|
{
|
|
|
|
struct prison *pr;
|
|
|
|
struct ifnet *difp;
|
|
|
|
|
|
|
|
/* Try to find the prison within our visibility. */
|
|
|
|
sx_slock(&allprison_lock);
|
|
|
|
pr = prison_find_child(td->td_ucred->cr_prison, jid);
|
|
|
|
sx_sunlock(&allprison_lock);
|
|
|
|
if (pr == NULL)
|
|
|
|
return (ENXIO);
|
|
|
|
prison_hold_locked(pr);
|
|
|
|
mtx_unlock(&pr->pr_mtx);
|
|
|
|
|
|
|
|
/* Do not try to move the iface from and to the same prison. */
|
|
|
|
if (pr->pr_vnet == ifp->if_vnet) {
|
|
|
|
prison_free(pr);
|
|
|
|
return (EEXIST);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make sure the named iface does not exist in the dst. prison/vnet. */
|
|
|
|
/* XXX Lock interfaces to avoid races. */
|
2009-08-14 22:46:45 +00:00
|
|
|
CURVNET_SET_QUIET(pr->pr_vnet);
|
2009-07-26 11:29:26 +00:00
|
|
|
difp = ifunit(ifname);
|
|
|
|
if (difp != NULL) {
|
2016-06-21 13:48:49 +00:00
|
|
|
CURVNET_RESTORE();
|
2009-07-26 11:29:26 +00:00
|
|
|
prison_free(pr);
|
|
|
|
return (EEXIST);
|
|
|
|
}
|
|
|
|
|
2016-06-21 13:48:49 +00:00
|
|
|
/* Make sure the VNET is stable. */
|
2019-10-07 14:15:41 +00:00
|
|
|
if (ifp->if_vnet->vnet_shutdown) {
|
2016-06-21 13:48:49 +00:00
|
|
|
CURVNET_RESTORE();
|
|
|
|
prison_free(pr);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
|
2009-07-26 11:29:26 +00:00
|
|
|
/* Move the interface into the child jail/vnet. */
|
|
|
|
if_vmove(ifp, pr->pr_vnet);
|
|
|
|
|
|
|
|
/* Report the new if_xname back to userland. */
|
|
|
|
sprintf(ifname, "%s", ifp->if_xname);
|
|
|
|
|
|
|
|
prison_free(pr);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
if_vmove_reclaim(struct thread *td, char *ifname, int jid)
|
|
|
|
{
|
|
|
|
struct prison *pr;
|
|
|
|
struct vnet *vnet_dst;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
|
|
|
|
/* Try to find the prison within our visibility. */
|
|
|
|
sx_slock(&allprison_lock);
|
|
|
|
pr = prison_find_child(td->td_ucred->cr_prison, jid);
|
|
|
|
sx_sunlock(&allprison_lock);
|
|
|
|
if (pr == NULL)
|
|
|
|
return (ENXIO);
|
|
|
|
prison_hold_locked(pr);
|
|
|
|
mtx_unlock(&pr->pr_mtx);
|
|
|
|
|
|
|
|
/* Make sure the named iface exists in the source prison/vnet. */
|
|
|
|
CURVNET_SET(pr->pr_vnet);
|
|
|
|
ifp = ifunit(ifname); /* XXX Lock to avoid races. */
|
|
|
|
if (ifp == NULL) {
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
prison_free(pr);
|
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Do not try to move the iface from and to the same prison. */
|
|
|
|
vnet_dst = TD_TO_VNET(td);
|
|
|
|
if (vnet_dst == ifp->if_vnet) {
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
prison_free(pr);
|
|
|
|
return (EEXIST);
|
|
|
|
}
|
|
|
|
|
2016-06-21 13:48:49 +00:00
|
|
|
/* Make sure the VNET is stable. */
|
2019-10-07 14:15:41 +00:00
|
|
|
if (ifp->if_vnet->vnet_shutdown) {
|
2016-06-21 13:48:49 +00:00
|
|
|
CURVNET_RESTORE();
|
|
|
|
prison_free(pr);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
|
2009-07-26 11:29:26 +00:00
|
|
|
/* Get interface back from child jail/vnet. */
|
|
|
|
if_vmove(ifp, vnet_dst);
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
|
|
|
|
/* Report the new if_xname back to userland. */
|
|
|
|
sprintf(ifname, "%s", ifp->if_xname);
|
|
|
|
|
|
|
|
prison_free(pr);
|
|
|
|
return (0);
|
|
|
|
}
|
2009-05-22 22:09:00 +00:00
|
|
|
#endif /* VIMAGE */
|
1999-12-17 06:46:07 +00:00
|
|
|
|
2006-06-19 22:20:45 +00:00
|
|
|
/*
|
|
|
|
* Add a group to an interface
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_addgroup(struct ifnet *ifp, const char *groupname)
|
|
|
|
{
|
|
|
|
struct ifg_list *ifgl;
|
|
|
|
struct ifg_group *ifg = NULL;
|
|
|
|
struct ifg_member *ifgm;
|
Merge the projects/pf/head branch, which was worked on for the last six
months, into head. The most significant achievements in the new code:
o Fine-grained locking, thus much better performance.
o Fixes to many problems in pf that were specific to the FreeBSD port.
The new code doesn't have as many ifdefs and has far fewer OpenBSDisms,
and is thus more attractive to our developers.
Those interested in details can browse through the SVN log of the
projects/pf/head branch. And for reference, here is the exact list of
revisions merged:
r232043, r232044, r232062, r232148, r232149, r232150, r232298, r232330,
r232332, r232340, r232386, r232390, r232391, r232605, r232655, r232656,
r232661, r232662, r232663, r232664, r232673, r232691, r233309, r233782,
r233829, r233830, r233834, r233835, r233836, r233865, r233866, r233868,
r233873, r234056, r234096, r234100, r234108, r234175, r234187, r234223,
r234271, r234272, r234282, r234307, r234309, r234382, r234384, r234456,
r234486, r234606, r234640, r234641, r234642, r234644, r234651, r235505,
r235506, r235535, r235605, r235606, r235826, r235991, r235993, r236168,
r236173, r236179, r236180, r236181, r236186, r236223, r236227, r236230,
r236252, r236254, r236298, r236299, r236300, r236301, r236397, r236398,
r236399, r236499, r236512, r236513, r236525, r236526, r236545, r236548,
r236553, r236554, r236556, r236557, r236561, r236570, r236630, r236672,
r236673, r236679, r236706, r236710, r236718, r237154, r237155, r237169,
r237314, r237363, r237364, r237368, r237369, r237376, r237440, r237442,
r237751, r237783, r237784, r237785, r237788, r237791, r238421, r238522,
r238523, r238524, r238525, r239173, r239186, r239644, r239652, r239661,
r239773, r240125, r240130, r240131, r240136, r240186, r240196, r240212.
I'd like to thank people who participated in early testing:
Tested by: Florian Smeets <flo freebsd.org>
Tested by: Chekaluk Vitaly <artemrts ukr.net>
Tested by: Ben Wilber <ben desync.com>
Tested by: Ian FREISLICH <ianf cloudseed.co.za>
2012-09-08 06:41:54 +00:00
|
|
|
int new = 0;
|
2006-06-19 22:20:45 +00:00
|
|
|
|
|
|
|
if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
|
|
|
|
groupname[strlen(groupname) - 1] <= '9')
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
return (EEXIST);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
|
|
|
|
M_NOWAIT)) == NULL) {
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
|
|
|
|
M_TEMP, M_NOWAIT)) == NULL) {
|
|
|
|
free(ifgl, M_TEMP);
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
if (!strcmp(ifg->ifg_group, groupname))
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (ifg == NULL) {
|
|
|
|
if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
|
|
|
|
M_TEMP, M_NOWAIT)) == NULL) {
|
|
|
|
free(ifgl, M_TEMP);
|
|
|
|
free(ifgm, M_TEMP);
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
|
|
|
|
ifg->ifg_refcnt = 0;
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_INIT(&ifg->ifg_members);
|
|
|
|
CK_STAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next);
|
2012-09-08 06:41:54 +00:00
|
|
|
new = 1;
|
2006-06-19 22:20:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ifg->ifg_refcnt++;
|
|
|
|
ifgl->ifgl_group = ifg;
|
|
|
|
ifgm->ifgm_ifp = ifp;
|
|
|
|
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
|
|
|
|
CK_STAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2006-06-19 22:20:45 +00:00
|
|
|
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
|
2012-09-08 06:41:54 +00:00
|
|
|
if (new)
|
|
|
|
EVENTHANDLER_INVOKE(group_attach_event, ifg);
|
2006-06-19 22:20:45 +00:00
|
|
|
EVENTHANDLER_INVOKE(group_change_event, groupname);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a group from an interface
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_delgroup(struct ifnet *ifp, const char *groupname)
|
|
|
|
{
|
|
|
|
struct ifg_list *ifgl;
|
|
|
|
struct ifg_member *ifgm;
|
2018-05-23 21:02:14 +00:00
|
|
|
int freeifgl;
|
2006-06-19 22:20:45 +00:00
|
|
|
|
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
|
|
|
|
break;
|
|
|
|
if (ifgl == NULL) {
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
freeifgl = 0;
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_REMOVE(&ifp->if_groups, ifgl, ifg_list, ifgl_next);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2006-06-19 22:20:45 +00:00
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
if (ifgm->ifgm_ifp == ifp)
|
|
|
|
break;
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
if (ifgm != NULL)
|
|
|
|
CK_STAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifg_member, ifgm_next);
|
2006-06-19 22:20:45 +00:00
|
|
|
|
|
|
|
if (--ifgl->ifgl_group->ifg_refcnt == 0) {
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_group, ifg_next);
|
|
|
|
freeifgl = 1;
|
|
|
|
}
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
|
|
|
|
epoch_wait_preempt(net_epoch_preempt);
|
|
|
|
if (freeifgl) {
|
2006-06-19 22:20:45 +00:00
|
|
|
EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
|
|
|
|
free(ifgl->ifgl_group, M_TEMP);
|
2018-05-23 21:02:14 +00:00
|
|
|
}
|
|
|
|
free(ifgm, M_TEMP);
|
2006-06-19 22:20:45 +00:00
|
|
|
free(ifgl, M_TEMP);
|
|
|
|
|
|
|
|
EVENTHANDLER_INVOKE(group_change_event, groupname);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2009-04-10 19:16:14 +00:00
|
|
|
/*
|
|
|
|
* Remove an interface from all groups
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
if_delgroups(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct ifg_list *ifgl;
|
|
|
|
struct ifg_member *ifgm;
|
|
|
|
char groupname[IFNAMSIZ];
|
2018-05-23 21:02:14 +00:00
|
|
|
int ifglfree;
|
2009-04-10 19:16:14 +00:00
|
|
|
|
|
|
|
IFNET_WLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
while (!CK_STAILQ_EMPTY(&ifp->if_groups)) {
|
|
|
|
ifgl = CK_STAILQ_FIRST(&ifp->if_groups);
|
2009-04-10 19:16:14 +00:00
|
|
|
|
|
|
|
strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ);
|
|
|
|
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_REMOVE(&ifp->if_groups, ifgl, ifg_list, ifgl_next);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2009-04-10 19:16:14 +00:00
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
|
2009-04-10 19:16:14 +00:00
|
|
|
if (ifgm->ifgm_ifp == ifp)
|
|
|
|
break;
|
|
|
|
|
2018-05-23 21:02:14 +00:00
|
|
|
if (ifgm != NULL)
|
|
|
|
CK_STAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifg_member,
|
2009-04-10 19:16:14 +00:00
|
|
|
ifgm_next);
|
2018-05-23 21:02:14 +00:00
|
|
|
ifglfree = 0;
|
|
|
|
if (--ifgl->ifgl_group->ifg_refcnt == 0) {
|
|
|
|
CK_STAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_group, ifg_next);
|
|
|
|
ifglfree = 1;
|
2009-04-10 19:16:14 +00:00
|
|
|
}
|
|
|
|
|
2018-05-24 17:54:08 +00:00
|
|
|
IFNET_WUNLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
epoch_wait_preempt(net_epoch_preempt);
|
|
|
|
free(ifgm, M_TEMP);
|
|
|
|
if (ifglfree) {
|
2009-04-10 19:16:14 +00:00
|
|
|
EVENTHANDLER_INVOKE(group_detach_event,
|
2018-05-23 21:02:14 +00:00
|
|
|
ifgl->ifgl_group);
|
2009-04-10 19:16:14 +00:00
|
|
|
free(ifgl->ifgl_group, M_TEMP);
|
2018-05-23 21:02:14 +00:00
|
|
|
}
|
2009-04-10 19:16:14 +00:00
|
|
|
EVENTHANDLER_INVOKE(group_change_event, groupname);
|
|
|
|
|
|
|
|
IFNET_WLOCK();
|
|
|
|
}
|
|
|
|
IFNET_WUNLOCK();
|
|
|
|
}
|
|
|
|
|
2018-04-05 22:14:55 +00:00
|
|
|
static char *
|
|
|
|
ifgr_group_get(void *ifgrp)
|
|
|
|
{
|
|
|
|
union ifgroupreq_union *ifgrup;
|
|
|
|
|
|
|
|
ifgrup = ifgrp;
|
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
if (SV_CURPROC_FLAG(SV_ILP32))
|
|
|
|
return (&ifgrup->ifgr32.ifgr_ifgru.ifgru_group[0]);
|
|
|
|
#endif
|
|
|
|
return (&ifgrup->ifgr.ifgr_ifgru.ifgru_group[0]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ifg_req *
|
|
|
|
ifgr_groups_get(void *ifgrp)
|
|
|
|
{
|
|
|
|
union ifgroupreq_union *ifgrup;
|
|
|
|
|
|
|
|
ifgrup = ifgrp;
|
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
if (SV_CURPROC_FLAG(SV_ILP32))
|
|
|
|
return ((struct ifg_req *)(uintptr_t)
|
|
|
|
ifgrup->ifgr32.ifgr_ifgru.ifgru_groups);
|
|
|
|
#endif
|
|
|
|
return (ifgrup->ifgr.ifgr_ifgru.ifgru_groups);
|
|
|
|
}
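For reference, a sketch of the union these accessors assume (the real
definition appears earlier in this file; shown here only for clarity):
union ifgroupreq_union {
	struct ifgroupreq	ifgr;
#ifdef COMPAT_FREEBSD32
	struct ifgroupreq32	ifgr32;
#endif
};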
|
|
|
|
|
2006-06-19 22:20:45 +00:00
|
|
|
/*
|
2018-04-05 22:14:55 +00:00
|
|
|
* Stores all groups from an interface in memory pointed to by ifgr.
|
2006-06-19 22:20:45 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-04-05 22:14:55 +00:00
|
|
|
if_getgroup(struct ifgroupreq *ifgr, struct ifnet *ifp)
|
2006-06-19 22:20:45 +00:00
|
|
|
{
|
|
|
|
int len, error;
|
|
|
|
struct ifg_list *ifgl;
|
|
|
|
struct ifg_req ifgrq, *ifgp;
|
|
|
|
|
Widen NET_EPOCH coverage.
When epoch(9) was introduced to the network stack, it was basically
dropped in place of the existing locking, which consisted of mutexes
and rwlocks. For the sake of performance, mutex-covered areas were
kept as small as possible, and so the epoch-covered areas became
equally small.
However, epoch doesn't introduce any contention; it just delays
memory reclaim. So there is no performance benefit in minimising
epoch-covered areas. Meanwhile, entering/exiting an epoch has a
non-zero CPU cost, so doing it less often is a win.
Not least is code maintainability. In the new paradigm we can assume
that at any stage of processing a packet we are inside the network
epoch. This makes coding both the input and output paths much easier.
On the output path we already enter the epoch quite early, in
ip_output() and ip6_output().
This patch does the same for the input path. All ISR processing,
network-related callouts, and other ways of injecting packets into the
network stack shall be performed in net_epoch. Any leaf function that
walks network configuration now asserts the epoch.
The tricky part is the configuration code paths (ioctls, sysctls);
they also call into leaf functions, so some need to be changed.
This patch introduces more epoch recursions (see EPOCH_TRACE) than we
had before. They will be cleaned up separately, as several of them
aren't trivial. Note that unlike lock recursion, epoch recursion is
safe and just wastes a bit of resources.
Reviewed by: gallatin, hselasky, cy, adrian, kristof
Differential Revision: https://reviews.freebsd.org/D19111
2019-10-07 22:40:05 +00:00
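A minimal sketch of the paradigm described above, assuming a
hypothetical packet-injection entry point and using the NET_EPOCH_*
convenience macros:
static void
foo_pkt_input(struct mbuf *m)
{
	struct epoch_tracker et;

	NET_EPOCH_ENTER(et);	/* entry points enter the network epoch */
	/* ... leaf functions below merely NET_EPOCH_ASSERT() ... */
	m_freem(m);		/* placeholder for real processing */
	NET_EPOCH_EXIT(et);
}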
|
|
|
NET_EPOCH_ASSERT();
|
|
|
|
|
2006-06-19 22:20:45 +00:00
|
|
|
if (ifgr->ifgr_len == 0) {
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
ifgr->ifgr_len += sizeof(struct ifg_req);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
len = ifgr->ifgr_len;
|
2018-04-05 22:14:55 +00:00
|
|
|
ifgp = ifgr_groups_get(ifgr);
|
2006-06-19 22:20:45 +00:00
|
|
|
/* XXX: wire */
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
|
2019-10-07 22:40:05 +00:00
|
|
|
if (len < sizeof(ifgrq))
|
2006-06-19 22:20:45 +00:00
|
|
|
return (EINVAL);
|
|
|
|
bzero(&ifgrq, sizeof ifgrq);
|
|
|
|
strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
|
|
|
|
sizeof(ifgrq.ifgrq_group));
|
2019-10-07 22:40:05 +00:00
|
|
|
if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req))))
|
2006-06-19 22:20:45 +00:00
|
|
|
return (error);
|
|
|
|
len -= sizeof(ifgrq);
|
|
|
|
ifgp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2018-04-05 22:14:55 +00:00
|
|
|
* Stores all members of a group in memory pointed to by ifgr
|
2006-06-19 22:20:45 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-04-05 22:14:55 +00:00
|
|
|
if_getgroupmembers(struct ifgroupreq *ifgr)
|
2006-06-19 22:20:45 +00:00
|
|
|
{
|
|
|
|
struct ifg_group *ifg;
|
|
|
|
struct ifg_member *ifgm;
|
|
|
|
struct ifg_req ifgrq, *ifgp;
|
|
|
|
int len, error;
|
|
|
|
|
|
|
|
IFNET_RLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
|
|
|
|
break;
|
|
|
|
if (ifg == NULL) {
|
|
|
|
IFNET_RUNLOCK();
|
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ifgr->ifgr_len == 0) {
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
|
2006-06-19 22:20:45 +00:00
|
|
|
ifgr->ifgr_len += sizeof(ifgrq);
|
|
|
|
IFNET_RUNLOCK();
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
len = ifgr->ifgr_len;
|
2018-04-05 22:14:55 +00:00
|
|
|
ifgp = ifgr_groups_get(ifgr);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
|
2006-06-19 22:20:45 +00:00
|
|
|
if (len < sizeof(ifgrq)) {
|
|
|
|
IFNET_RUNLOCK();
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
bzero(&ifgrq, sizeof ifgrq);
|
|
|
|
strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
|
|
|
|
sizeof(ifgrq.ifgrq_member));
|
|
|
|
if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
|
|
|
|
IFNET_RUNLOCK();
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
len -= sizeof(ifgrq);
|
|
|
|
ifgp++;
|
|
|
|
}
|
|
|
|
IFNET_RUNLOCK();
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2014-08-31 06:46:21 +00:00
|
|
|
/*
 * Return counter values from counter(9)s stored in ifnet.
 */
uint64_t
if_get_counter_default(struct ifnet *ifp, ift_counter cnt)
{

	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	return (counter_u64_fetch(ifp->if_counters[cnt]));
}

/*
 * Increase an ifnet counter.  Usually used for counters shared
 * between the stack and a driver, but the function supports them all.
 */
void
if_inc_counter(struct ifnet *ifp, ift_counter cnt, int64_t inc)
{

	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	counter_u64_add(ifp->if_counters[cnt], inc);
}
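/*
 * Illustrative driver-side use of the helpers above (a sketch, not
 * part of the original file): an RX path accounts a received mbuf m,
 * and the stack later reads the aggregate through the ifnet method:
 *
 *	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
 *	if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);
 *	...
 *	packets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
 */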
/*
 * Copy data from ifnet to userland API structure if_data.
 */
void
if_data_copy(struct ifnet *ifp, struct if_data *ifd)
{

	ifd->ifi_type = ifp->if_type;
	ifd->ifi_physical = 0;
	ifd->ifi_addrlen = ifp->if_addrlen;
	ifd->ifi_hdrlen = ifp->if_hdrlen;
	ifd->ifi_link_state = ifp->if_link_state;
	ifd->ifi_vhid = 0;
	ifd->ifi_datalen = sizeof(struct if_data);
	ifd->ifi_mtu = ifp->if_mtu;
	ifd->ifi_metric = ifp->if_metric;
	ifd->ifi_baudrate = ifp->if_baudrate;
	ifd->ifi_hwassist = ifp->if_hwassist;
	ifd->ifi_epoch = ifp->if_epoch;
	ifd->ifi_lastchange = ifp->if_lastchange;

	ifd->ifi_ipackets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS);
	ifd->ifi_ierrors = ifp->if_get_counter(ifp, IFCOUNTER_IERRORS);
	ifd->ifi_opackets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS);
	ifd->ifi_oerrors = ifp->if_get_counter(ifp, IFCOUNTER_OERRORS);
	ifd->ifi_collisions = ifp->if_get_counter(ifp, IFCOUNTER_COLLISIONS);
	ifd->ifi_ibytes = ifp->if_get_counter(ifp, IFCOUNTER_IBYTES);
	ifd->ifi_obytes = ifp->if_get_counter(ifp, IFCOUNTER_OBYTES);
	ifd->ifi_imcasts = ifp->if_get_counter(ifp, IFCOUNTER_IMCASTS);
	ifd->ifi_omcasts = ifp->if_get_counter(ifp, IFCOUNTER_OMCASTS);
	ifd->ifi_iqdrops = ifp->if_get_counter(ifp, IFCOUNTER_IQDROPS);
	ifd->ifi_oqdrops = ifp->if_get_counter(ifp, IFCOUNTER_OQDROPS);
	ifd->ifi_noproto = ifp->if_get_counter(ifp, IFCOUNTER_NOPROTO);
}
/*
 * Wrapper functions for struct ifnet address list locking macros.  These are
 * used by kernel modules to avoid encoding programming interface or binary
 * interface assumptions that may be violated when kernel-internal locking
 * approaches change.
 */
void
if_addr_rlock(struct ifnet *ifp)
{

	epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
}

void
if_addr_runlock(struct ifnet *ifp)
{

	epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
}

void
if_maddr_rlock(if_t ifp)
{

	epoch_enter_preempt(net_epoch_preempt, curthread->td_et);
}

void
if_maddr_runlock(if_t ifp)
{

	epoch_exit_preempt(net_epoch_preempt, curthread->td_et);
}
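/*
 * Module-side sketch for the wrappers above (illustrative, not from
 * the original file): walk the interface address list without
 * encoding any assumption about the underlying locking:
 *
 *	struct ifaddr *ifa;
 *
 *	if_addr_rlock(ifp);
 *	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
 *		(inspect ifa; do not sleep, do not cache pointers)
 *	if_addr_runlock(ifp);
 */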
/*
 * Initialization, destruction and refcounting functions for ifaddrs.
 */
struct ifaddr *
ifa_alloc(size_t size, int flags)
{
	struct ifaddr *ifa;

	KASSERT(size >= sizeof(struct ifaddr),
	    ("%s: invalid size %zu", __func__, size));

	ifa = malloc(size, M_IFADDR, M_ZERO | flags);
	if (ifa == NULL)
		return (NULL);

	if ((ifa->ifa_opackets = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_ipackets = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_obytes = counter_u64_alloc(flags)) == NULL)
		goto fail;
	if ((ifa->ifa_ibytes = counter_u64_alloc(flags)) == NULL)
		goto fail;

	refcount_init(&ifa->ifa_refcnt, 1);

	return (ifa);

fail:
	/* free(NULL) is okay */
	counter_u64_free(ifa->ifa_opackets);
	counter_u64_free(ifa->ifa_ipackets);
	counter_u64_free(ifa->ifa_obytes);
	counter_u64_free(ifa->ifa_ibytes);
	free(ifa, M_IFADDR);

	return (NULL);
}

void
ifa_ref(struct ifaddr *ifa)
{

	refcount_acquire(&ifa->ifa_refcnt);
}

static void
ifa_destroy(epoch_context_t ctx)
{
	struct ifaddr *ifa;

	ifa = __containerof(ctx, struct ifaddr, ifa_epoch_ctx);
	counter_u64_free(ifa->ifa_opackets);
	counter_u64_free(ifa->ifa_ipackets);
	counter_u64_free(ifa->ifa_obytes);
	counter_u64_free(ifa->ifa_ibytes);
	free(ifa, M_IFADDR);
}

void
ifa_free(struct ifaddr *ifa)
{

	if (refcount_release(&ifa->ifa_refcnt))
		epoch_call(net_epoch_preempt, &ifa->ifa_epoch_ctx, ifa_destroy);
}
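/*
 * Lifecycle sketch for the refcounting API above (illustrative, not
 * part of the original file); the final ifa_free() defers the actual
 * reclaim to ifa_destroy() via epoch_call(), so readers inside the
 * network epoch never see freed memory:
 *
 *	struct ifaddr *ifa;
 *
 *	ifa = ifa_alloc(sizeof(struct in_ifaddr), M_NOWAIT);
 *	if (ifa == NULL)
 *		return (ENOBUFS);
 *	ifa_ref(ifa);		(extra reference for a second holder)
 *	...
 *	ifa_free(ifa);		(second holder drops its reference)
 *	ifa_free(ifa);		(last reference: reclaim via epoch_call)
 */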
static int
ifa_maintain_loopback_route(int cmd, const char *otype, struct ifaddr *ifa,
    struct sockaddr *ia)
{
	struct epoch_tracker et;
	int error;
	struct rt_addrinfo info;
	struct sockaddr_dl null_sdl;
	struct ifnet *ifp;

	ifp = ifa->ifa_ifp;

	bzero(&info, sizeof(info));
	if (cmd != RTM_DELETE)
		info.rti_ifp = V_loif;
	if (cmd == RTM_ADD) {
		/* explicitly specify (loopback) ifa */
		if (info.rti_ifp != NULL) {
			NET_EPOCH_ENTER(et);
			info.rti_ifa = ifaof_ifpforaddr(ifa->ifa_addr, info.rti_ifp);
			if (info.rti_ifa != NULL)
				ifa_ref(info.rti_ifa);
			NET_EPOCH_EXIT(et);
		}
	}
	info.rti_flags = ifa->ifa_flags | RTF_HOST | RTF_STATIC | RTF_PINNED;
	info.rti_info[RTAX_DST] = ia;
	info.rti_info[RTAX_GATEWAY] = (struct sockaddr *)&null_sdl;
	link_init_sdl(ifp, (struct sockaddr *)&null_sdl, ifp->if_type);

	error = rtrequest1_fib(cmd, &info, NULL, ifp->if_fib);

	if (error != 0 &&
	    !(cmd == RTM_ADD && error == EEXIST) &&
	    !(cmd == RTM_DELETE && error == ENOENT))
		if_printf(ifp, "%s failed: %d\n", otype, error);

	return (error);
}

int
ifa_add_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_ADD, "insertion", ifa, ia));
}

int
ifa_del_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_DELETE, "deletion", ifa, ia));
}

int
ifa_switch_loopback_route(struct ifaddr *ifa, struct sockaddr *ia)
{

	return (ifa_maintain_loopback_route(RTM_CHANGE, "switch", ifa, ia));
}
/*
 * XXX: Because sockaddr_dl has deeper structure than the sockaddr
 * structs used to represent other address families, it is necessary
 * to perform a different comparison.
 */
#define	sa_dl_equal(a1, a2)						\
	((((const struct sockaddr_dl *)(a1))->sdl_len ==		\
	((const struct sockaddr_dl *)(a2))->sdl_len) &&		\
	(bcmp(CLLADDR((const struct sockaddr_dl *)(a1)),		\
	CLLADDR((const struct sockaddr_dl *)(a2)),			\
	((const struct sockaddr_dl *)(a1))->sdl_alen) == 0))
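/*
 * Illustrative use of sa_dl_equal() (a sketch, not from the original
 * file): compare a caller-supplied link-level address sdl against an
 * ifaddr's address:
 *
 *	if (ifa->ifa_addr->sa_family == AF_LINK &&
 *	    sa_dl_equal(ifa->ifa_addr, (struct sockaddr *)sdl))
 *		(same sdl_len and same sdl_alen bytes of lladdr)
 */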
/*
 * Locate an interface based on a complete address.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithaddr(const struct sockaddr *addr)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr)) {
				goto done;
			}
			/* IP6 doesn't have broadcast */
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				goto done;
			}
		}
	}
	ifa = NULL;
done:
	return (ifa);
}

int
ifa_ifwithaddr_check(const struct sockaddr *addr)
{
	struct epoch_tracker et;
	int rc;

	NET_EPOCH_ENTER(et);
	rc = (ifa_ifwithaddr(addr) != NULL);
	NET_EPOCH_EXIT(et);
	return (rc);
}
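/*
 * Caller-side sketch for the epoch contract asserted above
 * (illustrative): a caller of ifa_ifwithaddr() must already be inside
 * the network epoch, and must take a reference before leaving it if
 * the ifaddr is to be used afterwards:
 *
 *	struct epoch_tracker et;
 *
 *	NET_EPOCH_ENTER(et);
 *	ifa = ifa_ifwithaddr(addr);
 *	if (ifa != NULL)
 *		ifa_ref(ifa);
 *	NET_EPOCH_EXIT(et);
 */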
/*
 * Locate an interface based on the broadcast address.
 */
/* ARGSUSED */
struct ifaddr *
ifa_ifwithbroadaddr(const struct sockaddr *addr, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	MPASS(in_epoch(net_epoch_preempt));
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				goto done;
			}
		}
	}
	ifa = NULL;
done:
	return (ifa);
}
/*
 * Locate the point to point interface with a given destination address.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithdstaddr(const struct sockaddr *addr, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;

	MPASS(in_epoch(net_epoch_preempt));
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
			continue;
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (ifa->ifa_dstaddr != NULL &&
			    sa_equal(addr, ifa->ifa_dstaddr)) {
				goto done;
			}
		}
	}
	ifa = NULL;
done:
	return (ifa);
}
/*
 * Find an interface on a specific network.  If several match,
 * the most specific one found is chosen.
 */
struct ifaddr *
ifa_ifwithnet(const struct sockaddr *addr, int ignore_ptp, int fibnum)
{
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;
	const char *addr_data = addr->sa_data, *cplim;

	MPASS(in_epoch(net_epoch_preempt));
	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		const struct sockaddr_dl *sdl = (const struct sockaddr_dl *)addr;

		if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
			return (ifaddr_byindex(sdl->sdl_index));
	}

	/*
	 * Scan through each interface, looking for ones that have addresses
	 * in this address family and the requested fib.
	 */
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((fibnum != RT_ALL_FIBS) && (ifp->if_fib != fibnum))
			continue;
		CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			const char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET &&
			    ifp->if_flags & IFF_POINTOPOINT && !ignore_ptp) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr)) {
					goto done;
				}
			} else {
				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time)
				 */
				if (ifa->ifa_netmask == 0)
					continue;
				cp = addr_data;
				cp2 = ifa->ifa_addr->sa_data;
				cp3 = ifa->ifa_netmask->sa_data;
				cplim = ifa->ifa_netmask->sa_len
					+ (char *)ifa->ifa_netmask;
				while (cp3 < cplim)
					if ((*cp++ ^ *cp2++) & *cp3++)
						goto next; /* next address! */
				/*
				 * If the netmask of what we just found
				 * is more specific than what we had before
				 * (if we had one), or if the virtual status
				 * of new prefix is better than of the old one,
				 * then remember the new one before continuing
				 * to search for an even better one.
				 */
				if (ifa_maybe == NULL ||
				    ifa_preferred(ifa_maybe, ifa) ||
				    rn_refines((caddr_t)ifa->ifa_netmask,
				    (caddr_t)ifa_maybe->ifa_netmask)) {
					ifa_maybe = ifa;
				}
			}
		}
	}
	ifa = ifa_maybe;
	ifa_maybe = NULL;
done:
	return (ifa);
}
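/*
 * Worked example of the masked byte-at-a-time compare above
 * (illustrative): matching 192.0.2.77 against an ifa of 192.0.2.1
 * with netmask 255.255.255.0:
 *
 *	addr octets:	192   0   2  77
 *	ifa octets:	192   0   2   1
 *	XOR:		  0   0   0  76
 *	& netmask:	  0   0   0   0
 *
 * The last octet differs, but the corresponding netmask byte is 0,
 * so the difference is masked away and the ifa remains a candidate.
 */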
/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(const struct sockaddr *addr, struct ifnet *ifp)
{
	struct ifaddr *ifa;
	const char *cp, *cp2, *cp3;
	char *cplim;
	struct ifaddr *ifa_maybe = NULL;
	u_int af = addr->sa_family;

	if (af >= AF_MAX)
		return (NULL);

	MPASS(in_epoch(net_epoch_preempt));
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr->sa_family != af)
			continue;
		if (ifa_maybe == NULL)
			ifa_maybe = ifa;
		if (ifa->ifa_netmask == 0) {
			if (sa_equal(addr, ifa->ifa_addr) ||
			    (ifa->ifa_dstaddr &&
			    sa_equal(addr, ifa->ifa_dstaddr)))
				goto done;
			continue;
		}
		if (ifp->if_flags & IFF_POINTOPOINT) {
			if (sa_equal(addr, ifa->ifa_dstaddr))
				goto done;
		} else {
			cp = addr->sa_data;
			cp2 = ifa->ifa_addr->sa_data;
			cp3 = ifa->ifa_netmask->sa_data;
			cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
			for (; cp3 < cplim; cp3++)
				if ((*cp++ ^ *cp2++) & *cp3)
					break;
			if (cp3 == cplim)
				goto done;
		}
	}
	ifa = ifa_maybe;
done:
	return (ifa);
}
/*
 * See whether the new ifa is better than the current one:
 * 1) A non-virtual one is preferred over virtual.
 * 2) A virtual in master state is preferred over any other state.
 *
 * Used in several address selecting functions.
 */
int
ifa_preferred(struct ifaddr *cur, struct ifaddr *next)
{

	return (cur->ifa_carp && (!next->ifa_carp ||
	    ((*carp_master_p)(next) && !(*carp_master_p)(cur))));
}
struct sockaddr_dl *
link_alloc_sdl(size_t size, int flags)
{

	return (malloc(size, M_TEMP, flags));
}

void
link_free_sdl(struct sockaddr *sa)
{
	free(sa, M_TEMP);
}

/*
 * Fills in given sdl with interface basic info.
 * Returns pointer to filled sdl.
 */
struct sockaddr_dl *
link_init_sdl(struct ifnet *ifp, struct sockaddr *paddr, u_char iftype)
{
	struct sockaddr_dl *sdl;

	sdl = (struct sockaddr_dl *)paddr;
	memset(sdl, 0, sizeof(struct sockaddr_dl));
	sdl->sdl_len = sizeof(struct sockaddr_dl);
	sdl->sdl_family = AF_LINK;
	sdl->sdl_index = ifp->if_index;
	sdl->sdl_type = iftype;

	return (sdl);
}
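/*
 * Illustrative pairing of the helpers above (a sketch, error handling
 * elided): allocate, initialize and free a link-level sockaddr:
 *
 *	struct sockaddr_dl *sdl;
 *
 *	sdl = link_alloc_sdl(sizeof(*sdl), M_WAITOK);
 *	link_init_sdl(ifp, (struct sockaddr *)sdl, IFT_ETHER);
 *	...
 *	link_free_sdl((struct sockaddr *)sdl);
 */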
/*
 * Mark an interface down and notify protocols of
 * the transition.
 */
static void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr *ifa;

	KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP"));

	ifp->if_flags &= ~flag;
	getmicrotime(&ifp->if_lastchange);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
	ifp->if_qflush(ifp);

	if (ifp->if_carp)
		(*carp_linkstate_p)(ifp);
	rt_ifmsg(ifp);
}
/*
 * Mark an interface up and notify protocols of
 * the transition.
 */
static void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr *ifa;

	KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));

	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			pfctlinput(PRC_IFUP, ifa->ifa_addr);
	if (ifp->if_carp)
		(*carp_linkstate_p)(ifp);
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif
}
void	(*vlan_link_state_p)(struct ifnet *);	/* XXX: private from if_vlan */
void	(*vlan_trunk_cap_p)(struct ifnet *);	/* XXX: private from if_vlan */
struct ifnet *(*vlan_trunkdev_p)(struct ifnet *);
struct ifnet *(*vlan_devat_p)(struct ifnet *, uint16_t);
int	(*vlan_tag_p)(struct ifnet *, uint16_t *);
int	(*vlan_pcp_p)(struct ifnet *, uint16_t *);
int	(*vlan_setcookie_p)(struct ifnet *, void *);
void	*(*vlan_cookie_p)(struct ifnet *);

/*
 * Handle a change in the interface link state.  To avoid LORs
 * between the driver lock and upper layer locks, as well as possible
 * recursions, we post the event to a taskqueue, and all the work
 * is done in static do_link_state_change().
 */
void
if_link_state_change(struct ifnet *ifp, int link_state)
{
	/* Return if state hasn't changed. */
	if (ifp->if_link_state == link_state)
		return;

	ifp->if_link_state = link_state;

	/* XXXGL: reference ifp? */
	taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
}

static void
do_link_state_change(void *arg, int pending)
{
	struct ifnet *ifp;
	int link_state;

	ifp = arg;
	link_state = ifp->if_link_state;

	CURVNET_SET(ifp->if_vnet);
	rt_ifmsg(ifp);
	if (ifp->if_vlantrunk != NULL)
		(*vlan_link_state_p)(ifp);

	if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
	    ifp->if_l2com != NULL)
		(*ng_ether_link_state_p)(ifp, link_state);
	if (ifp->if_carp)
		(*carp_linkstate_p)(ifp);
	if (ifp->if_bridge)
		ifp->if_bridge_linkstate(ifp);
	if (ifp->if_lagg)
		(*lagg_linkstate_p)(ifp, link_state);

	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname,
		    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN",
		    NULL);
	if (pending > 1)
		if_printf(ifp, "%d link states coalesced\n", pending);
	if (log_link_state_change)
		if_printf(ifp, "link state changed to %s\n",
		    (link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
	EVENTHANDLER_INVOKE(ifnet_link_event, ifp, link_state);
	CURVNET_RESTORE();
}
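/*
 * Driver-side sketch of the contract above (illustrative): a NIC
 * driver reports PHY transitions from its interrupt or miibus path
 * and never calls the upper layers directly:
 *
 *	if_link_state_change(ifp, LINK_STATE_UP);
 *	...
 *	if_link_state_change(ifp, LINK_STATE_DOWN);
 *
 * Coalescing and all upper-layer notification then happen in
 * taskqueue context inside do_link_state_change().
 */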
/*
 * Mark an interface down and notify protocols of
 * the transition.
 */
void
if_down(struct ifnet *ifp)
{

	EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_DOWN);
	if_unroute(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 */
void
if_up(struct ifnet *ifp)
{

	if_route(ifp, IFF_UP, AF_UNSPEC);
	EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_UP);
}
/*
 * Flush an interface queue.
 */
void
if_qflush(struct ifnet *ifp)
{
	struct mbuf *m, *n;
	struct ifaltq *ifq;

	ifq = &ifp->if_snd;
	IFQ_LOCK(ifq);
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(ifq))
		ALTQ_PURGE(ifq);
#endif
	n = ifq->ifq_head;
	while ((m = n) != NULL) {
		n = m->m_nextpkt;
		m_freem(m);
	}
	ifq->ifq_head = 0;
	ifq->ifq_tail = 0;
	ifq->ifq_len = 0;
	IFQ_UNLOCK(ifq);
}
/*
 * Map interface name to interface structure pointer, with or without
 * returning a reference.
 */
struct ifnet *
ifunit_ref(const char *name)
{
	struct epoch_tracker et;
	struct ifnet *ifp;

	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 &&
		    !(ifp->if_flags & IFF_DYING))
			break;
	}
	if (ifp != NULL)
		if_ref(ifp);
	NET_EPOCH_EXIT(et);
	return (ifp);
}

struct ifnet *
ifunit(const char *name)
{
	struct epoch_tracker et;
	struct ifnet *ifp;

	NET_EPOCH_ENTER(et);
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
			break;
	}
	NET_EPOCH_EXIT(et);
	return (ifp);
}
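/*
 * Caller-side sketch for the referenced lookup above (illustrative):
 * the reference pins the ifnet across a possibly sleeping section and
 * must be dropped with if_rele():
 *
 *	struct ifnet *ifp;
 *
 *	ifp = ifunit_ref("em0");
 *	if (ifp == NULL)
 *		return (ENXIO);
 *	(use ifp; sleeping is allowed while the reference is held)
 *	if_rele(ifp);
 */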
/*
 * Accessors for struct ifreq's ifru_buffer member, transparently
 * handling requests from 32-bit processes under COMPAT_FREEBSD32.
 */
static void *
ifr_buffer_get_buffer(void *data)
{
	union ifreq_union *ifrup;

	ifrup = data;
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		return ((void *)(uintptr_t)
		    ifrup->ifr32.ifr_ifru.ifru_buffer.buffer);
#endif
	return (ifrup->ifr.ifr_ifru.ifru_buffer.buffer);
}

static void
ifr_buffer_set_buffer_null(void *data)
{
	union ifreq_union *ifrup;

	ifrup = data;
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		ifrup->ifr32.ifr_ifru.ifru_buffer.buffer = 0;
	else
#endif
		ifrup->ifr.ifr_ifru.ifru_buffer.buffer = NULL;
}

static size_t
ifr_buffer_get_length(void *data)
{
	union ifreq_union *ifrup;

	ifrup = data;
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		return (ifrup->ifr32.ifr_ifru.ifru_buffer.length);
#endif
	return (ifrup->ifr.ifr_ifru.ifru_buffer.length);
}

static void
ifr_buffer_set_length(void *data, size_t len)
{
	union ifreq_union *ifrup;

	ifrup = data;
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		ifrup->ifr32.ifr_ifru.ifru_buffer.length = len;
	else
#endif
		ifrup->ifr.ifr_ifru.ifru_buffer.length = len;
}

void *
ifr_data_get_ptr(void *ifrp)
{
	union ifreq_union *ifrup;

	ifrup = ifrp;
#ifdef COMPAT_FREEBSD32
	if (SV_CURPROC_FLAG(SV_ILP32))
		return ((void *)(uintptr_t)
		    ifrup->ifr32.ifr_ifru.ifru_data);
#endif
	return (ifrup->ifr.ifr_ifru.ifru_data);
}
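/*
 * Illustrative consumer of ifr_data_get_ptr() (a sketch, not from the
 * original file): a driver ioctl handler fetching a user pointer from
 * an ifreq regardless of the caller's ABI; the request name and the
 * req structure are hypothetical:
 *
 *	case SIOCGEXAMPLE:	(hypothetical request)
 *		error = copyin(ifr_data_get_ptr(ifr), &req, sizeof(req));
 *		...
 */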
/*
 * Hardware specific interface ioctls.
 */
int
ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
{
	struct ifreq *ifr;
	int error = 0, do_ifup = 0;
	int new_flags, temp_flags;
	size_t namelen, onamelen;
	size_t descrlen;
	char *descrbuf, *odescrbuf;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		temp_flags = ifp->if_flags | ifp->if_drv_flags;
		ifr->ifr_flags = temp_flags & 0xffff;
		ifr->ifr_flagshigh = temp_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

#ifdef MAC
	case SIOCGIFMAC:
		error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp);
		break;
#endif

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFPHYS:
		/* XXXGL: did this ever work? */
		ifr->ifr_phys = 0;
		break;

	case SIOCGIFDESCR:
		error = 0;
		sx_slock(&ifdescr_sx);
		if (ifp->if_description == NULL)
			error = ENOMSG;
		else {
			/* space for terminating nul */
			descrlen = strlen(ifp->if_description) + 1;
			if (ifr_buffer_get_length(ifr) < descrlen)
				ifr_buffer_set_buffer_null(ifr);
			else
				error = copyout(ifp->if_description,
				    ifr_buffer_get_buffer(ifr), descrlen);
			ifr_buffer_set_length(ifr, descrlen);
		}
		sx_sunlock(&ifdescr_sx);
		break;

	case SIOCSIFDESCR:
		error = priv_check(td, PRIV_NET_SETIFDESCR);
		if (error)
			return (error);

		/*
		 * Copy only (length-1) bytes to make sure that
		 * if_description is always nul terminated.  The
		 * length parameter is supposed to count the
		 * terminating nul in.
		 */
		if (ifr_buffer_get_length(ifr) > ifdescr_maxlen)
			return (ENAMETOOLONG);
		else if (ifr_buffer_get_length(ifr) == 0)
			descrbuf = NULL;
		else {
			descrbuf = malloc(ifr_buffer_get_length(ifr),
			    M_IFDESCR, M_WAITOK | M_ZERO);
			error = copyin(ifr_buffer_get_buffer(ifr), descrbuf,
			    ifr_buffer_get_length(ifr) - 1);
			if (error) {
				free(descrbuf, M_IFDESCR);
				break;
			}
		}

		sx_xlock(&ifdescr_sx);
		odescrbuf = ifp->if_description;
		ifp->if_description = descrbuf;
		sx_xunlock(&ifdescr_sx);

		getmicrotime(&ifp->if_lastchange);
		free(odescrbuf, M_IFDESCR);
		break;
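	/*
	 * Userland sketch for the description ioctls above (illustrative,
	 * not part of the original file), roughly what ifconfig(8) does:
	 *
	 *	char descr[64];
	 *
	 *	strlcpy(descr, "uplink", sizeof(descr));
	 *	ifr.ifr_buffer.buffer = descr;
	 *	ifr.ifr_buffer.length = strlen(descr) + 1;
	 *	ioctl(s, SIOCSIFDESCR, &ifr);
	 */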
case SIOCGIFFIB:
|
|
|
|
ifr->ifr_fib = ifp->if_fib;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SIOCSIFFIB:
|
|
|
|
error = priv_check(td, PRIV_NET_SETIFFIB);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (ifr->ifr_fib >= rt_numfibs)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
ifp->if_fib = ifr->ifr_fib;
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCSIFFLAGS:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFFLAGS);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
Rename IFF_RUNNING to IFF_DRV_RUNNING, IFF_OACTIVE to IFF_DRV_OACTIVE,
and move both flags from ifnet.if_flags to ifnet.if_drv_flags, making
and documenting the locking of these flags the responsibility of the
device driver, not the network stack. The flags for these two fields
will be mutually exclusive so that they can be exposed to user space as
though they were stored in the same variable.
Add #defines to provide the old names #ifndef _KERNEL, so that user
applications (such as ifconfig) can use the old flag names. Using the
old names in a device driver will result in a compile error in order to
help device driver writers adopt the new model.
When exposing the interface flags to user space, via interface ioctls
or routing sockets, OR the two fields together. Since the driver flags
cannot currently be set from user space, no new logic is currently
required to handle this case.
Add some assertions that general purpose network stack routines, such
as if_setflags(), are not improperly used on driver-owned flags.
With this change, a large number of very minor network stack races are
closed, subject to correct device driver locking. Most were likely
never triggered.
Driver sweep to follow; many thanks to pjd and bz for the line-by-line
review they gave this patch.
Reviewed by: pjd, bz
MFC after: 7 days
2005-08-09 10:16:17 +00:00
|
|
|
/*
|
|
|
|
* Currently, no driver owned flags pass the IFF_CANTCHANGE
|
|
|
|
* check, so we don't need special handling here yet.
|
|
|
|
*/
|
2002-08-18 07:05:00 +00:00
|
|
|
new_flags = (ifr->ifr_flags & 0xffff) |
|
|
|
|
(ifr->ifr_flagshigh << 16);
|
2013-11-05 12:52:56 +00:00
|
|
|
if (ifp->if_flags & IFF_UP &&
|
2002-08-18 07:05:00 +00:00
|
|
|
(new_flags & IFF_UP) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if_down(ifp);
|
2002-08-18 07:05:00 +00:00
|
|
|
} else if (new_flags & IFF_UP &&
|
1999-06-06 09:17:51 +00:00
|
|
|
(ifp->if_flags & IFF_UP) == 0) {
|
2017-01-06 05:10:49 +00:00
|
|
|
do_ifup = 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2005-10-03 01:47:43 +00:00
|
|
|
/* See if the permanently promiscuous mode bit is about to flip */
|
|
|
|
if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
|
|
|
|
if (new_flags & IFF_PPROMISC)
|
|
|
|
ifp->if_flags |= IFF_PROMISC;
|
|
|
|
else if (ifp->if_pcount == 0)
|
|
|
|
ifp->if_flags &= ~IFF_PROMISC;
|
2016-05-12 19:42:13 +00:00
|
|
|
if (log_promisc_mode_change)
|
2018-05-11 00:19:49 +00:00
|
|
|
if_printf(ifp, "permanently promiscuous mode %s\n",
|
2016-05-12 19:42:13 +00:00
|
|
|
((new_flags & IFF_PPROMISC) ?
|
|
|
|
"enabled" : "disabled"));
|
2005-10-03 01:47:43 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
|
2002-08-18 07:05:00 +00:00
|
|
|
(new_flags &~ IFF_CANTCHANGE);
|
2004-10-19 18:11:55 +00:00
|
|
|
if (ifp->if_ioctl) {
|
1994-05-24 10:09:53 +00:00
|
|
|
(void) (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
}
|
2017-01-06 05:10:49 +00:00
|
|
|
if (do_ifup)
|
|
|
|
if_up(ifp);
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
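/*
 * A hypothetical userland sketch of the read-modify-write matching the
 * 16/16 split above: the low half of the 32-bit flag word travels in
 * ifr_flags, the high half in ifr_flagshigh.
 */
static int
example_bring_up(int s, const char *name)
{
	struct ifreq ifr;
	int flags;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1)
		return (-1);
	/* Reassemble the full flag word the same way the kernel does. */
	flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
	flags |= IFF_UP;
	ifr.ifr_flags = flags & 0xffff;
	ifr.ifr_flagshigh = flags >> 16;
	return (ioctl(s, SIOCSIFFLAGS, &ifr));
}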
|
|
|
|
|
2001-09-18 17:41:42 +00:00
|
|
|
case SIOCSIFCAP:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFCAP);
|
2001-09-18 17:41:42 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2004-02-21 12:48:25 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
2001-09-18 17:41:42 +00:00
|
|
|
if (ifr->ifr_reqcap & ~ifp->if_capabilities)
|
|
|
|
return (EINVAL);
|
2004-02-21 12:48:25 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
|
|
|
if (error == 0)
|
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-18 17:41:42 +00:00
|
|
|
break;
|
|
|
|
|
2002-08-01 21:15:53 +00:00
|
|
|
#ifdef MAC
|
|
|
|
case SIOCSIFMAC:
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp);
|
2002-08-01 21:15:53 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
2004-02-04 02:54:25 +00:00
|
|
|
case SIOCSIFNAME:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFNAME);
|
|
|
|
if (error)
|
2004-02-04 02:54:25 +00:00
|
|
|
return (error);
|
2018-03-30 18:50:13 +00:00
|
|
|
error = copyinstr(ifr_data_get_ptr(ifr), new_name, IFNAMSIZ,
|
|
|
|
NULL);
|
2004-03-13 02:35:03 +00:00
|
|
|
if (error != 0)
|
2004-02-04 02:54:25 +00:00
|
|
|
return (error);
|
2004-03-13 02:35:03 +00:00
|
|
|
if (new_name[0] == '\0')
|
|
|
|
return (EINVAL);
|
2016-05-15 21:37:36 +00:00
|
|
|
if (new_name[IFNAMSIZ-1] != '\0') {
|
|
|
|
new_name[IFNAMSIZ-1] = '\0';
|
|
|
|
if (strlen(new_name) == IFNAMSIZ-1)
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
2019-09-12 15:36:48 +00:00
|
|
|
if (strcmp(new_name, ifp->if_xname) == 0)
|
|
|
|
break;
|
2004-02-04 02:54:25 +00:00
|
|
|
if (ifunit(new_name) != NULL)
|
|
|
|
return (EEXIST);
|
2009-12-29 13:35:18 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX: Locking. Nothing else seems to lock if_flags,
|
|
|
|
* and there are numerous other races with the
|
|
|
|
* ifunit() checks not being atomic with namespace
|
|
|
|
* changes (renames, vmoves, if_attach, etc).
|
|
|
|
*/
|
|
|
|
ifp->if_flags |= IFF_RENAMING;
|
2004-02-04 02:54:25 +00:00
|
|
|
|
|
|
|
/* Announce the departure of the interface. */
|
|
|
|
rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
|
2005-07-14 20:26:43 +00:00
|
|
|
EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
|
2018-05-11 00:19:49 +00:00
|
|
|
if_printf(ifp, "changing name to '%s'\n", new_name);
|
2004-09-18 05:02:08 +00:00
|
|
|
|
2013-10-15 10:41:22 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
|
2005-11-11 16:04:59 +00:00
|
|
|
ifa = ifp->if_addr;
|
2004-02-04 02:54:25 +00:00
|
|
|
sdl = (struct sockaddr_dl *)ifa->ifa_addr;
|
|
|
|
namelen = strlen(new_name);
|
|
|
|
onamelen = sdl->sdl_nlen;
|
|
|
|
/*
|
|
|
|
* Move the address if needed. This is safe because we
|
|
|
|
* allocate space for a name of length IFNAMSIZ when we
|
|
|
|
* create this in if_attach().
|
|
|
|
*/
|
|
|
|
if (namelen != onamelen) {
|
|
|
|
bcopy(sdl->sdl_data + onamelen,
|
|
|
|
sdl->sdl_data + namelen, sdl->sdl_alen);
|
|
|
|
}
|
|
|
|
bcopy(new_name, sdl->sdl_data, namelen);
|
|
|
|
sdl->sdl_nlen = namelen;
|
|
|
|
sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
|
|
|
|
bzero(sdl->sdl_data, onamelen);
|
|
|
|
while (namelen != 0)
|
|
|
|
sdl->sdl_data[--namelen] = 0xff;
|
2013-10-15 10:41:22 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
|
2004-02-26 04:27:55 +00:00
|
|
|
EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
/* Announce the return of the interface. */
|
|
|
|
rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
|
2009-12-29 13:35:18 +00:00
|
|
|
|
|
|
|
ifp->if_flags &= ~IFF_RENAMING;
|
2004-02-04 02:54:25 +00:00
|
|
|
break;
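/*
 * Illustration of the rename above: a sockaddr_dl stores the interface
 * name and the link-level address back to back in sdl_data[],
 *
 *	sdl_data: [ name (sdl_nlen bytes) ][ lladdr (sdl_alen bytes) ]
 *
 * so when the name length changes the lladdr is first slid to its new
 * offset (the bcopy() above) before the new name is written.  The
 * link-level netmask covers exactly the name bytes, hence the 0xff
 * fill at the end.
 */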
|
|
|
|
|
2009-06-15 18:59:29 +00:00
|
|
|
#ifdef VIMAGE
|
|
|
|
case SIOCSIFVNET:
|
|
|
|
error = priv_check(td, PRIV_NET_SETIFVNET);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2009-07-26 11:29:26 +00:00
|
|
|
error = if_vmove_loan(td, ifp, ifr->ifr_name, ifr->ifr_jid);
|
2009-06-15 18:59:29 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCSIFMETRIC:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFMETRIC);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
ifp->if_metric = ifr->ifr_metric;
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
1994-12-21 22:57:05 +00:00
|
|
|
case SIOCSIFPHYS:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFPHYS);
|
1996-06-10 23:07:36 +00:00
|
|
|
if (error)
|
2004-02-21 12:56:09 +00:00
|
|
|
return (error);
|
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
1996-06-10 23:07:36 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2004-02-21 12:56:09 +00:00
|
|
|
break;
|
1994-12-21 22:57:05 +00:00
|
|
|
|
1994-08-08 10:49:26 +00:00
|
|
|
case SIOCSIFMTU:
|
1999-11-22 02:45:11 +00:00
|
|
|
{
|
|
|
|
u_long oldmtu = ifp->if_mtu;
|
|
|
|
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETIFMTU);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-08-08 10:49:26 +00:00
|
|
|
return (error);
|
1999-08-06 13:53:03 +00:00
|
|
|
if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
|
1994-08-08 10:58:30 +00:00
|
|
|
return (EINVAL);
|
2001-09-29 05:55:04 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
1996-06-10 23:07:36 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2000-01-24 08:53:39 +00:00
|
|
|
if (error == 0) {
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2000-01-24 08:53:39 +00:00
|
|
|
rt_ifmsg(ifp);
|
2018-05-06 00:38:29 +00:00
|
|
|
#ifdef INET
|
|
|
|
NETDUMP_REINIT(ifp);
|
|
|
|
#endif
|
2000-01-24 08:53:39 +00:00
|
|
|
}
|
1999-11-22 02:45:11 +00:00
|
|
|
/*
|
|
|
|
* If the link MTU changed, perform the network-layer-specific procedures.
|
|
|
|
*/
|
|
|
|
if (ifp->if_mtu != oldmtu) {
|
|
|
|
#ifdef INET6
|
|
|
|
nd6_setmtu(ifp);
|
|
|
|
#endif
|
2014-11-17 01:05:29 +00:00
|
|
|
rt_updatemtu(ifp);
|
1999-11-22 02:45:11 +00:00
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1999-11-22 02:45:11 +00:00
|
|
|
}
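/*
 * A hypothetical userland sketch of the MTU path above; the kernel
 * rejects values outside [IF_MINMTU, IF_MAXMTU] before asking the
 * driver, and on success the routing layer is told via rt_updatemtu().
 */
static int
example_set_mtu(int s, const char *name, int mtu)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	ifr.ifr_mtu = mtu;
	return (ioctl(s, SIOCSIFMTU, &ifr));
}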
|
1994-08-08 10:49:26 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
2006-11-06 13:42:10 +00:00
|
|
|
if (cmd == SIOCADDMULTI)
|
|
|
|
error = priv_check(td, PRIV_NET_ADDMULTI);
|
|
|
|
else
|
|
|
|
error = priv_check(td, PRIV_NET_DELMULTI);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
/* Don't allow group membership on non-multicast interfaces. */
|
|
|
|
if ((ifp->if_flags & IFF_MULTICAST) == 0)
|
2001-09-29 05:55:04 +00:00
|
|
|
return (EOPNOTSUPP);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
/* Don't let users screw up protocols' entries. */
|
|
|
|
if (ifr->ifr_addr.sa_family != AF_LINK)
|
2001-09-29 05:55:04 +00:00
|
|
|
return (EINVAL);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
if (cmd == SIOCADDMULTI) {
|
2019-01-09 01:11:19 +00:00
|
|
|
struct epoch_tracker et;
|
1997-01-13 21:26:53 +00:00
|
|
|
struct ifmultiaddr *ifma;
|
2007-03-20 00:36:10 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Userland is only permitted to join groups once
|
|
|
|
* via the if_addmulti() KPI, because it cannot hold
|
|
|
|
* struct ifmultiaddr * between calls. It may also
|
|
|
|
* lose a race while we check if the membership
|
|
|
|
* already exists.
|
|
|
|
*/
|
2019-01-09 01:11:19 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2007-03-20 00:36:10 +00:00
|
|
|
ifma = if_findmulti(ifp, &ifr->ifr_addr);
|
2019-01-09 01:11:19 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2007-03-20 00:36:10 +00:00
|
|
|
if (ifma != NULL)
|
|
|
|
error = EADDRINUSE;
|
|
|
|
else
|
|
|
|
error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
|
1997-01-13 21:26:53 +00:00
|
|
|
} else {
|
|
|
|
error = if_delmulti(ifp, &ifr->ifr_addr);
|
|
|
|
}
|
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
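/*
 * A hypothetical userland sketch of a SIOCADDMULTI request that passes
 * the AF_LINK check above: the group is expressed as a sockaddr_dl
 * whose lladdr is the Ethernet multicast address.  Assumes
 * <net/if_dl.h> for sockaddr_dl/LLADDR(); the 8-byte sockaddr_dl
 * header plus a 6-byte address fits inside ifr_addr.
 */
static int
example_add_ether_multi(int s, const char *name, const uint8_t eaddr[6])
{
	struct ifreq ifr;
	struct sockaddr_dl *sdl;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	sdl = (struct sockaddr_dl *)&ifr.ifr_addr;
	sdl->sdl_len = sizeof(ifr.ifr_addr);
	sdl->sdl_family = AF_LINK;
	sdl->sdl_alen = 6;
	memcpy(LLADDR(sdl), eaddr, 6);
	return (ioctl(s, SIOCADDMULTI, &ifr));
}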
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2000-10-04 23:16:29 +00:00
|
|
|
case SIOCSIFPHYADDR:
|
|
|
|
case SIOCDIFPHYADDR:
|
|
|
|
#ifdef INET6
|
|
|
|
case SIOCSIFPHYADDR_IN6:
|
|
|
|
#endif
|
2003-10-23 13:49:10 +00:00
|
|
|
case SIOCSIFMEDIA:
|
1997-10-07 07:40:35 +00:00
|
|
|
case SIOCSIFGENERIC:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_HWIOCTL);
|
1997-05-03 21:07:13 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-09-29 05:55:04 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
1997-05-03 21:07:13 +00:00
|
|
|
return (EOPNOTSUPP);
|
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1997-05-03 21:07:13 +00:00
|
|
|
|
1999-06-19 18:42:31 +00:00
|
|
|
case SIOCGIFSTATUS:
|
2001-06-11 12:39:29 +00:00
|
|
|
case SIOCGIFPSRCADDR:
|
|
|
|
case SIOCGIFPDSTADDR:
|
1997-05-03 21:07:13 +00:00
|
|
|
case SIOCGIFMEDIA:
|
2015-04-07 21:31:17 +00:00
|
|
|
case SIOCGIFXMEDIA:
|
1997-10-07 07:40:35 +00:00
|
|
|
case SIOCGIFGENERIC:
|
2017-09-05 05:28:52 +00:00
|
|
|
case SIOCGIFRSSKEY:
|
|
|
|
case SIOCGIFRSSHASH:
|
2019-09-17 18:49:13 +00:00
|
|
|
case SIOCGIFDOWNREASON:
|
2004-02-21 12:56:09 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
1997-05-03 21:07:13 +00:00
|
|
|
return (EOPNOTSUPP);
|
2001-09-29 05:55:04 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
|
|
|
break;
|
1997-05-03 21:07:13 +00:00
|
|
|
|
2000-06-16 20:14:43 +00:00
|
|
|
case SIOCSIFLLADDR:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_SETLLADDR);
|
2000-06-16 20:14:43 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-09-29 05:55:04 +00:00
|
|
|
error = if_setlladdr(ifp,
|
2000-08-15 00:48:38 +00:00
|
|
|
ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
2000-08-15 00:48:38 +00:00
|
|
|
|
2017-05-10 22:13:47 +00:00
|
|
|
case SIOCGHWADDR:
|
|
|
|
error = if_gethwaddr(ifp, ifr);
|
|
|
|
break;
|
|
|
|
|
2019-01-22 17:39:26 +00:00
|
|
|
case CASE_IOC_IFGROUPREQ(SIOCAIFGROUP):
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_ADDIFGROUP);
|
2006-06-19 22:20:45 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2018-04-05 22:14:55 +00:00
|
|
|
if ((error = if_addgroup(ifp,
|
|
|
|
ifgr_group_get((struct ifgroupreq *)data))))
|
2006-06-19 22:20:45 +00:00
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
|
2019-01-22 17:39:26 +00:00
|
|
|
case CASE_IOC_IFGROUPREQ(SIOCGIFGROUP):
|
Widen NET_EPOCH coverage.
When epoch(9) was introduced to the network stack, it was basically
dropped in place of existing locking, which was mutexes and
rwlocks. For the sake of performance, mutex-covered areas were kept
as small as possible, and those became the epoch-covered areas.
However, epoch doesn't introduce any contention; it just delays
memory reclaim. So there is no point in minimising epoch-covered
areas for the sake of performance. Meanwhile, entering/exiting epoch
also has non-zero CPU usage, so doing this less often is a win.
Not least is code maintainability. In the new paradigm
we can assume that at any stage of processing a packet, we are
inside network epoch. This makes coding both input and output
path way easier.
On output path we already enter epoch quite early - in the
ip_output(), in the ip6_output().
This patch does the same for the input path. All ISR processing,
network related callouts, other ways of packet injection to the
network stack shall be performed in net_epoch. Any leaf function
that walks network configuration now asserts epoch.
The tricky part is configuration code paths - ioctls, sysctls. They
also call into leaf functions, so some need to be changed.
This patch would introduce more epoch recursions (see EPOCH_TRACE)
than we had before. They will be cleaned up separately, as several
of them aren't trivial. Note, that unlike a lock recursion the
epoch recursion is safe and just wastes a bit of resources.
Reviewed by: gallatin, hselasky, cy, adrian, kristof
Differential Revision: https://reviews.freebsd.org/D19111
2019-10-07 22:40:05 +00:00
|
|
|
{
|
|
|
|
struct epoch_tracker et;
|
|
|
|
|
|
|
|
NET_EPOCH_ENTER(et);
|
|
|
|
error = if_getgroup((struct ifgroupreq *)data, ifp);
|
|
|
|
NET_EPOCH_EXIT(et);
|
2006-06-19 22:20:45 +00:00
|
|
|
break;
|
2019-10-07 22:40:05 +00:00
|
|
|
}
|
2006-06-19 22:20:45 +00:00
|
|
|
|
2019-01-22 17:39:26 +00:00
|
|
|
case CASE_IOC_IFGROUPREQ(SIOCDIFGROUP):
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_DELIFGROUP);
|
2006-06-19 22:20:45 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2018-04-05 22:14:55 +00:00
|
|
|
if ((error = if_delgroup(ifp,
|
|
|
|
ifgr_group_get((struct ifgroupreq *)data))))
|
2006-06-19 22:20:45 +00:00
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
2001-09-29 05:55:04 +00:00
|
|
|
error = ENOIOCTL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-10-21 16:20:48 +00:00
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
struct ifconf32 {
|
|
|
|
int32_t ifc_len;
|
|
|
|
union {
|
|
|
|
uint32_t ifcu_buf;
|
|
|
|
uint32_t ifcu_req;
|
|
|
|
} ifc_ifcu;
|
|
|
|
};
|
|
|
|
#define SIOCGIFCONF32 _IOWR('i', 36, struct ifconf32)
|
|
|
|
#endif
|
|
|
|
|
2018-04-25 15:30:42 +00:00
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
static void
|
|
|
|
ifmr_init(struct ifmediareq *ifmr, caddr_t data)
|
|
|
|
{
|
|
|
|
struct ifmediareq32 *ifmr32;
|
|
|
|
|
|
|
|
ifmr32 = (struct ifmediareq32 *)data;
|
|
|
|
memcpy(ifmr->ifm_name, ifmr32->ifm_name,
|
|
|
|
sizeof(ifmr->ifm_name));
|
|
|
|
ifmr->ifm_current = ifmr32->ifm_current;
|
|
|
|
ifmr->ifm_mask = ifmr32->ifm_mask;
|
|
|
|
ifmr->ifm_status = ifmr32->ifm_status;
|
|
|
|
ifmr->ifm_active = ifmr32->ifm_active;
|
|
|
|
ifmr->ifm_count = ifmr32->ifm_count;
|
|
|
|
ifmr->ifm_ulist = (int *)(uintptr_t)ifmr32->ifm_ulist;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ifmr_update(const struct ifmediareq *ifmr, caddr_t data)
|
|
|
|
{
|
|
|
|
struct ifmediareq32 *ifmr32;
|
|
|
|
|
|
|
|
ifmr32 = (struct ifmediareq32 *)data;
|
|
|
|
ifmr32->ifm_current = ifmr->ifm_current;
|
|
|
|
ifmr32->ifm_mask = ifmr->ifm_mask;
|
|
|
|
ifmr32->ifm_status = ifmr->ifm_status;
|
|
|
|
ifmr32->ifm_active = ifmr->ifm_active;
|
|
|
|
ifmr32->ifm_count = ifmr->ifm_count;
|
|
|
|
}
|
|
|
|
#endif
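/*
 * For reference, the 32-bit request these thunks translate is declared
 * under COMPAT_FREEBSD32 (layout shown approximately); only the
 * pointer member changes width:
 *
 *	struct ifmediareq32 {
 *		char		ifm_name[IFNAMSIZ];
 *		int		ifm_current, ifm_mask, ifm_status;
 *		int		ifm_active, ifm_count;
 *		uint32_t	ifm_ulist;	(an int * in the native struct)
 *	};
 *
 * which is why ifmr_update() copies the scalar results back but never
 * rewrites the name or the user-list pointer.
 */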
|
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
/*
|
|
|
|
* Interface ioctls.
|
|
|
|
*/
|
|
|
|
int
|
2003-10-23 13:49:10 +00:00
|
|
|
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
|
2001-09-29 05:55:04 +00:00
|
|
|
{
|
2018-04-25 15:30:42 +00:00
|
|
|
#ifdef COMPAT_FREEBSD32
|
2018-05-19 19:00:04 +00:00
|
|
|
caddr_t saved_data = NULL;
|
2018-04-25 15:30:42 +00:00
|
|
|
struct ifmediareq ifmr;
|
2019-10-07 22:40:05 +00:00
|
|
|
struct ifmediareq *ifmrp = NULL;
|
2018-07-07 13:35:06 +00:00
|
|
|
#endif
|
2001-09-29 05:55:04 +00:00
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifreq *ifr;
|
|
|
|
int error;
|
2002-08-18 07:05:00 +00:00
|
|
|
int oif_flags;
|
2001-09-29 05:55:04 +00:00
|
|
|
|
2011-02-16 21:29:13 +00:00
|
|
|
CURVNET_SET(so->so_vnet);
|
Get closer to a VIMAGE network stack teardown from top to bottom rather
than removing the network interfaces first. This change is rather large
and convoluted as the ordering requirements cannot be separated.
Move the pfil(9) framework to SI_SUB_PROTO_PFIL, move Firewalls and
related modules to their own SI_SUB_PROTO_FIREWALL.
Move initialization of "physical" interfaces to SI_SUB_DRIVERS,
move virtual (cloned) interfaces to SI_SUB_PSEUDO.
Move Multicast to SI_SUB_PROTO_MC.
Re-work parts of multicast initialisation and teardown, not taking the
huge amount of memory into account if used as a module yet.
For interface teardown we try to do as many of them as we can on
SI_SUB_INIT_IF, but for some this makes no sense, e.g., when tunnelling
over a higher layer protocol such as IP. In that case the interface
has to go along (or before) the higher layer protocol is shutdown.
Kernel hhooks need to go last on teardown as they may be used at various
higher layers and we cannot remove them before we cleaned up the higher
layers.
For interface teardown there are multiple paths:
(a) a cloned interface is destroyed (inside a VIMAGE or in the base system),
(b) any interface is moved from a virtual network stack to a different
network stack ("vmove"), or (c) a virtual network stack is being shut down.
All code paths go through if_detach_internal() where we, depending on the
vmove flag or the vnet state, make a decision on how much to shut down;
in case we are destroying a VNET the individual protocol layers will
cleanup their own parts thus we cannot do so again for each interface as
we end up with, e.g., double-frees, destroying locks twice or acquiring
already destroyed locks.
When calling into protocol cleanups we equally have to tell them
whether they need to detach upper layer protocols ("ulp") or not
(e.g., in6_ifdetach()).
Provide or enhance helper functions to do proper cleanup at a protocol
rather than at an interface level.
Approved by: re (hrs)
Obtained from: projects/vnet
Reviewed by: gnn, jhb
Sponsored by: The FreeBSD Foundation
MFC after: 2 weeks
Differential Revision: https://reviews.freebsd.org/D6747
2016-06-21 13:48:49 +00:00
|
|
|
#ifdef VIMAGE
|
|
|
|
/* Make sure the VNET is stable. */
|
2019-10-07 14:15:41 +00:00
|
|
|
if (so->so_vnet->vnet_shutdown) {
|
2016-06-21 13:48:49 +00:00
|
|
|
CURVNET_RESTORE();
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGIFCONF:
|
2011-02-16 21:29:13 +00:00
|
|
|
error = ifconf(cmd, data);
|
2019-10-07 22:40:05 +00:00
|
|
|
goto out_noref;
|
2010-10-21 16:20:48 +00:00
|
|
|
|
|
|
|
#ifdef COMPAT_FREEBSD32
|
2006-02-02 19:58:37 +00:00
|
|
|
case SIOCGIFCONF32:
|
2010-10-21 16:20:48 +00:00
|
|
|
{
|
|
|
|
struct ifconf32 *ifc32;
|
|
|
|
struct ifconf ifc;
|
|
|
|
|
|
|
|
ifc32 = (struct ifconf32 *)data;
|
|
|
|
ifc.ifc_len = ifc32->ifc_len;
|
|
|
|
ifc.ifc_buf = PTRIN(ifc32->ifc_buf);
|
|
|
|
|
2011-02-16 21:29:13 +00:00
|
|
|
error = ifconf(SIOCGIFCONF, (void *)&ifc);
|
2011-06-28 08:41:44 +00:00
|
|
|
if (error == 0)
|
|
|
|
ifc32->ifc_len = ifc.ifc_len;
|
2019-10-07 22:40:05 +00:00
|
|
|
goto out_noref;
|
2010-10-21 16:20:48 +00:00
|
|
|
}
|
2006-02-02 19:58:37 +00:00
|
|
|
#endif
|
2001-09-29 05:55:04 +00:00
|
|
|
}
|
|
|
|
|
2018-04-25 15:30:42 +00:00
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGIFMEDIA32:
|
|
|
|
case SIOCGIFXMEDIA32:
|
|
|
|
ifmrp = &ifmr;
|
|
|
|
ifmr_init(ifmrp, data);
|
|
|
|
cmd = _IOC_NEWTYPE(cmd, struct ifmediareq);
|
|
|
|
saved_data = data;
|
|
|
|
data = (caddr_t)ifmrp;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
ifr = (struct ifreq *)data;
|
2001-09-29 05:55:04 +00:00
|
|
|
switch (cmd) {
|
2009-05-31 12:10:04 +00:00
|
|
|
#ifdef VIMAGE
|
2009-06-15 18:59:29 +00:00
|
|
|
case SIOCSIFRVNET:
|
|
|
|
error = priv_check(td, PRIV_NET_SETIFVNET);
|
2011-02-16 21:29:13 +00:00
|
|
|
if (error == 0)
|
|
|
|
error = if_vmove_reclaim(td, ifr->ifr_name,
|
|
|
|
ifr->ifr_jid);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
2009-05-31 12:10:04 +00:00
|
|
|
#endif
|
2001-09-29 05:55:04 +00:00
|
|
|
case SIOCIFCREATE:
|
2006-07-09 06:04:01 +00:00
|
|
|
case SIOCIFCREATE2:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_IFCREATE);
|
2011-02-16 21:29:13 +00:00
|
|
|
if (error == 0)
|
|
|
|
error = if_clone_create(ifr->ifr_name,
|
2018-03-30 18:50:13 +00:00
|
|
|
sizeof(ifr->ifr_name), cmd == SIOCIFCREATE2 ?
|
|
|
|
ifr_data_get_ptr(ifr) : NULL);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
2001-09-29 05:55:04 +00:00
|
|
|
case SIOCIFDESTROY:
|
2006-11-06 13:42:10 +00:00
|
|
|
error = priv_check(td, PRIV_NET_IFDESTROY);
|
2011-02-16 21:29:13 +00:00
|
|
|
if (error == 0)
|
|
|
|
error = if_clone_destroy(ifr->ifr_name);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
2003-10-23 13:49:10 +00:00
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
case SIOCIFGCLONERS:
|
2011-02-16 21:29:13 +00:00
|
|
|
error = if_clone_list((struct if_clonereq *)data);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
|
|
|
|
2019-01-22 17:39:26 +00:00
|
|
|
case CASE_IOC_IFGROUPREQ(SIOCGIFGMEMB):
|
2011-02-16 21:29:13 +00:00
|
|
|
error = if_getgroupmembers((struct ifgroupreq *)data);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
|
|
|
|
A major overhaul of the CARP implementation. The ip_carp.c was started
from scratch, copying needed functionality from the old implementation
on demand, with a thorough review of all code. The main change is that
interface layer has been removed from the CARP. Now redundant addresses
are configured exactly on the interfaces, they run on.
The CARP configuration itself is, as before, configured and read via
SIOCSVH/SIOCGVH ioctls. A new prefix created with SIOCAIFADDR or
SIOCAIFADDR_IN6 may now be configured to a particular virtual host id,
which makes the prefix redundant.
ifconfig(8) semantics have been changed too: now one doesn't need
to clone a carpXX interface; he/she should directly configure a vhid
on an Ethernet interface.
To supply vhid data from the kernel to an application, the getifaddrs(3)
function had been changed to pass ifam_data with each address. [1]
The new implementation definitely closes all PRs related to carp(4)
being an interface, and may close several others. It also allows
running a single redundant IP per interface.
Big thanks to Bjoern Zeeb for his help with inet6 part of patch, for
idea on using ifam_data and for several rounds of reviewing!
PR: kern/117000, kern/126945, kern/126714, kern/120130, kern/117448
Reviewed by: bz
Submitted by: bz [1]
2011-12-16 12:16:56 +00:00
|
|
|
#if defined(INET) || defined(INET6)
|
|
|
|
case SIOCSVH:
|
|
|
|
case SIOCGVH:
|
|
|
|
if (carp_ioctl_p == NULL)
|
|
|
|
error = EPROTONOSUPPORT;
|
|
|
|
else
|
|
|
|
error = (*carp_ioctl_p)(ifr, cmd, td);
|
2018-04-25 15:30:42 +00:00
|
|
|
goto out_noref;
|
2011-12-16 12:16:56 +00:00
|
|
|
#endif
|
2001-09-29 05:55:04 +00:00
|
|
|
}
|
|
|
|
|
2009-04-23 13:08:47 +00:00
|
|
|
ifp = ifunit_ref(ifr->ifr_name);
|
2011-02-16 21:29:13 +00:00
|
|
|
if (ifp == NULL) {
|
2018-04-25 15:30:42 +00:00
|
|
|
error = ENXIO;
|
|
|
|
goto out_noref;
|
2011-02-16 21:29:13 +00:00
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
|
|
|
|
error = ifhwioctl(cmd, ifp, data, td);
|
2018-04-25 15:30:42 +00:00
|
|
|
if (error != ENOIOCTL)
|
|
|
|
goto out_ref;
|
2001-09-29 05:55:04 +00:00
|
|
|
|
|
|
|
oif_flags = ifp->if_flags;
|
2009-04-23 13:08:47 +00:00
|
|
|
if (so->so_proto == NULL) {
|
2018-04-25 15:30:42 +00:00
|
|
|
error = EOPNOTSUPP;
|
|
|
|
goto out_ref;
|
2009-04-23 13:08:47 +00:00
|
|
|
}
|
2013-09-10 10:05:59 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Pass the request on to the socket control method, and if the
|
|
|
|
* latter returns EOPNOTSUPP, directly to the interface.
|
|
|
|
*
|
|
|
|
* Make an exception for the legacy SIOCSIF* requests. Drivers
|
|
|
|
* trust SIOCSIFADDR et al to come from an already privileged
|
|
|
|
* layer, and do not perform any credentials checks or input
|
|
|
|
* validation.
|
|
|
|
*/
|
2013-11-05 10:29:47 +00:00
|
|
|
error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd, data,
|
|
|
|
ifp, td));
|
2013-09-10 10:05:59 +00:00
|
|
|
if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL &&
|
|
|
|
cmd != SIOCSIFADDR && cmd != SIOCSIFBRDADDR &&
|
|
|
|
cmd != SIOCSIFDSTADDR && cmd != SIOCSIFNETMASK)
|
2009-03-20 13:41:23 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
1999-11-22 02:45:11 +00:00
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
|
1999-11-22 02:45:11 +00:00
|
|
|
#ifdef INET6
|
2012-10-18 13:57:24 +00:00
|
|
|
if (ifp->if_flags & IFF_UP)
|
2001-09-29 05:55:04 +00:00
|
|
|
in6_if_up(ifp);
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2018-04-25 15:30:42 +00:00
|
|
|
|
|
|
|
out_ref:
|
2009-04-23 13:08:47 +00:00
|
|
|
if_rele(ifp);
|
2018-04-25 15:30:42 +00:00
|
|
|
out_noref:
|
|
|
|
#ifdef COMPAT_FREEBSD32
|
|
|
|
if (ifmrp != NULL) {
|
|
|
|
KASSERT((cmd == SIOCGIFMEDIA || cmd == SIOCGIFXMEDIA),
|
|
|
|
("ifmrp non-NULL, but cmd is not an ifmedia req 0x%lx",
|
|
|
|
cmd));
|
|
|
|
data = saved_data;
|
|
|
|
ifmr_update(ifmrp, data);
|
|
|
|
}
|
|
|
|
#endif
|
2011-02-16 21:29:13 +00:00
|
|
|
CURVNET_RESTORE();
|
2001-09-29 05:55:04 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1995-09-22 17:57:48 +00:00
|
|
|
/*
|
2005-08-09 10:16:17 +00:00
|
|
|
* The code common to handling reference counted flags,
|
2005-07-14 13:56:51 +00:00
|
|
|
* e.g., in ifpromisc() and if_allmulti().
|
2005-10-03 02:14:51 +00:00
|
|
|
* The "pflag" argument can specify a permanent mode flag to check,
|
2005-07-14 13:56:51 +00:00
|
|
|
* such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
|
2005-08-09 10:16:17 +00:00
|
|
|
*
|
|
|
|
* Only to be used on stack-owned flags, not driver-owned flags.
|
1995-09-22 17:57:48 +00:00
|
|
|
*/
|
2005-07-14 13:56:51 +00:00
|
|
|
static int
|
|
|
|
if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
|
1995-09-22 17:57:48 +00:00
|
|
|
{
|
|
|
|
struct ifreq ifr;
|
1997-02-14 15:30:54 +00:00
|
|
|
int error;
|
2005-07-14 13:56:51 +00:00
|
|
|
int oldflags, oldcount;
|
1995-09-22 17:57:48 +00:00
|
|
|
|
2005-10-03 02:14:51 +00:00
|
|
|
/* Sanity checks to catch programming errors */
|
2005-08-09 10:16:17 +00:00
|
|
|
KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
|
2005-10-03 02:14:51 +00:00
|
|
|
("%s: setting driver-owned flag %d", __func__, flag));
|
2005-08-09 10:16:17 +00:00
|
|
|
|
2005-10-03 02:14:51 +00:00
|
|
|
if (onswitch)
|
|
|
|
KASSERT(*refcount >= 0,
|
|
|
|
("%s: increment negative refcount %d for flag %d",
|
|
|
|
__func__, *refcount, flag));
|
|
|
|
else
|
|
|
|
KASSERT(*refcount > 0,
|
|
|
|
("%s: decrement non-positive refcount %d for flag %d",
|
|
|
|
__func__, *refcount, flag));
|
2005-07-14 13:56:51 +00:00
|
|
|
|
|
|
|
/* In case this mode is permanent, just touch refcount */
|
|
|
|
if (ifp->if_flags & pflag) {
|
|
|
|
*refcount += onswitch ? 1 : -1;
|
2002-08-19 15:16:38 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2005-07-14 13:56:51 +00:00
|
|
|
|
|
|
|
/* Save ifnet parameters, since if_ioctl() may fail and we recover */
|
|
|
|
oldcount = *refcount;
|
|
|
|
oldflags = ifp->if_flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we aren't the only consumer, touching the refcount is enough;
|
|
|
|
* actually toggle the interface flag only if we are the first or last.
|
|
|
|
*/
|
|
|
|
if (onswitch) {
|
|
|
|
if ((*refcount)++)
|
1995-09-22 17:57:48 +00:00
|
|
|
return (0);
|
2005-07-14 13:56:51 +00:00
|
|
|
ifp->if_flags |= flag;
|
1995-09-22 17:57:48 +00:00
|
|
|
} else {
|
2005-07-14 13:56:51 +00:00
|
|
|
if (--(*refcount))
|
1995-09-22 17:57:48 +00:00
|
|
|
return (0);
|
2005-07-14 13:56:51 +00:00
|
|
|
ifp->if_flags &= ~flag;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Call down to the driver since we've changed interface flags */
|
|
|
|
if (ifp->if_ioctl == NULL) {
|
|
|
|
error = EOPNOTSUPP;
|
|
|
|
goto recover;
|
1995-09-22 17:57:48 +00:00
|
|
|
}
|
2002-08-18 07:05:00 +00:00
|
|
|
ifr.ifr_flags = ifp->if_flags & 0xffff;
|
|
|
|
ifr.ifr_flagshigh = ifp->if_flags >> 16;
|
1997-02-14 15:30:54 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
|
2005-07-14 13:56:51 +00:00
|
|
|
if (error)
|
|
|
|
goto recover;
|
|
|
|
/* Notify userland that interface flags have changed */
|
|
|
|
rt_ifmsg(ifp);
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
recover:
|
|
|
|
/* Recover after driver error */
|
|
|
|
*refcount = oldcount;
|
|
|
|
ifp->if_flags = oldflags;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set/clear promiscuous mode on interface ifp based on the truth value
|
|
|
|
* of pswitch. The calls are reference counted so that only the first
|
|
|
|
* "on" request actually has an effect, as does the final "off" request.
|
|
|
|
* Results are undefined if the "off" and "on" requests are not matched.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ifpromisc(struct ifnet *ifp, int pswitch)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int oldflags = ifp->if_flags;
|
|
|
|
|
|
|
|
error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
|
|
|
|
&ifp->if_pcount, pswitch);
|
|
|
|
/* If promiscuous mode status has changed, log a message */
|
2016-05-12 19:42:13 +00:00
|
|
|
if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC) &&
|
|
|
|
log_promisc_mode_change)
|
2018-05-11 00:19:49 +00:00
|
|
|
if_printf(ifp, "promiscuous mode %s\n",
|
2001-04-27 22:20:22 +00:00
|
|
|
(ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
|
2005-07-14 13:56:51 +00:00
|
|
|
return (error);
|
1995-09-22 17:57:48 +00:00
|
|
|
}
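/*
 * A hypothetical in-kernel usage sketch: consumers bracket their
 * lifetime with matched ifpromisc() calls, and only the outermost
 * pair reaches the driver through if_setflag()/SIOCSIFFLAGS.
 */
static int
example_tap_attach(struct ifnet *ifp)
{
	/* First "on": if_pcount 0 -> 1, IFF_PROMISC gets set. */
	return (ifpromisc(ifp, 1));
}

static void
example_tap_detach(struct ifnet *ifp)
{
	/* Last "off": if_pcount 1 -> 0, IFF_PROMISC gets cleared. */
	(void)ifpromisc(ifp, 0);
}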
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Return the interface configuration of the system.  The list may
* be used in later ioctls (above) to get other information.
|
|
|
|
*/
|
|
|
|
/*ARGSUSED*/
|
1995-12-09 20:47:15 +00:00
|
|
|
static int
|
2003-10-23 13:49:10 +00:00
|
|
|
ifconf(u_long cmd, caddr_t data)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-06 00:44:45 +00:00
|
|
|
struct ifconf *ifc = (struct ifconf *)data;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifaddr *ifa;
|
2004-09-22 08:59:41 +00:00
|
|
|
struct ifreq ifr;
|
|
|
|
struct sbuf *sb;
|
|
|
|
int error, full = 0, valid_len, max_len;
|
|
|
|
|
|
|
|
/* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
|
|
|
|
max_len = MAXPHYS - 1;
|
|
|
|
|
2005-02-12 17:51:12 +00:00
|
|
|
/* Prevent hostile input from being able to crash the system */
|
|
|
|
if (ifc->ifc_len <= 0)
|
|
|
|
return (EINVAL);
|
|
|
|
|
2004-09-22 08:59:41 +00:00
|
|
|
again:
|
|
|
|
if (ifc->ifc_len <= max_len) {
|
|
|
|
max_len = ifc->ifc_len;
|
|
|
|
full = 1;
|
|
|
|
}
|
|
|
|
sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
|
|
|
|
max_len = 0;
|
|
|
|
valid_len = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2009-08-23 20:40:19 +00:00
|
|
|
IFNET_RLOCK();
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
|
2019-01-09 01:11:19 +00:00
|
|
|
struct epoch_tracker et;
|
2003-10-31 18:32:15 +00:00
|
|
|
int addrs;
|
1995-05-30 08:16:23 +00:00
|
|
|
|
2005-04-15 01:52:40 +00:00
|
|
|
/*
|
2018-04-05 21:58:28 +00:00
|
|
|
* Zero the ifr to make sure we don't disclose the contents
|
|
|
|
* of the stack.
|
2005-04-15 01:52:40 +00:00
|
|
|
*/
|
2018-04-05 21:58:28 +00:00
|
|
|
memset(&ifr, 0, sizeof(ifr));
|
2005-04-15 01:52:40 +00:00
|
|
|
|
2003-10-31 18:32:15 +00:00
|
|
|
if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
|
2005-09-04 17:32:47 +00:00
|
|
|
>= sizeof(ifr.ifr_name)) {
|
|
|
|
sbuf_delete(sb);
|
|
|
|
IFNET_RUNLOCK();
|
2004-09-22 08:59:41 +00:00
|
|
|
return (ENAMETOOLONG);
|
2005-09-04 17:32:47 +00:00
|
|
|
}
|
1994-10-05 20:11:28 +00:00
|
|
|
|
This implements the mumbled-about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no provisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
addrs = 0;
|
2019-01-09 01:11:19 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
|
2001-09-07 05:32:54 +00:00
|
|
|
struct sockaddr *sa = ifa->ifa_addr;
|
|
|
|
|
2009-02-05 14:06:09 +00:00
|
|
|
if (prison_if(curthread->td_ucred, sa) != 0)
|
1999-04-28 11:38:52 +00:00
|
|
|
continue;
|
|
|
|
addrs++;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (sa->sa_len <= sizeof(*sa)) {
|
2018-04-06 20:26:56 +00:00
|
|
|
if (sa->sa_len < sizeof(*sa)) {
|
|
|
|
memset(&ifr.ifr_ifru.ifru_addr, 0,
|
|
|
|
sizeof(ifr.ifr_ifru.ifru_addr));
|
|
|
|
memcpy(&ifr.ifr_ifru.ifru_addr, sa,
|
|
|
|
sa->sa_len);
|
|
|
|
} else
|
|
|
|
ifr.ifr_ifru.ifru_addr = *sa;
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr, sizeof(ifr));
|
|
|
|
max_len += sizeof(ifr);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr,
|
|
|
|
offsetof(struct ifreq, ifr_addr));
|
|
|
|
max_len += offsetof(struct ifreq, ifr_addr);
|
|
|
|
sbuf_bcat(sb, sa, sa->sa_len);
|
|
|
|
max_len += sa->sa_len;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-09-22 08:59:41 +00:00
|
|
|
|
2010-09-10 16:42:16 +00:00
|
|
|
if (sbuf_error(sb) == 0)
|
2004-09-22 08:59:41 +00:00
|
|
|
valid_len = sbuf_len(sb);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2019-01-09 01:11:19 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2004-09-22 08:59:41 +00:00
|
|
|
if (addrs == 0) {
|
|
|
|
sbuf_bcat(sb, &ifr, sizeof(ifr));
|
|
|
|
max_len += sizeof(ifr);
|
|
|
|
|
2010-09-10 16:42:16 +00:00
|
|
|
if (sbuf_error(sb) == 0)
|
2004-09-22 08:59:41 +00:00
|
|
|
valid_len = sbuf_len(sb);
|
1999-04-28 11:38:52 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RUNLOCK();
|
2004-09-22 08:59:41 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we didn't allocate enough space (uncommon), try again. If
|
|
|
|
* we have already allocated as much space as we are allowed,
|
|
|
|
* return what we've got.
|
|
|
|
*/
|
|
|
|
if (valid_len != max_len && !full) {
|
|
|
|
sbuf_delete(sb);
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
ifc->ifc_len = valid_len;
|
2004-09-22 12:53:27 +00:00
|
|
|
sbuf_finish(sb);
|
2004-09-22 08:59:41 +00:00
|
|
|
error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
|
|
|
|
sbuf_delete(sb);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
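/*
 * A hypothetical userland sketch of the sizing convention above: the
 * kernel truncates the list to ifc_len and reports how much it wrote,
 * so callers grow the buffer until the result clearly fit.
 */
static int
example_get_ifconf(int s, char **bufp, int *lenp)
{
	struct ifconf ifc;
	char *buf;
	int len = 4096;

	for (;;) {
		if ((buf = malloc(len)) == NULL)
			return (-1);
		ifc.ifc_len = len;
		ifc.ifc_buf = buf;
		if (ioctl(s, SIOCGIFCONF, &ifc) == -1) {
			free(buf);
			return (-1);
		}
		/* A nearly full buffer may have been truncated: retry. */
		if (ifc.ifc_len + (int)sizeof(struct ifreq) < len) {
			*bufp = buf;
			*lenp = ifc.ifc_len;
			return (0);
		}
		free(buf);
		len *= 2;
	}
}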
|
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
2005-02-22 15:29:29 +00:00
|
|
|
* Just like ifpromisc(), but for all-multicast-reception mode.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
|
|
|
int
|
2003-10-23 13:49:10 +00:00
|
|
|
if_allmulti(struct ifnet *ifp, int onswitch)
|
1997-01-07 19:15:32 +00:00
|
|
|
{
|
1997-02-14 15:30:54 +00:00
|
|
|
|
2005-07-14 13:56:51 +00:00
|
|
|
return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
|
|
|
|
2007-03-20 03:15:43 +00:00
|
|
|
struct ifmultiaddr *
|
2015-09-05 05:33:20 +00:00
|
|
|
if_findmulti(struct ifnet *ifp, const struct sockaddr *sa)
|
2005-08-02 23:23:26 +00:00
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
|
|
|
|
IF_ADDR_LOCK_ASSERT(ifp);
|
|
|
|
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
2007-02-22 00:14:02 +00:00
|
|
|
if (sa->sa_family == AF_LINK) {
|
|
|
|
if (sa_dl_equal(ifma->ifma_addr, sa))
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
if (sa_equal(ifma->ifma_addr, sa))
|
|
|
|
break;
|
|
|
|
}
|
2005-08-02 23:23:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (ifma);
|
|
|
|
}
|
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
2005-08-02 23:23:26 +00:00
|
|
|
* Allocate a new ifmultiaddr and initialize it based on the passed arguments.
|
|
|
|
* We make copies of the passed sockaddrs.  The ifmultiaddr will not be added to
|
|
|
|
* the ifnet multicast address list here, so the caller must do that and
|
|
|
|
* other setup work (such as notifying the device driver). The reference
|
|
|
|
* count is initialized to 1.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
2005-08-02 23:23:26 +00:00
|
|
|
static struct ifmultiaddr *
|
|
|
|
if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
|
|
|
|
int mflags)
|
1997-01-07 19:15:32 +00:00
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
2005-08-02 23:23:26 +00:00
|
|
|
struct sockaddr *dupsa;
|
|
|
|
|
2008-10-23 15:53:51 +00:00
|
|
|
ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
|
2005-08-02 23:23:26 +00:00
|
|
|
M_ZERO);
|
|
|
|
if (ifma == NULL)
|
|
|
|
return (NULL);
|
|
|
|
|
2008-10-23 15:53:51 +00:00
|
|
|
dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
|
2005-08-02 23:23:26 +00:00
|
|
|
if (dupsa == NULL) {
|
2008-10-23 15:53:51 +00:00
|
|
|
free(ifma, M_IFMADDR);
|
2005-08-02 23:23:26 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
bcopy(sa, dupsa, sa->sa_len);
|
|
|
|
ifma->ifma_addr = dupsa;
|
|
|
|
|
|
|
|
ifma->ifma_ifp = ifp;
|
|
|
|
ifma->ifma_refcount = 1;
|
|
|
|
ifma->ifma_protospec = NULL;
|
|
|
|
|
|
|
|
if (llsa == NULL) {
|
|
|
|
ifma->ifma_lladdr = NULL;
|
|
|
|
return (ifma);
|
|
|
|
}
|
|
|
|
|
2008-10-23 15:53:51 +00:00
|
|
|
dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
|
2005-08-02 23:23:26 +00:00
|
|
|
if (dupsa == NULL) {
|
2008-10-23 15:53:51 +00:00
|
|
|
free(ifma->ifma_addr, M_IFMADDR);
|
|
|
|
free(ifma, M_IFMADDR);
|
2005-08-02 23:23:26 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
bcopy(llsa, dupsa, llsa->sa_len);
|
|
|
|
ifma->ifma_lladdr = dupsa;
|
|
|
|
|
|
|
|
return (ifma);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if_freemulti: free ifmultiaddr structure and possibly attached related
|
|
|
|
* addresses. The caller is responsible for implementing reference
|
|
|
|
* counting, notifying the driver, handling routing messages, and releasing
|
|
|
|
* any dependent link layer state.
|
|
|
|
*/
|
2018-05-06 20:34:13 +00:00
|
|
|
#ifdef MCAST_VERBOSE
|
|
|
|
extern void kdb_backtrace(void);
|
|
|
|
#endif
|
2018-05-18 20:13:34 +00:00
|
|
|
static void
|
|
|
|
if_freemulti_internal(struct ifmultiaddr *ifma)
|
2005-08-02 23:23:26 +00:00
|
|
|
{
|
|
|
|
|
2007-03-20 00:36:10 +00:00
|
|
|
KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
|
2005-08-02 23:23:26 +00:00
|
|
|
ifma->ifma_refcount));
|
|
|
|
|
|
|
|
if (ifma->ifma_lladdr != NULL)
|
2008-10-23 15:53:51 +00:00
|
|
|
free(ifma->ifma_lladdr, M_IFMADDR);
|
2018-05-06 20:34:13 +00:00
|
|
|
#ifdef MCAST_VERBOSE
|
|
|
|
kdb_backtrace();
|
|
|
|
printf("%s freeing ifma: %p\n", __func__, ifma);
|
|
|
|
#endif
|
2008-10-23 15:53:51 +00:00
|
|
|
free(ifma->ifma_addr, M_IFMADDR);
|
|
|
|
free(ifma, M_IFMADDR);
|
2005-08-02 23:23:26 +00:00
|
|
|
}
|
|
|
|
|
2018-05-18 20:13:34 +00:00
|
|
|
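/*
 * Epoch callback: recover the ifmultiaddr from its embedded epoch
 * context and perform the actual free once all readers have drained.
 */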
static void
|
|
|
|
if_destroymulti(epoch_context_t ctx)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
|
|
|
|
ifma = __containerof(ctx, struct ifmultiaddr, ifma_epoch_ctx);
|
|
|
|
if_freemulti_internal(ifma);
|
|
|
|
}
|
|
|
|
|
|
|
|
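/*
 * Schedule an unreferenced ifmultiaddr for destruction via the
 * preemptible network epoch, so that concurrent readers still walking
 * the multicast list cannot observe freed memory.
 */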
void
|
|
|
|
if_freemulti(struct ifmultiaddr *ifma)
|
|
|
|
{
|
|
|
|
KASSERT(ifma->ifma_refcount == 0, ("if_freemulti_epoch: refcount %d",
|
|
|
|
ifma->ifma_refcount));
|
|
|
|
|
|
|
|
epoch_call(net_epoch_preempt, &ifma->ifma_epoch_ctx, if_destroymulti);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-08-02 23:23:26 +00:00
|
|
|
/*
|
|
|
|
* Register an additional multicast address with a network interface.
|
|
|
|
*
|
|
|
|
* - If the address is already present, bump the reference count on the
|
|
|
|
* address and return.
|
|
|
|
* - If the address is not link-layer, look up a link layer address.
|
|
|
|
* - Allocate address structures for one or both addresses, and attach to the
|
|
|
|
* multicast address list on the interface. If automatically adding a link
|
|
|
|
* layer address, the protocol address will own a reference to the link
|
|
|
|
* layer address, to be freed when it is freed.
|
|
|
|
* - Notify the network device driver of an addition to the multicast address
|
|
|
|
* list.
|
|
|
|
*
|
|
|
|
* 'sa' points to caller-owned memory with the desired multicast address.
|
|
|
|
*
|
|
|
|
* 'retifma' will be used to return a pointer to the resulting multicast
|
|
|
|
* address reference, if desired.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
|
|
|
|
struct ifmultiaddr **retifma)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma, *ll_ifma;
|
|
|
|
struct sockaddr *llsa;
|
2014-01-18 23:24:51 +00:00
|
|
|
struct sockaddr_dl sdl;
|
2005-08-02 23:23:26 +00:00
|
|
|
int error;
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2018-05-02 19:36:29 +00:00
|
|
|
#ifdef INET
|
|
|
|
IN_MULTI_LIST_UNLOCK_ASSERT();
|
|
|
|
#endif
|
|
|
|
#ifdef INET6
|
|
|
|
IN6_MULTI_LIST_UNLOCK_ASSERT();
|
|
|
|
#endif
|
1997-07-07 17:36:06 +00:00
|
|
|
/*
|
2005-08-02 23:23:26 +00:00
|
|
|
* If the address is already present, return a new reference to it;
|
|
|
|
* otherwise, allocate storage and set up a new address.
|
1997-07-07 17:36:06 +00:00
|
|
|
*/
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2005-08-02 23:23:26 +00:00
|
|
|
ifma = if_findmulti(ifp, sa);
|
|
|
|
if (ifma != NULL) {
|
|
|
|
ifma->ifma_refcount++;
|
|
|
|
if (retifma != NULL)
|
|
|
|
*retifma = ifma;
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2005-08-02 23:23:26 +00:00
|
|
|
return (0);
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-08-02 23:23:26 +00:00
|
|
|
* The address isn't already present; resolve the protocol address
|
|
|
|
* into a link layer address, and then look that up, bumping its
|
2014-01-18 23:24:51 +00:00
|
|
|
* refcount or allocating an ifma for it as well.
|
|
|
|
* Most link layer resolving functions return address data that
|
|
|
|
* fits inside the default sockaddr_dl structure.  However, the callback
|
|
|
|
* may allocate another sockaddr structure, in which case we need to
|
|
|
|
* free it later.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
2005-08-02 23:23:26 +00:00
|
|
|
llsa = NULL;
|
|
|
|
ll_ifma = NULL;
|
2005-07-19 10:12:58 +00:00
|
|
|
if (ifp->if_resolvemulti != NULL) {
|
2014-01-18 23:24:51 +00:00
|
|
|
/* Provide called function with buffer size information */
|
|
|
|
sdl.sdl_len = sizeof(sdl);
|
|
|
|
llsa = (struct sockaddr *)&sdl;
|
1997-01-07 19:15:32 +00:00
|
|
|
error = ifp->if_resolvemulti(ifp, &llsa, sa);
|
2005-08-02 23:23:26 +00:00
|
|
|
if (error)
|
|
|
|
goto unlock_out;
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
|
|
|
|
2005-08-02 23:23:26 +00:00
|
|
|
/*
|
|
|
|
* Allocate the new address. Don't hook it up yet, as we may also
|
|
|
|
* need to allocate a link layer multicast address.
|
|
|
|
*/
|
|
|
|
ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
|
|
|
|
if (ifma == NULL) {
|
|
|
|
error = ENOMEM;
|
|
|
|
goto free_llsa_out;
|
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2005-08-02 23:23:26 +00:00
|
|
|
/*
|
|
|
|
* If a link layer address is found, we'll need to see if it's
|
|
|
|
* already present in the address list, or allocate it as well.
|
|
|
|
* When this block finishes, the link layer address will be on the
|
|
|
|
* list.
|
|
|
|
*/
|
|
|
|
if (llsa != NULL) {
|
|
|
|
ll_ifma = if_findmulti(ifp, llsa);
|
|
|
|
if (ll_ifma == NULL) {
|
|
|
|
ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
|
|
|
|
if (ll_ifma == NULL) {
|
2007-03-20 00:36:10 +00:00
|
|
|
--ifma->ifma_refcount;
|
2005-08-02 23:23:26 +00:00
|
|
|
if_freemulti(ifma);
|
|
|
|
error = ENOMEM;
|
|
|
|
goto free_llsa_out;
|
|
|
|
}
|
2018-08-15 20:23:08 +00:00
|
|
|
ll_ifma->ifma_flags |= IFMA_F_ENQUEUED;
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
|
2005-08-02 23:23:26 +00:00
|
|
|
ifma_link);
|
|
|
|
} else
|
|
|
|
ll_ifma->ifma_refcount++;
|
2007-03-20 00:36:10 +00:00
|
|
|
ifma->ifma_llifma = ll_ifma;
|
2005-08-02 23:23:26 +00:00
|
|
|
}
|
1997-01-08 13:20:25 +00:00
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
2005-08-02 23:23:26 +00:00
|
|
|
* We now have a new multicast address, ifma, and possibly a new or
|
|
|
|
* referenced link layer address. Add the primary address to the
|
|
|
|
* ifnet address list.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
2018-08-15 20:23:08 +00:00
|
|
|
ifma->ifma_flags |= IFMA_F_ENQUEUED;
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
|
2005-08-02 23:23:26 +00:00
|
|
|
|
2002-07-02 08:23:00 +00:00
|
|
|
if (retifma != NULL)
|
|
|
|
*retifma = ifma;
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2005-08-02 23:23:26 +00:00
|
|
|
/*
|
|
|
|
* Must generate the message while holding the lock so that 'ifma'
|
|
|
|
* pointer is still valid.
|
|
|
|
*/
|
|
|
|
rt_newmaddrmsg(RTM_NEWMADDR, ifma);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2005-08-02 23:23:26 +00:00
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
|
|
|
* We are certain we have added something, so call down to the
|
|
|
|
* interface to let it know about it.
|
|
|
|
*/
|
2005-07-19 10:12:58 +00:00
|
|
|
if (ifp->if_ioctl != NULL) {
|
2005-07-14 13:56:51 +00:00
|
|
|
(void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
|
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2014-01-18 23:24:51 +00:00
|
|
|
if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl))
|
|
|
|
link_free_sdl(llsa);
|
2005-08-02 23:23:26 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
free_llsa_out:
|
2014-01-18 23:24:51 +00:00
|
|
|
if ((llsa != NULL) && (llsa != (struct sockaddr *)&sdl))
|
|
|
|
link_free_sdl(llsa);
|
2005-08-02 23:23:26 +00:00
|
|
|
|
|
|
|
unlock_out:
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2005-08-02 23:23:26 +00:00
|
|
|
return (error);
|
1997-01-07 19:15:32 +00:00
|
|
|
}
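/*
 * Usage sketch (illustrative only; the sockaddr setup is abbreviated):
 * joining an Ethernet multicast group and keeping the returned
 * reference so it can later be dropped with if_delmulti_ifma():
 *
 *	struct sockaddr_dl sdl;
 *	struct ifmultiaddr *ifma;
 *	int error;
 *
 *	... initialize sdl as AF_LINK with the group MAC in LLADDR() ...
 *	error = if_addmulti(ifp, (struct sockaddr *)&sdl, &ifma);
 *	...
 *	if_delmulti_ifma(ifma);
 */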
|
|
|
|
|
|
|
|
/*
|
2007-03-20 00:36:10 +00:00
|
|
|
* Delete a multicast group membership by network-layer group address.
|
|
|
|
*
|
|
|
|
* Returns ENOENT if the entry could not be found. If ifp no longer
|
|
|
|
* exists, results are undefined. This entry point should only be used
|
|
|
|
* from subsystems which do appropriate locking to hold ifp for the
|
|
|
|
* duration of the call.
|
|
|
|
* Network-layer protocol domains must use if_delmulti_ifma().
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
|
|
|
int
|
2003-10-23 13:49:10 +00:00
|
|
|
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
|
1997-01-07 19:15:32 +00:00
|
|
|
{
|
2007-03-20 00:36:10 +00:00
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
int lastref;
|
|
|
|
|
2019-10-14 21:18:37 +00:00
|
|
|
KASSERT(ifp, ("%s: NULL ifp", __func__));
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2007-03-20 00:36:10 +00:00
|
|
|
lastref = 0;
|
2005-08-02 23:23:26 +00:00
|
|
|
ifma = if_findmulti(ifp, sa);
|
2007-03-20 00:36:10 +00:00
|
|
|
if (ifma != NULL)
|
|
|
|
lastref = if_delmulti_locked(ifp, ifma, 0);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2007-03-20 00:36:10 +00:00
|
|
|
|
|
|
|
if (ifma == NULL)
|
|
|
|
return (ENOENT);
|
|
|
|
|
|
|
|
if (lastref && ifp->if_ioctl != NULL) {
|
|
|
|
(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
|
2005-08-02 23:23:26 +00:00
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2007-03-20 00:36:10 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2010-01-24 16:17:58 +00:00
|
|
|
/*
|
|
|
|
* Delete all multicast group memberships for an interface.
|
|
|
|
* Should be used to quickly flush all multicast filters.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
if_delallmulti(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
struct ifmultiaddr *next;
|
|
|
|
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
|
2010-01-24 16:17:58 +00:00
|
|
|
if_delmulti_locked(ifp, ifma, 0);
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2010-01-24 16:17:58 +00:00
|
|
|
}
|
|
|
|
|
2018-05-06 20:34:13 +00:00
|
|
|
void
|
|
|
|
if_delmulti_ifma(struct ifmultiaddr *ifma)
|
|
|
|
{
|
|
|
|
if_delmulti_ifma_flags(ifma, 0);
|
|
|
|
}
|
|
|
|
|
2007-03-20 00:36:10 +00:00
|
|
|
/*
|
|
|
|
* Delete a multicast group membership by group membership pointer.
|
|
|
|
* Network-layer protocol domains must use this routine.
|
|
|
|
*
|
2009-03-15 14:21:05 +00:00
|
|
|
* It is safe to call this routine even if the ifp has disappeared.
|
2007-03-20 00:36:10 +00:00
|
|
|
*/
|
|
|
|
void
|
2018-05-06 20:34:13 +00:00
|
|
|
if_delmulti_ifma_flags(struct ifmultiaddr *ifma, int flags)
|
2007-03-20 00:36:10 +00:00
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
int lastref;
|
2018-05-06 20:34:13 +00:00
|
|
|
MCDPRINTF("%s freeing ifma: %p\n", __func__, ifma);
|
2018-05-02 19:36:29 +00:00
|
|
|
#ifdef INET
|
|
|
|
IN_MULTI_LIST_UNLOCK_ASSERT();
|
|
|
|
#endif
|
2007-03-20 00:36:10 +00:00
|
|
|
ifp = ifma->ifma_ifp;
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
if (ifp == NULL) {
|
|
|
|
printf("%s: ifma_ifp seems to be detached\n", __func__);
|
|
|
|
} else {
|
2019-10-08 16:45:56 +00:00
|
|
|
struct epoch_tracker et;
|
2007-03-20 00:36:10 +00:00
|
|
|
struct ifnet *oifp;
|
|
|
|
|
2019-10-08 16:45:56 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2018-05-23 21:02:14 +00:00
|
|
|
CK_STAILQ_FOREACH(oifp, &V_ifnet, if_link)
|
2007-03-20 00:36:10 +00:00
|
|
|
if (ifp == oifp)
|
|
|
|
break;
|
2019-10-08 16:45:56 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2018-05-29 00:53:53 +00:00
|
|
|
if (ifp != oifp)
|
2007-03-20 00:36:10 +00:00
|
|
|
ifp = NULL;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* If and only if the ifnet instance exists: Acquire the address lock.
|
|
|
|
*/
|
|
|
|
if (ifp != NULL)
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK(ifp);
|
2007-03-20 00:36:10 +00:00
|
|
|
|
2018-05-06 20:34:13 +00:00
|
|
|
lastref = if_delmulti_locked(ifp, ifma, flags);
|
2007-03-20 00:36:10 +00:00
|
|
|
|
|
|
|
if (ifp != NULL) {
|
|
|
|
/*
|
|
|
|
* If and only if the ifnet instance exists:
|
|
|
|
* Release the address lock.
|
|
|
|
* If the group was left: update the hardware hash filter.
|
|
|
|
*/
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WUNLOCK(ifp);
|
2007-03-20 00:36:10 +00:00
|
|
|
if (lastref && ifp->if_ioctl != NULL) {
|
|
|
|
(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
|
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
2007-03-20 00:36:10 +00:00
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2007-03-20 00:36:10 +00:00
|
|
|
/*
|
|
|
|
* Perform deletion of network-layer and/or link-layer multicast address.
|
|
|
|
*
|
|
|
|
* Return 0 if the reference count was decremented.
|
|
|
|
* Return 1 if the final reference was released, indicating that the
|
|
|
|
* hardware hash filter should be reprogrammed.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ll_ifma;
|
|
|
|
|
|
|
|
if (ifp != NULL && ifma->ifma_ifp != NULL) {
|
|
|
|
KASSERT(ifma->ifma_ifp == ifp,
|
|
|
|
("%s: inconsistent ifp %p", __func__, ifp));
|
2012-01-05 19:00:36 +00:00
|
|
|
IF_ADDR_WLOCK_ASSERT(ifp);
|
2007-03-20 00:36:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ifp = ifma->ifma_ifp;
|
2018-05-06 20:34:13 +00:00
|
|
|
MCDPRINTF("%s freeing %p from %s \n", __func__, ifma, ifp ? ifp->if_xname : "");
|
1997-01-07 19:15:32 +00:00
|
|
|
|
|
|
|
/*
|
2007-03-20 00:36:10 +00:00
|
|
|
* If the ifnet is detaching, null out references to ifnet,
|
|
|
|
* so that upper protocol layers will notice, and not attempt
|
2007-03-27 16:11:28 +00:00
|
|
|
* to obtain locks for an ifnet which no longer exists. The
|
|
|
|
* routing socket announcement must happen before the ifnet
|
|
|
|
* instance is detached from the system.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
2007-03-20 00:36:10 +00:00
|
|
|
if (detaching) {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("%s: detaching ifnet instance %p\n", __func__, ifp);
|
|
|
|
#endif
|
2007-03-27 16:11:28 +00:00
|
|
|
/*
|
|
|
|
* ifp may already be nulled out if we are being reentered
|
|
|
|
* to delete the ll_ifma.
|
|
|
|
*/
|
|
|
|
if (ifp != NULL) {
|
|
|
|
rt_newmaddrmsg(RTM_DELMADDR, ifma);
|
|
|
|
ifma->ifma_ifp = NULL;
|
|
|
|
}
|
2007-03-20 00:36:10 +00:00
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
|
2007-03-20 00:36:10 +00:00
|
|
|
if (--ifma->ifma_refcount > 0)
|
|
|
|
return (0);
|
|
|
|
|
2018-08-15 20:23:08 +00:00
|
|
|
if (ifp != NULL && detaching == 0 && (ifma->ifma_flags & IFMA_F_ENQUEUED)) {
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifmultiaddr, ifma_link);
|
2018-08-15 20:23:08 +00:00
|
|
|
ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
|
|
|
|
}
|
2007-03-20 00:36:10 +00:00
|
|
|
/*
|
|
|
|
* If this ifma is a network-layer ifma, a link-layer ifma may
|
|
|
|
* have been associated with it. Release it first if so.
|
|
|
|
*/
|
|
|
|
ll_ifma = ifma->ifma_llifma;
|
2005-08-02 23:23:26 +00:00
|
|
|
if (ll_ifma != NULL) {
|
2007-03-20 00:36:10 +00:00
|
|
|
KASSERT(ifma->ifma_lladdr != NULL,
|
|
|
|
("%s: llifma w/o lladdr", __func__));
|
|
|
|
if (detaching)
|
|
|
|
ll_ifma->ifma_ifp = NULL; /* XXX */
|
|
|
|
if (--ll_ifma->ifma_refcount == 0) {
|
|
|
|
if (ifp != NULL) {
|
2018-08-15 20:23:08 +00:00
|
|
|
if (ll_ifma->ifma_flags & IFMA_F_ENQUEUED) {
|
|
|
|
CK_STAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifmultiaddr,
|
|
|
|
ifma_link);
|
2018-08-21 22:59:22 +00:00
|
|
|
ll_ifma->ifma_flags &= ~IFMA_F_ENQUEUED;
|
2018-08-15 20:23:08 +00:00
|
|
|
}
|
2007-03-20 00:36:10 +00:00
|
|
|
}
|
2005-08-02 23:23:26 +00:00
|
|
|
if_freemulti(ll_ifma);
|
2007-03-20 00:36:10 +00:00
|
|
|
}
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
2018-05-06 20:34:13 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (ifp) {
|
|
|
|
struct ifmultiaddr *ifmatmp;
|
2007-03-20 00:36:10 +00:00
|
|
|
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifmatmp, &ifp->if_multiaddrs, ifma_link)
|
2018-05-06 20:34:13 +00:00
|
|
|
MPASS(ifma != ifmatmp);
|
|
|
|
}
|
|
|
|
#endif
|
2007-03-20 00:36:10 +00:00
|
|
|
if_freemulti(ifma);
|
2005-08-02 23:23:26 +00:00
|
|
|
/*
|
2007-03-20 00:36:10 +00:00
|
|
|
* The last reference to this instance of struct ifmultiaddr
|
|
|
|
* was released; the hardware should be notified of this change.
|
2005-08-02 23:23:26 +00:00
|
|
|
*/
|
2007-03-20 00:36:10 +00:00
|
|
|
return (1);
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
|
|
|
|
2000-08-15 00:48:38 +00:00
|
|
|
/*
|
|
|
|
* Set the link layer address on an interface.
|
|
|
|
*
|
|
|
|
* At this time we only support certain types of interfaces,
|
|
|
|
* and we don't allow the length of the address to change.
|
2015-11-01 19:59:04 +00:00
|
|
|
*
|
|
|
|
* Marked __noinline to be DTrace-friendly.
|
2000-08-15 00:48:38 +00:00
|
|
|
*/
|
2015-11-01 19:59:04 +00:00
|
|
|
__noinline int
|
2000-08-15 00:48:38 +00:00
|
|
|
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
|
|
|
|
{
|
|
|
|
struct sockaddr_dl *sdl;
|
|
|
|
struct ifaddr *ifa;
|
2002-04-10 06:07:16 +00:00
|
|
|
struct ifreq ifr;
|
2019-10-07 22:40:05 +00:00
|
|
|
|
2005-11-11 16:04:59 +00:00
|
|
|
ifa = ifp->if_addr;
|
2019-10-08 17:55:45 +00:00
|
|
|
if (ifa == NULL)
|
|
|
|
return (EINVAL);
|
2018-05-23 21:02:14 +00:00
|
|
|
|
2000-08-15 00:48:38 +00:00
|
|
|
sdl = (struct sockaddr_dl *)ifa->ifa_addr;
|
2019-10-08 17:55:45 +00:00
|
|
|
if (sdl == NULL)
|
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
if (len != sdl->sdl_alen) /* don't allow length to change */
|
|
|
|
return (EINVAL);
|
|
|
|
|
2000-08-15 00:48:38 +00:00
|
|
|
switch (ifp->if_type) {
|
2005-11-11 07:36:14 +00:00
|
|
|
case IFT_ETHER:
|
2000-08-15 00:48:38 +00:00
|
|
|
case IFT_XETHER:
|
2001-04-04 15:10:58 +00:00
|
|
|
case IFT_L2VLAN:
|
2005-06-05 03:13:13 +00:00
|
|
|
case IFT_BRIDGE:
|
2007-04-10 00:27:25 +00:00
|
|
|
case IFT_IEEE8023ADLAG:
|
2000-08-15 00:48:38 +00:00
|
|
|
bcopy(lladdr, LLADDR(sdl), len);
|
|
|
|
break;
|
|
|
|
default:
|
2019-10-08 17:55:45 +00:00
|
|
|
return (ENODEV);
|
2000-08-15 00:48:38 +00:00
|
|
|
}
|
2009-06-24 10:36:48 +00:00
|
|
|
|
2000-08-15 00:48:38 +00:00
|
|
|
/*
|
|
|
|
* If the interface is already up, we need
|
|
|
|
* to re-init it in order to reprogram its
|
|
|
|
* address filter.
|
|
|
|
*/
|
|
|
|
if ((ifp->if_flags & IFF_UP) != 0) {
|
2005-07-14 13:56:51 +00:00
|
|
|
if (ifp->if_ioctl) {
|
|
|
|
ifp->if_flags &= ~IFF_UP;
|
|
|
|
ifr.ifr_flags = ifp->if_flags & 0xffff;
|
|
|
|
ifr.ifr_flagshigh = ifp->if_flags >> 16;
|
|
|
|
(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
|
|
|
|
ifp->if_flags |= IFF_UP;
|
|
|
|
ifr.ifr_flags = ifp->if_flags & 0xffff;
|
|
|
|
ifr.ifr_flagshigh = ifp->if_flags >> 16;
|
|
|
|
(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
|
|
|
|
}
|
2000-08-15 00:48:38 +00:00
|
|
|
}
|
2015-11-14 13:34:03 +00:00
|
|
|
EVENTHANDLER_INVOKE(iflladdr_event, ifp);
|
2019-10-08 17:55:45 +00:00
|
|
|
|
2018-05-30 21:46:10 +00:00
|
|
|
return (0);
|
2000-08-15 00:48:38 +00:00
|
|
|
}
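/*
 * Usage sketch (illustrative only): changing the MAC address of an
 * Ethernet interface.  The new length must equal the current sdl_alen,
 * which for Ethernet is always ETHER_ADDR_LEN:
 *
 *	error = if_setlladdr(ifp, new_lladdr, ETHER_ADDR_LEN);
 */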
|
|
|
|
|
2015-12-31 05:03:27 +00:00
|
|
|
/*
|
|
|
|
* Compat function for handling basic encapsulation requests.
|
|
|
|
* Stacks that have not been converted (FDDI, IB, ...) support the traditional
|
|
|
|
* output model: ARP (and similar L2 protocols) is handled
|
|
|
|
* inside the output routine, and arpresolve/nd6_resolve() returns the MAC
|
|
|
|
* address instead of a full prepend.
|
|
|
|
*
|
|
|
|
* This function creates the calculated header (== MAC address) for IPv4/IPv6 and
|
|
|
|
* returns EAFNOSUPPORT (which is then handled in ARP code) for other
|
|
|
|
* address families.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
if_requestencap_default(struct ifnet *ifp, struct if_encap_req *req)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (req->rtype != IFENCAP_LL)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
|
|
|
if (req->bufsize < req->lladdr_len)
|
|
|
|
return (ENOMEM);
|
|
|
|
|
|
|
|
switch (req->family) {
|
|
|
|
case AF_INET:
|
|
|
|
case AF_INET6:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EAFNOSUPPORT);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy lladdr to storage as is */
|
|
|
|
memmove(req->buf, req->lladdr, req->lladdr_len);
|
|
|
|
req->bufsize = req->lladdr_len;
|
|
|
|
req->lladdr_off = 0;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
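/*
 * Usage sketch (illustrative only; "dst_mac" and the buffer size are
 * hypothetical): requesting the link-layer prepend for a resolved IPv4
 * next hop through the interface's if_requestencap method:
 *
 *	struct if_encap_req ereq;
 *	char buf[64];
 *
 *	bzero(&ereq, sizeof(ereq));
 *	ereq.rtype = IFENCAP_LL;
 *	ereq.family = AF_INET;
 *	ereq.buf = buf;
 *	ereq.bufsize = sizeof(buf);
 *	ereq.lladdr = dst_mac;
 *	ereq.lladdr_len = ETHER_ADDR_LEN;
 *	error = ifp->if_requestencap(ifp, &ereq);
 */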
|
|
|
|
|
2018-07-09 11:03:28 +00:00
|
|
|
/*
|
|
|
|
* Tunnel interfaces can nest, and when misconfigured they may cause
|
|
|
|
* infinitely recursive calls.  We prevent this by detecting loops.
|
|
|
|
* A high nesting level may also cause stack exhaustion.  We prevent this
|
|
|
|
* by enforcing an upper limit.
|
|
|
|
*
|
|
|
|
* Return 0 if the tunnel nesting count is less than or equal to the limit.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_tunnel_check_nesting(struct ifnet *ifp, struct mbuf *m, uint32_t cookie,
|
|
|
|
int limit)
|
|
|
|
{
|
|
|
|
struct m_tag *mtag;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
count = 1;
|
|
|
|
mtag = NULL;
|
|
|
|
while ((mtag = m_tag_locate(m, cookie, 0, mtag)) != NULL) {
|
|
|
|
if (*(struct ifnet **)(mtag + 1) == ifp) {
|
|
|
|
log(LOG_NOTICE, "%s: loop detected\n", if_name(ifp));
|
|
|
|
return (EIO);
|
|
|
|
}
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
if (count > limit) {
|
|
|
|
log(LOG_NOTICE,
|
|
|
|
"%s: if_output recursively called too many times(%d)\n",
|
|
|
|
if_name(ifp), count);
|
|
|
|
return (EIO);
|
|
|
|
}
|
|
|
|
mtag = m_tag_alloc(cookie, 0, sizeof(struct ifnet *), M_NOWAIT);
|
|
|
|
if (mtag == NULL)
|
|
|
|
return (ENOMEM);
|
|
|
|
*(struct ifnet **)(mtag + 1) = ifp;
|
|
|
|
m_tag_prepend(m, mtag);
|
|
|
|
return (0);
|
|
|
|
}
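/*
 * Usage sketch (illustrative only; the cookie value is hypothetical and
 * must be unique to the driver): a tunnel driver calls this at the top
 * of its output path and drops the packet on error:
 *
 *	error = if_tunnel_check_nesting(ifp, m, 0x78746e6c, 3);
 *	if (error != 0) {
 *		m_freem(m);
 *		return (error);
 *	}
 */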
|
|
|
|
|
2017-05-10 22:13:47 +00:00
|
|
|
/*
|
|
|
|
* Get the link layer address that was read from the hardware at attach.
|
|
|
|
*
|
|
|
|
* This is only set by Ethernet NICs (IFT_ETHER), but laggX interfaces re-type
|
|
|
|
* their component interfaces as IFT_IEEE8023ADLAG.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_gethwaddr(struct ifnet *ifp, struct ifreq *ifr)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (ifp->if_hw_addr == NULL)
|
|
|
|
return (ENODEV);
|
|
|
|
|
|
|
|
switch (ifp->if_type) {
|
|
|
|
case IFT_ETHER:
|
|
|
|
case IFT_IEEE8023ADLAG:
|
|
|
|
bcopy(ifp->if_hw_addr, ifr->ifr_addr.sa_data, ifp->if_addrlen);
|
|
|
|
return (0);
|
|
|
|
default:
|
|
|
|
return (ENODEV);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-10-31 18:32:15 +00:00
|
|
|
/*
|
|
|
|
* The name argument must be a pointer to storage which will last as
|
|
|
|
* long as the interface does. For physical devices, the result of
|
|
|
|
* device_get_name(dev) is a good choice and for pseudo-devices a
|
|
|
|
* static string works well.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
if_initname(struct ifnet *ifp, const char *name, int unit)
|
|
|
|
{
|
|
|
|
ifp->if_dname = name;
|
|
|
|
ifp->if_dunit = unit;
|
|
|
|
if (unit != IF_DUNIT_NONE)
|
|
|
|
snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
|
|
|
|
else
|
|
|
|
strlcpy(ifp->if_xname, name, IFNAMSIZ);
|
|
|
|
}
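/*
 * Usage sketch (illustrative only; "mypseudo" is a hypothetical name):
 * a hardware driver names its ifnet after the device, while a
 * pseudo-device without a unit uses a static string:
 *
 *	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
 *	if_initname(ifp, "mypseudo", IF_DUNIT_NONE);
 */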
|
|
|
|
|
2002-09-24 17:35:08 +00:00
|
|
|
int
|
2018-05-11 00:19:49 +00:00
|
|
|
if_printf(struct ifnet *ifp, const char *fmt, ...)
|
2002-09-24 17:35:08 +00:00
|
|
|
{
|
2018-05-11 00:19:49 +00:00
|
|
|
char if_fmt[256];
|
2002-09-24 17:35:08 +00:00
|
|
|
va_list ap;
|
|
|
|
|
2018-05-11 00:19:49 +00:00
|
|
|
snprintf(if_fmt, sizeof(if_fmt), "%s: %s", ifp->if_xname, fmt);
|
2002-09-24 17:35:08 +00:00
|
|
|
va_start(ap, fmt);
|
2018-05-11 00:19:49 +00:00
|
|
|
vlog(LOG_INFO, if_fmt, ap);
|
2002-09-24 17:35:08 +00:00
|
|
|
va_end(ap);
|
2018-05-11 00:19:49 +00:00
|
|
|
return (0);
|
2002-09-24 17:35:08 +00:00
|
|
|
}
|
|
|
|
|
2004-07-27 23:20:45 +00:00
|
|
|
void
|
|
|
|
if_start(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
|
2009-03-15 14:21:05 +00:00
|
|
|
(*(ifp)->if_start)(ifp);
|
2004-07-27 23:20:45 +00:00
|
|
|
}
|
|
|
|
|
2008-11-22 05:55:56 +00:00
|
|
|
/*
|
|
|
|
* Backwards compatibility interface for drivers
|
|
|
|
* that have not implemented if_transmit.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
if_transmit(struct ifnet *ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
IFQ_HANDOFF(ifp, m, error);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2015-03-12 14:55:33 +00:00
|
|
|
static void
|
|
|
|
if_input_default(struct ifnet *ifp __unused, struct mbuf *m)
|
|
|
|
{
|
|
|
|
|
|
|
|
m_freem(m);
|
|
|
|
}
|
|
|
|
|
2004-10-30 09:39:13 +00:00
|
|
|
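/*
 * Legacy handoff: enqueue an mbuf on an interface's classic send queue
 * and kick if_start if the interface is not already marked active.
 * Returns 1 on success; if the queue is full the mbuf is freed, the
 * output drop counter is bumped, and 0 is returned.
 */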
int
|
|
|
|
if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
|
|
|
|
{
|
|
|
|
int active = 0;
|
|
|
|
|
|
|
|
IF_LOCK(ifq);
|
|
|
|
if (_IF_QFULL(ifq)) {
|
|
|
|
IF_UNLOCK(ifq);
|
2014-09-28 08:57:07 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
|
2004-10-30 09:39:13 +00:00
|
|
|
m_freem(m);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
if (ifp != NULL) {
|
2014-09-28 08:57:07 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len + adjust);
|
2004-10-30 09:39:13 +00:00
|
|
|
if (m->m_flags & (M_BCAST|M_MCAST))
|
2014-09-28 08:57:07 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
|
2005-08-09 10:16:17 +00:00
|
|
|
active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
|
2004-10-30 09:39:13 +00:00
|
|
|
}
|
|
|
|
_IF_ENQUEUE(ifq, m);
|
|
|
|
IF_UNLOCK(ifq);
|
|
|
|
if (ifp != NULL && !active)
|
2009-03-15 14:21:05 +00:00
|
|
|
(*(ifp)->if_start)(ifp);
|
2004-10-30 09:39:13 +00:00
|
|
|
return (1);
|
|
|
|
}
|
2005-06-10 16:49:24 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
if_register_com_alloc(u_char type,
|
|
|
|
if_com_alloc_t *a, if_com_free_t *f)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(if_com_alloc[type] == NULL,
|
|
|
|
("if_register_com_alloc: %d already registered", type));
|
|
|
|
KASSERT(if_com_free[type] == NULL,
|
|
|
|
("if_register_com_alloc: %d free already registered", type));
|
|
|
|
|
|
|
|
if_com_alloc[type] = a;
|
|
|
|
if_com_free[type] = f;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_deregister_com_alloc(u_char type)
|
|
|
|
{
|
|
|
|
|
2006-06-11 22:09:28 +00:00
|
|
|
KASSERT(if_com_alloc[type] != NULL,
|
2005-06-10 16:49:24 +00:00
|
|
|
("if_deregister_com_alloc: %d not registered", type));
|
2006-06-11 22:09:28 +00:00
|
|
|
KASSERT(if_com_free[type] != NULL,
|
2005-06-10 16:49:24 +00:00
|
|
|
("if_deregister_com_alloc: %d free not registered", type));
|
|
|
|
if_com_alloc[type] = NULL;
|
|
|
|
if_com_free[type] = NULL;
|
|
|
|
}
|
2014-06-02 17:54:39 +00:00
|
|
|
|
|
|
|
/* API for driver access to network stack owned ifnet. */
|
|
|
|
uint64_t
|
2014-08-31 12:48:13 +00:00
|
|
|
if_setbaudrate(struct ifnet *ifp, uint64_t baudrate)
|
2014-06-02 17:54:39 +00:00
|
|
|
{
|
|
|
|
uint64_t oldbrate;
|
|
|
|
|
|
|
|
oldbrate = ifp->if_baudrate;
|
|
|
|
ifp->if_baudrate = baudrate;
|
|
|
|
return (oldbrate);
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t
|
|
|
|
if_getbaudrate(if_t ifp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (((struct ifnet *)ifp)->if_baudrate);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setcapabilities(if_t ifp, int capabilities)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_capabilities = capabilities;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setcapabilitiesbit(if_t ifp, int setbit, int clearbit)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_capabilities |= setbit;
|
|
|
|
((struct ifnet *)ifp)->if_capabilities &= ~clearbit;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getcapabilities(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_capabilities;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setcapenable(if_t ifp, int capabilities)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_capenable = capabilities;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setcapenablebit(if_t ifp, int setcap, int clearcap)
|
|
|
|
{
|
|
|
|
if (setcap)
|
|
|
|
((struct ifnet *)ifp)->if_capenable |= setcap;
|
|
|
|
if (clearcap)
|
|
|
|
((struct ifnet *)ifp)->if_capenable &= ~clearcap;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
const char *
|
|
|
|
if_getdname(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_dname;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_togglecapenable(if_t ifp, int togglecap)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_capenable ^= togglecap;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getcapenable(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_capenable;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is largely undesirable because it ties ifnet to a device, but does
|
|
|
|
* provide flexibility for an embedded product vendor.  Should be used with
|
|
|
|
* the understanding that it violates the interface boundaries, and should be
|
|
|
|
* a last resort only.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
if_setdev(if_t ifp, void *dev)
|
|
|
|
{
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setdrvflagbits(if_t ifp, int set_flags, int clear_flags)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_drv_flags |= set_flags;
|
|
|
|
((struct ifnet *)ifp)->if_drv_flags &= ~clear_flags;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getdrvflags(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_drv_flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setdrvflags(if_t ifp, int flags)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_drv_flags = flags;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setflags(if_t ifp, int flags)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_flags = flags;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setflagbits(if_t ifp, int set, int clear)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_flags |= set;
|
|
|
|
((struct ifnet *)ifp)->if_flags &= ~clear;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getflags(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_clearhwassist(if_t ifp)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_hwassist = 0;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sethwassistbits(if_t ifp, int toset, int toclear)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_hwassist |= toset;
|
|
|
|
((struct ifnet *)ifp)->if_hwassist &= ~toclear;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sethwassist(if_t ifp, int hwassist_bit)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_hwassist = hwassist_bit;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_gethwassist(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_hwassist;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setmtu(if_t ifp, int mtu)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_mtu = mtu;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getmtu(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_mtu;
|
|
|
|
}
|
|
|
|
|
2014-11-06 13:13:09 +00:00
|
|
|
int
|
|
|
|
if_getmtu_family(if_t ifp, int family)
|
|
|
|
{
|
|
|
|
struct domain *dp;
|
|
|
|
|
|
|
|
for (dp = domains; dp; dp = dp->dom_next) {
|
|
|
|
if (dp->dom_family == family && dp->dom_ifmtu != NULL)
|
|
|
|
return (dp->dom_ifmtu((struct ifnet *)ifp));
|
|
|
|
}
|
|
|
|
|
|
|
|
return (((struct ifnet *)ifp)->if_mtu);
|
|
|
|
}
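/*
 * Usage sketch (illustrative only): IPv6 code paths should prefer the
 * per-domain MTU, which may be smaller than the interface MTU:
 *
 *	mtu = if_getmtu_family(ifp, AF_INET6);
 */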
|
|
|
|
|
2019-10-10 23:42:55 +00:00
|
|
|
/*
|
|
|
|
* Methods for drivers to access interface unicast and multicast
|
|
|
|
* link level addresses.  Drivers need not know about 'struct ifaddr' or
|
|
|
|
* 'struct ifmultiaddr'.
|
|
|
|
*/
|
2019-10-10 23:44:56 +00:00
|
|
|
u_int
|
|
|
|
if_lladdr_count(if_t ifp)
|
|
|
|
{
|
|
|
|
struct epoch_tracker et;
|
|
|
|
struct ifaddr *ifa;
|
|
|
|
u_int count;
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
NET_EPOCH_ENTER(et);
|
|
|
|
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
|
|
|
|
if (ifa->ifa_addr->sa_family == AF_LINK)
|
|
|
|
count++;
|
|
|
|
NET_EPOCH_EXIT(et);
|
|
|
|
|
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
2019-10-10 23:42:55 +00:00
|
|
|
u_int
|
|
|
|
if_foreach_lladdr(if_t ifp, iflladdr_cb_t cb, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct epoch_tracker et;
|
|
|
|
struct ifaddr *ifa;
|
|
|
|
u_int count;
|
|
|
|
|
|
|
|
MPASS(cb);
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
NET_EPOCH_ENTER(et);
|
|
|
|
CK_STAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
|
|
|
|
if (ifa->ifa_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
count += (*cb)(cb_arg, (struct sockaddr_dl *)ifa->ifa_addr,
|
|
|
|
count);
|
|
|
|
}
|
|
|
|
NET_EPOCH_EXIT(et);
|
|
|
|
|
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
2019-10-10 23:44:56 +00:00
|
|
|
u_int
|
|
|
|
if_llmaddr_count(if_t ifp)
|
|
|
|
{
|
|
|
|
struct epoch_tracker et;
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
NET_EPOCH_ENTER(et);
|
|
|
|
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
|
|
|
|
if (ifma->ifma_addr->sa_family == AF_LINK)
|
|
|
|
count++;
|
|
|
|
NET_EPOCH_EXIT(et);
|
|
|
|
|
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
2019-10-10 23:42:55 +00:00
|
|
|
u_int
|
|
|
|
if_foreach_llmaddr(if_t ifp, iflladdr_cb_t cb, void *cb_arg)
|
|
|
|
{
|
|
|
|
struct epoch_tracker et;
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
u_int count;
|
|
|
|
|
|
|
|
MPASS(cb);
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
NET_EPOCH_ENTER(et);
|
|
|
|
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
count += (*cb)(cb_arg, (struct sockaddr_dl *)ifma->ifma_addr,
|
|
|
|
count);
|
|
|
|
}
|
|
|
|
NET_EPOCH_EXIT(et);
|
|
|
|
|
|
|
|
return (count);
|
|
|
|
}
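/*
 * Usage sketch (illustrative only; the "xx_" names are hypothetical): a
 * driver reprograms its multicast hash filter without dereferencing
 * struct ifmultiaddr itself:
 *
 *	static u_int
 *	xx_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
 *	{
 *		struct xx_softc *sc = arg;
 *
 *		xx_set_hash_bit(sc, LLADDR(sdl));
 *		return (1);
 *	}
 *	...
 *	nmaddrs = if_foreach_llmaddr(ifp, xx_hash_maddr, sc);
 */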
|
|
|
|
|
2014-06-02 17:54:39 +00:00
|
|
|
int
|
|
|
|
if_setsoftc(if_t ifp, void *softc)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_softc = softc;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
if_getsoftc(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_softc;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_setrcvif(struct mbuf *m, if_t ifp)
|
|
|
|
{
|
2019-05-24 22:30:40 +00:00
|
|
|
|
|
|
|
MPASS((m->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0);
|
2014-06-02 17:54:39 +00:00
|
|
|
m->m_pkthdr.rcvif = (struct ifnet *)ifp;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_setvtag(struct mbuf *m, uint16_t tag)
|
|
|
|
{
|
|
|
|
m->m_pkthdr.ether_vtag = tag;
|
|
|
|
}
|
|
|
|
|
|
|
|
uint16_t
|
|
|
|
if_getvtag(struct mbuf *m)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (m->m_pkthdr.ether_vtag);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sendq_empty(if_t ifp)
|
|
|
|
{
|
|
|
|
return IFQ_DRV_IS_EMPTY(&((struct ifnet *)ifp)->if_snd);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct ifaddr *
|
|
|
|
if_getifaddr(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_getamcount(if_t ifp)
|
|
|
|
{
|
|
|
|
return ((struct ifnet *)ifp)->if_amcount;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setsendqready(if_t ifp)
|
|
|
|
{
|
|
|
|
IFQ_SET_READY(&((struct ifnet *)ifp)->if_snd);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setsendqlen(if_t ifp, int tx_desc_count)
|
|
|
|
{
|
|
|
|
IFQ_SET_MAXLEN(&((struct ifnet *)ifp)->if_snd, tx_desc_count);
|
|
|
|
((struct ifnet *)ifp)->if_snd.ifq_drv_maxlen = tx_desc_count;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_vlantrunkinuse(if_t ifp)
|
|
|
|
{
|
|
|
|
return (((struct ifnet *)ifp)->if_vlantrunk != NULL ? 1 : 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_input(if_t ifp, struct mbuf* sendmp)
|
|
|
|
{
|
|
|
|
(*((struct ifnet *)ifp)->if_input)((struct ifnet *)ifp, sendmp);
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/* XXX */
|
|
|
|
#ifndef ETH_ADDR_LEN
|
|
|
|
#define ETH_ADDR_LEN 6
|
|
|
|
#endif
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setupmultiaddr(if_t ifp, void *mta, int *cnt, int max)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
uint8_t *lmta = (uint8_t *)mta;
|
|
|
|
int mcnt = 0;
|
|
|
|
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) {
|
2014-06-02 17:54:39 +00:00
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (mcnt == max)
|
|
|
|
break;
|
|
|
|
|
|
|
|
bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
|
|
|
|
&lmta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
|
|
|
|
mcnt++;
|
|
|
|
}
|
|
|
|
*cnt = mcnt;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_multiaddr_array(if_t ifp, void *mta, int *cnt, int max)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if_maddr_rlock(ifp);
|
|
|
|
error = if_setupmultiaddr(ifp, mta, cnt, max);
|
|
|
|
if_maddr_runlock(ifp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_multiaddr_count(if_t ifp, int max)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
count = 0;
|
|
|
|
if_maddr_rlock(ifp);
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifma, &((struct ifnet *)ifp)->if_multiaddrs, ifma_link) {
|
2014-06-02 17:54:39 +00:00
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
count++;
|
|
|
|
if (count == max)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if_maddr_runlock(ifp);
|
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
2016-05-18 04:35:58 +00:00
|
|
|
int
|
|
|
|
if_multi_apply(struct ifnet *ifp, int (*filter)(void *, struct ifmultiaddr *, int), void *arg)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
int cnt = 0;
|
|
|
|
|
|
|
|
if_maddr_rlock(ifp);
|
2018-05-18 20:13:34 +00:00
|
|
|
CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
|
2016-05-18 04:35:58 +00:00
|
|
|
cnt += filter(arg, ifma, cnt);
|
|
|
|
if_maddr_runlock(ifp);
|
|
|
|
return (cnt);
|
|
|
|
}
|
|
|
|
|
2014-06-02 17:54:39 +00:00
|
|
|
struct mbuf *
|
|
|
|
if_dequeue(if_t ifp)
|
|
|
|
{
|
|
|
|
struct mbuf *m;
|
|
|
|
IFQ_DRV_DEQUEUE(&((struct ifnet *)ifp)->if_snd, m);
|
|
|
|
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sendq_prepend(if_t ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
IFQ_DRV_PREPEND(&((struct ifnet *)ifp)->if_snd, m);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_setifheaderlen(if_t ifp, int len)
|
|
|
|
{
|
2014-08-31 06:46:21 +00:00
|
|
|
((struct ifnet *)ifp)->if_hdrlen = len;
|
2014-06-02 17:54:39 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
caddr_t
|
|
|
|
if_getlladdr(if_t ifp)
|
|
|
|
{
|
|
|
|
return (IF_LLADDR((struct ifnet *)ifp));
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
if_gethandle(u_char type)
|
|
|
|
{
|
|
|
|
return (if_alloc(type));
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_bpfmtap(if_t ifh, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = (struct ifnet *)ifh;
|
|
|
|
|
|
|
|
BPF_MTAP(ifp, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_etherbpfmtap(if_t ifh, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = (struct ifnet *)ifh;
|
|
|
|
|
|
|
|
ETHER_BPF_MTAP(ifp, m);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_vlancap(if_t ifh)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = (struct ifnet *)ifh;
|
|
|
|
VLAN_CAPABILITIES(ifp);
|
|
|
|
}
|
|
|
|
|
2017-01-31 16:12:31 +00:00
|
|
|
int
|
|
|
|
if_sethwtsomax(if_t ifp, u_int if_hw_tsomax)
|
|
|
|
{
|
|
|
|
|
|
|
|
((struct ifnet *)ifp)->if_hw_tsomax = if_hw_tsomax;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sethwtsomaxsegcount(if_t ifp, u_int if_hw_tsomaxsegcount)
|
|
|
|
{
|
|
|
|
|
|
|
|
((struct ifnet *)ifp)->if_hw_tsomaxsegcount = if_hw_tsomaxsegcount;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
if_sethwtsomaxsegsize(if_t ifp, u_int if_hw_tsomaxsegsize)
|
|
|
|
{
|
|
|
|
|
|
|
|
((struct ifnet *)ifp)->if_hw_tsomaxsegsize = if_hw_tsomaxsegsize;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
u_int
|
|
|
|
if_gethwtsomax(if_t ifp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (((struct ifnet *)ifp)->if_hw_tsomax);
|
|
|
|
}
|
|
|
|
|
|
|
|
u_int
|
|
|
|
if_gethwtsomaxsegcount(if_t ifp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (((struct ifnet *)ifp)->if_hw_tsomaxsegcount);
|
|
|
|
}
|
|
|
|
|
|
|
|
u_int
|
|
|
|
if_gethwtsomaxsegsize(if_t ifp)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (((struct ifnet *)ifp)->if_hw_tsomaxsegsize);
|
|
|
|
}
|
|
|
|
|
2014-06-02 17:54:39 +00:00
|
|
|
void
|
|
|
|
if_setinitfn(if_t ifp, void (*init_fn)(void *))
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_init = init_fn;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2014-08-31 12:48:13 +00:00
|
|
|
if_setioctlfn(if_t ifp, int (*ioctl_fn)(if_t, u_long, caddr_t))
|
2014-06-02 17:54:39 +00:00
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_ioctl = (void *)ioctl_fn;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2014-08-31 12:48:13 +00:00
|
|
|
if_setstartfn(if_t ifp, void (*start_fn)(if_t))
|
2014-06-02 17:54:39 +00:00
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_start = (void *)start_fn;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_settransmitfn(if_t ifp, if_transmit_fn_t start_fn)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_transmit = start_fn;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
if_setqflushfn(if_t ifp, if_qflush_fn_t flush_fn)
|
|
|
|
{
|
|
|
|
((struct ifnet *)ifp)->if_qflush = flush_fn;
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2014-09-18 14:38:28 +00:00
|
|
|
void
|
|
|
|
if_setgetcounterfn(if_t ifp, if_get_counter_t fn)
|
|
|
|
{
|
|
|
|
|
|
|
|
ifp->if_get_counter = fn;
|
|
|
|
}
|
|
|
|
|
2014-06-02 17:54:39 +00:00
|
|
|
/* Revisit these - these were originally inline functions. */
|
|
|
|
int
|
|
|
|
drbr_inuse_drv(if_t ifh, struct buf_ring *br)
|
|
|
|
{
|
2015-06-23 18:48:41 +00:00
|
|
|
return drbr_inuse(ifh, br);
|
2014-06-02 17:54:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct mbuf*
|
|
|
|
drbr_dequeue_drv(if_t ifh, struct buf_ring *br)
|
|
|
|
{
|
|
|
|
return drbr_dequeue(ifh, br);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
drbr_needs_enqueue_drv(if_t ifh, struct buf_ring *br)
|
|
|
|
{
|
|
|
|
return drbr_needs_enqueue(ifh, br);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
drbr_enqueue_drv(if_t ifh, struct buf_ring *br, struct mbuf *m)
|
|
|
|
{
|
|
|
|
return drbr_enqueue(ifh, br, m);
|
|
|
|
|
|
|
|
}
|