/*-
 * Copyright (c) 1980, 1986, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)if.c        8.5 (Berkeley) 1/9/95
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_inet.h"
#include "opt_mac.h"
#include "opt_carp.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/bus.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/domain.h>
#include <sys/jail.h>
#include <machine/stdarg.h>

#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/radix.h>
#include <net/route.h>

#if defined(INET) || defined(INET6)
/*XXX*/
#include <netinet/in.h>
#include <netinet/in_var.h>
#ifdef INET6
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif
#endif
#ifdef INET
#include <netinet/if_ether.h>
#endif
#ifdef DEV_CARP
#include <netinet/ip_carp.h>
#endif

SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");

/* Log link state change events */
static int log_link_state_change = 1;

SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
    &log_link_state_change, 0,
    "log interface link state change events");

void    (*bstp_linkstate_p)(struct ifnet *ifp, int state);
void    (*ng_ether_link_state_p)(struct ifnet *ifp, int state);

struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;

static void     if_attachdomain(void *);
static void     if_attachdomain1(struct ifnet *);
static int      ifconf(u_long, caddr_t);
static void     if_grow(void);
static void     if_init(void *);
static void     if_check(void *);
static void     if_qflush(struct ifaltq *);
static void     if_route(struct ifnet *, int flag, int fam);
static int      if_setflag(struct ifnet *, int, int, int *, int);
static void     if_slowtimo(void *);
static void     if_unroute(struct ifnet *, int flag, int fam);
static void     link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
static int      if_rtdel(struct radix_node *, void *);
static int      ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
static void     if_start_deferred(void *context, int pending);
static void     do_link_state_change(void *, int);
static int      if_getgroup(struct ifgroupreq *, struct ifnet *);
static int      if_getgroupmembers(struct ifgroupreq *);
#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files.
 * Should this be more generalized?
 */
extern void     nd6_setmtu(struct ifnet *);
#endif

int     if_index = 0;
struct  ifindex_entry *ifindex_table = NULL;
int     ifqmaxlen = IFQ_MAXLEN;
struct  ifnethead ifnet;        /* depend on static init XXX */
struct  ifgrouphead ifg_head;
struct  mtx ifnet_lock;
static  if_com_alloc_t *if_com_alloc[256];
static  if_com_free_t *if_com_free[256];

static int      if_indexlim = 8;
static  struct knlist ifklist;

static void     filt_netdetach(struct knote *kn);
static int      filt_netdev(struct knote *kn, long hint);

static struct filterops netdev_filtops =
    { 1, NULL, filt_netdetach, filt_netdev };

/*
 * System initialization
 */
SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_FIRST, if_init, NULL)
SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_check, NULL)

MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");

static d_open_t         netopen;
static d_close_t        netclose;
static d_ioctl_t        netioctl;
static d_kqfilter_t     netkqfilter;

static struct cdevsw net_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_NEEDGIANT,
        .d_open =       netopen,
        .d_close =      netclose,
        .d_ioctl =      netioctl,
        .d_name =       "net",
        .d_kqfilter =   netkqfilter,
};

static int
netopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
        return (0);
}

static int
netclose(struct cdev *dev, int flags, int fmt, struct thread *td)
{
        return (0);
}

static int
netioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
        struct ifnet *ifp;
        int error, idx;

        /* only support interface specific ioctls */
        if (IOCGROUP(cmd) != 'i')
                return (EOPNOTSUPP);
        idx = minor(dev);
        if (idx == 0) {
                /*
                 * special network device, not interface.
                 */
                if (cmd == SIOCGIFCONF)
                        return (ifconf(cmd, data));     /* XXX remove cmd */
#ifdef __amd64__
                if (cmd == SIOCGIFCONF32)
                        return (ifconf(cmd, data));     /* XXX remove cmd */
#endif
                return (EOPNOTSUPP);
        }

        ifp = ifnet_byindex(idx);
        if (ifp == NULL)
                return (ENXIO);

        error = ifhwioctl(cmd, ifp, data, td);
        if (error == ENOIOCTL)
                error = EOPNOTSUPP;
        return (error);
}

static int
netkqfilter(struct cdev *dev, struct knote *kn)
{
        struct knlist *klist;
        struct ifnet *ifp;
        int idx;

        switch (kn->kn_filter) {
        case EVFILT_NETDEV:
                kn->kn_fop = &netdev_filtops;
                break;
        default:
                return (EINVAL);
        }

        idx = minor(dev);
        if (idx == 0) {
                klist = &ifklist;
        } else {
                ifp = ifnet_byindex(idx);
                if (ifp == NULL)
                        return (1);
                klist = &ifp->if_klist;
        }

        kn->kn_hook = (caddr_t)klist;

        knlist_add(klist, kn, 0);

        return (0);
}

static void
filt_netdetach(struct knote *kn)
{
        struct knlist *klist = (struct knlist *)kn->kn_hook;

        knlist_remove(klist, kn, 0);
}

static int
filt_netdev(struct knote *kn, long hint)
{
        struct knlist *klist = (struct knlist *)kn->kn_hook;

        /*
         * Currently NOTE_EXIT is abused to indicate device detach.
         */
        if (hint == NOTE_EXIT) {
                kn->kn_data = NOTE_LINKINV;
                kn->kn_flags |= (EV_EOF | EV_ONESHOT);
                knlist_remove_inevent(klist, kn);
                return (1);
        }
        if (hint != 0)
                kn->kn_data = hint;                     /* current status */
        if (kn->kn_sfflags & hint)
                kn->kn_fflags |= hint;
        return (kn->kn_fflags != 0);
}
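
/*
 * Illustrative sketch (not part of the original file): how a userland
 * program might consume the EVFILT_NETDEV filter implemented above.  The
 * filter is registered through the per-interface device node that
 * if_attach() creates under /dev/net; the interface name "em0" is only an
 * assumed example, and error handling is omitted for brevity.
 *
 *      struct kevent kev;
 *      int kq = kqueue();
 *      int fd = open("/dev/net/em0", O_RDONLY);
 *
 *      EV_SET(&kev, fd, EVFILT_NETDEV, EV_ADD,
 *          NOTE_LINKUP | NOTE_LINKDOWN | NOTE_LINKINV, 0, NULL);
 *      kevent(kq, &kev, 1, NULL, 0, NULL);
 *      if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1)
 *              printf("link state note 0x%x\n", (int)kev.fflags);
 */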

/*
 * Network interface utility routines.
 *
 * Routines with ifa_ifwith* names take sockaddr *'s as
 * parameters.
 */
/* ARGSUSED*/
static void
if_init(void *dummy __unused)
{

        IFNET_LOCK_INIT();
        TAILQ_INIT(&ifnet);
        TAILQ_INIT(&ifg_head);
        knlist_init(&ifklist, NULL, NULL, NULL, NULL);
        if_grow();                              /* create initial table */
        ifdev_byindex(0) = make_dev(&net_cdevsw, 0,
            UID_ROOT, GID_WHEEL, 0600, "network");
        if_clone_init();
}

static void
if_grow(void)
{
        u_int n;
        struct ifindex_entry *e;

        if_indexlim <<= 1;
        n = if_indexlim * sizeof(*e);
        e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
        if (ifindex_table != NULL) {
                memcpy((caddr_t)e, (caddr_t)ifindex_table, n/2);
                free((caddr_t)ifindex_table, M_IFNET);
        }
        ifindex_table = e;
}
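
/*
 * Growth pattern of the table above, as a worked example: if_indexlim
 * starts at 8, so the initial if_grow() call from if_init() doubles it
 * to 16 and allocates the first 16-entry table.  Each later call, made by
 * if_alloc() once if_index reaches if_indexlim, doubles the limit again
 * to 32, 64, and so on; since n already reflects the doubled limit, the
 * old table is exactly n/2 bytes and is copied with the memcpy() above
 * before being freed.
 */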

/* ARGSUSED*/
static void
if_check(void *dummy __unused)
{
        struct ifnet *ifp;
        int s;

        s = splimp();
        IFNET_RLOCK();  /* could sleep on rare error; mostly okay XXX */
        TAILQ_FOREACH(ifp, &ifnet, if_link) {
                if (ifp->if_snd.ifq_maxlen == 0) {
                        if_printf(ifp, "XXX: driver didn't set ifq_maxlen\n");
                        ifp->if_snd.ifq_maxlen = ifqmaxlen;
                }
                if (!mtx_initialized(&ifp->if_snd.ifq_mtx)) {
                        if_printf(ifp,
                            "XXX: driver didn't initialize queue mtx\n");
                        mtx_init(&ifp->if_snd.ifq_mtx, "unknown",
                            MTX_NETWORK_LOCK, MTX_DEF);
                }
        }
        IFNET_RUNLOCK();
        splx(s);
        if_slowtimo(0);
}

/*
 * Allocate a struct ifnet and an index for an interface.
 */
struct ifnet*
if_alloc(u_char type)
{
        struct ifnet *ifp;

        ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);

        /*
         * Try to find an empty slot below if_index.  If we fail, take
         * the next slot.
         *
         * XXX: should be locked!
         */
        for (ifp->if_index = 1; ifp->if_index <= if_index; ifp->if_index++) {
                if (ifnet_byindex(ifp->if_index) == NULL)
                        break;
        }
        /* Catch if_index overflow. */
        if (ifp->if_index < 1) {
                free(ifp, M_IFNET);
                return (NULL);
        }
        if (ifp->if_index > if_index)
                if_index = ifp->if_index;
        if (if_index >= if_indexlim)
                if_grow();
        ifnet_byindex(ifp->if_index) = ifp;

        ifp->if_type = type;

        if (if_com_alloc[type] != NULL) {
                ifp->if_l2com = if_com_alloc[type](type, ifp);
                if (ifp->if_l2com == NULL) {
                        free(ifp, M_IFNET);
                        return (NULL);
                }
        }
        IF_ADDR_LOCK_INIT(ifp);

        return (ifp);
}

void
if_free(struct ifnet *ifp)
{

        /* Do not add code to this function!  Add it to if_free_type(). */
        if_free_type(ifp, ifp->if_type);
}

void
if_free_type(struct ifnet *ifp, u_char type)
{

        if (ifp != ifnet_byindex(ifp->if_index)) {
                if_printf(ifp, "%s: value was not if_alloced, skipping\n",
                    __func__);
                return;
        }

        IF_ADDR_LOCK_DESTROY(ifp);

        ifnet_byindex(ifp->if_index) = NULL;

        /* XXX: should be locked with if_findindex() */
        while (if_index > 0 && ifnet_byindex(if_index) == NULL)
                if_index--;

        if (if_com_free[type] != NULL)
                if_com_free[type](ifp->if_l2com, type);

        free(ifp, M_IFNET);
}
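
/*
 * Illustrative sketch (not part of the original file): the allocation and
 * attach life cycle a hypothetical Ethernet driver would follow using the
 * routines in this file.  The names foo_start, foo_ioctl, foo_init, sc,
 * unit and eaddr are placeholders for driver-supplied items.
 *
 *      ifp = if_alloc(IFT_ETHER);
 *      if (ifp == NULL)
 *              return (ENOSPC);
 *      ifp->if_softc = sc;
 *      if_initname(ifp, "foo", unit);
 *      ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 *      ifp->if_start = foo_start;
 *      ifp->if_ioctl = foo_ioctl;
 *      ifp->if_init = foo_init;
 *      ether_ifattach(ifp, eaddr);             (calls if_attach() internally)
 *
 * and on unload:
 *
 *      ether_ifdetach(ifp);                    (calls if_detach() internally)
 *      if_free(ifp);
 */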

/*
 * Attach an interface to the
 * list of "active" interfaces.
 */
void
if_attach(struct ifnet *ifp)
{
        unsigned socksize, ifasize;
        int namelen, masklen;
        struct sockaddr_dl *sdl;
        struct ifaddr *ifa;

        if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
                panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
                    ifp->if_xname);

        TASK_INIT(&ifp->if_starttask, 0, if_start_deferred, ifp);
        TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
        IF_AFDATA_LOCK_INIT(ifp);
        ifp->if_afdata_initialized = 0;
        /*
         * XXX -
         * The old code would work if the interface passed a pre-existing
         * chain of ifaddrs to this code.  We don't trust our callers to
         * properly initialize the tailq, however, so we no longer allow
         * this unlikely case.
         */
        TAILQ_INIT(&ifp->if_addrhead);
        TAILQ_INIT(&ifp->if_prefixhead);
        TAILQ_INIT(&ifp->if_multiaddrs);
        TAILQ_INIT(&ifp->if_groups);

        if_addgroup(ifp, IFG_ALL);

        knlist_init(&ifp->if_klist, NULL, NULL, NULL, NULL);
        getmicrotime(&ifp->if_lastchange);
        ifp->if_data.ifi_epoch = time_uptime;
        ifp->if_data.ifi_datalen = sizeof(struct if_data);

#ifdef MAC
        mac_init_ifnet(ifp);
        mac_create_ifnet(ifp);
#endif

        ifdev_byindex(ifp->if_index) = make_dev(&net_cdevsw,
            unit2minor(ifp->if_index),
            UID_ROOT, GID_WHEEL, 0600, "%s/%s",
            net_cdevsw.d_name, ifp->if_xname);
        make_dev_alias(ifdev_byindex(ifp->if_index), "%s%d",
            net_cdevsw.d_name, ifp->if_index);

        mtx_init(&ifp->if_snd.ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);

        /*
         * create a Link Level name for this device
         */
        namelen = strlen(ifp->if_xname);
        /*
         * Always save enough space for any possible name so we can do
         * a rename in place later.
         */
        masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
        socksize = masklen + ifp->if_addrlen;
        if (socksize < sizeof(*sdl))
                socksize = sizeof(*sdl);
        socksize = roundup2(socksize, sizeof(long));
        ifasize = sizeof(*ifa) + 2 * socksize;
        ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
        IFA_LOCK_INIT(ifa);
        sdl = (struct sockaddr_dl *)(ifa + 1);
        sdl->sdl_len = socksize;
        sdl->sdl_family = AF_LINK;
        bcopy(ifp->if_xname, sdl->sdl_data, namelen);
        sdl->sdl_nlen = namelen;
        sdl->sdl_index = ifp->if_index;
        sdl->sdl_type = ifp->if_type;
        ifp->if_addr = ifa;
        ifa->ifa_ifp = ifp;
        ifa->ifa_rtrequest = link_rtrequest;
        ifa->ifa_addr = (struct sockaddr *)sdl;
        sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
        ifa->ifa_netmask = (struct sockaddr *)sdl;
        sdl->sdl_len = masklen;
        while (namelen != 0)
                sdl->sdl_data[--namelen] = 0xff;
        ifa->ifa_refcnt = 1;
        TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
        ifp->if_broadcastaddr = NULL; /* reliably crash if used uninitialized */
        ifp->if_snd.altq_type = 0;
        ifp->if_snd.altq_disc = NULL;
        ifp->if_snd.altq_flags &= ALTQF_CANTCHANGE;
        ifp->if_snd.altq_tbr  = NULL;
        ifp->if_snd.altq_ifp  = ifp;

        IFNET_WLOCK();
        TAILQ_INSERT_TAIL(&ifnet, ifp, if_link);
        IFNET_WUNLOCK();

        if (domain_init_status >= 2)
                if_attachdomain1(ifp);

        EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
        devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);

        /* Announce the interface. */
        rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
}

static void
if_attachdomain(void *dummy)
{
        struct ifnet *ifp;
        int s;

        s = splnet();
        TAILQ_FOREACH(ifp, &ifnet, if_link)
                if_attachdomain1(ifp);
        splx(s);
}
SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
    if_attachdomain, NULL);

static void
if_attachdomain1(struct ifnet *ifp)
{
        struct domain *dp;
        int s;

        s = splnet();

        /*
         * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
         * cannot lock ifp->if_afdata initialization, entirely.
         */
        if (IF_AFDATA_TRYLOCK(ifp) == 0) {
                splx(s);
                return;
        }
        if (ifp->if_afdata_initialized >= domain_init_status) {
                IF_AFDATA_UNLOCK(ifp);
                splx(s);
                printf("if_attachdomain called more than once on %s\n",
                    ifp->if_xname);
                return;
        }
        ifp->if_afdata_initialized = domain_init_status;
        IF_AFDATA_UNLOCK(ifp);

        /* address family dependent data region */
        bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
        for (dp = domains; dp; dp = dp->dom_next) {
                if (dp->dom_ifattach)
                        ifp->if_afdata[dp->dom_family] =
                            (*dp->dom_ifattach)(ifp);
        }

        splx(s);
}

/*
 * Remove any network addresses from an interface.
 */

void
if_purgeaddrs(struct ifnet *ifp)
{
        struct ifaddr *ifa, *next;

        TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
                if (ifa->ifa_addr->sa_family == AF_LINK)
                        continue;
#ifdef INET
                /* XXX: Ugly!! ad hoc just for INET */
                if (ifa->ifa_addr->sa_family == AF_INET) {
                        struct ifaliasreq ifr;

                        bzero(&ifr, sizeof(ifr));
                        ifr.ifra_addr = *ifa->ifa_addr;
                        if (ifa->ifa_dstaddr)
                                ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
                        if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
                            NULL) == 0)
                                continue;
                }
#endif /* INET */
#ifdef INET6
                if (ifa->ifa_addr->sa_family == AF_INET6) {
                        in6_purgeaddr(ifa);
                        /* ifp_addrhead is already updated */
                        continue;
                }
#endif /* INET6 */
                TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
                IFAFREE(ifa);
        }
}

/*
 * Detach an interface, removing it from the
 * list of "active" interfaces.
 *
 * XXXRW: There are some significant questions about event ordering, and
 * how to prevent things from starting to use the interface during detach.
 */
void
if_detach(struct ifnet *ifp)
{
        struct ifaddr *ifa;
        struct radix_node_head  *rnh;
        int s;
        int i;
        struct domain *dp;
        struct ifnet *iter;
        int found = 0;

        IFNET_WLOCK();
        TAILQ_FOREACH(iter, &ifnet, if_link)
                if (iter == ifp) {
                        TAILQ_REMOVE(&ifnet, ifp, if_link);
                        found = 1;
                        break;
                }
        IFNET_WUNLOCK();
        if (!found)
                return;

        /*
         * Remove/wait for pending events.
         */
        taskqueue_drain(taskqueue_swi, &ifp->if_linktask);

        /*
         * Remove routes and flush queues.
         */
        s = splnet();
        if_down(ifp);
#ifdef ALTQ
        if (ALTQ_IS_ENABLED(&ifp->if_snd))
                altq_disable(&ifp->if_snd);
        if (ALTQ_IS_ATTACHED(&ifp->if_snd))
                altq_detach(&ifp->if_snd);
#endif

        if_purgeaddrs(ifp);

#ifdef INET
        in_ifdetach(ifp);
#endif

#ifdef INET6
        /*
         * Remove all IPv6 kernel structs related to ifp.  This should be done
         * before removing routing entries below, since IPv6 interface direct
         * routes are expected to be removed by the IPv6-specific kernel API.
         * Otherwise, the kernel will detect some inconsistency and complain
         * about it.
         */
        in6_ifdetach(ifp);
#endif
        /*
         * Remove link ifaddr pointer and maybe decrement if_index.
         * Clean up all addresses.
         */
        ifp->if_addr = NULL;
        destroy_dev(ifdev_byindex(ifp->if_index));
        ifdev_byindex(ifp->if_index) = NULL;

        /* We can now free link ifaddr. */
        if (!TAILQ_EMPTY(&ifp->if_addrhead)) {
                ifa = TAILQ_FIRST(&ifp->if_addrhead);
                TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
                IFAFREE(ifa);
        }

        /*
         * Delete all remaining routes using this interface.
         * Unfortunately the only way to do this is to slog through
         * the entire routing table looking for routes which point
         * to this interface...oh well...
         */
        for (i = 1; i <= AF_MAX; i++) {
                if ((rnh = rt_tables[i]) == NULL)
                        continue;
                RADIX_NODE_HEAD_LOCK(rnh);
                (void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
                RADIX_NODE_HEAD_UNLOCK(rnh);
        }

        /* Announce that the interface is gone. */
        rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
        EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
        devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);

        IF_AFDATA_LOCK(ifp);
        for (dp = domains; dp; dp = dp->dom_next) {
                if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
                        (*dp->dom_ifdetach)(ifp,
                            ifp->if_afdata[dp->dom_family]);
        }
        IF_AFDATA_UNLOCK(ifp);

#ifdef MAC
        mac_destroy_ifnet(ifp);
#endif /* MAC */
        KNOTE_UNLOCKED(&ifp->if_klist, NOTE_EXIT);
        knlist_clear(&ifp->if_klist, 0);
        knlist_destroy(&ifp->if_klist);
        mtx_destroy(&ifp->if_snd.ifq_mtx);
        IF_AFDATA_DESTROY(ifp);
        splx(s);
}

/*
 * Add a group to an interface
 */
int
if_addgroup(struct ifnet *ifp, const char *groupname)
{
        struct ifg_list         *ifgl;
        struct ifg_group        *ifg = NULL;
        struct ifg_member       *ifgm;

        if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
            groupname[strlen(groupname) - 1] <= '9')
                return (EINVAL);

        IFNET_WLOCK();
        TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
                if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
                        IFNET_WUNLOCK();
                        return (EEXIST);
                }

        if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
            M_NOWAIT)) == NULL) {
                IFNET_WUNLOCK();
                return (ENOMEM);
        }

        if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
            M_TEMP, M_NOWAIT)) == NULL) {
                free(ifgl, M_TEMP);
                IFNET_WUNLOCK();
                return (ENOMEM);
        }

        TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
                if (!strcmp(ifg->ifg_group, groupname))
                        break;

        if (ifg == NULL) {
                if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
                    M_TEMP, M_NOWAIT)) == NULL) {
                        free(ifgl, M_TEMP);
                        free(ifgm, M_TEMP);
                        IFNET_WUNLOCK();
                        return (ENOMEM);
                }
                strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
                ifg->ifg_refcnt = 0;
                TAILQ_INIT(&ifg->ifg_members);
                EVENTHANDLER_INVOKE(group_attach_event, ifg);
                TAILQ_INSERT_TAIL(&ifg_head, ifg, ifg_next);
        }

        ifg->ifg_refcnt++;
        ifgl->ifgl_group = ifg;
        ifgm->ifgm_ifp = ifp;

        IF_ADDR_LOCK(ifp);
        TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
        TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
        IF_ADDR_UNLOCK(ifp);

        IFNET_WUNLOCK();

        EVENTHANDLER_INVOKE(group_change_event, groupname);

        return (0);
}

/*
 * Remove a group from an interface
 */
int
if_delgroup(struct ifnet *ifp, const char *groupname)
{
        struct ifg_list         *ifgl;
        struct ifg_member       *ifgm;

        IFNET_WLOCK();
        TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
                if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
                        break;
        if (ifgl == NULL) {
                IFNET_WUNLOCK();
                return (ENOENT);
        }

        IF_ADDR_LOCK(ifp);
        TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
        IF_ADDR_UNLOCK(ifp);

        TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
                if (ifgm->ifgm_ifp == ifp)
                        break;

        if (ifgm != NULL) {
                TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
                free(ifgm, M_TEMP);
        }

        if (--ifgl->ifgl_group->ifg_refcnt == 0) {
                TAILQ_REMOVE(&ifg_head, ifgl->ifgl_group, ifg_next);
                EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
                free(ifgl->ifgl_group, M_TEMP);
        }
        IFNET_WUNLOCK();

        free(ifgl, M_TEMP);

        EVENTHANDLER_INVOKE(group_change_event, groupname);

        return (0);
}

/*
 * Stores all groups from an interface in memory pointed
 * to by data
 */
static int
if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
{
        int                      len, error;
        struct ifg_list         *ifgl;
        struct ifg_req           ifgrq, *ifgp;
        struct ifgroupreq       *ifgr = data;

        if (ifgr->ifgr_len == 0) {
                IF_ADDR_LOCK(ifp);
                TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
                        ifgr->ifgr_len += sizeof(struct ifg_req);
                IF_ADDR_UNLOCK(ifp);
                return (0);
        }

        len = ifgr->ifgr_len;
        ifgp = ifgr->ifgr_groups;
        /* XXX: wire */
        IF_ADDR_LOCK(ifp);
        TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
                if (len < sizeof(ifgrq)) {
                        IF_ADDR_UNLOCK(ifp);
                        return (EINVAL);
                }
                bzero(&ifgrq, sizeof ifgrq);
                strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
                    sizeof(ifgrq.ifgrq_group));
                if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
                        IF_ADDR_UNLOCK(ifp);
                        return (error);
                }
                len -= sizeof(ifgrq);
                ifgp++;
        }
        IF_ADDR_UNLOCK(ifp);

        return (0);
}

/*
 * Stores all members of a group in memory pointed to by data
 */
static int
if_getgroupmembers(struct ifgroupreq *data)
{
        struct ifgroupreq       *ifgr = data;
        struct ifg_group        *ifg;
        struct ifg_member       *ifgm;
        struct ifg_req           ifgrq, *ifgp;
        int                      len, error;

        IFNET_RLOCK();
        TAILQ_FOREACH(ifg, &ifg_head, ifg_next)
                if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
                        break;
        if (ifg == NULL) {
                IFNET_RUNLOCK();
                return (ENOENT);
        }

        if (ifgr->ifgr_len == 0) {
                TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
                        ifgr->ifgr_len += sizeof(ifgrq);
                IFNET_RUNLOCK();
                return (0);
        }

        len = ifgr->ifgr_len;
        ifgp = ifgr->ifgr_groups;
        TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
                if (len < sizeof(ifgrq)) {
                        IFNET_RUNLOCK();
                        return (EINVAL);
                }
                bzero(&ifgrq, sizeof ifgrq);
                strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
                    sizeof(ifgrq.ifgrq_member));
                if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
                        IFNET_RUNLOCK();
                        return (error);
                }
                len -= sizeof(ifgrq);
                ifgp++;
        }
        IFNET_RUNLOCK();

        return (0);
}
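
/*
 * Illustrative sketch (not part of the original file): the two-pass
 * protocol a userland caller would use against if_getgroup() via the
 * SIOCGIFGROUP ioctl -- first with ifgr_len == 0 to learn the required
 * size, then again with a buffer.  The interface name "em0" is only an
 * assumed example and error handling is omitted.
 *
 *      struct ifgroupreq ifgr;
 *      int s = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *      memset(&ifgr, 0, sizeof(ifgr));
 *      strlcpy(ifgr.ifgr_name, "em0", sizeof(ifgr.ifgr_name));
 *      ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr);
 *      ifgr.ifgr_groups = calloc(1, ifgr.ifgr_len);
 *      ioctl(s, SIOCGIFGROUP, (caddr_t)&ifgr);
 *      for (i = 0; i < ifgr.ifgr_len / sizeof(struct ifg_req); i++)
 *              printf("%s\n", ifgr.ifgr_groups[i].ifgrq_group);
 */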

/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *      rn      pointer to node in the routing table
 *      arg     argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *      0       successful
 *      errno   failed - reason indicated
 *
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
        struct rtentry  *rt = (struct rtentry *)rn;
        struct ifnet    *ifp = arg;
        int             err;

        if (rt->rt_ifp == ifp) {

                /*
                 * Protect (sorta) against walktree recursion problems
                 * with cloned routes
                 */
                if ((rt->rt_flags & RTF_UP) == 0)
                        return (0);

                err = rtrequest(RTM_DELETE, rt_key(rt), rt->rt_gateway,
                                rt_mask(rt), rt->rt_flags,
                                (struct rtentry **) NULL);
                if (err) {
                        log(LOG_WARNING, "if_rtdel: error %d\n", err);
                }
        }

        return (0);
}

#define sa_equal(a1, a2)        (bcmp((a1), (a2), ((a1))->sa_len) == 0)

/*
 * Locate an interface based on a complete address.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
        struct ifnet *ifp;
        struct ifaddr *ifa;

        IFNET_RLOCK();
        TAILQ_FOREACH(ifp, &ifnet, if_link)
                TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                        if (ifa->ifa_addr->sa_family != addr->sa_family)
                                continue;
                        if (sa_equal(addr, ifa->ifa_addr))
                                goto done;
                        /* IP6 doesn't have broadcast */
                        if ((ifp->if_flags & IFF_BROADCAST) &&
                            ifa->ifa_broadaddr &&
                            ifa->ifa_broadaddr->sa_len != 0 &&
                            sa_equal(ifa->ifa_broadaddr, addr))
                                goto done;
                }
        ifa = NULL;
done:
        IFNET_RUNLOCK();
        return (ifa);
}

/*
 * Locate the point to point interface with a given destination address.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
        struct ifnet *ifp;
        struct ifaddr *ifa;

        IFNET_RLOCK();
        TAILQ_FOREACH(ifp, &ifnet, if_link) {
                if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
                        continue;
                TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                        if (ifa->ifa_addr->sa_family != addr->sa_family)
                                continue;
                        if (ifa->ifa_dstaddr &&
                            sa_equal(addr, ifa->ifa_dstaddr))
                                goto done;
                }
        }
        ifa = NULL;
done:
        IFNET_RUNLOCK();
        return (ifa);
}

/*
 * Find an interface on a specific network.  If several interfaces
 * match, the most specific one found is chosen.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
        struct ifnet *ifp;
        struct ifaddr *ifa;
        struct ifaddr *ifa_maybe = (struct ifaddr *) 0;
        u_int af = addr->sa_family;
        char *addr_data = addr->sa_data, *cplim;

        /*
         * AF_LINK addresses can be looked up directly by their index number,
         * so do that if we can.
         */
        if (af == AF_LINK) {
                struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
                if (sdl->sdl_index && sdl->sdl_index <= if_index)
                        return (ifaddr_byindex(sdl->sdl_index));
        }

        /*
         * Scan through each interface, looking for ones that have
         * addresses in this address family.
         */
        IFNET_RLOCK();
        TAILQ_FOREACH(ifp, &ifnet, if_link) {
                TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                        char *cp, *cp2, *cp3;

                        if (ifa->ifa_addr->sa_family != af)
next:                           continue;
                        if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
                                /*
                                 * This is a bit broken as it doesn't
                                 * take into account that the remote end may
                                 * be a single node in the network we are
                                 * looking for.
                                 * The trouble is that we don't know the
                                 * netmask for the remote end.
                                 */
                                if (ifa->ifa_dstaddr != 0 &&
                                    sa_equal(addr, ifa->ifa_dstaddr))
                                        goto done;
                        } else {
                                /*
                                 * if we have a special address handler,
                                 * then use it instead of the generic one.
                                 */
                                if (ifa->ifa_claim_addr) {
                                        if ((*ifa->ifa_claim_addr)(ifa, addr))
                                                goto done;
                                        continue;
                                }

                                /*
                                 * Scan all the bits in the ifa's address.
                                 * If a bit disagrees with what we are
                                 * looking for, mask it with the netmask
                                 * to see if it really matters.
                                 * (A byte at a time)
                                 */
                                if (ifa->ifa_netmask == 0)
                                        continue;
                                cp = addr_data;
                                cp2 = ifa->ifa_addr->sa_data;
                                cp3 = ifa->ifa_netmask->sa_data;
                                cplim = ifa->ifa_netmask->sa_len
                                        + (char *)ifa->ifa_netmask;
                                while (cp3 < cplim)
                                        if ((*cp++ ^ *cp2++) & *cp3++)
                                                goto next; /* next address! */
                                /*
                                 * If the netmask of what we just found
                                 * is more specific than what we had before
                                 * (if we had one) then remember the new one
                                 * before continuing to search
                                 * for an even better one.
                                 */
                                if (ifa_maybe == 0 ||
                                    rn_refines((caddr_t)ifa->ifa_netmask,
                                    (caddr_t)ifa_maybe->ifa_netmask))
                                        ifa_maybe = ifa;
                        }
                }
        }
        ifa = ifa_maybe;
done:
        IFNET_RUNLOCK();
        return (ifa);
}

/*
 * Find an interface address specific to an interface best matching
 * a given address.
 */
struct ifaddr *
ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
{
        struct ifaddr *ifa;
        char *cp, *cp2, *cp3;
        char *cplim;
        struct ifaddr *ifa_maybe = 0;
        u_int af = addr->sa_family;

        if (af >= AF_MAX)
                return (0);
        TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                if (ifa->ifa_addr->sa_family != af)
                        continue;
                if (ifa_maybe == 0)
                        ifa_maybe = ifa;
                if (ifa->ifa_netmask == 0) {
                        if (sa_equal(addr, ifa->ifa_addr) ||
                            (ifa->ifa_dstaddr &&
                            sa_equal(addr, ifa->ifa_dstaddr)))
                                goto done;
                        continue;
                }
                if (ifp->if_flags & IFF_POINTOPOINT) {
                        if (sa_equal(addr, ifa->ifa_dstaddr))
                                goto done;
                } else {
                        cp = addr->sa_data;
                        cp2 = ifa->ifa_addr->sa_data;
                        cp3 = ifa->ifa_netmask->sa_data;
                        cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
                        for (; cp3 < cplim; cp3++)
                                if ((*cp++ ^ *cp2++) & *cp3)
                                        break;
                        if (cp3 == cplim)
                                goto done;
                }
        }
        ifa = ifa_maybe;
done:
        return (ifa);
}

#include <net/route.h>

/*
 * Default action when installing a route with a Link Level gateway.
 * Lookup an appropriate real ifa to point to.
 * This should be moved to /sys/net/link.c eventually.
 */
static void
link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
{
        struct ifaddr *ifa, *oifa;
        struct sockaddr *dst;
        struct ifnet *ifp;

        RT_LOCK_ASSERT(rt);

        if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) ||
            ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0))
                return;
        ifa = ifaof_ifpforaddr(dst, ifp);
        if (ifa) {
                IFAREF(ifa);            /* XXX */
                oifa = rt->rt_ifa;
                rt->rt_ifa = ifa;
                IFAFREE(oifa);
                if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
                        ifa->ifa_rtrequest(cmd, rt, info);
        }
}

/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
static void
if_unroute(struct ifnet *ifp, int flag, int fam)
{
        struct ifaddr *ifa;

        KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP"));

        ifp->if_flags &= ~flag;
        getmicrotime(&ifp->if_lastchange);
        TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
                if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
                        pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
        if_qflush(&ifp->if_snd);
#ifdef DEV_CARP
        if (ifp->if_carp)
                carp_carpdev_state(ifp->if_carp);
#endif
        rt_ifmsg(ifp);
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
static void
if_route(struct ifnet *ifp, int flag, int fam)
{
        struct ifaddr *ifa;

        KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));

        ifp->if_flags |= flag;
        getmicrotime(&ifp->if_lastchange);
        TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
                if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
                        pfctlinput(PRC_IFUP, ifa->ifa_addr);
#ifdef DEV_CARP
        if (ifp->if_carp)
                carp_carpdev_state(ifp->if_carp);
#endif
        rt_ifmsg(ifp);
#ifdef INET6
        in6_if_up(ifp);
#endif
}
|
|
|
|
|
2004-12-08 05:45:59 +00:00
|
|
|
void (*vlan_link_state_p)(struct ifnet *, int); /* XXX: private from if_vlan */
|
Merge the //depot/user/yar/vlan branch into CVS. It contains some collective
work by yar, thompsa and myself. The checksum offloading part also involves
work done by Mihail Balikov.
The most important changes:
o Instead of global linked list of all vlan softc use a per-trunk
hash. The size of hash is dynamically adjusted, depending on
number of entries. This changes struct ifnet, replacing counter
of vlans with a pointer to trunk structure. This change is an
improvement for setups with big number of VLANs, several interfaces
and several CPUs. It is a small regression for a setup with a single
VLAN interface.
An alternative to dynamic hash is a per-trunk static array with
4096 entries, which is a compile time option - VLAN_ARRAY. In my
experiments the array is not an improvement, probably because such
a big trunk structure doesn't fit into CPU cache.
o Introduce an UMA zone for VLAN tags. Since drivers depend on it,
the zone is declared in kern_mbuf.c, not in optional vlan(4) driver.
This change is a big improvement for any setup utilizing vlan(4).
o Use rwlock(9) instead of mutex(9) for locking. We are the first
ones to do this! :)
o Some drivers can do hardware VLAN tagging + hardware checksum
offloading. Add an infrastructure for this. Whenever vlan(4) is
attached to a parent or parent configuration is changed, the flags
on vlan(4) interface are updated.
In collaboration with: yar, thompsa
In collaboration with: Mihail Balikov <mihail.balikov interbgc.com>
2006-01-30 13:45:15 +00:00
|
|
|
void (*vlan_trunk_cap_p)(struct ifnet *); /* XXX: private from if_vlan */
|
2004-12-08 05:45:59 +00:00
|
|
|
|
|
|
|
/*
|
2005-04-20 09:30:54 +00:00
|
|
|
* Handle a change in the interface link state. To avoid LORs
|
|
|
|
* between driver lock and upper layer locks, as well as possible
|
|
|
|
* recursions, we post the event to a taskqueue, and all the work
|
|
|
|
* is done in the static do_link_state_change() handler.
|
2004-12-08 05:45:59 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
if_link_state_change(struct ifnet *ifp, int link_state)
|
|
|
|
{
|
2005-02-22 14:21:59 +00:00
|
|
|
/* Return if state hasn't changed. */
|
|
|
|
if (ifp->if_link_state == link_state)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ifp->if_link_state = link_state;
|
|
|
|
|
2005-04-20 09:30:54 +00:00
|
|
|
taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
do_link_state_change(void *arg, int pending)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = (struct ifnet *)arg;
|
|
|
|
int link_state = ifp->if_link_state;
|
|
|
|
int link;
|
|
|
|
|
2004-12-08 05:45:59 +00:00
|
|
|
/* Notify that the link state has changed. */
|
2005-02-22 14:21:59 +00:00
|
|
|
rt_ifmsg(ifp);
|
|
|
|
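/* Map the new state to a kqueue note and wake listeners on if_klist. */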
if (link_state == LINK_STATE_UP)
|
|
|
|
link = NOTE_LINKUP;
|
|
|
|
else if (link_state == LINK_STATE_DOWN)
|
|
|
|
link = NOTE_LINKDOWN;
|
|
|
|
else
|
|
|
|
link = NOTE_LINKINV;
|
|
|
|
KNOTE_UNLOCKED(&ifp->if_klist, link);
|
2006-01-30 13:45:15 +00:00
|
|
|
if (ifp->if_vlantrunk != NULL)
|
2005-02-22 14:21:59 +00:00
|
|
|
(*vlan_link_state_p)(ifp, link);
|
|
|
|
|
|
|
|
if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
|
|
|
|
IFP2AC(ifp)->ac_netgraph != NULL)
|
|
|
|
(*ng_ether_link_state_p)(ifp, link_state);
|
|
|
|
#ifdef DEV_CARP
|
|
|
|
if (ifp->if_carp)
|
|
|
|
carp_carpdev_state(ifp->if_carp);
|
|
|
|
#endif
|
2005-06-05 03:13:13 +00:00
|
|
|
if (ifp->if_bridge) {
|
|
|
|
KASSERT(bstp_linkstate_p != NULL, ("if_bridge bstp not loaded!"));
|
|
|
|
(*bstp_linkstate_p)(ifp, link_state);
|
|
|
|
}
|
|
|
|
|
2005-06-06 19:08:11 +00:00
|
|
|
devctl_notify("IFNET", ifp->if_xname,
|
|
|
|
(link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
|
2005-04-20 09:30:54 +00:00
|
|
|
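/*
 * A pending count greater than 1 means the taskqueue coalesced
 * several queued link events into this single run.
 */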
if (pending > 1)
|
|
|
|
if_printf(ifp, "%d link states coalesced\n", pending);
|
2005-03-12 12:58:03 +00:00
|
|
|
if (log_link_state_change)
|
|
|
|
log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname,
|
|
|
|
(link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
|
2004-12-08 05:45:59 +00:00
|
|
|
}
|
|
|
|
|
1998-12-16 18:30:43 +00:00
|
|
|
/*
|
|
|
|
* Mark an interface down and notify protocols of
|
|
|
|
* the transition.
|
|
|
|
* NOTE: must be called at splnet or equivalent.
|
|
|
|
*/
|
|
|
|
void
|
2003-10-23 13:49:10 +00:00
|
|
|
if_down(struct ifnet *ifp)
|
1998-12-16 18:30:43 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
if_unroute(ifp, IFF_UP, AF_UNSPEC);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark an interface up and notify protocols of
|
|
|
|
* the transition.
|
|
|
|
* NOTE: must be called at splnet or equivalent.
|
|
|
|
*/
|
|
|
|
void
|
2003-10-23 13:49:10 +00:00
|
|
|
if_up(struct ifnet *ifp)
|
1998-12-16 18:30:43 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
if_route(ifp, IFF_UP, AF_UNSPEC);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Flush an interface queue.
|
|
|
|
*/
|
1995-12-09 20:47:15 +00:00
|
|
|
static void
|
2004-06-13 17:29:10 +00:00
|
|
|
if_qflush(struct ifaltq *ifq)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-10-23 13:49:10 +00:00
|
|
|
struct mbuf *m, *n;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-09-01 19:56:47 +00:00
|
|
|
IFQ_LOCK(ifq);
|
2004-06-13 17:29:10 +00:00
|
|
|
#ifdef ALTQ
|
|
|
|
if (ALTQ_IS_ENABLED(ifq))
|
|
|
|
ALTQ_PURGE(ifq);
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
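/* Free every queued packet, following the m_act packet linkage. */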
n = ifq->ifq_head;
|
1994-10-08 01:40:23 +00:00
|
|
|
while ((m = n) != 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
n = m->m_act;
|
|
|
|
m_freem(m);
|
|
|
|
}
|
|
|
|
ifq->ifq_head = 0;
|
|
|
|
ifq->ifq_tail = 0;
|
|
|
|
ifq->ifq_len = 0;
|
2004-09-01 19:56:47 +00:00
|
|
|
IFQ_UNLOCK(ifq);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle interface watchdog timer routines. Called
|
|
|
|
* from softclock, we decrement timers (if set) and
|
|
|
|
* call the appropriate interface routine on expiration.
|
2004-07-27 23:20:45 +00:00
|
|
|
*
|
|
|
|
* XXXRW: Note that because timeouts run with Giant, if_watchdog() is called
|
|
|
|
* holding Giant. If we switch to an MPSAFE callout, we likely need to grab
|
|
|
|
* Giant before entering if_watchdog() on an IFF_NEEDSGIANT interface.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1995-12-09 20:47:15 +00:00
|
|
|
static void
|
2003-10-23 13:49:10 +00:00
|
|
|
if_slowtimo(void *arg)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-10-23 13:49:10 +00:00
|
|
|
struct ifnet *ifp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int s = splimp();
|
|
|
|
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RLOCK();
|
2001-02-04 13:13:25 +00:00
|
|
|
TAILQ_FOREACH(ifp, &ifnet, if_link) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if (ifp->if_timer == 0 || --ifp->if_timer)
|
|
|
|
continue;
|
|
|
|
if (ifp->if_watchdog)
|
1995-12-05 02:01:59 +00:00
|
|
|
(*ifp->if_watchdog)(ifp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RUNLOCK();
|
1994-05-24 10:09:53 +00:00
|
|
|
splx(s);
|
|
|
|
timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Map interface name to
|
|
|
|
* interface structure pointer.
|
|
|
|
*/
|
|
|
|
struct ifnet *
|
2001-07-02 20:49:25 +00:00
|
|
|
ifunit(const char *name)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1999-12-13 15:57:11 +00:00
|
|
|
struct ifnet *ifp;
|
2001-10-17 18:58:14 +00:00
|
|
|
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RLOCK();
|
2001-02-04 13:13:25 +00:00
|
|
|
TAILQ_FOREACH(ifp, &ifnet, if_link) {
|
2004-02-04 02:54:25 +00:00
|
|
|
if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
}
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RUNLOCK();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (ifp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-09-29 05:55:04 +00:00
|
|
|
* Hardware specific interface ioctls.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2001-09-29 05:55:04 +00:00
|
|
|
static int
|
|
|
|
ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-29 05:55:04 +00:00
|
|
|
struct ifreq *ifr;
|
1999-06-19 18:42:31 +00:00
|
|
|
struct ifstat *ifs;
|
2001-09-29 05:55:04 +00:00
|
|
|
int error = 0;
|
2005-08-09 10:16:17 +00:00
|
|
|
int new_flags, temp_flags;
|
2004-02-04 02:54:25 +00:00
|
|
|
size_t namelen, onamelen;
|
|
|
|
char new_name[IFNAMSIZ];
|
|
|
|
struct ifaddr *ifa;
|
|
|
|
struct sockaddr_dl *sdl;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
ifr = (struct ifreq *)data;
|
|
|
|
switch (cmd) {
|
2001-10-17 19:40:44 +00:00
|
|
|
case SIOCGIFINDEX:
|
|
|
|
ifr->ifr_index = ifp->if_index;
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCGIFFLAGS:
|
2005-08-09 10:16:17 +00:00
|
|
|
temp_flags = ifp->if_flags | ifp->if_drv_flags;
|
|
|
|
ifr->ifr_flags = temp_flags & 0xffff;
|
|
|
|
ifr->ifr_flagshigh = temp_flags >> 16;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
2001-09-18 17:41:42 +00:00
|
|
|
case SIOCGIFCAP:
|
|
|
|
ifr->ifr_reqcap = ifp->if_capabilities;
|
|
|
|
ifr->ifr_curcap = ifp->if_capenable;
|
|
|
|
break;
|
|
|
|
|
2002-08-01 21:15:53 +00:00
|
|
|
#ifdef MAC
|
|
|
|
case SIOCGIFMAC:
|
2003-03-20 21:17:40 +00:00
|
|
|
error = mac_ioctl_ifnet_get(td->td_ucred, ifr, ifp);
|
2002-08-01 21:15:53 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCGIFMETRIC:
|
|
|
|
ifr->ifr_metric = ifp->if_metric;
|
|
|
|
break;
|
|
|
|
|
1994-08-08 10:49:26 +00:00
|
|
|
case SIOCGIFMTU:
|
|
|
|
ifr->ifr_mtu = ifp->if_mtu;
|
|
|
|
break;
|
|
|
|
|
1994-12-21 22:57:05 +00:00
|
|
|
case SIOCGIFPHYS:
|
|
|
|
ifr->ifr_phys = ifp->if_physical;
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCSIFFLAGS:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2005-08-09 10:16:17 +00:00
|
|
|
/*
|
|
|
|
* Currently, no driver owned flags pass the IFF_CANTCHANGE
|
|
|
|
* check, so we don't need special handling here yet.
|
|
|
|
*/
|
2002-08-18 07:05:00 +00:00
|
|
|
new_flags = (ifr->ifr_flags & 0xffff) |
|
|
|
|
(ifr->ifr_flagshigh << 16);
|
1999-06-06 09:17:51 +00:00
|
|
|
if (ifp->if_flags & IFF_SMART) {
|
|
|
|
/* Smart drivers twiddle their own routes */
|
1999-06-06 09:28:01 +00:00
|
|
|
} else if (ifp->if_flags & IFF_UP &&
|
2002-08-18 07:05:00 +00:00
|
|
|
(new_flags & IFF_UP) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
int s = splimp();
|
|
|
|
if_down(ifp);
|
|
|
|
splx(s);
|
2002-08-18 07:05:00 +00:00
|
|
|
} else if (new_flags & IFF_UP &&
|
1999-06-06 09:17:51 +00:00
|
|
|
(ifp->if_flags & IFF_UP) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
int s = splimp();
|
|
|
|
if_up(ifp);
|
|
|
|
splx(s);
|
|
|
|
}
|
2005-10-03 01:47:43 +00:00
|
|
|
/* See if permanently promiscuous mode bit is about to flip */
|
|
|
|
if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
|
|
|
|
if (new_flags & IFF_PPROMISC)
|
|
|
|
ifp->if_flags |= IFF_PROMISC;
|
|
|
|
else if (ifp->if_pcount == 0)
|
|
|
|
ifp->if_flags &= ~IFF_PROMISC;
|
|
|
|
log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
|
|
|
|
ifp->if_xname,
|
|
|
|
(new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
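/* Keep the bits the caller may not change; take the rest from the request. */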
ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
|
2002-08-18 07:05:00 +00:00
|
|
|
(new_flags &~ IFF_CANTCHANGE);
|
2004-10-19 18:11:55 +00:00
|
|
|
if (ifp->if_ioctl) {
|
|
|
|
IFF_LOCKGIANT(ifp);
|
1994-05-24 10:09:53 +00:00
|
|
|
(void) (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
|
|
|
}
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
2001-09-18 17:41:42 +00:00
|
|
|
case SIOCSIFCAP:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
2001-09-18 17:41:42 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2004-02-21 12:48:25 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
2001-09-18 17:41:42 +00:00
|
|
|
if (ifr->ifr_reqcap & ~ifp->if_capabilities)
|
|
|
|
return (EINVAL);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
2004-02-21 12:48:25 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
2004-02-21 12:48:25 +00:00
|
|
|
if (error == 0)
|
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-18 17:41:42 +00:00
|
|
|
break;
|
|
|
|
|
2002-08-01 21:15:53 +00:00
|
|
|
#ifdef MAC
|
|
|
|
case SIOCSIFMAC:
|
2003-03-20 21:17:40 +00:00
|
|
|
error = mac_ioctl_ifnet_set(td->td_ucred, ifr, ifp);
|
2002-08-01 21:15:53 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
2004-02-04 02:54:25 +00:00
|
|
|
case SIOCSIFNAME:
|
|
|
|
error = suser(td);
|
2004-03-13 02:35:03 +00:00
|
|
|
if (error != 0)
|
2004-02-04 02:54:25 +00:00
|
|
|
return (error);
|
|
|
|
error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
|
2004-03-13 02:35:03 +00:00
|
|
|
if (error != 0)
|
2004-02-04 02:54:25 +00:00
|
|
|
return (error);
|
2004-03-13 02:35:03 +00:00
|
|
|
if (new_name[0] == '\0')
|
|
|
|
return (EINVAL);
|
2004-02-04 02:54:25 +00:00
|
|
|
if (ifunit(new_name) != NULL)
|
|
|
|
return (EEXIST);
|
|
|
|
|
|
|
|
/* Announce the departure of the interface. */
|
|
|
|
rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
|
2005-07-14 20:26:43 +00:00
|
|
|
EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
|
2004-09-18 05:02:08 +00:00
|
|
|
log(LOG_INFO, "%s: changing name to '%s'\n",
|
|
|
|
ifp->if_xname, new_name);
|
|
|
|
|
2004-02-04 02:54:25 +00:00
|
|
|
strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
|
2005-11-11 16:04:59 +00:00
|
|
|
ifa = ifp->if_addr;
|
2004-02-04 02:54:25 +00:00
|
|
|
IFA_LOCK(ifa);
|
|
|
|
sdl = (struct sockaddr_dl *)ifa->ifa_addr;
|
|
|
|
namelen = strlen(new_name);
|
|
|
|
onamelen = sdl->sdl_nlen;
|
|
|
|
/*
|
|
|
|
* Move the address if needed. This is safe because we
|
|
|
|
* allocate space for a name of length IFNAMSIZ when we
|
|
|
|
* create this in if_attach().
|
|
|
|
*/
|
|
|
|
if (namelen != onamelen) {
|
|
|
|
bcopy(sdl->sdl_data + onamelen,
|
|
|
|
sdl->sdl_data + namelen, sdl->sdl_alen);
|
|
|
|
}
|
|
|
|
bcopy(new_name, sdl->sdl_data, namelen);
|
|
|
|
sdl->sdl_nlen = namelen;
|
|
|
|
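/*
 * The link-level netmask covers the name portion of the address;
 * rewrite it to match the new name length.
 */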
sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
|
|
|
|
bzero(sdl->sdl_data, onamelen);
|
|
|
|
while (namelen != 0)
|
|
|
|
sdl->sdl_data[--namelen] = 0xff;
|
|
|
|
IFA_UNLOCK(ifa);
|
|
|
|
|
2004-02-26 04:27:55 +00:00
|
|
|
EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
|
2004-02-04 02:54:25 +00:00
|
|
|
/* Announce the return of the interface. */
|
|
|
|
rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCSIFMETRIC:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
ifp->if_metric = ifr->ifr_metric;
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
1994-12-21 22:57:05 +00:00
|
|
|
case SIOCSIFPHYS:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1996-06-10 23:07:36 +00:00
|
|
|
if (error)
|
2004-02-21 12:56:09 +00:00
|
|
|
return (error);
|
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
1996-06-10 23:07:36 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
1996-06-10 23:07:36 +00:00
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2004-02-21 12:56:09 +00:00
|
|
|
break;
|
1994-12-21 22:57:05 +00:00
|
|
|
|
1994-08-08 10:49:26 +00:00
|
|
|
case SIOCSIFMTU:
|
1999-11-22 02:45:11 +00:00
|
|
|
{
|
|
|
|
u_long oldmtu = ifp->if_mtu;
|
|
|
|
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-08-08 10:49:26 +00:00
|
|
|
return (error);
|
1999-08-06 13:53:03 +00:00
|
|
|
if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
|
1994-08-08 10:58:30 +00:00
|
|
|
return (EINVAL);
|
2001-09-29 05:55:04 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
1996-06-10 23:07:36 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
2000-01-24 08:53:39 +00:00
|
|
|
if (error == 0) {
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2000-01-24 08:53:39 +00:00
|
|
|
rt_ifmsg(ifp);
|
|
|
|
}
|
1999-11-22 02:45:11 +00:00
|
|
|
/*
|
|
|
|
* If the link MTU changed, perform any network layer specific procedures.
|
|
|
|
*/
|
|
|
|
if (ifp->if_mtu != oldmtu) {
|
|
|
|
#ifdef INET6
|
|
|
|
nd6_setmtu(ifp);
|
|
|
|
#endif
|
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1999-11-22 02:45:11 +00:00
|
|
|
}
|
1994-08-08 10:49:26 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1994-10-08 01:40:23 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
/* Don't allow group membership on non-multicast interfaces. */
|
|
|
|
if ((ifp->if_flags & IFF_MULTICAST) == 0)
|
2001-09-29 05:55:04 +00:00
|
|
|
return (EOPNOTSUPP);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
/* Don't let users screw up protocols' entries. */
|
|
|
|
if (ifr->ifr_addr.sa_family != AF_LINK)
|
2001-09-29 05:55:04 +00:00
|
|
|
return (EINVAL);
|
1997-01-13 21:26:53 +00:00
|
|
|
|
|
|
|
if (cmd == SIOCADDMULTI) {
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
|
|
|
|
} else {
|
|
|
|
error = if_delmulti(ifp, &ifr->ifr_addr);
|
|
|
|
}
|
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2000-10-04 23:16:29 +00:00
|
|
|
case SIOCSIFPHYADDR:
|
|
|
|
case SIOCDIFPHYADDR:
|
|
|
|
#ifdef INET6
|
|
|
|
case SIOCSIFPHYADDR_IN6:
|
|
|
|
#endif
|
2001-06-11 12:39:29 +00:00
|
|
|
case SIOCSLIFPHYADDR:
|
2003-10-23 13:49:10 +00:00
|
|
|
case SIOCSIFMEDIA:
|
1997-10-07 07:40:35 +00:00
|
|
|
case SIOCSIFGENERIC:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1997-05-03 21:07:13 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-09-29 05:55:04 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
1997-05-03 21:07:13 +00:00
|
|
|
return (EOPNOTSUPP);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
1997-05-03 21:07:13 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
1997-05-03 21:07:13 +00:00
|
|
|
if (error == 0)
|
1998-04-06 11:43:12 +00:00
|
|
|
getmicrotime(&ifp->if_lastchange);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1997-05-03 21:07:13 +00:00
|
|
|
|
1999-06-19 18:42:31 +00:00
|
|
|
case SIOCGIFSTATUS:
|
|
|
|
ifs = (struct ifstat *)data;
|
|
|
|
ifs->ascii[0] = '\0';
|
2003-10-23 13:49:10 +00:00
|
|
|
|
2001-06-11 12:39:29 +00:00
|
|
|
case SIOCGIFPSRCADDR:
|
|
|
|
case SIOCGIFPDSTADDR:
|
|
|
|
case SIOCGLIFPHYADDR:
|
1997-05-03 21:07:13 +00:00
|
|
|
case SIOCGIFMEDIA:
|
1997-10-07 07:40:35 +00:00
|
|
|
case SIOCGIFGENERIC:
|
2004-02-21 12:56:09 +00:00
|
|
|
if (ifp->if_ioctl == NULL)
|
1997-05-03 21:07:13 +00:00
|
|
|
return (EOPNOTSUPP);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
2001-09-29 05:55:04 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, data);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
1997-05-03 21:07:13 +00:00
|
|
|
|
2000-06-16 20:14:43 +00:00
|
|
|
case SIOCSIFLLADDR:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
2000-06-16 20:14:43 +00:00
|
|
|
if (error)
|
|
|
|
return (error);
|
2001-09-29 05:55:04 +00:00
|
|
|
error = if_setlladdr(ifp,
|
2000-08-15 00:48:38 +00:00
|
|
|
ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
|
2001-09-29 05:55:04 +00:00
|
|
|
break;
|
2000-08-15 00:48:38 +00:00
|
|
|
|
2006-06-19 22:20:45 +00:00
|
|
|
case SIOCAIFGROUP:
|
|
|
|
{
|
|
|
|
struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
|
|
|
|
|
|
|
|
error = suser(td);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if ((error = if_addgroup(ifp, ifgr->ifgr_group)))
|
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case SIOCGIFGROUP:
|
|
|
|
if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp)))
|
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case SIOCDIFGROUP:
|
|
|
|
{
|
|
|
|
struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
|
|
|
|
|
|
|
|
error = suser(td);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if ((error = if_delgroup(ifp, ifgr->ifgr_group)))
|
|
|
|
return (error);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
default:
|
2001-09-29 05:55:04 +00:00
|
|
|
error = ENOIOCTL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interface ioctls.
|
|
|
|
*/
|
|
|
|
int
|
2003-10-23 13:49:10 +00:00
|
|
|
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
|
2001-09-29 05:55:04 +00:00
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifreq *ifr;
|
|
|
|
int error;
|
2002-08-18 07:05:00 +00:00
|
|
|
int oif_flags;
|
2001-09-29 05:55:04 +00:00
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGIFCONF:
|
|
|
|
case OSIOCGIFCONF:
|
2006-02-02 19:58:37 +00:00
|
|
|
#ifdef __amd64__
|
|
|
|
case SIOCGIFCONF32:
|
|
|
|
#endif
|
2001-09-29 05:55:04 +00:00
|
|
|
return (ifconf(cmd, data));
|
|
|
|
}
|
|
|
|
ifr = (struct ifreq *)data;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCIFCREATE:
|
2006-07-09 06:04:01 +00:00
|
|
|
case SIOCIFCREATE2:
|
|
|
|
if ((error = suser(td)) != 0)
|
|
|
|
return (error);
|
|
|
|
return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
|
|
|
|
cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
|
2001-09-29 05:55:04 +00:00
|
|
|
case SIOCIFDESTROY:
|
2002-04-01 21:31:13 +00:00
|
|
|
if ((error = suser(td)) != 0)
|
2001-09-29 05:55:04 +00:00
|
|
|
return (error);
|
2006-07-09 06:04:01 +00:00
|
|
|
return if_clone_destroy(ifr->ifr_name);
|
2003-10-23 13:49:10 +00:00
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
case SIOCIFGCLONERS:
|
|
|
|
return (if_clone_list((struct if_clonereq *)data));
|
2006-06-19 22:20:45 +00:00
|
|
|
case SIOCGIFGMEMB:
|
|
|
|
return (if_getgroupmembers((struct ifgroupreq *)data));
|
2001-09-29 05:55:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ifp = ifunit(ifr->ifr_name);
|
|
|
|
if (ifp == 0)
|
|
|
|
return (ENXIO);
|
|
|
|
|
|
|
|
error = ifhwioctl(cmd, ifp, data, td);
|
|
|
|
if (error != ENOIOCTL)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
oif_flags = ifp->if_flags;
|
|
|
|
if (so->so_proto == 0)
|
|
|
|
return (EOPNOTSUPP);
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifndef COMPAT_43
|
2001-09-29 05:55:04 +00:00
|
|
|
error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
|
1996-07-11 16:32:50 +00:00
|
|
|
data,
|
2001-09-12 08:38:13 +00:00
|
|
|
ifp, td));
|
1994-05-24 10:09:53 +00:00
|
|
|
#else
|
2001-09-29 05:55:04 +00:00
|
|
|
{
|
1994-05-24 10:09:53 +00:00
|
|
|
int ocmd = cmd;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
|
|
|
|
case SIOCSIFDSTADDR:
|
|
|
|
case SIOCSIFADDR:
|
|
|
|
case SIOCSIFBRDADDR:
|
|
|
|
case SIOCSIFNETMASK:
|
|
|
|
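/*
 * Old 4.3BSD sockaddrs carry a 16-bit sa_family and no sa_len.
 * Viewed through the modern layout, the family value lands in
 * sa_len on little-endian machines (leaving sa_family 0), while
 * on big-endian machines sa_len simply reads as 0.  Reconstruct
 * sa_family/sa_len before passing the request down.
 */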
#if BYTE_ORDER != BIG_ENDIAN
|
|
|
|
if (ifr->ifr_addr.sa_family == 0 &&
|
|
|
|
ifr->ifr_addr.sa_len < 16) {
|
|
|
|
ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
|
|
|
|
ifr->ifr_addr.sa_len = 16;
|
|
|
|
}
|
|
|
|
#else
|
|
|
|
if (ifr->ifr_addr.sa_len == 0)
|
|
|
|
ifr->ifr_addr.sa_len = 16;
|
|
|
|
#endif
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OSIOCGIFADDR:
|
|
|
|
cmd = SIOCGIFADDR;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OSIOCGIFDSTADDR:
|
|
|
|
cmd = SIOCGIFDSTADDR;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OSIOCGIFBRDADDR:
|
|
|
|
cmd = SIOCGIFBRDADDR;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case OSIOCGIFNETMASK:
|
|
|
|
cmd = SIOCGIFNETMASK;
|
|
|
|
}
|
1996-07-11 16:32:50 +00:00
|
|
|
error = ((*so->so_proto->pr_usrreqs->pru_control)(so,
|
|
|
|
cmd,
|
|
|
|
data,
|
2001-09-12 08:38:13 +00:00
|
|
|
ifp, td));
|
1994-05-24 10:09:53 +00:00
|
|
|
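/*
 * For old-style get requests, convert the returned sockaddr back
 * to the 4.3BSD layout: a 16-bit sa_family overlaying sa_len.
 */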
switch (ocmd) {
|
|
|
|
|
|
|
|
case OSIOCGIFADDR:
|
|
|
|
case OSIOCGIFDSTADDR:
|
|
|
|
case OSIOCGIFBRDADDR:
|
|
|
|
case OSIOCGIFNETMASK:
|
|
|
|
*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
|
|
|
|
|
1999-11-22 02:45:11 +00:00
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
}
|
1999-11-22 02:45:11 +00:00
|
|
|
#endif /* COMPAT_43 */
|
|
|
|
|
2001-09-29 05:55:04 +00:00
|
|
|
if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
|
1999-11-22 02:45:11 +00:00
|
|
|
#ifdef INET6
|
2002-04-19 04:46:24 +00:00
|
|
|
DELAY(100);/* XXX: temporary workaround for fxp issue*/
|
2001-09-29 05:55:04 +00:00
|
|
|
if (ifp->if_flags & IFF_UP) {
|
|
|
|
int s = splimp();
|
|
|
|
in6_if_up(ifp);
|
|
|
|
splx(s);
|
1999-11-22 02:45:11 +00:00
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-09-29 05:55:04 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1995-09-22 17:57:48 +00:00
|
|
|
/*
|
2005-08-09 10:16:17 +00:00
|
|
|
* The code common to handling reference counted flags,
|
2005-07-14 13:56:51 +00:00
|
|
|
* e.g., in ifpromisc() and if_allmulti().
|
2005-10-03 02:14:51 +00:00
|
|
|
* The "pflag" argument can specify a permanent mode flag to check,
|
2005-07-14 13:56:51 +00:00
|
|
|
* such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
|
2005-08-09 10:16:17 +00:00
|
|
|
*
|
|
|
|
* Only to be used on stack-owned flags, not driver-owned flags.
|
1995-09-22 17:57:48 +00:00
|
|
|
*/
|
2005-07-14 13:56:51 +00:00
|
|
|
static int
|
|
|
|
if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
|
1995-09-22 17:57:48 +00:00
|
|
|
{
|
|
|
|
struct ifreq ifr;
|
1997-02-14 15:30:54 +00:00
|
|
|
int error;
|
2005-07-14 13:56:51 +00:00
|
|
|
int oldflags, oldcount;
|
1995-09-22 17:57:48 +00:00
|
|
|
|
2005-10-03 02:14:51 +00:00
|
|
|
/* Sanity checks to catch programming errors */
|
2005-08-09 10:16:17 +00:00
|
|
|
KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
|
2005-10-03 02:14:51 +00:00
|
|
|
("%s: setting driver-owned flag %d", __func__, flag));
|
2005-08-09 10:16:17 +00:00
|
|
|
|
2005-10-03 02:14:51 +00:00
|
|
|
if (onswitch)
|
|
|
|
KASSERT(*refcount >= 0,
|
|
|
|
("%s: increment negative refcount %d for flag %d",
|
|
|
|
__func__, *refcount, flag));
|
|
|
|
else
|
|
|
|
KASSERT(*refcount > 0,
|
|
|
|
("%s: decrement non-positive refcount %d for flag %d",
|
|
|
|
__func__, *refcount, flag));
|
2005-07-14 13:56:51 +00:00
|
|
|
|
|
|
|
/* In case this mode is permanent, just touch refcount */
|
|
|
|
if (ifp->if_flags & pflag) {
|
|
|
|
*refcount += onswitch ? 1 : -1;
|
2002-08-19 15:16:38 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2005-07-14 13:56:51 +00:00
|
|
|
|
|
|
|
/* Save ifnet parameters in case if_ioctl() fails */
|
|
|
|
oldcount = *refcount;
|
|
|
|
oldflags = ifp->if_flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See if we are not the only consumer, in which case touching the refcount is enough.
|
|
|
|
* Actually toggle the interface flag only if we are the first or last.
|
|
|
|
*/
|
|
|
|
if (onswitch) {
|
|
|
|
if ((*refcount)++)
|
1995-09-22 17:57:48 +00:00
|
|
|
return (0);
|
2005-07-14 13:56:51 +00:00
|
|
|
ifp->if_flags |= flag;
|
1995-09-22 17:57:48 +00:00
|
|
|
} else {
|
2005-07-14 13:56:51 +00:00
|
|
|
if (--(*refcount))
|
1995-09-22 17:57:48 +00:00
|
|
|
return (0);
|
2005-07-14 13:56:51 +00:00
|
|
|
ifp->if_flags &= ~flag;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Call down to the driver since we've changed the interface flags */
|
|
|
|
if (ifp->if_ioctl == NULL) {
|
|
|
|
error = EOPNOTSUPP;
|
|
|
|
goto recover;
|
1995-09-22 17:57:48 +00:00
|
|
|
}
|
2002-08-18 07:05:00 +00:00
|
|
|
ifr.ifr_flags = ifp->if_flags & 0xffff;
|
|
|
|
ifr.ifr_flagshigh = ifp->if_flags >> 16;
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_LOCKGIANT(ifp);
|
1997-02-14 15:30:54 +00:00
|
|
|
error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
|
2004-10-19 18:11:55 +00:00
|
|
|
IFF_UNLOCKGIANT(ifp);
|
2005-07-14 13:56:51 +00:00
|
|
|
if (error)
|
|
|
|
goto recover;
|
|
|
|
/* Notify userland that interface flags have changed */
|
|
|
|
rt_ifmsg(ifp);
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
recover:
|
|
|
|
/* Recover after driver error */
|
|
|
|
*refcount = oldcount;
|
|
|
|
ifp->if_flags = oldflags;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set/clear promiscuous mode on interface ifp based on the truth value
|
|
|
|
* of pswitch. The calls are reference counted so that only the first
|
|
|
|
* "on" request actually has an effect, as does the final "off" request.
|
|
|
|
* Results are undefined if the "off" and "on" requests are not matched.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ifpromisc(struct ifnet *ifp, int pswitch)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int oldflags = ifp->if_flags;
|
|
|
|
|
|
|
|
error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
|
|
|
|
&ifp->if_pcount, pswitch);
|
|
|
|
/* If promiscuous mode status has changed, log a message */
|
|
|
|
if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC))
|
2003-10-31 18:32:15 +00:00
|
|
|
log(LOG_INFO, "%s: promiscuous mode %s\n",
|
|
|
|
ifp->if_xname,
|
2001-04-27 22:20:22 +00:00
|
|
|
(ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
|
2005-07-14 13:56:51 +00:00
|
|
|
return (error);
|
1995-09-22 17:57:48 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Return interface configuration
|
|
|
|
* of system. List may be used
|
|
|
|
* in later ioctl's (above) to get
|
|
|
|
* other information.
|
|
|
|
*/
|
|
|
|
/*ARGSUSED*/
|
1995-12-09 20:47:15 +00:00
|
|
|
static int
|
2003-10-23 13:49:10 +00:00
|
|
|
ifconf(u_long cmd, caddr_t data)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-06 00:44:45 +00:00
|
|
|
struct ifconf *ifc = (struct ifconf *)data;
|
2006-02-02 19:58:37 +00:00
|
|
|
#ifdef __amd64__
|
|
|
|
struct ifconf32 *ifc32 = (struct ifconf32 *)data;
|
|
|
|
struct ifconf ifc_swab;
|
|
|
|
#endif
|
2001-09-06 00:44:45 +00:00
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifaddr *ifa;
|
2004-09-22 08:59:41 +00:00
|
|
|
struct ifreq ifr;
|
|
|
|
struct sbuf *sb;
|
|
|
|
int error, full = 0, valid_len, max_len;
|
|
|
|
|
2006-02-02 19:58:37 +00:00
|
|
|
#ifdef __amd64__
|
|
|
|
if (cmd == SIOCGIFCONF32) {
|
|
|
|
ifc_swab.ifc_len = ifc32->ifc_len;
|
|
|
|
ifc_swab.ifc_buf = (caddr_t)(uintptr_t)ifc32->ifc_buf;
|
|
|
|
ifc = &ifc_swab;
|
|
|
|
}
|
|
|
|
#endif
|
2004-09-22 08:59:41 +00:00
|
|
|
/* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
|
|
|
|
max_len = MAXPHYS - 1;
|
|
|
|
|
2005-02-12 17:51:12 +00:00
|
|
|
/* Prevent hostile input from being able to crash the system */
|
|
|
|
if (ifc->ifc_len <= 0)
|
|
|
|
return (EINVAL);
|
|
|
|
|
2004-09-22 08:59:41 +00:00
|
|
|
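/*
 * Build the list into an sbuf.  max_len accumulates the bytes we
 * attempt to append and valid_len the bytes that actually fit, so
 * that an overflowed pass can restart with a buffer sized to the
 * full list (bounded by the caller's ifc_len).
 */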
again:
|
|
|
|
if (ifc->ifc_len <= max_len) {
|
|
|
|
max_len = ifc->ifc_len;
|
|
|
|
full = 1;
|
|
|
|
}
|
|
|
|
sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
|
|
|
|
max_len = 0;
|
|
|
|
valid_len = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RLOCK(); /* could sleep XXX */
|
2001-09-06 00:44:45 +00:00
|
|
|
TAILQ_FOREACH(ifp, &ifnet, if_link) {
|
2003-10-31 18:32:15 +00:00
|
|
|
int addrs;
|
1995-05-30 08:16:23 +00:00
|
|
|
|
2005-04-15 01:52:40 +00:00
|
|
|
/*
|
|
|
|
* Zero the ifr_name buffer to make sure we don't
|
|
|
|
* disclose the contents of the stack.
|
|
|
|
*/
|
|
|
|
memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name));
|
|
|
|
|
2003-10-31 18:32:15 +00:00
|
|
|
if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
|
2005-09-04 17:32:47 +00:00
|
|
|
>= sizeof(ifr.ifr_name)) {
|
|
|
|
sbuf_delete(sb);
|
|
|
|
IFNET_RUNLOCK();
|
2004-09-22 08:59:41 +00:00
|
|
|
return (ENAMETOOLONG);
|
2005-09-04 17:32:47 +00:00
|
|
|
}
|
1994-10-05 20:11:28 +00:00
|
|
|
|
This Implements the mumbled about "Jail" feature.
This is a seriously beefed up chroot kind of thing. The process
is jailed along the same lines as a chroot does it, but with
additional tough restrictions imposed on what the superuser can do.
For all I know, it is safe to hand over the root bit inside a
prison to the customer living in that prison, this is what
it was developed for in fact: "real virtual servers".
Each prison has an ip number associated with it, which all IP
communications will be coerced to use and each prison has its own
hostname.
Needless to say, you need more RAM this way, but the advantage is
that each customer can run their own particular version of apache
and not stomp on the toes of their neighbors.
It generally does what one would expect, but setting up a jail
still takes a little knowledge.
A few notes:
I have no scripts for setting up a jail, don't ask me for them.
The IP number should be an alias on one of the interfaces.
mount a /proc in each jail, it will make ps more useable.
/proc/<pid>/status tells the hostname of the prison for
jailed processes.
Quotas are only sensible if you have a mountpoint per prison.
There are no provisions for stopping resource-hogging.
Some "#ifdef INET" and similar may be missing (send patches!)
If somebody wants to take it from here and develop it into
more of a "virtual machine" they should be most welcome!
Tools, comments, patches & documentation most welcome.
Have fun...
Sponsored by: http://www.rndassociates.com/
Run for almost a year by: http://www.servetheweb.com/
1999-04-28 11:38:52 +00:00
|
|
|
addrs = 0;
|
2001-09-07 05:32:54 +00:00
|
|
|
TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
|
|
|
|
struct sockaddr *sa = ifa->ifa_addr;
|
|
|
|
|
2002-02-27 18:32:23 +00:00
|
|
|
if (jailed(curthread->td_ucred) &&
|
|
|
|
prison_if(curthread->td_ucred, sa))
|
1999-04-28 11:38:52 +00:00
|
|
|
continue;
|
|
|
|
addrs++;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef COMPAT_43
|
|
|
|
if (cmd == OSIOCGIFCONF) {
|
|
|
|
struct osockaddr *osa =
|
|
|
|
(struct osockaddr *)&ifr.ifr_addr;
|
|
|
|
ifr.ifr_addr = *sa;
|
|
|
|
osa->sa_family = sa->sa_family;
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr, sizeof(ifr));
|
|
|
|
max_len += sizeof(ifr);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
if (sa->sa_len <= sizeof(*sa)) {
|
|
|
|
ifr.ifr_addr = *sa;
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr, sizeof(ifr));
|
|
|
|
max_len += sizeof(ifr);
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr,
|
|
|
|
offsetof(struct ifreq, ifr_addr));
|
|
|
|
max_len += offsetof(struct ifreq, ifr_addr);
|
|
|
|
sbuf_bcat(sb, sa, sa->sa_len);
|
|
|
|
max_len += sa->sa_len;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-09-22 08:59:41 +00:00
|
|
|
|
|
|
|
if (!sbuf_overflowed(sb))
|
|
|
|
valid_len = sbuf_len(sb);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-09-22 08:59:41 +00:00
|
|
|
if (addrs == 0) {
|
1999-04-28 11:38:52 +00:00
|
|
|
bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
|
2004-09-22 08:59:41 +00:00
|
|
|
sbuf_bcat(sb, &ifr, sizeof(ifr));
|
|
|
|
max_len += sizeof(ifr);
|
|
|
|
|
|
|
|
if (!sbuf_overflowed(sb))
|
|
|
|
valid_len = sbuf_len(sb);
|
1999-04-28 11:38:52 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2002-12-22 05:35:03 +00:00
|
|
|
IFNET_RUNLOCK();
|
2004-09-22 08:59:41 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If we didn't allocate enough space (uncommon), try again. If
|
|
|
|
* we have already allocated as much space as we are allowed,
|
|
|
|
* return what we've got.
|
|
|
|
*/
|
|
|
|
if (valid_len != max_len && !full) {
|
|
|
|
sbuf_delete(sb);
|
|
|
|
goto again;
|
|
|
|
}
|
|
|
|
|
|
|
|
ifc->ifc_len = valid_len;
|
2006-02-02 19:58:37 +00:00
|
|
|
#ifdef __amd64__
|
|
|
|
if (cmd == SIOCGIFCONF32)
|
|
|
|
ifc32->ifc_len = valid_len;
|
|
|
|
#endif
|
2004-09-22 12:53:27 +00:00
|
|
|
sbuf_finish(sb);
|
2004-09-22 08:59:41 +00:00
|
|
|
error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
|
|
|
|
sbuf_delete(sb);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
2005-02-22 15:29:29 +00:00
|
|
|
* Just like ifpromisc(), but for all-multicast-reception mode.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
|
|
|
int
|
2003-10-23 13:49:10 +00:00
|
|
|
if_allmulti(struct ifnet *ifp, int onswitch)
|
1997-01-07 19:15:32 +00:00
|
|
|
{
|
1997-02-14 15:30:54 +00:00
|
|
|
|
2005-07-14 13:56:51 +00:00
|
|
|
return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
|
1997-01-07 19:15:32 +00:00
|
|
|
}
|
|
|
|
|
2005-08-02 23:23:26 +00:00
|
|
|
static struct ifmultiaddr *
|
|
|
|
if_findmulti(struct ifnet *ifp, struct sockaddr *sa)
|
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
|
|
|
|
|
|
|
IF_ADDR_LOCK_ASSERT(ifp);
|
|
|
|
|
|
|
|
TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
|
|
|
|
if (sa_equal(ifma->ifma_addr, sa))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ifma;
|
|
|
|
}
|
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
2005-08-02 23:23:26 +00:00
|
|
|
* Allocate a new ifmultiaddr and initialize based on passed arguments. We
|
|
|
|
* make copies of passed sockaddrs. The ifmultiaddr will not be added to
|
|
|
|
* the ifnet multicast address list here, so the caller must do that and
|
|
|
|
* other setup work (such as notifying the device driver). The reference
|
|
|
|
* count is initialized to 1.
|
1997-01-07 19:15:32 +00:00
|
|
|
*/
|
2005-08-02 23:23:26 +00:00
|
|
|
static struct ifmultiaddr *
|
|
|
|
if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
|
|
|
|
int mflags)
|
1997-01-07 19:15:32 +00:00
|
|
|
{
|
|
|
|
struct ifmultiaddr *ifma;
|
2005-08-02 23:23:26 +00:00
|
|
|
struct sockaddr *dupsa;
|
|
|
|
|
|
|
|
MALLOC(ifma, struct ifmultiaddr *, sizeof *ifma, M_IFMADDR, mflags |
|
|
|
|
M_ZERO);
|
|
|
|
if (ifma == NULL)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
MALLOC(dupsa, struct sockaddr *, sa->sa_len, M_IFMADDR, mflags);
|
|
|
|
if (dupsa == NULL) {
|
|
|
|
FREE(ifma, M_IFMADDR);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
bcopy(sa, dupsa, sa->sa_len);
|
|
|
|
ifma->ifma_addr = dupsa;
|
|
|
|
|
|
|
|
ifma->ifma_ifp = ifp;
|
|
|
|
ifma->ifma_refcount = 1;
|
|
|
|
ifma->ifma_protospec = NULL;
|
|
|
|
|
|
|
|
if (llsa == NULL) {
|
|
|
|
ifma->ifma_lladdr = NULL;
|
|
|
|
return (ifma);
|
|
|
|
}
|
|
|
|
|
|
|
|
MALLOC(dupsa, struct sockaddr *, llsa->sa_len, M_IFMADDR, mflags);
|
|
|
|
if (dupsa == NULL) {
|
|
|
|
FREE(ifma->ifma_addr, M_IFMADDR);
|
|
|
|
FREE(ifma, M_IFMADDR);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
bcopy(llsa, dupsa, llsa->sa_len);
|
|
|
|
ifma->ifma_lladdr = dupsa;
|
|
|
|
|
|
|
|
return (ifma);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* if_freemulti: free ifmultiaddr structure and possibly attached related
|
|
|
|
* addresses. The caller is responsible for implementing reference
|
|
|
|
* counting, notifying the driver, handling routing messages, and releasing
|
|
|
|
* any dependent link layer state.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
if_freemulti(struct ifmultiaddr *ifma)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(ifma->ifma_refcount == 1, ("if_freemulti: refcount %d",
|
|
|
|
ifma->ifma_refcount));
|
|
|
|
KASSERT(ifma->ifma_protospec == NULL,
|
|
|
|
("if_freemulti: protospec not NULL"));
|
|
|
|
|
|
|
|
if (ifma->ifma_lladdr != NULL)
|
|
|
|
FREE(ifma->ifma_lladdr, M_IFMADDR);
|
|
|
|
FREE(ifma->ifma_addr, M_IFMADDR);
|
|
|
|
FREE(ifma, M_IFMADDR);
|
|
|
|
}

/*
 * Register an additional multicast address with a network interface.
 *
 * - If the address is already present, bump the reference count on the
 *   address and return.
 * - If the address is not link-layer, look up a link layer address.
 * - Allocate address structures for one or both addresses, and attach to the
 *   multicast address list on the interface.  If automatically adding a link
 *   layer address, the protocol address will own a reference to the link
 *   layer address, to be freed when it is freed.
 * - Notify the network device driver of an addition to the multicast address
 *   list.
 *
 * 'sa' points to caller-owned memory with the desired multicast address.
 *
 * 'retifma' will be used to return a pointer to the resulting multicast
 * address reference, if desired.
 */
int
if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
    struct ifmultiaddr **retifma)
{
        struct ifmultiaddr *ifma, *ll_ifma;
        struct sockaddr *llsa;
        int error;

        /*
         * If the address is already present, return a new reference to it;
         * otherwise, allocate storage and set up a new address.
         */
        IF_ADDR_LOCK(ifp);
        ifma = if_findmulti(ifp, sa);
        if (ifma != NULL) {
                ifma->ifma_refcount++;
                if (retifma != NULL)
                        *retifma = ifma;
                IF_ADDR_UNLOCK(ifp);
                return (0);
        }

        /*
         * The address isn't already present; resolve the protocol address
         * into a link layer address, and then look that up, bump its
         * refcount or allocate an ifma for that also.  If 'llsa' was
         * returned, we will need to free it later.
         */
        llsa = NULL;
        ll_ifma = NULL;
        if (ifp->if_resolvemulti != NULL) {
                error = ifp->if_resolvemulti(ifp, &llsa, sa);
                if (error)
                        goto unlock_out;
        }

        /*
         * Allocate the new address.  Don't hook it up yet, as we may also
         * need to allocate a link layer multicast address.
         */
        ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
        if (ifma == NULL) {
                error = ENOMEM;
                goto free_llsa_out;
        }

        /*
         * If a link layer address is found, we'll need to see if it's
         * already present in the address list, or allocate it as well.
         * When this block finishes, the link layer address will be on the
         * list.
         */
        if (llsa != NULL) {
                ll_ifma = if_findmulti(ifp, llsa);
                if (ll_ifma == NULL) {
                        ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
                        if (ll_ifma == NULL) {
                                if_freemulti(ifma);
                                error = ENOMEM;
                                goto free_llsa_out;
                        }
                        TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
                            ifma_link);
                } else
                        ll_ifma->ifma_refcount++;
        }

        /*
         * We now have a new multicast address, ifma, and possibly a new or
         * referenced link layer address.  Add the primary address to the
         * ifnet address list.
         */
        TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);

        if (retifma != NULL)
                *retifma = ifma;

        /*
         * Must generate the message while holding the lock so that the
         * 'ifma' pointer is still valid.
         *
         * XXXRW: How come we don't announce ll_ifma?
         */
        rt_newmaddrmsg(RTM_NEWMADDR, ifma);
        IF_ADDR_UNLOCK(ifp);

        /*
         * We are certain we have added something, so call down to the
         * interface to let it know about the change.
         */
        if (ifp->if_ioctl != NULL) {
                IFF_LOCKGIANT(ifp);
                (void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
                IFF_UNLOCKGIANT(ifp);
        }

        if (llsa != NULL)
                FREE(llsa, M_IFMADDR);

        return (0);

free_llsa_out:
        if (llsa != NULL)
                FREE(llsa, M_IFMADDR);

unlock_out:
        IF_ADDR_UNLOCK(ifp);
        return (error);
}

/*
 * Remove a reference to a multicast address on this interface.  Yell
 * if the request does not match an existing membership.
 */
int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
        struct ifmultiaddr *ifma, *ll_ifma;

        IF_ADDR_LOCK(ifp);
        ifma = if_findmulti(ifp, sa);
        if (ifma == NULL) {
                IF_ADDR_UNLOCK(ifp);
                return ENOENT;
        }

        if (ifma->ifma_refcount > 1) {
                ifma->ifma_refcount--;
                IF_ADDR_UNLOCK(ifp);
                return 0;
        }

        sa = ifma->ifma_lladdr;
        if (sa != NULL)
                ll_ifma = if_findmulti(ifp, sa);
        else
                ll_ifma = NULL;

        /*
         * XXXRW: How come we don't announce ll_ifma?
         */
        rt_newmaddrmsg(RTM_DELMADDR, ifma);

        TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
        if_freemulti(ifma);

        if (ll_ifma != NULL) {
                if (ll_ifma->ifma_refcount == 1) {
                        TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma, ifma_link);
                        if_freemulti(ll_ifma);
                } else
                        ll_ifma->ifma_refcount--;
        }
        IF_ADDR_UNLOCK(ifp);

        /*
         * Make sure the interface driver is notified
         * in the case of a link layer mcast group being left.
         */
        if (ifp->if_ioctl) {
                IFF_LOCKGIANT(ifp);
                (void) (*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
                IFF_UNLOCKGIANT(ifp);
        }

        return 0;
}
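
/*
 * Illustrative sketch (not part of this file): how a hypothetical consumer
 * might join and later leave a link-layer multicast group via if_addmulti()
 * and if_delmulti().  The group address, the surrounding function, and the
 * assumption that <net/if_dl.h> and <net/ethernet.h> are available are all
 * for illustration only; real consumers (e.g. protocol membership code)
 * drive these routines through their own data structures.
 */
#if 0
static int
example_join_leave(struct ifnet *ifp)
{
        struct sockaddr_dl sdl;
        struct ifmultiaddr *ifma;
        int error;

        /* Build a link-layer sockaddr for a made-up Ethernet group. */
        bzero(&sdl, sizeof(sdl));
        sdl.sdl_len = sizeof(sdl);
        sdl.sdl_family = AF_LINK;
        sdl.sdl_type = IFT_ETHER;
        sdl.sdl_alen = ETHER_ADDR_LEN;
        bcopy("\x01\x00\x5e\x00\x00\x01", LLADDR(&sdl), ETHER_ADDR_LEN);

        /* Join: allocates a new ifma or bumps an existing reference. */
        error = if_addmulti(ifp, (struct sockaddr *)&sdl, &ifma);
        if (error != 0)
                return (error);

        /* ... use the membership ... */

        /* Leave: drops the reference; the last drop frees the ifma. */
        return (if_delmulti(ifp, (struct sockaddr *)&sdl));
}
#endif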

/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
        struct sockaddr_dl *sdl;
        struct ifaddr *ifa;
        struct ifreq ifr;

        ifa = ifp->if_addr;
        if (ifa == NULL)
                return (EINVAL);
        sdl = (struct sockaddr_dl *)ifa->ifa_addr;
        if (sdl == NULL)
                return (EINVAL);
        if (len != sdl->sdl_alen)       /* don't allow length to change */
                return (EINVAL);
        switch (ifp->if_type) {
        case IFT_ETHER:
        case IFT_FDDI:
        case IFT_XETHER:
        case IFT_ISO88025:
        case IFT_L2VLAN:
        case IFT_BRIDGE:
        case IFT_ARCNET:
                bcopy(lladdr, LLADDR(sdl), len);
                break;
        default:
                return (ENODEV);
        }
        /*
         * If the interface is already up, we need
         * to re-init it in order to reprogram its
         * address filter.
         */
        if ((ifp->if_flags & IFF_UP) != 0) {
                if (ifp->if_ioctl) {
                        IFF_LOCKGIANT(ifp);
                        ifp->if_flags &= ~IFF_UP;
                        ifr.ifr_flags = ifp->if_flags & 0xffff;
                        ifr.ifr_flagshigh = ifp->if_flags >> 16;
                        (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
                        ifp->if_flags |= IFF_UP;
                        ifr.ifr_flags = ifp->if_flags & 0xffff;
                        ifr.ifr_flagshigh = ifp->if_flags >> 16;
                        (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
                        IFF_UNLOCKGIANT(ifp);
                }
#ifdef INET
                /*
                 * Also send gratuitous ARPs to notify other nodes about
                 * the address change.
                 */
                TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
                        if (ifa->ifa_addr->sa_family == AF_INET)
                                arp_ifinit(ifp, ifa);
                }
#endif
        }
        return (0);
}
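
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * changing an Ethernet interface's MAC address with if_setlladdr().  The
 * address value and the surrounding function are made up for illustration;
 * in the tree this routine is normally reached via the SIOCSIFLLADDR ioctl
 * path.
 */
#if 0
static int
example_set_mac(struct ifnet *ifp)
{
        /* Locally administered example address (not a real assignment). */
        static const u_char mac[ETHER_ADDR_LEN] =
            { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        /* Fails with EINVAL if the length differs from the current sdl_alen. */
        return (if_setlladdr(ifp, mac, ETHER_ADDR_LEN));
}
#endif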

/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
        ifp->if_dname = name;
        ifp->if_dunit = unit;
        if (unit != IF_DUNIT_NONE)
                snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
        else
                strlcpy(ifp->if_xname, name, IFNAMSIZ);
}
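
/*
 * Illustrative sketch (not part of this file): how a typical device
 * driver's attach routine might allocate and name its interface.  The
 * example_softc structure, its sc_ifp field, and the attach function
 * itself are assumptions for illustration.
 */
#if 0
static int
example_attach(device_t dev, struct example_softc *sc)
{
        struct ifnet *ifp;

        ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL)
                return (ENOSPC);
        sc->sc_ifp = ifp;
        /* Physical device: derive the name from the newbus device. */
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        /* A pseudo-device would instead pass a static string and a unit. */
        return (0);
}
#endif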

int
if_printf(struct ifnet *ifp, const char * fmt, ...)
{
        va_list ap;
        int retval;

        retval = printf("%s: ", ifp->if_xname);
        va_start(ap, fmt);
        retval += vprintf(fmt, ap);
        va_end(ap);
        return (retval);
}

/*
 * When an interface is marked IFF_NEEDSGIANT, its if_start() routine cannot
 * be called without Giant.  However, we often can't acquire the Giant lock
 * at those points; instead, we defer the call to a task queue that runs
 * if_start_deferred() with Giant held.
 *
 * XXXRW: We need to make sure that the ifnet isn't fully detached until any
 * outstanding if_start_deferred() tasks that would otherwise run after the
 * free have completed.  This probably means waiting in if_detach().
 */
void
if_start(struct ifnet *ifp)
{

        NET_ASSERT_GIANT();

        if ((ifp->if_flags & IFF_NEEDSGIANT) != 0 && debug_mpsafenet != 0) {
                if (mtx_owned(&Giant))
                        (*(ifp)->if_start)(ifp);
                else
                        taskqueue_enqueue(taskqueue_swi_giant,
                            &ifp->if_starttask);
        } else
                (*(ifp)->if_start)(ifp);
}

static void
if_start_deferred(void *context, int pending)
{
        struct ifnet *ifp;

        /*
         * This code must be entered with Giant, and should never run if
         * we're not running with debug.mpsafenet.
         */
        KASSERT(debug_mpsafenet != 0, ("if_start_deferred: debug.mpsafenet"));
        GIANT_REQUIRED;

        ifp = context;
        (ifp->if_start)(ifp);
}
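
/*
 * Illustrative sketch (not part of this file): the deferral above assumes
 * that the interface's start task was initialized elsewhere (the placement
 * in the attach path is an assumption here) to point at if_start_deferred(),
 * roughly as follows.
 */
#if 0
        TASK_INIT(&ifp->if_starttask, 0, if_start_deferred, ifp);
#endif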

int
if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
{
        int active = 0;

        IF_LOCK(ifq);
        if (_IF_QFULL(ifq)) {
                _IF_DROP(ifq);
                IF_UNLOCK(ifq);
                m_freem(m);
                return (0);
        }
        if (ifp != NULL) {
                ifp->if_obytes += m->m_pkthdr.len + adjust;
                if (m->m_flags & (M_BCAST|M_MCAST))
                        ifp->if_omcasts++;
                active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
        }
        _IF_ENQUEUE(ifq, m);
        IF_UNLOCK(ifq);
        if (ifp != NULL && !active)
                if_start(ifp);
        return (1);
}
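
/*
 * Illustrative sketch (not part of this file): how an output path might
 * hand a packet off to an interface's send queue with if_handoff().  The
 * surrounding function is made up for illustration; many callers use the
 * IF_HANDOFF()/IF_HANDOFF_ADJ() wrappers, which expand to if_handoff()
 * with an adjustment of 0 or a caller-supplied value.
 */
#if 0
static int
example_output(struct ifnet *ifp, struct mbuf *m)
{

        /* Enqueue on ifp's own send queue and kick the transmitter. */
        if (!if_handoff(&ifp->if_snd, m, ifp, 0)) {
                /* The queue was full; if_handoff() already freed the mbuf. */
                return (ENOBUFS);
        }
        return (0);
}
#endif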
|
2005-06-10 16:49:24 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
if_register_com_alloc(u_char type,
|
|
|
|
if_com_alloc_t *a, if_com_free_t *f)
|
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(if_com_alloc[type] == NULL,
|
|
|
|
("if_register_com_alloc: %d already registered", type));
|
|
|
|
KASSERT(if_com_free[type] == NULL,
|
|
|
|
("if_register_com_alloc: %d free already registered", type));
|
|
|
|
|
|
|
|
if_com_alloc[type] = a;
|
|
|
|
if_com_free[type] = f;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
if_deregister_com_alloc(u_char type)
|
|
|
|
{
|
|
|
|
|
2006-06-11 22:09:28 +00:00
|
|
|
KASSERT(if_com_alloc[type] != NULL,
|
2005-06-10 16:49:24 +00:00
|
|
|
("if_deregister_com_alloc: %d not registered", type));
|
2006-06-11 22:09:28 +00:00
|
|
|
KASSERT(if_com_free[type] != NULL,
|
2005-06-10 16:49:24 +00:00
|
|
|
("if_deregister_com_alloc: %d free not registered", type));
|
|
|
|
if_com_alloc[type] = NULL;
|
|
|
|
if_com_free[type] = NULL;
|
|
|
|
}
|
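
/*
 * Illustrative sketch (not part of this file): a hypothetical link layer
 * module registering per-type common storage hooks, assuming the
 * if_com_alloc_t/if_com_free_t typedefs from if_var.h.  The "foo" names,
 * the IFT_OTHER choice, and the use of M_DEVBUF are made up for
 * illustration.
 */
#if 0
struct foo_com {
        struct ifnet *fc_ifp;
};

static void *
foo_alloc(u_char type, struct ifnet *ifp)
{
        struct foo_com *fc;

        MALLOC(fc, struct foo_com *, sizeof(*fc), M_DEVBUF,
            M_WAITOK | M_ZERO);
        fc->fc_ifp = ifp;
        return (fc);
}

static void
foo_free(void *com, u_char type)
{

        FREE(com, M_DEVBUF);
}

static void
foo_modinit(void)
{

        /* Called back from if_alloc()/if_free() for interfaces of this type. */
        if_register_com_alloc(IFT_OTHER, foo_alloc, foo_free);
}
#endif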