/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/vnet.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};
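
/*
 * lagg_setflag()/lagg_setflags() (declared below) are expected to walk
 * this table to mirror IFF_PROMISC and IFF_ALLMULTI changes on the lagg
 * interface onto every member port.
 */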

VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
static VNET_DEFINE(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					    "if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, int, caddr_t);
static void	lagg_clone_destroy(struct ifnet *);
static VNET_DEFINE(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";

static void	lagg_lladdr(struct lagg_softc *, uint8_t *);
static void	lagg_capabilities(struct lagg_softc *);
static void	lagg_port_lladdr(struct lagg_port *, uint8_t *, lagg_llqtype);
static void	lagg_port_setlladdr(void *, int);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#ifdef RATELIMIT
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
#endif
static int	lagg_ether_setmulti(struct lagg_softc *);
static int	lagg_ether_cmdmulti(struct lagg_port *, int);
static	int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static	int	lagg_setflags(struct lagg_port *, int status);
static uint64_t lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
    {
	.pr_num = LAGG_PROTO_NONE
    },
    {
	.pr_num = LAGG_PROTO_ROUNDROBIN,
	.pr_attach = lagg_rr_attach,
	.pr_start = lagg_rr_start,
	.pr_input = lagg_rr_input,
    },
    {
	.pr_num = LAGG_PROTO_FAILOVER,
	.pr_start = lagg_fail_start,
	.pr_input = lagg_fail_input,
    },
    {
	.pr_num = LAGG_PROTO_LOADBALANCE,
	.pr_attach = lagg_lb_attach,
	.pr_detach = lagg_lb_detach,
	.pr_start = lagg_lb_start,
	.pr_input = lagg_lb_input,
	.pr_addport = lagg_lb_port_create,
	.pr_delport = lagg_lb_port_destroy,
    },
    {
	.pr_num = LAGG_PROTO_LACP,
	.pr_attach = lagg_lacp_attach,
	.pr_detach = lagg_lacp_detach,
	.pr_start = lagg_lacp_start,
	.pr_input = lagg_lacp_input,
	.pr_addport = lacp_port_create,
	.pr_delport = lacp_port_destroy,
	.pr_linkstate = lacp_linkstate,
	.pr_init = lacp_init,
	.pr_stop = lacp_stop,
	.pr_lladdr = lagg_lacp_lladdr,
	.pr_request = lacp_req,
	.pr_portreq = lacp_portreq,
    },
    {
	.pr_num = LAGG_PROTO_BROADCAST,
	.pr_start = lagg_bcast_start,
	.pr_input = lagg_bcast_input,
    },
};
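
/*
 * Entries left unset in the designated initializers above are NULL; the
 * lagg_proto_*() wrappers below check for NULL before dispatching, so a
 * protocol only needs to implement the hooks it actually uses.
 */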

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0,
    "Link Aggregation");

/* Allow input on any failover links */
static VNET_DEFINE(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
static VNET_DEFINE(int, def_use_flowid) = 1;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for flowid shift */
static VNET_DEFINE(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
    &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");
static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	V_lagg_cloner = if_clone_simple(laggname, lagg_clone_create,
	    lagg_clone_destroy, 0);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	if_clone_detach(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);
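
/*
 * Module load/unload handler. On load, publish the lagg input and
 * link-state hooks (lagg_input_p, lagg_linkstate_p) consumed by the
 * generic Ethernet input path, and register for interface departure
 * events so ports are cleaned up when their interface goes away.
 */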
static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_p = lagg_input;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
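
/*
 * Thin wrappers around the lagg_protos[] dispatch table; sc_proto is used
 * as a direct index, which assumes the table entries are kept in
 * lagg_proto enumeration order.
 */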
static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_WLOCK_ASSERT(sc);

	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
	else
		LAGG_WUNLOCK(sc);
}

static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;
	struct rm_priotracker tracker;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK(sc, &tracker);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_RUNLOCK(sc, &tracker);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;
	struct rm_priotracker tracker;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_RLOCK(sc, &tracker);
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_RUNLOCK(sc, &tracker);
}
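
/*
 * Create a new lagg interface. A fresh lagg starts with no ports, an
 * all-zero MAC address and the default protocol; its address is later
 * taken over from the first port added (see lagg_port_create()).
 */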
static int
lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct lagg_softc *sc;
	struct ifnet *ifp;
	static const u_char eaddr[6];	/* 00:00:00:00:00:00 */

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		free(sc, M_DEVBUF);
		return (ENOSPC);
	}

	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	LAGG_LOCK_INIT(sc);
	SLIST_INIT(&sc->sc_ports);
	TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);

	/* Initialise pseudo media types */
	ifmedia_init(&sc->sc_media, 0, lagg_media_change,
	    lagg_media_status);
	ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

	if_initname(ifp, laggname, unit);
	ifp->if_softc = sc;
	ifp->if_transmit = lagg_transmit;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#ifdef RATELIMIT
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS | IFCAP_TXRTLMT;
#else
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
#endif

	/*
	 * Attach as an ordinary ethernet device, children will be attached
	 * as special device IFT_IEEE8023ADLAG.
	 */
	ether_ifattach(ifp, eaddr);

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();

	return (0);
}

static void
lagg_clone_destroy(struct ifnet *ifp)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_WLOCK(sc);

	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL) {
		lp->lp_detaching = LAGG_CLONE_DESTROY;
		lagg_port_destroy(lp, 1);
	}
	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_UNLOCK_ASSERT(sc);

	taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
	ifmedia_removeall(&sc->sc_media);
	ether_ifdetach(ifp);
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	LAGG_LOCK_DESTROY(sc);
	free(sc, M_DEVBUF);
}

/*
 * Set link-layer address on the lagg interface itself.
 *
 * Set noinline to be dtrace-friendly
 */
static __noinline void
lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port lp;

	if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
		return;

	LAGG_WLOCK_ASSERT(sc);
	/*
	 * Set the link layer address on the lagg interface.
	 * lagg_proto_lladdr() notifies the aggregation protocol of the
	 * MAC change. The iflladdr_event handler, which may trigger
	 * gratuitous ARPs for INET, is run from a taskqueue.
	 */
	bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	lagg_proto_lladdr(sc);

	/*
	 * Send notification request for lagg interface
	 * itself. Note that new lladdr is already set.
	 */
	bzero(&lp, sizeof(lp));
	lp.lp_ifp = sc->sc_ifp;
	lp.lp_softc = sc;

	/* Do not request lladdr change */
	lagg_port_lladdr(&lp, lladdr, LAGG_LLQTYPE_VIRT);
}
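
/*
 * Recompute the lagg interface's capabilities, enabled features and
 * hardware assists as the bitwise AND across all member ports, so only
 * features that every port supports are advertised.
 */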
static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap = ~0, ena = ~0;
	u_long hwa = ~0UL;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_WLOCK_ASSERT(sc);

	memset(&hw_tsomax, 0, sizeof(hw_tsomax));

	/* Get capabilities from the lagg ports */
	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		ena &= lp->lp_ifp->if_capenable;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap = (cap == ~0 ? 0 : cap);
	ena = (ena == ~0 ? 0 : ena);
	hwa = (hwa == ~0 ? 0 : hwa);

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_hwassist = hwa;
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}

/*
 * Enqueue interface lladdr notification.
 * If request is already queued, it is updated.
 * Whether the lladdr is actually set, or only the notification is sent,
 * is selected by @llq_type.
 *
 * Set noinline to be dtrace-friendly
 */
static __noinline void
lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr, lagg_llqtype llq_type)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct ifnet *ifp = lp->lp_ifp;
	struct lagg_llq *llq;

	LAGG_WLOCK_ASSERT(sc);

	/*
	 * Do not enqueue requests where lladdr is the same for
	 * "physical" interfaces (e.g. ports in lagg)
	 */
	if (llq_type == LAGG_LLQTYPE_PHYS &&
	    memcmp(IF_LLADDR(ifp), lladdr, ETHER_ADDR_LEN) == 0)
		return;

	/* Check to make sure it's not already queued to be changed */
	SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
		if (llq->llq_ifp == ifp) {
			/* Update lladdr, it may have changed */
			bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
			return;
		}
	}

	llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (llq == NULL)	/* XXX what to do */
		return;

	if_ref(ifp);
	llq->llq_ifp = ifp;
	llq->llq_type = llq_type;
	bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
	/* XXX: We should insert to tail */
	SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);

	taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
}

/*
 * Set the interface MAC address from a taskqueue to avoid a LOR.
 *
 * Set noinline to be dtrace-friendly
 */
static __noinline void
lagg_port_setlladdr(void *arg, int pending)
{
	struct lagg_softc *sc = (struct lagg_softc *)arg;
	struct lagg_llq *llq, *head;
	struct ifnet *ifp;

	/* Grab a local reference of the queue and remove it from the softc */
	LAGG_WLOCK(sc);
	head = SLIST_FIRST(&sc->sc_llq_head);
	SLIST_FIRST(&sc->sc_llq_head) = NULL;
	LAGG_WUNLOCK(sc);

	/*
	 * Traverse the queue and set the lladdr on each ifp. It is safe to do
	 * unlocked as we have the only reference to it.
	 */
	for (llq = head; llq != NULL; llq = head) {
		ifp = llq->llq_ifp;

		CURVNET_SET(ifp->if_vnet);

		/*
		 * Set the link layer address on the laggport interface.
		 * Note that if_setlladdr() or iflladdr_event handler
		 * may result in arp transmission / lltable updates.
		 */
		if (llq->llq_type == LAGG_LLQTYPE_PHYS)
			if_setlladdr(ifp, llq->llq_lladdr,
			    ETHER_ADDR_LEN);
		else
			EVENTHANDLER_INVOKE(iflladdr_event, ifp);
		CURVNET_RESTORE();
		head = SLIST_NEXT(llq, llq_entries);
		if_rele(ifp);
		free(llq, M_DEVBUF);
	}
}
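
/*
 * Attach an interface to a lagg as a new port: the port must match the
 * lagg MTU, has its ioctl/output routines hooked, and is inserted into
 * the port list sorted by if_index.
 */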
static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	int error, i;
	uint64_t *pval;

	LAGG_WLOCK_ASSERT(sc);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
	if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
		return (EPROTONOSUPPORT);

	/* Allow the first Ethernet member to define the MTU */
	if (SLIST_EMPTY(&sc->sc_ports))
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if_printf(sc->sc_ifp, "invalid MTU for %s\n",
		    ifp->if_xname);
		return (EINVAL);
	}

	if ((lp = malloc(sizeof(struct lagg_port),
	    M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Check if port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_DEVBUF);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_DEVBUF);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = IFT_IEEE8023ADLAG;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	if_ref(ifp);
	lp->lp_ifp = ifp;
	lp->lp_softc = sc;

	/* Save port link layer address */
	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);

	if (SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_primary = lp;
		/* First port in lagg. Update/notify lagg lladdress */
		lagg_lladdr(sc, IF_LLADDR(ifp));
	} else {

		/*
		 * Update link layer address for this port and
		 * send notifications to other subsystems.
		 */
		lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp), LAGG_LLQTYPE_PHYS);
	}

	/*
	 * Insert into the list of ports.
	 * Keep ports sorted by if_index: this keeps the configuration
	 * predictable, so that the same `ifconfig laggN create ...`
	 * command leads to the same result each time.
	 */
	SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    SLIST_NEXT(tlp, lp_entries) == NULL ||
		    SLIST_NEXT(tlp, lp_entries)->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	if (tlp != NULL)
		SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);
	/* Add multicast addresses and interface flags to this port */
	lagg_ether_cmdmulti(lp, 1);
	lagg_setflags(lp, 1);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		return (error);
	}

	return (0);
}

#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_WLOCK_ASSERT(sc);

	SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif
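
/*
 * Detach a port from its lagg: restore the interface's original type and
 * ioctl/output hooks, fold its counters into the detached totals and, if
 * it was the primary port, promote the next port and re-queue lladdr
 * updates for the remaining ports.
 */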
static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct lagg_llq *llq;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_WLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	/*
	 * Remove multicast addresses and interface flags from this port and
	 * reset the MAC address, skip if the interface is being detached.
	 */
	if (lp->lp_detaching == 0) {
		lagg_ether_cmdmulti(lp, 0);
		lagg_setflags(lp, 0);
		lagg_port_lladdr(lp, lp->lp_lladdr, LAGG_LLQTYPE_PHYS);
	}

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[ETHER_ADDR_LEN];

		if ((lp0 = SLIST_FIRST(&sc->sc_ports)) == NULL) {
			bzero(&lladdr, ETHER_ADDR_LEN);
		} else {
			bcopy(lp0->lp_lladdr,
			    lladdr, ETHER_ADDR_LEN);
		}
		if (lp->lp_detaching != LAGG_CLONE_DESTROY)
			lagg_lladdr(sc, lladdr);

		/* Mark lp0 as new primary */
		sc->sc_primary = lp0;

		/*
		 * Enqueue lladdr update/notification for each port
		 * (new primary needs update as well, to switch from
		 * old lladdr to its 'real' one).
		 */
		SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
			lagg_port_lladdr(lp_ptr, lladdr, LAGG_LLQTYPE_PHYS);
	}

	/* Remove any pending lladdr changes from the queue */
	if (lp->lp_detaching != 0) {
		SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
			if (llq->llq_ifp == ifp) {
				SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
				    llq_entries);
				if_rele(llq->llq_ifp);
				free(llq, M_DEVBUF);
				break;	/* Only appears once */
			}
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if_rele(ifp);
	free(lp, M_DEVBUF);

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}
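
/*
 * ioctl handler installed on member ports; handles lagg-specific requests
 * and capability/MTU changes and falls back to the port's original
 * if_ioctl for everything else.
 */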
static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;
	struct rm_priotracker tracker;

	/* Should be checked by the caller */
	if (ifp->if_type != IFT_IEEE8023ADLAG ||
	    (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
		goto fallback;

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_RLOCK(sc, &tracker);
		if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_RUNLOCK(sc, &tracker);
			break;
		}

		lagg_port2req(lp, rp);
		LAGG_RUNLOCK(sc, &tracker);
		break;

	case SIOCSIFCAP:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_WLOCK(sc);
		lagg_capabilities(sc);
		LAGG_WUNLOCK(sc);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}
|
|
|
|
|
2014-09-27 13:57:48 +00:00
|
|
|
/*
|
|
|
|
* Requests counter @cnt data.
|
|
|
|
*
|
|
|
|
* The counter value is calculated in the following way:
|
|
|
|
* 1) for each port, sum difference between current and "initial" measurements.
|
|
|
|
* 2) add lagg logical interface counters.
|
|
|
|
* 3) add data from detached_counters array.
|
|
|
|
*
|
|
|
|
* We also do the following things on port attach/detach:
|
|
|
|
* 1) On port attach we store all of its counters in the port_counters array.
|
|
|
|
* 2) On port detach we add the difference between the "initial" and
|
|
|
|
* current counter data to the detached_counters array.
|
|
|
|
*/
|
|
|
|
static uint64_t
|
|
|
|
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *lpifp;
|
|
|
|
struct rm_priotracker tracker;
|
|
|
|
uint64_t newval, oldval, vsum;
|
|
|
|
|
2014-09-28 08:57:07 +00:00
|
|
|
/* Revise this when we've got non-generic counters. */
|
|
|
|
KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));
|
2014-09-27 13:57:48 +00:00
|
|
|
|
|
|
|
sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
LAGG_RLOCK(sc, &tracker);
|
|
|
|
|
|
|
|
vsum = 0;
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
|
|
|
/* Value saved when the port attached */
|
2014-09-28 08:57:07 +00:00
|
|
|
oldval = lp->port_counters.val[cnt];
|
2014-09-27 13:57:48 +00:00
|
|
|
/* current value */
|
|
|
|
lpifp = lp->lp_ifp;
|
|
|
|
newval = lpifp->if_get_counter(lpifp, cnt);
|
|
|
|
/* Accumulate the difference since attach */
|
|
|
|
vsum += newval - oldval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add counter data which might be added by upper
|
|
|
|
* layer protocols operating on the logical interface.
|
|
|
|
*/
|
|
|
|
vsum += if_get_counter_default(ifp, cnt);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add counter data from detached ports' counters
|
|
|
|
*/
|
2014-09-28 08:57:07 +00:00
|
|
|
vsum += sc->detached_counters.val[cnt];
|
2014-09-27 13:57:48 +00:00
|
|
|
|
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
|
|
|
|
|
|
|
return (vsum);
|
|
|
|
}
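
The accounting described in the comment above can be sanity-checked with plain arithmetic. A self-contained toy model with made-up numbers, assuming two attached ports plus traffic left behind by detached ones:

#include <stdint.h>
#include <stdio.h>

struct toy_port {
	uint64_t hw;       /* counter as the NIC reports it now */
	uint64_t attached; /* snapshot taken when the port joined */
};

int
main(void)
{
	struct toy_port ports[2] = {
		{ .hw = 1500, .attached = 1000 },	/* contributes 500 */
		{ .hw = 300,  .attached = 200 },	/* contributes 100 */
	};
	uint64_t detached = 250;	/* from ports already removed */
	uint64_t logical = 40;		/* counted on the lagg itself */
	uint64_t vsum = 0;

	for (int i = 0; i < 2; i++)
		vsum += ports[i].hw - ports[i].attached;
	vsum += logical + detached;
	printf("lagg counter = %ju\n", (uintmax_t)vsum);	/* 890 */
	return (0);
}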
|
|
|
|
|
2012-05-03 01:41:12 +00:00
|
|
|
/*
|
|
|
|
* For direct output to child ports.
|
|
|
|
*/
|
2007-04-17 00:35:11 +00:00
|
|
|
static int
|
|
|
|
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
|
2013-04-26 12:50:32 +00:00
|
|
|
const struct sockaddr *dst, struct route *ro)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
|
|
|
struct lagg_port *lp = ifp->if_lagg;
|
|
|
|
|
|
|
|
switch (dst->sa_family) {
|
|
|
|
case pseudo_AF_HDRCMPLT:
|
|
|
|
case AF_UNSPEC:
|
2009-04-16 20:30:28 +00:00
|
|
|
return ((*lp->lp_output)(ifp, m, dst, ro));
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* drop any other frames */
|
|
|
|
m_freem(m);
|
2013-01-21 08:59:31 +00:00
|
|
|
return (ENETDOWN);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct lagg_softc *sc;
|
|
|
|
|
|
|
|
if ((lp = ifp->if_lagg) == NULL)
|
|
|
|
return;
|
2012-06-30 19:09:02 +00:00
|
|
|
/* If the ifnet is just being renamed, don't do anything. */
|
|
|
|
if (ifp->if_flags & IFF_RENAMING)
|
|
|
|
return;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-06-12 07:29:11 +00:00
|
|
|
sc = lp->lp_softc;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2017-01-30 03:04:33 +00:00
|
|
|
lp->lp_detaching = LAGG_PORT_DETACH;
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_port_destroy(lp, 1);
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
|
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2007-08-30 19:12:10 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
|
|
|
|
strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
|
|
|
|
rp->rp_prio = lp->lp_prio;
|
|
|
|
rp->rp_flags = lp->lp_flags;
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_portreq(sc, lp, &rp->rp_psc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Add protocol specific flags */
|
|
|
|
switch (sc->sc_proto) {
|
|
|
|
case LAGG_PROTO_FAILOVER:
|
|
|
|
if (lp == sc->sc_primary)
|
2007-05-02 08:58:28 +00:00
|
|
|
rp->rp_flags |= LAGG_PORT_MASTER;
|
2007-08-30 19:12:10 +00:00
|
|
|
if (lp == lagg_link_active(sc, sc->sc_primary))
|
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
|
|
|
break;
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
case LAGG_PROTO_ROUNDROBIN:
|
|
|
|
case LAGG_PROTO_LOADBALANCE:
|
2014-09-26 12:35:58 +00:00
|
|
|
case LAGG_PROTO_BROADCAST:
|
2007-04-17 00:35:11 +00:00
|
|
|
if (LAGG_PORTACTIVE(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LAGG_PROTO_LACP:
|
|
|
|
/* LACP has a different definition of active */
|
2008-03-16 19:25:30 +00:00
|
|
|
if (lacp_isactive(lp))
|
2007-04-17 00:35:11 +00:00
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
2008-03-16 19:25:30 +00:00
|
|
|
if (lacp_iscollecting(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_COLLECTING;
|
|
|
|
if (lacp_isdistributing(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_init(void *xsc)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)xsc;
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
2015-11-01 19:59:04 +00:00
|
|
|
struct lagg_port *lp;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING)
|
|
|
|
return;
|
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
ifp->if_drv_flags |= IFF_DRV_RUNNING;
|
2015-11-01 19:59:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the port lladdrs if needed.
|
|
|
|
* This might be an if_setlladdr() notification
|
|
|
|
* that the lladdr has changed.
|
|
|
|
*/
|
2007-04-17 00:35:11 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
2015-11-01 19:59:04 +00:00
|
|
|
lagg_port_lladdr(lp, IF_LLADDR(ifp), LAGG_LLQTYPE_PHYS);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_init(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_stop(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
|
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_stop(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
struct lagg_reqall *ra = (struct lagg_reqall *)data;
|
2014-10-02 20:01:13 +00:00
|
|
|
struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
|
2012-03-06 22:58:13 +00:00
|
|
|
struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct ifreq *ifr = (struct ifreq *)data;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *tpif;
|
|
|
|
struct thread *td = curthread;
|
2007-07-26 20:30:18 +00:00
|
|
|
char *buf, *outbuf;
|
|
|
|
int count, buflen, len, error = 0;
|
2013-08-29 19:35:14 +00:00
|
|
|
struct rm_priotracker tracker;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
bzero(&rpbuf, sizeof(rpbuf));
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGLAGG:
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2007-07-26 20:30:18 +00:00
|
|
|
count = 0;
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
count++;
|
|
|
|
buflen = count * sizeof(struct lagg_reqport);
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2007-07-26 20:30:18 +00:00
|
|
|
|
|
|
|
outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
|
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2007-04-17 00:35:11 +00:00
|
|
|
ra->ra_proto = sc->sc_proto;
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_request(sc, &ra->ra_psc);
|
2007-07-26 20:30:18 +00:00
|
|
|
count = 0;
|
|
|
|
buf = outbuf;
|
|
|
|
len = min(ra->ra_size, buflen);
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
|
|
|
if (len < sizeof(rpbuf))
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2007-07-26 20:30:18 +00:00
|
|
|
|
|
|
|
lagg_port2req(lp, &rpbuf);
|
|
|
|
memcpy(buf, &rpbuf, sizeof(rpbuf));
|
|
|
|
count++;
|
|
|
|
buf += sizeof(rpbuf);
|
|
|
|
len -= sizeof(rpbuf);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2007-07-26 20:30:18 +00:00
|
|
|
ra->ra_ports = count;
|
|
|
|
ra->ra_size = count * sizeof(rpbuf);
|
|
|
|
error = copyout(outbuf, ra->ra_port, ra->ra_size);
|
|
|
|
free(outbuf, M_TEMP);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGG:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
2016-02-19 06:35:53 +00:00
|
|
|
if (ra->ra_proto >= LAGG_PROTO_MAX) {
|
2014-10-02 20:01:13 +00:00
|
|
|
error = EPROTONOSUPPORT;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
LAGG_WLOCK(sc);
|
|
|
|
lagg_proto_detach(sc);
|
2015-11-01 19:59:04 +00:00
|
|
|
LAGG_UNLOCK_ASSERT(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
lagg_proto_attach(sc, ra->ra_proto);
|
|
|
|
break;
|
|
|
|
case SIOCGLAGGOPTS:
|
|
|
|
ro->ro_opts = sc->sc_opts;
|
|
|
|
if (sc->sc_proto == LAGG_PROTO_LACP) {
|
|
|
|
struct lacp_softc *lsc;
|
|
|
|
|
|
|
|
lsc = (struct lacp_softc *)sc->sc_psc;
|
|
|
|
if (lsc->lsc_debug.lsc_tx_test != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
|
|
|
|
if (lsc->lsc_debug.lsc_rx_test != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
|
|
|
|
if (lsc->lsc_strict_mode != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_STRICT;
|
2015-08-12 20:21:04 +00:00
|
|
|
if (lsc->lsc_fast_timeout != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_TIMEOUT;
|
2014-10-02 20:01:13 +00:00
|
|
|
|
|
|
|
ro->ro_active = sc->sc_active;
|
|
|
|
} else {
|
|
|
|
ro->ro_active = 0;
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
ro->ro_active += LAGG_PORTACTIVE(lp);
|
|
|
|
}
|
2016-01-23 04:18:44 +00:00
|
|
|
ro->ro_bkt = sc->sc_bkt;
|
2014-10-02 20:01:13 +00:00
|
|
|
ro->ro_flapping = sc->sc_flapping;
|
|
|
|
ro->ro_flowid_shift = sc->flowid_shift;
|
|
|
|
break;
|
|
|
|
case SIOCSLAGGOPTS:
|
2016-01-23 04:18:44 +00:00
|
|
|
if (sc->sc_proto == LAGG_PROTO_ROUNDROBIN) {
|
|
|
|
if (ro->ro_bkt == 0)
|
|
|
|
sc->sc_bkt = 1; /* Minimum 1 packet per iface. */
|
|
|
|
else
|
|
|
|
sc->sc_bkt = ro->ro_bkt;
|
|
|
|
}
|
2014-10-02 20:01:13 +00:00
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (ro->ro_opts == 0)
|
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* Set options. LACP options are stored in sc->sc_psc,
|
|
|
|
* not in sc_opts.
|
|
|
|
*/
|
|
|
|
int valid, lacp;
|
|
|
|
|
|
|
|
switch (ro->ro_opts) {
|
|
|
|
case LAGG_OPT_USE_FLOWID:
|
|
|
|
case -LAGG_OPT_USE_FLOWID:
|
|
|
|
case LAGG_OPT_FLOWIDSHIFT:
|
|
|
|
valid = 1;
|
|
|
|
lacp = 0;
|
|
|
|
break;
|
|
|
|
case LAGG_OPT_LACP_TXTEST:
|
|
|
|
case -LAGG_OPT_LACP_TXTEST:
|
|
|
|
case LAGG_OPT_LACP_RXTEST:
|
|
|
|
case -LAGG_OPT_LACP_RXTEST:
|
|
|
|
case LAGG_OPT_LACP_STRICT:
|
|
|
|
case -LAGG_OPT_LACP_STRICT:
|
2015-08-12 20:21:04 +00:00
|
|
|
case LAGG_OPT_LACP_TIMEOUT:
|
|
|
|
case -LAGG_OPT_LACP_TIMEOUT:
|
2014-10-02 20:01:13 +00:00
|
|
|
valid = lacp = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
valid = lacp = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
LAGG_WLOCK(sc);
|
2016-01-23 04:18:44 +00:00
|
|
|
|
2014-10-02 20:01:13 +00:00
|
|
|
if (valid == 0 ||
|
|
|
|
(lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
|
|
|
|
/* Invalid combination of options specified. */
|
|
|
|
error = EINVAL;
|
|
|
|
LAGG_WUNLOCK(sc);
|
|
|
|
break; /* Return from SIOCSLAGGOPTS. */
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Store new options into sc->sc_opts except for
|
|
|
|
* FLOWIDSHIFT and LACP options.
|
|
|
|
*/
|
|
|
|
if (lacp == 0) {
|
|
|
|
if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
|
|
|
|
sc->flowid_shift = ro->ro_flowid_shift;
|
|
|
|
else if (ro->ro_opts > 0)
|
|
|
|
sc->sc_opts |= ro->ro_opts;
|
|
|
|
else
|
|
|
|
sc->sc_opts &= ~ro->ro_opts;
|
|
|
|
} else {
|
|
|
|
struct lacp_softc *lsc;
|
2015-08-12 20:21:04 +00:00
|
|
|
struct lacp_port *lp;
|
2014-10-02 20:01:13 +00:00
|
|
|
|
|
|
|
lsc = (struct lacp_softc *)sc->sc_psc;
|
|
|
|
|
|
|
|
switch (ro->ro_opts) {
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_TXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_tx_test = 1;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case -LAGG_OPT_LACP_TXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_tx_test = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_RXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_rx_test = 1;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case -LAGG_OPT_LACP_RXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_rx_test = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_STRICT:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_strict_mode = 1;
|
2014-10-01 21:37:32 +00:00
|
|
|
break;
|
2014-10-02 20:01:13 +00:00
|
|
|
case -LAGG_OPT_LACP_STRICT:
|
|
|
|
lsc->lsc_strict_mode = 0;
|
2014-10-01 21:37:32 +00:00
|
|
|
break;
|
2015-08-12 20:21:04 +00:00
|
|
|
case LAGG_OPT_LACP_TIMEOUT:
|
|
|
|
LACP_LOCK(lsc);
|
|
|
|
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
|
|
|
|
lp->lp_state |= LACP_STATE_TIMEOUT;
|
|
|
|
LACP_UNLOCK(lsc);
|
|
|
|
lsc->lsc_fast_timeout = 1;
|
|
|
|
break;
|
|
|
|
case -LAGG_OPT_LACP_TIMEOUT:
|
|
|
|
LACP_LOCK(lsc);
|
|
|
|
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
|
|
|
|
lp->lp_state &= ~LACP_STATE_TIMEOUT;
|
|
|
|
LACP_UNLOCK(lsc);
|
|
|
|
lsc->lsc_fast_timeout = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
}
|
|
|
|
}
|
2014-10-02 20:01:13 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2012-03-06 22:58:13 +00:00
|
|
|
case SIOCGLAGGFLAGS:
|
2015-03-11 16:02:24 +00:00
|
|
|
rf->rf_flags = 0;
|
|
|
|
LAGG_RLOCK(sc, &tracker);
|
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L2)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL2;
|
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L3)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL3;
|
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L4)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL4;
|
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2012-03-06 22:58:13 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGHASH:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
LAGG_WLOCK(sc);
|
2015-03-11 16:02:24 +00:00
|
|
|
sc->sc_flags = 0;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL2)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L2;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL3)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L3;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL4)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L4;
|
2012-03-06 22:58:13 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
|
|
|
break;
|
2007-04-17 00:35:11 +00:00
|
|
|
case SIOCGLAGGPORT:
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2007-04-17 00:35:11 +00:00
|
|
|
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
|
2007-06-12 07:29:11 +00:00
|
|
|
lp->lp_softc != sc) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = ENOENT;
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
lagg_port2req(lp, rp);
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGPORT:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2014-10-17 09:08:44 +00:00
|
|
|
#ifdef INET6
|
|
|
|
/*
|
|
|
|
* A laggport interface should not have inet6 addresses
|
|
|
|
* because two interfaces with a valid link-local
|
|
|
|
* scope zone must not be merged in any form. This
|
|
|
|
* restriction is needed to prevent violation of
|
|
|
|
* the link-local scope zone. Attempts to add a laggport
|
|
|
|
* interface which has inet6 addresses trigger
|
|
|
|
* removal of all inet6 addresses on the member
|
|
|
|
* interface.
|
|
|
|
*/
|
|
|
|
if (in6ifa_llaonifp(tpif)) {
|
|
|
|
in6_ifdetach(tpif);
|
|
|
|
if_printf(sc->sc_ifp,
|
|
|
|
"IPv6 addresses on %s have been removed "
|
|
|
|
"before adding it as a member to prevent "
|
|
|
|
"IPv6 address scope violation.\n",
|
|
|
|
tpif->if_xname);
|
|
|
|
}
|
|
|
|
#endif
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
error = lagg_port_create(sc, tpif);
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGDELPORT:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
|
2007-06-12 07:29:11 +00:00
|
|
|
lp->lp_softc != sc) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = ENOENT;
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = lagg_port_destroy(lp, 1);
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSIFFLAGS:
|
|
|
|
/* Set flags on ports too */
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
|
|
|
lagg_setflags(lp, 1);
|
|
|
|
}
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
if (!(ifp->if_flags & IFF_UP) &&
|
|
|
|
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
|
|
|
|
/*
|
|
|
|
* If interface is marked down and it is running,
|
|
|
|
* then stop and disable it.
|
|
|
|
*/
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_stop(sc);
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
} else if ((ifp->if_flags & IFF_UP) &&
|
|
|
|
!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
|
|
|
|
/*
|
|
|
|
* If interface is marked up and it is stopped, then
|
|
|
|
* start it.
|
|
|
|
*/
|
|
|
|
(*ifp->if_init)(sc);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
error = lagg_ether_setmulti(sc);
|
2007-07-26 20:30:18 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSIFMEDIA:
|
|
|
|
case SIOCGIFMEDIA:
|
|
|
|
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
|
|
|
|
break;
|
2007-07-30 20:17:22 +00:00
|
|
|
|
|
|
|
case SIOCSIFCAP:
|
2016-04-06 04:50:28 +00:00
|
|
|
case SIOCSIFMTU:
|
2016-04-06 04:58:20 +00:00
|
|
|
/* Do not allow the MTU or caps to be directly changed */
|
|
|
|
error = EINVAL;
|
2016-04-06 04:50:28 +00:00
|
|
|
break;
|
2007-07-30 20:17:22 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
default:
|
|
|
|
error = ether_ioctl(ifp, cmd, data);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
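
One detail of SIOCSLAGGOPTS worth calling out: an option is enabled by passing its positive value in ro_opts and disabled by passing its negation, which is why the case labels above come in LAGG_OPT_X / -LAGG_OPT_X pairs. A hedged userland sketch ("lagg0" is a placeholder, error handling abbreviated):

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_lagg.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct lagg_reqopts ro;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		return (1);
	memset(&ro, 0, sizeof(ro));
	strlcpy(ro.ro_ifname, "lagg0", sizeof(ro.ro_ifname));	/* placeholder */
	ro.ro_opts = LAGG_OPT_LACP_STRICT;	/* positive value sets... */
	if (ioctl(s, SIOCSLAGGOPTS, &ro) != 0)
		return (1);
	ro.ro_opts = -LAGG_OPT_LACP_STRICT;	/* ...negated value clears */
	if (ioctl(s, SIOCSLAGGOPTS, &ro) != 0)
		return (1);
	close(s);
	return (0);
}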
|
|
|
|
|
2017-01-18 13:31:17 +00:00
|
|
|
#ifdef RATELIMIT
|
|
|
|
static int
|
|
|
|
lagg_snd_tag_alloc(struct ifnet *ifp,
|
|
|
|
union if_snd_tag_alloc_params *params,
|
|
|
|
struct m_snd_tag **ppmt)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct lagg_lb *lb;
|
|
|
|
uint32_t p;
|
|
|
|
|
|
|
|
switch (sc->sc_proto) {
|
|
|
|
case LAGG_PROTO_FAILOVER:
|
|
|
|
lp = lagg_link_active(sc, sc->sc_primary);
|
|
|
|
break;
|
|
|
|
case LAGG_PROTO_LOADBALANCE:
|
|
|
|
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
|
|
|
params->hdr.flowtype == M_HASHTYPE_NONE)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
p = params->hdr.flowid >> sc->flowid_shift;
|
|
|
|
p %= sc->sc_count;
|
|
|
|
lb = (struct lagg_lb *)sc->sc_psc;
|
|
|
|
lp = lb->lb_ports[p];
|
|
|
|
lp = lagg_link_active(sc, lp);
|
|
|
|
break;
|
|
|
|
case LAGG_PROTO_LACP:
|
|
|
|
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
|
|
|
|
params->hdr.flowtype == M_HASHTYPE_NONE)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
lp = lacp_select_tx_port_by_hash(sc, params->hdr.flowid);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
}
|
|
|
|
if (lp == NULL)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
ifp = lp->lp_ifp;
|
|
|
|
if (ifp == NULL || ifp->if_snd_tag_alloc == NULL ||
|
|
|
|
(ifp->if_capenable & IFCAP_TXRTLMT) == 0)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
|
|
|
|
/* forward allocation request */
|
|
|
|
return (ifp->if_snd_tag_alloc(ifp, params, ppmt));
|
|
|
|
}
|
|
|
|
#endif
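
From the application side, the path into lagg_snd_tag_alloc() begins with the SO_MAX_PACING_RATE socket option. A minimal sketch, assuming the rate argument is a 32-bit bytes-per-second value as described in setsockopt(2):

#include <sys/socket.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	uint32_t rate = 10 * 1000 * 1000;	/* bytes/s, roughly 80 Mbit/s */
	int s;

	if ((s = socket(AF_INET, SOCK_STREAM, 0)) < 0)
		return (1);
	/*
	 * Ask the stack to pace this connection; on a lagg the resulting
	 * snd_tag allocation is forwarded to the selected member port.
	 */
	if (setsockopt(s, SOL_SOCKET, SO_MAX_PACING_RATE, &rate,
	    sizeof(rate)) != 0)
		perror("setsockopt");
	close(s);
	return (0);
}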
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
static int
|
|
|
|
lagg_ether_setmulti(struct lagg_softc *sc)
|
|
|
|
{
|
2007-05-07 00:18:56 +00:00
|
|
|
struct lagg_port *lp;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-07 00:18:56 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2007-05-07 09:53:02 +00:00
|
|
|
/* First, remove any existing filter entries. */
|
|
|
|
lagg_ether_cmdmulti(lp, 0);
|
|
|
|
/* Copy all addresses from the lagg interface to the port. */
|
2007-05-07 00:18:56 +00:00
|
|
|
lagg_ether_cmdmulti(lp, 1);
|
2007-05-07 09:53:02 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_ether_cmdmulti(struct lagg_port *lp, int set)
|
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2007-05-07 00:18:56 +00:00
|
|
|
struct ifnet *ifp = lp->lp_ifp;
|
2007-06-12 07:29:11 +00:00
|
|
|
struct ifnet *scifp = sc->sc_ifp;
|
2007-05-07 00:18:56 +00:00
|
|
|
struct lagg_mc *mc;
|
2014-08-04 00:58:12 +00:00
|
|
|
struct ifmultiaddr *ifma;
|
2007-05-07 00:18:56 +00:00
|
|
|
int error;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-05-07 00:18:56 +00:00
|
|
|
if (set) {
|
2014-08-04 00:58:12 +00:00
|
|
|
IF_ADDR_WLOCK(scifp);
|
2007-06-12 07:29:11 +00:00
|
|
|
TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
|
2007-05-07 00:18:56 +00:00
|
|
|
if (ifma->ifma_addr->sa_family != AF_LINK)
|
|
|
|
continue;
|
|
|
|
mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
|
2014-08-04 00:58:12 +00:00
|
|
|
if (mc == NULL) {
|
|
|
|
IF_ADDR_WUNLOCK(scifp);
|
2007-05-07 00:18:56 +00:00
|
|
|
return (ENOMEM);
|
2014-08-04 00:58:12 +00:00
|
|
|
}
|
|
|
|
bcopy(ifma->ifma_addr, &mc->mc_addr,
|
|
|
|
ifma->ifma_addr->sa_len);
|
|
|
|
mc->mc_addr.sdl_index = ifp->if_index;
|
|
|
|
mc->mc_ifma = NULL;
|
2007-05-07 00:18:56 +00:00
|
|
|
SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
|
|
|
|
}
|
2014-08-04 00:58:12 +00:00
|
|
|
IF_ADDR_WUNLOCK(scifp);
|
|
|
|
SLIST_FOREACH (mc, &lp->lp_mc_head, mc_entries) {
|
|
|
|
error = if_addmulti(ifp,
|
|
|
|
(struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
2007-05-07 00:18:56 +00:00
|
|
|
} else {
|
|
|
|
while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
|
|
|
|
SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
|
2017-01-30 03:04:33 +00:00
|
|
|
if (mc->mc_ifma && lp->lp_detaching == 0)
|
2014-08-04 00:58:12 +00:00
|
|
|
if_delmulti_ifma(mc->mc_ifma);
|
2007-05-07 00:18:56 +00:00
|
|
|
free(mc, M_DEVBUF);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle a ref-counted flag that should be set on the lagg port as well */
|
|
|
|
static int
|
|
|
|
lagg_setflag(struct lagg_port *lp, int flag, int status,
|
2014-09-26 12:35:58 +00:00
|
|
|
int (*func)(struct ifnet *, int))
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
|
|
|
struct ifnet *scifp = sc->sc_ifp;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct ifnet *ifp = lp->lp_ifp;
|
|
|
|
int error;
|
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-06-12 07:29:11 +00:00
|
|
|
status = status ? (scifp->if_flags & flag) : 0;
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Now "status" contains the flag value or 0 */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* See if the recorded port's status is different from what
|
|
|
|
* we want it to be. If it is, flip it. We record the port's
|
|
|
|
* status in lp_ifflags so that we won't clear a port flag
|
|
|
|
* we haven't set. In fact, we don't clear or set the port's
|
|
|
|
* flags directly, but get or release references to them.
|
|
|
|
* That's why we can be sure that the recorded flags still
|
|
|
|
* agree with the actual port flags.
|
|
|
|
*/
|
|
|
|
if (status != (lp->lp_ifflags & flag)) {
|
|
|
|
error = (*func)(ifp, status);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
lp->lp_ifflags &= ~flag;
|
|
|
|
lp->lp_ifflags |= status;
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
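
The flip logic above reduces to a small state machine over two flag words. A standalone sketch with an illustrative flag value; the real code never writes the port's flags directly but takes or releases references through callbacks such as ifpromisc():

#include <stdio.h>

#define TOY_PROMISC 0x100	/* illustrative value only */

/* Mirror one flag from the lagg's flags onto a port, as above. */
static void
mirror_flag(int lagg_flags, int *port_flags, int flag, int status)
{
	status = status ? (lagg_flags & flag) : 0;
	if (status != (*port_flags & flag)) {
		/* the kernel calls (*func)(ifp, status) here */
		*port_flags &= ~flag;
		*port_flags |= status;
	}
}

int
main(void)
{
	int port_flags = 0;

	mirror_flag(TOY_PROMISC, &port_flags, TOY_PROMISC, 1);
	printf("promisc on port: %d\n", (port_flags & TOY_PROMISC) != 0);
	mirror_flag(TOY_PROMISC, &port_flags, TOY_PROMISC, 0);
	printf("promisc on port: %d\n", (port_flags & TOY_PROMISC) != 0);
	return (0);
}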
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle IFF_* flags that require certain changes on the lagg port
|
|
|
|
* if "status" is true, update ports flags respective to the lagg
|
|
|
|
* if "status" is false, forcedly clear the flags set on port.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
lagg_setflags(struct lagg_port *lp, int status)
|
|
|
|
{
|
|
|
|
int error, i;
|
2007-06-12 07:29:11 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
for (i = 0; lagg_pflags[i].flag; i++) {
|
|
|
|
error = lagg_setflag(lp, lagg_pflags[i].flag,
|
|
|
|
status, lagg_pflags[i].func);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2012-09-20 10:05:10 +00:00
|
|
|
static int
|
|
|
|
lagg_transmit(struct ifnet *ifp, struct mbuf *m)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
2012-09-20 10:05:10 +00:00
|
|
|
int error, len, mcast;
|
2013-08-29 19:35:14 +00:00
|
|
|
struct rm_priotracker tracker;
|
2012-09-20 10:05:10 +00:00
|
|
|
|
|
|
|
len = m->m_pkthdr.len;
|
|
|
|
mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2008-09-18 20:56:35 +00:00
|
|
|
/* We need a Tx algorithm and at least one port */
|
|
|
|
if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2012-09-20 10:05:10 +00:00
|
|
|
m_freem(m);
|
2014-09-28 07:43:38 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
|
2012-09-20 10:05:10 +00:00
|
|
|
return (ENXIO);
|
2008-09-18 20:56:35 +00:00
|
|
|
}
|
|
|
|
|
2012-09-20 10:05:10 +00:00
|
|
|
ETHER_BPF_MTAP(ifp, m);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
error = lagg_proto_start(sc, m);
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2012-09-20 10:05:10 +00:00
|
|
|
|
2014-09-27 13:57:48 +00:00
|
|
|
if (error != 0)
|
2014-09-28 07:43:38 +00:00
|
|
|
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
|
2012-09-20 10:05:10 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The ifp->if_qflush entry point for lagg(4) is a no-op.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
lagg_qflush(struct ifnet *ifp __unused)
|
|
|
|
{
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct mbuf *
|
|
|
|
lagg_input(struct ifnet *ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp = ifp->if_lagg;
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
|
|
|
struct ifnet *scifp = sc->sc_ifp;
|
2013-08-29 19:35:14 +00:00
|
|
|
struct rm_priotracker tracker;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2007-06-12 07:29:11 +00:00
|
|
|
if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
|
2007-05-03 08:56:20 +00:00
|
|
|
(lp->lp_flags & LAGG_PORT_DISABLED) ||
|
2007-04-17 00:35:11 +00:00
|
|
|
sc->sc_proto == LAGG_PROTO_NONE) {
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2007-04-17 00:35:11 +00:00
|
|
|
m_freem(m);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2007-10-20 02:43:23 +00:00
|
|
|
ETHER_BPF_MTAP(scifp, m);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2015-03-26 23:40:22 +00:00
|
|
|
if (lp->lp_detaching != 0) {
|
|
|
|
m_freem(m);
|
|
|
|
m = NULL;
|
|
|
|
} else
|
|
|
|
m = lagg_proto_input(sc, lp, m);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
if (m != NULL) {
|
2007-12-05 00:42:28 +00:00
|
|
|
if (scifp->if_flags & IFF_MONITOR) {
|
|
|
|
m_freem(m);
|
|
|
|
m = NULL;
|
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2007-04-17 00:35:11 +00:00
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_media_change(struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
|
|
|
|
if (sc->sc_ifflags & IFF_DEBUG)
|
|
|
|
printf("%s\n", __func__);
|
|
|
|
|
|
|
|
/* Ignore */
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
struct lagg_port *lp;
|
2013-08-29 19:35:14 +00:00
|
|
|
struct rm_priotracker tracker;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
imr->ifm_status = IFM_AVALID;
|
|
|
|
imr->ifm_active = IFM_ETHER | IFM_AUTO;
|
|
|
|
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RLOCK(sc, &tracker);
|
2007-05-07 09:53:02 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
|
|
|
if (LAGG_PORTACTIVE(lp))
|
|
|
|
imr->ifm_status |= IFM_ACTIVE;
|
|
|
|
}
|
2013-08-29 19:35:14 +00:00
|
|
|
LAGG_RUNLOCK(sc, &tracker);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
2007-11-25 06:30:46 +00:00
|
|
|
static void
|
|
|
|
lagg_linkstate(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
int new_link = LINK_STATE_DOWN;
|
2008-12-17 21:04:43 +00:00
|
|
|
uint64_t speed;
|
2007-11-25 06:30:46 +00:00
|
|
|
|
|
|
|
/* Our link is considered up if at least one of our ports is active */
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2014-09-26 13:02:29 +00:00
|
|
|
if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
|
2007-11-25 06:30:46 +00:00
|
|
|
new_link = LINK_STATE_UP;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-12-17 14:41:30 +00:00
|
|
|
if_link_state_change(sc->sc_ifp, new_link);
|
2008-12-17 20:58:10 +00:00
|
|
|
|
|
|
|
/* Update if_baudrate to reflect the max possible speed */
|
|
|
|
switch (sc->sc_proto) {
|
|
|
|
case LAGG_PROTO_FAILOVER:
|
2008-12-17 21:04:43 +00:00
|
|
|
sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
|
|
|
|
sc->sc_primary->lp_ifp->if_baudrate : 0;
|
2008-12-17 20:58:10 +00:00
|
|
|
break;
|
|
|
|
case LAGG_PROTO_ROUNDROBIN:
|
|
|
|
case LAGG_PROTO_LOADBALANCE:
|
2014-09-26 12:35:58 +00:00
|
|
|
case LAGG_PROTO_BROADCAST:
|
2008-12-17 21:04:43 +00:00
|
|
|
speed = 0;
|
2008-12-17 20:58:10 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
speed += lp->lp_ifp->if_baudrate;
|
|
|
|
sc->sc_ifp->if_baudrate = speed;
|
|
|
|
break;
|
|
|
|
case LAGG_PROTO_LACP:
|
|
|
|
/* LACP updates if_baudrate itself */
|
|
|
|
break;
|
|
|
|
}
|
2007-11-25 06:30:46 +00:00
|
|
|
}
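
A quick worked example of the if_baudrate update above, with two hypothetical 1 Gbit/s members: the aggregating protocols advertise the sum, failover only the primary's rate.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t member[2] = { 1000000000ULL, 1000000000ULL };	/* 2 x 1 Gbit/s */
	uint64_t sum = member[0] + member[1];

	printf("roundrobin/loadbalance/broadcast: %ju bit/s\n", (uintmax_t)sum);
	printf("failover (primary only):          %ju bit/s\n",
	    (uintmax_t)member[0]);
	return (0);
}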
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
static void
|
2015-12-17 14:41:30 +00:00
|
|
|
lagg_port_state(struct ifnet *ifp, int state)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
|
|
|
struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
|
|
|
|
struct lagg_softc *sc = NULL;
|
|
|
|
|
|
|
|
if (lp != NULL)
|
2007-06-12 07:29:11 +00:00
|
|
|
sc = lp->lp_softc;
|
2007-04-17 00:35:11 +00:00
|
|
|
if (sc == NULL)
|
|
|
|
return;
|
|
|
|
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WLOCK(sc);
|
2007-11-25 06:30:46 +00:00
|
|
|
lagg_linkstate(sc);
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_linkstate(sc, lp);
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
2015-12-17 14:41:30 +00:00
|
|
|
struct lagg_port *
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp_next, *rval = NULL;
|
|
|
|
// int new_link = LINK_STATE_DOWN;
|
|
|
|
|
2007-05-18 23:38:35 +00:00
|
|
|
LAGG_RLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
/*
|
|
|
|
* Search for a port which reports an active link state.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (lp == NULL)
|
|
|
|
goto search;
|
|
|
|
if (LAGG_PORTACTIVE(lp)) {
|
|
|
|
rval = lp;
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
|
|
|
|
LAGG_PORTACTIVE(lp_next)) {
|
|
|
|
rval = lp_next;
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
|
|
|
|
search:
|
|
|
|
SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
|
|
|
if (LAGG_PORTACTIVE(lp_next)) {
|
|
|
|
rval = lp_next;
|
|
|
|
goto found;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
found:
|
|
|
|
if (rval != NULL) {
|
|
|
|
/*
|
|
|
|
* The IEEE 802.1D standard assumes that a lagg with
|
|
|
|
* multiple ports is always full duplex. This is valid
|
|
|
|
* for load sharing laggs and if at least two links
|
|
|
|
* are active. Unfortunately, checking the latter would
|
|
|
|
* be too expensive at this point.
|
|
|
|
XXX
|
|
|
|
if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
|
|
|
|
(sc->sc_count > 1))
|
|
|
|
new_link = LINK_STATE_FULL_DUPLEX;
|
|
|
|
else
|
|
|
|
new_link = rval->lp_link_state;
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
|
|
|
return (rval);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
|
2008-11-22 07:35:45 +00:00
|
|
|
return (ifp->if_transmit)(ifp, m);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Simple round robin aggregation
|
|
|
|
*/
|
2014-09-26 08:42:32 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_rr_attach(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
|
2007-10-12 03:03:16 +00:00
|
|
|
sc->sc_seq = 0;
|
2016-01-23 04:18:44 +00:00
|
|
|
sc->sc_bkt_count = sc->sc_bkt;
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
|
|
|
|
{
|
2007-10-12 03:03:16 +00:00
|
|
|
struct lagg_port *lp;
|
|
|
|
uint32_t p;
|
|
|
|
|
2016-01-23 04:18:44 +00:00
|
|
|
if (sc->sc_bkt_count == 0 && sc->sc_bkt > 0)
|
|
|
|
sc->sc_bkt_count = sc->sc_bkt;
|
|
|
|
|
|
|
|
if (sc->sc_bkt > 0) {
|
|
|
|
atomic_subtract_int(&sc->sc_bkt_count, 1);
|
|
|
|
if (atomic_cmpset_int(&sc->sc_bkt_count, 0, sc->sc_bkt))
|
|
|
|
p = atomic_fetchadd_32(&sc->sc_seq, 1);
|
|
|
|
else
|
|
|
|
p = sc->sc_seq;
|
|
|
|
} else
|
|
|
|
p = atomic_fetchadd_32(&sc->sc_seq, 1);
|
|
|
|
|
2007-10-12 03:03:16 +00:00
|
|
|
p %= sc->sc_count;
|
|
|
|
lp = SLIST_FIRST(&sc->sc_ports);
|
2016-01-23 04:18:44 +00:00
|
|
|
|
2007-10-12 03:03:16 +00:00
|
|
|
while (p--)
|
|
|
|
lp = SLIST_NEXT(lp, lp_entries);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-10-12 03:03:16 +00:00
|
|
|
/*
|
|
|
|
* Check the port's link state. This will return the next active
|
|
|
|
* port if the link is down or the port is NULL.
|
|
|
|
*/
|
|
|
|
if ((lp = lagg_link_active(sc, lp)) == NULL) {
|
|
|
|
m_freem(m);
|
2013-06-17 19:31:03 +00:00
|
|
|
return (ENETDOWN);
|
2007-10-12 03:03:16 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Send mbuf */
|
2007-10-12 03:03:16 +00:00
|
|
|
return (lagg_enqueue(lp->lp_ifp, m));
|
2007-04-17 00:35:11 +00:00
|
|
|
}
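
The bucket logic above is easier to see with concrete numbers. A single-threaded rendering of the same policy (the kernel uses atomics because concurrent transmitters race on sc_seq and sc_bkt_count); with three ports and sc_bkt = 2 it emits ports 0,0,1,1,2,2,0,0:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t seq = 0;
	const uint32_t nports = 3, bkt = 2;	/* 2 packets per port per turn */
	uint32_t bkt_count = bkt;

	for (int pkt = 0; pkt < 8; pkt++) {
		uint32_t p;

		if (--bkt_count == 0) {
			p = seq++ % nports;	/* bucket drained: next port */
			bkt_count = bkt;
		} else
			p = seq % nports;	/* stay on the current port */
		printf("packet %d -> port %u\n", pkt, p);
	}
	return (0);
}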
|
|
|
|
|
|
|
|
static struct mbuf *
|
|
|
|
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
|
|
|
|
/* Just pass in the packet to our lagg device */
|
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
2014-09-18 02:12:48 +00:00
|
|
|
/*
|
|
|
|
* Broadcast mode
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
|
|
|
|
{
|
2014-09-26 12:35:58 +00:00
|
|
|
int active_ports = 0;
|
|
|
|
int errors = 0;
|
|
|
|
int ret;
|
|
|
|
struct lagg_port *lp, *last = NULL;
|
|
|
|
struct mbuf *m0;
|
|
|
|
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
|
|
|
if (!LAGG_PORTACTIVE(lp))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
active_ports++;
|
|
|
|
|
|
|
|
if (last != NULL) {
|
|
|
|
m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
|
|
|
|
if (m0 == NULL) {
|
|
|
|
ret = ENOBUFS;
|
|
|
|
errors++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = lagg_enqueue(last->lp_ifp, m0);
|
|
|
|
if (ret != 0)
|
|
|
|
errors++;
|
|
|
|
}
|
|
|
|
last = lp;
|
|
|
|
}
|
|
|
|
if (last == NULL) {
|
|
|
|
m_freem(m);
|
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
if ((last = lagg_link_active(sc, last)) == NULL) {
|
|
|
|
m_freem(m);
|
|
|
|
return (ENETDOWN);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = lagg_enqueue(last->lp_ifp, m);
|
|
|
|
if (ret != 0)
|
|
|
|
errors++;
|
|
|
|
|
|
|
|
if (errors == 0)
|
|
|
|
return (ret);
|
|
|
|
|
|
|
|
return (0);
|
2014-09-18 02:12:48 +00:00
|
|
|
}
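
Design note: the loop above copies the packet for every active port except the last one, which consumes the original mbuf, saving one m_copym() per broadcast. The same pattern over plain strings, with strdup() standing in for m_copym():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const int nports = 3;
	char *m = strdup("payload");	/* the original "mbuf" */

	/* Every port but the last transmits a copy. */
	for (int i = 0; i < nports - 1; i++) {
		char *m0 = strdup(m);	/* stands in for m_copym() */
		printf("port %d sends a copy: %s\n", i, m0);
		free(m0);		/* the "driver" consumes it */
	}
	/* The last port consumes the original. */
	printf("port %d sends the original: %s\n", nports - 1, m);
	free(m);
	return (0);
}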
|
|
|
|
|
|
|
|
static struct mbuf*
|
2014-09-26 12:35:58 +00:00
|
|
|
lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
|
2014-09-18 02:12:48 +00:00
|
|
|
{
|
2014-09-26 12:35:58 +00:00
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
2014-09-18 02:12:48 +00:00
|
|
|
|
2014-09-26 12:35:58 +00:00
|
|
|
/* Just pass in the packet to our lagg device */
|
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
return (m);
|
2014-09-18 02:12:48 +00:00
|
|
|
}
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/*
|
|
|
|
* Active failover
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
|
|
|
|
/* Use the master port if active or the next available port */
|
2007-10-12 03:03:16 +00:00
|
|
|
if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
|
|
|
|
m_freem(m);
|
2013-06-17 19:31:03 +00:00
|
|
|
return (ENETDOWN);
|
2007-10-12 03:03:16 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Send mbuf */
|
|
|
|
return (lagg_enqueue(lp->lp_ifp, m));
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct mbuf *
|
|
|
|
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
struct lagg_port *tmp_tp;
|
|
|
|
|
2014-10-01 21:37:32 +00:00
|
|
|
if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
|
2007-04-17 00:35:11 +00:00
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
2007-12-18 02:12:03 +00:00
|
|
|
if (!LAGG_PORTACTIVE(sc->sc_primary)) {
|
|
|
|
tmp_tp = lagg_link_active(sc, sc->sc_primary);
|
2007-04-17 00:35:11 +00:00
|
|
|
/*
|
2016-05-03 18:05:43 +00:00
|
|
|
* If tmp_tp is null, we've received a packet when all
|
2007-04-17 00:35:11 +00:00
|
|
|
* our links are down. Weird, but process it anyway.
|
|
|
|
*/
|
|
|
|
if ((tmp_tp == NULL || tmp_tp == lp)) {
|
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
m_freem(m);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Loadbalancing
|
|
|
|
*/
|
2014-09-26 08:42:32 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_lb_attach(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct lagg_lb *lb;
|
|
|
|
|
2014-09-26 08:42:32 +00:00
|
|
|
lb = malloc(sizeof(struct lagg_lb), M_DEVBUF, M_WAITOK | M_ZERO);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
|
|
|
|
|
2015-03-11 16:02:24 +00:00
|
|
|
lb->lb_key = m_ether_tcpip_hash_init();
|
2014-09-26 12:35:58 +00:00
|
|
|
sc->sc_psc = lb;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
lagg_lb_port_create(lp);
|
|
|
|
}
|
|
|
|
|
2014-09-26 07:12:40 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_lb_detach(struct lagg_softc *sc)
|
|
|
|
{
|
2014-09-26 07:12:40 +00:00
|
|
|
struct lagg_lb *lb;
|
|
|
|
|
|
|
|
lb = (struct lagg_lb *)sc->sc_psc;
|
2014-09-26 08:42:32 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
if (lb != NULL)
|
|
|
|
free(lb, M_DEVBUF);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
|
|
|
|
{
|
|
|
|
struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
|
|
|
|
struct lagg_port *lp_next;
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
bzero(&lb->lb_ports, sizeof(lb->lb_ports));
|
|
|
|
SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
|
|
|
|
if (lp_next == lp)
|
|
|
|
continue;
|
|
|
|
if (i >= LAGG_MAX_PORTS)
|
|
|
|
return (EINVAL);
|
|
|
|
if (sc->sc_ifflags & IFF_DEBUG)
|
|
|
|
printf("%s: port %s at index %d\n",
|
2014-09-26 13:02:29 +00:00
|
|
|
sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
|
2007-04-17 00:35:11 +00:00
|
|
|
lb->lb_ports[i++] = lp_next;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_lb_port_create(struct lagg_port *lp)
|
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2007-04-17 00:35:11 +00:00
|
|
|
return (lagg_lb_porttable(sc, NULL));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_lb_port_destroy(struct lagg_port *lp)
|
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_lb_porttable(sc, lp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
|
|
|
|
struct lagg_port *lp = NULL;
|
|
|
|
uint32_t p = 0;
|
|
|
|
|
2014-12-01 11:45:24 +00:00
|
|
|
if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
|
|
|
|
M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
|
2013-12-30 01:32:17 +00:00
|
|
|
p = m->m_pkthdr.flowid >> sc->flowid_shift;
|
2009-04-30 14:25:44 +00:00
|
|
|
else
|
2015-03-11 16:02:24 +00:00
|
|
|
p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
|
2008-07-04 05:33:58 +00:00
|
|
|
p %= sc->sc_count;
|
|
|
|
lp = lb->lb_ports[p];
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the port's link state. This will return the next active
|
|
|
|
* port if the link is down or the port is NULL.
|
|
|
|
*/
|
2007-10-12 03:03:16 +00:00
|
|
|
if ((lp = lagg_link_active(sc, lp)) == NULL) {
|
|
|
|
m_freem(m);
|
2013-06-17 19:31:03 +00:00
|
|
|
return (ENETDOWN);
|
2007-10-12 03:03:16 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Send mbuf */
|
|
|
|
return (lagg_enqueue(lp->lp_ifp, m));
|
|
|
|
}
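
With LAGG_OPT_USE_FLOWID set, the selection above is pure integer arithmetic on the NIC-supplied hash. A worked example with a made-up flowid:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t flowid = 0x9e3779b9;	/* hypothetical RSS hash */
	uint32_t shift = 16, nports = 4;

	/* Drop the low bits, then reduce modulo the port count. */
	uint32_t p = (flowid >> shift) % nports;
	printf("flowid %#x -> port %u\n", flowid, p);	/* port 3 */
	return (0);
}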
|
|
|
|
|
|
|
|
static struct mbuf *
|
|
|
|
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
|
|
|
|
/* Just pass in the packet to our lagg device */
|
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
|
|
|
|
return (m);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* 802.3ad LACP
|
|
|
|
*/
|
2014-09-26 08:42:32 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_lacp_attach(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
|
2014-09-26 08:42:32 +00:00
|
|
|
lacp_attach(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
lacp_port_create(lp);
|
|
|
|
}
|
|
|
|
|
2014-09-26 07:12:40 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_lacp_detach(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
2014-09-26 08:42:32 +00:00
|
|
|
void *psc;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
lacp_port_destroy(lp);
|
|
|
|
|
2014-09-26 08:42:32 +00:00
|
|
|
psc = sc->sc_psc;
|
|
|
|
sc->sc_psc = NULL;
|
2007-05-15 07:41:46 +00:00
|
|
|
LAGG_WUNLOCK(sc);
|
2014-09-26 08:42:32 +00:00
|
|
|
|
|
|
|
lacp_detach(psc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_lacp_lladdr(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
|
|
|
|
/* purge all the lacp ports */
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
lacp_port_destroy(lp);
|
|
|
|
|
|
|
|
/* add them back in */
|
|
|
|
SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
|
|
|
lacp_port_create(lp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
|
|
|
|
lp = lacp_select_tx_port(sc, m);
|
2007-10-12 03:03:16 +00:00
|
|
|
if (lp == NULL) {
|
|
|
|
m_freem(m);
|
2013-01-21 08:59:31 +00:00
|
|
|
return (ENETDOWN);
|
2007-10-12 03:03:16 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Send mbuf */
|
|
|
|
return (lagg_enqueue(lp->lp_ifp, m));
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct mbuf *
|
|
|
|
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
struct ether_header *eh;
|
|
|
|
u_short etype;
|
|
|
|
|
|
|
|
eh = mtod(m, struct ether_header *);
|
|
|
|
etype = ntohs(eh->ether_type);
|
|
|
|
|
|
|
|
/* Tap off LACP control messages */
|
2011-04-30 20:34:52 +00:00
|
|
|
if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
|
2007-12-31 01:16:35 +00:00
|
|
|
m = lacp_input(lp, m);
|
|
|
|
if (m == NULL)
|
|
|
|
return (NULL);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the port is not collecting or not in the active aggregator then
|
|
|
|
* free and return.
|
|
|
|
*/
|
2008-03-16 19:25:30 +00:00
|
|
|
if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
|
2007-04-17 00:35:11 +00:00
|
|
|
m_freem(m);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
m->m_pkthdr.rcvif = ifp;
|
|
|
|
return (m);
|
|
|
|
}
|
2013-04-15 13:00:42 +00:00
|
|
|
|