/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net/infiniband.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif

#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

#ifdef INET6
/*
 * XXX: declared here to avoid pulling in many inet6-related headers;
 * this should be generalized.
 */
extern	void	nd6_setmtu(struct ifnet *);
#endif

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
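
/*
 * A sketch of the locking model, as suggested by the macros above: sc_sx
 * is the softc-wide configuration lock, and paths that walk or modify the
 * port list take LAGG_XLOCK()/LAGG_XUNLOCK() around the traversal, as the
 * vlan event handlers below do.
 */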

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};
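
/*
 * Wrapper send tag: 'com' is the tag visible to the stack and carries the
 * lagg ifp, while 'tag' references the tag allocated from the underlying
 * port. On an ifp mismatch (e.g. after a failover) the transmit path is
 * expected to fail with EAGAIN so the IP output path can allocate a fresh
 * tag against the new port.
 */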
struct lagg_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

VNET_DEFINE(SLIST_HEAD(__trhead, lagg_softc), lagg_list); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					"if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, char *, size_t,
		    struct ifc_data *, struct ifnet **);
static int	lagg_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");

static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_init(void *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
static int	lagg_snd_tag_modify(struct m_snd_tag *,
		    union if_snd_tag_modify_params *);
static int	lagg_snd_tag_query(struct m_snd_tag *,
		    union if_snd_tag_query_params *);
static void	lagg_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
static void	lagg_ratelimit_query(struct ifnet *,
		    struct if_ratelimit_query_results *);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static void	lagg_setcaps(struct lagg_port *, int cap, int cap2);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t	lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
static int	lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_bcast_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* lagg protocol table */
static const struct lagg_proto {
	lagg_proto	pr_num;
	void		(*pr_attach)(struct lagg_softc *);
	void		(*pr_detach)(struct lagg_softc *);
	int		(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *	(*pr_input)(struct lagg_softc *, struct lagg_port *,
			    struct mbuf *);
	int		(*pr_addport)(struct lagg_port *);
	void		(*pr_delport)(struct lagg_port *);
	void		(*pr_linkstate)(struct lagg_port *);
	void		(*pr_init)(struct lagg_softc *);
	void		(*pr_stop)(struct lagg_softc *);
	void		(*pr_lladdr)(struct lagg_softc *);
	void		(*pr_request)(struct lagg_softc *, void *);
	void		(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
    {
	.pr_num = LAGG_PROTO_NONE
    },
    {
	.pr_num = LAGG_PROTO_ROUNDROBIN,
	.pr_attach = lagg_rr_attach,
	.pr_start = lagg_rr_start,
	.pr_input = lagg_rr_input,
    },
    {
	.pr_num = LAGG_PROTO_FAILOVER,
	.pr_start = lagg_fail_start,
	.pr_input = lagg_fail_input,
    },
    {
	.pr_num = LAGG_PROTO_LOADBALANCE,
	.pr_attach = lagg_lb_attach,
	.pr_detach = lagg_lb_detach,
	.pr_start = lagg_lb_start,
	.pr_input = lagg_lb_input,
	.pr_addport = lagg_lb_port_create,
	.pr_delport = lagg_lb_port_destroy,
    },
    {
	.pr_num = LAGG_PROTO_LACP,
	.pr_attach = lagg_lacp_attach,
	.pr_detach = lagg_lacp_detach,
	.pr_start = lagg_lacp_start,
	.pr_input = lagg_lacp_input,
	.pr_addport = lacp_port_create,
	.pr_delport = lacp_port_destroy,
	.pr_linkstate = lacp_linkstate,
	.pr_init = lacp_init,
	.pr_stop = lacp_stop,
	.pr_lladdr = lagg_lacp_lladdr,
	.pr_request = lacp_req,
	.pr_portreq = lacp_portreq,
    },
    {
	.pr_num = LAGG_PROTO_BROADCAST,
	.pr_start = lagg_bcast_start,
	.pr_input = lagg_bcast_input,
    },
};

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for using numa */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define	V_def_use_numa	VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa, CTLFLAG_RWTUN,
    &VNET_NAME(def_use_numa), 0,
    "Use numa to steer flows");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift, CTLFLAG_RWTUN,
    &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");
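
/*
 * The knobs above land under the net.link.lagg sysctl tree, e.g.
 * (a usage sketch):
 *
 *	sysctl net.link.lagg.failover_rx_all=1
 *	sysctl net.link.lagg.default_use_flowid=1
 *
 * The default_* values are only consulted when a lagg interface is
 * created; existing interfaces keep their per-interface settings.
 */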

static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	SLIST_INIT(&V_lagg_list);
	struct if_clone_addreq req = {
		.create_f = lagg_clone_create,
		.destroy_f = lagg_clone_destroy,
		.flags = IFC_F_AUTOUNIT,
	};
	V_lagg_cloner = ifc_attach_cloner(laggname, &req);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);
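
/*
 * Each vnet carries its own lagg list and cloner. The uninit hook runs at
 * SI_SUB_INIT_IF so that cloned lagg interfaces are destroyed as part of a
 * top-to-bottom vnet teardown rather than before the protocols above them.
 */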

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_ethernet_p = lagg_input_ethernet;
		lagg_input_infiniband_p = lagg_input_infiniband;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_ethernet_p = NULL;
		lagg_input_infiniband_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);
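
/*
 * Protocol dispatch: lagg_protos[] is indexed directly by sc->sc_proto,
 * so the table order is expected to match the lagg_proto enumeration.
 * Only pr_start and pr_input are called unconditionally; every other
 * hook is optional and NULL-checked by the wrappers below.
 */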

static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}

static int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}
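
/*
 * A new lagg instance is created through the generic interface cloner,
 * e.g. (a usage sketch; see ifconfig(8) and lagg(4) for the full syntax):
 *
 *	ifconfig lagg0 create
 *	ifconfig lagg0 laggproto lacp laggport ix0 laggport ix1 up
 */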

static int
lagg_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct iflaggparam iflp;
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int if_type;
	int error;
	static const uint8_t eaddr[LAGG_ADDR_LEN];

	if (ifd->params != NULL) {
		error = ifc_copyin(ifd, &iflp, sizeof(iflp));
		if (error)
			return (error);

		switch (iflp.lagg_type) {
		case LAGG_TYPE_ETHERNET:
			if_type = IFT_ETHER;
			break;
		case LAGG_TYPE_INFINIBAND:
			if_type = IFT_INFINIBAND;
			break;
		default:
			return (EINVAL);
		}
	} else {
		if_type = IFT_ETHER;
	}

	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK|M_ZERO);
	ifp = sc->sc_ifp = if_alloc(if_type);
	if (ifp == NULL) {
		free(sc, M_LAGG);
		return (ENOSPC);
	}
	LAGG_SX_INIT(sc);

	mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	if (V_def_use_numa)
		sc->sc_opts |= LAGG_OPT_USE_NUMA;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2|MBUF_HASHFLAG_L3|MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	CK_SLIST_INIT(&sc->sc_ports);

	switch (if_type) {
	case IFT_ETHER:
		/* Initialise pseudo media types */
		ifmedia_init(&sc->sc_media, 0, lagg_media_change,
		    lagg_media_status);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_ethernet;
		break;
	case IFT_INFINIBAND:
		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_infiniband;
		break;
	default:
		break;
	}
	ifp->if_softc = sc;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
|
2019-08-27 00:01:56 +00:00
|
|
|
#if defined(KERN_TLS) || defined(RATELIMIT)
|
2017-01-18 13:31:17 +00:00
|
|
|
ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
|
2019-08-01 14:17:31 +00:00
|
|
|
ifp->if_ratelimit_query = lagg_ratelimit_query;
|
2017-01-18 13:31:17 +00:00
|
|
|
#endif
|
2019-01-31 21:35:37 +00:00
|
|
|
ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/*
|
2011-11-11 22:57:52 +00:00
|
|
|
* Attach as an ordinary ethernet device, children will be attached
|
2020-10-22 09:47:12 +00:00
|
|
|
* as special device IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
|
2007-04-17 00:35:11 +00:00
|
|
|
*/
|
2020-10-22 09:47:12 +00:00
|
|
|
switch (if_type) {
|
|
|
|
case IFT_ETHER:
|
|
|
|
ether_ifattach(ifp, eaddr);
|
|
|
|
break;
|
|
|
|
case IFT_INFINIBAND:
|
2020-12-29 16:34:01 +00:00
|
|
|
infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
|
2020-10-22 09:47:12 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2010-02-06 13:49:35 +00:00
|
|
|
sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
|
|
|
|
lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
|
|
|
|
sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
|
|
|
|
lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Insert into the global list of laggs */
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_LOCK();
|
|
|
|
SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
|
|
|
|
LAGG_LIST_UNLOCK();
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2022-09-22 12:30:09 +00:00
|
|
|
*ifpp = ifp;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
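/*
 * Illustrative userland sketch (not part of the driver): create a
 * "lagg0" interface with the generic interface-cloning ioctl, which is
 * what `ifconfig lagg0 create` does and what ultimately drives
 * lagg_clone_create() above.  Assumes stock FreeBSD headers.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	int s;

	if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "lagg0", sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCIFCREATE, &ifr) == -1)
		err(1, "SIOCIFCREATE");
	close(s);
	return (0);
}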
|
|
|
|
|
2022-09-22 12:30:09 +00:00
|
|
|
static int
|
|
|
|
lagg_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
|
|
|
sc->sc_destroying = 1;
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_stop(sc);
|
|
|
|
ifp->if_flags &= ~IFF_UP;
|
|
|
|
|
2010-02-06 13:49:35 +00:00
|
|
|
EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
|
|
|
|
EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Shutdown and remove lagg ports */
|
2018-05-24 23:21:23 +00:00
|
|
|
while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_port_destroy(lp, 1);
|
2017-05-02 19:09:11 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Unhook the aggregation protocol */
|
2014-09-26 11:01:04 +00:00
|
|
|
lagg_proto_detach(sc);
|
2017-05-05 16:51:53 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
switch (ifp->if_type) {
|
|
|
|
case IFT_ETHER:
|
|
|
|
ifmedia_removeall(&sc->sc_media);
|
|
|
|
ether_ifdetach(ifp);
|
|
|
|
break;
|
|
|
|
case IFT_INFINIBAND:
|
|
|
|
infiniband_ifdetach(ifp);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2011-11-11 22:57:52 +00:00
|
|
|
if_free(ifp);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_LOCK();
|
|
|
|
SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
|
|
|
|
LAGG_LIST_UNLOCK();
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
mtx_destroy(&sc->sc_mtx);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_SX_DESTROY(sc);
|
2019-03-28 21:00:54 +00:00
|
|
|
free(sc, M_LAGG);
|
2022-09-22 12:30:09 +00:00
|
|
|
|
|
|
|
return (0);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
2007-07-30 20:17:22 +00:00
|
|
|
static void
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_capabilities(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
2022-07-28 14:36:22 +00:00
|
|
|
int cap, cap2, ena, ena2, pena, pena2;
|
2017-05-26 20:15:33 +00:00
|
|
|
uint64_t hwa;
|
2014-09-22 08:27:27 +00:00
|
|
|
struct ifnet_hw_tsomax hw_tsomax;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-26 20:15:33 +00:00
|
|
|
/* Get common enabled capabilities for the lagg ports */
|
2022-07-28 14:36:22 +00:00
|
|
|
ena = ena2 = ~0;
|
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2017-05-26 20:15:33 +00:00
|
|
|
ena &= lp->lp_ifp->if_capenable;
|
2022-07-28 14:36:22 +00:00
|
|
|
ena2 &= lp->lp_ifp->if_capenable2;
|
|
|
|
}
|
|
|
|
if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
|
|
|
|
ena = ena2 = 0;
|
2014-09-22 08:27:27 +00:00
|
|
|
|
2017-05-26 20:15:33 +00:00
|
|
|
/*
|
|
|
|
* Apply common enabled capabilities back to the lagg ports.
|
|
|
|
* May require several iterations if they are dependent.
|
|
|
|
*/
|
|
|
|
do {
|
|
|
|
pena = ena;
|
2022-07-28 14:36:22 +00:00
|
|
|
pena2 = ena2;
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2022-07-28 14:36:22 +00:00
|
|
|
lagg_setcaps(lp, ena, ena2);
|
2017-05-26 20:15:33 +00:00
|
|
|
ena &= lp->lp_ifp->if_capenable;
|
2022-07-28 14:36:22 +00:00
|
|
|
ena2 &= lp->lp_ifp->if_capenable2;
|
2017-05-26 20:15:33 +00:00
|
|
|
}
|
2022-07-28 14:36:22 +00:00
|
|
|
} while (pena != ena || pena2 != ena2);
|
2017-05-26 20:15:33 +00:00
|
|
|
|
|
|
|
/* Get other capabilities from the lagg ports */
|
2022-07-28 14:36:22 +00:00
|
|
|
cap = cap2 = ~0;
|
2017-05-26 20:15:33 +00:00
|
|
|
hwa = ~(uint64_t)0;
|
|
|
|
memset(&hw_tsomax, 0, sizeof(hw_tsomax));
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2007-07-30 20:17:22 +00:00
|
|
|
cap &= lp->lp_ifp->if_capabilities;
|
2022-07-28 14:36:22 +00:00
|
|
|
cap2 &= lp->lp_ifp->if_capabilities2;
|
2008-12-16 22:16:34 +00:00
|
|
|
hwa &= lp->lp_ifp->if_hwassist;
|
2014-09-22 08:27:27 +00:00
|
|
|
if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
|
2007-07-30 20:17:22 +00:00
|
|
|
}
|
2022-07-28 14:36:22 +00:00
|
|
|
if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
|
|
|
|
cap = cap2 = hwa = 0;
|
2007-07-30 20:17:22 +00:00
|
|
|
|
|
|
|
if (sc->sc_ifp->if_capabilities != cap ||
|
2008-12-16 22:16:34 +00:00
|
|
|
sc->sc_ifp->if_capenable != ena ||
|
2022-07-28 14:36:22 +00:00
|
|
|
sc->sc_ifp->if_capenable2 != ena2 ||
|
2014-04-14 20:34:48 +00:00
|
|
|
sc->sc_ifp->if_hwassist != hwa ||
|
2014-09-22 08:27:27 +00:00
|
|
|
if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
|
2007-07-30 20:17:22 +00:00
|
|
|
sc->sc_ifp->if_capabilities = cap;
|
2022-07-28 14:36:22 +00:00
|
|
|
sc->sc_ifp->if_capabilities2 = cap2;
|
2007-07-30 20:17:22 +00:00
|
|
|
sc->sc_ifp->if_capenable = ena;
|
2022-07-28 14:36:22 +00:00
|
|
|
sc->sc_ifp->if_capenable2 = ena2;
|
2008-12-16 22:16:34 +00:00
|
|
|
sc->sc_ifp->if_hwassist = hwa;
|
2007-07-30 20:17:22 +00:00
|
|
|
getmicrotime(&sc->sc_ifp->if_lastchange);
|
|
|
|
|
|
|
|
if (sc->sc_ifflags & IFF_DEBUG)
|
|
|
|
if_printf(sc->sc_ifp,
|
|
|
|
"capabilities 0x%08x enabled 0x%08x\n", cap, ena);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
}
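/*
 * Standalone toy model (userland C, made-up masks) of the fixed-point
 * capability intersection above: re-applying the common set to a port
 * (via lagg_setcaps() in the real code) can shrink that port's mask
 * further, so the loop iterates until the set stops changing.
 */
#include <stdio.h>

int
main(void)
{
	unsigned caps[3] = { 0x0f, 0x0d, 0x07 };	/* per-port capenable */
	unsigned ena = ~0u, pena;
	int i;

	do {
		pena = ena;
		for (i = 0; i < 3; i++) {
			caps[i] &= ena;		/* stand-in for lagg_setcaps() */
			ena &= caps[i];
		}
	} while (pena != ena);
	printf("common capenable: 0x%x\n", ena);	/* prints 0x5 */
	return (0);
}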
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc_ptr;
|
2014-09-26 12:42:06 +00:00
|
|
|
struct lagg_port *lp, *tlp;
|
2018-10-30 09:53:57 +00:00
|
|
|
struct ifreq ifr;
|
|
|
|
int error, i, oldmtu;
|
2020-10-22 09:47:12 +00:00
|
|
|
int if_type;
|
2014-09-27 13:57:48 +00:00
|
|
|
uint64_t *pval;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2018-10-30 09:53:57 +00:00
|
|
|
if (sc->sc_ifp == ifp) {
|
|
|
|
if_printf(sc->sc_ifp,
|
|
|
|
"cannot add a lagg to itself as a port\n");
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
2020-08-13 22:06:27 +00:00
|
|
|
if (sc->sc_destroying == 1)
|
|
|
|
return (ENXIO);
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Limit the maximal number of lagg ports */
|
|
|
|
if (sc->sc_count >= LAGG_MAX_PORTS)
|
|
|
|
return (ENOSPC);
|
|
|
|
|
|
|
|
/* Check if port has already been associated to a lagg */
|
2012-05-28 12:13:04 +00:00
|
|
|
if (ifp->if_lagg != NULL) {
|
|
|
|
/* Port is already in the current lagg? */
|
|
|
|
lp = (struct lagg_port *)ifp->if_lagg;
|
|
|
|
if (lp->lp_softc == sc)
|
|
|
|
return (EEXIST);
|
2007-04-17 00:35:11 +00:00
|
|
|
return (EBUSY);
|
2012-05-28 12:13:04 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
switch (sc->sc_ifp->if_type) {
|
|
|
|
case IFT_ETHER:
|
|
|
|
/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
|
|
|
|
if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
|
|
|
|
return (EPROTONOSUPPORT);
|
|
|
|
if_type = IFT_IEEE8023ADLAG;
|
|
|
|
break;
|
|
|
|
case IFT_INFINIBAND:
|
|
|
|
/* XXX Disallow non-infiniband interfaces */
|
|
|
|
if (ifp->if_type != IFT_INFINIBAND)
|
|
|
|
return (EPROTONOSUPPORT);
|
|
|
|
if_type = IFT_INFINIBANDLAG;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-07-30 20:17:22 +00:00
|
|
|
/* Allow the first Ethernet member to define the MTU */
|
2018-10-30 09:53:57 +00:00
|
|
|
oldmtu = -1;
|
|
|
|
if (CK_SLIST_EMPTY(&sc->sc_ports)) {
|
2007-07-30 20:17:22 +00:00
|
|
|
sc->sc_ifp->if_mtu = ifp->if_mtu;
|
2018-10-30 09:53:57 +00:00
|
|
|
} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
|
|
|
|
if (ifp->if_ioctl == NULL) {
|
|
|
|
if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
|
|
|
|
ifp->if_xname);
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
oldmtu = ifp->if_mtu;
|
|
|
|
strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
|
|
|
|
ifr.ifr_mtu = sc->sc_ifp->if_mtu;
|
|
|
|
error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
|
|
|
|
if (error != 0) {
|
|
|
|
if_printf(sc->sc_ifp, "invalid MTU for %s\n",
|
|
|
|
ifp->if_xname);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
ifr.ifr_mtu = oldmtu;
|
2007-07-30 20:17:22 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 21:00:54 +00:00
|
|
|
lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK|M_ZERO);
|
2017-05-02 19:09:11 +00:00
|
|
|
lp->lp_softc = sc;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Check if port is a stacked lagg */
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_LOCK();
|
|
|
|
SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
|
2007-04-17 00:35:11 +00:00
|
|
|
if (ifp == sc_ptr->sc_ifp) {
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_UNLOCK();
|
2019-03-28 21:00:54 +00:00
|
|
|
free(lp, M_LAGG);
|
2018-10-30 09:53:57 +00:00
|
|
|
if (oldmtu != -1)
|
|
|
|
(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
|
|
|
|
(caddr_t)&ifr);
|
2007-04-17 00:35:11 +00:00
|
|
|
return (EINVAL);
|
2010-01-08 16:44:33 +00:00
|
|
|
/* XXX disable stacking for the moment, it's untested */
|
|
|
|
#ifdef LAGG_PORT_STACKING
|
2007-04-17 00:35:11 +00:00
|
|
|
lp->lp_flags |= LAGG_PORT_STACK;
|
|
|
|
if (lagg_port_checkstacking(sc_ptr) >=
|
|
|
|
LAGG_MAX_STACKING) {
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_UNLOCK();
|
2019-03-28 21:00:54 +00:00
|
|
|
free(lp, M_LAGG);
|
2018-10-30 09:53:57 +00:00
|
|
|
if (oldmtu != -1)
|
|
|
|
(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
|
|
|
|
(caddr_t)&ifr);
|
2007-04-17 00:35:11 +00:00
|
|
|
return (E2BIG);
|
|
|
|
}
|
2010-01-08 16:44:33 +00:00
|
|
|
#endif
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
}
|
2014-10-01 21:37:32 +00:00
|
|
|
LAGG_LIST_UNLOCK();
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
if_ref(ifp);
|
|
|
|
lp->lp_ifp = ifp;
|
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
lp->lp_ifcapenable = ifp->if_capenable;
|
2018-05-24 23:21:23 +00:00
|
|
|
if (CK_SLIST_EMPTY(&sc->sc_ports)) {
|
2020-10-22 09:47:12 +00:00
|
|
|
bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
lagg_proto_lladdr(sc);
|
|
|
|
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
|
|
|
|
} else {
|
2020-10-22 09:47:12 +00:00
|
|
|
if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
}
|
|
|
|
lagg_setflags(lp, 1);
|
|
|
|
|
2018-05-24 23:21:23 +00:00
|
|
|
if (CK_SLIST_EMPTY(&sc->sc_ports))
|
2017-05-02 19:09:11 +00:00
|
|
|
sc->sc_primary = lp;
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Change the interface type */
|
|
|
|
lp->lp_iftype = ifp->if_type;
|
2020-10-22 09:47:12 +00:00
|
|
|
ifp->if_type = if_type;
|
2007-04-17 00:35:11 +00:00
|
|
|
ifp->if_lagg = lp;
|
|
|
|
lp->lp_ioctl = ifp->if_ioctl;
|
|
|
|
ifp->if_ioctl = lagg_port_ioctl;
|
|
|
|
lp->lp_output = ifp->if_output;
|
|
|
|
ifp->if_output = lagg_port_output;
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
/* Read port counters */
|
|
|
|
pval = lp->port_counters.val;
|
|
|
|
for (i = 0; i < IFCOUNTERS; i++, pval++)
|
|
|
|
*pval = ifp->if_get_counter(ifp, i);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2015-01-17 11:32:09 +00:00
|
|
|
/*
|
|
|
|
* Insert into the list of ports.
|
|
|
|
	 * Keep ports sorted by if_index so that the configuration is
	 * predictable and repeated `ifconfig laggN create ...` runs
	 * lead to the same result each time.
|
|
|
|
*/
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
|
2014-09-26 12:42:06 +00:00
|
|
|
if (tlp->lp_ifp->if_index < ifp->if_index && (
|
2018-05-24 23:21:23 +00:00
|
|
|
CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
|
|
|
|
((struct lagg_port*)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
|
2014-09-26 12:42:06 +00:00
|
|
|
ifp->if_index))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (tlp != NULL)
|
2018-05-24 23:21:23 +00:00
|
|
|
CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
|
2014-09-26 12:42:06 +00:00
|
|
|
else
|
2018-05-24 23:21:23 +00:00
|
|
|
CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
|
2007-04-17 00:35:11 +00:00
|
|
|
sc->sc_count++;
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
lagg_setmulti(lp);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
if ((error = lagg_proto_addport(sc, lp)) != 0) {
|
|
|
|
/* Remove the port, without calling pr_delport. */
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_port_destroy(lp, 0);
|
2018-10-30 09:53:57 +00:00
|
|
|
if (oldmtu != -1)
|
|
|
|
(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
|
2007-04-17 00:35:11 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
/* Update lagg capabilities */
|
|
|
|
lagg_capabilities(sc);
|
|
|
|
lagg_linkstate(sc);
|
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
return (0);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
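/*
 * Hedged userland sketch (not part of the driver) of attaching a member:
 * SIOCSLAGGPORT, handled in lagg_ioctl() below, ends up here in
 * lagg_port_create().  Field names follow the lagg_reqport usage in this
 * file; "lagg0" and "em0" are example interface names.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_lagg.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct lagg_reqport rp;
	int s;

	if ((s = socket(AF_LOCAL, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");
	memset(&rp, 0, sizeof(rp));
	strlcpy(rp.rp_ifname, "lagg0", sizeof(rp.rp_ifname));
	strlcpy(rp.rp_portname, "em0", sizeof(rp.rp_portname));
	if (ioctl(s, SIOCSLAGGPORT, &rp) == -1)
		err(1, "SIOCSLAGGPORT");
	close(s);
	return (0);
}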
|
|
|
|
|
2010-01-08 16:44:33 +00:00
|
|
|
#ifdef LAGG_PORT_STACKING
|
2007-04-17 00:35:11 +00:00
|
|
|
static int
|
|
|
|
lagg_port_checkstacking(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc_ptr;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
int m = 0;
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_SXLOCK_ASSERT(sc);
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2007-04-17 00:35:11 +00:00
|
|
|
if (lp->lp_flags & LAGG_PORT_STACK) {
|
|
|
|
sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
|
|
|
|
m = MAX(m, lagg_port_checkstacking(sc_ptr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (m + 1);
|
|
|
|
}
|
2010-01-08 16:44:33 +00:00
|
|
|
#endif
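/*
 * Toy standalone model (userland C, hypothetical tree) of the recursive
 * depth bound computed by the (currently disabled) lagg_port_checkstacking()
 * above: each level contributes one, and the caller compares the result
 * against LAGG_MAX_STACKING.
 */
#include <stdio.h>

#define MAXCHILD	4

struct node {
	struct node	*child[MAXCHILD];	/* stacked laggs below us */
};

static int
depth(const struct node *n)
{
	int d, i, m = 0;

	for (i = 0; i < MAXCHILD; i++) {
		if (n->child[i] == NULL)
			continue;
		d = depth(n->child[i]);
		if (d > m)
			m = d;
	}
	return (m + 1);
}

int
main(void)
{
	struct node leaf = {{ NULL }};
	struct node mid = {{ &leaf }};
	struct node top = {{ &mid }};

	printf("stacking depth: %d\n", depth(&top));	/* prints 3 */
	return (0);
}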
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2018-05-14 20:06:49 +00:00
|
|
|
static void
|
|
|
|
lagg_port_destroy_cb(epoch_context_t ec)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
|
|
|
|
lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
|
|
|
|
ifp = lp->lp_ifp;
|
|
|
|
|
|
|
|
if_rele(ifp);
|
2019-03-28 21:00:54 +00:00
|
|
|
free(lp, M_LAGG);
|
2018-05-14 20:06:49 +00:00
|
|
|
}
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
static int
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_port_destroy(struct lagg_port *lp, int rundelport)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2015-10-07 06:32:34 +00:00
|
|
|
struct lagg_port *lp_ptr, *lp0;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct ifnet *ifp = lp->lp_ifp;
|
2014-09-27 13:57:48 +00:00
|
|
|
uint64_t *pval, vdiff;
|
|
|
|
int i;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2018-05-14 20:06:49 +00:00
|
|
|
if (rundelport)
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_delport(sc, lp);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
if (lp->lp_detaching == 0)
|
|
|
|
lagg_clrmulti(lp);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Restore interface */
|
|
|
|
ifp->if_type = lp->lp_iftype;
|
|
|
|
ifp->if_ioctl = lp->lp_ioctl;
|
|
|
|
ifp->if_output = lp->lp_output;
|
|
|
|
ifp->if_lagg = NULL;
|
|
|
|
|
2014-09-27 13:57:48 +00:00
|
|
|
/* Update detached port counters */
|
|
|
|
pval = lp->port_counters.val;
|
2014-10-01 11:23:54 +00:00
|
|
|
for (i = 0; i < IFCOUNTERS; i++, pval++) {
|
2014-09-27 13:57:48 +00:00
|
|
|
vdiff = ifp->if_get_counter(ifp, i) - *pval;
|
2014-09-28 08:57:07 +00:00
|
|
|
sc->detached_counters.val[i] += vdiff;
|
2014-09-27 13:57:48 +00:00
|
|
|
}
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Finally, remove the port from the lagg */
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
|
2007-04-17 00:35:11 +00:00
|
|
|
sc->sc_count--;
|
|
|
|
|
|
|
|
/* Update the primary interface */
|
|
|
|
if (lp == sc->sc_primary) {
|
2020-10-22 09:47:12 +00:00
|
|
|
uint8_t lladdr[LAGG_ADDR_LEN];
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2018-05-24 23:21:23 +00:00
|
|
|
if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
|
2020-10-22 09:47:12 +00:00
|
|
|
bzero(&lladdr, LAGG_ADDR_LEN);
|
2017-05-02 19:09:11 +00:00
|
|
|
else
|
2020-10-22 09:47:12 +00:00
|
|
|
bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
|
2015-11-01 19:59:04 +00:00
|
|
|
sc->sc_primary = lp0;
|
2017-05-02 19:09:11 +00:00
|
|
|
if (sc->sc_destroying == 0) {
|
2020-10-22 09:47:12 +00:00
|
|
|
bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
lagg_proto_lladdr(sc);
|
|
|
|
EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
|
2015-11-01 19:59:04 +00:00
|
|
|
|
2021-08-17 14:23:50 +00:00
|
|
|
/*
|
|
|
|
* Update lladdr for each port (new primary needs update
|
|
|
|
* as well, to switch from old lladdr to its 'real' one).
|
|
|
|
* We can skip this if the lagg is being destroyed.
|
|
|
|
*/
|
|
|
|
CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
|
|
|
|
if_setlladdr(lp_ptr->lp_ifp, lladdr,
|
|
|
|
lp_ptr->lp_ifp->if_addrlen);
|
|
|
|
}
|
2018-05-14 20:06:49 +00:00
|
|
|
}
|
2007-05-07 00:35:15 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
if (lp->lp_ifflags)
|
|
|
|
if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
if (lp->lp_detaching == 0) {
|
|
|
|
lagg_setflags(lp, 0);
|
2022-07-28 14:36:22 +00:00
|
|
|
lagg_setcaps(lp, lp->lp_ifcapenable, lp->lp_ifcapenable2);
|
2020-10-22 09:47:12 +00:00
|
|
|
if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
}
|
|
|
|
|
2018-05-14 20:06:49 +00:00
|
|
|
/*
|
|
|
|
	 * free the port and release its ifnet reference after a grace period has
|
|
|
|
* elapsed.
|
|
|
|
*/
|
2020-01-15 06:05:20 +00:00
|
|
|
NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
|
2007-04-17 00:35:11 +00:00
|
|
|
/* Update lagg capabilities */
|
2007-07-30 20:17:22 +00:00
|
|
|
lagg_capabilities(sc);
|
2007-11-25 06:30:46 +00:00
|
|
|
lagg_linkstate(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
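/*
 * Fragment-style sketch (not standalone; kernel headers abbreviated) of
 * the unlink-then-defer pattern used by lagg_port_destroy() and
 * lagg_port_destroy_cb() above.  "struct obj" and M_TEMP are stand-ins
 * for the real lagg_port and M_LAGG; NET_EPOCH_CALL comes from the net
 * epoch machinery the driver already uses.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/epoch.h>

struct obj {
	int			o_data;
	struct epoch_context	o_epoch_ctx;
};

static void
obj_free_cb(epoch_context_t ec)
{
	struct obj *o = __containerof(ec, struct obj, o_epoch_ctx);

	/* Safe now: every epoch reader that could see 'o' has exited. */
	free(o, M_TEMP);
}

static void
obj_retire(struct obj *o)
{
	/* 1) Unlink 'o' from all reader-visible lists first, then ... */
	/* 2) ... defer the free until current epoch readers drain. */
	NET_EPOCH_CALL(obj_free_cb, &o->o_epoch_ctx);
}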
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
|
|
|
|
{
|
2020-12-08 16:36:46 +00:00
|
|
|
struct epoch_tracker et;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct lagg_reqport *rp = (struct lagg_reqport *)data;
|
|
|
|
struct lagg_softc *sc;
|
|
|
|
struct lagg_port *lp = NULL;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
/* Should be checked by the caller */
|
2020-10-22 09:47:12 +00:00
|
|
|
switch (ifp->if_type) {
|
|
|
|
case IFT_IEEE8023ADLAG:
|
|
|
|
case IFT_INFINIBANDLAG:
|
|
|
|
if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
|
|
|
|
goto fallback;
|
|
|
|
break;
|
|
|
|
default:
|
2007-04-17 00:35:11 +00:00
|
|
|
goto fallback;
|
2020-10-22 09:47:12 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGLAGGPORT:
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
|
|
|
ifunit(rp->rp_portname) != ifp) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2007-07-26 20:30:18 +00:00
|
|
|
if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = ENOENT;
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
lagg_port2req(lp, rp);
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2007-07-30 20:17:22 +00:00
|
|
|
|
|
|
|
case SIOCSIFCAP:
|
2022-07-28 14:36:22 +00:00
|
|
|
case SIOCSIFCAPNV:
|
2007-07-30 20:17:22 +00:00
|
|
|
if (lp->lp_ioctl == NULL) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
error = (*lp->lp_ioctl)(ifp, cmd, data);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
|
|
|
|
/* Update lagg interface capabilities */
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2007-07-30 20:17:22 +00:00
|
|
|
lagg_capabilities(sc);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-05-26 22:22:48 +00:00
|
|
|
VLAN_CAPABILITIES(sc->sc_ifp);
|
2007-07-30 20:17:22 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case SIOCSIFMTU:
|
|
|
|
/* Do not allow the MTU to be changed once joined */
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
default:
|
|
|
|
goto fallback;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
fallback:
|
2016-12-16 22:39:30 +00:00
|
|
|
if (lp != NULL && lp->lp_ioctl != NULL)
|
2007-04-17 00:35:11 +00:00
|
|
|
return ((*lp->lp_ioctl)(ifp, cmd, data));
|
|
|
|
|
|
|
|
return (EINVAL);
|
|
|
|
}
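/*
 * Standalone toy model (userland C) of the interposition pattern above:
 * lagg_port_create() saves the member's if_ioctl in lp_ioctl and
 * installs lagg_port_ioctl() over it; anything the lagg does not
 * intercept falls through to the saved pointer (the "fallback:" path).
 */
#include <stdio.h>

typedef int (*ioctl_fn)(unsigned long);

static ioctl_fn saved_ioctl;		/* plays the role of lp->lp_ioctl */

static int
member_ioctl(unsigned long cmd)
{
	printf("member handles cmd %lu\n", cmd);
	return (0);
}

static int
wrapper_ioctl(unsigned long cmd)	/* plays the role of lagg_port_ioctl() */
{
	if (cmd == 1) {			/* a command the lagg intercepts */
		printf("lagg intercepts cmd %lu\n", cmd);
		return (0);
	}
	return (saved_ioctl(cmd));	/* fall back to the member */
}

int
main(void)
{
	saved_ioctl = member_ioctl;	/* done once at port attach */
	wrapper_ioctl(1);		/* intercepted */
	wrapper_ioctl(2);		/* falls through to the member */
	return (0);
}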
|
|
|
|
|
2014-09-27 13:57:48 +00:00
|
|
|
/*
 * Requests counter @cnt data.
 *
 * The counter value is calculated the following way:
 * 1) for each port, sum the difference between the current and
 *    "initial" measurements;
 * 2) add the lagg logical interface counters;
 * 3) add the data from the detached_counters array.
 *
 * We also do the following things on port attach/detach:
 * 1) on port attach we store all counters the port has into the
 *    port_counters array;
 * 2) on port detach we add the difference between the "initial" and
 *    current counter data to the detached_counters array.
 */
|
|
|
|
static uint64_t
|
|
|
|
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
|
|
|
|
{
|
2020-12-08 16:36:46 +00:00
|
|
|
struct epoch_tracker et;
|
2014-09-27 13:57:48 +00:00
|
|
|
struct lagg_softc *sc;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *lpifp;
|
|
|
|
uint64_t newval, oldval, vsum;
|
|
|
|
|
2014-09-28 08:57:07 +00:00
|
|
|
/* Revise this when we've got non-generic counters. */
|
|
|
|
KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));
|
2014-09-27 13:57:48 +00:00
|
|
|
|
|
|
|
sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
|
|
|
|
vsum = 0;
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2014-09-27 13:57:48 +00:00
|
|
|
/* Saved attached value */
|
2014-09-28 08:57:07 +00:00
|
|
|
oldval = lp->port_counters.val[cnt];
|
2014-09-27 13:57:48 +00:00
|
|
|
/* current value */
|
|
|
|
lpifp = lp->lp_ifp;
|
|
|
|
newval = lpifp->if_get_counter(lpifp, cnt);
|
|
|
|
/* Calculate diff and save new */
|
|
|
|
vsum += newval - oldval;
|
|
|
|
}
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2014-09-27 13:57:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Add counter data which might be added by upper
|
|
|
|
	 * layer protocols operating on the logical interface.
|
|
|
|
*/
|
|
|
|
vsum += if_get_counter_default(ifp, cnt);
|
|
|
|
|
|
|
|
/*
|
|
|
|
	 * Add counter data from the detached ports' counters
|
|
|
|
*/
|
2014-09-28 08:57:07 +00:00
|
|
|
vsum += sc->detached_counters.val[cnt];
|
2014-09-27 13:57:48 +00:00
|
|
|
|
|
|
|
return (vsum);
|
|
|
|
}
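/*
 * Toy standalone model (userland C, made-up numbers) of the counter
 * arithmetic described above lagg_get_counter(): per-port delta since
 * attach, plus logical-interface counters, plus the detached remainder.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t port_now = 1500;	/* current if_get_counter() value */
	uint64_t port_attach = 1000;	/* snapshot taken at port attach */
	uint64_t logical = 40;		/* if_get_counter_default() on lagg */
	uint64_t detached = 250;	/* accumulated from removed ports */
	uint64_t vsum;

	vsum = (port_now - port_attach) + logical + detached;
	printf("lagg counter: %ju\n", (uintmax_t)vsum);	/* prints 790 */
	return (0);
}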
|
|
|
|
|
2012-05-03 01:41:12 +00:00
|
|
|
/*
|
|
|
|
* For direct output to child ports.
|
|
|
|
*/
|
2007-04-17 00:35:11 +00:00
|
|
|
static int
|
|
|
|
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
|
2013-04-26 12:50:32 +00:00
|
|
|
const struct sockaddr *dst, struct route *ro)
|
2007-04-17 00:35:11 +00:00
|
|
|
{
|
|
|
|
struct lagg_port *lp = ifp->if_lagg;
|
|
|
|
|
|
|
|
switch (dst->sa_family) {
|
|
|
|
case pseudo_AF_HDRCMPLT:
|
|
|
|
case AF_UNSPEC:
|
2020-11-11 15:53:36 +00:00
|
|
|
if (lp != NULL)
|
|
|
|
return ((*lp->lp_output)(ifp, m, dst, ro));
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* drop any other frames */
|
|
|
|
m_freem(m);
|
2013-01-21 08:59:31 +00:00
|
|
|
return (ENETDOWN);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
|
|
|
|
{
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct lagg_softc *sc;
|
|
|
|
|
|
|
|
if ((lp = ifp->if_lagg) == NULL)
|
|
|
|
return;
|
2012-06-30 19:09:02 +00:00
|
|
|
/* If the ifnet is just being renamed, don't do anything. */
|
|
|
|
if (ifp->if_flags & IFF_RENAMING)
|
|
|
|
return;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2007-06-12 07:29:11 +00:00
|
|
|
sc = lp->lp_softc;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
|
|
|
lp->lp_detaching = 1;
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_port_destroy(lp, 1);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-05-26 22:22:48 +00:00
|
|
|
VLAN_CAPABILITIES(sc->sc_ifp);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
|
|
|
|
{
|
2007-06-12 07:29:11 +00:00
|
|
|
struct lagg_softc *sc = lp->lp_softc;
|
2007-08-30 19:12:10 +00:00
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
|
|
|
|
strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
|
|
|
|
rp->rp_prio = lp->lp_prio;
|
|
|
|
rp->rp_flags = lp->lp_flags;
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_portreq(sc, lp, &rp->rp_psc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
/* Add protocol specific flags */
|
|
|
|
switch (sc->sc_proto) {
|
|
|
|
case LAGG_PROTO_FAILOVER:
|
|
|
|
if (lp == sc->sc_primary)
|
2007-05-02 08:58:28 +00:00
|
|
|
rp->rp_flags |= LAGG_PORT_MASTER;
|
2007-08-30 19:12:10 +00:00
|
|
|
if (lp == lagg_link_active(sc, sc->sc_primary))
|
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
|
|
|
break;
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
case LAGG_PROTO_ROUNDROBIN:
|
|
|
|
case LAGG_PROTO_LOADBALANCE:
|
2014-09-26 12:35:58 +00:00
|
|
|
case LAGG_PROTO_BROADCAST:
|
2007-04-17 00:35:11 +00:00
|
|
|
if (LAGG_PORTACTIVE(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LAGG_PROTO_LACP:
|
|
|
|
/* LACP has a different definition of active */
|
2008-03-16 19:25:30 +00:00
|
|
|
if (lacp_isactive(lp))
|
2007-04-17 00:35:11 +00:00
|
|
|
rp->rp_flags |= LAGG_PORT_ACTIVE;
|
2008-03-16 19:25:30 +00:00
|
|
|
if (lacp_iscollecting(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_COLLECTING;
|
|
|
|
if (lacp_isdistributing(lp))
|
|
|
|
rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
static void
|
|
|
|
lagg_watchdog_infiniband(void *arg)
|
|
|
|
{
|
2020-12-08 16:36:46 +00:00
|
|
|
struct epoch_tracker et;
|
2020-10-22 09:47:12 +00:00
|
|
|
struct lagg_softc *sc;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *ifp;
|
|
|
|
struct ifnet *lp_ifp;
|
|
|
|
|
|
|
|
sc = arg;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Because infiniband nodes have a fixed MAC address, which is
|
|
|
|
* generated by the so-called GID, we need to regularly update
|
|
|
|
* the link level address of the parent lagg<N> device when
|
|
|
|
* the active port changes. Possibly we could piggy-back on
|
|
|
|
	 * link up/down events as well, but using a timer also provides
|
|
|
|
* a guarantee against too frequent events. This operation
|
|
|
|
* does not have to be atomic.
|
|
|
|
*/
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2020-10-22 09:47:12 +00:00
|
|
|
lp = lagg_link_active(sc, sc->sc_primary);
|
|
|
|
if (lp != NULL) {
|
|
|
|
ifp = sc->sc_ifp;
|
|
|
|
lp_ifp = lp->lp_ifp;
|
|
|
|
|
|
|
|
if (ifp != NULL && lp_ifp != NULL &&
|
2020-12-29 16:34:01 +00:00
|
|
|
(memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 ||
|
|
|
|
memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) {
|
2020-10-22 09:47:12 +00:00
|
|
|
memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen);
|
2020-12-29 16:34:01 +00:00
|
|
|
memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen);
|
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
CURVNET_SET(ifp->if_vnet);
|
|
|
|
EVENTHANDLER_INVOKE(iflladdr_event, ifp);
|
|
|
|
CURVNET_RESTORE();
|
|
|
|
}
|
|
|
|
}
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2020-10-22 09:47:12 +00:00
|
|
|
|
|
|
|
callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg);
|
|
|
|
}
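/*
 * Fragment-style sketch (kernel headers abbreviated) of the self-
 * rearming callout pattern used by the infiniband watchdog above:
 * initialize bound to a mutex, re-arm from the handler, and stop plus
 * drain on teardown.  "my_softc" is hypothetical.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>

struct my_softc {
	struct mtx	sc_mtx;
	struct callout	sc_tick;
};

static void
my_tick(void *arg)
{
	struct my_softc *sc = arg;

	/* ... periodic work, cf. lagg_watchdog_infiniband() ... */
	callout_reset(&sc->sc_tick, hz, my_tick, sc);	/* again in 1s */
}

static void
my_attach(struct my_softc *sc)
{
	mtx_init(&sc->sc_mtx, "my-mtx", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
}

static void
my_start(struct my_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	my_tick(sc);			/* first shot arms the callout */
	mtx_unlock(&sc->sc_mtx);
}

static void
my_stop(struct my_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	callout_stop(&sc->sc_tick);	/* cancel a pending shot */
	mtx_unlock(&sc->sc_mtx);
	callout_drain(&sc->sc_tick);	/* wait for a running handler */
}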
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
static void
|
|
|
|
lagg_init(void *xsc)
|
|
|
|
{
|
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)xsc;
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
2015-11-01 19:59:04 +00:00
|
|
|
struct lagg_port *lp;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
|
|
|
if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
|
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
return;
|
2017-05-02 19:09:11 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
ifp->if_drv_flags |= IFF_DRV_RUNNING;
|
2015-11-01 19:59:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the port lladdrs if needed.
|
|
|
|
	 * This might be an if_setlladdr() notification
	 * that the lladdr has been changed.
|
|
|
|
*/
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2017-05-02 19:09:11 +00:00
|
|
|
if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
|
2020-10-22 09:47:12 +00:00
|
|
|
ifp->if_addrlen) != 0)
|
|
|
|
if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen);
|
2017-05-02 19:09:11 +00:00
|
|
|
}
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_init(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
2020-10-22 09:47:12 +00:00
|
|
|
if (ifp->if_type == IFT_INFINIBAND) {
|
|
|
|
mtx_lock(&sc->sc_mtx);
|
|
|
|
lagg_watchdog_infiniband(sc);
|
|
|
|
mtx_unlock(&sc->sc_mtx);
|
|
|
|
}
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
lagg_stop(struct lagg_softc *sc)
|
|
|
|
{
|
|
|
|
struct ifnet *ifp = sc->sc_ifp;
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK_ASSERT(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
|
|
|
|
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_stop(sc);
|
2020-10-22 09:47:12 +00:00
|
|
|
|
|
|
|
mtx_lock(&sc->sc_mtx);
|
|
|
|
callout_stop(&sc->sc_watchdog);
|
|
|
|
mtx_unlock(&sc->sc_mtx);
|
|
|
|
|
|
|
|
callout_drain(&sc->sc_watchdog);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
|
|
|
|
{
|
2020-12-08 16:36:46 +00:00
|
|
|
struct epoch_tracker et;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
|
|
|
|
struct lagg_reqall *ra = (struct lagg_reqall *)data;
|
2014-10-02 20:01:13 +00:00
|
|
|
struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
|
2012-03-06 22:58:13 +00:00
|
|
|
struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
|
2007-04-17 00:35:11 +00:00
|
|
|
struct ifreq *ifr = (struct ifreq *)data;
|
|
|
|
struct lagg_port *lp;
|
|
|
|
struct ifnet *tpif;
|
|
|
|
struct thread *td = curthread;
|
2007-07-26 20:30:18 +00:00
|
|
|
char *buf, *outbuf;
|
2020-01-22 20:36:45 +00:00
|
|
|
int count, buflen, len, error = 0, oldmtu;
|
2007-04-17 00:35:11 +00:00
|
|
|
|
|
|
|
bzero(&rpbuf, sizeof(rpbuf));
|
|
|
|
|
2020-08-13 22:06:27 +00:00
|
|
|
/* XXX: This can race with lagg_clone_destroy. */
|
|
|
|
|
2007-04-17 00:35:11 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGLAGG:
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2017-05-02 19:09:11 +00:00
|
|
|
buflen = sc->sc_count * sizeof(struct lagg_reqport);
|
2007-07-26 20:30:18 +00:00
|
|
|
outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
|
2007-04-17 00:35:11 +00:00
|
|
|
ra->ra_proto = sc->sc_proto;
|
2014-09-26 12:54:24 +00:00
|
|
|
lagg_proto_request(sc, &ra->ra_psc);
|
2007-07-26 20:30:18 +00:00
|
|
|
count = 0;
|
|
|
|
buf = outbuf;
|
|
|
|
len = min(ra->ra_size, buflen);
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2007-07-26 20:30:18 +00:00
|
|
|
if (len < sizeof(rpbuf))
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2007-07-26 20:30:18 +00:00
|
|
|
|
|
|
|
lagg_port2req(lp, &rpbuf);
|
|
|
|
memcpy(buf, &rpbuf, sizeof(rpbuf));
|
|
|
|
count++;
|
|
|
|
buf += sizeof(rpbuf);
|
|
|
|
len -= sizeof(rpbuf);
|
2007-04-17 00:35:11 +00:00
|
|
|
}
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-07-26 20:30:18 +00:00
|
|
|
ra->ra_ports = count;
|
|
|
|
ra->ra_size = count * sizeof(rpbuf);
|
|
|
|
error = copyout(outbuf, ra->ra_port, ra->ra_size);
|
|
|
|
free(outbuf, M_TEMP);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGG:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
2016-02-19 06:35:53 +00:00
|
|
|
if (ra->ra_proto >= LAGG_PROTO_MAX) {
|
2014-10-02 20:01:13 +00:00
|
|
|
error = EPROTONOSUPPORT;
|
|
|
|
break;
|
|
|
|
}
|
2020-10-22 09:47:12 +00:00
|
|
|
/* Infiniband only supports the failover protocol. */
|
|
|
|
if (ra->ra_proto != LAGG_PROTO_FAILOVER &&
|
|
|
|
ifp->if_type == IFT_INFINIBAND) {
|
|
|
|
error = EPROTONOSUPPORT;
|
|
|
|
break;
|
|
|
|
}
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
lagg_proto_detach(sc);
|
|
|
|
lagg_proto_attach(sc, ra->ra_proto);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
break;
|
|
|
|
case SIOCGLAGGOPTS:
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
ro->ro_opts = sc->sc_opts;
|
|
|
|
if (sc->sc_proto == LAGG_PROTO_LACP) {
|
|
|
|
struct lacp_softc *lsc;
|
|
|
|
|
|
|
|
lsc = (struct lacp_softc *)sc->sc_psc;
|
|
|
|
if (lsc->lsc_debug.lsc_tx_test != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
|
|
|
|
if (lsc->lsc_debug.lsc_rx_test != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
|
|
|
|
if (lsc->lsc_strict_mode != 0)
|
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_STRICT;
|
2015-08-12 20:21:04 +00:00
|
|
|
if (lsc->lsc_fast_timeout != 0)
|
2020-06-11 22:46:08 +00:00
|
|
|
ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO;
|
2014-10-02 20:01:13 +00:00
|
|
|
|
|
|
|
ro->ro_active = sc->sc_active;
|
|
|
|
} else {
|
|
|
|
ro->ro_active = 0;
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
|
2014-10-02 20:01:13 +00:00
|
|
|
ro->ro_active += LAGG_PORTACTIVE(lp);
|
|
|
|
}
|
2019-12-22 21:56:47 +00:00
|
|
|
ro->ro_bkt = sc->sc_stride;
|
2014-10-02 20:01:13 +00:00
|
|
|
ro->ro_flapping = sc->sc_flapping;
|
|
|
|
ro->ro_flowid_shift = sc->flowid_shift;
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGOPTS:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
2019-12-22 21:56:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The stride option was added without defining a corresponding
|
2020-01-09 14:58:41 +00:00
|
|
|
* LAGG_OPT flag, so handle a non-zero value before checking
|
|
|
|
* anything else to preserve compatibility.
|
2019-12-22 21:56:47 +00:00
|
|
|
*/
|
|
|
|
LAGG_XLOCK(sc);
|
2020-01-09 14:58:41 +00:00
|
|
|
if (ro->ro_opts == 0 && ro->ro_bkt != 0) {
|
2019-12-22 21:56:47 +00:00
|
|
|
if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) {
|
|
|
|
LAGG_XUNLOCK(sc);
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
sc->sc_stride = ro->ro_bkt;
|
|
|
|
}
|
|
|
|
if (ro->ro_opts == 0) {
|
|
|
|
LAGG_XUNLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
break;
|
2019-12-22 21:56:47 +00:00
|
|
|
}
|
|
|
|
|
2014-10-02 20:01:13 +00:00
|
|
|
/*
|
|
|
|
* Set options. LACP options are stored in sc->sc_psc,
|
|
|
|
* not in sc_opts.
|
|
|
|
*/
|
|
|
|
int valid, lacp;
|
|
|
|
|
|
|
|
switch (ro->ro_opts) {
|
|
|
|
case LAGG_OPT_USE_FLOWID:
|
|
|
|
case -LAGG_OPT_USE_FLOWID:
|
2019-05-03 14:43:21 +00:00
|
|
|
case LAGG_OPT_USE_NUMA:
|
|
|
|
case -LAGG_OPT_USE_NUMA:
|
2014-10-02 20:01:13 +00:00
|
|
|
case LAGG_OPT_FLOWIDSHIFT:
|
2020-01-09 14:58:41 +00:00
|
|
|
case LAGG_OPT_RR_LIMIT:
|
2014-10-02 20:01:13 +00:00
|
|
|
valid = 1;
|
|
|
|
lacp = 0;
|
|
|
|
break;
|
|
|
|
case LAGG_OPT_LACP_TXTEST:
|
|
|
|
case -LAGG_OPT_LACP_TXTEST:
|
|
|
|
case LAGG_OPT_LACP_RXTEST:
|
|
|
|
case -LAGG_OPT_LACP_RXTEST:
|
|
|
|
case LAGG_OPT_LACP_STRICT:
|
|
|
|
case -LAGG_OPT_LACP_STRICT:
|
2020-06-11 22:46:08 +00:00
|
|
|
case LAGG_OPT_LACP_FAST_TIMO:
|
|
|
|
case -LAGG_OPT_LACP_FAST_TIMO:
|
2014-10-02 20:01:13 +00:00
|
|
|
valid = lacp = 1;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
valid = lacp = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (valid == 0 ||
|
|
|
|
(lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
|
|
|
|
/* Invalid combination of options specified. */
|
|
|
|
error = EINVAL;
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2014-10-02 20:01:13 +00:00
|
|
|
break; /* Return from SIOCSLAGGOPTS. */
|
|
|
|
}
|
2020-01-09 14:58:41 +00:00
|
|
|
|
2014-10-02 20:01:13 +00:00
|
|
|
/*
|
|
|
|
* Store new options into sc->sc_opts except for
|
2020-01-09 14:58:41 +00:00
|
|
|
* FLOWIDSHIFT, RR and LACP options.
|
2014-10-02 20:01:13 +00:00
|
|
|
*/
|
|
|
|
if (lacp == 0) {
|
|
|
|
if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
|
|
|
|
sc->flowid_shift = ro->ro_flowid_shift;
|
2020-01-09 14:58:41 +00:00
|
|
|
else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) {
|
|
|
|
if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN ||
|
|
|
|
ro->ro_bkt == 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
LAGG_XUNLOCK(sc);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
sc->sc_stride = ro->ro_bkt;
|
|
|
|
} else if (ro->ro_opts > 0)
|
2014-10-02 20:01:13 +00:00
|
|
|
sc->sc_opts |= ro->ro_opts;
|
|
|
|
else
|
|
|
|
sc->sc_opts &= ~ro->ro_opts;
|
|
|
|
} else {
|
|
|
|
struct lacp_softc *lsc;
|
2015-08-12 20:21:04 +00:00
|
|
|
struct lacp_port *lp;
|
2014-10-02 20:01:13 +00:00
|
|
|
|
|
|
|
lsc = (struct lacp_softc *)sc->sc_psc;
|
|
|
|
|
|
|
|
switch (ro->ro_opts) {
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_TXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_tx_test = 1;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case -LAGG_OPT_LACP_TXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_tx_test = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_RXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_rx_test = 1;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case -LAGG_OPT_LACP_RXTEST:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_debug.lsc_rx_test = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
case LAGG_OPT_LACP_STRICT:
|
2014-10-02 20:01:13 +00:00
|
|
|
lsc->lsc_strict_mode = 1;
|
2014-10-01 21:37:32 +00:00
|
|
|
break;
|
2014-10-02 20:01:13 +00:00
|
|
|
case -LAGG_OPT_LACP_STRICT:
|
|
|
|
lsc->lsc_strict_mode = 0;
|
2014-10-01 21:37:32 +00:00
|
|
|
break;
|
2020-06-11 22:46:08 +00:00
|
|
|
case LAGG_OPT_LACP_FAST_TIMO:
|
2015-08-12 20:21:04 +00:00
|
|
|
LACP_LOCK(lsc);
|
|
|
|
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
|
|
|
|
lp->lp_state |= LACP_STATE_TIMEOUT;
|
|
|
|
LACP_UNLOCK(lsc);
|
|
|
|
lsc->lsc_fast_timeout = 1;
|
|
|
|
break;
|
2020-06-11 22:46:08 +00:00
|
|
|
case -LAGG_OPT_LACP_FAST_TIMO:
|
2015-08-12 20:21:04 +00:00
|
|
|
LACP_LOCK(lsc);
|
|
|
|
LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
|
|
|
|
lp->lp_state &= ~LACP_STATE_TIMEOUT;
|
|
|
|
LACP_UNLOCK(lsc);
|
|
|
|
lsc->lsc_fast_timeout = 0;
|
|
|
|
break;
|
2014-10-01 21:37:32 +00:00
|
|
|
}
|
|
|
|
}
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2012-03-06 22:58:13 +00:00
|
|
|
case SIOCGLAGGFLAGS:
|
2015-03-11 16:02:24 +00:00
|
|
|
rf->rf_flags = 0;
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2015-03-11 16:02:24 +00:00
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L2)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL2;
|
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L3)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL3;
|
|
|
|
if (sc->sc_flags & MBUF_HASHFLAG_L4)
|
|
|
|
rf->rf_flags |= LAGG_F_HASHL4;
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2012-03-06 22:58:13 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGHASH:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2015-03-11 16:02:24 +00:00
|
|
|
sc->sc_flags = 0;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL2)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L2;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL3)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L3;
|
|
|
|
if (rf->rf_flags & LAGG_F_HASHL4)
|
|
|
|
sc->sc_flags |= MBUF_HASHFLAG_L4;
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2012-03-06 22:58:13 +00:00
|
|
|
break;
|
2007-04-17 00:35:11 +00:00
|
|
|
case SIOCGLAGGPORT:
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_ENTER(et);
|
2007-04-17 00:35:11 +00:00
|
|
|
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
|
2007-06-12 07:29:11 +00:00
|
|
|
lp->lp_softc != sc) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = ENOENT;
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
lagg_port2req(lp, rp);
|
2020-12-08 16:36:46 +00:00
|
|
|
NET_EPOCH_EXIT(et);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGPORT:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2014-10-17 09:08:44 +00:00
|
|
|
#ifdef INET6
|
|
|
|
/*
|
|
|
|
		 * A laggport interface should not have an inet6 address
|
|
|
|
* because two interfaces with a valid link-local
|
|
|
|
* scope zone must not be merged in any form. This
|
|
|
|
* restriction is needed to prevent violation of
|
|
|
|
* link-local scope zone. Attempts to add a laggport
|
|
|
|
* interface which has inet6 addresses triggers
|
|
|
|
* removal of all inet6 addresses on the member
|
|
|
|
* interface.
|
|
|
|
*/
|
|
|
|
if (in6ifa_llaonifp(tpif)) {
|
|
|
|
in6_ifdetach(tpif);
|
|
|
|
if_printf(sc->sc_ifp,
|
|
|
|
"IPv6 addresses on %s have been removed "
|
|
|
|
"before adding it as a member to prevent "
|
|
|
|
"IPv6 address scope violation.\n",
|
|
|
|
tpif->if_xname);
|
|
|
|
}
|
|
|
|
#endif
|
2020-01-22 20:36:45 +00:00
|
|
|
oldmtu = ifp->if_mtu;
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
error = lagg_port_create(sc, tpif);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2020-01-22 20:36:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* LAGG MTU may change during addition of the first port.
|
|
|
|
		 * If it did, run the network-layer-specific procedures.
|
|
|
|
*/
|
|
|
|
if (ifp->if_mtu != oldmtu) {
|
|
|
|
#ifdef INET6
|
|
|
|
nd6_setmtu(ifp);
|
|
|
|
#endif
|
|
|
|
rt_updatemtu(ifp);
|
|
|
|
}
|
|
|
|
|
2017-05-26 22:22:48 +00:00
|
|
|
VLAN_CAPABILITIES(ifp);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSLAGGDELPORT:
|
|
|
|
error = priv_check(td, PRIV_NET_LAGG);
|
|
|
|
if (error)
|
|
|
|
break;
|
|
|
|
if (rp->rp_portname[0] == '\0' ||
|
2017-04-21 13:45:01 +00:00
|
|
|
(tpif = ifunit_ref(rp->rp_portname)) == NULL) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
|
2007-06-12 07:29:11 +00:00
|
|
|
lp->lp_softc != sc) {
|
2007-04-17 00:35:11 +00:00
|
|
|
error = ENOENT;
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
error = lagg_port_destroy(lp, 1);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-04-21 13:45:01 +00:00
|
|
|
if_rele(tpif);
|
2017-05-26 22:22:48 +00:00
|
|
|
VLAN_CAPABILITIES(ifp);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSIFFLAGS:
|
|
|
|
/* Set flags on ports too */
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2007-04-17 00:35:11 +00:00
|
|
|
lagg_setflags(lp, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!(ifp->if_flags & IFF_UP) &&
|
|
|
|
(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
|
|
|
|
/*
|
|
|
|
* If interface is marked down and it is running,
|
|
|
|
* then stop and disable it.
|
|
|
|
*/
|
|
|
|
lagg_stop(sc);
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
} else if ((ifp->if_flags & IFF_UP) &&
|
|
|
|
!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
|
|
|
|
/*
|
|
|
|
* If interface is marked up and it is stopped, then
|
|
|
|
* start it.
|
|
|
|
*/
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
(*ifp->if_init)(sc);
|
2017-05-02 19:09:11 +00:00
|
|
|
} else
|
|
|
|
LAGG_XUNLOCK(sc);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCADDMULTI:
|
|
|
|
case SIOCDELMULTI:
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XLOCK(sc);
|
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2017-05-02 19:09:11 +00:00
|
|
|
lagg_clrmulti(lp);
|
|
|
|
lagg_setmulti(lp);
|
|
|
|
}
|
2018-05-14 20:06:49 +00:00
|
|
|
LAGG_XUNLOCK(sc);
|
2017-05-02 19:09:11 +00:00
|
|
|
error = 0;
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
|
|
|
case SIOCSIFMEDIA:
|
|
|
|
case SIOCGIFMEDIA:
|
2020-10-22 09:47:12 +00:00
|
|
|
if (ifp->if_type == IFT_INFINIBAND)
|
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
|
2007-04-17 00:35:11 +00:00
|
|
|
break;
|
2007-07-30 20:17:22 +00:00
|
|
|
|
|
|
|
case SIOCSIFCAP:
|
2022-07-28 14:36:22 +00:00
|
|
|
case SIOCSIFCAPNV:
|
2017-05-02 19:09:11 +00:00
|
|
|
LAGG_XLOCK(sc);
|
2018-05-14 20:06:49 +00:00
|
|
|
CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
|
2017-05-02 19:09:11 +00:00
|
|
|
if (lp->lp_ioctl != NULL)
|
|
|
|
(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
|
|
|
|
}
|
|
|
|
lagg_capabilities(sc);
|
|
|
|
LAGG_XUNLOCK(sc);
|
2017-05-26 22:22:48 +00:00
|
|
|
VLAN_CAPABILITIES(ifp);
|
2017-05-02 19:09:11 +00:00
|
|
|
error = 0;
|
|
|
|
break;
|
|
|
|
|
2022-07-28 14:36:22 +00:00
|
|
|
case SIOCGIFCAPNV:
|
|
|
|
error = 0;
|
|
|
|
break;
	case SIOCSIFMTU:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			else
				error = EINVAL;
			if (error != 0) {
				if_printf(ifp,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original MTU (%d)\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
				break;
			}
		}
		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
				if (lp->lp_ioctl != NULL)
					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			}
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
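
/*
 * Method tables for each send tag type lagg can hand out; the
 * operations are forwarded to the tag of the underlying port.
 */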
#if defined(KERN_TLS) || defined(RATELIMIT)
#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_ul_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_UNLIMITED
};

static const struct if_snd_tag_sw lagg_snd_tag_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_RATE_LIMIT
};
#endif

#ifdef KERN_TLS
static const struct if_snd_tag_sw lagg_snd_tag_tls_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif
#endif
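
/*
 * Illustrative sketch (not the committed allocation path): an
 * if_snd_tag_alloc implementation would pick one of the method
 * tables above based on the requested tag type, roughly:
 *
 *	switch (params->hdr.type) {
 *	case IF_SND_TAG_TYPE_UNLIMITED:
 *		sw = &lagg_snd_tag_ul_sw;
 *		break;
 *	case IF_SND_TAG_TYPE_RATE_LIMIT:
 *		sw = &lagg_snd_tag_rl_sw;
 *		break;
 *	case IF_SND_TAG_TYPE_TLS:
 *		sw = &lagg_snd_tag_tls_sw;
 *		break;
 *	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
 *		sw = &lagg_snd_tag_tls_rl_sw;
 *		break;
 *	default:
 *		return (EOPNOTSUPP);
 *	}
 */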
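
/* Recover lagg's wrapper tag from the embedded generic send tag. */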
static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{
	return (__containerof(mst, struct lagg_snd_tag, com));
}

/*
 * Look up the port used by a specific flow.  This only works for lagg
 * protocols with deterministic port mappings (e.g. not roundrobin).
 * In addition, protocols which use a hash to map flows to ports must
 * be configured to use the mbuf flowid rather than hashing packet
 * contents.
 */
static struct lagg_port *
lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype,
    uint8_t numa_domain)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct lagg_lb *lb;
	uint32_t hash, p;
	int err;

	sc = ifp->if_softc;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		return (lagg_link_active(sc, sc->sc_primary));
	case LAGG_PROTO_LOADBALANCE:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		p = flowid >> sc->flowid_shift;
		p %= sc->sc_count;
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[p];
		return (lagg_link_active(sc, lp));
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
Restructure mbuf send tags to provide stronger guarantees.
- Perform ifp mismatch checks (to determine if a send tag is allocated
for a different ifp than the one the packet is being output on), in
ip_output() and ip6_output(). This avoids sending packets with send
tags to ifnet drivers that don't support send tags.
Since we are now checking for ifp mismatches before invoking
if_output, we can now try to allocate a new tag before invoking
if_output sending the original packet on the new tag if allocation
succeeds.
To avoid code duplication for the fragment and unfragmented cases,
add ip_output_send() and ip6_output_send() as wrappers around
if_output and nd6_output_ifp, respectively. All of the logic for
setting send tags and dealing with send tag-related errors is done
in these wrapper functions.
For pseudo interfaces that wrap other network interfaces (vlan and
lagg), wrapper send tags are now allocated so that ip*_output see
the wrapper ifp as the ifp in the send tag. The if_transmit
routines rewrite the send tags after performing an ifp mismatch
check. If an ifp mismatch is detected, the transmit routines fail
with EAGAIN.
- To provide clearer life cycle management of send tags, especially
in the presence of vlan and lagg wrapper tags, add a reference count
to send tags managed via m_snd_tag_ref() and m_snd_tag_rele().
Provide a helper function (m_snd_tag_init()) for use by drivers
supporting send tags. m_snd_tag_init() takes care of the if_ref
on the ifp meaning that code alloating send tags via if_snd_tag_alloc
no longer has to manage that manually. Similarly, m_snd_tag_rele
drops the refcount on the ifp after invoking if_snd_tag_free when
the last reference to a send tag is dropped.
This also closes use after free races if there are pending packets in
driver tx rings after the socket is closed (e.g. from tcpdrop).
In order for m_free to work reliably, add a new CSUM_SND_TAG flag in
csum_flags to indicate 'snd_tag' is set (rather than 'rcvif').
Drivers now also check this flag instead of checking snd_tag against
NULL. This avoids false positive matches when a forwarded packet
has a non-NULL rcvif that was treated as a send tag.
- cxgbe was relying on snd_tag_free being called when the inp was
detached so that it could kick the firmware to flush any pending
work on the flow. This is because the driver doesn't require ACK
messages from the firmware for every request, but instead does a
kind of manual interrupt coalescing by only setting a flag to
request a completion on a subset of requests. If all of the
in-flight requests don't have the flag when the tag is detached from
the inp, the flow might never return the credits. The current
snd_tag_free command issues a flush command to force the credits to
return. However, the credit return is what also frees the mbufs,
and since those mbufs now hold references on the tag, this meant
that snd_tag_free would never be called.
To fix, explicitly drop the mbuf's reference on the snd tag when the
mbuf is queued in the firmware work queue. This means that once the
inp's reference on the tag goes away and all in-flight mbufs have
been queued to the firmware, tag's refcount will drop to zero and
snd_tag_free will kick in and send the flush request. Note that we
need to avoid doing this in the middle of ethofld_tx(), so the
driver grabs a temporary reference on the tag around that loop to
defer the free to the end of the function in case it sends the last
mbuf to the queue after the inp has dropped its reference on the
tag.
- mlx5 preallocates send tags and was using the ifp pointer even when
the send tag wasn't in use. Explicitly use the ifp from other data
structures instead.
- Sprinkle some assertions in various places to assert that received
packets don't have a send tag, and that other places that overwrite
rcvif (e.g. 802.11 transmit) don't clobber a send tag pointer.
Reviewed by: gallatin, hselasky, rgrimes, ae
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D20117
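
As a hedged illustration of the driver-side convention described above
(consult CSUM_SND_TAG before touching m_pkthdr.snd_tag, and fail with
EAGAIN on an ifp mismatch), a transmit routine might look roughly like
this; the function name and ring selection are hypothetical:

/*
 * Hypothetical driver transmit fragment (not from the tree): treat
 * m_pkthdr.snd_tag as a send tag only when CSUM_SND_TAG is set, since
 * the same field aliases rcvif on forwarded packets, and return EAGAIN
 * on an ifp mismatch so the stack releases and re-allocates the tag.
 */
static int
example_if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct m_snd_tag *mst = NULL;

	if ((m->m_pkthdr.csum_flags & CSUM_SND_TAG) != 0)
		mst = m->m_pkthdr.snd_tag;
	if (mst != NULL && mst->ifp != ifp) {
		/* Tag belongs to another interface; request a retry. */
		m_freem(m);
		return (EAGAIN);
	}
	/* ... select a ring from 'mst' (or a default) and enqueue 'm' ... */
	return (0);
}

The listing resumes below with the tail of lookup_snd_tag_port(), the
helper that lagg_snd_tag_alloc() (further down) uses to pick the egress
port for a new tag.
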
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		hash = flowid >> sc->flowid_shift;
		return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, &err));
	default:
		return (NULL);
	}
}
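
lacp_select_tx_port_by_hash() is defined elsewhere in the LACP code;
conceptually it reduces the computed flow hash to one of the active
aggregated ports. A hypothetical, simplified picker for illustration
(not the real implementation, which works against the LACP port map and
also takes the numa_domain argument into account):

/*
 * Illustrative only: map a flow hash onto one of 'nactive' ports.
 */
static struct lagg_port *
example_select_port(struct lagg_port **active, u_int nactive, uint32_t hash)
{

	if (nactive == 0)
		return (NULL);
	return (active[hash % nactive]);
}
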
static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct epoch_tracker et;
	const struct if_snd_tag_sw *sw;
	struct lagg_snd_tag *lst;
	struct lagg_port *lp;
	struct ifnet *lp_ifp;
	struct m_snd_tag *mst;
	int error;

	switch (params->hdr.type) {
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_UNLIMITED:
		sw = &lagg_snd_tag_ul_sw;
		break;
	case IF_SND_TAG_TYPE_RATE_LIMIT:
		sw = &lagg_snd_tag_rl_sw;
		break;
#endif
#ifdef KERN_TLS
	case IF_SND_TAG_TYPE_TLS:
		sw = &lagg_snd_tag_tls_sw;
		break;
	case IF_SND_TAG_TYPE_TLS_RX:
		/* Return tag from port interface directly. */
		sw = NULL;
		break;
#ifdef RATELIMIT
	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
		sw = &lagg_snd_tag_tls_rl_sw;
		break;
#endif
#endif
	default:
		return (EOPNOTSUPP);
	}

	NET_EPOCH_ENTER(et);
	lp = lookup_snd_tag_port(ifp, params->hdr.flowid,
	    params->hdr.flowtype, params->hdr.numa_domain);
	if (lp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}
	if (lp->lp_ifp == NULL) {
		NET_EPOCH_EXIT(et);
		return (EOPNOTSUPP);
	}

	lp_ifp = lp->lp_ifp;
	if_ref(lp_ifp);
	NET_EPOCH_EXIT(et);

	if (sw != NULL) {
		lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
		if (lst == NULL) {
			if_rele(lp_ifp);
			return (ENOMEM);
		}
	} else
		lst = NULL;

	error = m_snd_tag_alloc(lp_ifp, params, &mst);
	if_rele(lp_ifp);
	if (error) {
		free(lst, M_LAGG);
		return (error);
	}

	if (sw != NULL) {
		m_snd_tag_init(&lst->com, ifp, sw);
		lst->tag = mst;

		*ppmt = &lst->com;
	} else
		*ppmt = mst;

	return (0);
}

static struct m_snd_tag *
lagg_next_snd_tag(struct m_snd_tag *mst)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag);
}
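
mst_to_lst() and struct lagg_snd_tag are defined earlier in this file;
the following is a sketch of the layout these functions assume,
reconstructed from the usage above (the container idiom is the usual
FreeBSD pattern and is assumed here, not quoted from the source):

/*
 * Assumed wrapper layout: the lagg tag embeds a generic m_snd_tag that
 * advertises the lagg ifp to the stack, plus a reference to the tag
 * allocated from the selected port.
 */
struct lagg_snd_tag {
	struct m_snd_tag com;	/* tag seen by ip*_output (lagg ifp) */
	struct m_snd_tag *tag;	/* underlying tag from lp->lp_ifp */
};

static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct lagg_snd_tag, com));
}

This is why lagg_next_snd_tag() can simply return lst->tag: the wrapper
only presents the lagg ifp to the stack and delegates everything else
to the port's tag, as lagg_snd_tag_modify() and lagg_snd_tag_query()
below show.
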

static int
lagg_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->sw->snd_tag_modify(lst->tag, params));
}

static int
lagg_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
	struct lagg_snd_tag *lst;

	lst = mst_to_lst(mst);
	return (lst->tag->sw->snd_tag_query(lst->tag, params));
}

Restructure mbuf send tags to provide stronger guarantees.
- Perform ifp mismatch checks (to determine if a send tag is allocated
  for a different ifp than the one the packet is being output on) in
  ip_output() and ip6_output(). This avoids sending packets with send
  tags to ifnet drivers that don't support send tags.
  Since we are now checking for ifp mismatches before invoking
  if_output, we can now try to allocate a new tag before invoking
  if_output, sending the original packet on the new tag if allocation
  succeeds.
  To avoid code duplication for the fragmented and unfragmented cases,
  add ip_output_send() and ip6_output_send() as wrappers around
  if_output and nd6_output_ifp, respectively. All of the logic for
  setting send tags and dealing with send tag-related errors is done
  in these wrapper functions.
  For pseudo interfaces that wrap other network interfaces (vlan and
  lagg), wrapper send tags are now allocated so that ip*_output sees
  the wrapper ifp as the ifp in the send tag (the wrapper's shape is
  sketched after lagg_snd_tag_free() below). The if_transmit routines
  rewrite the send tags after performing an ifp mismatch check. If an
  ifp mismatch is detected, the transmit routines fail with EAGAIN.
- To provide clearer life-cycle management of send tags, especially
  in the presence of vlan and lagg wrapper tags, add a reference count
  to send tags, managed via m_snd_tag_ref() and m_snd_tag_rele().
  Provide a helper function (m_snd_tag_init()) for use by drivers
  supporting send tags. m_snd_tag_init() takes care of the if_ref on
  the ifp, meaning that code allocating send tags via if_snd_tag_alloc
  no longer has to manage that manually. Similarly, m_snd_tag_rele
  drops the refcount on the ifp after invoking if_snd_tag_free when
  the last reference to a send tag is dropped.
  This also closes use-after-free races if there are pending packets
  in driver tx rings after the socket is closed (e.g. from tcpdrop).
  In order for m_free to work reliably, add a new CSUM_SND_TAG flag in
  csum_flags to indicate 'snd_tag' is set (rather than 'rcvif').
  Drivers now also check this flag instead of checking snd_tag against
  NULL. This avoids false positive matches when a forwarded packet
  has a non-NULL rcvif that was treated as a send tag.
- cxgbe was relying on snd_tag_free being called when the inp was
  detached so that it could kick the firmware to flush any pending
  work on the flow. This is because the driver doesn't require ACK
  messages from the firmware for every request, but instead does a
  kind of manual interrupt coalescing by only setting a flag to
  request a completion on a subset of requests. If none of the
  in-flight requests have the flag set when the tag is detached from
  the inp, the flow might never return the credits. The current
  snd_tag_free command issues a flush command to force the credits to
  return. However, the credit return is what also frees the mbufs,
  and since those mbufs now hold references on the tag, this meant
  that snd_tag_free would never be called.
  To fix, explicitly drop the mbuf's reference on the snd tag when the
  mbuf is queued in the firmware work queue. This means that once the
  inp's reference on the tag goes away and all in-flight mbufs have
  been queued to the firmware, the tag's refcount will drop to zero
  and snd_tag_free will kick in and send the flush request. Note that
  we need to avoid doing this in the middle of ethofld_tx(), so the
  driver grabs a temporary reference on the tag around that loop to
  defer the free to the end of the function in case it sends the last
  mbuf to the queue after the inp has dropped its reference on the
  tag.
- mlx5 preallocates send tags and was using the ifp pointer even when
  the send tag wasn't in use. Explicitly use the ifp from other data
  structures instead.
- Sprinkle assertions in various places to assert that received
  packets don't have a send tag, and that other places that overwrite
  rcvif (e.g. 802.11 transmit) don't clobber a send tag pointer.
Reviewed by:	gallatin, hselasky, rgrimes, ae
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D20117
2019-05-24 22:30:40 +00:00

static void
lagg_snd_tag_free(struct m_snd_tag *mst)
{
        struct lagg_snd_tag *lst;

        lst = mst_to_lst(mst);
        m_snd_tag_rele(lst->tag);
        free(lst, M_LAGG);
}
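
The query and free functions above both operate on lagg's wrapper tag.
A minimal sketch of the shapes involved, assuming the containerof idiom
used elsewhere in the kernel; the authoritative definitions of struct
lagg_snd_tag and mst_to_lst() live earlier in this file, so the names
here carry a _sketch suffix:

/*
 * Sketch only: the wrapper embeds the generic m_snd_tag handed to
 * the stack (carrying the lagg ifp) and holds a counted reference
 * on the underlying member port's tag.
 */
struct lagg_snd_tag_sketch {
        struct m_snd_tag com;   /* tag the stack sees (lagg ifp) */
        struct m_snd_tag *tag;  /* referenced tag of the member port */
};

static inline struct lagg_snd_tag_sketch *
mst_to_lst_sketch(struct m_snd_tag *mst)
{
        /* Recover the enclosing wrapper from its embedded member. */
        return (__containerof(mst, struct lagg_snd_tag_sketch, com));
}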

static void
lagg_ratelimit_query(struct ifnet *ifp __unused,
    struct if_ratelimit_query_results *q)
{
        /*
         * For lagg, we have an indirect interface.  The caller needs
         * to get a ratelimit tag on the actual interface the flow
         * will go on.
         */
        q->rate_table = NULL;
        q->flags = RT_IS_INDIRECT;
        q->max_flows = 0;
        q->number_of_rates = 0;
}
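
Consumers of this query are expected to treat RT_IS_INDIRECT as "ask
again on the real egress port". An illustrative check only, not an
in-tree API:

/*
 * Illustrative: on an indirect interface such as lagg, the rate
 * table is empty and RT_IS_INDIRECT is set, so the caller must
 * resolve the flow to a member port and query that port instead.
 */
static int
ratelimit_query_is_indirect(const struct if_ratelimit_query_results *q)
{
        return ((q->flags & RT_IS_INDIRECT) != 0);
}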

Implement kernel support for hardware rate limited sockets.
- Add a RATELIMIT kernel configuration keyword which must be set to
  enable the new functionality.
- Add support for hardware-driven, Receive Side Scaling (RSS) aware,
  rate-limited sendqueues and expose the functionality through the
  already established SO_MAX_PACING_RATE setsockopt(). The API
  supports rates in the range from 1 to 4 Gbytes/s, which are suitable
  for regular TCP and UDP streams. The setsockopt(2) manual page has
  been updated.
- Add a rate limit function callback API to "struct ifnet" which
  supports the following operations: if_snd_tag_alloc(),
  if_snd_tag_modify(), if_snd_tag_query() and if_snd_tag_free().
- Add support to ifconfig to view, set and clear the IFCAP_TXRTLMT
  flag, which tells whether a network driver supports rate limiting.
- This patch also adds support for rate limiting through VLAN and LAGG
  intermediate network devices.
- How rate limiting works (a userspace sketch of step 1 follows this
  block):
  1) The userspace application calls setsockopt() after accepting or
  making a new connection to set the rate, which is then stored in the
  socket structure in the kernel. Later on, when packets are
  transmitted, a check is made in the transmit path for rate changes.
  A rate change implies that a non-blocking ifp->if_snd_tag_alloc()
  call will be made to the destination network interface, which then
  sets up a custom sendqueue with the given rate limitation parameter.
  A "struct m_snd_tag" pointer is returned which serves as a "snd_tag"
  hint in the m_pkthdr for the subsequently transmitted mbufs.
  2) When the network driver sees "m->m_pkthdr.snd_tag" different from
  NULL, it will move the packets into a designated rate-limited
  sendqueue given by the snd_tag pointer. It is up to the individual
  drivers how the traffic will be rate limited.
  3) Route changes are detected by the NIC drivers in the
  ifp->if_transmit() routine when the ifnet pointer in the incoming
  snd_tag does not match that of the network interface. The network
  adapter frees the mbuf and returns EAGAIN, which causes ip_output()
  to release and clear the send tag. Upon the next ip_output(),
  allocation of a new "snd_tag" will be attempted.
  4) When the PCB is detached, the custom sendqueue will be released
  by a non-blocking ifp->if_snd_tag_free() call to the currently bound
  network interface.
Reviewed by:	wblock (manpages), adrian, gallatin, scottl (network)
Differential Revision:	https://reviews.freebsd.org/D3687
Sponsored by:	Mellanox Technologies
MFC after:	3 months
2017-01-18 13:31:17 +00:00

#endif
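
A minimal userspace sketch of step 1 above, assuming a connected TCP
socket on a kernel built with RATELIMIT and a driver advertising
IFCAP_TXRTLMT; see setsockopt(2) for the authoritative contract:

#include <sys/socket.h>

#include <stdio.h>

/*
 * Request that the kernel pace this socket at 'bytes_per_sec'.
 * On capable hardware this leads to a non-blocking
 * if_snd_tag_alloc() on the egress interface the first time the
 * transmit path notices the rate change.
 */
static int
set_pacing_rate(int fd, unsigned int bytes_per_sec)
{
        if (setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
            &bytes_per_sec, sizeof(bytes_per_sec)) == -1) {
                perror("setsockopt(SO_MAX_PACING_RATE)");
                return (-1);
        }
        return (0);
}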

static int
lagg_setmulti(struct lagg_port *lp)
{
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *ifp = lp->lp_ifp;
        struct ifnet *scifp = sc->sc_ifp;
        struct lagg_mc *mc;
        struct ifmultiaddr *ifma;
        int error;

        IF_ADDR_WLOCK(scifp);
        CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
                if (mc == NULL) {
                        IF_ADDR_WUNLOCK(scifp);
                        return (ENOMEM);
                }
                bcopy(ifma->ifma_addr, &mc->mc_addr,
                    ifma->ifma_addr->sa_len);
                mc->mc_addr.sdl_index = ifp->if_index;
                mc->mc_ifma = NULL;
                SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
        }
        IF_ADDR_WUNLOCK(scifp);
        SLIST_FOREACH(mc, &lp->lp_mc_head, mc_entries) {
                error = if_addmulti(ifp,
                    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
                if (error)
                        return (error);
        }
        return (0);
}

static int
lagg_clrmulti(struct lagg_port *lp)
{
        struct lagg_mc *mc;

        LAGG_XLOCK_ASSERT(lp->lp_softc);
        while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
                SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
                if (mc->mc_ifma && lp->lp_detaching == 0)
                        if_delmulti_ifma(mc->mc_ifma);
                free(mc, M_LAGG);
        }
        return (0);
}

static void
lagg_setcaps(struct lagg_port *lp, int cap, int cap2)
{
        struct ifreq ifr;
        struct siocsifcapnv_driver_data drv_ioctl_data;

        if (lp->lp_ifp->if_capenable == cap &&
            lp->lp_ifp->if_capenable2 == cap2)
                return;
        if (lp->lp_ioctl == NULL)
                return;
        /* XXX */
        if ((lp->lp_ifp->if_capabilities & IFCAP_NV) != 0) {
                drv_ioctl_data.reqcap = cap;
                drv_ioctl_data.reqcap2 = cap2;
                drv_ioctl_data.nvcap = NULL;
                (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAPNV,
                    (caddr_t)&drv_ioctl_data);
        } else {
                ifr.ifr_reqcap = cap;
                (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr);
        }
}

/* Handle a ref-counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;
        struct ifnet *ifp = lp->lp_ifp;
        int error;

        LAGG_XLOCK_ASSERT(sc);

        status = status ? (scifp->if_flags & flag) : 0;
        /* Now "status" contains the flag value or 0 */

        /*
         * See if the recorded port status differs from what we want
         * it to be.  If it does, flip it.  We record the port status
         * in lp_ifflags so that we won't clear a port flag we did not
         * set.  In fact, we don't clear or set port flags directly,
         * but get or release references to them.  That's why we can
         * be sure that the recorded flags are still in accord with
         * the actual port flags.
         */
        if (status != (lp->lp_ifflags & flag)) {
                error = (*func)(ifp, status);
                if (error)
                        return (error);
                lp->lp_ifflags &= ~flag;
                lp->lp_ifflags |= status;
        }
        return (0);
}

/*
 * Handle IFF_* flags that require certain changes on the lagg port:
 * if "status" is true, update the port's flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
        int error, i;

        for (i = 0; lagg_pflags[i].flag; i++) {
                error = lagg_setflag(lp, lagg_pflags[i].flag,
                    status, lagg_pflags[i].func);
                if (error)
                        return (error);
        }
        return (0);
}
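
For reference, lagg_pflags is the flag/handler table the loop above
walks. A sketch of its shape, assuming the usual pairing of an IFF_*
bit with the kernel's reference-counting helper for that flag; the
authoritative table is defined near the top of this file:

/*
 * Illustrative shape of lagg_pflags: a NULL-terminated table of
 * interface flags and the helpers that acquire or release a
 * reference on them for a port.  ifpromisc() and if_allmulti()
 * are real kernel helpers with this signature.
 */
static const struct {
        int     flag;
        int     (*func)(struct ifnet *, int);
} lagg_pflags_sketch[] = {
        { IFF_PROMISC, ifpromisc },     /* promiscuous mode */
        { IFF_ALLMULTI, if_allmulti },  /* all-multicast mode */
        { 0, NULL }                     /* terminator ends the loop */
};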

static int
lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
{
        struct epoch_tracker et;
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
        int error;

#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
                MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
        NET_EPOCH_ENTER(et);
        /* We need a Tx algorithm and at least one port */
        if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
                NET_EPOCH_EXIT(et);
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENXIO);
        }

        ETHER_BPF_MTAP(ifp, m);

        error = lagg_proto_start(sc, m);
        NET_EPOCH_EXIT(et);
        return (error);
}

static int
lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
{
        struct epoch_tracker et;
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
        int error;

#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
                MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
        NET_EPOCH_ENTER(et);
        /* We need a Tx algorithm and at least one port */
        if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
                NET_EPOCH_EXIT(et);
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENXIO);
        }

        INFINIBAND_BPF_MTAP(ifp, m);

        error = lagg_proto_start(sc, m);
        NET_EPOCH_EXIT(et);
        return (error);
}

/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}

static struct mbuf *
lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
{
        struct epoch_tracker et;
        struct lagg_port *lp = ifp->if_lagg;
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;

        NET_EPOCH_ENTER(et);
        if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            lp->lp_detaching != 0 ||
            sc->sc_proto == LAGG_PROTO_NONE) {
                NET_EPOCH_EXIT(et);
                m_freem(m);
                return (NULL);
        }

        ETHER_BPF_MTAP(scifp, m);

        m = lagg_proto_input(sc, lp, m);
        if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
                m_freem(m);
                m = NULL;
        }

        NET_EPOCH_EXIT(et);
        return (m);
}

static struct mbuf *
lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m)
{
        struct epoch_tracker et;
        struct lagg_port *lp = ifp->if_lagg;
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;

        NET_EPOCH_ENTER(et);
        if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            lp->lp_detaching != 0 ||
            sc->sc_proto == LAGG_PROTO_NONE) {
                NET_EPOCH_EXIT(et);
                m_freem(m);
                return (NULL);
        }

        INFINIBAND_BPF_MTAP(scifp, m);

        m = lagg_proto_input(sc, lp, m);
        if (m != NULL && (scifp->if_flags & IFF_MONITOR) != 0) {
                m_freem(m);
                m = NULL;
        }

        NET_EPOCH_EXIT(et);
        return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

        if (sc->sc_ifflags & IFF_DEBUG)
                printf("%s\n", __func__);

        /* Ignore */
        return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
        struct epoch_tracker et;
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
        struct lagg_port *lp;

        imr->ifm_status = IFM_AVALID;
        imr->ifm_active = IFM_ETHER | IFM_AUTO;

        NET_EPOCH_ENTER(et);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (LAGG_PORTACTIVE(lp))
                        imr->ifm_status |= IFM_ACTIVE;
        }
        NET_EPOCH_EXIT(et);
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
        struct epoch_tracker et;
        struct lagg_port *lp;
        int new_link = LINK_STATE_DOWN;
        uint64_t speed;

        LAGG_XLOCK_ASSERT(sc);

        /* LACP handles link state itself */
        if (sc->sc_proto == LAGG_PROTO_LACP)
                return;

        /* Our link is considered up if at least one of our ports is active */
        NET_EPOCH_ENTER(et);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
                        new_link = LINK_STATE_UP;
                        break;
                }
        }
        NET_EPOCH_EXIT(et);
        if_link_state_change(sc->sc_ifp, new_link);

        /* Update if_baudrate to reflect the max possible speed */
        switch (sc->sc_proto) {
        case LAGG_PROTO_FAILOVER:
                sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
                    sc->sc_primary->lp_ifp->if_baudrate : 0;
                break;
        case LAGG_PROTO_ROUNDROBIN:
        case LAGG_PROTO_LOADBALANCE:
        case LAGG_PROTO_BROADCAST:
                speed = 0;
                NET_EPOCH_ENTER(et);
                CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                        speed += lp->lp_ifp->if_baudrate;
                NET_EPOCH_EXIT(et);
                sc->sc_ifp->if_baudrate = speed;
                break;
        case LAGG_PROTO_LACP:
                /* LACP updates if_baudrate itself */
                break;
        }
}

static void
lagg_port_state(struct ifnet *ifp, int state)
{
        struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
        struct lagg_softc *sc = NULL;

        if (lp != NULL)
                sc = lp->lp_softc;
        if (sc == NULL)
                return;

        LAGG_XLOCK(sc);
        lagg_linkstate(sc);
        lagg_proto_linkstate(sc, lp);
        LAGG_XUNLOCK(sc);
}

struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
        struct lagg_port *lp_next, *rval = NULL;

        /*
         * Search for a port which reports an active link state.
         */

#ifdef INVARIANTS
        /*
         * This is called either in the network epoch or with
         * LAGG_XLOCK(sc) held.
         */
        if (!in_epoch(net_epoch_preempt))
                LAGG_XLOCK_ASSERT(sc);
#endif

        if (lp == NULL)
                goto search;
        if (LAGG_PORTACTIVE(lp)) {
                rval = lp;
                goto found;
        }
        if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
            LAGG_PORTACTIVE(lp_next)) {
                rval = lp_next;
                goto found;
        }

search:
        CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
                if (LAGG_PORTACTIVE(lp_next)) {
                        return (lp_next);
                }
        }
found:
        return (rval);
}

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{
#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                struct lagg_snd_tag *lst;
                struct m_snd_tag *mst;

                mst = m->m_pkthdr.snd_tag;
                lst = mst_to_lst(mst);
                if (lst->tag->ifp != ifp) {
                        m_freem(m);
                        return (EAGAIN);
                }
                m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
                m_snd_tag_rele(mst);
        }
#endif
        return (ifp->if_transmit)(ifp, m);
}
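
The ordering of the last two tag calls above matters: the inner tag is
referenced before the wrapper is released, so the refcount can never
transiently reach zero. The same discipline in isolation, as a sketch
over the real m_snd_tag_ref()/m_snd_tag_rele() API:

/*
 * Swap an mbuf from the lagg wrapper tag to the member port's tag.
 * Acquire the new reference first and release the old one second;
 * reversing the order could drop the last reference to a tag that
 * is still reachable through 'lst'.
 */
static void
lagg_swap_snd_tag_sketch(struct mbuf *m, struct lagg_snd_tag *lst)
{
        struct m_snd_tag *old = m->m_pkthdr.snd_tag;

        m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
        m_snd_tag_rele(old);
}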

/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
        sc->sc_seq = 0;
        sc->sc_stride = 1;
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_port *lp;
        uint32_t p;

        p = atomic_fetchadd_32(&sc->sc_seq, 1);
        p /= sc->sc_stride;
        p %= sc->sc_count;
        lp = CK_SLIST_FIRST(&sc->sc_ports);

        while (p--)
                lp = CK_SLIST_NEXT(lp, lp_entries);

        /*
         * Check the port's link state. This will return the next active
         * port if the link is down or the port is NULL.
         */
        if ((lp = lagg_link_active(sc, lp)) == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENETDOWN);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}
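
The port selection above is plain modular arithmetic. A standalone
userspace illustration of how sc_seq, sc_stride and sc_count interact
(no kernel types involved):

#include <stdint.h>
#include <stdio.h>

/* Mirror of lagg_rr_start()'s index computation. */
static uint32_t
rr_pick_port(uint32_t seq, uint32_t stride, uint32_t count)
{
        return ((seq / stride) % count);
}

int
main(void)
{
        /* With stride 3 and 2 ports: 3 packets per port, alternating. */
        for (uint32_t seq = 0; seq < 12; seq++)
                printf("packet %2u -> port %u\n", seq,
                    rr_pick_port(seq, 3, 2));
        return (0);
}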

static struct mbuf *
lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;

        /* Just pass in the packet to our lagg device */
        m->m_pkthdr.rcvif = ifp;

        return (m);
}

/*
 * Broadcast mode
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
        int errors = 0;
        int ret;
        struct lagg_port *lp, *last = NULL;
        struct mbuf *m0;

        NET_EPOCH_ASSERT();
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (!LAGG_PORTACTIVE(lp))
                        continue;

                if (last != NULL) {
                        m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
                        if (m0 == NULL) {
                                ret = ENOBUFS;
                                errors++;
                                break;
                        }
                        lagg_enqueue(last->lp_ifp, m0);
                }
                last = lp;
        }

        if (last == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENOENT);
        }
        if ((last = lagg_link_active(sc, last)) == NULL) {
                errors++;
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
                m_freem(m);
                return (ENETDOWN);
        }

        ret = lagg_enqueue(last->lp_ifp, m);
        if (errors != 0)
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);

        return (ret);
}

static struct mbuf *
lagg_bcast_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;

        /* Just pass in the packet to our lagg device */
        m->m_pkthdr.rcvif = ifp;
        return (m);
}

/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_port *lp;

        /* Use the master port if active or the next available port */
        if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENETDOWN);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct lagg_port *tmp_tp;

        if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
                m->m_pkthdr.rcvif = ifp;
                return (m);
        }

        if (!LAGG_PORTACTIVE(sc->sc_primary)) {
                tmp_tp = lagg_link_active(sc, sc->sc_primary);
                /*
                 * If tmp_tp is null, we've received a packet when all
                 * our links are down. Weird, but process it anyway.
                 */
                if (tmp_tp == NULL || tmp_tp == lp) {
                        m->m_pkthdr.rcvif = ifp;
                        return (m);
                }
        }

        m_freem(m);
        return (NULL);
}

/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
        struct lagg_port *lp;
        struct lagg_lb *lb;

        LAGG_XLOCK_ASSERT(sc);
        lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
        lb->lb_key = m_ether_tcpip_hash_init();
        sc->sc_psc = lb;

        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
        struct lagg_lb *lb;

        lb = (struct lagg_lb *)sc->sc_psc;
        if (lb != NULL)
                free(lb, M_LAGG);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
        struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
        struct lagg_port *lp_next;
        int i = 0, rv;

        rv = 0;
        bzero(&lb->lb_ports, sizeof(lb->lb_ports));
        LAGG_XLOCK_ASSERT(sc);
        CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
                if (lp_next == lp)
                        continue;
                if (i >= LAGG_MAX_PORTS) {
                        rv = EINVAL;
                        break;
                }
                if (sc->sc_ifflags & IFF_DEBUG)
                        printf("%s: port %s at index %d\n",
                            sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
                lb->lb_ports[i++] = lp_next;
        }

        return (rv);
}
|
|
|
|
|
|
|
|
static int
lagg_lb_port_create(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
	struct lagg_softc *sc = lp->lp_softc;

	lagg_lb_porttable(sc, lp);
}

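/*
 * Transmit hook for load-balance mode: hash the packet to an index into
 * lb_ports[].  A hardware-supplied flow ID is used when available and
 * LAGG_OPT_USE_FLOWID is set; otherwise the Ethernet/IP/TCP headers are
 * hashed with the key generated at attach time.
 */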
static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
	struct lagg_port *lp = NULL;
	uint32_t p = 0;

	if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
	    M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		p = m->m_pkthdr.flowid >> sc->flowid_shift;
	else
		p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
	p %= sc->sc_count;
	lp = lb->lb_ports[p];

	/*
	 * Check the port's link state. This will return the next active
	 * port if the link is down or the port is NULL.
	 */
	if ((lp = lagg_link_active(sc, lp)) == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (ENETDOWN);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;

	/* Just pass in the packet to our lagg device */
	m->m_pkthdr.rcvif = ifp;

	return (m);
}

/*
 * 802.3ad LACP
 */
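/*
 * Attach the LACP protocol: set up the per-softc LACP state and
 * register every existing port with it.
 */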
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	lacp_attach(sc);
	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

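/*
 * Detach the LACP protocol: destroy each port first, then clear
 * sc_psc before handing the state to lacp_detach() so no dangling
 * pointer is left behind.
 */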
static void
lagg_lacp_detach(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	void *psc;

	LAGG_XLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	psc = sc->sc_psc;
	sc->sc_psc = NULL;
	lacp_detach(psc);
}

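/*
 * Called when the lagg interface's link-layer address changes.  The
 * LACP actor identity is derived from that address, so each LACP port
 * is destroyed and re-created to pick up the new one.
 */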
static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
	struct lagg_port *lp;

	LAGG_SXLOCK_ASSERT(sc);

	/* purge all the lacp ports */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_destroy(lp);

	/* add them back in */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		lacp_port_create(lp);
}

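/*
 * Transmit hook for LACP mode: ask the LACP code to choose a
 * distributing port for this packet; if none is available, count the
 * error and drop.
 */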
static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
	struct lagg_port *lp;
	int err;

	lp = lacp_select_tx_port(sc, m, &err);
	if (lp == NULL) {
		if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
		m_freem(m);
		return (err);
	}

	/* Send mbuf */
	return (lagg_enqueue(lp->lp_ifp, m));
}

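/*
 * Receive hook for LACP mode: untagged ETHERTYPE_SLOW frames are
 * diverted to the LACP state machine; data frames are passed up only
 * when the receiving port is collecting in the active aggregator.
 */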
static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct ether_header *eh;
	u_short etype;

	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);

	/* Tap off LACP control messages */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
		m = lacp_input(lp, m);
		if (m == NULL)
			return (NULL);
	}

	/*
	 * If the port is not collecting or not in the active aggregator,
	 * then free the mbuf and return.
	 */
	if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
		m_freem(m);
		return (NULL);
	}

	m->m_pkthdr.rcvif = ifp;
	return (m);
}