/*-
 * Copyright (c) 2004-2005 Gleb Smirnoff <glebius@FreeBSD.org>
 * Copyright (c) 2001-2003 Roman V. Palagin <romanp@unshadow.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $SourceForge: netflow.c,v 1.41 2004/09/05 11:41:10 glebius Exp $
 */

static const char rcs_id[] =
    "@(#) $FreeBSD$";

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/socket.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>

#include <netgraph/netflow/netflow.h>
#include <netgraph/netflow/ng_netflow.h>

#define NBUCKETS        (65536)         /* must be power of 2 */
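
/*
 * Note: NBUCKETS must stay a power of two so that the hash macros below
 * can reduce a hash value with a cheap mask of (NBUCKETS - 1) instead of
 * a modulo operation.
 */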

/* This hash is for TCP or UDP packets. */
#define FULL_HASH(addr1, addr2, port1, port2)   \
        (((addr1 ^ (addr1 >> 16) ^              \
        htons(addr2 ^ (addr2 >> 16))) ^         \
        port1 ^ htons(port2)) &                 \
        (NBUCKETS - 1))

/* This hash is for all other IP packets. */
#define ADDR_HASH(addr1, addr2)                 \
        ((addr1 ^ (addr1 >> 16) ^               \
        htons(addr2 ^ (addr2 >> 16))) &         \
        (NBUCKETS - 1))

/* Macros to shorten logical constructions */
/* XXX: priv must exist in namespace */
#define INACTIVE(fle)   (time_uptime - fle->f.last > priv->info.nfinfo_inact_t)
#define AGED(fle)       (time_uptime - fle->f.first > priv->info.nfinfo_act_t)
#define ISFREE(fle)     (fle->f.packets == 0)

/*
 * 4 is a magical number: statistically number of 4-packet flows is
 * bigger than 5,6,7...-packet flows by an order of magnitude. Most UDP/ICMP
 * scans are 1 packet (~ 90% of flow cache). TCP scans are 2-packet in case
 * of reachable host and 4-packet otherwise.
 */
#define SMALL(fle)      (fle->f.packets <= 4)

/*
 * Cisco uses milliseconds for uptime. Bad idea, since it overflows
 * every 48+ days. But we will do same to keep compatibility. This macro
 * does overflowable multiplication to 1000.
 */
#define MILLIUPTIME(t)  (((t) << 9) +   /* 512 */       \
                         ((t) << 8) +   /* 256 */       \
                         ((t) << 7) +   /* 128 */       \
                         ((t) << 6) +   /* 64 */        \
                         ((t) << 5) +   /* 32 */        \
                         ((t) << 3))    /* 8 */
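
/*
 * Note: 512 + 256 + 128 + 64 + 32 + 8 == 1000, so MILLIUPTIME(t) computes
 * t * 1000 using shifts and adds; the result intentionally wraps at 2^32,
 * matching Cisco's 32-bit millisecond uptime counters.
 */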

MALLOC_DECLARE(M_NETFLOW_HASH);
MALLOC_DEFINE(M_NETFLOW_HASH, "netflow_hash", "NetFlow hash");

static int export_add(item_p, struct flow_entry *);
static int export_send(priv_p, item_p, int flags);

/* Generate hash for a given flow record. */
static __inline uint32_t
ip_hash(struct flow_rec *r)
{
        switch (r->r_ip_p) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
                return FULL_HASH(r->r_src.s_addr, r->r_dst.s_addr,
                    r->r_sport, r->r_dport);
        default:
                return ADDR_HASH(r->r_src.s_addr, r->r_dst.s_addr);
        }
}

/* This is a callback from uma(9), called on alloc. */
static int
uma_ctor_flow(void *mem, int size, void *arg, int how)
{
        priv_p priv = (priv_p )arg;

        if (atomic_load_acq_32(&priv->info.nfinfo_used) >= CACHESIZE)
                return (ENOMEM);

        atomic_add_32(&priv->info.nfinfo_used, 1);

        return (0);
}

/* This is a callback from uma(9), called on free. */
static void
uma_dtor_flow(void *mem, int size, void *arg)
{
        priv_p priv = (priv_p )arg;

        atomic_subtract_32(&priv->info.nfinfo_used, 1);
}
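
/*
 * Note: when the ctor above fails with ENOMEM, uma_zalloc_arg() returns
 * NULL to the caller, which is how the CACHESIZE limit on cached flows
 * is enforced.  The ctor/dtor pair keeps nfinfo_used in step with the
 * number of live flow entries.
 */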

/*
 * Detach export datagram from priv, if there is any.
 * If there is none, allocate a new one.
 */
static item_p
get_export_dgram(priv_p priv)
{
        item_p  item = NULL;

        mtx_lock(&priv->export_mtx);
        if (priv->export_item != NULL) {
                item = priv->export_item;
                priv->export_item = NULL;
        }
        mtx_unlock(&priv->export_mtx);

        if (item == NULL) {
                struct netflow_v5_export_dgram *dgram;
                struct mbuf *m;

                m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
                if (m == NULL)
                        return (NULL);
                item = ng_package_data(m, NG_NOFLAGS);
                if (item == NULL)
                        return (NULL);
                dgram = mtod(m, struct netflow_v5_export_dgram *);
                dgram->header.count = 0;
                dgram->header.version = htons(NETFLOW_V5);
        }

        return (item);
}
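
/*
 * Note: the allocation above uses M_DONTWAIT because this runs in the
 * packet-processing path, where sleeping for an mbuf cluster is not
 * acceptable; callers therefore have to handle a NULL return.
 */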

/*
 * Re-attach incomplete datagram back to priv.
 * If there is already another one, then send the incomplete one.
 */
static void
return_export_dgram(priv_p priv, item_p item, int flags)
{
        /*
         * It may happen on SMP that some thread has already
         * put its item there; in this case we bail out and
         * send what we have to the collector.
         */
        mtx_lock(&priv->export_mtx);
        if (priv->export_item == NULL) {
                priv->export_item = item;
                mtx_unlock(&priv->export_mtx);
        } else {
                mtx_unlock(&priv->export_mtx);
                export_send(priv, item, flags);
        }
}

/*
 * The flow is over. Call export_add() and free it. If datagram is
 * full, then call export_send().
 */
static __inline void
expire_flow(priv_p priv, item_p *item, struct flow_entry *fle, int flags)
{
        if (*item == NULL)
                *item = get_export_dgram(priv);
        if (*item == NULL) {
                atomic_add_32(&priv->info.nfinfo_export_failed, 1);
                uma_zfree_arg(priv->zone, fle, priv);
                return;
        }
        if (export_add(*item, fle) > 0) {
                export_send(priv, *item, flags);
                *item = NULL;
        }
        uma_zfree_arg(priv->zone, fle, priv);
}

/* Get a snapshot of node statistics */
void
ng_netflow_copyinfo(priv_p priv, struct ng_netflow_info *i)
{
        /* XXX: atomic */
        memcpy((void *)i, (void *)&priv->info, sizeof(priv->info));
}

/*
 * Insert a record into a defined slot.
 *
 * First we get a free flow entry for ourselves, then fill in all
 * possible fields in it.
 *
 * TODO: consider dropping hash mutex while filling in datagram,
 * as this was done in previous version. Need to test & profile
 * to be sure.
 */
static __inline int
hash_insert(priv_p priv, struct flow_hash_entry *hsh, struct flow_rec *r,
        int plen, uint8_t tcp_flags)
{
        struct flow_entry *fle;
        struct sockaddr_in sin;
        struct rtentry *rt;

        mtx_assert(&hsh->mtx, MA_OWNED);

        fle = uma_zalloc_arg(priv->zone, priv, M_NOWAIT);
        if (fle == NULL) {
                atomic_add_32(&priv->info.nfinfo_alloc_failed, 1);
                return (ENOMEM);
        }

        /*
         * Now fle is totally ours. It is detached from all lists,
         * we can safely edit it.
         */
        bcopy(r, &fle->f.r, sizeof(struct flow_rec));
        fle->f.bytes = plen;
        fle->f.packets = 1;
        fle->f.tcp_flags = tcp_flags;

        fle->f.first = fle->f.last = time_uptime;

        /*
         * First we do route table lookup on destination address. So we can
         * fill in out_ifx, dst_mask, nexthop, and dst_as in future releases.
         */
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(struct sockaddr_in);
        sin.sin_family = AF_INET;
        sin.sin_addr = fle->f.r.r_dst;
        /* XXX MRT: fib 0 as a default; need the mbuf here to get the fib. */
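        /*
         * Note: rtalloc1_fib() returns the rtentry locked, which is why
         * the reference is released with RTFREE_LOCKED() below.
         */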
        rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
        if (rt != NULL) {
                fle->f.fle_o_ifx = rt->rt_ifp->if_index;

                if (rt->rt_flags & RTF_GATEWAY &&
                    rt->rt_gateway->sa_family == AF_INET)
                        fle->f.next_hop =
                            ((struct sockaddr_in *)(rt->rt_gateway))->sin_addr;

                if (rt_mask(rt))
                        fle->f.dst_mask = bitcount32(((struct sockaddr_in *)
                            rt_mask(rt))->sin_addr.s_addr);
                else if (rt->rt_flags & RTF_HOST)
                        /* Give up. We can't determine mask :( */
                        fle->f.dst_mask = 32;

                RTFREE_LOCKED(rt);
        }

        /* Do route lookup on source address, to fill in src_mask. */
        bzero(&sin, sizeof(sin));
        sin.sin_len = sizeof(struct sockaddr_in);
        sin.sin_family = AF_INET;
        sin.sin_addr = fle->f.r.r_src;
        /* XXX MRT: fib 0 as a default; revisit, need the mbuf for the fib. */
        rt = rtalloc1_fib((struct sockaddr *)&sin, 0, 0, 0);
        if (rt != NULL) {
                if (rt_mask(rt))
                        fle->f.src_mask = bitcount32(((struct sockaddr_in *)
                            rt_mask(rt))->sin_addr.s_addr);
                else if (rt->rt_flags & RTF_HOST)
                        /* Give up. We can't determine mask :( */
                        fle->f.src_mask = 32;

                RTFREE_LOCKED(rt);
        }

        /* Push the new flow at the end of the hash. */
        TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);

        return (0);
}

/*
 * Non-static functions called from ng_netflow.c
 */

/* Allocate memory and set up flow cache */
int
ng_netflow_cache_init(priv_p priv)
{
        struct flow_hash_entry  *hsh;
        int i;

        /* Initialize cache UMA zone. */
        priv->zone = uma_zcreate("NetFlow cache", sizeof(struct flow_entry),
            uma_ctor_flow, uma_dtor_flow, NULL, NULL, UMA_ALIGN_CACHE, 0);
        uma_zone_set_max(priv->zone, CACHESIZE);

        /* Allocate hash. */
        priv->hash = malloc(NBUCKETS * sizeof(struct flow_hash_entry),
            M_NETFLOW_HASH, M_WAITOK | M_ZERO);

        if (priv->hash == NULL) {
                uma_zdestroy(priv->zone);
                return (ENOMEM);
        }

        /* Initialize hash. */
        for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++) {
                mtx_init(&hsh->mtx, "hash mutex", NULL, MTX_DEF);
                TAILQ_INIT(&hsh->head);
        }

        mtx_init(&priv->export_mtx, "export dgram lock", NULL, MTX_DEF);

        return (0);
}

/* Free all flow cache memory. Called from node close method. */
void
ng_netflow_cache_flush(priv_p priv)
{
        struct flow_entry       *fle, *fle1;
        struct flow_hash_entry  *hsh;
        item_p item = NULL;
        int i;

        /*
         * We are going to free probably billable data.
         * Expire everything before freeing it.
         * No locking is required since callout is already drained.
         */
        for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++)
                TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                }

        if (item != NULL)
                export_send(priv, item, NG_QUEUE);

        uma_zdestroy(priv->zone);

        /* Destroy hash mutexes. */
        for (i = 0, hsh = priv->hash; i < NBUCKETS; i++, hsh++)
                mtx_destroy(&hsh->mtx);

        /* Free hash memory. */
        if (priv->hash)
                free(priv->hash, M_NETFLOW_HASH);

        mtx_destroy(&priv->export_mtx);
}

/* Insert packet into flow cache. */
int
ng_netflow_flow_add(priv_p priv, struct ip *ip, unsigned int src_if_index)
{
        register struct flow_entry      *fle, *fle1;
        struct flow_hash_entry          *hsh;
        struct flow_rec         r;
        item_p                  item = NULL;
        int                     hlen, plen;
        int                     error = 0;
        uint8_t                 tcp_flags = 0;

        /* Try to fill flow_rec r */
        bzero(&r, sizeof(r));

        /* Check version. */
        if (ip->ip_v != IPVERSION)
                return (EINVAL);

        /* Verify minimum header length. */
        hlen = ip->ip_hl << 2;

        if (hlen < sizeof(struct ip))
                return (EINVAL);

        r.r_src = ip->ip_src;
        r.r_dst = ip->ip_dst;

        /* Save packet length. */
        plen = ntohs(ip->ip_len);

        r.r_ip_p = ip->ip_p;
        r.r_tos = ip->ip_tos;

        r.r_i_ifx = src_if_index;

        /*
         * XXX NOTE: only the first fragment of a fragmented TCP, UDP or
         * ICMP packet will be recorded with proper s_port and d_port.
         * Following fragments will be recorded simply as an IP packet with
         * ip_proto = ip->ip_p and s_port, d_port set to zero.
         * I know it looks like a bug. But I don't want to re-implement
         * IP packet assembling here. Anyway, the (in)famous trafd works
         * this way - and nobody has complained yet :)
         */
        if ((ip->ip_off & htons(IP_OFFMASK)) == 0)
                switch (r.r_ip_p) {
                case IPPROTO_TCP:
                {
                        register struct tcphdr *tcp;

                        tcp = (struct tcphdr *)((caddr_t )ip + hlen);
                        r.r_sport = tcp->th_sport;
                        r.r_dport = tcp->th_dport;
                        tcp_flags = tcp->th_flags;
                        break;
                }
                case IPPROTO_UDP:
                        r.r_ports = *(uint32_t *)((caddr_t )ip + hlen);
                        break;
                }

        /* Update node statistics. XXX: race... */
        priv->info.nfinfo_packets ++;
        priv->info.nfinfo_bytes += plen;

        /* Find hash slot. */
        hsh = &priv->hash[ip_hash(&r)];

        mtx_lock(&hsh->mtx);

        /*
         * Go through the hash and find our entry. If we encounter an
         * entry that should be expired, purge it. We do a reverse
         * search since most active entries are first, and most
         * searches are done on most active entries.
         */
        TAILQ_FOREACH_REVERSE_SAFE(fle, &hsh->head, fhead, fle_hash, fle1) {
                if (bcmp(&r, &fle->f.r, sizeof(struct flow_rec)) == 0)
                        break;
                if ((INACTIVE(fle) && SMALL(fle)) || AGED(fle)) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                        atomic_add_32(&priv->info.nfinfo_act_exp, 1);
                }
        }

        if (fle) {                      /* An existing entry. */

                fle->f.bytes += plen;
                fle->f.packets ++;
                fle->f.tcp_flags |= tcp_flags;
                fle->f.last = time_uptime;

                /*
                 * We have the following reasons to expire flow in active way:
                 * - it hit active timeout
                 * - a TCP connection closed
                 * - it is going to overflow counter
                 */
                if (tcp_flags & TH_FIN || tcp_flags & TH_RST || AGED(fle) ||
                    (fle->f.bytes >= (UINT_MAX - IF_MAXMTU))) {
                        TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                        expire_flow(priv, &item, fle, NG_QUEUE);
                        atomic_add_32(&priv->info.nfinfo_act_exp, 1);
                } else {
                        /*
                         * It is the newest; move it to the tail,
                         * if it isn't there already. Next search will
                         * locate it quicker.
                         */
                        if (fle != TAILQ_LAST(&hsh->head, fhead)) {
                                TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                                TAILQ_INSERT_TAIL(&hsh->head, fle, fle_hash);
                        }
                }
        } else                          /* A new flow entry. */
                error = hash_insert(priv, hsh, &r, plen, tcp_flags);

        mtx_unlock(&hsh->mtx);

        if (item != NULL)
                return_export_dgram(priv, item, NG_QUEUE);

        return (error);
}
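
/*
 * Locking note for ng_netflow_flow_add(): lookups and insertions are
 * serialized per hash bucket only (hsh->mtx), so packets that hash to
 * different buckets can be processed concurrently on SMP.  The node-wide
 * packet/byte counters are deliberately updated without a lock, hence
 * the "XXX: race" remark above.
 */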

/*
 * Return records from cache to userland.
 *
 * TODO: matching a particular IP should be done in kernel, here.
 */
int
ng_netflow_flow_show(priv_p priv, uint32_t last, struct ng_mesg *resp)
{
        struct flow_hash_entry *hsh;
        struct flow_entry *fle;
        struct ngnf_flows *data;
        int i;

        data = (struct ngnf_flows *)resp->data;
        data->last = 0;
        data->nentries = 0;

        /* Check if this is a first run */
        if (last == 0) {
                hsh = priv->hash;
                i = 0;
        } else {
                if (last > NBUCKETS-1)
                        return (EINVAL);
                hsh = priv->hash + last;
                i = last;
        }

        /*
         * We will transfer not more than NREC_AT_ONCE. More data
         * will come in the next message.
         * We send the current hash index to userland, and userland should
         * return it back to us. Then, we will restart with the new entry.
         *
         * The resulting cache snapshot is inaccurate for the
         * following reasons:
         *  - we skip locked hash entries
         *  - we bail out, if someone wants our entry
         *  - we skip the rest of the entries when we hit NREC_AT_ONCE
         */
        for (; i < NBUCKETS; hsh++, i++) {
                if (mtx_trylock(&hsh->mtx) == 0)
                        continue;

                TAILQ_FOREACH(fle, &hsh->head, fle_hash) {
                        if (hsh->mtx.mtx_lock & MTX_CONTESTED)
                                break;

                        bcopy(&fle->f, &(data->entries[data->nentries]),
                            sizeof(fle->f));
                        data->nentries++;
                        if (data->nentries == NREC_AT_ONCE) {
                                mtx_unlock(&hsh->mtx);
                                if (++i < NBUCKETS)
                                        data->last = i;
                                return (0);
                        }
                }
                mtx_unlock(&hsh->mtx);
        }

        return (0);
}
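
/*
 * Note: data->last acts as a resume token.  Userland passes the returned
 * bucket index back in the next request to continue the dump; a returned
 * value of zero means the walk over the whole cache has completed.
 */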

/* We have full datagram in privdata. Send it to export hook. */
static int
export_send(priv_p priv, item_p item, int flags)
{
        struct mbuf *m = NGI_M(item);
        struct netflow_v5_export_dgram *dgram = mtod(m,
            struct netflow_v5_export_dgram *);
        struct netflow_v5_header *header = &dgram->header;
        struct timespec ts;
        int error = 0;

        /* Fill mbuf header. */
        m->m_len = m->m_pkthdr.len = sizeof(struct netflow_v5_record) *
            header->count + sizeof(struct netflow_v5_header);

        /* Fill export header. */
        header->sys_uptime = htonl(MILLIUPTIME(time_uptime));
        getnanotime(&ts);
        header->unix_secs  = htonl(ts.tv_sec);
        header->unix_nsecs = htonl(ts.tv_nsec);
        header->engine_type = 0;
        header->engine_id = 0;
        header->pad = 0;
        header->flow_seq = htonl(atomic_fetchadd_32(&priv->flow_seq,
            header->count));
        header->count = htons(header->count);

        if (priv->export != NULL)
                NG_FWD_ITEM_HOOK_FLAGS(error, item, priv->export, flags);
        else
                NG_FREE_ITEM(item);

        return (error);
}
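
/*
 * Note: atomic_fetchadd_32() returns the previous value of priv->flow_seq
 * while advancing it by the record count, so the exported header carries
 * the sequence number of the first flow in this datagram, which is what
 * NetFlow v5 collectors expect.
 */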

/* Add export record to dgram. */
static int
export_add(item_p item, struct flow_entry *fle)
{
        struct netflow_v5_export_dgram *dgram = mtod(NGI_M(item),
            struct netflow_v5_export_dgram *);
        struct netflow_v5_header *header = &dgram->header;
        struct netflow_v5_record *rec;

        rec = &dgram->r[header->count];
        header->count ++;

        KASSERT(header->count <= NETFLOW_V5_MAX_RECORDS,
            ("ng_netflow: export too big"));

        /* Fill in export record. */
        rec->src_addr = fle->f.r.r_src.s_addr;
        rec->dst_addr = fle->f.r.r_dst.s_addr;
        rec->next_hop = fle->f.next_hop.s_addr;
        rec->i_ifx    = htons(fle->f.fle_i_ifx);
        rec->o_ifx    = htons(fle->f.fle_o_ifx);
        rec->packets  = htonl(fle->f.packets);
        rec->octets   = htonl(fle->f.bytes);
        rec->first    = htonl(MILLIUPTIME(fle->f.first));
        rec->last     = htonl(MILLIUPTIME(fle->f.last));
        rec->s_port   = fle->f.r.r_sport;
        rec->d_port   = fle->f.r.r_dport;
        rec->flags    = fle->f.tcp_flags;
        rec->prot     = fle->f.r.r_ip_p;
        rec->tos      = fle->f.r.r_tos;
        rec->dst_mask = fle->f.dst_mask;
        rec->src_mask = fle->f.src_mask;

        /* Not supported fields. */
        rec->src_as = rec->dst_as = 0;

        if (header->count == NETFLOW_V5_MAX_RECORDS)
                return (1); /* end of datagram */
        else
                return (0);
}

/* Periodic flow expiry run. */
void
ng_netflow_expire(void *arg)
{
        struct flow_entry       *fle, *fle1;
        struct flow_hash_entry  *hsh;
        priv_p                  priv = (priv_p )arg;
        item_p                  item = NULL;
        uint32_t                used;
        int                     i;

        /*
         * Go through all of the cache.
         */
        for (hsh = priv->hash, i = 0; i < NBUCKETS; hsh++, i++) {
                /*
                 * Skip entries that are already being worked on.
                 */
                if (mtx_trylock(&hsh->mtx) == 0)
                        continue;

                used = atomic_load_acq_32(&priv->info.nfinfo_used);
                TAILQ_FOREACH_SAFE(fle, &hsh->head, fle_hash, fle1) {
                        /*
                         * Interrupt thread wants this entry!
                         * Quick! Quick! Bail out!
                         */
                        if (hsh->mtx.mtx_lock & MTX_CONTESTED)
                                break;

                        /*
                         * Don't expire aggressively while the hash collision
                         * ratio is predicted to be small.
                         */
                        if (used <= (NBUCKETS*2) && !INACTIVE(fle))
                                break;

                        if ((INACTIVE(fle) && (SMALL(fle) ||
                            (used > (NBUCKETS*2)))) || AGED(fle)) {
                                TAILQ_REMOVE(&hsh->head, fle, fle_hash);
                                expire_flow(priv, &item, fle, NG_NOFLAGS);
                                used--;
                                atomic_add_32(&priv->info.nfinfo_inact_exp, 1);
                        }
                }
                mtx_unlock(&hsh->mtx);
        }

        if (item != NULL)
                return_export_dgram(priv, item, NG_NOFLAGS);

        /* Schedule next expire. */
        callout_reset(&priv->exp_callout, (1*hz), &ng_netflow_expire,
            (void *)priv);
}