2005-01-07 01:45:51 +00:00
|
|
|
/*-
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1990, 1991, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from the Stanford/CMU enet packet filter,
|
|
|
|
* (net/enet.c) distributed as part of 4.3BSD, and code contributed
|
1995-05-30 08:16:23 +00:00
|
|
|
* to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
|
1994-05-24 10:09:53 +00:00
|
|
|
* Berkeley Laboratory.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
2001-10-17 10:18:42 +00:00
|
|
|
* @(#)bpf.c 8.4 (Berkeley) 1/9/95
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
2007-12-25 13:24:02 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2001-01-29 13:26:14 +00:00
|
|
|
#include "opt_bpf.h"
|
2002-07-31 16:09:38 +00:00
|
|
|
#include "opt_mac.h"
|
2001-01-29 13:26:14 +00:00
|
|
|
#include "opt_netgraph.h"
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-08-05 07:12:49 +00:00
|
|
|
#include <sys/types.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
1995-12-02 19:38:06 +00:00
|
|
|
#include <sys/conf.h>
|
2004-12-22 17:37:57 +00:00
|
|
|
#include <sys/fcntl.h>
|
1997-09-02 01:19:47 +00:00
|
|
|
#include <sys/malloc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/time.h>
|
2006-11-06 13:42:10 +00:00
|
|
|
#include <sys/priv.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/proc.h>
|
1995-12-06 23:52:35 +00:00
|
|
|
#include <sys/signalvar.h>
|
1997-03-24 12:12:36 +00:00
|
|
|
#include <sys/filio.h>
|
|
|
|
#include <sys/sockio.h>
|
|
|
|
#include <sys/ttycom.h>
|
2004-12-22 17:37:57 +00:00
|
|
|
#include <sys/uio.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-08-05 07:12:49 +00:00
|
|
|
#include <sys/event.h>
|
|
|
|
#include <sys/file.h>
|
1997-09-14 03:03:05 +00:00
|
|
|
#include <sys/poll.h>
|
2003-08-05 07:12:49 +00:00
|
|
|
#include <sys/proc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
1997-11-18 16:29:53 +00:00
|
|
|
#include <net/if.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <net/bpf.h>
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
#include <net/bpf_buffer.h>
|
2005-12-06 02:58:12 +00:00
|
|
|
#ifdef BPF_JITTER
|
|
|
|
#include <net/bpf_jitter.h>
|
|
|
|
#endif
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
#include <net/bpf_zerocopy.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <net/bpfdesc.h>
|
|
|
|
|
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/if_ether.h>
|
|
|
|
#include <sys/kernel.h>
|
1995-12-14 09:55:16 +00:00
|
|
|
#include <sys/sysctl.h>
|
1998-01-24 02:54:56 +00:00
|
|
|
|
2006-07-26 03:15:16 +00:00
|
|
|
#include <net80211/ieee80211_freebsd.h>
|
|
|
|
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
|
|
|
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
|
1995-12-08 11:19:42 +00:00
|
|
|
|
2001-01-29 13:26:14 +00:00
|
|
|
#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
|
1995-11-29 10:49:16 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#define PRINET 26 /* interruptible */
|
|
|
|
|
|
|
|
/*
|
2005-02-28 12:35:52 +00:00
|
|
|
* bpf_iflist is a list of BPF interface structures, each corresponding to a
|
|
|
|
* specific DLT. The same network interface might have several BPF interface
|
|
|
|
* structures registered by different layers in the stack (i.e., 802.11
|
|
|
|
* frames, ethernet frames, etc).
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2004-09-09 00:19:27 +00:00
|
|
|
static LIST_HEAD(, bpf_if) bpf_iflist;
|
2001-02-16 17:10:28 +00:00
|
|
|
static struct mtx bpf_mtx; /* bpf global lock */
|
2005-07-24 17:21:17 +00:00
|
|
|
static int bpf_bpfd_cnt;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-06-15 15:39:12 +00:00
|
|
|
static void bpf_attachd(struct bpf_d *, struct bpf_if *);
|
|
|
|
static void bpf_detachd(struct bpf_d *);
|
2002-03-19 21:54:18 +00:00
|
|
|
static void bpf_freed(struct bpf_d *);
|
2007-09-10 00:03:06 +00:00
|
|
|
static int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
|
2007-02-26 22:24:14 +00:00
|
|
|
struct sockaddr *, int *, struct bpf_insn *);
|
2002-03-19 21:54:18 +00:00
|
|
|
static int bpf_setif(struct bpf_d *, struct ifreq *);
|
|
|
|
static void bpf_timed_out(void *);
|
2001-02-16 17:10:28 +00:00
|
|
|
static __inline void
|
2002-03-19 21:54:18 +00:00
|
|
|
bpf_wakeup(struct bpf_d *);
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
static void catchpacket(struct bpf_d *, u_char *, u_int, u_int,
|
|
|
|
void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
|
2006-07-24 15:42:04 +00:00
|
|
|
struct timeval *);
|
2002-03-19 21:54:18 +00:00
|
|
|
static void reset_d(struct bpf_d *);
|
2005-08-22 19:35:48 +00:00
|
|
|
static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
|
2003-01-20 19:08:46 +00:00
|
|
|
static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
|
|
|
|
static int bpf_setdlt(struct bpf_d *, u_int);
|
2003-08-05 07:12:49 +00:00
|
|
|
static void filt_bpfdetach(struct knote *);
|
|
|
|
static int filt_bpfread(struct knote *, long);
|
2005-05-04 03:09:28 +00:00
|
|
|
static void bpf_drvinit(void *);
|
2005-07-24 17:21:17 +00:00
|
|
|
static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
|
|
|
|
|
|
|
|
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
|
2008-08-29 20:34:06 +00:00
|
|
|
int bpf_maxinsns = BPF_MAXINSNS;
|
2005-07-24 17:21:17 +00:00
|
|
|
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
|
|
|
|
&bpf_maxinsns, 0, "Maximum bpf program instructions");
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
static int bpf_zerocopy_enable = 0;
|
|
|
|
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
|
|
|
|
&bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
|
2005-07-24 17:21:17 +00:00
|
|
|
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW,
|
|
|
|
bpf_stats_sysctl, "bpf statistics portal");
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1995-12-08 11:19:42 +00:00
|
|
|
static d_open_t bpfopen;
|
|
|
|
static d_read_t bpfread;
|
|
|
|
static d_write_t bpfwrite;
|
|
|
|
static d_ioctl_t bpfioctl;
|
1997-09-14 03:03:05 +00:00
|
|
|
static d_poll_t bpfpoll;
|
2003-08-05 07:12:49 +00:00
|
|
|
static d_kqfilter_t bpfkqfilter;
|
1995-12-08 11:19:42 +00:00
|
|
|
|
1999-05-30 16:53:49 +00:00
|
|
|
static struct cdevsw bpf_cdevsw = {
|
2004-02-21 21:10:55 +00:00
|
|
|
.d_version = D_VERSION,
|
2003-03-03 12:15:54 +00:00
|
|
|
.d_open = bpfopen,
|
|
|
|
.d_read = bpfread,
|
|
|
|
.d_write = bpfwrite,
|
|
|
|
.d_ioctl = bpfioctl,
|
|
|
|
.d_poll = bpfpoll,
|
|
|
|
.d_name = "bpf",
|
2003-08-05 07:12:49 +00:00
|
|
|
.d_kqfilter = bpfkqfilter,
|
1999-05-30 16:53:49 +00:00
|
|
|
};
|
1995-12-08 11:19:42 +00:00
|
|
|
|
2003-08-05 07:12:49 +00:00
|
|
|
static struct filterops bpfread_filtops =
|
2004-02-16 18:19:15 +00:00
|
|
|
{ 1, NULL, filt_bpfdetach, filt_bpfread };
|
1995-12-08 11:19:42 +00:00
|
|
|
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
/*
|
|
|
|
* Wrapper functions for various buffering methods. If the set of buffer
|
|
|
|
* modes expands, we will probably want to introduce a switch data structure
|
|
|
|
* similar to protosw, etc.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
|
|
|
|
u_int len)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
return (bpf_buffer_append_bytes(d, buf, offset, src, len));
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
d->bd_zcopy++;
|
|
|
|
return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("bpf_buf_append_bytes");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
|
|
|
|
u_int len)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
d->bd_zcopy++;
|
|
|
|
return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("bpf_buf_append_mbuf");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-07-05 20:11:28 +00:00
|
|
|
/*
|
|
|
|
* This function gets called when the free buffer is re-assigned.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bpf_buf_reclaimed(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
return;
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
bpf_zerocopy_buf_reclaimed(d);
|
|
|
|
return;
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("bpf_buf_reclaimed");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
/*
|
|
|
|
* If the buffer mechanism has a way to decide that a held buffer can be made
|
|
|
|
* free, then it is exposed via the bpf_canfreebuf() interface. (1) is
|
|
|
|
* returned if the buffer can be discarded, (0) is returned if it cannot.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
bpf_canfreebuf(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
return (bpf_zerocopy_canfreebuf(d));
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2008-04-07 02:51:00 +00:00
|
|
|
/*
|
|
|
|
* Allow the buffer model to indicate that the current store buffer is
|
|
|
|
* immutable, regardless of the appearance of space. Return (1) if the
|
|
|
|
* buffer is writable, and (0) if not.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
bpf_canwritebuf(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
return (bpf_zerocopy_canwritebuf(d));
|
|
|
|
}
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Notify buffer model that an attempt to write to the store buffer has
|
|
|
|
* resulted in a dropped packet, in which case the buffer may be considered
|
|
|
|
* full.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bpf_buffull(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
bpf_zerocopy_buffull(d);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Notify the buffer model that a buffer has moved into the hold position.
|
|
|
|
*/
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
void
|
|
|
|
bpf_bufheld(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
bpf_zerocopy_bufheld(d);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bpf_free(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
return (bpf_buffer_free(d));
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
return (bpf_zerocopy_free(d));
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("bpf_buf_free");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
return (bpf_buffer_uiomove(d, buf, len, uio));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
return (bpf_buffer_ioctl_sblen(d, i));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
return (bpf_zerocopy_ioctl_getzmax(td, d, i));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* General BPF functions.
|
|
|
|
*/
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Copy the data of a write(2) on a BPF descriptor into a newly
 * allocated mbuf, run it through the write filter, and fill in the
 * link-level sockaddr and header length the caller needs to transmit
 * the packet.  On success *mp owns the mbuf; on failure it is freed
 * here.  Returns 0 or an errno.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	/*
	 * NOTE(review): if len < hlen the subtraction goes negative; the
	 * comparison against if_mtu then promotes it, so short writes
	 * appear to be rejected with EMSGSIZE rather than falling
	 * through -- confirm this is the intended behavior.
	 */
	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	/* Pick the smallest mbuf/cluster class that holds the whole write. */
	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/* The link-level header must fit entirely in the first mbuf. */
	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* A zero return from the write filter means "drop this packet". */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attach file to the bpf interface, i.e. make d listen on bp.
|
|
|
|
*/
|
|
|
|
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	/* Global count of attached descriptors; updated under BPFIF_LOCK. */
	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Detach a file from its interface.
|
|
|
|
*/
|
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpf_detachd(struct bpf_d *d)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2000-06-01 21:57:13 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct bpf_if *bp;
|
2004-09-09 04:11:12 +00:00
|
|
|
struct ifnet *ifp;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
bp = d->bd_bif;
|
2004-09-09 04:11:12 +00:00
|
|
|
BPFIF_LOCK(bp);
|
|
|
|
BPFD_LOCK(d);
|
|
|
|
ifp = d->bd_bif->bif_ifp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove d from the interface's descriptor list.
|
|
|
|
*/
|
|
|
|
LIST_REMOVE(d, bd_next);
|
|
|
|
|
2005-07-24 17:21:17 +00:00
|
|
|
bpf_bpfd_cnt--;
|
2004-07-24 16:58:56 +00:00
|
|
|
d->bd_bif = NULL;
|
2004-09-09 04:11:12 +00:00
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
BPFIF_UNLOCK(bp);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Check if this descriptor had requested promiscuous mode.
|
|
|
|
* If so, turn it off.
|
|
|
|
*/
|
|
|
|
if (d->bd_promisc) {
|
|
|
|
d->bd_promisc = 0;
|
2004-09-09 04:11:12 +00:00
|
|
|
error = ifpromisc(ifp, 0);
|
2000-06-01 21:57:13 +00:00
|
|
|
if (error != 0 && error != ENXIO) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2000-06-01 21:57:13 +00:00
|
|
|
* ENXIO can happen if a pccard is unplugged
|
1994-05-24 10:09:53 +00:00
|
|
|
* Something is really wrong if we were able to put
|
|
|
|
* the driver into promiscuous mode, but can't
|
|
|
|
* take it out.
|
|
|
|
*/
|
2003-01-20 19:08:46 +00:00
|
|
|
if_printf(bp->bif_ifp,
|
|
|
|
"bpf_detach: ifpromisc failed (%d)\n", error);
|
2000-06-01 21:57:13 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-08-13 15:41:21 +00:00
|
|
|
/*
|
|
|
|
* Close the descriptor by detaching it from its interface,
|
|
|
|
* deallocating its buffers, and marking it free.
|
|
|
|
*/
|
|
|
|
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	/* Cancel any pending read timeout and mark the descriptor idle. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	/* Drop SIGIO ownership before tearing the descriptor down. */
	funsetown(&d->bd_sigio);
	/* Detach from the interface, if still attached. */
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	/* Wake any select/poll waiters so they notice the close. */
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	/* Release remaining descriptor resources, then the descriptor. */
	bpf_freed(d);
	free(d, M_BPF);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Open ethernet device. Returns ENXIO for illegal minor device number,
|
|
|
|
* EBUSY if file is open by another process.
|
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
1995-12-08 11:19:42 +00:00
|
|
|
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	/*
	 * Each open gets its own zeroed descriptor, registered as cdev
	 * private data with bpf_dtor as the last-close destructor.
	 */
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	/* Classic read(2)-based buffering is the default mode. */
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_pid = td->td_proc->p_pid;
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	/* Per-descriptor lock also protects the knote list below. */
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);

	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bpfread - read next chunk of packets from buffers
|
|
|
|
*/
|
1995-12-08 11:19:42 +00:00
|
|
|
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int timed_out;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	/* read(2) is only supported in the classic buffer mode. */
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	/* Cancel any pending timeout; remember whether one already fired. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep on the descriptor; bpf_wakeup() does wakeup(d). */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	/* The drained hold buffer becomes the new free buffer. */
	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_UNLOCK(d);

	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If there are processes sleeping on this descriptor, wake them up.
|
|
|
|
*/
|
2001-02-16 17:10:28 +00:00
|
|
|
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	/* A pending read timeout is now moot; cancel it. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	/* Wake blocked readers (bpfread sleeps on d itself). */
	wakeup(d);
	/* Deliver SIGIO if asynchronous notification was requested. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	/* Notify select/poll and kqueue waiters. */
	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}
|
|
|
|
|
2001-12-14 22:17:54 +00:00
|
|
|
/*
 * Callout handler for the read timeout: if the descriptor is still
 * waiting, mark it timed out and, when buffered data is already
 * available, wake the reader immediately.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}
|
|
|
|
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
static int
|
|
|
|
bpf_ready(struct bpf_d *d)
|
|
|
|
{
|
|
|
|
|
|
|
|
BPFD_LOCK_ASSERT(d);
|
|
|
|
|
|
|
|
if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
|
|
|
|
return (1);
|
|
|
|
if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
|
|
|
|
d->bd_slen != 0)
|
|
|
|
return (1);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2006-06-15 15:39:12 +00:00
|
|
|
/*
 * Device write: inject a packet, supplied by the user process, onto the
 * interface the descriptor is attached to.  The user data is converted to
 * an mbuf chain by bpf_movein() (which also applies the write filter and
 * extracts any link-level header) and handed to the interface's output
 * routine.  Write statistics (bd_wcount/bd_wfcount/bd_wdcount) are
 * maintained on every path.
 */
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	/* Recover the per-open descriptor stashed in the cdev private data. */
	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/* Refresh the PID associated with this descriptor. */
	d->bd_pid = curthread->td_proc->p_pid;
	d->bd_wcount++;
	/* No interface attached yet. */
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	/* Zero-length writes succeed trivially. */
	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	/*
	 * Copy the user data into an mbuf chain; on success we own 'm' and
	 * must either hand it off to if_output (which consumes it) or free
	 * it.  hlen is the length of any link-level header bpf_movein
	 * placed at the front of the chain.
	 */
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	/* Caller supplied a complete link-level header; flag it for output. */
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		/*
		 * Feedback mode: duplicate the packet so a copy can be fed
		 * back through if_input after a successful transmit.  A
		 * failed m_dup (mc == NULL) silently skips feedback.
		 */
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	/* Strip the link-level header; if_output will prepend its own. */
	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

#ifdef MAC
	/* Label both the outgoing mbuf and the feedback copy. */
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	/* if_output consumes 'm' regardless of the result. */
	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		/* Feed the copy back in only if the transmit succeeded. */
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}

	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reset a descriptor by flushing its packet buffer and clearing the
|
2001-02-16 17:10:28 +00:00
|
|
|
* receive and drop counts.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
reset_d(struct bpf_d *d)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-02-16 17:10:28 +00:00
|
|
|
|
|
|
|
mtx_assert(&d->bd_mtx, MA_OWNED);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (d->bd_hbuf) {
|
|
|
|
/* Free the hold buffer. */
|
|
|
|
d->bd_fbuf = d->bd_hbuf;
|
2004-07-24 16:58:56 +00:00
|
|
|
d->bd_hbuf = NULL;
|
2008-07-05 20:11:28 +00:00
|
|
|
bpf_buf_reclaimed(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
d->bd_slen = 0;
|
|
|
|
d->bd_hlen = 0;
|
|
|
|
d->bd_rcount = 0;
|
|
|
|
d->bd_dcount = 0;
|
2005-07-24 17:21:17 +00:00
|
|
|
d->bd_fcount = 0;
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
d->bd_wcount = 0;
|
|
|
|
d->bd_wfcount = 0;
|
|
|
|
d->bd_wdcount = 0;
|
|
|
|
d->bd_zcopy = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* FIONREAD Check for read packet available.
|
|
|
|
* SIOCGIFADDR Get interface address - convenient hook to driver.
|
|
|
|
* BIOCGBLEN Get buffer len [for read()].
|
2008-07-07 09:25:49 +00:00
|
|
|
* BIOCSETF Set read filter.
|
|
|
|
* BIOCSETFNR Set read filter without resetting descriptor.
|
|
|
|
* BIOCSETWF Set write filter.
|
1994-05-24 10:09:53 +00:00
|
|
|
* BIOCFLUSH Flush read packet buffer.
|
|
|
|
* BIOCPROMISC Put interface into promiscuous mode.
|
|
|
|
* BIOCGDLT Get link layer type.
|
|
|
|
* BIOCGETIF Get interface name.
|
|
|
|
* BIOCSETIF Set interface.
|
|
|
|
* BIOCSRTIMEOUT Set read timeout.
|
|
|
|
* BIOCGRTIMEOUT Get read timeout.
|
|
|
|
* BIOCGSTATS Get packet stats.
|
|
|
|
* BIOCIMMEDIATE Set immediate mode.
|
|
|
|
* BIOCVERSION Get filter language version.
|
1999-10-15 05:07:00 +00:00
|
|
|
* BIOCGHDRCMPLT Get "header already complete" flag
|
|
|
|
* BIOCSHDRCMPLT Set "header already complete" flag
|
2007-02-26 22:24:14 +00:00
|
|
|
* BIOCGDIRECTION Get packet direction flag
|
|
|
|
* BIOCSDIRECTION Set packet direction flag
|
2005-08-22 19:35:48 +00:00
|
|
|
* BIOCLOCK Set "locked" flag
|
2007-02-26 22:24:14 +00:00
|
|
|
* BIOCFEEDBACK Set packet feedback mode.
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
* BIOCSETZBUF Set current zero-copy buffer locations.
|
|
|
|
* BIOCGETZMAX Get maximum zero-copy buffer size.
|
|
|
|
* BIOCROTZBUF Force rotation of zero-copy buffer
|
|
|
|
* BIOCSETBUFMODE Set buffer mode.
|
|
|
|
* BIOCGETBUFMODE Get current buffer mode.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
1995-12-08 11:19:42 +00:00
|
|
|
static int
|
2006-06-15 15:39:12 +00:00
|
|
|
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
|
|
|
|
struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2008-08-13 15:41:21 +00:00
|
|
|
struct bpf_d *d;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = devfs_get_cdevpriv((void **)&d);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2007-12-23 14:10:33 +00:00
|
|
|
/*
|
2005-09-05 23:08:04 +00:00
|
|
|
* Refresh PID associated with this descriptor.
|
|
|
|
*/
|
2001-12-14 22:17:54 +00:00
|
|
|
BPFD_LOCK(d);
|
2005-10-04 15:06:10 +00:00
|
|
|
d->bd_pid = td->td_proc->p_pid;
|
2001-12-14 22:17:54 +00:00
|
|
|
if (d->bd_state == BPF_WAITING)
|
|
|
|
callout_stop(&d->bd_callout);
|
|
|
|
d->bd_state = BPF_IDLE;
|
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
|
2005-08-22 19:35:48 +00:00
|
|
|
if (d->bd_locked == 1) {
|
|
|
|
switch (cmd) {
|
|
|
|
case BIOCGBLEN:
|
|
|
|
case BIOCFLUSH:
|
|
|
|
case BIOCGDLT:
|
2007-12-23 14:10:33 +00:00
|
|
|
case BIOCGDLTLIST:
|
2005-08-22 19:35:48 +00:00
|
|
|
case BIOCGETIF:
|
|
|
|
case BIOCGRTIMEOUT:
|
|
|
|
case BIOCGSTATS:
|
|
|
|
case BIOCVERSION:
|
|
|
|
case BIOCGRSIG:
|
|
|
|
case BIOCGHDRCMPLT:
|
2007-02-26 22:24:14 +00:00
|
|
|
case BIOCFEEDBACK:
|
2005-08-22 19:35:48 +00:00
|
|
|
case FIONREAD:
|
|
|
|
case BIOCLOCK:
|
|
|
|
case BIOCSRTIMEOUT:
|
|
|
|
case BIOCIMMEDIATE:
|
|
|
|
case TIOCGPGRP:
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
case BIOCROTZBUF:
|
2005-08-22 19:35:48 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EPERM);
|
|
|
|
}
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
switch (cmd) {
|
|
|
|
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for read packet available.
|
|
|
|
*/
|
|
|
|
case FIONREAD:
|
|
|
|
{
|
|
|
|
int n;
|
|
|
|
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_LOCK(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
n = d->bd_slen;
|
|
|
|
if (d->bd_hbuf)
|
|
|
|
n += d->bd_hlen;
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_UNLOCK(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
*(int *)addr = n;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case SIOCGIFADDR:
|
|
|
|
{
|
|
|
|
struct ifnet *ifp;
|
|
|
|
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EINVAL;
|
|
|
|
else {
|
|
|
|
ifp = d->bd_bif->bif_ifp;
|
|
|
|
error = (*ifp->if_ioctl)(ifp, cmd, addr);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get buffer len [for read()].
|
|
|
|
*/
|
|
|
|
case BIOCGBLEN:
|
|
|
|
*(u_int *)addr = d->bd_bufsize;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set buffer length.
|
|
|
|
*/
|
|
|
|
case BIOCSBLEN:
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
error = bpf_ioctl_sblen(d, (u_int *)addr);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set link layer read filter.
|
|
|
|
*/
|
|
|
|
case BIOCSETF:
|
2008-07-07 09:25:49 +00:00
|
|
|
case BIOCSETFNR:
|
2005-08-22 19:35:48 +00:00
|
|
|
case BIOCSETWF:
|
|
|
|
error = bpf_setf(d, (struct bpf_program *)addr, cmd);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush read packet buffer.
|
|
|
|
*/
|
|
|
|
case BIOCFLUSH:
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_LOCK(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
reset_d(d);
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_UNLOCK(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Put interface into promiscuous mode.
|
|
|
|
*/
|
|
|
|
case BIOCPROMISC:
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* No interface attached yet.
|
|
|
|
*/
|
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (d->bd_promisc == 0) {
|
|
|
|
error = ifpromisc(d->bd_bif->bif_ifp, 1);
|
|
|
|
if (error == 0)
|
|
|
|
d->bd_promisc = 1;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
2003-01-20 19:08:46 +00:00
|
|
|
* Get current data link type.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
case BIOCGDLT:
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
*(u_int *)addr = d->bd_bif->bif_dlt;
|
|
|
|
break;
|
|
|
|
|
2003-03-02 15:56:49 +00:00
|
|
|
/*
|
2003-01-20 19:08:46 +00:00
|
|
|
* Get a list of supported data link types.
|
|
|
|
*/
|
|
|
|
case BIOCGDLTLIST:
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL)
|
2003-01-20 19:08:46 +00:00
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set data link type.
|
|
|
|
*/
|
|
|
|
case BIOCSDLT:
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL)
|
2003-01-20 19:08:46 +00:00
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
error = bpf_setdlt(d, *(u_int *)addr);
|
|
|
|
break;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
1999-11-03 21:32:28 +00:00
|
|
|
* Get interface name.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
case BIOCGETIF:
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_bif == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
error = EINVAL;
|
1999-11-03 21:32:28 +00:00
|
|
|
else {
|
|
|
|
struct ifnet *const ifp = d->bd_bif->bif_ifp;
|
|
|
|
struct ifreq *const ifr = (struct ifreq *)addr;
|
|
|
|
|
2003-10-31 18:32:15 +00:00
|
|
|
strlcpy(ifr->ifr_name, ifp->if_xname,
|
|
|
|
sizeof(ifr->ifr_name));
|
1999-11-03 21:32:28 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set interface.
|
|
|
|
*/
|
|
|
|
case BIOCSETIF:
|
|
|
|
error = bpf_setif(d, (struct ifreq *)addr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set read timeout.
|
|
|
|
*/
|
|
|
|
case BIOCSRTIMEOUT:
|
|
|
|
{
|
|
|
|
struct timeval *tv = (struct timeval *)addr;
|
1998-10-04 17:20:22 +00:00
|
|
|
|
1998-10-08 00:32:08 +00:00
|
|
|
/*
|
|
|
|
* Subtract 1 tick from tvtohz() since this isn't
|
|
|
|
* a one-shot timer.
|
|
|
|
*/
|
|
|
|
if ((error = itimerfix(tv)) == 0)
|
|
|
|
d->bd_rtout = tvtohz(tv) - 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get read timeout.
|
|
|
|
*/
|
|
|
|
case BIOCGRTIMEOUT:
|
|
|
|
{
|
|
|
|
struct timeval *tv = (struct timeval *)addr;
|
|
|
|
|
1998-10-08 00:32:08 +00:00
|
|
|
tv->tv_sec = d->bd_rtout / hz;
|
|
|
|
tv->tv_usec = (d->bd_rtout % hz) * tick;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get packet stats.
|
|
|
|
*/
|
|
|
|
case BIOCGSTATS:
|
|
|
|
{
|
|
|
|
struct bpf_stat *bs = (struct bpf_stat *)addr;
|
|
|
|
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
/* XXXCSJP overflow */
|
1994-05-24 10:09:53 +00:00
|
|
|
bs->bs_recv = d->bd_rcount;
|
|
|
|
bs->bs_drop = d->bd_dcount;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set immediate mode.
|
|
|
|
*/
|
|
|
|
case BIOCIMMEDIATE:
|
|
|
|
d->bd_immediate = *(u_int *)addr;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCVERSION:
|
|
|
|
{
|
|
|
|
struct bpf_version *bv = (struct bpf_version *)addr;
|
|
|
|
|
|
|
|
bv->bv_major = BPF_MAJOR_VERSION;
|
|
|
|
bv->bv_minor = BPF_MINOR_VERSION;
|
|
|
|
break;
|
|
|
|
}
|
1995-06-15 18:11:00 +00:00
|
|
|
|
1999-10-15 05:07:00 +00:00
|
|
|
/*
|
|
|
|
* Get "header already complete" flag
|
|
|
|
*/
|
|
|
|
case BIOCGHDRCMPLT:
|
|
|
|
*(u_int *)addr = d->bd_hdrcmplt;
|
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set "header already complete" flag
|
|
|
|
*/
|
|
|
|
case BIOCSHDRCMPLT:
|
|
|
|
d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
|
|
|
|
break;
|
|
|
|
|
2000-03-18 06:30:42 +00:00
|
|
|
/*
|
2007-02-26 22:24:14 +00:00
|
|
|
* Get packet direction flag
|
2000-03-18 06:30:42 +00:00
|
|
|
*/
|
2007-02-26 22:24:14 +00:00
|
|
|
case BIOCGDIRECTION:
|
|
|
|
*(u_int *)addr = d->bd_direction;
|
2000-03-18 06:30:42 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
/*
|
2007-02-26 22:24:14 +00:00
|
|
|
* Set packet direction flag
|
2000-03-18 06:30:42 +00:00
|
|
|
*/
|
2007-02-26 22:24:14 +00:00
|
|
|
case BIOCSDIRECTION:
|
|
|
|
{
|
|
|
|
u_int direction;
|
|
|
|
|
|
|
|
direction = *(u_int *)addr;
|
|
|
|
switch (direction) {
|
|
|
|
case BPF_D_IN:
|
|
|
|
case BPF_D_INOUT:
|
|
|
|
case BPF_D_OUT:
|
|
|
|
d->bd_direction = direction;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EINVAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCFEEDBACK:
|
|
|
|
d->bd_feedback = *(u_int *)addr;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCLOCK:
|
|
|
|
d->bd_locked = 1;
|
2000-03-18 06:30:42 +00:00
|
|
|
break;
|
|
|
|
|
1995-06-15 18:11:00 +00:00
|
|
|
case FIONBIO: /* Non-blocking I/O */
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIOASYNC: /* Send signal on receive packets */
|
|
|
|
d->bd_async = *(int *)addr;
|
|
|
|
break;
|
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
case FIOSETOWN:
|
|
|
|
error = fsetown(*(int *)addr, &d->bd_sigio);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case FIOGETOWN:
|
2002-10-03 02:13:00 +00:00
|
|
|
*(int *)addr = fgetown(&d->bd_sigio);
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
break;
|
1995-06-15 18:11:00 +00:00
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
/* This is deprecated, FIOSETOWN should be used instead. */
|
|
|
|
case TIOCSPGRP:
|
|
|
|
error = fsetown(-(*(int *)addr), &d->bd_sigio);
|
1995-06-15 18:11:00 +00:00
|
|
|
break;
|
|
|
|
|
Installed the second patch attached to kern/7899 with some changes suggested
by bde, a few other tweaks to get the patch to apply cleanly again and
some improvements to the comments.
This change closes some fairly minor security holes associated with
F_SETOWN, fixes a few bugs, and removes some limitations that F_SETOWN
had on tty devices. For more details, see the description on the PR.
Because this patch increases the size of the proc and pgrp structures,
it is necessary to re-install the includes and recompile libkvm,
the vinum lkm, fstat, gcore, gdb, ipfilter, ps, top, and w.
PR: kern/7899
Reviewed by: bde, elvind
1998-11-11 10:04:13 +00:00
|
|
|
/* This is deprecated, FIOGETOWN should be used instead. */
|
1995-06-15 18:11:00 +00:00
|
|
|
case TIOCGPGRP:
|
2002-10-03 02:13:00 +00:00
|
|
|
*(int *)addr = -fgetown(&d->bd_sigio);
|
1995-06-15 18:11:00 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCSRSIG: /* Set receive signal */
|
|
|
|
{
|
2003-03-02 15:56:49 +00:00
|
|
|
u_int sig;
|
1995-06-15 18:11:00 +00:00
|
|
|
|
|
|
|
sig = *(u_int *)addr;
|
|
|
|
|
|
|
|
if (sig >= NSIG)
|
|
|
|
error = EINVAL;
|
|
|
|
else
|
|
|
|
d->bd_sig = sig;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case BIOCGRSIG:
|
|
|
|
*(u_int *)addr = d->bd_sig;
|
|
|
|
break;
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
|
|
|
|
case BIOCGETBUFMODE:
|
|
|
|
*(u_int *)addr = d->bd_bufmode;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCSETBUFMODE:
|
|
|
|
/*
|
|
|
|
* Allow the buffering mode to be changed as long as we
|
|
|
|
* haven't yet committed to a particular mode. Our
|
|
|
|
* definition of commitment, for now, is whether or not a
|
|
|
|
* buffer has been allocated or an interface attached, since
|
|
|
|
* that's the point where things get tricky.
|
|
|
|
*/
|
|
|
|
switch (*(u_int *)addr) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
if (bpf_zerocopy_enable)
|
|
|
|
break;
|
|
|
|
/* FALLSTHROUGH */
|
|
|
|
|
|
|
|
default:
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
BPFD_LOCK(d);
|
|
|
|
if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
|
|
|
|
d->bd_fbuf != NULL || d->bd_bif != NULL) {
|
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
d->bd_bufmode = *(u_int *)addr;
|
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BIOCGETZMAX:
|
|
|
|
return (bpf_ioctl_getzmax(td, d, (size_t *)addr));
|
|
|
|
|
|
|
|
case BIOCSETZBUF:
|
|
|
|
return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr));
|
|
|
|
|
|
|
|
case BIOCROTZBUF:
|
|
|
|
return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set d's packet filter program to fp. If this file already has a filter,
|
|
|
|
* free it and replace it. Returns EINVAL for bogus requests.
|
|
|
|
*/
|
1995-12-14 09:55:16 +00:00
|
|
|
static int
|
2006-06-15 15:39:12 +00:00
|
|
|
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct bpf_insn *fcode, *old;
|
2005-08-22 19:35:48 +00:00
|
|
|
u_int wfilter, flen, size;
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
bpf_jit_filter *ofunc;
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-08-22 19:35:48 +00:00
|
|
|
if (cmd == BIOCSETWF) {
|
|
|
|
old = d->bd_wfilter;
|
|
|
|
wfilter = 1;
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
ofunc = NULL;
|
|
|
|
#endif
|
2005-08-22 19:35:48 +00:00
|
|
|
} else {
|
|
|
|
wfilter = 0;
|
|
|
|
old = d->bd_rfilter;
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
ofunc = d->bd_bfilter;
|
|
|
|
#endif
|
2005-08-22 19:35:48 +00:00
|
|
|
}
|
2004-07-24 16:58:56 +00:00
|
|
|
if (fp->bf_insns == NULL) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if (fp->bf_len != 0)
|
|
|
|
return (EINVAL);
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_LOCK(d);
|
2005-08-22 19:35:48 +00:00
|
|
|
if (wfilter)
|
|
|
|
d->bd_wfilter = NULL;
|
2005-12-06 02:58:12 +00:00
|
|
|
else {
|
2005-08-22 19:35:48 +00:00
|
|
|
d->bd_rfilter = NULL;
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
d->bd_bfilter = NULL;
|
|
|
|
#endif
|
2008-07-07 09:25:49 +00:00
|
|
|
if (cmd == BIOCSETF)
|
|
|
|
reset_d(d);
|
2005-12-06 02:58:12 +00:00
|
|
|
}
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_UNLOCK(d);
|
2004-07-24 16:58:56 +00:00
|
|
|
if (old != NULL)
|
1999-08-15 09:38:21 +00:00
|
|
|
free((caddr_t)old, M_BPF);
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
if (ofunc != NULL)
|
|
|
|
bpf_destroy_jit_filter(ofunc);
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
flen = fp->bf_len;
|
2005-06-06 22:19:59 +00:00
|
|
|
if (flen > bpf_maxinsns)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
|
|
|
|
|
|
|
size = flen * sizeof(*fp->bf_insns);
|
2003-02-19 05:47:46 +00:00
|
|
|
fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
|
|
|
|
bpf_validate(fcode, (int)flen)) {
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_LOCK(d);
|
2005-08-22 19:35:48 +00:00
|
|
|
if (wfilter)
|
|
|
|
d->bd_wfilter = fcode;
|
2005-12-06 02:58:12 +00:00
|
|
|
else {
|
2005-08-22 19:35:48 +00:00
|
|
|
d->bd_rfilter = fcode;
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
d->bd_bfilter = bpf_jitter(fcode, flen);
|
|
|
|
#endif
|
2008-07-07 09:25:49 +00:00
|
|
|
if (cmd == BIOCSETF)
|
|
|
|
reset_d(d);
|
2005-12-06 02:58:12 +00:00
|
|
|
}
|
2001-02-16 17:10:28 +00:00
|
|
|
BPFD_UNLOCK(d);
|
2004-07-24 16:58:56 +00:00
|
|
|
if (old != NULL)
|
1999-08-15 09:38:21 +00:00
|
|
|
free((caddr_t)old, M_BPF);
|
2006-05-30 19:24:01 +00:00
|
|
|
#ifdef BPF_JITTER
|
2005-12-06 02:58:12 +00:00
|
|
|
if (ofunc != NULL)
|
|
|
|
bpf_destroy_jit_filter(ofunc);
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
1999-08-15 09:38:21 +00:00
|
|
|
free((caddr_t)fcode, M_BPF);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Detach a file from its current interface (if attached at all) and attach
|
|
|
|
* to the interface indicated by the name stored in ifr.
|
|
|
|
* Return an errno or 0.
|
|
|
|
*/
|
|
|
|
static int
|
2006-06-15 15:39:12 +00:00
|
|
|
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct bpf_if *bp;
|
1996-02-06 18:51:28 +00:00
|
|
|
struct ifnet *theywant;
|
|
|
|
|
|
|
|
theywant = ifunit(ifr->ifr_name);
|
2006-06-02 19:59:33 +00:00
|
|
|
if (theywant == NULL || theywant->if_bpf == NULL)
|
|
|
|
return (ENXIO);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-06-02 19:59:33 +00:00
|
|
|
bp = theywant->if_bpf;
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
* Behavior here depends on the buffering model. If we're using
|
|
|
|
* kernel memory buffers, then we can allocate them here. If we're
|
|
|
|
* using zero-copy, then the user process must have registered
|
|
|
|
* buffers by the time we get here. If not, return an error.
|
|
|
|
*
|
|
|
|
* XXXRW: There are locking issues here with multi-threaded use: what
|
|
|
|
* if two threads try to set the interface at once?
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
switch (d->bd_bufmode) {
|
|
|
|
case BPF_BUFMODE_BUFFER:
|
|
|
|
if (d->bd_sbuf == NULL)
|
|
|
|
bpf_buffer_alloc(d);
|
|
|
|
KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
|
|
|
|
break;
|
|
|
|
|
|
|
|
case BPF_BUFMODE_ZBUF:
|
|
|
|
if (d->bd_sbuf == NULL)
|
|
|
|
return (EINVAL);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
panic("bpf_setif: bufmode %d", d->bd_bufmode);
|
|
|
|
}
|
2006-06-02 19:59:33 +00:00
|
|
|
if (bp != d->bd_bif) {
|
|
|
|
if (d->bd_bif)
|
|
|
|
/*
|
|
|
|
* Detach if attached to something else.
|
|
|
|
*/
|
|
|
|
bpf_detachd(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-06-02 19:59:33 +00:00
|
|
|
bpf_attachd(d, bp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2006-06-02 19:59:33 +00:00
|
|
|
BPFD_LOCK(d);
|
|
|
|
reset_d(d);
|
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
return (0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	/*
	 * If the per-open descriptor state cannot be found, or the
	 * descriptor is not attached to an interface, report the device
	 * readable, writable and hung up so the caller never blocks on it.
	 */
	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	/* Writes are always reported as non-blocking. */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			/* Not ready: record this thread for selwakeup(). */
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
|
|
|
|
|
2003-08-05 07:12:49 +00:00
|
|
|
/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	/* Reject unknown descriptor state and any non-read filter. */
	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	/* Stash the descriptor so the filter callbacks can find it. */
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}
|
|
|
|
|
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
filt_bpfdetach(struct knote *kn)
|
2003-08-05 07:12:49 +00:00
|
|
|
{
|
|
|
|
struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
|
|
|
|
|
2004-08-15 06:24:42 +00:00
|
|
|
knlist_remove(&d->bd_sel.si_note, kn, 0);
|
2003-08-05 07:12:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * EVFILT_READ filter callback: report whether buffered packet data is
 * available on the descriptor, publishing the byte count via kn_data.
 * If nothing is ready and a read timeout is configured, arm the
 * one-shot timeout callout.  Requires the descriptor lock to be held.
 */
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		/* Store-buffer bytes, plus hold-buffer bytes if present. */
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		/* Arm the read timeout so bpf_timed_out() fires later. */
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;		/* bytes of the packet the filter accepts */
	int gottime;		/* timestamp taken lazily, at most once */
	struct timeval tv;

	gottime = 0;
	BPFIF_LOCK(bp);
	/* Offer the packet to every descriptor on this interface. */
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is
		 * no way for the caller to indicate to us whether this packet
		 * is inbound or outbound.  In the bpf_mtap() routines, we use
		 * the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		/* Prefer the JIT-compiled filter when one is installed. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			/* Timestamp only once some filter has matched. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
|
|
|
|
|
2008-04-28 19:42:11 +00:00
|
|
|
/*
 * True when descriptor d should NOT see a packet whose mbuf receive
 * interface is r, tapped on interface i: the descriptor wants inbound
 * packets only (BPF_D_IN) but r differs from i, or it wants outbound
 * only (BPF_D_OUT) but r equals i.
 */
#define BPF_CHECK_DIRECTION(d, r, i) \
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) || \
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
|
2007-02-26 22:24:14 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;		/* timestamp taken lazily, at most once */
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Honor the descriptor's configured capture direction. */
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			/* Timestamp only once some filter has matched. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
|
|
|
|
|
2003-12-28 03:56:00 +00:00
|
|
|
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;		/* fake header mbuf, lives on the stack */
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;		/* timestamp taken lazily, at most once */
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Honor the descriptor's configured capture direction. */
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			/* Timestamp only once some filter has matched. */
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
|
|
|
|
|
2007-02-26 22:24:14 +00:00
|
|
|
#undef BPF_CHECK_DIRECTION
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Move the packet data from interface memory (pkt) into the
|
2004-02-16 18:19:15 +00:00
|
|
|
* store buffer. "cpfn" is the routine called to do the actual data
|
1994-05-24 10:09:53 +00:00
|
|
|
* transfer. bcopy is passed in to copy contiguous chunks, while
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
* bpf_append_mbuf is passed in to copy mbuf chains. In the latter case,
|
1994-05-24 10:09:53 +00:00
|
|
|
* pkt is really an mbuf.
|
|
|
|
*/
|
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
|
|
|
|
struct timeval *tv)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patchs to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstrac
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring hanges that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
struct bpf_hdr hdr;
|
2003-03-02 15:56:49 +00:00
|
|
|
int totlen, curlen;
|
|
|
|
int hdrlen = d->bd_bif->bif_hdrlen;
|
2005-03-02 21:59:39 +00:00
|
|
|
int do_wakeup = 0;
|
2004-02-16 18:19:15 +00:00
|
|
|
|
2005-05-04 03:09:28 +00:00
|
|
|
BPFD_LOCK_ASSERT(d);
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Detect whether user space has released a buffer back to us, and if
|
|
|
|
* so, move it from being a hold buffer to a free buffer. This may
|
|
|
|
* not be the best place to do it (for example, we might only want to
|
|
|
|
* run this check if we need the space), but for now it's a reliable
|
|
|
|
* spot to do it.
|
|
|
|
*/
|
2008-03-25 07:41:33 +00:00
|
|
|
if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
d->bd_fbuf = d->bd_hbuf;
|
|
|
|
d->bd_hbuf = NULL;
|
|
|
|
d->bd_hlen = 0;
|
2008-07-05 20:11:28 +00:00
|
|
|
bpf_buf_reclaimed(d);
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Figure out how many bytes to move. If the packet is
|
|
|
|
* greater or equal to the snapshot length, transfer that
|
|
|
|
* much. Otherwise, transfer the whole packet (unless
|
|
|
|
* we hit the buffer size limit).
|
|
|
|
*/
|
|
|
|
totlen = hdrlen + min(snaplen, pktlen);
|
|
|
|
if (totlen > d->bd_bufsize)
|
|
|
|
totlen = d->bd_bufsize;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Round up the end of the previous packet to the next longword.
|
2008-04-07 02:51:00 +00:00
|
|
|
*
|
|
|
|
* Drop the packet if there's no room and no hope of room
|
|
|
|
* If the packet would overflow the storage buffer or the storage
|
|
|
|
* buffer is considered immutable by the buffer model, try to rotate
|
|
|
|
* the buffer and wakeup pending processes.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
curlen = BPF_WORDALIGN(d->bd_slen);
|
2008-04-07 02:51:00 +00:00
|
|
|
if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
|
2004-07-24 16:58:56 +00:00
|
|
|
if (d->bd_fbuf == NULL) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2008-04-07 02:51:00 +00:00
|
|
|
* There's no room in the store buffer, and no
|
|
|
|
* prospect of room, so drop the packet. Notify the
|
|
|
|
* buffer model.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2008-04-07 02:51:00 +00:00
|
|
|
bpf_buffull(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
++d->bd_dcount;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ROTATE_BUFFERS(d);
|
2005-03-02 21:59:39 +00:00
|
|
|
do_wakeup = 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
curlen = 0;
|
2008-04-07 02:51:00 +00:00
|
|
|
} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
* Immediate mode is set, or the read timeout has already
|
|
|
|
* expired during a select call. A packet arrived, so the
|
|
|
|
* reader should be woken up.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2005-03-02 21:59:39 +00:00
|
|
|
do_wakeup = 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
* Append the bpf header. Note we append the actual header size, but
|
|
|
|
* move forward the length of the header plus padding.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
bzero(&hdr, sizeof(hdr));
|
|
|
|
hdr.bh_tstamp = *tv;
|
|
|
|
hdr.bh_datalen = pktlen;
|
|
|
|
hdr.bh_hdrlen = hdrlen;
|
|
|
|
hdr.bh_caplen = totlen - hdrlen;
|
|
|
|
bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Copy the packet data into the store buffer and update its length.
|
|
|
|
*/
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
|
1994-05-24 10:09:53 +00:00
|
|
|
d->bd_slen = curlen + totlen;
|
2005-03-02 21:59:39 +00:00
|
|
|
|
|
|
|
if (do_wakeup)
|
|
|
|
bpf_wakeup(d);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free buffers currently in use by a descriptor.
|
|
|
|
* Called on close.
|
|
|
|
*/
|
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpf_freed(struct bpf_d *d)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* We don't need to lock out interrupts since this descriptor has
|
|
|
|
* been detached from its interface and it yet hasn't been marked
|
|
|
|
* free.
|
|
|
|
*/
|
Introduce support for zero-copy BPF buffering, which reduces the
overhead of packet capture by allowing a user process to directly "loan"
buffer memory to the kernel rather than using read(2) to explicitly copy
data from kernel address space.
The user process will issue new BPF ioctls to set the shared memory
buffer mode and provide pointers to buffers and their size. The kernel
then wires and maps the pages into kernel address space using sf_buf(9),
which on supporting architectures will use the direct map region. The
current "buffered" access mode remains the default, and support for
zero-copy buffers must, for the time being, be explicitly enabled using
a sysctl for the kernel to accept requests to use it.
The kernel and user process synchronize use of the buffers with atomic
operations, avoiding the need for system calls under load; the user
process may use select()/poll()/kqueue() to manage blocking while
waiting for network data if the user process is able to consume data
faster than the kernel generates it. Patches to libpcap are available
to allow libpcap applications to transparently take advantage of this
support. Detailed information on the new API may be found in bpf(4),
including specific atomic operations and memory barriers required to
synchronize buffer use safely.
These changes modify the base BPF implementation to (roughly) abstract
the current buffer model, allowing the new shared memory model to be
added, and add new monitoring statistics for netstat to print. The
implementation, with the exception of some monitoring changes that break
the netstat monitoring ABI for BPF, will be MFC'd.
Zerocopy bpf buffers are still considered experimental and are disabled
by default. To experiment with this new facility, adjust the
net.bpf.zerocopy_enable sysctl variable to 1.
Changes to libpcap will be made available as a patch for the time being,
and further refinements to the implementation are expected.
Sponsored by: Seccuris Inc.
In collaboration with: rwatson
Tested by: pwood, gallatin
MFC after: 4 months [1]
[1] Certain portions will probably not be MFCed, specifically things
that can break the monitoring ABI.
2008-03-24 13:49:17 +00:00
|
|
|
bpf_free(d);
|
2005-12-06 02:58:12 +00:00
|
|
|
if (d->bd_rfilter) {
|
2005-08-22 19:35:48 +00:00
|
|
|
free((caddr_t)d->bd_rfilter, M_BPF);
|
2005-12-06 02:58:12 +00:00
|
|
|
#ifdef BPF_JITTER
|
|
|
|
bpf_destroy_jit_filter(d->bd_bfilter);
|
|
|
|
#endif
|
|
|
|
}
|
2005-08-22 19:35:48 +00:00
|
|
|
if (d->bd_wfilter)
|
|
|
|
free((caddr_t)d->bd_wfilter, M_BPF);
|
2001-02-16 17:10:28 +00:00
|
|
|
mtx_destroy(&d->bd_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2002-11-14 23:24:13 +00:00
|
|
|
/*
|
|
|
|
* Attach an interface to bpf. dlt is the link layer type; hdrlen is the
|
|
|
|
* fixed size of the link header (variable length headers not yet supported).
|
|
|
|
*/
|
|
|
|
void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
|
2002-11-14 23:24:13 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2001-08-23 22:38:08 +00:00
|
|
|
* Attach an interface to bpf. ifp is a pointer to the structure
|
|
|
|
* defining the interface to be attached, dlt is the link layer type,
|
|
|
|
* and hdrlen is the fixed size of the link header (variable length
|
|
|
|
 * headers are not yet supported).
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct bpf_if *bp;
|
2006-06-15 15:39:12 +00:00
|
|
|
|
|
|
|
bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
|
2004-07-24 16:58:56 +00:00
|
|
|
if (bp == NULL)
|
1994-05-24 10:09:53 +00:00
|
|
|
panic("bpfattach");
|
|
|
|
|
2004-09-09 00:19:27 +00:00
|
|
|
LIST_INIT(&bp->bif_dlist);
|
1994-05-24 10:09:53 +00:00
|
|
|
bp->bif_ifp = ifp;
|
|
|
|
bp->bif_dlt = dlt;
|
2002-04-04 21:03:38 +00:00
|
|
|
mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
|
2006-06-02 19:59:33 +00:00
|
|
|
KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
|
|
|
|
*driverp = bp;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2001-02-16 17:10:28 +00:00
|
|
|
mtx_lock(&bpf_mtx);
|
2004-09-09 00:19:27 +00:00
|
|
|
LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
|
2001-02-16 17:10:28 +00:00
|
|
|
mtx_unlock(&bpf_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute the length of the bpf header. This is not necessarily
|
|
|
|
* equal to SIZEOF_BPF_HDR because we want to insert spacing such
|
|
|
|
* that the network layer header begins on a longword boundary (for
|
|
|
|
* performance reasons and to alleviate alignment restrictions).
|
|
|
|
*/
|
|
|
|
bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
|
|
|
|
|
1995-09-20 20:48:29 +00:00
|
|
|
if (bootverbose)
|
2002-11-14 23:24:13 +00:00
|
|
|
if_printf(ifp, "bpf attached\n");
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1995-11-29 10:49:16 +00:00
|
|
|
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfdetach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all affected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on your
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
/*
|
|
|
|
* Detach bpf from an interface. This involves detaching each descriptor
|
|
|
|
* associated with the interface, and leaving bd_bif NULL. Notify each
|
|
|
|
* descriptor as it's detached so that any sleepers wake up and get
|
|
|
|
* ENXIO.
|
|
|
|
*/
|
|
|
|
void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpfdetach(struct ifnet *ifp)
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfdetach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all affected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on your
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
{
|
2004-09-09 00:19:27 +00:00
|
|
|
struct bpf_if *bp;
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfdetach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all affected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on your
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
struct bpf_d *d;
|
|
|
|
|
|
|
|
/* Locate BPF interface information */
|
2003-01-20 19:08:46 +00:00
|
|
|
mtx_lock(&bpf_mtx);
|
2004-09-09 00:19:27 +00:00
|
|
|
LIST_FOREACH(bp, &bpf_iflist, bif_next) {
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfattach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all effected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on you
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
if (ifp == bp->bif_ifp)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Interface wasn't attached */
|
2003-03-21 15:13:29 +00:00
|
|
|
if ((bp == NULL) || (bp->bif_ifp == NULL)) {
|
2001-02-16 17:10:28 +00:00
|
|
|
mtx_unlock(&bpf_mtx);
|
2003-10-31 18:32:15 +00:00
|
|
|
printf("bpfdetach: %s was not attached\n", ifp->if_xname);
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfattach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all effected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on you
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2004-09-09 00:19:27 +00:00
|
|
|
LIST_REMOVE(bp, bif_next);
|
2003-01-20 19:08:46 +00:00
|
|
|
mtx_unlock(&bpf_mtx);
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfattach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all effected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on you
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
|
2004-09-09 00:19:27 +00:00
|
|
|
while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
|
2001-02-16 17:10:28 +00:00
|
|
|
bpf_detachd(d);
|
|
|
|
BPFD_LOCK(d);
|
|
|
|
bpf_wakeup(d);
|
|
|
|
BPFD_UNLOCK(d);
|
|
|
|
}
|
|
|
|
|
|
|
|
mtx_destroy(&bp->bif_mtx);
|
The advent of if_detach, allowing interface removal at runtime, makes it
possible for a panic to occur if BPF is in use on the interface at the
time of the call to if_detach. This happens because BPF maintains pointers
to the struct ifnet describing the interface, which is freed by if_detach.
To correct this problem, a new call, bpfdetach, is introduced. bpfdetach
locates BPF descriptor references to the interface, and NULLs them. Other
BPF code is modified so that discovery of a NULL interface results in
ENXIO (already implemented for some calls). Processes blocked on a BPF
call will also be woken up so that they can receive ENXIO.
Interface drivers that invoke bpfattach and if_detach must be modified to
also call bpfattach(ifp) before calling if_detach(ifp). This is relevant
for buses that support hot removal, such as pccard and usb. Patches to
all effected devices will not be committed, only to if_wi.c, due to
testing limitations. To reproduce the crash, load up tcpdump on you
favorite pccard ethernet card, and then eject the card. As some pccard
drivers do not invoke if_detach(ifp), this bug will not manifest itself
for those drivers.
Reviewed by: wes
2000-03-19 05:42:34 +00:00
|
|
|
free(bp, M_BPF);
|
2003-01-20 19:08:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Get a list of available data link types of the interface.
 *
 * Walks the global bpf_iflist and, for every bpf_if bound to the
 * descriptor's interface, copies that entry's DLT out to the
 * user-supplied array bfl->bfl_list (when one was provided).  On
 * return, bfl->bfl_len holds the number of matching DLTs.  Returns
 * ENOMEM if the caller's array is too small, otherwise the last
 * copyout() status (0 on success).
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
        int n, error;
        struct ifnet *ifp;
        struct bpf_if *bp;

        ifp = d->bd_bif->bif_ifp;
        n = 0;
        error = 0;
        mtx_lock(&bpf_mtx);
        LIST_FOREACH(bp, &bpf_iflist, bif_next) {
                if (bp->bif_ifp != ifp)
                        continue;
                /*
                 * A NULL list means the caller only wants the count;
                 * in that case we never touch user memory.
                 */
                if (bfl->bfl_list != NULL) {
                        if (n >= bfl->bfl_len) {
                                mtx_unlock(&bpf_mtx);
                                return (ENOMEM);
                        }
                        /*
                         * NOTE(review): copyout() runs while bpf_mtx is
                         * held and may fault/sleep — confirm this is
                         * acceptable for this lock, or copy after unlock.
                         */
                        error = copyout(&bp->bif_dlt,
                            bfl->bfl_list + n, sizeof(u_int));
                }
                n++;
        }
        mtx_unlock(&bpf_mtx);
        bfl->bfl_len = n;
        return (error);
}
|
|
|
|
|
|
|
|
/*
 * Set the data link type of a BPF instance.
 *
 * Looks up a bpf_if attached to the same interface as the descriptor
 * but carrying the requested DLT, and if found re-attaches the
 * descriptor to it, resetting its buffers and re-establishing
 * promiscuous mode if it was previously enabled.  Returns EINVAL when
 * no matching interface/DLT pair exists, 0 otherwise.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
        int error, opromisc;
        struct ifnet *ifp;
        struct bpf_if *bp;

        /* Already using the requested DLT: nothing to do. */
        if (d->bd_bif->bif_dlt == dlt)
                return (0);
        ifp = d->bd_bif->bif_ifp;
        mtx_lock(&bpf_mtx);
        LIST_FOREACH(bp, &bpf_iflist, bif_next) {
                if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
                        break;
        }
        /*
         * NOTE(review): bp is found under bpf_mtx but used after the
         * unlock below — presumably bpf_if entries cannot go away while
         * the descriptor holds its interface; confirm against bpfdetach.
         */
        mtx_unlock(&bpf_mtx);
        if (bp != NULL) {
                opromisc = d->bd_promisc;
                bpf_detachd(d);
                bpf_attachd(d, bp);
                BPFD_LOCK(d);
                reset_d(d);
                BPFD_UNLOCK(d);
                /*
                 * bpf_detachd() dropped promiscuous mode; turn it back
                 * on if this descriptor had requested it.
                 */
                if (opromisc) {
                        error = ifpromisc(bp->bif_ifp, 1);
                        if (error)
                                if_printf(bp->bif_ifp,
                                        "bpf_setdlt: ifpromisc failed (%d)\n",
                                        error);
                        else
                                d->bd_promisc = 1;
                }
        }
        return (bp == NULL ? EINVAL : 0);
}
|
|
|
|
|
1997-09-16 11:44:05 +00:00
|
|
|
static void
|
2006-06-15 15:39:12 +00:00
|
|
|
bpf_drvinit(void *unused)
|
1995-11-29 10:49:16 +00:00
|
|
|
{
|
2008-08-13 15:41:21 +00:00
|
|
|
struct cdev *dev;
|
1996-03-28 14:33:59 +00:00
|
|
|
|
2002-04-04 21:03:38 +00:00
|
|
|
mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
|
2004-09-09 00:19:27 +00:00
|
|
|
LIST_INIT(&bpf_iflist);
|
2008-08-13 15:41:21 +00:00
|
|
|
|
|
|
|
dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
|
|
|
|
/* For compatibility */
|
|
|
|
make_dev_alias(dev, "bpf0");
|
|
|
|
|
1995-11-29 10:49:16 +00:00
|
|
|
}
|
|
|
|
|
2005-07-24 17:21:17 +00:00
|
|
|
/*
 * Fill an exported xbpf_d statistics record from a kernel bpf_d.
 *
 * The destination is zeroed first so fields not copied here (and any
 * padding) do not leak kernel memory to userland.  The caller must
 * hold the descriptor lock (asserted below) so the snapshot of the
 * counters and flags is consistent.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

        bzero(d, sizeof(*d));
        BPFD_LOCK_ASSERT(bd);
        /* Record size lets userland detect ABI growth. */
        d->bd_structsize = sizeof(*d);
        d->bd_immediate = bd->bd_immediate;
        d->bd_promisc = bd->bd_promisc;
        d->bd_hdrcmplt = bd->bd_hdrcmplt;
        d->bd_direction = bd->bd_direction;
        d->bd_feedback = bd->bd_feedback;
        d->bd_async = bd->bd_async;
        d->bd_rcount = bd->bd_rcount;
        d->bd_dcount = bd->bd_dcount;
        d->bd_fcount = bd->bd_fcount;
        d->bd_sig = bd->bd_sig;
        d->bd_slen = bd->bd_slen;
        d->bd_hlen = bd->bd_hlen;
        d->bd_bufsize = bd->bd_bufsize;
        d->bd_pid = bd->bd_pid;
        strlcpy(d->bd_ifname,
            bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
        d->bd_locked = bd->bd_locked;
        /* Zero-copy / write-path statistics. */
        d->bd_wcount = bd->bd_wcount;
        d->bd_wdcount = bd->bd_wdcount;
        d->bd_wfcount = bd->bd_wfcount;
        d->bd_zcopy = bd->bd_zcopy;
        d->bd_bufmode = bd->bd_bufmode;
}
|
|
|
|
|
|
|
|
/*
 * Sysctl handler exporting one xbpf_d record per open BPF descriptor
 * (consumed by netstat).  Requires PRIV_NET_BPF; a NULL oldptr probe
 * returns only the required buffer size.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct xbpf_d *xbdbuf, *xbd;
        int index, error;
        struct bpf_if *bp;
        struct bpf_d *bd;

        /*
         * XXX This is not technically correct. It is possible for non
         * privileged users to open bpf devices. It would make sense
         * if the users who opened the devices were able to retrieve
         * the statistics for them, too.
         */
        error = priv_check(req->td, PRIV_NET_BPF);
        if (error)
                return (error);
        /* Size probe: report how much space the full dump needs. */
        if (req->oldptr == NULL)
                return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
        if (bpf_bpfd_cnt == 0)
                return (SYSCTL_OUT(req, 0, 0));
        /* Allocate before taking bpf_mtx; M_WAITOK may sleep. */
        xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
        mtx_lock(&bpf_mtx);
        /* Re-check under the lock: descriptors may have appeared. */
        if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
                mtx_unlock(&bpf_mtx);
                free(xbdbuf, M_BPF);
                return (ENOMEM);
        }
        index = 0;
        /* Snapshot every descriptor on every interface, under locks. */
        LIST_FOREACH(bp, &bpf_iflist, bif_next) {
                BPFIF_LOCK(bp);
                LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
                        xbd = &xbdbuf[index++];
                        BPFD_LOCK(bd);
                        bpfstats_fill_xbpf(xbd, bd);
                        BPFD_UNLOCK(bd);
                }
                BPFIF_UNLOCK(bp);
        }
        mtx_unlock(&bpf_mtx);
        error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
        free(xbdbuf, M_BPF);
        return (error);
}
|
|
|
|
|
2008-03-16 10:58:09 +00:00
|
|
|
/* Run bpf_drvinit() at boot, in the driver-initialization phase. */
SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
|
1995-11-29 10:49:16 +00:00
|
|
|
|
2001-01-29 13:26:14 +00:00
|
|
|
#else /* !DEV_BPF && !NETGRAPH_BPF */
|
1999-04-28 01:18:13 +00:00
|
|
|
/*
|
|
|
|
* NOP stubs to allow bpf-using drivers to load and function.
|
|
|
|
*
|
|
|
|
* A 'better' implementation would allow the core bpf functionality
|
|
|
|
* to be loaded at runtime.
|
|
|
|
*/
|
2006-06-14 02:23:28 +00:00
|
|
|
static struct bpf_if bp_null;
|
1999-04-28 01:18:13 +00:00
|
|
|
|
|
|
|
/* NOP stub: BPF is not compiled into this kernel; discard the packet. */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}
|
|
|
|
|
|
|
|
/* NOP stub: BPF is not compiled into this kernel; discard the mbuf. */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}
|
|
|
|
|
2003-12-28 03:56:00 +00:00
|
|
|
/* NOP stub: BPF is not compiled into this kernel; discard header + mbuf. */
void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}
|
|
|
|
|
1999-04-28 01:18:13 +00:00
|
|
|
/*
 * Stub attach: route through bpfattach2() so ifp->if_bpf still gets
 * pointed at the shared bp_null placeholder.
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

        bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}
|
|
|
|
|
2003-10-04 01:32:28 +00:00
|
|
|
/*
 * Stub attach: point the driver's bpf_if pointer at the static bp_null
 * placeholder so drivers can test/deref it without a real BPF layer.
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

        *driverp = &bp_null;
}
|
|
|
|
|
2000-04-27 15:11:41 +00:00
|
|
|
/* NOP stub: nothing was attached, so there is nothing to detach. */
void
bpfdetach(struct ifnet *ifp)
{
}
|
|
|
|
|
1999-04-28 01:18:13 +00:00
|
|
|
/*
 * Stub filter: always accept the whole packet (the BPF convention is
 * that the filter return value is the snapshot length; (u_int)-1 means
 * "no filter" / capture everything).
 */
u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
        return -1;      /* "no filter" behaviour */
}
|
|
|
|
|
2001-01-29 13:26:14 +00:00
|
|
|
/* Stub validator: reject every program (returns false unconditionally). */
int
bpf_validate(const struct bpf_insn *f, int len)
{
        return 0;       /* false */
}
|
|
|
|
|
|
|
|
#endif /* !DEV_BPF && !NETGRAPH_BPF */
|