Move the definition of struct bpf_if to bpf.c.
A couple of fields are still exposed via struct bpf_if_ext so that bpf_peers_present() can be inlined into its callers. However, this change eliminates some type duplication in the resulting CTF container, since otherwise ctfmerge(1) propagates the duplication through all types that contain a struct bpf_if. Differential Revision: https://reviews.freebsd.org/D2319 Reviewed by: melifaro, rpaulo
Parent commit: 8188e2e04e
This commit:  b23cbbe6db
@@ -69,7 +69,6 @@ __FBSDID("$FreeBSD$");
 #include <net/if.h>
 #include <net/if_var.h>
-#define	BPF_INTERNAL
 #include <net/bpf.h>
 #include <net/bpf_buffer.h>
 #ifdef BPF_JITTER
@@ -90,6 +89,20 @@ __FBSDID("$FreeBSD$");
 
 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
 
+struct bpf_if {
+#define	bif_next	bif_ext.bif_next
+#define	bif_dlist	bif_ext.bif_dlist
+	struct bpf_if_ext bif_ext;	/* public members */
+	u_int		bif_dlt;	/* link layer type */
+	u_int		bif_hdrlen;	/* length of link header */
+	struct ifnet	*bif_ifp;	/* corresponding interface */
+	struct rwlock	bif_lock;	/* interface lock */
+	LIST_HEAD(, bpf_d) bif_wlist;	/* writer-only list */
+	int		bif_flags;	/* Interface flags */
+};
+
+CTASSERT(offsetof(struct bpf_if, bif_ext) == 0);
+
 #if defined(DEV_BPF) || defined(NETGRAPH_BPF)
 
 #define PRINET  26			/* interruptible */
@@ -1892,7 +1905,7 @@ bpf_setif(struct bpf_d *d, struct ifreq *ifr)
 
 	/* Check if interface is not being detached from BPF */
 	BPFIF_RLOCK(bp);
-	if (bp->flags & BPFIF_FLAG_DYING) {
+	if (bp->bif_flags & BPFIF_FLAG_DYING) {
 		BPFIF_RUNLOCK(bp);
 		return (ENXIO);
 	}
@@ -2561,7 +2574,7 @@ bpfdetach(struct ifnet *ifp)
 	 * Mark bp as detached to restrict new consumers.
 	 */
 	BPFIF_WLOCK(bp);
-	bp->flags |= BPFIF_FLAG_DYING;
+	bp->bif_flags |= BPFIF_FLAG_DYING;
 	BPFIF_WUNLOCK(bp);
 
 	CTR4(KTR_NET, "%s: sheduling free for encap %d (%p) for if %p",
@@ -1451,21 +1451,14 @@ SYSCTL_DECL(_net_bpf);
 
 /*
  * Descriptor associated with each attached hardware interface.
- * FIXME: this structure is exposed to external callers to speed up
- * bpf_peers_present() call. However we cover all fields not needed by
- * this function via BPF_INTERNAL define
+ * Part of this structure is exposed to external callers to speed up
+ * bpf_peers_present() calls.
  */
-struct bpf_if {
+struct bpf_if;
+
+struct bpf_if_ext {
 	LIST_ENTRY(bpf_if) bif_next;	/* list of all interfaces */
 	LIST_HEAD(, bpf_d) bif_dlist;	/* descriptor list */
-#ifdef BPF_INTERNAL
-	u_int bif_dlt;				/* link layer type */
-	u_int bif_hdrlen;		/* length of link header */
-	struct ifnet *bif_ifp;		/* corresponding interface */
-	struct rwlock bif_lock;		/* interface lock */
-	LIST_HEAD(, bpf_d) bif_wlist;	/* writer-only list */
-	int flags;			/* Interface flags */
-#endif
 };
 
 void	 bpf_bufheld(struct bpf_d *d);
@@ -1483,8 +1476,10 @@ u_int bpf_filter(const struct bpf_insn *, u_char *, u_int, u_int);
 static __inline int
 bpf_peers_present(struct bpf_if *bpf)
 {
+	struct bpf_if_ext *ext;
 
-	if (!LIST_EMPTY(&bpf->bif_dlist))
+	ext = (struct bpf_if_ext *)bpf;
+	if (!LIST_EMPTY(&ext->bif_dlist))
 		return (1);
 	return (0);
 }
Loading…
Reference in New Issue
Block a user