/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)if.h	8.1 (Berkeley) 6/10/93
 * $FreeBSD$
 */
|
|
|
|
|
|
|
|
#ifndef	_NET_IF_VAR_H_
#define	_NET_IF_VAR_H_

/*
 * Structures defining a network interface, providing a packet
 * transport mechanism (ala level 0 of the PUP protocols).
 *
 * Each interface accepts output datagrams of a specified maximum
 * length, and provides higher level routines with input datagrams
 * received from its medium.
 *
 * Output occurs when the routine if_output is called, with three parameters:
 *	(*ifp->if_output)(ifp, m, dst, rt)
 * Here m is the mbuf chain to be sent and dst is the destination address.
 * The output routine encapsulates the supplied datagram if necessary,
 * and then transmits it on its medium.
 *
 * On input, each interface unwraps the data received by it, and either
 * places it on the input queue of a internetwork datagram routine
 * and posts the associated software interrupt, or passes the datagram to a raw
 * packet input routine.
 *
 * Routines exist for locating interfaces by their addresses
 * or for locating a interface on a certain network, as well as more general
 * routing and gateway routines maintaining information used to locate
 * interfaces.  These routines live in the files if.c and route.c
 */
|
|
|
|
|
|
|
|
#ifdef __STDC__
/*
 * Forward structure declarations for function prototypes [sic].
 */
struct	mbuf;
struct	thread;
struct	rtentry;
struct	rt_addrinfo;
struct	socket;
struct	ether_header;
#endif

#include <sys/queue.h>		/* get TAILQ macros */

#ifdef _KERNEL
#include <sys/mbuf.h>
#include <sys/systm.h>		/* XXX */
#endif /* _KERNEL */
#include <sys/lock.h>		/* XXX */
#include <sys/mutex.h>		/* XXX */
#include <sys/event.h>		/* XXX */

TAILQ_HEAD(ifnethead, ifnet);	/* we use TAILQs so that the order of */
TAILQ_HEAD(ifaddrhead, ifaddr);	/* instantiation is preserved in the list */
TAILQ_HEAD(ifprefixhead, ifprefix);
TAILQ_HEAD(ifmultihead, ifmultiaddr);
|
1997-01-03 19:50:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Structure defining a queue for a network interface.
|
|
|
|
*/
|
|
|
|
struct ifqueue {
|
|
|
|
struct mbuf *ifq_head;
|
|
|
|
struct mbuf *ifq_tail;
|
|
|
|
int ifq_len;
|
|
|
|
int ifq_maxlen;
|
|
|
|
int ifq_drops;
|
Lock down the network interface queues. The queue mutex must be obtained
before adding/removing packets from the queue. Also, the if_obytes and
if_omcasts fields should only be manipulated under protection of the mutex.
IF_ENQUEUE, IF_PREPEND, and IF_DEQUEUE perform all necessary locking on
the queue. An IF_LOCK macro is provided, as well as the old (mutex-less)
versions of the macros in the form _IF_ENQUEUE, _IF_QFULL, for code which
needs them, but their use is discouraged.
Two new macros are introduced: IF_DRAIN() to drain a queue, and IF_HANDOFF,
which takes care of locking/enqueue, and also statistics updating/start
if necessary.
2000-11-25 07:35:38 +00:00
|
|
|
struct mtx ifq_mtx;
|
1997-01-03 19:50:26 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Structure defining a network interface.
|
|
|
|
*
|
|
|
|
* (Would like to call this struct ``if'', but C isn't PL/1.)
|
|
|
|
*/
|
2001-10-02 18:08:34 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* NB: For FreeBSD, it is assumed that each NIC driver's softc starts with
|
|
|
|
* one of these structures, typically held within an arpcom structure.
|
|
|
|
*/
|
1997-01-03 19:50:26 +00:00
|
|
|
struct ifnet {
|
|
|
|
void *if_softc; /* pointer to driver state */
|
|
|
|
char *if_name; /* name, e.g. ``en'' or ``lo'' */
|
2000-05-26 02:09:24 +00:00
|
|
|
TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */
|
1997-01-03 19:50:26 +00:00
|
|
|
struct ifaddrhead if_addrhead; /* linked list of addresses per if */
|
2001-09-06 02:40:43 +00:00
|
|
|
struct klist if_klist; /* events attached to this if */
|
|
|
|
int if_pcount; /* number of promiscuous listeners */
|
1997-01-03 19:50:26 +00:00
|
|
|
struct bpf_if *if_bpf; /* packet filter structure */
|
|
|
|
u_short if_index; /* numeric abbreviation for this if */
|
|
|
|
short if_unit; /* sub-unit for lower level driver */
|
|
|
|
short if_timer; /* time 'til if_watchdog called */
|
|
|
|
short if_flags; /* up/down, broadcast, etc. */
|
2001-09-18 17:41:42 +00:00
|
|
|
int if_capabilities; /* interface capabilities */
|
|
|
|
int if_capenable; /* enabled features */
|
Lock down the network interface queues. The queue mutex must be obtained
before adding/removing packets from the queue. Also, the if_obytes and
if_omcasts fields should only be manipulated under protection of the mutex.
IF_ENQUEUE, IF_PREPEND, and IF_DEQUEUE perform all necessary locking on
the queue. An IF_LOCK macro is provided, as well as the old (mutex-less)
versions of the macros in the form _IF_ENQUEUE, _IF_QFULL, for code which
needs them, but their use is discouraged.
Two new macros are introduced: IF_DRAIN() to drain a queue, and IF_HANDOFF,
which takes care of locking/enqueue, and also statistics updating/start
if necessary.
2000-11-25 07:35:38 +00:00
|
|
|
int if_mpsafe; /* XXX TEMPORARY */
|
1997-01-03 19:50:26 +00:00
|
|
|
int if_ipending; /* interrupts pending */
|
|
|
|
void *if_linkmib; /* link-type-specific MIB data */
|
|
|
|
size_t if_linkmiblen; /* length of above data */
|
|
|
|
struct if_data if_data;
|
1997-01-07 19:15:32 +00:00
|
|
|
struct ifmultihead if_multiaddrs; /* multicast addresses configured */
|
|
|
|
int if_amcount; /* number of all-multicast requests */
|
1997-01-03 19:50:26 +00:00
|
|
|
/* procedure handles */
|
|
|
|
int (*if_output) /* output routine (enqueue) */
|
|
|
|
__P((struct ifnet *, struct mbuf *, struct sockaddr *,
|
|
|
|
struct rtentry *));
|
|
|
|
void (*if_start) /* initiate output routine */
|
|
|
|
__P((struct ifnet *));
|
|
|
|
int (*if_done) /* output complete routine */
|
|
|
|
__P((struct ifnet *)); /* (XXX not used; fake prototype) */
|
|
|
|
int (*if_ioctl) /* ioctl routine */
|
1998-06-07 17:13:14 +00:00
|
|
|
__P((struct ifnet *, u_long, caddr_t));
|
1997-01-03 19:50:26 +00:00
|
|
|
void (*if_watchdog) /* timer routine */
|
|
|
|
__P((struct ifnet *));
|
|
|
|
int (*if_poll_recv) /* polled receive routine */
|
|
|
|
__P((struct ifnet *, int *));
|
|
|
|
int (*if_poll_xmit) /* polled transmit routine */
|
|
|
|
__P((struct ifnet *, int *));
|
|
|
|
void (*if_poll_intren) /* polled interrupt reenable routine */
|
|
|
|
__P((struct ifnet *));
|
|
|
|
void (*if_poll_slowinput) /* input routine for slow devices */
|
|
|
|
__P((struct ifnet *, struct mbuf *));
|
|
|
|
void (*if_init) /* Init routine */
|
|
|
|
__P((void *));
|
1997-01-07 19:15:32 +00:00
|
|
|
int (*if_resolvemulti) /* validate/resolve multicast */
|
|
|
|
__P((struct ifnet *, struct sockaddr **, struct sockaddr *));
|
1997-01-03 19:50:26 +00:00
|
|
|
struct ifqueue if_snd; /* output queue */
|
|
|
|
struct ifqueue *if_poll_slowq; /* input queue for slow devices */
|
1999-11-05 14:41:39 +00:00
|
|
|
struct ifprefixhead if_prefixhead; /* list of prefixes per if */
|
2001-10-14 20:17:53 +00:00
|
|
|
u_int8_t *if_broadcastaddr; /* linklevel broadcast bytestring */
|
1997-01-03 19:50:26 +00:00
|
|
|
};
|
Lock down the network interface queues. The queue mutex must be obtained
before adding/removing packets from the queue. Also, the if_obytes and
if_omcasts fields should only be manipulated under protection of the mutex.
IF_ENQUEUE, IF_PREPEND, and IF_DEQUEUE perform all necessary locking on
the queue. An IF_LOCK macro is provided, as well as the old (mutex-less)
versions of the macros in the form _IF_ENQUEUE, _IF_QFULL, for code which
needs them, but their use is discouraged.
Two new macros are introduced: IF_DRAIN() to drain a queue, and IF_HANDOFF,
which takes care of locking/enqueue, and also statistics updating/start
if necessary.
2000-11-25 07:35:38 +00:00
|
|
|
|
1997-01-03 19:50:26 +00:00
|
|
|
typedef void if_init_f_t __P((void *));

/*
 * Convenience accessors for members of the embedded if_data block,
 * so callers can write e.g. ifp->if_mtu instead of ifp->if_data.ifi_mtu.
 */
#define	if_mtu		if_data.ifi_mtu
#define	if_type		if_data.ifi_type
#define	if_physical	if_data.ifi_physical
#define	if_addrlen	if_data.ifi_addrlen
#define	if_hdrlen	if_data.ifi_hdrlen
#define	if_metric	if_data.ifi_metric
#define	if_baudrate	if_data.ifi_baudrate
#define	if_hwassist	if_data.ifi_hwassist
#define	if_ipackets	if_data.ifi_ipackets
#define	if_ierrors	if_data.ifi_ierrors
#define	if_opackets	if_data.ifi_opackets
#define	if_oerrors	if_data.ifi_oerrors
#define	if_collisions	if_data.ifi_collisions
#define	if_ibytes	if_data.ifi_ibytes
#define	if_obytes	if_data.ifi_obytes
#define	if_imcasts	if_data.ifi_imcasts
#define	if_omcasts	if_data.ifi_omcasts
#define	if_iqdrops	if_data.ifi_iqdrops
#define	if_noproto	if_data.ifi_noproto
#define	if_lastchange	if_data.ifi_lastchange
#define	if_recvquota	if_data.ifi_recvquota
#define	if_xmitquota	if_data.ifi_xmitquota
/* Output with no route: pass a NULL rtentry to if_output. */
#define	if_rawoutput(if, m, sa) if_output(if, m, sa, (struct rtentry *)0)

/* for compatibility with other BSDs */
#define	if_addrlist	if_addrhead
#define	if_list		if_link
|
1997-01-03 19:50:26 +00:00
|
|
|
/*
 * Bit values in if_ipending
 */
#define	IFI_RECV	1	/* I want to receive */
#define	IFI_XMIT	2	/* I want to transmit */

/*
 * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq)
 * are queues of messages stored on ifqueue structures
 * (defined above).  Entries are added to and deleted from these structures
 * by these macros, which should be called with ipl raised to splimp().
 *
 * The _IF_* variants assume the caller already holds the queue mutex;
 * the corresponding IF_* wrappers take and drop the lock themselves.
 */
#define	IF_LOCK(ifq)		mtx_lock(&(ifq)->ifq_mtx)
#define	IF_UNLOCK(ifq)		mtx_unlock(&(ifq)->ifq_mtx)
#define	_IF_QFULL(ifq)		((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_DROP(ifq)		((ifq)->ifq_drops++)
#define	_IF_QLEN(ifq)		((ifq)->ifq_len)

/* Append mbuf chain m to the tail of the queue (lock held by caller). */
#define	_IF_ENQUEUE(ifq, m) do { 				\
	(m)->m_nextpkt = NULL;					\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_head = m; 				\
	else 							\
		(ifq)->ifq_tail->m_nextpkt = m; 		\
	(ifq)->ifq_tail = m; 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define	IF_ENQUEUE(ifq, m) do {					\
	IF_LOCK(ifq); 						\
	_IF_ENQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)

/* Insert mbuf chain m at the head of the queue (lock held by caller). */
#define	_IF_PREPEND(ifq, m) do {				\
	(m)->m_nextpkt = (ifq)->ifq_head; 			\
	if ((ifq)->ifq_tail == NULL) 				\
		(ifq)->ifq_tail = (m); 				\
	(ifq)->ifq_head = (m); 					\
	(ifq)->ifq_len++; 					\
} while (0)

#define	IF_PREPEND(ifq, m) do {					\
	IF_LOCK(ifq); 						\
	_IF_PREPEND(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)

/* Remove the head packet into m; m is NULL if the queue was empty. */
#define	_IF_DEQUEUE(ifq, m) do { 				\
	(m) = (ifq)->ifq_head; 					\
	if (m) { 						\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == 0) 	\
			(ifq)->ifq_tail = NULL; 		\
		(m)->m_nextpkt = NULL; 				\
		(ifq)->ifq_len--; 				\
	} 							\
} while (0)

#define	IF_DEQUEUE(ifq, m) do { 				\
	IF_LOCK(ifq); 						\
	_IF_DEQUEUE(ifq, m); 					\
	IF_UNLOCK(ifq); 					\
} while (0)

/* Free every packet on the queue, leaving it empty. */
#define	IF_DRAIN(ifq) do { 					\
	struct mbuf *m; 					\
	IF_LOCK(ifq); 						\
	for (;;) { 						\
		_IF_DEQUEUE(ifq, m); 				\
		if (m == NULL) 					\
			break; 					\
		m_freem(m); 					\
	} 							\
	IF_UNLOCK(ifq); 					\
} while (0)
|
1997-01-03 19:50:26 +00:00
|
|
|
|
1999-12-29 04:46:21 +00:00
|
|
|
#ifdef _KERNEL
#define	IF_HANDOFF(ifq, m, ifp)		if_handoff(ifq, m, ifp, 0)
#define	IF_HANDOFF_ADJ(ifq, m, ifp, adj) if_handoff(ifq, m, ifp, adj)

/*
 * Hand an outbound packet off to an interface queue.
 *
 * Enqueues m on ifq under the queue lock; if the queue is full the
 * packet is dropped (drop counter bumped, mbuf freed) and 0 is
 * returned.  When ifp is non-NULL, the interface output statistics
 * (if_obytes, adjusted by `adjust' for link-level framing, and
 * if_omcasts) are updated, and if_start is kicked unless the
 * interface was already marked IFF_OACTIVE.  Returns 1 on success.
 */
static __inline int
if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
{
	int active = 0;

	IF_LOCK(ifq);
	if (_IF_QFULL(ifq)) {
		_IF_DROP(ifq);
		IF_UNLOCK(ifq);
		m_freem(m);
		return (0);
	}
	if (ifp != NULL) {
		ifp->if_obytes += m->m_pkthdr.len + adjust;
		if (m->m_flags & M_MCAST)
			ifp->if_omcasts++;
		active = ifp->if_flags & IFF_OACTIVE;
	}
	_IF_ENQUEUE(ifq, m);
	IF_UNLOCK(ifq);
	if (ifp != NULL && !active) {
		if (ifp->if_mpsafe) {
			/* MP-safe drivers run if_start without Giant. */
			DROP_GIANT_NOSWITCH();
			(*ifp->if_start)(ifp);
			PICKUP_GIANT();
		} else {
			(*ifp->if_start)(ifp);
		}
	}
	return (1);
}
|
1999-08-06 13:53:03 +00:00
|
|
|
|
|
|
|
/*
 * 72 was chosen below because it is the size of a TCP/IP
 * header (40) + the minimum mss (32).
 */
#define	IF_MINMTU	72
#define	IF_MAXMTU	65535

#endif /* _KERNEL */
|
1997-01-03 19:50:26 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The ifaddr structure contains information about one address
|
|
|
|
* of an interface. They are maintained by the different address families,
|
|
|
|
* are allocated and attached when an address is set, and are linked
|
|
|
|
* together so all addresses for an interface can be located.
|
|
|
|
*/
|
|
|
|
struct ifaddr {
|
|
|
|
struct sockaddr *ifa_addr; /* address of interface */
|
|
|
|
struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */
|
|
|
|
#define ifa_broadaddr ifa_dstaddr /* broadcast address interface */
|
|
|
|
struct sockaddr *ifa_netmask; /* used to determine subnet */
|
2000-10-19 23:15:54 +00:00
|
|
|
struct if_data if_data; /* not all members are meaningful */
|
1997-01-03 19:50:26 +00:00
|
|
|
struct ifnet *ifa_ifp; /* back-pointer to interface */
|
2000-05-26 02:09:24 +00:00
|
|
|
TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */
|
1997-01-03 19:50:26 +00:00
|
|
|
void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */
|
2001-10-17 18:07:05 +00:00
|
|
|
__P((int, struct rtentry *, struct rt_addrinfo *));
|
1997-01-03 19:50:26 +00:00
|
|
|
u_short ifa_flags; /* mostly rt_flags for cloning */
|
1999-05-16 17:09:20 +00:00
|
|
|
u_int ifa_refcnt; /* references to this structure */
|
1997-01-03 19:50:26 +00:00
|
|
|
int ifa_metric; /* cost of going out this interface */
|
|
|
|
#ifdef notdef
|
|
|
|
struct rtentry *ifa_rt; /* XXXX for ROUTETOIF ????? */
|
|
|
|
#endif
|
1997-08-28 01:17:12 +00:00
|
|
|
int (*ifa_claim_addr) /* check if an addr goes to this if */
|
|
|
|
__P((struct ifaddr *, struct sockaddr *));
|
|
|
|
|
1997-01-03 19:50:26 +00:00
|
|
|
};
|
|
|
|
#define IFA_ROUTE RTF_UP /* route installed */
|
|
|
|
|
1999-11-22 02:45:11 +00:00
|
|
|
/* for compatibility with other BSDs */
|
|
|
|
#define ifa_list ifa_link
|
|
|
|
|
1999-11-05 14:41:39 +00:00
|
|
|
/*
|
|
|
|
* The prefix structure contains information about one prefix
|
|
|
|
* of an interface. They are maintained by the different address families,
|
|
|
|
* are allocated and attached when an prefix or an address is set,
|
1999-11-22 02:45:11 +00:00
|
|
|
* and are linked together so all prefixes for an interface can be located.
|
1999-11-05 14:41:39 +00:00
|
|
|
*/
|
|
|
|
struct ifprefix {
|
|
|
|
struct sockaddr *ifpr_prefix; /* prefix of interface */
|
|
|
|
struct ifnet *ifpr_ifp; /* back-pointer to interface */
|
2000-05-26 02:09:24 +00:00
|
|
|
TAILQ_ENTRY(ifprefix) ifpr_list; /* queue macro glue */
|
1999-11-05 14:41:39 +00:00
|
|
|
u_char ifpr_plen; /* prefix length in bits */
|
|
|
|
u_char ifpr_type; /* protocol dependent prefix type */
|
|
|
|
};
|
|
|
|
|
1997-01-07 19:15:32 +00:00
|
|
|
/*
|
|
|
|
* Multicast address structure. This is analogous to the ifaddr
|
|
|
|
* structure except that it keeps track of multicast addresses.
|
|
|
|
* Also, the reference count here is a count of requests for this
|
|
|
|
* address, not a count of pointers to this structure.
|
|
|
|
*/
|
|
|
|
struct ifmultiaddr {
|
2001-02-06 10:12:15 +00:00
|
|
|
TAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */
|
1997-01-08 13:20:25 +00:00
|
|
|
struct sockaddr *ifma_addr; /* address this membership is for */
|
|
|
|
struct sockaddr *ifma_lladdr; /* link-layer translation, if any */
|
|
|
|
struct ifnet *ifma_ifp; /* back-pointer to interface */
|
|
|
|
u_int ifma_refcount; /* reference count */
|
|
|
|
void *ifma_protospec; /* protocol-specific state, if any */
|
1997-01-07 19:15:32 +00:00
|
|
|
};
|
|
|
|
|
1999-12-29 04:46:21 +00:00
|
|
|
#ifdef _KERNEL
/*
 * Drop a reference on an ifaddr; when the count would go below zero
 * the structure is actually released via ifafree().
 */
#define	IFAFREE(ifa) \
	do { \
		if ((ifa)->ifa_refcnt <= 0) \
			ifafree(ifa); \
		else \
			(ifa)->ifa_refcnt--; \
	} while (0)
|
1997-01-03 19:50:26 +00:00
|
|
|
|
2001-09-06 02:40:43 +00:00
|
|
|
struct ifindex_entry {
|
|
|
|
struct ifnet *ife_ifnet;
|
|
|
|
struct ifaddr *ife_ifnet_addr;
|
|
|
|
dev_t ife_dev;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define ifnet_byindex(idx) ifindex_table[(idx)].ife_ifnet
|
|
|
|
#define ifaddr_byindex(idx) ifindex_table[(idx)].ife_ifnet_addr
|
|
|
|
#define ifdev_byindex(idx) ifindex_table[(idx)].ife_dev
|
|
|
|
|
1997-01-03 19:50:26 +00:00
|
|
|
extern struct ifnethead ifnet;
|
2001-09-06 02:40:43 +00:00
|
|
|
extern struct ifindex_entry *ifindex_table;
|
1997-01-03 19:50:26 +00:00
|
|
|
extern int ifqmaxlen;
|
2001-01-29 11:06:26 +00:00
|
|
|
extern struct ifnet *loif; /* first loopback interface */
|
1997-01-03 19:50:26 +00:00
|
|
|
extern int if_index;
|
|
|
|
|
2000-07-13 22:54:34 +00:00
|
|
|
void ether_ifattach __P((struct ifnet *, int));
|
|
|
|
void ether_ifdetach __P((struct ifnet *, int));
|
1997-01-03 19:50:26 +00:00
|
|
|
void ether_input __P((struct ifnet *, struct ether_header *, struct mbuf *));
|
2000-06-26 23:34:54 +00:00
|
|
|
void ether_demux __P((struct ifnet *, struct ether_header *, struct mbuf *));
|
1997-01-03 19:50:26 +00:00
|
|
|
int ether_output __P((struct ifnet *,
|
|
|
|
struct mbuf *, struct sockaddr *, struct rtentry *));
|
2000-06-26 23:34:54 +00:00
|
|
|
int ether_output_frame __P((struct ifnet *, struct mbuf *));
|
1997-01-03 19:50:26 +00:00
|
|
|
int ether_ioctl __P((struct ifnet *, int, caddr_t));
|
|
|
|
|
1999-11-22 02:45:11 +00:00
|
|
|
int if_addmulti __P((struct ifnet *, struct sockaddr *,
|
1997-01-08 13:20:25 +00:00
|
|
|
struct ifmultiaddr **));
|
1997-01-07 19:15:32 +00:00
|
|
|
int if_allmulti __P((struct ifnet *, int));
|
1997-01-03 19:50:26 +00:00
|
|
|
void if_attach __P((struct ifnet *));
|
1997-01-07 19:15:32 +00:00
|
|
|
int if_delmulti __P((struct ifnet *, struct sockaddr *));
|
1999-04-16 21:22:55 +00:00
|
|
|
void if_detach __P((struct ifnet *));
|
1997-01-03 19:50:26 +00:00
|
|
|
void if_down __P((struct ifnet *));
|
1998-12-16 18:30:43 +00:00
|
|
|
void if_route __P((struct ifnet *, int flag, int fam));
|
2000-08-15 00:48:38 +00:00
|
|
|
int if_setlladdr __P((struct ifnet *, const u_char *, int));
|
1998-12-16 18:30:43 +00:00
|
|
|
void if_unroute __P((struct ifnet *, int flag, int fam));
|
1997-01-03 19:50:26 +00:00
|
|
|
void if_up __P((struct ifnet *));
|
|
|
|
/*void ifinit __P((void));*/ /* declared in systm.h for main() */
|
2001-09-12 08:38:13 +00:00
|
|
|
int ifioctl __P((struct socket *, u_long, caddr_t, struct thread *));
|
1997-01-03 19:50:26 +00:00
|
|
|
int ifpromisc __P((struct ifnet *, int));
|
2001-07-02 20:49:25 +00:00
|
|
|
struct ifnet *ifunit __P((const char *));
|
1999-11-05 14:41:39 +00:00
|
|
|
struct ifnet *if_withname __P((struct sockaddr *));
|
1997-01-03 19:50:26 +00:00
|
|
|
|
|
|
|
int if_poll_recv_slow __P((struct ifnet *ifp, int *quotap));
|
|
|
|
void if_poll_xmit_slow __P((struct ifnet *ifp, int *quotap));
|
|
|
|
void if_poll_throttle __P((void));
|
|
|
|
void if_poll_unthrottle __P((void *));
|
|
|
|
void if_poll_init __P((void));
|
|
|
|
void if_poll __P((void));
|
|
|
|
|
|
|
|
struct ifaddr *ifa_ifwithaddr __P((struct sockaddr *));
|
|
|
|
struct ifaddr *ifa_ifwithdstaddr __P((struct sockaddr *));
|
|
|
|
struct ifaddr *ifa_ifwithnet __P((struct sockaddr *));
|
|
|
|
struct ifaddr *ifa_ifwithroute __P((int, struct sockaddr *,
|
|
|
|
struct sockaddr *));
|
|
|
|
struct ifaddr *ifaof_ifpforaddr __P((struct sockaddr *, struct ifnet *));
|
|
|
|
void ifafree __P((struct ifaddr *));
|
|
|
|
|
1999-11-22 02:45:11 +00:00
|
|
|
struct ifmultiaddr *ifmaof_ifpforaddr __P((struct sockaddr *,
|
1997-01-08 13:20:25 +00:00
|
|
|
struct ifnet *));
|
2000-05-24 21:16:56 +00:00
|
|
|
int if_simloop __P((struct ifnet *ifp, struct mbuf *m, int af, int hlen));
|
1997-01-08 13:20:25 +00:00
|
|
|
|
2001-07-02 20:49:25 +00:00
|
|
|
void if_clone_attach __P((struct if_clone *));
|
|
|
|
void if_clone_detach __P((struct if_clone *));
|
|
|
|
|
|
|
|
int if_clone_create __P((char *, int));
|
|
|
|
int if_clone_destroy __P((const char *));
|
|
|
|
|
2001-10-14 20:17:53 +00:00
|
|
|
#define IF_LLADDR(ifp) \
|
|
|
|
LLADDR((struct sockaddr_dl *) ifaddr_byindex((ifp)->if_index)->ifa_addr)
|
|
|
|
|
1999-12-29 04:46:21 +00:00
|
|
|
#endif /* _KERNEL */
|
1997-01-03 19:50:26 +00:00
|
|
|
|
|
|
|
#endif /* !_NET_IF_VAR_H_ */
|