2003-03-04 23:19:55 +00:00
|
|
|
/*-
|
|
|
|
* Copyright (c) 2001,2002,2003 Jonathan Lemon <jlemon@FreeBSD.org>
|
2002-09-22 05:56:41 +00:00
|
|
|
* Copyright (c) 1997, Stefan Esser <se@freebsd.org>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
2003-03-04 23:19:55 +00:00
|
|
|
* notice, this list of conditions and the following disclaimer.
|
2002-09-22 05:56:41 +00:00
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
2003-03-04 23:19:55 +00:00
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
2002-09-22 05:56:41 +00:00
|
|
|
*
|
|
|
|
* $FreeBSD$
|
|
|
|
*/
|
|
|
|
|
2005-10-05 10:09:17 +00:00
|
|
|
#include "opt_device_polling.h"
|
Change the default disposition of debug.mpsafenet from 0 to 1, which
will cause the network stack to operate without the Giant lock by
default. This change has the potential to improve performance by
increasing parallelism and decreasing latency in network processing.
Due to the potential exposure of existing or new bugs, the following
compatibility functionality is maintained:
- It is still possible to disable Giant-free operation by setting
debug.mpsafenet to 0 in loader.conf.
- Add "options NET_WITH_GIANT", which will restore the default value of
debug.mpsafenet to 0, and is intended for use on systems compiled with
known unsafe components, or where a more conservative configuration is
desired.
- Add a new declaration, NET_NEEDS_GIANT("componentname"), which permits
kernel components to declare dependence on Giant over the network
stack. If the declaration is made by a preloaded module or a compiled
in component, the disposition of debug.mpsafenet will be set to 0 and
a warning concerning performance degraded operation printed to the
console. If it is declared by a loadable kernel module after boot, a
warning is displayed but the disposition cannot be changed. This is
implemented by defining a new SYSINIT() value, SI_SUB_SETTINGS, which
is intended for the processing of configuration choices after tunables
are read in and the console is available to generate errors, but
before much else gets going.
This compatibility behavior will go away when we've finished the last
of the locking work and are confident that operation is correct.
2004-08-28 15:11:13 +00:00
|
|
|
|
2002-09-22 05:56:41 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/bus.h>
|
2003-03-04 23:19:55 +00:00
|
|
|
#include <sys/rtprio.h>
|
|
|
|
#include <sys/systm.h>
|
2002-09-22 05:56:41 +00:00
|
|
|
#include <sys/interrupt.h>
|
|
|
|
#include <sys/kernel.h>
|
2003-03-04 23:19:55 +00:00
|
|
|
#include <sys/kthread.h>
|
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/proc.h>
|
|
|
|
#include <sys/random.h>
|
|
|
|
#include <sys/resourcevar.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/unistd.h>
|
Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The currvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at mbuf's
m->m_pkthdr.rcvif->if_vnet, sockets's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, with an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typical cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by: julian (mentor)
2009-05-05 10:56:12 +00:00
|
|
|
#include <sys/vimage.h>
|
2003-03-04 23:19:55 +00:00
|
|
|
#include <machine/atomic.h>
|
|
|
|
#include <machine/cpu.h>
|
|
|
|
#include <machine/stdarg.h>
|
2002-09-22 05:56:41 +00:00
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
|
|
|
|
#include <net/if.h>
|
|
|
|
#include <net/if_types.h>
|
|
|
|
#include <net/if_var.h>
|
2002-09-22 05:56:41 +00:00
|
|
|
#include <net/netisr.h>
|
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
/*
 * Pending-work bitmask: one bit per registered netisr slot; bits are set
 * by schednetisr() and atomically collected and cleared by swi_net().
 */
volatile unsigned int netisr;	/* scheduling bits for network */

/*
 * Registration record for one software network ISR: the protocol's input
 * handler plus an optional queue of mbufs awaiting processing.  Indexed
 * by protocol number; each slot corresponds to one bit of 'netisr' above.
 */
struct netisr {
	netisr_t	*ni_handler;	/* protocol input routine */
	struct ifqueue	*ni_queue;	/* pending packets, or NULL if the
					   protocol does its own queueing */
	int		ni_flags;	/* registration flags (must be 0) */
} netisrs[32];
|
2002-09-22 05:56:41 +00:00
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
/* Software interrupt cookie from swi_add(); used to schedule swi_net(). */
static void *net_ih;

/*
 * Kick the network software interrupt thread.  NOTE(review): presumably
 * the backing implementation of the historical setsoftnet() interface --
 * confirm against callers.
 */
void
legacy_setsoftnet(void)
{
	swi_sched(net_ih, 0);
}
|
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
void
|
2003-11-08 22:28:40 +00:00
|
|
|
netisr_register(int num, netisr_t *handler, struct ifqueue *inq, int flags)
|
2003-03-04 23:19:55 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
|
|
|
|
("bad isr %d", num));
|
2009-05-13 17:22:33 +00:00
|
|
|
KASSERT(flags == 0, ("netisr_register: bad flags 0x%x\n", flags));
|
2003-03-04 23:19:55 +00:00
|
|
|
netisrs[num].ni_handler = handler;
|
|
|
|
netisrs[num].ni_queue = inq;
|
2003-11-08 22:28:40 +00:00
|
|
|
netisrs[num].ni_flags = flags;
|
2003-03-04 23:19:55 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
netisr_unregister(int num)
|
2002-09-22 05:56:41 +00:00
|
|
|
{
|
2003-03-04 23:19:55 +00:00
|
|
|
struct netisr *ni;
|
2002-09-22 05:56:41 +00:00
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
|
|
|
|
("bad isr %d", num));
|
|
|
|
ni = &netisrs[num];
|
|
|
|
ni->ni_handler = NULL;
|
2003-11-08 22:28:40 +00:00
|
|
|
if (ni->ni_queue != NULL)
|
2003-03-04 23:19:55 +00:00
|
|
|
IF_DRAIN(ni->ni_queue);
|
2004-10-11 20:01:43 +00:00
|
|
|
ni->ni_queue = NULL;
|
2002-09-22 05:56:41 +00:00
|
|
|
}
|
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
/*
 * Dispatch statistics, exported read-only as the net.isr.* sysctls below.
 */
struct isrstat {
	int	isrs_count;		/* dispatch count */
	int	isrs_directed;		/* ...directly dispatched */
	int	isrs_deferred;		/* ...queued instead */
	int	isrs_queued;		/* intentionally queueued */
	int	isrs_drop;		/* dropped 'cuz no handler */
	int	isrs_swi_count;		/* swi_net handlers called */
};
static struct isrstat isrstat;

SYSCTL_NODE(_net, OID_AUTO, isr, CTLFLAG_RW, 0, "netisr counters");

/*
 * Non-zero (the default) lets netisr_dispatch() run handlers directly
 * instead of deferring to the software interrupt thread.  Settable as
 * the net.isr.direct sysctl and loader tunable.
 */
static int	netisr_direct = 1;
SYSCTL_INT(_net_isr, OID_AUTO, direct, CTLFLAG_RW,
    &netisr_direct, 0, "enable direct dispatch");
TUNABLE_INT("net.isr.direct", &netisr_direct);

SYSCTL_INT(_net_isr, OID_AUTO, count, CTLFLAG_RD,
    &isrstat.isrs_count, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, directed, CTLFLAG_RD,
    &isrstat.isrs_directed, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, deferred, CTLFLAG_RD,
    &isrstat.isrs_deferred, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, queued, CTLFLAG_RD,
    &isrstat.isrs_queued, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, drop, CTLFLAG_RD,
    &isrstat.isrs_drop, 0, "");
SYSCTL_INT(_net_isr, OID_AUTO, swi_count, CTLFLAG_RD,
    &isrstat.isrs_swi_count, 0, "");
|
|
|
|
|
2003-10-03 18:27:24 +00:00
|
|
|
/*
|
|
|
|
* Process all packets currently present in a netisr queue. Used to
|
|
|
|
* drain an existing set of packets waiting for processing when we
|
|
|
|
* begin direct dispatch, to avoid processing packets out of order.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
netisr_processqueue(struct netisr *ni)
|
|
|
|
{
|
|
|
|
struct mbuf *m;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
IF_DEQUEUE(ni->ni_queue, m);
|
|
|
|
if (m == NULL)
|
|
|
|
break;
|
Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The currvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discuouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at mbuf's
m->m_pkthdr.rcvif->if_vnet, sockets's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, whith an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typicall cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by: julian (mentor)
2009-05-05 10:56:12 +00:00
|
|
|
VNET_ASSERT(m->m_pkthdr.rcvif != NULL);
|
|
|
|
CURVNET_SET(m->m_pkthdr.rcvif->if_vnet);
|
2003-10-03 18:27:24 +00:00
|
|
|
ni->ni_handler(m);
|
Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The currvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discuouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at mbuf's
m->m_pkthdr.rcvif->if_vnet, sockets's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, whith an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typicall cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by: julian (mentor)
2009-05-05 10:56:12 +00:00
|
|
|
CURVNET_RESTORE();
|
2003-10-03 18:27:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
/*
|
|
|
|
* Call the netisr directly instead of queueing the packet, if possible.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
netisr_dispatch(int num, struct mbuf *m)
|
2002-09-22 05:56:41 +00:00
|
|
|
{
|
2003-03-04 23:19:55 +00:00
|
|
|
struct netisr *ni;
|
2002-09-22 05:56:41 +00:00
|
|
|
|
2003-11-08 22:28:40 +00:00
|
|
|
isrstat.isrs_count++; /* XXX redundant */
|
2003-03-04 23:19:55 +00:00
|
|
|
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
|
|
|
|
("bad isr %d", num));
|
|
|
|
ni = &netisrs[num];
|
2003-03-08 22:12:32 +00:00
|
|
|
if (ni->ni_queue == NULL) {
|
2003-11-08 22:28:40 +00:00
|
|
|
isrstat.isrs_drop++;
|
2003-03-08 22:12:32 +00:00
|
|
|
m_freem(m);
|
|
|
|
return;
|
|
|
|
}
|
2008-07-04 00:21:38 +00:00
|
|
|
|
2003-11-08 22:28:40 +00:00
|
|
|
/*
|
2009-05-13 17:22:33 +00:00
|
|
|
* Directly dispatch handling of this packet, if permitted by global
|
|
|
|
* policy. Source ordering is maintained by virtue of callers
|
|
|
|
* consistently calling one of queued or direct dispatch.
|
2003-11-08 22:28:40 +00:00
|
|
|
*/
|
2009-05-13 17:22:33 +00:00
|
|
|
if (netisr_direct) {
|
2003-03-04 23:19:55 +00:00
|
|
|
isrstat.isrs_directed++;
|
|
|
|
ni->ni_handler(m);
|
|
|
|
} else {
|
|
|
|
isrstat.isrs_deferred++;
|
|
|
|
if (IF_HANDOFF(ni->ni_queue, m, NULL))
|
|
|
|
schednetisr(num);
|
2002-09-22 05:56:41 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2003-03-04 23:19:55 +00:00
|
|
|
/*
|
|
|
|
* Same as above, but always queue.
|
|
|
|
* This is either used in places where we are not confident that
|
|
|
|
* direct dispatch is possible, or where queueing is required.
|
2004-08-27 18:33:08 +00:00
|
|
|
* It returns (0) on success and ERRNO on failure. On failure the
|
|
|
|
* mbuf has been free'd.
|
2003-03-04 23:19:55 +00:00
|
|
|
*/
|
|
|
|
int
|
|
|
|
netisr_queue(int num, struct mbuf *m)
|
|
|
|
{
|
|
|
|
struct netisr *ni;
|
|
|
|
|
|
|
|
KASSERT(!(num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs))),
|
|
|
|
("bad isr %d", num));
|
|
|
|
ni = &netisrs[num];
|
2003-03-08 22:12:32 +00:00
|
|
|
if (ni->ni_queue == NULL) {
|
2003-11-08 22:28:40 +00:00
|
|
|
isrstat.isrs_drop++;
|
2003-03-08 22:12:32 +00:00
|
|
|
m_freem(m);
|
2004-08-27 18:33:08 +00:00
|
|
|
return (ENXIO);
|
2003-03-08 22:12:32 +00:00
|
|
|
}
|
2003-03-04 23:19:55 +00:00
|
|
|
isrstat.isrs_queued++;
|
|
|
|
if (!IF_HANDOFF(ni->ni_queue, m, NULL))
|
2004-08-27 18:33:08 +00:00
|
|
|
return (ENOBUFS); /* IF_HANDOFF has free'd the mbuf */
|
2003-03-04 23:19:55 +00:00
|
|
|
schednetisr(num);
|
2004-08-27 18:33:08 +00:00
|
|
|
return (0);
|
2003-03-04 23:19:55 +00:00
|
|
|
}
|
2002-09-22 05:56:41 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
swi_net(void *dummy)
|
|
|
|
{
|
2003-03-04 23:19:55 +00:00
|
|
|
struct netisr *ni;
|
2002-09-22 05:56:41 +00:00
|
|
|
u_int bits;
|
|
|
|
int i;
|
|
|
|
#ifdef DEVICE_POLLING
|
2003-03-04 23:19:55 +00:00
|
|
|
const int polling = 1;
|
|
|
|
#else
|
|
|
|
const int polling = 0;
|
2002-09-22 05:56:41 +00:00
|
|
|
#endif
|
2003-03-04 23:19:55 +00:00
|
|
|
|
|
|
|
do {
|
|
|
|
bits = atomic_readandclear_int(&netisr);
|
|
|
|
if (bits == 0)
|
|
|
|
break;
|
|
|
|
while ((i = ffs(bits)) != 0) {
|
|
|
|
isrstat.isrs_swi_count++;
|
|
|
|
i--;
|
|
|
|
bits &= ~(1 << i);
|
|
|
|
ni = &netisrs[i];
|
|
|
|
if (ni->ni_handler == NULL) {
|
|
|
|
printf("swi_net: unregistered isr %d.\n", i);
|
|
|
|
continue;
|
|
|
|
}
|
2008-07-04 00:21:38 +00:00
|
|
|
if (ni->ni_queue == NULL)
|
|
|
|
ni->ni_handler(NULL);
|
|
|
|
else
|
|
|
|
netisr_processqueue(ni);
|
2003-03-04 23:19:55 +00:00
|
|
|
}
|
|
|
|
} while (polling);
|
2002-09-22 05:56:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
start_netisr(void *dummy)
|
|
|
|
{
|
|
|
|
|
2003-11-08 22:28:40 +00:00
|
|
|
if (swi_add(NULL, "net", swi_net, NULL, SWI_NET, INTR_MPSAFE, &net_ih))
|
2002-09-22 05:56:41 +00:00
|
|
|
panic("start_netisr");
|
|
|
|
}
|
2008-03-16 10:58:09 +00:00
|
|
|
SYSINIT(start_netisr, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_netisr, NULL);
|