1999-10-21 09:06:11 +00:00
|
|
|
/*
|
|
|
|
* ng_base.c
|
2005-01-07 01:45:51 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*-
|
1999-10-21 09:06:11 +00:00
|
|
|
* Copyright (c) 1996-1999 Whistle Communications, Inc.
|
|
|
|
* All rights reserved.
|
2001-01-06 00:46:47 +00:00
|
|
|
*
|
1999-10-21 09:06:11 +00:00
|
|
|
* Subject to the following obligations and disclaimer of warranty, use and
|
|
|
|
* redistribution of this software, in source or object code forms, with or
|
|
|
|
* without modifications are expressly permitted by Whistle Communications;
|
|
|
|
* provided, however, that:
|
|
|
|
* 1. Any and all reproductions of the source or object code must include the
|
|
|
|
* copyright notice above and the following disclaimer of warranties; and
|
|
|
|
* 2. No rights are granted, in any manner or form, to use Whistle
|
|
|
|
* Communications, Inc. trademarks, including the mark "WHISTLE
|
|
|
|
* COMMUNICATIONS" on advertising, endorsements, or otherwise except as
|
|
|
|
* such appears in the above copyright notice or in the software.
|
2001-01-06 00:46:47 +00:00
|
|
|
*
|
1999-10-21 09:06:11 +00:00
|
|
|
* THIS SOFTWARE IS BEING PROVIDED BY WHISTLE COMMUNICATIONS "AS IS", AND
|
|
|
|
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, WHISTLE COMMUNICATIONS MAKES NO
|
|
|
|
* REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, REGARDING THIS SOFTWARE,
|
|
|
|
* INCLUDING WITHOUT LIMITATION, ANY AND ALL IMPLIED WARRANTIES OF
|
|
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT.
|
|
|
|
* WHISTLE COMMUNICATIONS DOES NOT WARRANT, GUARANTEE, OR MAKE ANY
|
|
|
|
* REPRESENTATIONS REGARDING THE USE OF, OR THE RESULTS OF THE USE OF THIS
|
|
|
|
* SOFTWARE IN TERMS OF ITS CORRECTNESS, ACCURACY, RELIABILITY OR OTHERWISE.
|
|
|
|
* IN NO EVENT SHALL WHISTLE COMMUNICATIONS BE LIABLE FOR ANY DAMAGES
|
|
|
|
* RESULTING FROM OR ARISING OUT OF ANY USE OF THIS SOFTWARE, INCLUDING
|
|
|
|
* WITHOUT LIMITATION, ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
|
|
|
* PUNITIVE, OR CONSEQUENTIAL DAMAGES, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
|
|
* SERVICES, LOSS OF USE, DATA OR PROFITS, HOWEVER CAUSED AND UNDER ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF WHISTLE COMMUNICATIONS IS ADVISED OF THE POSSIBILITY
|
|
|
|
* OF SUCH DAMAGE.
|
|
|
|
*
|
2000-10-24 17:32:45 +00:00
|
|
|
* Authors: Julian Elischer <julian@freebsd.org>
|
|
|
|
* Archie Cobbs <archie@freebsd.org>
|
1999-10-21 09:06:11 +00:00
|
|
|
*
|
|
|
|
* $FreeBSD$
|
|
|
|
* $Whistle: ng_base.c,v 1.39 1999/01/28 23:54:53 julian Exp $
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This file implements the base netgraph code.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/param.h>
|
2004-12-23 13:09:37 +00:00
|
|
|
#include <sys/systm.h>
|
2004-12-23 10:48:10 +00:00
|
|
|
#include <sys/ctype.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
#include <sys/errno.h>
|
2004-07-10 21:45:58 +00:00
|
|
|
#include <sys/kdb.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
#include <sys/kernel.h>
|
2006-01-11 15:29:48 +00:00
|
|
|
#include <sys/ktr.h>
|
2003-04-29 13:36:06 +00:00
|
|
|
#include <sys/limits.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
2004-12-23 10:48:10 +00:00
|
|
|
#include <sys/queue.h>
|
2001-02-23 16:34:22 +00:00
|
|
|
#include <sys/sysctl.h>
|
2004-12-23 10:48:10 +00:00
|
|
|
#include <sys/syslog.h>
|
2007-10-19 15:04:17 +00:00
|
|
|
#include <sys/refcount.h>
|
2008-01-31 08:51:48 +00:00
|
|
|
#include <sys/proc.h>
|
2008-12-14 20:15:30 +00:00
|
|
|
#include <sys/unistd.h>
|
|
|
|
#include <sys/kthread.h>
|
|
|
|
#include <sys/smp.h>
|
2008-04-06 15:26:32 +00:00
|
|
|
#include <machine/cpu.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
#include <net/netisr.h>
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing them in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
#include <net/vnet.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
#include <netgraph/ng_message.h>
|
|
|
|
#include <netgraph/netgraph.h>
|
1999-11-30 02:45:32 +00:00
|
|
|
#include <netgraph/ng_parse.h>
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-02-05 18:57:11 +00:00
|
|
|
MODULE_VERSION(netgraph, NG_ABI_VERSION);
|
2000-04-29 13:36:07 +00:00
|
|
|
|
2005-11-02 15:23:47 +00:00
|
|
|
/* Mutex to protect topology events. */
|
|
|
|
static struct mtx ng_topo_mtx;
|
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
#ifdef	NETGRAPH_DEBUG
/*
 * Debug-only bookkeeping: when NETGRAPH_DEBUG is set, nodes and hooks are
 * never returned to malloc(9); they are kept on "free" lists so that
 * reference-count problems can be examined after the fact.
 */
static struct mtx	ng_nodelist_mtx; /* protects global node/hook lists */
static struct mtx	ngq_mtx;	/* protects the queue item list */

static SLIST_HEAD(, ng_node) ng_allnodes;
static LIST_HEAD(, ng_node) ng_freenodes; /* in debug, we never free() them */
static SLIST_HEAD(, ng_hook) ng_allhooks;
static LIST_HEAD(, ng_hook) ng_freehooks; /* in debug, we never free() them */

static void ng_dumpitems(void);
static void ng_dumpnodes(void);
static void ng_dumphooks(void);

#endif	/* NETGRAPH_DEBUG */
|
2001-01-11 19:27:54 +00:00
|
|
|
/*
|
2005-11-15 10:54:20 +00:00
|
|
|
* DEAD versions of the structures.
|
2001-01-11 19:27:54 +00:00
|
|
|
 * In order to avoid races, it is sometimes necessary to point
|
2005-11-15 10:54:20 +00:00
|
|
|
* at SOMETHING even though theoretically, the current entity is
|
2001-01-11 19:27:54 +00:00
|
|
|
* INVALID. Use these to avoid these races.
|
|
|
|
*/
|
|
|
|
struct ng_type ng_deadtype = {
|
|
|
|
NG_ABI_VERSION,
|
|
|
|
"dead",
|
|
|
|
NULL, /* modevent */
|
|
|
|
NULL, /* constructor */
|
|
|
|
NULL, /* rcvmsg */
|
|
|
|
NULL, /* shutdown */
|
|
|
|
NULL, /* newhook */
|
|
|
|
NULL, /* findhook */
|
|
|
|
NULL, /* connect */
|
|
|
|
NULL, /* rcvdata */
|
|
|
|
NULL, /* disconnect */
|
|
|
|
NULL, /* cmdlist */
|
|
|
|
};
|
|
|
|
|
|
|
|
struct ng_node ng_deadnode = {
|
|
|
|
"dead",
|
|
|
|
&ng_deadtype,
|
2004-07-20 17:15:38 +00:00
|
|
|
NGF_INVALID,
|
2001-01-11 19:27:54 +00:00
|
|
|
0, /* numhooks */
|
|
|
|
NULL, /* private */
|
|
|
|
0, /* ID */
|
2009-12-28 22:56:30 +00:00
|
|
|
LIST_HEAD_INITIALIZER(ng_deadnode.nd_hooks),
|
2001-01-11 19:27:54 +00:00
|
|
|
{}, /* all_nodes list entry */
|
|
|
|
{}, /* id hashtable list entry */
|
|
|
|
{ 0,
|
2008-04-15 21:15:32 +00:00
|
|
|
0,
|
2001-01-11 19:27:54 +00:00
|
|
|
{}, /* should never use! (should hang) */
|
2008-04-15 21:15:32 +00:00
|
|
|
{}, /* workqueue entry */
|
|
|
|
STAILQ_HEAD_INITIALIZER(ng_deadnode.nd_input_queue.queue),
|
2001-01-11 19:27:54 +00:00
|
|
|
},
|
2008-04-15 21:15:32 +00:00
|
|
|
1, /* refs */
|
2009-05-05 16:26:06 +00:00
|
|
|
NULL, /* vnet */
|
2001-01-11 19:27:54 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
ND_MAGIC,
|
|
|
|
__FILE__,
|
|
|
|
__LINE__,
|
|
|
|
{NULL}
|
|
|
|
#endif /* NETGRAPH_DEBUG */
|
|
|
|
};
|
|
|
|
|
|
|
|
struct ng_hook ng_deadhook = {
|
|
|
|
"dead",
|
|
|
|
NULL, /* private */
|
|
|
|
HK_INVALID | HK_DEAD,
|
2005-07-21 20:34:40 +00:00
|
|
|
0, /* undefined data link type */
|
2001-01-11 19:27:54 +00:00
|
|
|
&ng_deadhook, /* Peer is self */
|
|
|
|
&ng_deadnode, /* attached to deadnode */
|
|
|
|
{}, /* hooks list */
|
2001-01-31 20:46:00 +00:00
|
|
|
NULL, /* override rcvmsg() */
|
|
|
|
NULL, /* override rcvdata() */
|
2008-04-15 21:15:32 +00:00
|
|
|
1, /* refs always >= 1 */
|
2001-01-11 19:27:54 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
HK_MAGIC,
|
|
|
|
__FILE__,
|
|
|
|
__LINE__,
|
|
|
|
{NULL}
|
|
|
|
#endif /* NETGRAPH_DEBUG */
|
|
|
|
};
|
2001-01-08 05:34:06 +00:00
|
|
|
|
2001-01-11 19:27:54 +00:00
|
|
|
/*
|
|
|
|
* END DEAD STRUCTURES
|
|
|
|
*/
|
2001-01-06 00:46:47 +00:00
|
|
|
/* List nodes with unallocated work */
|
2008-04-15 21:15:32 +00:00
|
|
|
static STAILQ_HEAD(, ng_node) ng_worklist = STAILQ_HEAD_INITIALIZER(ng_worklist);
|
2001-02-01 20:51:23 +00:00
|
|
|
static struct mtx ng_worklist_mtx; /* MUST LOCK NODE FIRST */
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* List of installed types */
|
2001-01-06 00:46:47 +00:00
|
|
|
static LIST_HEAD(, ng_type) ng_typelist;
|
|
|
|
static struct mtx ng_typelist_mtx;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/* Hash related definitions */
|
2001-01-21 23:32:00 +00:00
|
|
|
/* XXX Don't need to initialise them because it's a LIST */
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
static VNET_DEFINE(LIST_HEAD(, ng_node), ng_ID_hash[NG_ID_HASH_SIZE]);
|
2009-07-16 21:13:04 +00:00
|
|
|
#define V_ng_ID_hash VNET(ng_ID_hash)
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
static struct mtx ng_idhash_mtx;
|
2001-01-21 23:32:00 +00:00
|
|
|
/* Method to find a node.. used twice so do it here */
|
|
|
|
#define NG_IDHASH_FN(ID) ((ID) % (NG_ID_HASH_SIZE))
|
|
|
|
#define NG_IDHASH_FIND(ID, node) \
|
|
|
|
do { \
|
2004-06-24 01:47:31 +00:00
|
|
|
mtx_assert(&ng_idhash_mtx, MA_OWNED); \
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_FOREACH(node, &V_ng_ID_hash[NG_IDHASH_FN(ID)], \
|
2001-01-21 23:32:00 +00:00
|
|
|
nd_idnodes) { \
|
|
|
|
if (NG_NODE_IS_VALID(node) \
|
|
|
|
&& (NG_NODE_ID(node) == ID)) { \
|
|
|
|
break; \
|
|
|
|
} \
|
|
|
|
} \
|
|
|
|
} while (0)
|
2001-01-06 00:46:47 +00:00
|
|
|
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing them in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
static VNET_DEFINE(LIST_HEAD(, ng_node), ng_name_hash[NG_NAME_HASH_SIZE]);
|
2009-07-16 21:13:04 +00:00
|
|
|
#define V_ng_name_hash VNET(ng_name_hash)
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
|
2008-03-04 18:22:18 +00:00
|
|
|
static struct mtx ng_namehash_mtx;
|
|
|
|
#define NG_NAMEHASH(NAME, HASH) \
|
|
|
|
do { \
|
|
|
|
u_char h = 0; \
|
|
|
|
const u_char *c; \
|
|
|
|
for (c = (const u_char*)(NAME); *c; c++)\
|
|
|
|
h += *c; \
|
|
|
|
(HASH) = h % (NG_NAME_HASH_SIZE); \
|
|
|
|
} while (0)
|
|
|
|
|
1999-11-01 00:31:14 +00:00
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Internal functions */
|
|
|
|
static int ng_add_hook(node_p node, const char *name, hook_p * hookp);
|
2001-01-06 00:46:47 +00:00
|
|
|
static int ng_generic_msg(node_p here, item_p item, hook_p lasthook);
|
1999-11-01 00:31:14 +00:00
|
|
|
static ng_ID_t ng_decodeidname(const char *name);
|
1999-10-21 09:06:11 +00:00
|
|
|
static int ngb_mod_event(module_t mod, int event, void *data);
|
2008-04-06 15:26:32 +00:00
|
|
|
static void ng_worklist_add(node_p node);
|
2008-12-14 20:15:30 +00:00
|
|
|
static void ngthread(void *);
|
2007-06-01 09:20:57 +00:00
|
|
|
static int ng_apply_item(node_p node, item_p item, int rw);
|
2008-04-15 21:15:32 +00:00
|
|
|
static void ng_flush_input_queue(node_p node);
|
2001-01-06 00:46:47 +00:00
|
|
|
static node_p ng_ID2noderef(ng_ID_t ID);
|
2007-10-19 15:04:17 +00:00
|
|
|
static int ng_con_nodes(item_p item, node_p node, const char *name,
|
|
|
|
node_p node2, const char *name2);
|
|
|
|
static int ng_con_part2(node_p node, item_p item, hook_p hook);
|
|
|
|
static int ng_con_part3(node_p node, item_p item, hook_p hook);
|
2001-01-14 23:25:54 +00:00
|
|
|
static int ng_mkpeer(node_p node, const char *name,
|
|
|
|
const char *name2, char *type);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2005-11-15 10:54:20 +00:00
|
|
|
/* Imported, these used to be externally visible, some may go back. */
|
2001-01-06 00:46:47 +00:00
|
|
|
void ng_destroy_hook(hook_p hook);
|
|
|
|
int ng_path2noderef(node_p here, const char *path,
|
|
|
|
node_p *dest, hook_p *lasthook);
|
|
|
|
int ng_make_node(const char *type, node_p *nodepp);
|
|
|
|
int ng_path_parse(char *addr, char **node, char **path, char **hook);
|
2001-01-30 20:51:52 +00:00
|
|
|
void ng_rmnode(node_p node, hook_p dummy1, void *dummy2, int dummy3);
|
2001-01-08 05:34:06 +00:00
|
|
|
void ng_unname(node_p node);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Our own netgraph malloc type */
|
|
|
|
MALLOC_DEFINE(M_NETGRAPH, "netgraph", "netgraph structures and ctrl messages");
|
2001-01-06 00:46:47 +00:00
|
|
|
MALLOC_DEFINE(M_NETGRAPH_HOOK, "netgraph_hook", "netgraph hook structures");
|
|
|
|
MALLOC_DEFINE(M_NETGRAPH_NODE, "netgraph_node", "netgraph node structures");
|
|
|
|
MALLOC_DEFINE(M_NETGRAPH_ITEM, "netgraph_item", "netgraph item structures");
|
|
|
|
MALLOC_DEFINE(M_NETGRAPH_MSG, "netgraph_msg", "netgraph name storage");
|
|
|
|
|
|
|
|
/* Should not be visible outside this file */

/*
 * Raw allocators for hooks and nodes.  M_NOWAIT: may return NULL, so
 * callers must check the result; M_ZERO: returned memory is zeroed.
 */
#define	_NG_ALLOC_HOOK(hook) \
	hook = malloc(sizeof(*hook), M_NETGRAPH_HOOK, M_NOWAIT | M_ZERO)
#define	_NG_ALLOC_NODE(node) \
	node = malloc(sizeof(*node), M_NETGRAPH_NODE, M_NOWAIT | M_ZERO)
|
2001-01-08 05:34:06 +00:00
|
|
|
|
2007-03-30 14:34:34 +00:00
|
|
|
/* Wrappers around the per-node queue mutex and the global worklist mutex. */
#define	NG_QUEUE_LOCK_INIT(n)			\
	mtx_init(&(n)->q_mtx, "ng_node", NULL, MTX_DEF)
#define	NG_QUEUE_LOCK(n)			\
	mtx_lock(&(n)->q_mtx)
#define	NG_QUEUE_UNLOCK(n)			\
	mtx_unlock(&(n)->q_mtx)
#define	NG_WORKLIST_LOCK_INIT()			\
	mtx_init(&ng_worklist_mtx, "ng_worklist", NULL, MTX_DEF)
#define	NG_WORKLIST_LOCK()			\
	mtx_lock(&ng_worklist_mtx)
#define	NG_WORKLIST_UNLOCK()			\
	mtx_unlock(&ng_worklist_mtx)
/* Block a netgraph worker thread until new work is queued (lock held). */
#define	NG_WORKLIST_SLEEP()			\
	mtx_sleep(&ng_worklist, &ng_worklist_mtx, PI_NET, "sleep", 0)
#define	NG_WORKLIST_WAKEUP()			\
	wakeup_one(&ng_worklist)
|
2007-03-30 14:34:34 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG /*----------------------------------------------*/
|
|
|
|
/*
|
|
|
|
* In debug mode:
|
|
|
|
* In an attempt to help track reference count screwups
|
|
|
|
* we do not free objects back to the malloc system, but keep them
|
|
|
|
* in a local cache where we can examine them and keep information safely
|
|
|
|
* after they have been freed.
|
|
|
|
* We use this scheme for nodes and hooks, and to some extent for items.
|
|
|
|
*/
|
|
|
|
static __inline hook_p
|
|
|
|
ng_alloc_hook(void)
|
|
|
|
{
|
|
|
|
hook_p hook;
|
|
|
|
SLIST_ENTRY(ng_hook) temp;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_nodelist_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
hook = LIST_FIRST(&ng_freehooks);
|
|
|
|
if (hook) {
|
|
|
|
LIST_REMOVE(hook, hk_hooks);
|
|
|
|
bcopy(&hook->hk_all, &temp, sizeof(temp));
|
|
|
|
bzero(hook, sizeof(struct ng_hook));
|
|
|
|
bcopy(&temp, &hook->hk_all, sizeof(temp));
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_nodelist_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
hook->hk_magic = HK_MAGIC;
|
|
|
|
} else {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_nodelist_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
_NG_ALLOC_HOOK(hook);
|
|
|
|
if (hook) {
|
|
|
|
hook->hk_magic = HK_MAGIC;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_nodelist_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
SLIST_INSERT_HEAD(&ng_allhooks, hook, hk_all);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_nodelist_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return (hook);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate a netgraph node.  This is the NETGRAPH_DEBUG variant (the
 * non-debug versions appear under the #else below): nodes are recycled
 * from the ng_freenodes list so that freed nodes remain addressable and
 * stale references can be detected via nd_magic.
 * Returns the new node, or NULL if allocation failed.
 */
static __inline node_p
ng_alloc_node(void)
{
	node_p node;
	SLIST_ENTRY(ng_node) temp;

	mtx_lock(&ng_nodelist_mtx);
	node = LIST_FIRST(&ng_freenodes);
	if (node) {
		LIST_REMOVE(node, nd_nodes);
		/*
		 * Clear the recycled node while preserving its nd_all
		 * linkage (it stays on the ng_allnodes list): save the
		 * SLIST entry, zero the whole node, then restore it.
		 */
		bcopy(&node->nd_all, &temp, sizeof(temp));
		bzero(node, sizeof(struct ng_node));
		bcopy(&temp, &node->nd_all, sizeof(temp));
		mtx_unlock(&ng_nodelist_mtx);
		node->nd_magic = ND_MAGIC;
	} else {
		/* Freelist empty: drop the mutex before really allocating. */
		mtx_unlock(&ng_nodelist_mtx);
		_NG_ALLOC_NODE(node);
		if (node) {
			node->nd_magic = ND_MAGIC;
			/* Track every node ever allocated on ng_allnodes. */
			mtx_lock(&ng_nodelist_mtx);
			SLIST_INSERT_HEAD(&ng_allnodes, node, nd_all);
			mtx_unlock(&ng_nodelist_mtx);
		}
	}
	return (node);
}
|
|
|
|
|
|
|
|
/* Debug allocation wrappers: route through the recycling allocators above. */
#define NG_ALLOC_HOOK(hook) do { (hook) = ng_alloc_hook(); } while (0)
#define NG_ALLOC_NODE(node) do { (node) = ng_alloc_node(); } while (0)
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Debug version of hook free: instead of releasing the memory, put the
 * hook on the ng_freehooks list (under the node list mutex) and clear
 * hk_magic so any later reference to the hook is detectable.
 */
#define NG_FREE_HOOK(hook)						\
	do {								\
		mtx_lock(&ng_nodelist_mtx);				\
		LIST_INSERT_HEAD(&ng_freehooks, hook, hk_hooks);	\
		hook->hk_magic = 0;					\
		mtx_unlock(&ng_nodelist_mtx);				\
	} while (0)
|
|
|
|
|
|
|
|
/*
 * Debug version of node free: instead of releasing the memory, put the
 * node on the ng_freenodes list (under the node list mutex) and clear
 * nd_magic so any later reference to the node is detectable.
 */
#define NG_FREE_NODE(node)						\
	do {								\
		mtx_lock(&ng_nodelist_mtx);				\
		LIST_INSERT_HEAD(&ng_freenodes, node, nd_nodes);	\
		node->nd_magic = 0;					\
		mtx_unlock(&ng_nodelist_mtx);				\
	} while (0)
|
|
|
|
|
|
|
|
#else /* NETGRAPH_DEBUG */ /*----------------------------------------------*/

/* Non-debug versions: plain allocation and immediate free. */
#define NG_ALLOC_HOOK(hook) _NG_ALLOC_HOOK(hook)
#define NG_ALLOC_NODE(node) _NG_ALLOC_NODE(node)

#define NG_FREE_HOOK(hook) do { free((hook), M_NETGRAPH_HOOK); } while (0)
#define NG_FREE_NODE(node) do { free((node), M_NETGRAPH_NODE); } while (0)

#endif /* NETGRAPH_DEBUG */ /*----------------------------------------------*/

/* Set this to kdb_enter("X") to catch all errors as they occur */
#ifndef TRAP_ERROR
#define TRAP_ERROR()
#endif
|
|
|
|
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
/* Next node ID to hand out; virtualized per network stack instance. */
static VNET_DEFINE(ng_ID_t, nextID) = 1;
#define V_nextID VNET(nextID)
|
1999-11-01 00:31:14 +00:00
|
|
|
|
1999-11-19 05:43:33 +00:00
|
|
|
#ifdef INVARIANTS
/*
 * Sanity-check a data mbuf: it must carry a packet header, must not
 * have further packets chained via m_nextpkt, and the sum of the
 * per-mbuf m_len fields must equal m_pkthdr.len.  Panics on violation.
 */
#define CHECK_DATA_MBUF(m) do {						\
		struct mbuf *n;						\
		int total;						\
									\
		M_ASSERTPKTHDR(m);					\
		for (total = 0, n = (m); n != NULL; n = n->m_next) {	\
			total += n->m_len;				\
			if (n->m_nextpkt != NULL)			\
				panic("%s: m_nextpkt", __func__);	\
		}							\
									\
		if ((m)->m_pkthdr.len != total) {			\
			panic("%s: %d != %d",				\
			    __func__, (m)->m_pkthdr.len, total);	\
		}							\
	} while (0)
#else
#define CHECK_DATA_MBUF(m)
#endif
|
|
|
|
|
2007-10-19 15:04:17 +00:00
|
|
|
#define ERROUT(x) do { error = (x); goto done; } while (0)
|
1999-11-01 00:31:14 +00:00
|
|
|
|
1999-11-30 02:45:32 +00:00
|
|
|
/************************************************************************
			Parse type definitions for generic messages
************************************************************************/

/*
 * Handy structure parse type defining macro: expands to the field table
 * (from the corresponding NG_GENERIC_<up>_INFO macro) plus the
 * ng_generic_<lo>_type descriptor that references it.
 */
#define DEFINE_PARSE_STRUCT_TYPE(lo, up, args)				\
	static const struct ng_parse_struct_field			\
		ng_ ## lo ## _type_fields[] = NG_GENERIC_ ## up ## _INFO args; \
	static const struct ng_parse_type ng_generic_ ## lo ## _type = { \
		&ng_parse_struct_type,					\
		&ng_ ## lo ## _type_fields				\
	}

DEFINE_PARSE_STRUCT_TYPE(mkpeer, MKPEER, ());
DEFINE_PARSE_STRUCT_TYPE(connect, CONNECT, ());
DEFINE_PARSE_STRUCT_TYPE(name, NAME, ());
DEFINE_PARSE_STRUCT_TYPE(rmhook, RMHOOK, ());
DEFINE_PARSE_STRUCT_TYPE(nodeinfo, NODEINFO, ());
DEFINE_PARSE_STRUCT_TYPE(typeinfo, TYPEINFO, ());
DEFINE_PARSE_STRUCT_TYPE(linkinfo, LINKINFO, (&ng_generic_nodeinfo_type));
|
|
|
|
|
|
|
|
/* Get length of an array when the length is stored as a 32 bit
|
2001-02-18 10:43:53 +00:00
|
|
|
value immediately preceding the array -- as with struct namelist
|
1999-11-30 02:45:32 +00:00
|
|
|
and struct typelist. */
|
|
|
|
static int
|
|
|
|
ng_generic_list_getLength(const struct ng_parse_type *type,
|
|
|
|
const u_char *start, const u_char *buf)
|
|
|
|
{
|
|
|
|
return *((const u_int32_t *)(buf - 4));
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get length of the array of struct linkinfo inside a struct hooklist */
|
|
|
|
static int
|
|
|
|
ng_generic_linkinfo_getLength(const struct ng_parse_type *type,
|
|
|
|
const u_char *start, const u_char *buf)
|
|
|
|
{
|
|
|
|
const struct hooklist *hl = (const struct hooklist *)start;
|
|
|
|
|
|
|
|
return hl->nodeinfo.hooks;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Array type for a variable length array of struct namelist */
|
|
|
|
static const struct ng_parse_array_info ng_nodeinfoarray_type_info = {
|
|
|
|
&ng_generic_nodeinfo_type,
|
|
|
|
&ng_generic_list_getLength
|
|
|
|
};
|
|
|
|
static const struct ng_parse_type ng_generic_nodeinfoarray_type = {
|
|
|
|
&ng_parse_array_type,
|
|
|
|
&ng_nodeinfoarray_type_info
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Array type for a variable length array of struct typelist */
|
|
|
|
static const struct ng_parse_array_info ng_typeinfoarray_type_info = {
|
|
|
|
&ng_generic_typeinfo_type,
|
|
|
|
&ng_generic_list_getLength
|
|
|
|
};
|
|
|
|
static const struct ng_parse_type ng_generic_typeinfoarray_type = {
|
|
|
|
&ng_parse_array_type,
|
|
|
|
&ng_typeinfoarray_type_info
|
|
|
|
};
|
|
|
|
|
|
|
|
/* Array type for array of struct linkinfo in struct hooklist */
|
|
|
|
static const struct ng_parse_array_info ng_generic_linkinfo_array_type_info = {
|
|
|
|
&ng_generic_linkinfo_type,
|
|
|
|
&ng_generic_linkinfo_getLength
|
|
|
|
};
|
|
|
|
static const struct ng_parse_type ng_generic_linkinfo_array_type = {
|
|
|
|
&ng_parse_array_type,
|
|
|
|
&ng_generic_linkinfo_array_type_info
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Parse types for the generic messages that return lists.
 *
 * Fix: a struct typelist is a counted array of struct typeinfo
 * entries, so it must use the typeinfo array type.  It was wrongly
 * built from ng_generic_nodeinfoarray_type (the namelist element
 * type), which also left ng_generic_typeinfoarray_type unused.
 */
DEFINE_PARSE_STRUCT_TYPE(typelist, TYPELIST, (&ng_generic_typeinfoarray_type));
DEFINE_PARSE_STRUCT_TYPE(hooklist, HOOKLIST,
	(&ng_generic_nodeinfo_type, &ng_generic_linkinfo_array_type));
DEFINE_PARSE_STRUCT_TYPE(listnodes, LISTNODES,
	(&ng_generic_nodeinfoarray_type));
|
|
|
|
|
|
|
|
/* List of commands and how to convert arguments to/from ASCII */
|
|
|
|
static const struct ng_cmdlist ng_generic_cmds[] = {
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_SHUTDOWN,
|
|
|
|
"shutdown",
|
|
|
|
NULL,
|
|
|
|
NULL
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_MKPEER,
|
|
|
|
"mkpeer",
|
|
|
|
&ng_generic_mkpeer_type,
|
|
|
|
NULL
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_CONNECT,
|
|
|
|
"connect",
|
|
|
|
&ng_generic_connect_type,
|
|
|
|
NULL
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_NAME,
|
|
|
|
"name",
|
|
|
|
&ng_generic_name_type,
|
|
|
|
NULL
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_RMHOOK,
|
|
|
|
"rmhook",
|
|
|
|
&ng_generic_rmhook_type,
|
|
|
|
NULL
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_NODEINFO,
|
|
|
|
"nodeinfo",
|
|
|
|
NULL,
|
|
|
|
&ng_generic_nodeinfo_type
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_LISTHOOKS,
|
|
|
|
"listhooks",
|
|
|
|
NULL,
|
|
|
|
&ng_generic_hooklist_type
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_LISTNAMES,
|
|
|
|
"listnames",
|
|
|
|
NULL,
|
|
|
|
&ng_generic_listnodes_type /* same as NGM_LISTNODES */
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_LISTNODES,
|
|
|
|
"listnodes",
|
|
|
|
NULL,
|
|
|
|
&ng_generic_listnodes_type
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_LISTTYPES,
|
|
|
|
"listtypes",
|
|
|
|
NULL,
|
|
|
|
&ng_generic_typeinfo_type
|
|
|
|
},
|
2000-07-03 13:34:18 +00:00
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_TEXT_CONFIG,
|
|
|
|
"textconfig",
|
|
|
|
NULL,
|
|
|
|
&ng_parse_string_type
|
|
|
|
},
|
1999-11-30 02:45:32 +00:00
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_TEXT_STATUS,
|
|
|
|
"textstatus",
|
|
|
|
NULL,
|
|
|
|
&ng_parse_string_type
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_ASCII2BINARY,
|
|
|
|
"ascii2binary",
|
|
|
|
&ng_parse_ng_mesg_type,
|
|
|
|
&ng_parse_ng_mesg_type
|
|
|
|
},
|
|
|
|
{
|
|
|
|
NGM_GENERIC_COOKIE,
|
|
|
|
NGM_BINARY2ASCII,
|
|
|
|
"binary2ascii",
|
|
|
|
&ng_parse_ng_mesg_type,
|
|
|
|
&ng_parse_ng_mesg_type
|
|
|
|
},
|
|
|
|
{ 0 }
|
|
|
|
};
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/************************************************************************
|
|
|
|
Node routines
|
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Instantiate a node of the requested type
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_make_node(const char *typename, node_p *nodepp)
|
|
|
|
{
|
|
|
|
struct ng_type *type;
|
2001-01-06 00:46:47 +00:00
|
|
|
int error;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Check that the type makes sense */
|
|
|
|
if (typename == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
2004-07-27 20:30:56 +00:00
|
|
|
/* Locate the node type. If we fail we return. Do not try to load
|
|
|
|
* module.
|
|
|
|
*/
|
|
|
|
if ((type = ng_findtype(typename)) == NULL)
|
|
|
|
return (ENXIO);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* If we have a constructor, then make the node and
|
|
|
|
* call the constructor to do type specific initialisation.
|
|
|
|
*/
|
|
|
|
if (type->constructor != NULL) {
|
|
|
|
if ((error = ng_make_node_common(type, nodepp)) == 0) {
|
|
|
|
if ((error = ((*type->constructor)(*nodepp)) != 0)) {
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_UNREF(*nodepp);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Node has no constructor. We cannot ask for one
|
2007-03-18 16:49:50 +00:00
|
|
|
* to be made. It must be brought into existence by
|
2001-01-11 19:27:54 +00:00
|
|
|
* some external agency. The external agency should
|
2001-01-06 00:46:47 +00:00
|
|
|
* call ng_make_node_common() directly to get the
|
|
|
|
* netgraph part initialised.
|
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
}
|
|
|
|
return (error);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-01-06 00:46:47 +00:00
|
|
|
* Generic node creation. Called by node initialisation for externally
|
|
|
|
* instantiated nodes (e.g. hardware, sockets, etc ).
|
1999-10-21 09:06:11 +00:00
|
|
|
* The returned node has a reference count of 1.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_make_node_common(struct ng_type *type, node_p *nodepp)
|
|
|
|
{
|
|
|
|
node_p node;
|
|
|
|
|
|
|
|
/* Require the node type to have been already installed */
|
|
|
|
if (ng_findtype(type->name) == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Make a node and try attach it to the type */
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_ALLOC_NODE(node);
|
1999-10-21 09:06:11 +00:00
|
|
|
if (node == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
node->nd_type = type;
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
#ifdef VIMAGE
|
|
|
|
node->nd_vnet = curvnet;
|
|
|
|
#endif
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_REF(node); /* note reference */
|
1999-10-21 09:06:11 +00:00
|
|
|
type->refs++;
|
|
|
|
|
2007-03-30 14:34:34 +00:00
|
|
|
NG_QUEUE_LOCK_INIT(&node->nd_input_queue);
|
2008-04-15 21:15:32 +00:00
|
|
|
STAILQ_INIT(&node->nd_input_queue.queue);
|
2001-01-08 05:34:06 +00:00
|
|
|
node->nd_input_queue.q_flags = 0;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Initialize hook list for new node */
|
2001-01-08 05:34:06 +00:00
|
|
|
LIST_INIT(&node->nd_hooks);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2008-03-04 18:22:18 +00:00
|
|
|
/* Link us into the name hash. */
|
|
|
|
mtx_lock(&ng_namehash_mtx);
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_INSERT_HEAD(&V_ng_name_hash[0], node, nd_nodes);
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
1999-11-01 00:31:14 +00:00
|
|
|
/* get an ID and put us in the hash chain */
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_idhash_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
for (;;) { /* wrap protection, even if silly */
|
2001-01-06 00:46:47 +00:00
|
|
|
node_p node2 = NULL;
|
2008-08-20 01:05:56 +00:00
|
|
|
node->nd_ID = V_nextID++; /* 137/sec for 1 year before wrap */
|
2001-01-21 23:32:00 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
/* Is there a problem with the new number? */
|
2001-01-21 23:32:00 +00:00
|
|
|
NG_IDHASH_FIND(node->nd_ID, node2); /* already taken? */
|
|
|
|
if ((node->nd_ID != 0) && (node2 == NULL)) {
|
2001-01-08 05:34:06 +00:00
|
|
|
break;
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
}
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_INSERT_HEAD(&V_ng_ID_hash[NG_IDHASH_FN(node->nd_ID)],
|
2001-01-08 05:34:06 +00:00
|
|
|
node, nd_idnodes);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_idhash_mtx);
|
1999-11-01 00:31:14 +00:00
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Done */
|
|
|
|
*nodepp = node;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Forceably start the shutdown process on a node. Either call
|
2007-03-18 16:49:50 +00:00
|
|
|
* its shutdown method, or do the default shutdown if there is
|
1999-10-21 09:06:11 +00:00
|
|
|
* no type-specific method.
|
|
|
|
*
|
2007-03-18 16:49:50 +00:00
|
|
|
* We can only be called from a shutdown message, so we know we have
|
2001-01-11 22:22:52 +00:00
|
|
|
* a writer lock, and therefore exclusive access. It also means
|
|
|
|
* that we should not be on the work queue, but we check anyhow.
|
2001-01-06 00:46:47 +00:00
|
|
|
*
|
|
|
|
* Persistent node types must have a type-specific method which
|
2007-03-18 16:49:50 +00:00
|
|
|
* allocates a new node in which case, this one is irretrievably going away,
|
2001-01-11 22:22:52 +00:00
|
|
|
* or cleans up anything it needs, and just makes the node valid again,
|
2005-11-15 10:54:20 +00:00
|
|
|
* in which case we allow the node to survive.
|
2001-01-11 22:22:52 +00:00
|
|
|
*
|
2007-03-18 16:49:50 +00:00
|
|
|
* XXX We need to think of how to tell a persistent node that we
|
2001-01-11 22:22:52 +00:00
|
|
|
* REALLY need to go away because the hardware has gone or we
|
|
|
|
* are rebooting.... etc.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
void
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_rmnode(node_p node, hook_p dummy1, void *dummy2, int dummy3)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2001-01-11 22:22:52 +00:00
|
|
|
hook_p hook;
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Check if it's already shutting down */
|
2004-07-20 17:15:38 +00:00
|
|
|
if ((node->nd_flags & NGF_CLOSING) != 0)
|
1999-10-21 09:06:11 +00:00
|
|
|
return;
|
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
if (node == &ng_deadnode) {
|
|
|
|
printf ("shutdown called on deadnode\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Add an extra reference so it doesn't go away during this */
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_REF(node);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
/*
|
|
|
|
* Mark it invalid so any newcomers know not to try use it
|
|
|
|
* Also add our own mark so we can't recurse
|
2004-07-20 17:15:38 +00:00
|
|
|
* note that NGF_INVALID does not do this as it's also set during
|
2001-01-08 05:34:06 +00:00
|
|
|
* creation
|
|
|
|
*/
|
2004-07-20 17:15:38 +00:00
|
|
|
node->nd_flags |= NGF_INVALID|NGF_CLOSING;
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2004-05-29 07:21:46 +00:00
|
|
|
/* If node has its pre-shutdown method, then call it first*/
|
|
|
|
if (node->nd_type && node->nd_type->close)
|
|
|
|
(*node->nd_type->close)(node);
|
|
|
|
|
2001-01-11 22:22:52 +00:00
|
|
|
/* Notify all remaining connected nodes to disconnect */
|
|
|
|
while ((hook = LIST_FIRST(&node->nd_hooks)) != NULL)
|
|
|
|
ng_destroy_hook(hook);
|
2001-01-08 05:34:06 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Drain the input queue forceably.
|
2001-01-08 05:34:06 +00:00
|
|
|
* it has no hooks so what's it going to do, bleed on someone?
|
|
|
|
* Theoretically we came here from a queue entry that was added
|
|
|
|
* Just before the queue was closed, so it should be empty anyway.
|
2001-02-01 20:51:23 +00:00
|
|
|
* Also removes us from worklist if needed.
|
2001-01-06 00:46:47 +00:00
|
|
|
*/
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_flush_input_queue(node);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Ask the type if it has anything to do in this case */
|
2001-01-08 05:34:06 +00:00
|
|
|
if (node->nd_type && node->nd_type->shutdown) {
|
|
|
|
(*node->nd_type->shutdown)(node);
|
2001-01-30 20:51:52 +00:00
|
|
|
if (NG_NODE_IS_VALID(node)) {
|
|
|
|
/*
|
|
|
|
* Well, blow me down if the node code hasn't declared
|
|
|
|
* that it doesn't want to die.
|
|
|
|
* Presumably it is a persistant node.
|
|
|
|
* If we REALLY want it to go away,
|
|
|
|
* e.g. hardware going away,
|
2004-07-20 17:15:38 +00:00
|
|
|
* Our caller should set NGF_REALLY_DIE in nd_flags.
|
2005-11-15 10:54:20 +00:00
|
|
|
*/
|
2004-07-20 17:15:38 +00:00
|
|
|
node->nd_flags &= ~(NGF_INVALID|NGF_CLOSING);
|
2001-01-30 20:51:52 +00:00
|
|
|
NG_NODE_UNREF(node); /* Assume they still have theirs */
|
|
|
|
return;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
} else { /* do the default thing */
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_UNREF(node);
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
ng_unname(node); /* basically a NOP these days */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove extra reference, possibly the last
|
|
|
|
* Possible other holders of references may include
|
|
|
|
* timeout callouts, but theoretically the node's supposed to
|
|
|
|
* have cancelled them. Possibly hardware dependencies may
|
|
|
|
* force a driver to 'linger' with a reference.
|
|
|
|
*/
|
|
|
|
NG_NODE_UNREF(node);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2001-03-10 16:31:00 +00:00
|
|
|
* Remove a reference to the node, possibly the last.
|
|
|
|
* deadnode always acts as it it were the last.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-03-10 16:31:00 +00:00
|
|
|
int
|
2001-01-08 05:34:06 +00:00
|
|
|
ng_unref_node(node_p node)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2005-11-15 10:54:20 +00:00
|
|
|
int v;
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
if (node == &ng_deadnode) {
|
2001-03-10 16:31:00 +00:00
|
|
|
return (0);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
|
2008-03-30 00:27:48 +00:00
|
|
|
v = atomic_fetchadd_int(&node->nd_refs, -1);
|
2000-12-02 13:27:58 +00:00
|
|
|
|
2008-03-30 00:27:48 +00:00
|
|
|
if (v == 1) { /* we were the last */
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_lock(&ng_namehash_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
node->nd_type->refs--; /* XXX maybe should get types lock? */
|
|
|
|
LIST_REMOVE(node, nd_nodes);
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_idhash_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
LIST_REMOVE(node, nd_idnodes);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_idhash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-08 06:28:30 +00:00
|
|
|
mtx_destroy(&node->nd_input_queue.q_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_NODE(node);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2008-03-30 00:27:48 +00:00
|
|
|
return (v - 1);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
1999-11-01 00:31:14 +00:00
|
|
|
/************************************************************************
|
|
|
|
Node ID handling
|
|
|
|
************************************************************************/
|
|
|
|
static node_p
|
2001-01-06 00:46:47 +00:00
|
|
|
ng_ID2noderef(ng_ID_t ID)
|
1999-11-01 00:31:14 +00:00
|
|
|
{
|
2001-01-08 05:34:06 +00:00
|
|
|
node_p node;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_idhash_mtx);
|
2001-01-21 23:32:00 +00:00
|
|
|
NG_IDHASH_FIND(ID, node);
|
2001-01-08 05:34:06 +00:00
|
|
|
if(node)
|
|
|
|
NG_NODE_REF(node);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_idhash_mtx);
|
2001-01-08 05:34:06 +00:00
|
|
|
return(node);
|
1999-11-01 00:31:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
ng_ID_t
|
|
|
|
ng_node2ID(node_p node)
|
|
|
|
{
|
2001-01-10 23:19:32 +00:00
|
|
|
return (node ? NG_NODE_ID(node) : 0);
|
1999-11-01 00:31:14 +00:00
|
|
|
}
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/************************************************************************
|
|
|
|
Node name handling
|
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Assign a node a name. Once assigned, the name cannot be changed.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_name_node(node_p node, const char *name)
|
|
|
|
{
|
2008-03-04 18:22:18 +00:00
|
|
|
int i, hash;
|
2001-01-06 00:46:47 +00:00
|
|
|
node_p node2;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Check the name is valid */
|
2004-01-26 14:05:31 +00:00
|
|
|
for (i = 0; i < NG_NODESIZ; i++) {
|
1999-10-21 09:06:11 +00:00
|
|
|
if (name[i] == '\0' || name[i] == '.' || name[i] == ':')
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == 0 || name[i] != '\0') {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
1999-11-01 00:31:14 +00:00
|
|
|
if (ng_decodeidname(name) != 0) { /* valid IDs not allowed here */
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check the name isn't already being used */
|
2001-01-06 00:46:47 +00:00
|
|
|
if ((node2 = ng_name2noderef(node, name)) != NULL) {
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_UNREF(node2);
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EADDRINUSE);
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/* copy it */
|
2004-01-26 14:05:31 +00:00
|
|
|
strlcpy(NG_NODE_NAME(node), name, NG_NODESIZ);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2008-03-04 18:22:18 +00:00
|
|
|
/* Update name hash. */
|
|
|
|
NG_NAMEHASH(name, hash);
|
|
|
|
mtx_lock(&ng_namehash_mtx);
|
|
|
|
LIST_REMOVE(node, nd_nodes);
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_INSERT_HEAD(&V_ng_name_hash[hash], node, nd_nodes);
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a node by absolute name. The name should NOT end with ':'
|
|
|
|
* The name "." means "this node" and "[xxx]" means "the node
|
|
|
|
* with ID (ie, at address) xxx".
|
|
|
|
*
|
|
|
|
* Returns the node if found, else NULL.
|
2001-01-06 00:46:47 +00:00
|
|
|
* Eventually should add something faster than a sequential search.
|
2007-05-27 20:50:23 +00:00
|
|
|
* Note it acquires a reference on the node so you can be sure it's still
|
|
|
|
* there.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
node_p
|
2001-01-06 00:46:47 +00:00
|
|
|
ng_name2noderef(node_p here, const char *name)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
1999-11-01 00:31:14 +00:00
|
|
|
node_p node;
|
|
|
|
ng_ID_t temp;
|
2008-03-04 18:22:18 +00:00
|
|
|
int hash;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* "." means "this node" */
|
2001-01-06 00:46:47 +00:00
|
|
|
if (strcmp(name, ".") == 0) {
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_REF(here);
|
2001-01-06 00:46:47 +00:00
|
|
|
return(here);
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Check for name-by-ID */
|
1999-11-01 00:31:14 +00:00
|
|
|
if ((temp = ng_decodeidname(name)) != 0) {
|
2001-01-06 00:46:47 +00:00
|
|
|
return (ng_ID2noderef(temp));
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Find node by name */
|
2008-03-04 18:22:18 +00:00
|
|
|
NG_NAMEHASH(name, hash);
|
|
|
|
mtx_lock(&ng_namehash_mtx);
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_FOREACH(node, &V_ng_name_hash[hash], nd_nodes) {
|
2008-03-04 18:22:18 +00:00
|
|
|
if (NG_NODE_IS_VALID(node) &&
|
|
|
|
(strcmp(NG_NODE_NAME(node), name) == 0)) {
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
2001-01-10 23:19:32 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
if (node)
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_REF(node);
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (node);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2003-01-01 18:49:04 +00:00
|
|
|
* Decode an ID name, eg. "[f03034de]". Returns 0 if the
|
1999-11-01 00:31:14 +00:00
|
|
|
* string is not valid, otherwise returns the value.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
1999-11-01 00:31:14 +00:00
|
|
|
static ng_ID_t
|
1999-10-21 09:06:11 +00:00
|
|
|
ng_decodeidname(const char *name)
|
|
|
|
{
|
1999-11-02 23:18:01 +00:00
|
|
|
const int len = strlen(name);
|
1999-11-24 01:03:08 +00:00
|
|
|
char *eptr;
|
1999-11-02 23:18:01 +00:00
|
|
|
u_long val;
|
|
|
|
|
|
|
|
/* Check for proper length, brackets, no leading junk */
|
2001-01-10 23:19:32 +00:00
|
|
|
if ((len < 3)
|
|
|
|
|| (name[0] != '[')
|
|
|
|
|| (name[len - 1] != ']')
|
|
|
|
|| (!isxdigit(name[1]))) {
|
|
|
|
return ((ng_ID_t)0);
|
|
|
|
}
|
1999-11-02 23:18:01 +00:00
|
|
|
|
|
|
|
/* Decode number */
|
|
|
|
val = strtoul(name + 1, &eptr, 16);
|
2001-01-10 23:19:32 +00:00
|
|
|
if ((eptr - name != len - 1)
|
|
|
|
|| (val == ULONG_MAX)
|
|
|
|
|| (val == 0)) {
|
1999-11-09 00:31:04 +00:00
|
|
|
return ((ng_ID_t)0);
|
2001-01-10 23:19:32 +00:00
|
|
|
}
|
1999-11-02 23:18:01 +00:00
|
|
|
return (ng_ID_t)val;
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a name from a node. This should only be called
|
|
|
|
* when shutting down and removing the node.
|
2007-03-18 16:49:50 +00:00
|
|
|
* IF we allow name changing this may be more resurrected.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
ng_unname(node_p node)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
Hook routines
|
|
|
|
Names are not optional. Hooks are always connected, except for a
|
2001-01-11 22:22:52 +00:00
|
|
|
brief moment within these routines. On invalidation or during creation
|
|
|
|
they are connected to the 'dead' hook.
|
1999-10-21 09:06:11 +00:00
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove a hook reference
|
|
|
|
*/
|
2001-01-08 05:34:06 +00:00
|
|
|
void
|
1999-10-21 09:06:11 +00:00
|
|
|
ng_unref_hook(hook_p hook)
|
|
|
|
{
|
2005-11-15 10:54:20 +00:00
|
|
|
int v;
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
if (hook == &ng_deadhook) {
|
|
|
|
return;
|
|
|
|
}
|
2008-03-30 00:27:48 +00:00
|
|
|
|
|
|
|
v = atomic_fetchadd_int(&hook->hk_refs, -1);
|
2000-12-02 13:27:58 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
if (v == 1) { /* we were the last */
|
2008-03-29 22:53:58 +00:00
|
|
|
if (_NG_HOOK_NODE(hook)) /* it'll probably be ng_deadnode */
|
2001-01-14 23:25:54 +00:00
|
|
|
_NG_NODE_UNREF((_NG_HOOK_NODE(hook)));
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_HOOK(hook);
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add an unconnected hook to a node. Only used internally.
|
2001-01-11 22:22:52 +00:00
|
|
|
* Assumes node is locked. (XXX not yet true )
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ng_add_hook(node_p node, const char *name, hook_p *hookp)
|
|
|
|
{
|
|
|
|
hook_p hook;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
/* Check that the given name is good */
|
|
|
|
if (name == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
1999-12-03 21:17:30 +00:00
|
|
|
if (ng_findhook(node, name) != NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-12-03 21:17:30 +00:00
|
|
|
return (EEXIST);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate the hook and link it up */
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_ALLOC_HOOK(hook);
|
1999-10-21 09:06:11 +00:00
|
|
|
if (hook == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
}
|
2001-01-11 22:22:52 +00:00
|
|
|
hook->hk_refs = 1; /* add a reference for us to return */
|
2001-01-08 05:34:06 +00:00
|
|
|
hook->hk_flags = HK_INVALID;
|
2001-01-11 22:22:52 +00:00
|
|
|
hook->hk_peer = &ng_deadhook; /* start off this way */
|
2001-01-08 05:34:06 +00:00
|
|
|
hook->hk_node = node;
|
|
|
|
NG_NODE_REF(node); /* each hook counts as a reference */
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-11 22:22:52 +00:00
|
|
|
/* Set hook name */
|
2004-01-26 14:05:31 +00:00
|
|
|
strlcpy(NG_HOOK_NAME(hook), name, NG_HOOKSIZ);
|
2001-01-11 22:22:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the node type code has something to say about it
|
|
|
|
* If it fails, the unref of the hook will also unref the node.
|
|
|
|
*/
|
2001-01-11 19:27:54 +00:00
|
|
|
if (node->nd_type->newhook != NULL) {
|
|
|
|
if ((error = (*node->nd_type->newhook)(node, hook, name))) {
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_HOOK_UNREF(hook); /* this frees the hook */
|
2001-01-06 00:46:47 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2001-01-11 19:27:54 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
|
|
|
* The 'type' agrees so far, so go ahead and link it in.
|
|
|
|
* We'll ask again later when we actually connect the hooks.
|
|
|
|
*/
|
2001-01-08 05:34:06 +00:00
|
|
|
LIST_INSERT_HEAD(&node->nd_hooks, hook, hk_hooks);
|
|
|
|
node->nd_numhooks++;
|
2001-01-11 22:22:52 +00:00
|
|
|
NG_HOOK_REF(hook); /* one for the node */
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
if (hookp)
|
|
|
|
*hookp = hook;
|
2001-01-11 22:22:52 +00:00
|
|
|
return (0);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
1999-12-03 21:17:30 +00:00
|
|
|
/*
|
|
|
|
* Find a hook
|
|
|
|
*
|
|
|
|
* Node types may supply their own optimized routines for finding
|
|
|
|
* hooks. If none is supplied, we just do a linear search.
|
2001-01-11 22:22:52 +00:00
|
|
|
* XXX Possibly we should add a reference to the hook?
|
1999-12-03 21:17:30 +00:00
|
|
|
*/
|
|
|
|
hook_p
|
|
|
|
ng_findhook(node_p node, const char *name)
|
|
|
|
{
|
|
|
|
hook_p hook;
|
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
if (node->nd_type->findhook != NULL)
|
|
|
|
return (*node->nd_type->findhook)(node, name);
|
|
|
|
LIST_FOREACH(hook, &node->nd_hooks, hk_hooks) {
|
2001-01-10 23:19:32 +00:00
|
|
|
if (NG_HOOK_IS_VALID(hook)
|
2001-01-11 04:13:46 +00:00
|
|
|
&& (strcmp(NG_HOOK_NAME(hook), name) == 0))
|
1999-12-03 21:17:30 +00:00
|
|
|
return (hook);
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
|
|
|
* Destroy a hook
|
|
|
|
*
|
|
|
|
* As hooks are always attached, this really destroys two hooks.
|
|
|
|
* The one given, and the one attached to it. Disconnect the hooks
|
2001-01-11 22:22:52 +00:00
|
|
|
* from each other first. We reconnect the peer hook to the 'dead'
|
|
|
|
* hook so that it can still exist after we depart. We then
|
|
|
|
* send the peer its own destroy message. This ensures that we only
|
2005-11-15 10:54:20 +00:00
|
|
|
* interact with the peer's structures when it is locked processing that
|
2001-01-11 22:22:52 +00:00
|
|
|
* message. We hold a reference to the peer hook so we are guaranteed that
|
|
|
|
* the peer hook and node are still going to exist until
|
|
|
|
* we are finished there as the hook holds a ref on the node.
|
2005-11-15 10:54:20 +00:00
|
|
|
* We run this same code again on the peer hook, but that time it is already
|
|
|
|
* attached to the 'dead' hook.
|
2001-01-14 23:25:54 +00:00
|
|
|
*
|
2005-11-15 10:54:20 +00:00
|
|
|
* This routine is called at all stages of hook creation
|
2001-01-14 23:25:54 +00:00
|
|
|
* on error detection and must be able to handle any such stage.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
ng_destroy_hook(hook_p hook)
|
|
|
|
{
|
2005-11-02 15:23:47 +00:00
|
|
|
hook_p peer;
|
|
|
|
node_p node;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-14 23:25:54 +00:00
|
|
|
if (hook == &ng_deadhook) { /* better safe than sorry */
|
|
|
|
printf("ng_destroy_hook called on deadhook\n");
|
|
|
|
return;
|
|
|
|
}
|
2005-11-02 15:23:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Protect divorce process with mutex, to avoid races on
|
|
|
|
* simultaneous disconnect.
|
|
|
|
*/
|
|
|
|
mtx_lock(&ng_topo_mtx);
|
|
|
|
|
|
|
|
hook->hk_flags |= HK_INVALID;
|
|
|
|
|
|
|
|
peer = NG_HOOK_PEER(hook);
|
|
|
|
node = NG_HOOK_NODE(hook);
|
|
|
|
|
2001-01-11 22:22:52 +00:00
|
|
|
if (peer && (peer != &ng_deadhook)) {
|
|
|
|
/*
|
|
|
|
* Set the peer to point to ng_deadhook
|
|
|
|
* from this moment on we are effectively independent it.
|
|
|
|
* send it an rmhook message of it's own.
|
|
|
|
*/
|
|
|
|
peer->hk_peer = &ng_deadhook; /* They no longer know us */
|
|
|
|
hook->hk_peer = &ng_deadhook; /* Nor us, them */
|
2001-01-14 23:25:54 +00:00
|
|
|
if (NG_HOOK_NODE(peer) == &ng_deadnode) {
|
2005-11-15 10:54:20 +00:00
|
|
|
/*
|
2001-01-14 23:25:54 +00:00
|
|
|
* If it's already divorced from a node,
|
|
|
|
* just free it.
|
|
|
|
*/
|
2005-11-02 15:23:47 +00:00
|
|
|
mtx_unlock(&ng_topo_mtx);
|
2001-01-14 23:25:54 +00:00
|
|
|
} else {
|
2005-11-02 15:23:47 +00:00
|
|
|
mtx_unlock(&ng_topo_mtx);
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_rmhook_self(peer); /* Send it a surprise */
|
|
|
|
}
|
2001-01-11 23:05:34 +00:00
|
|
|
NG_HOOK_UNREF(peer); /* account for peer link */
|
|
|
|
NG_HOOK_UNREF(hook); /* account for peer link */
|
2005-11-02 15:23:47 +00:00
|
|
|
} else
|
|
|
|
mtx_unlock(&ng_topo_mtx);
|
|
|
|
|
|
|
|
mtx_assert(&ng_topo_mtx, MA_NOTOWNED);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Remove the hook from the node's list to avoid possible recursion
|
|
|
|
* in case the disconnection results in node shutdown.
|
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
if (node == &ng_deadnode) { /* happens if called from ng_con_nodes() */
|
|
|
|
return;
|
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
LIST_REMOVE(hook, hk_hooks);
|
|
|
|
node->nd_numhooks--;
|
|
|
|
if (node->nd_type->disconnect) {
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
2001-01-14 23:25:54 +00:00
|
|
|
* The type handler may elect to destroy the node so don't
|
2007-03-18 16:49:50 +00:00
|
|
|
* trust its existence after this point. (except
|
2001-01-14 23:25:54 +00:00
|
|
|
* that we still hold a reference on it. (which we
|
|
|
|
* inherrited from the hook we are destroying)
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-01-08 05:34:06 +00:00
|
|
|
(*node->nd_type->disconnect) (hook);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that because we will point to ng_deadnode, the original node
|
|
|
|
* is not decremented automatically so we do that manually.
|
|
|
|
*/
|
|
|
|
_NG_HOOK_NODE(hook) = &ng_deadnode;
|
|
|
|
NG_NODE_UNREF(node); /* We no longer point to it so adjust count */
|
|
|
|
NG_HOOK_UNREF(hook); /* Account for linkage (in list) to node */
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Take two hooks on a node and merge the connection so that the given node
|
|
|
|
* is effectively bypassed.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_bypass(hook_p hook1, hook_p hook2)
|
|
|
|
{
|
2001-01-08 05:34:06 +00:00
|
|
|
if (hook1->hk_node != hook2->hk_node) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
2001-01-08 05:34:06 +00:00
|
|
|
}
|
|
|
|
hook1->hk_peer->hk_peer = hook2->hk_peer;
|
|
|
|
hook2->hk_peer->hk_peer = hook1->hk_peer;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-11 22:22:52 +00:00
|
|
|
hook1->hk_peer = &ng_deadhook;
|
|
|
|
hook2->hk_peer = &ng_deadhook;
|
|
|
|
|
2006-10-11 14:33:08 +00:00
|
|
|
NG_HOOK_UNREF(hook1);
|
|
|
|
NG_HOOK_UNREF(hook2);
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/* XXX If we ever cache methods on hooks update them as well */
|
|
|
|
ng_destroy_hook(hook1);
|
|
|
|
ng_destroy_hook(hook2);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Install a new netgraph type
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_newtype(struct ng_type *tp)
|
|
|
|
{
|
|
|
|
const size_t namelen = strlen(tp->name);
|
|
|
|
|
|
|
|
/* Check version and type name fields */
|
2000-12-18 20:03:32 +00:00
|
|
|
if ((tp->version != NG_ABI_VERSION)
|
|
|
|
|| (namelen == 0)
|
2004-01-26 14:05:31 +00:00
|
|
|
|| (namelen >= NG_TYPESIZ)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2004-06-30 22:51:29 +00:00
|
|
|
if (tp->version != NG_ABI_VERSION) {
|
|
|
|
printf("Netgraph: Node type rejected. ABI mismatch. Suggest recompile\n");
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Check for name collision */
|
|
|
|
if (ng_findtype(tp->name) != NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (EEXIST);
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
|
|
|
|
/* Link in new type */
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
LIST_INSERT_HEAD(&ng_typelist, tp, types);
|
2001-01-24 21:29:57 +00:00
|
|
|
tp->refs = 1; /* first ref is linked list */
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2001-07-23 21:14:57 +00:00
|
|
|
/*
|
|
|
|
* unlink a netgraph type
|
|
|
|
* If no examples exist
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_rmtype(struct ng_type *tp)
|
|
|
|
{
|
|
|
|
/* Check for name collision */
|
|
|
|
if (tp->refs != 1) {
|
|
|
|
TRAP_ERROR();
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unlink type */
|
|
|
|
mtx_lock(&ng_typelist_mtx);
|
|
|
|
LIST_REMOVE(tp, types);
|
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
|
|
|
* Look for a type of the name given
|
|
|
|
*/
|
|
|
|
struct ng_type *
|
|
|
|
ng_findtype(const char *typename)
|
|
|
|
{
|
|
|
|
struct ng_type *type;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
LIST_FOREACH(type, &ng_typelist, types) {
|
1999-10-21 09:06:11 +00:00
|
|
|
if (strcmp(type->name, typename) == 0)
|
|
|
|
break;
|
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (type);
|
|
|
|
}
|
|
|
|
|
|
|
|
/************************************************************************
|
|
|
|
Composite routines
|
|
|
|
************************************************************************/
|
|
|
|
/*
|
2001-01-14 23:25:54 +00:00
|
|
|
* Connect two nodes using the specified hooks, using queued functions.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2007-10-19 15:04:17 +00:00
|
|
|
static int
|
|
|
|
ng_con_part3(node_p node, item_p item, hook_p hook)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2007-10-19 15:04:17 +00:00
|
|
|
int error = 0;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-14 23:25:54 +00:00
|
|
|
/*
|
|
|
|
* When we run, we know that the node 'node' is locked for us.
|
|
|
|
* Our caller has a reference on the hook.
|
|
|
|
* Our caller has a reference on the node.
|
|
|
|
* (In this case our caller is ng_apply_item() ).
|
|
|
|
* The peer hook has a reference on the hook.
|
2001-01-30 20:51:52 +00:00
|
|
|
* We are all set up except for the final call to the node, and
|
|
|
|
* the clearing of the INVALID flag.
|
2001-01-14 23:25:54 +00:00
|
|
|
*/
|
|
|
|
if (NG_HOOK_NODE(hook) == &ng_deadnode) {
|
|
|
|
/*
|
|
|
|
* The node must have been freed again since we last visited
|
|
|
|
* here. ng_destry_hook() has this effect but nothing else does.
|
|
|
|
* We should just release our references and
|
|
|
|
* free anything we can think of.
|
|
|
|
* Since we know it's been destroyed, and it's our caller
|
|
|
|
* that holds the references, just return.
|
|
|
|
*/
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(ENOENT);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
if (hook->hk_node->nd_type->connect) {
|
2007-10-19 15:04:17 +00:00
|
|
|
if ((error = (*hook->hk_node->nd_type->connect) (hook))) {
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_destroy_hook(hook); /* also zaps peer */
|
2001-01-30 20:51:52 +00:00
|
|
|
printf("failed in ng_con_part3()\n");
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(error);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
/*
|
|
|
|
* XXX this is wrong for SMP. Possibly we need
|
|
|
|
* to separate out 'create' and 'invalid' flags.
|
|
|
|
* should only set flags on hooks we have locked under our node.
|
|
|
|
*/
|
|
|
|
hook->hk_flags &= ~HK_INVALID;
|
2007-10-19 15:04:17 +00:00
|
|
|
done:
|
|
|
|
NG_FREE_ITEM(item);
|
|
|
|
return (error);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
|
2007-10-19 15:04:17 +00:00
|
|
|
static int
|
|
|
|
ng_con_part2(node_p node, item_p item, hook_p hook)
|
2001-01-14 23:25:54 +00:00
|
|
|
{
|
2007-10-19 15:04:17 +00:00
|
|
|
hook_p peer;
|
|
|
|
int error = 0;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/*
|
2001-01-14 23:25:54 +00:00
|
|
|
* When we run, we know that the node 'node' is locked for us.
|
|
|
|
* Our caller has a reference on the hook.
|
|
|
|
* Our caller has a reference on the node.
|
|
|
|
* (In this case our caller is ng_apply_item() ).
|
|
|
|
* The peer hook has a reference on the hook.
|
|
|
|
* our node pointer points to the 'dead' node.
|
|
|
|
* First check the hook name is unique.
|
2001-01-30 20:51:52 +00:00
|
|
|
* Should not happen because we checked before queueing this.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
if (ng_findhook(node, NG_HOOK_NAME(hook)) != NULL) {
|
|
|
|
TRAP_ERROR();
|
|
|
|
ng_destroy_hook(hook); /* should destroy peer too */
|
2001-01-30 20:51:52 +00:00
|
|
|
printf("failed in ng_con_part2()\n");
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(EEXIST);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
2001-01-11 22:22:52 +00:00
|
|
|
/*
|
2001-01-14 23:25:54 +00:00
|
|
|
* Check if the node type code has something to say about it
|
|
|
|
* If it fails, the unref of the hook will also unref the attached node,
|
|
|
|
* however since that node is 'ng_deadnode' this will do nothing.
|
|
|
|
* The peer hook will also be destroyed.
|
2001-01-11 22:22:52 +00:00
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
if (node->nd_type->newhook != NULL) {
|
2007-10-19 15:04:17 +00:00
|
|
|
if ((error = (*node->nd_type->newhook)(node, hook,
|
|
|
|
hook->hk_name))) {
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_destroy_hook(hook); /* should destroy peer too */
|
2001-01-30 20:51:52 +00:00
|
|
|
printf("failed in ng_con_part2()\n");
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(error);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The 'type' agrees so far, so go ahead and link it in.
|
|
|
|
* We'll ask again later when we actually connect the hooks.
|
|
|
|
*/
|
|
|
|
hook->hk_node = node; /* just overwrite ng_deadnode */
|
|
|
|
NG_NODE_REF(node); /* each hook counts as a reference */
|
|
|
|
LIST_INSERT_HEAD(&node->nd_hooks, hook, hk_hooks);
|
|
|
|
node->nd_numhooks++;
|
|
|
|
NG_HOOK_REF(hook); /* one for the node */
|
|
|
|
|
|
|
|
/*
|
2007-03-18 16:49:50 +00:00
|
|
|
* We now have a symmetrical situation, where both hooks have been
|
2001-03-10 16:31:00 +00:00
|
|
|
* linked to their nodes, the newhook methods have been called
|
2001-01-14 23:25:54 +00:00
|
|
|
* And the references are all correct. The hooks are still marked
|
|
|
|
* as invalid, as we have not called the 'connect' methods
|
|
|
|
* yet.
|
2007-03-18 16:49:50 +00:00
|
|
|
* We can call the local one immediately as we have the
|
2001-01-14 23:25:54 +00:00
|
|
|
* node locked, but we need to queue the remote one.
|
|
|
|
*/
|
|
|
|
if (hook->hk_node->nd_type->connect) {
|
2007-10-19 15:04:17 +00:00
|
|
|
if ((error = (*hook->hk_node->nd_type->connect) (hook))) {
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_destroy_hook(hook); /* also zaps peer */
|
2001-01-30 20:51:52 +00:00
|
|
|
printf("failed in ng_con_part2(A)\n");
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(error);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
}
|
2005-11-02 15:23:47 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Acquire topo mutex to avoid race with ng_destroy_hook().
|
|
|
|
*/
|
|
|
|
mtx_lock(&ng_topo_mtx);
|
|
|
|
peer = hook->hk_peer;
|
|
|
|
if (peer == &ng_deadhook) {
|
|
|
|
mtx_unlock(&ng_topo_mtx);
|
|
|
|
printf("failed in ng_con_part2(B)\n");
|
|
|
|
ng_destroy_hook(hook);
|
2007-10-19 15:04:17 +00:00
|
|
|
ERROUT(ENOENT);
|
2005-11-02 15:23:47 +00:00
|
|
|
}
|
|
|
|
mtx_unlock(&ng_topo_mtx);
|
|
|
|
|
2007-11-14 11:25:58 +00:00
|
|
|
if ((error = ng_send_fn2(peer->hk_node, peer, item, &ng_con_part3,
|
|
|
|
NULL, 0, NG_REUSE_ITEM))) {
|
2005-11-02 15:23:47 +00:00
|
|
|
printf("failed in ng_con_part2(C)\n");
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_destroy_hook(hook); /* also zaps peer */
|
2007-10-19 15:04:17 +00:00
|
|
|
return (error); /* item was consumed. */
|
2001-01-30 20:51:52 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
hook->hk_flags &= ~HK_INVALID; /* need both to be able to work */
|
2007-10-19 15:04:17 +00:00
|
|
|
return (0); /* item was consumed. */
|
|
|
|
done:
|
|
|
|
NG_FREE_ITEM(item);
|
|
|
|
return (error);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2005-11-15 10:54:20 +00:00
|
|
|
* Connect this node with another node. We assume that this node is
|
2001-01-14 23:25:54 +00:00
|
|
|
* currently locked, as we are only called from an NGM_CONNECT message.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
static int
|
2007-10-19 15:04:17 +00:00
|
|
|
ng_con_nodes(item_p item, node_p node, const char *name,
|
|
|
|
node_p node2, const char *name2)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2005-11-15 10:54:20 +00:00
|
|
|
int error;
|
|
|
|
hook_p hook;
|
|
|
|
hook_p hook2;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
if (ng_findhook(node2, name2) != NULL) {
|
|
|
|
return(EEXIST);
|
|
|
|
}
|
2001-01-11 22:22:52 +00:00
|
|
|
if ((error = ng_add_hook(node, name, &hook))) /* gives us a ref */
|
1999-10-21 09:06:11 +00:00
|
|
|
return (error);
|
2001-01-14 23:25:54 +00:00
|
|
|
/* Allocate the other hook and link it up */
|
|
|
|
NG_ALLOC_HOOK(hook2);
|
2005-01-24 13:32:19 +00:00
|
|
|
if (hook2 == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
|
|
|
ng_destroy_hook(hook); /* XXX check ref counts so far */
|
|
|
|
NG_HOOK_UNREF(hook); /* including our ref */
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
hook2->hk_refs = 1; /* start with a reference for us. */
|
|
|
|
hook2->hk_flags = HK_INVALID;
|
|
|
|
hook2->hk_peer = hook; /* Link the two together */
|
|
|
|
hook->hk_peer = hook2;
|
|
|
|
NG_HOOK_REF(hook); /* Add a ref for the peer to each*/
|
|
|
|
NG_HOOK_REF(hook2);
|
2005-11-15 10:54:20 +00:00
|
|
|
hook2->hk_node = &ng_deadnode;
|
2004-01-26 14:05:31 +00:00
|
|
|
strlcpy(NG_HOOK_NAME(hook2), name2, NG_HOOKSIZ);
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Queue the function above.
|
|
|
|
* Procesing continues in that function in the lock context of
|
|
|
|
* the other node.
|
|
|
|
*/
|
2007-11-14 11:25:58 +00:00
|
|
|
if ((error = ng_send_fn2(node2, hook2, item, &ng_con_part2, NULL, 0,
|
|
|
|
NG_NOFLAGS))) {
|
2007-08-18 11:59:17 +00:00
|
|
|
printf("failed in ng_con_nodes(): %d\n", error);
|
|
|
|
ng_destroy_hook(hook); /* also zaps peer */
|
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
NG_HOOK_UNREF(hook); /* Let each hook go if it wants to */
|
|
|
|
NG_HOOK_UNREF(hook2);
|
2007-08-18 11:59:17 +00:00
|
|
|
return (error);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a peer and connect.
|
|
|
|
* We assume that the local node is locked.
|
|
|
|
* The new node probably doesn't need a lock until
|
|
|
|
* it has a hook, because it cannot really have any work until then,
|
|
|
|
* but we should think about it a bit more.
|
|
|
|
*
|
|
|
|
* The problem may come if the other node also fires up
|
|
|
|
* some hardware or a timer or some other source of activation,
|
|
|
|
* also it may already get a command msg via it's ID.
|
|
|
|
*
|
|
|
|
* We could use the same method as ng_con_nodes() but we'd have
|
2005-11-15 10:54:20 +00:00
|
|
|
* to add ability to remove the node when failing. (Not hard, just
|
2001-01-14 23:25:54 +00:00
|
|
|
* make arg1 point to the node to remove).
|
|
|
|
* Unless of course we just ignore failure to connect and leave
|
|
|
|
* an unconnected node?
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ng_mkpeer(node_p node, const char *name, const char *name2, char *type)
|
|
|
|
{
|
2005-11-15 10:54:20 +00:00
|
|
|
node_p node2;
|
|
|
|
hook_p hook1, hook2;
|
|
|
|
int error;
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
if ((error = ng_make_node(type, &node2))) {
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((error = ng_add_hook(node, name, &hook1))) { /* gives us a ref */
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_rmnode(node2, NULL, NULL, 0);
|
2001-01-14 23:25:54 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((error = ng_add_hook(node2, name2, &hook2))) {
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_rmnode(node2, NULL, NULL, 0);
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_destroy_hook(hook1);
|
|
|
|
NG_HOOK_UNREF(hook1);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Actually link the two hooks together.
|
|
|
|
*/
|
|
|
|
hook1->hk_peer = hook2;
|
|
|
|
hook2->hk_peer = hook1;
|
|
|
|
|
|
|
|
/* Each hook is referenced by the other */
|
|
|
|
NG_HOOK_REF(hook1);
|
|
|
|
NG_HOOK_REF(hook2);
|
|
|
|
|
|
|
|
/* Give each node the opportunity to veto the pending connection */
|
|
|
|
if (hook1->hk_node->nd_type->connect) {
|
|
|
|
error = (*hook1->hk_node->nd_type->connect) (hook1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((error == 0) && hook2->hk_node->nd_type->connect) {
|
|
|
|
error = (*hook2->hk_node->nd_type->connect) (hook2);
|
|
|
|
|
|
|
|
}
|
2001-01-11 22:22:52 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* drop the references we were holding on the two hooks.
|
|
|
|
*/
|
2001-01-14 23:25:54 +00:00
|
|
|
if (error) {
|
|
|
|
ng_destroy_hook(hook2); /* also zaps hook1 */
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_rmnode(node2, NULL, NULL, 0);
|
2001-01-14 23:25:54 +00:00
|
|
|
} else {
|
|
|
|
/* As a last act, allow the hooks to be used */
|
|
|
|
hook1->hk_flags &= ~HK_INVALID;
|
|
|
|
hook2->hk_flags &= ~HK_INVALID;
|
|
|
|
}
|
|
|
|
NG_HOOK_UNREF(hook1);
|
2001-01-11 22:22:52 +00:00
|
|
|
NG_HOOK_UNREF(hook2);
|
|
|
|
return (error);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/************************************************************************
|
|
|
|
Utility routines to send self messages
|
|
|
|
************************************************************************/
|
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
/* Shut this node down as soon as everyone is clear of it */
|
2007-03-18 16:49:50 +00:00
|
|
|
/* Should add arg "immediately" to jump the queue */
|
2001-01-06 00:46:47 +00:00
|
|
|
int
|
2008-12-13 22:26:24 +00:00
|
|
|
ng_rmnode_self(node_p node)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2001-01-30 20:51:52 +00:00
|
|
|
int error;
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
if (node == &ng_deadnode)
|
|
|
|
return (0);
|
2004-07-20 17:15:38 +00:00
|
|
|
node->nd_flags |= NGF_INVALID;
|
|
|
|
if (node->nd_flags & NGF_CLOSING)
|
2001-01-30 20:51:52 +00:00
|
|
|
return (0);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2008-12-13 22:26:24 +00:00
|
|
|
error = ng_send_fn(node, NULL, &ng_rmnode, NULL, 0);
|
2001-01-30 20:51:52 +00:00
|
|
|
return (error);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
static void
|
2001-01-14 23:25:54 +00:00
|
|
|
ng_rmhook_part2(node_p node, hook_p hook, void *arg1, int arg2)
|
|
|
|
{
|
|
|
|
ng_destroy_hook(hook);
|
2001-01-30 20:51:52 +00:00
|
|
|
return ;
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
|
2001-01-11 19:27:54 +00:00
|
|
|
int
|
|
|
|
ng_rmhook_self(hook_p hook)
|
|
|
|
{
|
2001-01-14 23:25:54 +00:00
|
|
|
int error;
|
2001-01-11 19:27:54 +00:00
|
|
|
node_p node = NG_HOOK_NODE(hook);
|
|
|
|
|
2001-01-14 23:25:54 +00:00
|
|
|
if (node == &ng_deadnode)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
error = ng_send_fn(node, hook, &ng_rmhook_part2, NULL, 0);
|
|
|
|
return (error);
|
2001-01-11 19:27:54 +00:00
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/***********************************************************************
 * Parse and verify a string of the form: <NODE:><PATH>
 *
 * Such a string can refer to a specific node or a specific hook
 * on a specific node, depending on how you look at it. In the
 * latter case, the PATH component must not end in a dot.
 *
 * Both <NODE:> and <PATH> are optional. The <PATH> is a string
 * of hook names separated by dots. This breaks out the original
 * string, setting *nodep to "NODE" (or NULL if none) and *pathp
 * to "PATH" (or NULL if degenerate). Also, *hookp will point to
 * the final hook component of <PATH>, if any, otherwise NULL.
 *
 * This returns -1 if the path is malformed. The char ** are optional.
 ***********************************************************************/
int
ng_path_parse(char *addr, char **nodep, char **pathp, char **hookp)
{
	char	*node, *path, *hook;
	int	k;

	/*
	 * Extract absolute NODE, if any
	 */
	for (path = addr; *path && *path != ':'; path++);
	if (*path) {
		node = addr;	/* Here's the NODE */
		*path++ = '\0';	/* Here's the PATH */

		/* Node name must not be empty */
		if (!*node)
			return -1;

		/* A name of "." is OK; otherwise '.' not allowed */
		if (strcmp(node, ".") != 0) {
			for (k = 0; node[k]; k++)
				if (node[k] == '.')
					return -1;
		}
	} else {
		node = NULL;	/* No absolute NODE */
		path = addr;	/* Here's the PATH */
	}

	/* Snoop for illegal characters in PATH */
	for (k = 0; path[k]; k++)
		if (path[k] == ':')
			return -1;

	/* Check for no repeated dots in PATH */
	for (k = 0; path[k]; k++)
		if (path[k] == '.' && path[k + 1] == '.')
			return -1;

	/* Remove extra (degenerate) dots from beginning or end of PATH */
	if (path[0] == '.')
		path++;
	if (*path && path[strlen(path) - 1] == '.')
		path[strlen(path) - 1] = 0;

	/* If PATH has a dot, then we're not talking about a hook */
	if (*path) {
		for (hook = path, k = 0; path[k]; k++)
			if (path[k] == '.') {
				hook = NULL;
				break;
			}
	} else
		path = hook = NULL;

	/* Done */
	if (nodep)
		*nodep = node;
	if (pathp)
		*pathp = path;
	if (hookp)
		*hookp = hook;
	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a path, which may be absolute or relative, and a starting node,
|
2001-01-06 00:46:47 +00:00
|
|
|
* return the destination node.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
|
|
|
int
|
2001-01-06 00:46:47 +00:00
|
|
|
ng_path2noderef(node_p here, const char *address,
|
|
|
|
node_p *destp, hook_p *lasthook)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2004-01-26 14:05:31 +00:00
|
|
|
char fullpath[NG_PATHSIZ];
|
1999-10-21 09:06:11 +00:00
|
|
|
char *nodename, *path, pbuf[2];
|
2001-01-06 00:46:47 +00:00
|
|
|
node_p node, oldnode;
|
1999-10-21 09:06:11 +00:00
|
|
|
char *cp;
|
2000-04-28 17:09:00 +00:00
|
|
|
hook_p hook = NULL;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Initialize */
|
2001-01-08 05:34:06 +00:00
|
|
|
if (destp == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return EINVAL;
|
2001-01-08 05:34:06 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
*destp = NULL;
|
|
|
|
|
|
|
|
/* Make a writable copy of address for ng_path_parse() */
|
|
|
|
strncpy(fullpath, address, sizeof(fullpath) - 1);
|
|
|
|
fullpath[sizeof(fullpath) - 1] = '\0';
|
|
|
|
|
|
|
|
/* Parse out node and sequence of hooks */
|
|
|
|
if (ng_path_parse(fullpath, &nodename, &path, NULL) < 0) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return EINVAL;
|
|
|
|
}
|
|
|
|
if (path == NULL) {
|
|
|
|
pbuf[0] = '.'; /* Needs to be writable */
|
|
|
|
pbuf[1] = '\0';
|
|
|
|
path = pbuf;
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* For an absolute address, jump to the starting node.
|
|
|
|
* Note that this holds a reference on the node for us.
|
|
|
|
* Don't forget to drop the reference if we don't need it.
|
|
|
|
*/
|
1999-10-21 09:06:11 +00:00
|
|
|
if (nodename) {
|
2001-01-06 00:46:47 +00:00
|
|
|
node = ng_name2noderef(here, nodename);
|
1999-10-21 09:06:11 +00:00
|
|
|
if (node == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (ENOENT);
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
} else {
|
|
|
|
if (here == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
return (EINVAL);
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
node = here;
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_REF(node);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
2005-11-15 10:54:20 +00:00
|
|
|
* Now follow the sequence of hooks
|
2001-01-06 00:46:47 +00:00
|
|
|
* XXX
|
|
|
|
* We actually cannot guarantee that the sequence
|
|
|
|
* is not being demolished as we crawl along it
|
|
|
|
* without extra-ordinary locking etc.
|
|
|
|
* So this is a bit dodgy to say the least.
|
|
|
|
* We can probably hold up some things by holding
|
|
|
|
* the nodelist mutex for the time of this
|
|
|
|
* crawl if we wanted.. At least that way we wouldn't have to
|
2007-03-18 16:49:50 +00:00
|
|
|
* worry about the nodes disappearing, but the hooks would still
|
2001-01-06 00:46:47 +00:00
|
|
|
* be a problem.
|
|
|
|
*/
|
1999-10-21 09:06:11 +00:00
|
|
|
for (cp = path; node != NULL && *cp != '\0'; ) {
|
|
|
|
char *segment;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Break out the next path segment. Replace the dot we just
|
|
|
|
* found with a NUL; "cp" points to the next segment (or the
|
|
|
|
* NUL at the end).
|
|
|
|
*/
|
|
|
|
for (segment = cp; *cp != '\0'; cp++) {
|
|
|
|
if (*cp == '.') {
|
|
|
|
*cp++ = '\0';
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Empty segment */
|
|
|
|
if (*segment == '\0')
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* We have a segment, so look for a hook by that name */
|
1999-12-03 21:17:30 +00:00
|
|
|
hook = ng_findhook(node, segment);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Can't get there from here... */
|
|
|
|
if (hook == NULL
|
2001-01-08 05:34:06 +00:00
|
|
|
|| NG_HOOK_PEER(hook) == NULL
|
|
|
|
|| NG_HOOK_NOT_VALID(hook)
|
|
|
|
|| NG_HOOK_NOT_VALID(NG_HOOK_PEER(hook))) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_UNREF(node);
|
2005-11-15 10:54:20 +00:00
|
|
|
#if 0
|
2001-01-08 05:34:06 +00:00
|
|
|
printf("hooknotvalid %s %s %d %d %d %d ",
|
|
|
|
path,
|
|
|
|
segment,
|
|
|
|
hook == NULL,
|
2005-11-15 10:54:20 +00:00
|
|
|
NG_HOOK_PEER(hook) == NULL,
|
|
|
|
NG_HOOK_NOT_VALID(hook),
|
|
|
|
NG_HOOK_NOT_VALID(NG_HOOK_PEER(hook)));
|
2001-01-08 05:34:06 +00:00
|
|
|
#endif
|
1999-10-21 09:06:11 +00:00
|
|
|
return (ENOENT);
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
2005-11-15 10:54:20 +00:00
|
|
|
* Hop on over to the next node
|
2001-01-06 00:46:47 +00:00
|
|
|
* XXX
|
2005-11-15 10:54:20 +00:00
|
|
|
* Big race conditions here as hooks and nodes go away
|
2001-01-06 00:46:47 +00:00
|
|
|
* *** Idea.. store an ng_ID_t in each hook and use that
|
|
|
|
* instead of the direct hook in this crawl?
|
|
|
|
*/
|
|
|
|
oldnode = node;
|
2001-01-08 05:34:06 +00:00
|
|
|
if ((node = NG_PEER_NODE(hook)))
|
|
|
|
NG_NODE_REF(node); /* XXX RACE */
|
|
|
|
NG_NODE_UNREF(oldnode); /* XXX another race */
|
|
|
|
if (NG_NODE_NOT_VALID(node)) {
|
|
|
|
NG_NODE_UNREF(node); /* XXX more races */
|
2001-01-06 00:46:47 +00:00
|
|
|
node = NULL;
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If node somehow missing, fail here (probably this is not needed) */
|
|
|
|
if (node == NULL) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
return (ENXIO);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Done */
|
|
|
|
*destp = node;
|
2000-05-02 17:09:46 +00:00
|
|
|
if (lasthook != NULL)
|
2001-01-08 05:34:06 +00:00
|
|
|
*lasthook = (hook ? NG_HOOK_PEER(hook) : NULL);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/***************************************************************\
|
|
|
|
* Input queue handling.
|
|
|
|
* All activities are submitted to the node via the input queue
|
|
|
|
* which implements a multiple-reader/single-writer gate.
|
2007-03-18 16:49:50 +00:00
|
|
|
* Items which cannot be handled immediately are queued.
|
2001-01-06 00:46:47 +00:00
|
|
|
*
|
|
|
|
* read-write queue locking inline functions *
|
|
|
|
\***************************************************************/
|
|
|
|
|
2008-04-15 21:15:32 +00:00
|
|
|
static __inline void ng_queue_rw(node_p node, item_p item, int rw);
|
|
|
|
static __inline item_p ng_dequeue(node_p node, int *rw);
|
|
|
|
static __inline item_p ng_acquire_read(node_p node, item_p item);
|
|
|
|
static __inline item_p ng_acquire_write(node_p node, item_p item);
|
|
|
|
static __inline void ng_leave_read(node_p node);
|
|
|
|
static __inline void ng_leave_write(node_p node);
|
2001-01-06 00:46:47 +00:00
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
2001-01-06 00:46:47 +00:00
|
|
|
* Definition of the bits fields in the ng_queue flag word.
|
|
|
|
* Defined here rather than in netgraph.h because no-one should fiddle
|
|
|
|
* with them.
|
|
|
|
*
|
2001-02-01 20:51:23 +00:00
|
|
|
* The ordering here may be important! don't shuffle these.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-01-06 00:46:47 +00:00
|
|
|
/*-
|
|
|
|
Safety Barrier--------+ (adjustable to suit taste) (not used yet)
|
|
|
|
|
|
|
|
|
V
|
|
|
|
+-------+-------+-------+-------+-------+-------+-------+-------+
|
2005-11-02 14:27:24 +00:00
|
|
|
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
|
|
|
|
| |A|c|t|i|v|e| |R|e|a|d|e|r| |C|o|u|n|t| | | | | | | | | |P|A|
|
|
|
|
| | | | | | | | | | | | | | | | | | | | | | | | | | | | | |O|W|
|
2001-01-06 00:46:47 +00:00
|
|
|
+-------+-------+-------+-------+-------+-------+-------+-------+
|
2005-11-02 14:27:24 +00:00
|
|
|
\___________________________ ____________________________/ | |
|
|
|
|
V | |
|
|
|
|
[active reader count] | |
|
2001-01-06 00:46:47 +00:00
|
|
|
| |
|
2005-11-02 14:27:24 +00:00
|
|
|
Operation Pending -------------------------------+ |
|
2001-01-06 00:46:47 +00:00
|
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
Active Writer ---------------------------------------+
|
2001-02-01 20:51:23 +00:00
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
Node queue has such semantics:
|
|
|
|
- All flags modifications are atomic.
|
|
|
|
- Reader count can be incremented only if there is no writer or pending flags.
|
|
|
|
As soon as this can't be done with single operation, it is implemented with
|
|
|
|
spin loop and atomic_cmpset().
|
|
|
|
- Writer flag can be set only if there is no any bits set.
|
|
|
|
It is implemented with atomic_cmpset().
|
|
|
|
- Pending flag can be set any time, but to avoid collision on queue processing
|
|
|
|
all queue fields are protected by the mutex.
|
|
|
|
- Queue processing thread reads queue holding the mutex, but releases it while
|
|
|
|
processing. When queue is empty pending flag is removed.
|
2001-01-06 00:46:47 +00:00
|
|
|
*/
|
2008-04-09 19:03:19 +00:00
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
#define WRITER_ACTIVE 0x00000001
|
|
|
|
#define OP_PENDING 0x00000002
|
|
|
|
#define READER_INCREMENT 0x00000004
|
|
|
|
#define READER_MASK 0xfffffffc /* Not valid if WRITER_ACTIVE is set */
|
|
|
|
#define SAFETY_BARRIER 0x00100000 /* 128K items queued should be enough */
|
2001-02-01 20:51:23 +00:00
|
|
|
|
|
|
|
/* Defines of more elaborate states on the queue */
|
2005-11-02 14:27:24 +00:00
|
|
|
/* Mask of bits a new read cares about */
|
|
|
|
#define NGQ_RMASK (WRITER_ACTIVE|OP_PENDING)
|
2001-02-01 20:51:23 +00:00
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
/* Mask of bits a new write cares about */
|
2001-02-01 20:51:23 +00:00
|
|
|
#define NGQ_WMASK (NGQ_RMASK|READER_MASK)
|
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
/* Test to decide if there is something on the queue. */
|
|
|
|
#define QUEUE_ACTIVE(QP) ((QP)->q_flags & OP_PENDING)
|
|
|
|
|
|
|
|
/* How to decide what the next queued item is. */
|
2008-04-15 21:15:32 +00:00
|
|
|
#define HEAD_IS_READER(QP) NGI_QUEUED_READER(STAILQ_FIRST(&(QP)->queue))
|
|
|
|
#define HEAD_IS_WRITER(QP) NGI_QUEUED_WRITER(STAILQ_FIRST(&(QP)->queue)) /* notused */
|
2005-11-02 14:27:24 +00:00
|
|
|
|
|
|
|
/* Read the status to decide if the next item on the queue can now run. */
|
|
|
|
#define QUEUED_READER_CAN_PROCEED(QP) \
|
|
|
|
(((QP)->q_flags & (NGQ_RMASK & ~OP_PENDING)) == 0)
|
|
|
|
#define QUEUED_WRITER_CAN_PROCEED(QP) \
|
|
|
|
(((QP)->q_flags & (NGQ_WMASK & ~OP_PENDING)) == 0)
|
2001-02-01 20:51:23 +00:00
|
|
|
|
|
|
|
/* Is there a chance of getting ANY work off the queue? */
|
2005-11-02 14:27:24 +00:00
|
|
|
#define NEXT_QUEUED_ITEM_CAN_PROCEED(QP) \
|
|
|
|
((HEAD_IS_READER(QP)) ? QUEUED_READER_CAN_PROCEED(QP) : \
|
2008-04-06 15:26:32 +00:00
|
|
|
QUEUED_WRITER_CAN_PROCEED(QP))
|
2005-11-02 14:27:24 +00:00
|
|
|
|
2005-10-11 13:48:38 +00:00
|
|
|
#define NGQRW_R 0
|
|
|
|
#define NGQRW_W 1
|
|
|
|
|
2008-04-15 21:15:32 +00:00
|
|
|
#define NGQ2_WORKQ 0x00000001
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
2001-01-06 00:46:47 +00:00
|
|
|
* Taking into account the current state of the queue and node, possibly take
|
|
|
|
* the next entry off the queue and return it. Return NULL if there was
|
|
|
|
* nothing we could return, either because there really was nothing there, or
|
|
|
|
* because the node was in a state where it cannot yet process the next item
|
|
|
|
* on the queue.
|
1999-10-21 09:06:11 +00:00
|
|
|
*/
|
2001-01-06 00:46:47 +00:00
|
|
|
static __inline item_p
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_dequeue(node_p node, int *rw)
|
1999-10-21 09:06:11 +00:00
|
|
|
{
|
2001-01-06 00:46:47 +00:00
|
|
|
item_p item;
|
2008-04-15 21:15:32 +00:00
|
|
|
struct ng_queue *ngq = &node->nd_input_queue;
|
2001-02-01 20:51:23 +00:00
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* This MUST be called with the mutex held. */
|
2004-12-19 14:58:13 +00:00
|
|
|
mtx_assert(&ngq->q_mtx, MA_OWNED);
|
2008-04-09 19:03:19 +00:00
|
|
|
|
|
|
|
/* If there is nothing queued, then just return. */
|
2005-11-02 14:27:24 +00:00
|
|
|
if (!QUEUE_ACTIVE(ngq)) {
|
2006-01-12 22:41:32 +00:00
|
|
|
CTR4(KTR_NET, "%20s: node [%x] (%p) queue empty; "
|
|
|
|
"queue flags 0x%lx", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node, ngq->q_flags);
|
2005-11-02 14:27:24 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
2004-12-19 14:58:13 +00:00
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
/*
|
|
|
|
* From here, we can assume there is a head item.
|
|
|
|
* We need to find out what it is and if it can be dequeued, given
|
|
|
|
* the current state of the node.
|
|
|
|
*/
|
|
|
|
if (HEAD_IS_READER(ngq)) {
|
2008-04-06 15:26:32 +00:00
|
|
|
while (1) {
|
|
|
|
long t = ngq->q_flags;
|
|
|
|
if (t & WRITER_ACTIVE) {
|
2008-04-09 19:03:19 +00:00
|
|
|
/* There is writer, reader can't proceed. */
|
2008-04-06 15:26:32 +00:00
|
|
|
CTR4(KTR_NET, "%20s: node [%x] (%p) queued reader "
|
|
|
|
"can't proceed; queue flags 0x%lx", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node, t);
|
2008-04-06 15:26:32 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
2008-04-15 21:15:32 +00:00
|
|
|
if (atomic_cmpset_acq_int(&ngq->q_flags, t,
|
2008-04-06 15:26:32 +00:00
|
|
|
t + READER_INCREMENT))
|
|
|
|
break;
|
|
|
|
cpu_spinwait();
|
2005-11-02 14:27:24 +00:00
|
|
|
}
|
2008-04-09 19:03:19 +00:00
|
|
|
/* We have got reader lock for the node. */
|
2005-10-11 13:48:38 +00:00
|
|
|
*rw = NGQRW_R;
|
2008-04-15 21:15:32 +00:00
|
|
|
} else if (atomic_cmpset_acq_int(&ngq->q_flags, OP_PENDING,
|
2008-04-06 15:26:32 +00:00
|
|
|
OP_PENDING + WRITER_ACTIVE)) {
|
2008-04-09 19:03:19 +00:00
|
|
|
/* We have got writer lock for the node. */
|
2005-10-11 13:48:38 +00:00
|
|
|
*rw = NGQRW_W;
|
2001-01-06 00:46:47 +00:00
|
|
|
} else {
|
2008-04-09 19:03:19 +00:00
|
|
|
/* There is somebody other, writer can't proceed. */
|
2008-04-06 15:26:32 +00:00
|
|
|
CTR4(KTR_NET, "%20s: node [%x] (%p) queued writer "
|
|
|
|
"can't proceed; queue flags 0x%lx", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node, ngq->q_flags);
|
2005-11-02 14:27:24 +00:00
|
|
|
return (NULL);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2000-12-12 18:52:14 +00:00
|
|
|
/*
|
2001-01-06 00:46:47 +00:00
|
|
|
* Now we dequeue the request (whatever it may be) and correct the
|
|
|
|
* pending flags and the next and last pointers.
|
2000-12-12 18:52:14 +00:00
|
|
|
*/
|
2008-04-15 21:15:32 +00:00
|
|
|
item = STAILQ_FIRST(&ngq->queue);
|
|
|
|
STAILQ_REMOVE_HEAD(&ngq->queue, el_next);
|
|
|
|
if (STAILQ_EMPTY(&ngq->queue))
|
|
|
|
atomic_clear_int(&ngq->q_flags, OP_PENDING);
|
2006-01-12 22:41:32 +00:00
|
|
|
CTR6(KTR_NET, "%20s: node [%x] (%p) returning item %p as %s; "
|
|
|
|
"queue flags 0x%lx", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node, item, *rw ? "WRITER" : "READER" ,
|
2006-01-11 15:29:48 +00:00
|
|
|
ngq->q_flags);
|
2001-01-06 00:46:47 +00:00
|
|
|
return (item);
|
|
|
|
}
|
2000-12-12 18:52:14 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
2008-04-09 19:03:19 +00:00
|
|
|
* Queue a packet to be picked up later by someone else.
|
|
|
|
* If the queue could be run now, add node to the queue handler's worklist.
|
2001-01-06 00:46:47 +00:00
|
|
|
*/
|
|
|
|
static __inline void
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_queue_rw(node_p node, item_p item, int rw)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2008-04-15 21:15:32 +00:00
|
|
|
struct ng_queue *ngq = &node->nd_input_queue;
|
2005-11-02 14:27:24 +00:00
|
|
|
if (rw == NGQRW_W)
|
|
|
|
NGI_SET_WRITER(item);
|
|
|
|
else
|
|
|
|
NGI_SET_READER(item);
|
2008-04-06 15:26:32 +00:00
|
|
|
|
|
|
|
NG_QUEUE_LOCK(ngq);
|
|
|
|
/* Set OP_PENDING flag and enqueue the item. */
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_set_int(&ngq->q_flags, OP_PENDING);
|
|
|
|
STAILQ_INSERT_TAIL(&ngq->queue, item, el_next);
|
2008-04-06 15:26:32 +00:00
|
|
|
|
2006-01-12 22:41:32 +00:00
|
|
|
CTR5(KTR_NET, "%20s: node [%x] (%p) queued item %p as %s", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node, item, rw ? "WRITER" : "READER" );
|
2008-04-06 15:26:32 +00:00
|
|
|
|
2005-11-02 14:27:24 +00:00
|
|
|
/*
|
|
|
|
* We can take the worklist lock with the node locked
|
|
|
|
* BUT NOT THE REVERSE!
|
|
|
|
*/
|
|
|
|
if (NEXT_QUEUED_ITEM_CAN_PROCEED(ngq))
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_worklist_add(node);
|
2008-04-06 15:26:32 +00:00
|
|
|
NG_QUEUE_UNLOCK(ngq);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* Acquire reader lock on node. If node is busy, queue the packet. */
|
2001-01-06 00:46:47 +00:00
|
|
|
static __inline item_p
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_acquire_read(node_p node, item_p item)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2008-04-15 21:15:32 +00:00
|
|
|
KASSERT(node != &ng_deadnode,
|
2005-11-02 15:23:47 +00:00
|
|
|
("%s: working on deadnode", __func__));
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2008-04-06 15:26:32 +00:00
|
|
|
/* Reader needs node without writer and pending items. */
|
|
|
|
while (1) {
|
2008-04-15 21:15:32 +00:00
|
|
|
long t = node->nd_input_queue.q_flags;
|
2008-04-06 15:26:32 +00:00
|
|
|
if (t & NGQ_RMASK)
|
|
|
|
break; /* Node is not ready for reader. */
|
2008-04-15 21:15:32 +00:00
|
|
|
if (atomic_cmpset_acq_int(&node->nd_input_queue.q_flags,
|
|
|
|
t, t + READER_INCREMENT)) {
|
2008-04-06 15:26:32 +00:00
|
|
|
/* Successfully grabbed node */
|
|
|
|
CTR4(KTR_NET, "%20s: node [%x] (%p) acquired item %p",
|
2008-04-15 21:15:32 +00:00
|
|
|
__func__, node->nd_ID, node, item);
|
2008-04-06 15:26:32 +00:00
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
cpu_spinwait();
|
|
|
|
};
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2008-04-06 15:26:32 +00:00
|
|
|
/* Queue the request for later. */
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_queue_rw(node, item, NGQRW_R);
|
Problem description:
At the end of ng_snd_item(), node queue is processed. In certain
netgraph setups deep recursive calls can occur.
For example this happens, when two nodes are connected and can send
items to each other in both directions. If, for some reason, both nodes
have a lot of items in their queues, then the processing thread will
recurse between these two nodes, delivering items left and right, going
deeper in the stack. Other setups can suffer from deep recursion, too.
The following factors can influence risk of deep netgraph call:
- periodical write-access events on node
- combination of slow link and fast one in one graph
- net.inet.ip.fastforwarding
Changes made:
- In ng_acquire_{read,write}() do not dequeue another item. Instead,
call ng_setisr() for this node.
- At the end of ng_snd_item(), do not process queue. Call ng_setisr(),
if there are any dequeueable items on node queue.
- In ng_setisr() narrow worklist mutex holding.
- In ng_setisr() assert queue mutex.
Theoretically, the first two changes should negatively affect performance.
To check this, some profiling was made:
1) In general real tasks, no noticable performance difference was found.
2) The following test was made: two multithreaded nodes and one
single-threaded were connected into a ring. A large queues of packets
were sent around this ring. Time to pass the ring N times was measured.
This is a very vacuous test: no items/mbufs are allocated, no upcalls or
downcalls outside of netgraph. It doesn't represent a real load, it is
a stress test for ng_acquire_{read,write}() and item queueing functions.
Surprisingly, the performance impact was positive! New code is 13% faster
on UP and 17% faster on SMP, in this particular test.
The problem was originally found, described, analyzed and original patch
was written by Roselyn Lee from Vernier Networks. Thanks!
Submitted by: Roselyn Lee <rosel verniernetworks com>
2005-07-21 12:08:37 +00:00
|
|
|
|
|
|
|
return (NULL);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* Acquire writer lock on node. If node is busy, queue the packet. */
static __inline item_p
ng_acquire_write(node_p node, item_p item)
{
	KASSERT(node != &ng_deadnode,
	    ("%s: working on deadnode", __func__));

	/*
	 * Writer needs completely idle node: q_flags must be exactly 0
	 * (no readers, no active writer, nothing pending).  A single
	 * acquire-ordered compare-and-set claims the node on the fast
	 * path without taking the queue mutex.
	 */
	if (atomic_cmpset_acq_int(&node->nd_input_queue.q_flags,
	    0, WRITER_ACTIVE)) {
		/* Successfully grabbed node */
		CTR4(KTR_NET, "%20s: node [%x] (%p) acquired item %p",
		    __func__, node->nd_ID, node, item);
		/* Caller may apply the item directly. */
		return (item);
	}

	/* Node is busy: queue the request for later. */
	ng_queue_rw(node, item, NGQRW_W);

	/* NULL tells the caller the item was queued, not delivered. */
	return (NULL);
}
|
|
|
|
|
2007-03-09 21:04:50 +00:00
|
|
|
#if 0
|
|
|
|
static __inline item_p
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_upgrade_write(node_p node, item_p item)
|
2007-03-09 21:04:50 +00:00
|
|
|
{
|
2008-04-15 21:15:32 +00:00
|
|
|
struct ng_queue *ngq = &node->nd_input_queue;
|
|
|
|
KASSERT(node != &ng_deadnode,
|
2007-03-09 21:04:50 +00:00
|
|
|
("%s: working on deadnode", __func__));
|
|
|
|
|
|
|
|
NGI_SET_WRITER(item);
|
|
|
|
|
2008-04-15 21:15:32 +00:00
|
|
|
NG_QUEUE_LOCK(ngq);
|
2007-03-09 21:04:50 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There will never be no readers as we are there ourselves.
|
|
|
|
* Set the WRITER_ACTIVE flags ASAP to block out fast track readers.
|
|
|
|
* The caller we are running from will call ng_leave_read()
|
|
|
|
* soon, so we must account for that. We must leave again with the
|
|
|
|
* READER lock. If we find other readers, then
|
|
|
|
* queue the request for later. However "later" may be rignt now
|
|
|
|
* if there are no readers. We don't really care if there are queued
|
|
|
|
* items as we will bypass them anyhow.
|
|
|
|
*/
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_add_int(&ngq->q_flags, WRITER_ACTIVE - READER_INCREMENT);
|
|
|
|
if ((ngq->q_flags & (NGQ_WMASK & ~OP_PENDING)) == WRITER_ACTIVE) {
|
|
|
|
NG_QUEUE_UNLOCK(ngq);
|
2007-03-09 21:04:50 +00:00
|
|
|
|
|
|
|
/* It's just us, act on the item. */
|
|
|
|
/* will NOT drop writer lock when done */
|
|
|
|
ng_apply_item(node, item, 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Having acted on the item, atomically
|
|
|
|
* down grade back to READER and finish up
|
|
|
|
*/
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_add_int(&ngq->q_flags,
|
2007-03-09 21:04:50 +00:00
|
|
|
READER_INCREMENT - WRITER_ACTIVE);
|
|
|
|
|
|
|
|
/* Our caller will call ng_leave_read() */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* It's not just us active, so queue us AT THE HEAD.
|
|
|
|
* "Why?" I hear you ask.
|
|
|
|
* Put us at the head of the queue as we've already been
|
|
|
|
* through it once. If there is nothing else waiting,
|
|
|
|
* set the correct flags.
|
|
|
|
*/
|
2008-04-15 21:15:32 +00:00
|
|
|
if (STAILQ_EMPTY(&ngq->queue)) {
|
2007-03-09 21:04:50 +00:00
|
|
|
/* We've gone from, 0 to 1 item in the queue */
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_set_int(&ngq->q_flags, OP_PENDING);
|
2007-03-09 21:04:50 +00:00
|
|
|
|
|
|
|
CTR3(KTR_NET, "%20s: node [%x] (%p) set OP_PENDING", __func__,
|
2008-04-15 21:15:32 +00:00
|
|
|
node->nd_ID, node);
|
2007-03-09 21:04:50 +00:00
|
|
|
};
|
2008-04-15 21:15:32 +00:00
|
|
|
STAILQ_INSERT_HEAD(&ngq->queue, item, el_next);
|
|
|
|
CTR4(KTR_NET, "%20s: node [%x] (%p) requeued item %p as WRITER",
|
|
|
|
__func__, node->nd_ID, node, item );
|
2007-03-09 21:04:50 +00:00
|
|
|
|
|
|
|
/* Reverse what we did above. That downgrades us back to reader */
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_add_int(&ngq->q_flags, READER_INCREMENT - WRITER_ACTIVE);
|
2008-04-06 15:26:32 +00:00
|
|
|
if (QUEUE_ACTIVE(ngq) && NEXT_QUEUED_ITEM_CAN_PROCEED(ngq))
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_worklist_add(node);
|
|
|
|
NG_QUEUE_UNLOCK(ngq);
|
2007-03-09 21:04:50 +00:00
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* Release reader lock. */
|
2001-01-06 00:46:47 +00:00
|
|
|
static __inline void
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_leave_read(node_p node)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_subtract_rel_int(&node->nd_input_queue.q_flags, READER_INCREMENT);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* Release writer lock. */
|
2001-01-06 00:46:47 +00:00
|
|
|
static __inline void
|
2008-04-15 21:15:32 +00:00
|
|
|
ng_leave_write(node_p node)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2008-04-15 21:15:32 +00:00
|
|
|
atomic_clear_rel_int(&node->nd_input_queue.q_flags, WRITER_ACTIVE);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
|
|
|
|
2008-04-09 19:03:19 +00:00
|
|
|
/* Purge node queue. Called on node shutdown. */
static void
ng_flush_input_queue(node_p node)
{
	struct ng_queue *ngq = &node->nd_input_queue;
	item_p item;

	NG_QUEUE_LOCK(ngq);
	while ((item = STAILQ_FIRST(&ngq->queue)) != NULL) {
		STAILQ_REMOVE_HEAD(&ngq->queue, el_next);
		if (STAILQ_EMPTY(&ngq->queue))
			atomic_clear_int(&ngq->q_flags, OP_PENDING);
		/*
		 * Drop the queue mutex while running the callback and
		 * freeing the item; both may do arbitrary work.  The
		 * lock is retaken before the next list access.
		 */
		NG_QUEUE_UNLOCK(ngq);

		/* If the item is supplying a callback, call it with an error */
		if (item->apply != NULL) {
			/* depth == 1: item was never applied; report ENOENT. */
			if (item->depth == 1)
				item->apply->error = ENOENT;
			/* Fire the callback only on the last reference. */
			if (refcount_release(&item->apply->refs)) {
				(*item->apply->apply)(item->apply->context,
				    item->apply->error);
			}
		}
		NG_FREE_ITEM(item);
		NG_QUEUE_LOCK(ngq);
	}
	NG_QUEUE_UNLOCK(ngq);
}
|
|
|
|
|
|
|
|
/***********************************************************************
 * Externally visible method for sending or queueing messages or data.
 ***********************************************************************/

/*
 * The module code should have filled out the item correctly by this stage:
 * Common:
 *    reference to destination node.
 *    Reference to destination rcv hook if relevant.
 *    apply pointer must be or NULL or reference valid struct ng_apply_info.
 * Data:
 *    pointer to mbuf
 * Control_Message:
 *    pointer to msg.
 *    ID of original sender node. (return address)
 * Function:
 *    Function pointer
 *    void * argument
 *    integer argument
 *
 * The nodes have several routines and macros to help with this task:
 */

int
ng_snd_item(item_p item, int flags)
{
	hook_p hook;
	node_p node;
	int queue, rw;
	struct ng_queue *ngq;
	int error = 0;

	/* We are sending item, so it must be present! */
	KASSERT(item != NULL, ("ng_snd_item: item is NULL"));

#ifdef NETGRAPH_DEBUG
	_ngi_check(item, __FILE__, __LINE__);
#endif

	/* Item was sent once more, postpone apply() call. */
	if (item->apply)
		refcount_acquire(&item->apply->refs);

	node = NGI_NODE(item);
	/* Node is never optional. */
	KASSERT(node != NULL, ("ng_snd_item: node is NULL"));

	hook = NGI_HOOK(item);
	/* Valid hook and mbuf are mandatory for data. */
	if ((item->el_flags & NGQF_TYPE) == NGQF_DATA) {
		KASSERT(hook != NULL, ("ng_snd_item: hook for data is NULL"));
		if (NGI_M(item) == NULL)
			ERROUT(EINVAL);	/* jumps to "done:" below */
		CHECK_DATA_MBUF(NGI_M(item));
	}

	/*
	 * If the item or the node specifies single threading, force
	 * writer semantics. Similarly, the node may say one hook always
	 * produces writers. These are overrides.
	 */
	if (((item->el_flags & NGQF_RW) == NGQF_WRITER) ||
	    (node->nd_flags & NGF_FORCE_WRITER) ||
	    (hook && (hook->hk_flags & HK_FORCE_WRITER))) {
		rw = NGQRW_W;
	} else {
		rw = NGQRW_R;
	}

	/*
	 * If sender or receiver requests queued delivery, or call graph
	 * loops back from outbound to inbound path, or stack usage
	 * level is dangerous - enqueue message.
	 */
	if ((flags & NG_QUEUE) || (hook && (hook->hk_flags & HK_QUEUE))) {
		queue = 1;
	} else if (hook && (hook->hk_flags & HK_TO_INBOUND) &&
	    curthread->td_ng_outbound) {
		queue = 1;
	} else {
		queue = 0;
#ifdef GET_STACK_USAGE
		/*
		 * Most of netgraph nodes have small stack consumption and
		 * for them 25% of free stack space is more than enough.
		 * Nodes/hooks with higher stack usage should be marked as
		 * HI_STACK. For them 50% of stack will be guaranteed then.
		 * XXX: Values 25% and 50% are completely empirical.
		 */
		size_t	st, su, sl;
		GET_STACK_USAGE(st, su);
		sl = st - su;
		if ((sl * 4 < st) ||
		    ((sl * 2 < st) && ((node->nd_flags & NGF_HI_STACK) ||
		    (hook && (hook->hk_flags & HK_HI_STACK))))) {
			queue = 1;
		}
#endif
	}

	if (queue) {
		/* depth == 1 marks an item delivered via the queue. */
		item->depth = 1;
		/* Put it on the queue for that node*/
		ng_queue_rw(node, item, rw);
		return ((flags & NG_PROGRESS) ? EINPROGRESS : 0);
	}

	/*
	 * We already decided how we will be queued or treated.
	 * Try get the appropriate operating permission.
	 */
	if (rw == NGQRW_R)
		item = ng_acquire_read(node, item);
	else
		item = ng_acquire_write(node, item);

	/* Item was queued while trying to get permission. */
	if (item == NULL)
		return ((flags & NG_PROGRESS) ? EINPROGRESS : 0);

	NGI_GET_NODE(item, node);	/* zaps stored node */

	item->depth++;
	error = ng_apply_item(node, item, rw);	/* drops r/w lock when done */

	/* If something is waiting on queue and ready, schedule it. */
	ngq = &node->nd_input_queue;
	if (QUEUE_ACTIVE(ngq)) {
		/* Re-check under the queue mutex before scheduling. */
		NG_QUEUE_LOCK(ngq);
		if (QUEUE_ACTIVE(ngq) && NEXT_QUEUED_ITEM_CAN_PROCEED(ngq))
			ng_worklist_add(node);
		NG_QUEUE_UNLOCK(ngq);
	}

	/*
	 * Node may go away as soon as we remove the reference.
	 * Whatever we do, DO NOT access the node again!
	 */
	NG_NODE_UNREF(node);

	return (error);

done:
	/* If was not sent, apply callback here. */
	if (item->apply != NULL) {
		/* depth == 0: never dispatched; record our error. */
		if (item->depth == 0 && error != 0)
			item->apply->error = error;
		if (refcount_release(&item->apply->refs)) {
			(*item->apply->apply)(item->apply->context,
			    item->apply->error);
		}
	}

	NG_FREE_ITEM(item);
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * We have an item that was possibly queued somewhere.
 * It should contain all the information needed
 * to run it on the appropriate node/hook.
 * If there is apply pointer and we own the last reference, call apply().
 */
static int
ng_apply_item(node_p node, item_p item, int rw)
{
	hook_p	hook;
	ng_rcvdata_t *rcvdata;
	ng_rcvmsg_t *rcvmsg;
	struct ng_apply_info *apply;
	int	error = 0, depth;

	/* Node and item are never optional. */
	KASSERT(node != NULL, ("ng_apply_item: node is NULL"));
	KASSERT(item != NULL, ("ng_apply_item: item is NULL"));

	NGI_GET_HOOK(item, hook);	/* clears stored hook */
#ifdef NETGRAPH_DEBUG
	_ngi_check(item, __FILE__, __LINE__);
#endif

	/*
	 * Snapshot apply/depth now: the handlers below consume (free)
	 * the item, so its fields must not be touched afterwards.
	 */
	apply = item->apply;
	depth = item->depth;

	switch (item->el_flags & NGQF_TYPE) {
	case NGQF_DATA:
		/*
		 * Check things are still ok as when we were queued.
		 */
		KASSERT(hook != NULL, ("ng_apply_item: hook for data is NULL"));
		if (NG_HOOK_NOT_VALID(hook) ||
		    NG_NODE_NOT_VALID(node)) {
			error = EIO;
			NG_FREE_ITEM(item);
			break;
		}
		/*
		 * If no receive method, just silently drop it.
		 * Give preference to the hook over-ride method
		 */
		if ((!(rcvdata = hook->hk_rcvdata))
		&& (!(rcvdata = NG_HOOK_NODE(hook)->nd_type->rcvdata))) {
			error = 0;
			NG_FREE_ITEM(item);
			break;
		}
		error = (*rcvdata)(hook, item);
		break;
	case NGQF_MESG:
		if (hook && NG_HOOK_NOT_VALID(hook)) {
			/*
			 * The hook has been zapped then we can't use it.
			 * Immediately drop its reference.
			 * The message may not need it.
			 */
			NG_HOOK_UNREF(hook);
			hook = NULL;
		}
		/*
		 * Similarly, if the node is a zombie there is
		 * nothing we can do with it, drop everything.
		 */
		if (NG_NODE_NOT_VALID(node)) {
			TRAP_ERROR();
			error = EINVAL;
			NG_FREE_ITEM(item);
			break;
		}
		/*
		 * Call the appropriate message handler for the object.
		 * It is up to the message handler to free the message.
		 * If it's a generic message, handle it generically,
		 * otherwise call the type's message handler (if it exists).
		 * XXX (race). Remember that a queued message may
		 * reference a node or hook that has just been
		 * invalidated. It will exist as the queue code
		 * is holding a reference, but..
		 */
		if ((NGI_MSG(item)->header.typecookie == NGM_GENERIC_COOKIE) &&
		    ((NGI_MSG(item)->header.flags & NGF_RESP) == 0)) {
			error = ng_generic_msg(node, item, hook);
			break;
		}
		if (((!hook) || (!(rcvmsg = hook->hk_rcvmsg))) &&
		    (!(rcvmsg = node->nd_type->rcvmsg))) {
			TRAP_ERROR();
			error = 0;
			NG_FREE_ITEM(item);
			break;
		}
		error = (*rcvmsg)(node, item, hook);
		break;
	case NGQF_FN:
	case NGQF_FN2:
		/*
		 * In the case of the shutdown message we allow it to hit
		 * even if the node is invalid.
		 */
		if (NG_NODE_NOT_VALID(node) &&
		    NGI_FN(item) != &ng_rmnode) {
			TRAP_ERROR();
			error = EINVAL;
			NG_FREE_ITEM(item);
			break;
		}
		/* Same is about some internal functions and invalid hook. */
		if (hook && NG_HOOK_NOT_VALID(hook) &&
		    NGI_FN2(item) != &ng_con_part2 &&
		    NGI_FN2(item) != &ng_con_part3 &&
		    NGI_FN(item) != &ng_rmhook_part2) {
			TRAP_ERROR();
			error = EINVAL;
			NG_FREE_ITEM(item);
			break;
		}

		if ((item->el_flags & NGQF_TYPE) == NGQF_FN) {
			/* NGQF_FN callbacks return no status. */
			(*NGI_FN(item))(node, hook, NGI_ARG1(item),
			    NGI_ARG2(item));
			NG_FREE_ITEM(item);
		} else	/* it is NGQF_FN2 */
			error = (*NGI_FN2(item))(node, item, hook);
		break;
	}
	/*
	 * We held references on some of the resources
	 * that we took from the item. Now that we have
	 * finished doing everything, drop those references.
	 */
	if (hook)
		NG_HOOK_UNREF(hook);

	/* Drop the read or write lock taken before dispatch. */
	if (rw == NGQRW_R)
		ng_leave_read(node);
	else
		ng_leave_write(node);

	/* Apply callback. */
	if (apply != NULL) {
		/* depth == 1: this was the original send; record error. */
		if (depth == 1 && error != 0)
			apply->error = error;
		if (refcount_release(&apply->refs))
			(*apply->apply)(apply->context, apply->error);
	}

	return (error);
}
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* Implement the 'generic' control messages
|
|
|
|
***********************************************************************/
|
|
|
|
static int
|
|
|
|
ng_generic_msg(node_p here, item_p item, hook_p lasthook)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
struct ng_mesg *msg;
|
|
|
|
struct ng_mesg *resp = NULL;
|
|
|
|
|
|
|
|
NGI_GET_MSG(item, msg);
|
|
|
|
if (msg->header.typecookie != NGM_GENERIC_COOKIE) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
switch (msg->header.cmd) {
|
|
|
|
case NGM_SHUTDOWN:
|
2001-01-30 20:51:52 +00:00
|
|
|
ng_rmnode(here, NULL, NULL, 0);
|
2001-01-06 00:46:47 +00:00
|
|
|
break;
|
|
|
|
case NGM_MKPEER:
|
|
|
|
{
|
|
|
|
struct ngm_mkpeer *const mkp = (struct ngm_mkpeer *) msg->data;
|
|
|
|
|
|
|
|
if (msg->header.arglen != sizeof(*mkp)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
mkp->type[sizeof(mkp->type) - 1] = '\0';
|
|
|
|
mkp->ourhook[sizeof(mkp->ourhook) - 1] = '\0';
|
|
|
|
mkp->peerhook[sizeof(mkp->peerhook) - 1] = '\0';
|
|
|
|
error = ng_mkpeer(here, mkp->ourhook, mkp->peerhook, mkp->type);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NGM_CONNECT:
|
|
|
|
{
|
|
|
|
struct ngm_connect *const con =
|
|
|
|
(struct ngm_connect *) msg->data;
|
|
|
|
node_p node2;
|
|
|
|
|
|
|
|
if (msg->header.arglen != sizeof(*con)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
con->path[sizeof(con->path) - 1] = '\0';
|
|
|
|
con->ourhook[sizeof(con->ourhook) - 1] = '\0';
|
|
|
|
con->peerhook[sizeof(con->peerhook) - 1] = '\0';
|
|
|
|
/* Don't forget we get a reference.. */
|
|
|
|
error = ng_path2noderef(here, con->path, &node2, NULL);
|
|
|
|
if (error)
|
|
|
|
break;
|
2007-10-19 15:04:17 +00:00
|
|
|
error = ng_con_nodes(item, here, con->ourhook,
|
|
|
|
node2, con->peerhook);
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_NODE_UNREF(node2);
|
2001-01-06 00:46:47 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NGM_NAME:
|
|
|
|
{
|
|
|
|
struct ngm_name *const nam = (struct ngm_name *) msg->data;
|
|
|
|
|
|
|
|
if (msg->header.arglen != sizeof(*nam)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
nam->name[sizeof(nam->name) - 1] = '\0';
|
|
|
|
error = ng_name_node(here, nam->name);
|
|
|
|
break;
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
case NGM_RMHOOK:
|
|
|
|
{
|
|
|
|
struct ngm_rmhook *const rmh = (struct ngm_rmhook *) msg->data;
|
|
|
|
hook_p hook;
|
|
|
|
|
|
|
|
if (msg->header.arglen != sizeof(*rmh)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
rmh->ourhook[sizeof(rmh->ourhook) - 1] = '\0';
|
1999-12-03 21:17:30 +00:00
|
|
|
if ((hook = ng_findhook(here, rmh->ourhook)) != NULL)
|
1999-10-21 09:06:11 +00:00
|
|
|
ng_destroy_hook(hook);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NGM_NODEINFO:
|
|
|
|
{
|
|
|
|
struct nodeinfo *ni;
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*ni), M_NOWAIT);
|
1999-10-21 09:06:11 +00:00
|
|
|
if (resp == NULL) {
|
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Fill in node info */
|
2001-01-06 00:46:47 +00:00
|
|
|
ni = (struct nodeinfo *) resp->data;
|
2001-01-08 05:34:06 +00:00
|
|
|
if (NG_NODE_HAS_NAME(here))
|
2004-01-26 14:05:31 +00:00
|
|
|
strcpy(ni->name, NG_NODE_NAME(here));
|
|
|
|
strcpy(ni->type, here->nd_type->name);
|
1999-11-01 00:31:14 +00:00
|
|
|
ni->id = ng_node2ID(here);
|
2001-01-08 05:34:06 +00:00
|
|
|
ni->hooks = here->nd_numhooks;
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
case NGM_LISTHOOKS:
|
|
|
|
{
|
2001-01-08 05:34:06 +00:00
|
|
|
const int nhooks = here->nd_numhooks;
|
1999-10-21 09:06:11 +00:00
|
|
|
struct hooklist *hl;
|
|
|
|
struct nodeinfo *ni;
|
|
|
|
hook_p hook;
|
|
|
|
|
|
|
|
/* Get response struct */
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*hl)
|
1999-10-21 09:06:11 +00:00
|
|
|
+ (nhooks * sizeof(struct linkinfo)), M_NOWAIT);
|
2001-01-06 00:46:47 +00:00
|
|
|
if (resp == NULL) {
|
1999-10-21 09:06:11 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
hl = (struct hooklist *) resp->data;
|
1999-10-21 09:06:11 +00:00
|
|
|
ni = &hl->nodeinfo;
|
|
|
|
|
|
|
|
/* Fill in node info */
|
2001-01-08 05:34:06 +00:00
|
|
|
if (NG_NODE_HAS_NAME(here))
|
2004-01-26 14:05:31 +00:00
|
|
|
strcpy(ni->name, NG_NODE_NAME(here));
|
|
|
|
strcpy(ni->type, here->nd_type->name);
|
1999-11-01 00:31:14 +00:00
|
|
|
ni->id = ng_node2ID(here);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Cycle through the linked list of hooks */
|
|
|
|
ni->hooks = 0;
|
2001-01-08 05:34:06 +00:00
|
|
|
LIST_FOREACH(hook, &here->nd_hooks, hk_hooks) {
|
1999-10-21 09:06:11 +00:00
|
|
|
struct linkinfo *const link = &hl->link[ni->hooks];
|
|
|
|
|
|
|
|
if (ni->hooks >= nhooks) {
|
|
|
|
log(LOG_ERR, "%s: number of %s changed\n",
|
2001-12-10 08:09:49 +00:00
|
|
|
__func__, "hooks");
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
if (NG_HOOK_NOT_VALID(hook))
|
1999-10-21 09:06:11 +00:00
|
|
|
continue;
|
2004-01-26 14:05:31 +00:00
|
|
|
strcpy(link->ourhook, NG_HOOK_NAME(hook));
|
|
|
|
strcpy(link->peerhook, NG_PEER_HOOK_NAME(hook));
|
2001-01-08 05:34:06 +00:00
|
|
|
if (NG_PEER_NODE_NAME(hook)[0] != '\0')
|
2004-01-26 14:05:31 +00:00
|
|
|
strcpy(link->nodeinfo.name,
|
|
|
|
NG_PEER_NODE_NAME(hook));
|
|
|
|
strcpy(link->nodeinfo.type,
|
|
|
|
NG_PEER_NODE(hook)->nd_type->name);
|
2001-01-08 05:34:06 +00:00
|
|
|
link->nodeinfo.id = ng_node2ID(NG_PEER_NODE(hook));
|
|
|
|
link->nodeinfo.hooks = NG_PEER_NODE(hook)->nd_numhooks;
|
1999-10-21 09:06:11 +00:00
|
|
|
ni->hooks++;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NGM_LISTNAMES:
|
|
|
|
case NGM_LISTNODES:
|
|
|
|
{
|
|
|
|
const int unnamed = (msg->header.cmd == NGM_LISTNODES);
|
|
|
|
struct namelist *nl;
|
|
|
|
node_p node;
|
2008-03-04 18:22:18 +00:00
|
|
|
int num = 0, i;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_lock(&ng_namehash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Count number of nodes */
|
2008-03-04 18:22:18 +00:00
|
|
|
for (i = 0; i < NG_NAME_HASH_SIZE; i++) {
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_FOREACH(node, &V_ng_name_hash[i], nd_nodes) {
|
2008-03-04 18:22:18 +00:00
|
|
|
if (NG_NODE_IS_VALID(node) &&
|
|
|
|
(unnamed || NG_NODE_HAS_NAME(node))) {
|
|
|
|
num++;
|
|
|
|
}
|
2001-01-10 23:19:32 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Get response struct */
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*nl)
|
1999-10-21 09:06:11 +00:00
|
|
|
+ (num * sizeof(struct nodeinfo)), M_NOWAIT);
|
2001-01-06 00:46:47 +00:00
|
|
|
if (resp == NULL) {
|
1999-10-21 09:06:11 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
nl = (struct namelist *) resp->data;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Cycle through the linked list of nodes */
|
|
|
|
nl->numnames = 0;
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_lock(&ng_namehash_mtx);
|
|
|
|
for (i = 0; i < NG_NAME_HASH_SIZE; i++) {
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
LIST_FOREACH(node, &V_ng_name_hash[i], nd_nodes) {
|
2008-03-04 18:22:18 +00:00
|
|
|
struct nodeinfo *const np =
|
|
|
|
&nl->nodeinfo[nl->numnames];
|
|
|
|
|
|
|
|
if (NG_NODE_NOT_VALID(node))
|
|
|
|
continue;
|
|
|
|
if (!unnamed && (! NG_NODE_HAS_NAME(node)))
|
|
|
|
continue;
|
|
|
|
if (nl->numnames >= num) {
|
|
|
|
log(LOG_ERR, "%s: number of nodes changed\n",
|
|
|
|
__func__);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (NG_NODE_HAS_NAME(node))
|
|
|
|
strcpy(np->name, NG_NODE_NAME(node));
|
|
|
|
strcpy(np->type, node->nd_type->name);
|
|
|
|
np->id = ng_node2ID(node);
|
|
|
|
np->hooks = node->nd_numhooks;
|
|
|
|
nl->numnames++;
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
}
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_unlock(&ng_namehash_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NGM_LISTTYPES:
|
|
|
|
{
|
|
|
|
struct typelist *tl;
|
|
|
|
struct ng_type *type;
|
|
|
|
int num = 0;
|
|
|
|
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
/* Count number of types */
|
2001-01-10 23:19:32 +00:00
|
|
|
LIST_FOREACH(type, &ng_typelist, types) {
|
1999-10-21 09:06:11 +00:00
|
|
|
num++;
|
2001-01-10 23:19:32 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Get response struct */
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*tl)
|
1999-10-21 09:06:11 +00:00
|
|
|
+ (num * sizeof(struct typeinfo)), M_NOWAIT);
|
2001-01-06 00:46:47 +00:00
|
|
|
if (resp == NULL) {
|
1999-10-21 09:06:11 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
tl = (struct typelist *) resp->data;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
/* Cycle through the linked list of types */
|
|
|
|
tl->numtypes = 0;
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
LIST_FOREACH(type, &ng_typelist, types) {
|
1999-10-21 09:06:11 +00:00
|
|
|
struct typeinfo *const tp = &tl->typeinfo[tl->numtypes];
|
|
|
|
|
|
|
|
if (tl->numtypes >= num) {
|
|
|
|
log(LOG_ERR, "%s: number of %s changed\n",
|
2001-12-10 08:09:49 +00:00
|
|
|
__func__, "types");
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
2004-01-26 14:05:31 +00:00
|
|
|
strcpy(tp->type_name, type->name);
|
2001-01-24 21:29:57 +00:00
|
|
|
tp->numnodes = type->refs - 1; /* don't count list */
|
1999-10-21 09:06:11 +00:00
|
|
|
tl->numtypes++;
|
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
1999-11-30 02:45:32 +00:00
|
|
|
case NGM_BINARY2ASCII:
|
|
|
|
{
|
2000-08-10 22:50:38 +00:00
|
|
|
int bufSize = 20 * 1024; /* XXX hard coded constant */
|
1999-11-30 02:45:32 +00:00
|
|
|
const struct ng_parse_type *argstype;
|
|
|
|
const struct ng_cmdlist *c;
|
2001-01-06 00:46:47 +00:00
|
|
|
struct ng_mesg *binary, *ascii;
|
1999-11-30 02:45:32 +00:00
|
|
|
|
|
|
|
/* Data area must contain a valid netgraph message */
|
|
|
|
binary = (struct ng_mesg *)msg->data;
|
2005-11-15 10:54:20 +00:00
|
|
|
if (msg->header.arglen < sizeof(struct ng_mesg) ||
|
|
|
|
(msg->header.arglen - sizeof(struct ng_mesg) <
|
|
|
|
binary->header.arglen)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-11-30 02:45:32 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Get a response message with lots of room */
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*ascii) + bufSize, M_NOWAIT);
|
|
|
|
if (resp == NULL) {
|
1999-11-30 02:45:32 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
ascii = (struct ng_mesg *)resp->data;
|
1999-11-30 02:45:32 +00:00
|
|
|
|
|
|
|
/* Copy binary message header to response message payload */
|
|
|
|
bcopy(binary, ascii, sizeof(*binary));
|
|
|
|
|
|
|
|
/* Find command by matching typecookie and command number */
|
2001-01-08 05:34:06 +00:00
|
|
|
for (c = here->nd_type->cmdlist;
|
1999-11-30 02:45:32 +00:00
|
|
|
c != NULL && c->name != NULL; c++) {
|
|
|
|
if (binary->header.typecookie == c->cookie
|
|
|
|
&& binary->header.cmd == c->cmd)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (c == NULL || c->name == NULL) {
|
|
|
|
for (c = ng_generic_cmds; c->name != NULL; c++) {
|
|
|
|
if (binary->header.typecookie == c->cookie
|
|
|
|
&& binary->header.cmd == c->cmd)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (c->name == NULL) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_MSG(resp);
|
1999-11-30 02:45:32 +00:00
|
|
|
error = ENOSYS;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Convert command name to ASCII */
|
|
|
|
snprintf(ascii->header.cmdstr, sizeof(ascii->header.cmdstr),
|
|
|
|
"%s", c->name);
|
|
|
|
|
|
|
|
/* Convert command arguments to ASCII */
|
|
|
|
argstype = (binary->header.flags & NGF_RESP) ?
|
|
|
|
c->respType : c->mesgType;
|
2001-01-10 23:19:32 +00:00
|
|
|
if (argstype == NULL) {
|
1999-11-30 02:45:32 +00:00
|
|
|
*ascii->data = '\0';
|
2001-01-10 23:19:32 +00:00
|
|
|
} else {
|
1999-11-30 02:45:32 +00:00
|
|
|
if ((error = ng_unparse(argstype,
|
|
|
|
(u_char *)binary->data,
|
|
|
|
ascii->data, bufSize)) != 0) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_MSG(resp);
|
1999-11-30 02:45:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the result as struct ng_mesg plus ASCII string */
|
|
|
|
bufSize = strlen(ascii->data) + 1;
|
|
|
|
ascii->header.arglen = bufSize;
|
2001-01-06 00:46:47 +00:00
|
|
|
resp->header.arglen = sizeof(*ascii) + bufSize;
|
1999-11-30 02:45:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
case NGM_ASCII2BINARY:
|
|
|
|
{
|
|
|
|
int bufSize = 2000; /* XXX hard coded constant */
|
|
|
|
const struct ng_cmdlist *c;
|
|
|
|
const struct ng_parse_type *argstype;
|
2001-01-06 00:46:47 +00:00
|
|
|
struct ng_mesg *ascii, *binary;
|
2000-04-12 17:29:33 +00:00
|
|
|
int off = 0;
|
1999-11-30 02:45:32 +00:00
|
|
|
|
|
|
|
/* Data area must contain at least a struct ng_mesg + '\0' */
|
|
|
|
ascii = (struct ng_mesg *)msg->data;
|
2005-11-15 10:54:20 +00:00
|
|
|
if ((msg->header.arglen < sizeof(*ascii) + 1) ||
|
|
|
|
(ascii->header.arglen < 1) ||
|
|
|
|
(msg->header.arglen < sizeof(*ascii) +
|
|
|
|
ascii->header.arglen)) {
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-11-30 02:45:32 +00:00
|
|
|
error = EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
ascii->data[ascii->header.arglen - 1] = '\0';
|
|
|
|
|
|
|
|
/* Get a response message with lots of room */
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_MKRESPONSE(resp, msg, sizeof(*binary) + bufSize, M_NOWAIT);
|
|
|
|
if (resp == NULL) {
|
1999-11-30 02:45:32 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
binary = (struct ng_mesg *)resp->data;
|
1999-11-30 02:45:32 +00:00
|
|
|
|
|
|
|
/* Copy ASCII message header to response message payload */
|
|
|
|
bcopy(ascii, binary, sizeof(*ascii));
|
|
|
|
|
|
|
|
/* Find command by matching ASCII command string */
|
2001-01-08 05:34:06 +00:00
|
|
|
for (c = here->nd_type->cmdlist;
|
1999-11-30 02:45:32 +00:00
|
|
|
c != NULL && c->name != NULL; c++) {
|
|
|
|
if (strcmp(ascii->header.cmdstr, c->name) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (c == NULL || c->name == NULL) {
|
|
|
|
for (c = ng_generic_cmds; c->name != NULL; c++) {
|
|
|
|
if (strcmp(ascii->header.cmdstr, c->name) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (c->name == NULL) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_MSG(resp);
|
1999-11-30 02:45:32 +00:00
|
|
|
error = ENOSYS;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Convert command name to binary */
|
|
|
|
binary->header.cmd = c->cmd;
|
|
|
|
binary->header.typecookie = c->cookie;
|
|
|
|
|
|
|
|
/* Convert command arguments to binary */
|
|
|
|
argstype = (binary->header.flags & NGF_RESP) ?
|
|
|
|
c->respType : c->mesgType;
|
2001-01-10 23:19:32 +00:00
|
|
|
if (argstype == NULL) {
|
1999-11-30 02:45:32 +00:00
|
|
|
bufSize = 0;
|
2001-01-10 23:19:32 +00:00
|
|
|
} else {
|
1999-11-30 02:45:32 +00:00
|
|
|
if ((error = ng_parse(argstype, ascii->data,
|
|
|
|
&off, (u_char *)binary->data, &bufSize)) != 0) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_MSG(resp);
|
1999-11-30 02:45:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Return the result */
|
|
|
|
binary->header.arglen = bufSize;
|
2001-01-06 00:46:47 +00:00
|
|
|
resp->header.arglen = sizeof(*binary) + bufSize;
|
1999-11-30 02:45:32 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2000-07-03 13:34:18 +00:00
|
|
|
case NGM_TEXT_CONFIG:
|
1999-10-21 09:06:11 +00:00
|
|
|
case NGM_TEXT_STATUS:
|
|
|
|
/*
|
|
|
|
* This one is tricky as it passes the command down to the
|
|
|
|
* actual node, even though it is a generic type command.
|
2001-01-06 00:46:47 +00:00
|
|
|
* This means we must assume that the item/msg is already freed
|
1999-10-21 09:06:11 +00:00
|
|
|
* when control passes back to us.
|
|
|
|
*/
|
2001-01-08 05:34:06 +00:00
|
|
|
if (here->nd_type->rcvmsg != NULL) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NGI_MSG(item) = msg; /* put it back as we found it */
|
2001-01-08 05:34:06 +00:00
|
|
|
return((*here->nd_type->rcvmsg)(here, item, lasthook));
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
/* Fall through if rcvmsg not supported */
|
|
|
|
default:
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
1999-10-21 09:06:11 +00:00
|
|
|
error = EINVAL;
|
|
|
|
}
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Sometimes a generic message may be statically allocated
|
|
|
|
* to avoid problems with allocating when in tight memeory situations.
|
|
|
|
* Don't free it if it is so.
|
|
|
|
* I break them appart here, because erros may cause a free if the item
|
|
|
|
* in which case we'd be doing it twice.
|
|
|
|
* they are kept together above, to simplify freeing.
|
|
|
|
*/
|
|
|
|
out:
|
|
|
|
NG_RESPOND_MSG(error, here, item, resp);
|
2008-11-22 16:03:18 +00:00
|
|
|
NG_FREE_MSG(msg);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2005-05-14 10:07:17 +00:00
|
|
|
/************************************************************************
|
|
|
|
Queue element get/free routines
|
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
uma_zone_t ng_qzone;
|
2008-04-16 19:52:29 +00:00
|
|
|
uma_zone_t ng_qdzone;
|
2008-12-14 20:15:30 +00:00
|
|
|
static int numthreads = 0; /* number of queue threads */
|
2008-03-05 22:12:34 +00:00
|
|
|
static int maxalloc = 4096;/* limit the damage of a leak */
|
|
|
|
static int maxdata = 512; /* limit the damage of a DoS */
|
2005-05-14 10:07:17 +00:00
|
|
|
|
2008-12-14 20:15:30 +00:00
|
|
|
TUNABLE_INT("net.graph.threads", &numthreads);
|
|
|
|
SYSCTL_INT(_net_graph, OID_AUTO, threads, CTLFLAG_RDTUN, &numthreads,
|
|
|
|
0, "Number of queue processing threads");
|
2005-05-14 10:07:17 +00:00
|
|
|
TUNABLE_INT("net.graph.maxalloc", &maxalloc);
|
|
|
|
SYSCTL_INT(_net_graph, OID_AUTO, maxalloc, CTLFLAG_RDTUN, &maxalloc,
|
2008-04-16 19:52:29 +00:00
|
|
|
0, "Maximum number of non-data queue items to allocate");
|
2008-03-05 22:12:34 +00:00
|
|
|
TUNABLE_INT("net.graph.maxdata", &maxdata);
|
2008-04-16 19:52:29 +00:00
|
|
|
SYSCTL_INT(_net_graph, OID_AUTO, maxdata, CTLFLAG_RDTUN, &maxdata,
|
|
|
|
0, "Maximum number of data queue items to allocate");
|
2005-05-14 10:07:17 +00:00
|
|
|
|
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
static TAILQ_HEAD(, ng_item) ng_itemlist = TAILQ_HEAD_INITIALIZER(ng_itemlist);
|
|
|
|
static int allocated; /* number of items malloc'd */
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get a queue entry.
|
|
|
|
* This is usually called when a packet first enters netgraph.
|
|
|
|
* By definition, this is usually from an interrupt, or from a user.
|
|
|
|
* Users are not so important, but try be quick for the times that it's
|
|
|
|
* an interrupt.
|
|
|
|
*/
|
|
|
|
static __inline item_p
|
2008-04-16 19:52:29 +00:00
|
|
|
ng_alloc_item(int type, int flags)
|
2005-05-14 10:07:17 +00:00
|
|
|
{
|
2008-04-16 19:52:29 +00:00
|
|
|
item_p item;
|
2005-05-16 17:07:03 +00:00
|
|
|
|
2008-04-16 19:52:29 +00:00
|
|
|
KASSERT(((type & ~NGQF_TYPE) == 0),
|
|
|
|
("%s: incorrect item type: %d", __func__, type));
|
2005-05-14 10:07:17 +00:00
|
|
|
|
2008-04-16 19:52:29 +00:00
|
|
|
item = uma_zalloc((type == NGQF_DATA)?ng_qdzone:ng_qzone,
|
|
|
|
((flags & NG_WAITOK) ? M_WAITOK : M_NOWAIT) | M_ZERO);
|
2005-05-14 10:07:17 +00:00
|
|
|
|
|
|
|
if (item) {
|
2008-04-16 19:52:29 +00:00
|
|
|
item->el_flags = type;
|
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
mtx_lock(&ngq_mtx);
|
|
|
|
TAILQ_INSERT_TAIL(&ng_itemlist, item, all);
|
|
|
|
allocated++;
|
|
|
|
mtx_unlock(&ngq_mtx);
|
2005-05-14 10:07:17 +00:00
|
|
|
#endif
|
2008-04-16 19:52:29 +00:00
|
|
|
}
|
2005-05-14 10:07:17 +00:00
|
|
|
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release a queue entry
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ng_free_item(item_p item)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The item may hold resources on it's own. We need to free
|
|
|
|
* these before we can free the item. What they are depends upon
|
|
|
|
* what kind of item it is. it is important that nodes zero
|
|
|
|
* out pointers to resources that they remove from the item
|
|
|
|
* or we release them again here.
|
|
|
|
*/
|
|
|
|
switch (item->el_flags & NGQF_TYPE) {
|
|
|
|
case NGQF_DATA:
|
|
|
|
/* If we have an mbuf still attached.. */
|
|
|
|
NG_FREE_M(_NGI_M(item));
|
|
|
|
break;
|
|
|
|
case NGQF_MESG:
|
|
|
|
_NGI_RETADDR(item) = 0;
|
|
|
|
NG_FREE_MSG(_NGI_MSG(item));
|
|
|
|
break;
|
|
|
|
case NGQF_FN:
|
2007-10-19 15:04:17 +00:00
|
|
|
case NGQF_FN2:
|
2005-05-14 10:07:17 +00:00
|
|
|
/* nothing to free really, */
|
|
|
|
_NGI_FN(item) = NULL;
|
|
|
|
_NGI_ARG1(item) = NULL;
|
|
|
|
_NGI_ARG2(item) = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* If we still have a node or hook referenced... */
|
|
|
|
_NGI_CLR_NODE(item);
|
|
|
|
_NGI_CLR_HOOK(item);
|
|
|
|
|
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
mtx_lock(&ngq_mtx);
|
|
|
|
TAILQ_REMOVE(&ng_itemlist, item, all);
|
|
|
|
allocated--;
|
|
|
|
mtx_unlock(&ngq_mtx);
|
|
|
|
#endif
|
2008-04-16 19:52:29 +00:00
|
|
|
uma_zfree(((item->el_flags & NGQF_TYPE) == NGQF_DATA)?
|
|
|
|
ng_qdzone:ng_qzone, item);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Change type of the queue entry.
|
|
|
|
* Possibly reallocates it from another UMA zone.
|
|
|
|
*/
|
|
|
|
static __inline item_p
|
|
|
|
ng_realloc_item(item_p pitem, int type, int flags)
|
|
|
|
{
|
|
|
|
item_p item;
|
|
|
|
int from, to;
|
|
|
|
|
|
|
|
KASSERT((pitem != NULL), ("%s: can't reallocate NULL", __func__));
|
|
|
|
KASSERT(((type & ~NGQF_TYPE) == 0),
|
|
|
|
("%s: incorrect item type: %d", __func__, type));
|
|
|
|
|
|
|
|
from = ((pitem->el_flags & NGQF_TYPE) == NGQF_DATA);
|
|
|
|
to = (type == NGQF_DATA);
|
|
|
|
if (from != to) {
|
|
|
|
/* If reallocation is required do it and copy item. */
|
|
|
|
if ((item = ng_alloc_item(type, flags)) == NULL) {
|
|
|
|
ng_free_item(pitem);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
*item = *pitem;
|
|
|
|
ng_free_item(pitem);
|
|
|
|
} else
|
|
|
|
item = pitem;
|
|
|
|
item->el_flags = (item->el_flags & ~NGQF_TYPE) | type;
|
|
|
|
|
|
|
|
return (item);
|
2005-05-14 10:07:17 +00:00
|
|
|
}
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/************************************************************************
|
|
|
|
Module routines
|
|
|
|
************************************************************************/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle the loading/unloading of a netgraph node type module
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
ng_mod_event(module_t mod, int event, void *data)
|
|
|
|
{
|
|
|
|
struct ng_type *const type = data;
|
|
|
|
int s, error = 0;
|
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
|
|
|
|
/* Register new netgraph node type */
|
|
|
|
s = splnet();
|
|
|
|
if ((error = ng_newtype(type)) != 0) {
|
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Call type specific code */
|
|
|
|
if (type->mod_event != NULL)
|
2001-01-06 00:46:47 +00:00
|
|
|
if ((error = (*type->mod_event)(mod, event, data))) {
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
2001-01-24 21:29:57 +00:00
|
|
|
type->refs--; /* undo it */
|
1999-10-21 09:06:11 +00:00
|
|
|
LIST_REMOVE(type, types);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
s = splnet();
|
2001-01-24 21:29:57 +00:00
|
|
|
if (type->refs > 1) { /* make sure no nodes exist! */
|
1999-10-21 09:06:11 +00:00
|
|
|
error = EBUSY;
|
2001-01-24 21:29:57 +00:00
|
|
|
} else {
|
|
|
|
if (type->refs == 0) {
|
|
|
|
/* failed load, nothing to undo */
|
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
if (type->mod_event != NULL) { /* check with type */
|
|
|
|
error = (*type->mod_event)(mod, event, data);
|
|
|
|
if (error != 0) { /* type refuses.. */
|
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
LIST_REMOVE(type, types);
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ng_typelist_mtx);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
if (type->mod_event != NULL)
|
|
|
|
error = (*type->mod_event)(mod, event, data);
|
|
|
|
else
|
2004-07-15 08:26:07 +00:00
|
|
|
error = EOPNOTSUPP; /* XXX ? */
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
#ifdef VIMAGE
|
Introduce and use a sysinit-based initialization scheme for virtual
network stacks, VNET_SYSINIT:
- Add VNET_SYSINIT and VNET_SYSUNINIT macros to declare events that will
occur each time a network stack is instantiated and destroyed. In the
!VIMAGE case, these are simply mapped into regular SYSINIT/SYSUNINIT.
For the VIMAGE case, we instead use SYSINIT's to track their order and
properties on registration, using them for each vnet when created/
destroyed, or immediately on module load for already-started vnets.
- Remove vnet_modinfo mechanism that existed to serve this purpose
previously, as well as its dependency scheme: we now just use the
SYSINIT ordering scheme.
- Implement VNET_DOMAIN_SET() to allow protocol domains to declare that
they want init functions to be called for each virtual network stack
rather than just once at boot, compiling down to DOMAIN_SET() in the
non-VIMAGE case.
- Walk all virtualized kernel subsystems and make use of these instead
of modinfo or DOMAIN_SET() for init/uninit events. In some cases,
convert modular components from using modevent to using sysinit (where
appropriate). In some cases, do minor rejuggling of SYSINIT ordering
to make room for or better manage events.
Portions submitted by: jhb (VNET_SYSINIT), bz (cleanup)
Discussed with: jhb, bz, julian, zec
Reviewed by: bz
Approved by: re (VIMAGE blanket)
2009-07-23 20:46:49 +00:00
|
|
|
static void
|
|
|
|
vnet_netgraph_uninit(const void *unused __unused)
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
{
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
#if 0
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
node_p node, last_killed = NULL;
|
|
|
|
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
/* XXXRW: utterly bogus. */
|
|
|
|
while ((node = LIST_FIRST(&V_ng_allnodes)) != NULL) {
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
if (node == last_killed) {
|
|
|
|
/* This should never happen */
|
|
|
|
node->nd_flags |= NGF_REALLY_DIE;
|
|
|
|
printf("netgraph node %s needs NGF_REALLY_DIE\n",
|
|
|
|
node->nd_name);
|
|
|
|
ng_rmnode(node, NULL, NULL, 0);
|
|
|
|
/* This must never happen */
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
if (node == LIST_FIRST(&V_ng_allnodes))
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
panic("netgraph node %s won't die",
|
|
|
|
node->nd_name);
|
|
|
|
}
|
|
|
|
ng_rmnode(node, NULL, NULL, 0);
|
|
|
|
last_killed = node;
|
|
|
|
}
|
Build on Jeff Roberson's linker-set based dynamic per-CPU allocator
(DPCPU), as suggested by Peter Wemm, and implement a new per-virtual
network stack memory allocator. Modify vnet to use the allocator
instead of monolithic global container structures (vinet, ...). This
change solves many binary compatibility problems associated with
VIMAGE, and restores ELF symbols for virtualized global variables.
Each virtualized global variable exists as a "reference copy", and also
once per virtual network stack. Virtualized global variables are
tagged at compile-time, placing the in a special linker set, which is
loaded into a contiguous region of kernel memory. Virtualized global
variables in the base kernel are linked as normal, but those in modules
are copied and relocated to a reserved portion of the kernel's vnet
region with the help of a the kernel linker.
Virtualized global variables exist in per-vnet memory set up when the
network stack instance is created, and are initialized statically from
the reference copy. Run-time access occurs via an accessor macro, which
converts from the current vnet and requested symbol to a per-vnet
address. When "options VIMAGE" is not compiled into the kernel, normal
global ELF symbols will be used instead and indirection is avoided.
This change restores static initialization for network stack global
variables, restores support for non-global symbols and types, eliminates
the need for many subsystem constructors, eliminates large per-subsystem
structures that caused many binary compatibility issues both for
monitoring applications (netstat) and kernel modules, removes the
per-function INIT_VNET_*() macros throughout the stack, eliminates the
need for vnet_symmap ksym(2) munging, and eliminates duplicate
definitions of virtualized globals under VIMAGE_GLOBALS.
Bump __FreeBSD_version and update UPDATING.
Portions submitted by: bz
Reviewed by: bz, zec
Discussed with: gnn, jamie, jeff, jhb, julian, sam
Suggested by: peter
Approved by: re (kensmith)
2009-07-14 22:48:30 +00:00
|
|
|
#endif
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
}
|
Introduce and use a sysinit-based initialization scheme for virtual
network stacks, VNET_SYSINIT:
- Add VNET_SYSINIT and VNET_SYSUNINIT macros to declare events that will
occur each time a network stack is instantiated and destroyed. In the
!VIMAGE case, these are simply mapped into regular SYSINIT/SYSUNINIT.
For the VIMAGE case, we instead use SYSINIT's to track their order and
properties on registration, using them for each vnet when created/
destroyed, or immediately on module load for already-started vnets.
- Remove vnet_modinfo mechanism that existed to serve this purpose
previously, as well as its dependency scheme: we now just use the
SYSINIT ordering scheme.
- Implement VNET_DOMAIN_SET() to allow protocol domains to declare that
they want init functions to be called for each virtual network stack
rather than just once at boot, compiling down to DOMAIN_SET() in the
non-VIMAGE case.
- Walk all virtualized kernel subsystems and make use of these instead
of modinfo or DOMAIN_SET() for init/uninit events. In some cases,
convert modular components from using modevent to using sysinit (where
appropriate). In some cases, do minor rejuggling of SYSINIT ordering
to make room for or better manage events.
Portions submitted by: jhb (VNET_SYSINIT), bz (cleanup)
Discussed with: jhb, bz, julian, zec
Reviewed by: bz
Approved by: re (VIMAGE blanket)
2009-07-23 20:46:49 +00:00
|
|
|
VNET_SYSUNINIT(vnet_netgraph_uninit, SI_SUB_NETGRAPH, SI_ORDER_ANY,
|
|
|
|
vnet_netgraph_uninit, NULL);
|
Introduce an infrastructure for dismantling vnet instances.
Vnet modules and protocol domains may now register destructor
functions to clean up and release per-module state. The destructor
mechanisms can be triggered by invoking "vimage -d", or a future
equivalent command which will be provided via the new jail framework.
While this patch introduces numerous placeholder destructor functions,
many of those are currently incomplete, thus leaking memory or (even
worse) failing to stop all running timers. Many of such issues are
already known and will be incrementaly fixed over the next weeks in
smaller incremental commits.
Apart from introducing new fields in structs ifnet, domain, protosw
and vnet_net, which requires the kernel and modules to be rebuilt, this
change should have no impact on nooptions VIMAGE builds, since vnet
destructors can only be called in VIMAGE kernels. Moreover,
destructor functions should be in general compiled in only in
options VIMAGE builds, except for kernel modules which can be safely
kldunloaded at run time.
Bump __FreeBSD_version to 800097.
Reviewed by: bz, julian
Approved by: rwatson, kib (re), julian (mentor)
2009-06-08 17:15:40 +00:00
|
|
|
#endif /* VIMAGE */
|
|
|
|
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
|
|
|
|
* Handle loading and unloading for this code.
|
|
|
|
* The only thing we need to link into is the NETISR strucure.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ngb_mod_event(module_t mod, int event, void *data)
|
|
|
|
{
|
2008-12-14 20:15:30 +00:00
|
|
|
struct proc *p;
|
|
|
|
struct thread *td;
|
|
|
|
int i, error = 0;
|
1999-10-21 09:06:11 +00:00
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
case MOD_LOAD:
|
2005-05-14 09:25:18 +00:00
|
|
|
/* Initialize everything. */
|
2007-03-30 14:34:34 +00:00
|
|
|
NG_WORKLIST_LOCK_INIT();
|
2003-12-08 01:18:04 +00:00
|
|
|
mtx_init(&ng_typelist_mtx, "netgraph types mutex", NULL,
|
|
|
|
MTX_DEF);
|
|
|
|
mtx_init(&ng_idhash_mtx, "netgraph idhash mutex", NULL,
|
|
|
|
MTX_DEF);
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_init(&ng_namehash_mtx, "netgraph namehash mutex", NULL,
|
|
|
|
MTX_DEF);
|
2005-11-02 15:23:47 +00:00
|
|
|
mtx_init(&ng_topo_mtx, "netgraph topology mutex", NULL,
|
|
|
|
MTX_DEF);
|
2005-05-14 09:25:18 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG
|
2008-03-04 18:22:18 +00:00
|
|
|
mtx_init(&ng_nodelist_mtx, "netgraph nodelist mutex", NULL,
|
|
|
|
MTX_DEF);
|
2005-05-14 09:25:18 +00:00
|
|
|
mtx_init(&ngq_mtx, "netgraph item list mutex", NULL,
|
2003-12-08 01:18:04 +00:00
|
|
|
MTX_DEF);
|
2005-05-14 09:25:18 +00:00
|
|
|
#endif
|
|
|
|
ng_qzone = uma_zcreate("NetGraph items", sizeof(struct ng_item),
|
|
|
|
NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
|
|
|
|
uma_zone_set_max(ng_qzone, maxalloc);
|
2008-04-16 19:52:29 +00:00
|
|
|
ng_qdzone = uma_zcreate("NetGraph data items", sizeof(struct ng_item),
|
|
|
|
NULL, NULL, NULL, NULL, UMA_ALIGN_CACHE, 0);
|
|
|
|
uma_zone_set_max(ng_qdzone, maxdata);
|
2008-12-14 20:15:30 +00:00
|
|
|
/* Autoconfigure number of threads. */
|
|
|
|
if (numthreads <= 0)
|
|
|
|
numthreads = mp_ncpus;
|
|
|
|
/* Create threads. */
|
|
|
|
p = NULL; /* start with no process */
|
|
|
|
for (i = 0; i < numthreads; i++) {
|
|
|
|
if (kproc_kthread_add(ngthread, NULL, &p, &td,
|
|
|
|
RFHIGHPID, 0, "ng_queue", "ng_queue%d", i)) {
|
|
|
|
numthreads = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
2007-03-18 16:49:50 +00:00
|
|
|
/* You can't unload it because an interface may be using it. */
|
1999-10-21 09:06:11 +00:00
|
|
|
error = EBUSY;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
error = EOPNOTSUPP;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static moduledata_t netgraph_mod = {
|
|
|
|
"netgraph",
|
|
|
|
ngb_mod_event,
|
|
|
|
(NULL)
|
|
|
|
};
|
2005-01-06 17:45:03 +00:00
|
|
|
DECLARE_MODULE(netgraph, netgraph_mod, SI_SUB_NETGRAPH, SI_ORDER_MIDDLE);
|
2001-02-23 16:34:22 +00:00
|
|
|
SYSCTL_NODE(_net, OID_AUTO, graph, CTLFLAG_RW, 0, "netgraph Family");
|
|
|
|
SYSCTL_INT(_net_graph, OID_AUTO, abi_version, CTLFLAG_RD, 0, NG_ABI_VERSION,"");
|
|
|
|
SYSCTL_INT(_net_graph, OID_AUTO, msg_version, CTLFLAG_RD, 0, NG_VERSION, "");
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG
|
|
|
|
void
|
|
|
|
dumphook (hook_p hook, char *file, int line)
|
|
|
|
{
|
|
|
|
printf("hook: name %s, %d refs, Last touched:\n",
|
|
|
|
_NG_HOOK_NAME(hook), hook->hk_refs);
|
|
|
|
printf(" Last active @ %s, line %d\n",
|
|
|
|
hook->lastfile, hook->lastline);
|
|
|
|
if (line) {
|
|
|
|
printf(" problem discovered at file %s, line %d\n", file, line);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
dumpnode(node_p node, char *file, int line)
|
|
|
|
{
|
|
|
|
printf("node: ID [%x]: type '%s', %d hooks, flags 0x%x, %d refs, %s:\n",
|
2001-01-14 23:25:54 +00:00
|
|
|
_NG_NODE_ID(node), node->nd_type->name,
|
2001-01-08 05:34:06 +00:00
|
|
|
node->nd_numhooks, node->nd_flags,
|
|
|
|
node->nd_refs, node->nd_name);
|
|
|
|
printf(" Last active @ %s, line %d\n",
|
|
|
|
node->lastfile, node->lastline);
|
|
|
|
if (line) {
|
|
|
|
printf(" problem discovered at file %s, line %d\n", file, line);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
void
|
|
|
|
dumpitem(item_p item, char *file, int line)
|
|
|
|
{
|
2005-05-14 09:25:18 +00:00
|
|
|
printf(" ACTIVE item, last used at %s, line %d",
|
|
|
|
item->lastfile, item->lastline);
|
|
|
|
switch(item->el_flags & NGQF_TYPE) {
|
|
|
|
case NGQF_DATA:
|
|
|
|
printf(" - [data]\n");
|
|
|
|
break;
|
|
|
|
case NGQF_MESG:
|
|
|
|
printf(" - retaddr[%d]:\n", _NGI_RETADDR(item));
|
|
|
|
break;
|
|
|
|
case NGQF_FN:
|
2007-10-19 20:09:58 +00:00
|
|
|
printf(" - fn@%p (%p, %p, %p, %d (%x))\n",
|
|
|
|
_NGI_FN(item),
|
|
|
|
_NGI_NODE(item),
|
|
|
|
_NGI_HOOK(item),
|
|
|
|
item->body.fn.fn_arg1,
|
|
|
|
item->body.fn.fn_arg2,
|
|
|
|
item->body.fn.fn_arg2);
|
|
|
|
break;
|
2007-10-19 15:04:17 +00:00
|
|
|
case NGQF_FN2:
|
2007-10-28 18:05:59 +00:00
|
|
|
printf(" - fn2@%p (%p, %p, %p, %d (%x))\n",
|
2007-10-19 20:09:58 +00:00
|
|
|
_NGI_FN2(item),
|
2005-09-02 19:52:54 +00:00
|
|
|
_NGI_NODE(item),
|
|
|
|
_NGI_HOOK(item),
|
2005-05-14 09:25:18 +00:00
|
|
|
item->body.fn.fn_arg1,
|
|
|
|
item->body.fn.fn_arg2,
|
|
|
|
item->body.fn.fn_arg2);
|
|
|
|
break;
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
if (line) {
|
|
|
|
printf(" problem discovered at file %s, line %d\n", file, line);
|
2005-09-02 19:52:54 +00:00
|
|
|
if (_NGI_NODE(item)) {
|
2001-01-08 05:34:06 +00:00
|
|
|
printf("node %p ([%x])\n",
|
2005-09-02 19:52:54 +00:00
|
|
|
_NGI_NODE(item), ng_node2ID(_NGI_NODE(item)));
|
2001-01-08 05:34:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
ng_dumpitems(void)
|
|
|
|
{
|
|
|
|
item_p item;
|
|
|
|
int i = 1;
|
|
|
|
TAILQ_FOREACH(item, &ng_itemlist, all) {
|
|
|
|
printf("[%d] ", i++);
|
|
|
|
dumpitem(item, NULL, 0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * NETGRAPH_DEBUG: walk the global node list under ng_nodelist_mtx,
 * numbering and dumping each node.
 */
static void
ng_dumpnodes(void)
{
	node_p node;
	int i = 1;

	mtx_lock(&ng_nodelist_mtx);
	SLIST_FOREACH(node, &ng_allnodes, nd_all) {
		printf("[%d] ", i++);
		dumpnode(node, NULL, 0);
	}
	mtx_unlock(&ng_nodelist_mtx);
}
|
|
|
|
|
|
|
|
/*
 * NETGRAPH_DEBUG: walk the global hook list under ng_nodelist_mtx,
 * numbering and dumping each hook.
 */
static void
ng_dumphooks(void)
{
	hook_p hook;
	int i = 1;

	mtx_lock(&ng_nodelist_mtx);
	SLIST_FOREACH(hook, &ng_allhooks, hk_all) {
		printf("[%d] ", i++);
		dumphook(hook, NULL, 0);
	}
	mtx_unlock(&ng_nodelist_mtx);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_debug_ng_dump_items(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
int val;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
val = allocated;
|
|
|
|
i = 1;
|
2007-06-04 18:25:08 +00:00
|
|
|
error = sysctl_handle_int(oidp, &val, 0, req);
|
2001-01-14 23:25:54 +00:00
|
|
|
if (error != 0 || req->newptr == NULL)
|
|
|
|
return (error);
|
|
|
|
if (val == 42) {
|
2001-01-08 05:34:06 +00:00
|
|
|
ng_dumpitems();
|
|
|
|
ng_dumpnodes();
|
|
|
|
ng_dumphooks();
|
2001-01-06 00:46:47 +00:00
|
|
|
}
|
2001-01-14 23:25:54 +00:00
|
|
|
return (0);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
|
|
|
|
2001-01-14 23:25:54 +00:00
|
|
|
/* Expose the handler above as the read/write int debug.ng_dump_items. */
SYSCTL_PROC(_debug, OID_AUTO, ng_dump_items, CTLTYPE_INT | CTLFLAG_RW,
    0, sizeof(int), sysctl_debug_ng_dump_items, "I", "Number of allocated items");

#endif	/* NETGRAPH_DEBUG */
|
2001-01-06 00:46:47 +00:00
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* Worklist routines
|
|
|
|
**********************************************************************/
|
1999-10-21 09:06:11 +00:00
|
|
|
/*
 * Pick a node off the list of nodes with work,
 * try get an item to process off it. Remove the node from the list.
 *
 * Worker thread body: loops forever, sleeping on the worklist until a
 * node with queued work appears, then drains that node's input queue.
 */
static void
ngthread(void *arg)
{
	for (;;) {
		node_p node;

		/* Get node from the worklist. */
		NG_WORKLIST_LOCK();
		while ((node = STAILQ_FIRST(&ng_worklist)) == NULL)
			NG_WORKLIST_SLEEP();
		STAILQ_REMOVE_HEAD(&ng_worklist, nd_input_queue.q_work);
		NG_WORKLIST_UNLOCK();
		/* Run in the vnet the node belongs to. */
		CURVNET_SET(node->nd_vnet);
		CTR3(KTR_NET, "%20s: node [%x] (%p) taken off worklist",
		    __func__, node->nd_ID, node);
		/*
		 * We have the node. We also take over the reference
		 * that the list had on it.
		 * Now process as much as you can, until it won't
		 * let you have another item off the queue.
		 * All this time, keep the reference
		 * that lets us be sure that the node still exists.
		 * Let the reference go at the last minute.
		 */
		for (;;) {
			item_p item;
			int rw;

			NG_QUEUE_LOCK(&node->nd_input_queue);
			item = ng_dequeue(node, &rw);
			if (item == NULL) {
				/* Queue drained: take the node off-work. */
				node->nd_input_queue.q_flags2 &= ~NGQ2_WORKQ;
				NG_QUEUE_UNLOCK(&node->nd_input_queue);
				break; /* go look for another node */
			} else {
				NG_QUEUE_UNLOCK(&node->nd_input_queue);
				NGI_GET_NODE(item, node); /* zaps stored node */
				ng_apply_item(node, item, rw);
				/* Drop the reference the item carried. */
				NG_NODE_UNREF(node);
			}
		}
		/* Drop the reference inherited from the worklist. */
		NG_NODE_UNREF(node);
		CURVNET_RESTORE();
	}
}
|
2000-12-12 18:52:14 +00:00
|
|
|
|
2001-02-24 14:29:47 +00:00
|
|
|
/*
 * XXX
 * It's possible that a debugging NG_NODE_REF may need
 * to be outside the mutex zone
 */

/*
 * Queue a node on the global worklist (if it is not already there)
 * and wake a worker thread.  Caller must hold the node's input-queue
 * mutex; the worklist entry holds its own node reference.
 */
static void
ng_worklist_add(node_p node)
{

	mtx_assert(&node->nd_input_queue.q_mtx, MA_OWNED);

	if ((node->nd_input_queue.q_flags2 & NGQ2_WORKQ) == 0) {
		/*
		 * If we are not already on the work queue,
		 * then put us on.
		 */
		node->nd_input_queue.q_flags2 |= NGQ2_WORKQ;
		NG_NODE_REF(node); /* XXX safe in mutex? */
		NG_WORKLIST_LOCK();
		STAILQ_INSERT_TAIL(&ng_worklist, node, nd_input_queue.q_work);
		NG_WORKLIST_UNLOCK();
		CTR3(KTR_NET, "%20s: node [%x] (%p) put on worklist", __func__,
		    node->nd_ID, node);
		NG_WORKLIST_WAKEUP();
	} else {
		CTR3(KTR_NET, "%20s: node [%x] (%p) already on worklist",
		    __func__, node->nd_ID, node);
	}
}
|
|
|
|
|
|
|
|
|
|
|
|
/***********************************************************************
|
|
|
|
* Externally useable functions to set up a queue item ready for sending
|
|
|
|
***********************************************************************/
|
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
#ifdef NETGRAPH_DEBUG
/*
 * Sanity-check a freshly allocated item: it should carry no stale
 * node or hook pointer.  If it does, complain, drop into the
 * debugger, and clear the stale reference.  Compiles away to nothing
 * without NETGRAPH_DEBUG.
 */
#define ITEM_DEBUG_CHECKS						\
	do {								\
		if (NGI_NODE(item) ) {					\
			printf("item already has node");		\
			kdb_enter(KDB_WHY_NETGRAPH, "has node");	\
			NGI_CLR_NODE(item);				\
		}							\
		if (NGI_HOOK(item) ) {					\
			printf("item already has hook");		\
			kdb_enter(KDB_WHY_NETGRAPH, "has hook");	\
			NGI_CLR_HOOK(item);				\
		}							\
	} while (0)
#else
#define ITEM_DEBUG_CHECKS
#endif
|
|
|
|
|
|
|
|
/*
|
2004-06-30 22:51:29 +00:00
|
|
|
* Put mbuf into the item.
|
2001-01-06 00:46:47 +00:00
|
|
|
* Hook and node references will be removed when the item is dequeued.
|
|
|
|
* (or equivalent)
|
|
|
|
* (XXX) Unsafe because no reference held by peer on remote node.
|
|
|
|
* remote node might go away in this timescale.
|
|
|
|
* We know the hooks can't go away because that would require getting
|
|
|
|
* a writer item on both nodes and we must have at least a reader
|
2005-11-02 14:27:24 +00:00
|
|
|
* here to be able to do this.
|
2001-01-06 00:46:47 +00:00
|
|
|
* Note that the hook loaded is the REMOTE hook.
|
|
|
|
*
|
|
|
|
* This is possibly in the critical path for new data.
|
|
|
|
*/
|
|
|
|
item_p
|
2005-05-16 17:07:03 +00:00
|
|
|
ng_package_data(struct mbuf *m, int flags)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
|
|
|
item_p item;
|
|
|
|
|
2008-04-16 19:52:29 +00:00
|
|
|
if ((item = ng_alloc_item(NGQF_DATA, flags)) == NULL) {
|
2001-01-06 00:46:47 +00:00
|
|
|
NG_FREE_M(m);
|
|
|
|
return (NULL);
|
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
ITEM_DEBUG_CHECKS;
|
2008-04-16 19:52:29 +00:00
|
|
|
item->el_flags |= NGQF_READER;
|
2001-01-06 00:46:47 +00:00
|
|
|
NGI_M(item) = m;
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate a queue item and put items into it..
|
|
|
|
* Evaluate the address as this will be needed to queue it and
|
|
|
|
* to work out what some of the fields should be.
|
|
|
|
* Hook and node references will be removed when the item is dequeued.
|
|
|
|
* (or equivalent)
|
|
|
|
*/
|
|
|
|
item_p
|
2005-05-16 17:07:03 +00:00
|
|
|
ng_package_msg(struct ng_mesg *msg, int flags)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
|
|
|
item_p item;
|
|
|
|
|
2008-04-16 19:52:29 +00:00
|
|
|
if ((item = ng_alloc_item(NGQF_MESG, flags)) == NULL) {
|
2001-01-30 20:51:52 +00:00
|
|
|
NG_FREE_MSG(msg);
|
2001-01-06 00:46:47 +00:00
|
|
|
return (NULL);
|
2000-12-12 18:52:14 +00:00
|
|
|
}
|
2001-01-08 05:34:06 +00:00
|
|
|
ITEM_DEBUG_CHECKS;
|
2005-08-26 15:14:33 +00:00
|
|
|
/* Messages items count as writers unless explicitly exempted. */
|
|
|
|
if (msg->header.cmd & NGM_READONLY)
|
2008-04-16 19:52:29 +00:00
|
|
|
item->el_flags |= NGQF_READER;
|
2005-08-26 15:14:33 +00:00
|
|
|
else
|
2008-04-16 19:52:29 +00:00
|
|
|
item->el_flags |= NGQF_WRITER;
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Set the current lasthook into the queue item
|
|
|
|
*/
|
|
|
|
NGI_MSG(item) = msg;
|
2002-08-22 00:30:03 +00:00
|
|
|
NGI_RETADDR(item) = 0;
|
2001-01-06 00:46:47 +00:00
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2001-01-30 20:51:52 +00:00
|
|
|
/*
 * Fill in a message item's return address: use 'retaddr' when given,
 * otherwise keep any existing address, falling back to the ID of the
 * node 'here'.  A no-op for data and fn items, which carry no retaddr.
 */
#define SET_RETADDR(item, here, retaddr)				\
	do { /* Data or fn items don't have retaddrs */			\
		if ((item->el_flags & NGQF_TYPE) == NGQF_MESG) {	\
			if (retaddr) {					\
				NGI_RETADDR(item) = retaddr;		\
			} else {					\
				/*					\
				 * The old return address should be ok.	\
				 * If there isn't one, use the address	\
				 * here.				\
				 */					\
				if (NGI_RETADDR(item) == 0) {		\
					NGI_RETADDR(item)		\
						= ng_node2ID(here);	\
				}					\
			}						\
		}							\
	} while (0)
|
2000-12-12 18:52:14 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
 * Address an item to the node on the far side of 'hook'.
 * On success the item holds fresh references on the peer hook and peer
 * node; on failure the item is freed and ENETDOWN is returned.
 */
int
ng_address_hook(node_p here, item_p item, hook_p hook, ng_ID_t retaddr)
{
	hook_p peer;
	node_p peernode;
	ITEM_DEBUG_CHECKS;
	/*
	 * Quick sanity check..
	 * Since a hook holds a reference on its node, once we know
	 * that the peer is still connected (even if invalid,) we know
	 * that the peer node is present, though maybe invalid.
	 */
	if ((hook == NULL) ||
	    NG_HOOK_NOT_VALID(hook) ||
	    NG_HOOK_NOT_VALID(peer = NG_HOOK_PEER(hook)) ||
	    NG_NODE_NOT_VALID(peernode = NG_PEER_NODE(hook))) {
		NG_FREE_ITEM(item);
		TRAP_ERROR();
		return (ENETDOWN);
	}

	/*
	 * Transfer our interest to the other (peer) end.
	 */
	NG_HOOK_REF(peer);
	NG_NODE_REF(peernode);
	NGI_SET_HOOK(item, peer);
	NGI_SET_NODE(item, peernode);
	SET_RETADDR(item, here, retaddr);
	return (0);
}
|
|
|
|
|
|
|
|
int
|
2005-11-15 10:54:20 +00:00
|
|
|
ng_address_path(node_p here, item_p item, char *address, ng_ID_t retaddr)
|
2001-01-06 00:46:47 +00:00
|
|
|
{
|
2005-11-15 10:54:20 +00:00
|
|
|
node_p dest = NULL;
|
2001-01-06 00:46:47 +00:00
|
|
|
hook_p hook = NULL;
|
2005-11-15 10:54:20 +00:00
|
|
|
int error;
|
2001-01-06 00:46:47 +00:00
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
ITEM_DEBUG_CHECKS;
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Note that ng_path2noderef increments the reference count
|
|
|
|
* on the node for us if it finds one. So we don't have to.
|
|
|
|
*/
|
|
|
|
error = ng_path2noderef(here, address, &dest, &hook);
|
|
|
|
if (error) {
|
|
|
|
NG_FREE_ITEM(item);
|
2001-01-08 05:34:06 +00:00
|
|
|
return (error);
|
1999-10-21 09:06:11 +00:00
|
|
|
}
|
2001-01-30 20:51:52 +00:00
|
|
|
NGI_SET_NODE(item, dest);
|
|
|
|
if ( hook) {
|
2001-01-08 05:34:06 +00:00
|
|
|
NG_HOOK_REF(hook); /* don't let it go while on the queue */
|
2001-01-30 20:51:52 +00:00
|
|
|
NGI_SET_HOOK(item, hook);
|
|
|
|
}
|
|
|
|
SET_RETADDR(item, here, retaddr);
|
2001-01-06 00:46:47 +00:00
|
|
|
return (0);
|
|
|
|
}
|
1999-10-21 09:06:11 +00:00
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
int
|
|
|
|
ng_address_ID(node_p here, item_p item, ng_ID_t ID, ng_ID_t retaddr)
|
|
|
|
{
|
|
|
|
node_p dest;
|
|
|
|
|
2001-01-08 05:34:06 +00:00
|
|
|
ITEM_DEBUG_CHECKS;
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Find the target node.
|
|
|
|
*/
|
|
|
|
dest = ng_ID2noderef(ID); /* GETS REFERENCE! */
|
|
|
|
if (dest == NULL) {
|
|
|
|
NG_FREE_ITEM(item);
|
2001-01-14 23:25:54 +00:00
|
|
|
TRAP_ERROR();
|
2001-01-06 00:46:47 +00:00
|
|
|
return(EINVAL);
|
|
|
|
}
|
|
|
|
/* Fill out the contents */
|
2001-01-30 20:51:52 +00:00
|
|
|
NGI_SET_NODE(item, dest);
|
|
|
|
NGI_CLR_HOOK(item);
|
|
|
|
SET_RETADDR(item, here, retaddr);
|
1999-10-21 09:06:11 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * special case to send a message to self (e.g. destroy node)
 * Possibly indicate an arrival hook too.
 * Useful for removing that hook :-)
 *
 * Returns a writer item addressed back to 'here' (with fresh node and
 * optional hook references), or NULL with 'msg' freed on allocation
 * failure.
 */
item_p
ng_package_msg_self(node_p here, hook_p hook, struct ng_mesg *msg)
{
	item_p item;

	/*
	 * Find the target node.
	 * If there is a HOOK argument, then use that in preference
	 * to the address.
	 */
	if ((item = ng_alloc_item(NGQF_MESG, NG_NOFLAGS)) == NULL) {
		NG_FREE_MSG(msg);
		return (NULL);
	}

	/* Fill out the contents */
	item->el_flags |= NGQF_WRITER;
	NG_NODE_REF(here);
	NGI_SET_NODE(item, here);
	if (hook) {
		NG_HOOK_REF(hook);
		NGI_SET_HOOK(item, hook);
	}
	NGI_MSG(item) = msg;
	NGI_RETADDR(item) = ng_node2ID(here);
	return (item);
}
|
|
|
|
|
2007-10-19 15:04:17 +00:00
|
|
|
/*
|
|
|
|
* Send ng_item_fn function call to the specified node.
|
|
|
|
*/
|
|
|
|
|
2005-05-16 17:07:03 +00:00
|
|
|
int
|
2007-11-14 11:25:58 +00:00
|
|
|
ng_send_fn(node_p node, hook_p hook, ng_item_fn *fn, void * arg1, int arg2)
|
2001-01-14 23:25:54 +00:00
|
|
|
{
|
|
|
|
|
2007-11-14 11:25:58 +00:00
|
|
|
return ng_send_fn1(node, hook, fn, arg1, arg2, NG_NOFLAGS);
|
2001-01-14 23:25:54 +00:00
|
|
|
}
|
|
|
|
|
2007-10-19 15:04:17 +00:00
|
|
|
int
|
2007-11-14 11:25:58 +00:00
|
|
|
ng_send_fn1(node_p node, hook_p hook, ng_item_fn *fn, void * arg1, int arg2,
|
2007-10-19 15:04:17 +00:00
|
|
|
int flags)
|
|
|
|
{
|
|
|
|
item_p item;
|
|
|
|
|
2008-04-16 19:52:29 +00:00
|
|
|
if ((item = ng_alloc_item(NGQF_FN, flags)) == NULL) {
|
2007-10-19 15:04:17 +00:00
|
|
|
return (ENOMEM);
|
|
|
|
}
|
2008-04-16 19:52:29 +00:00
|
|
|
item->el_flags |= NGQF_WRITER;
|
2007-10-19 15:04:17 +00:00
|
|
|
NG_NODE_REF(node); /* and one for the item */
|
|
|
|
NGI_SET_NODE(item, node);
|
|
|
|
if (hook) {
|
|
|
|
NG_HOOK_REF(hook);
|
|
|
|
NGI_SET_HOOK(item, hook);
|
|
|
|
}
|
2007-11-14 11:25:58 +00:00
|
|
|
NGI_FN(item) = fn;
|
2007-10-19 15:04:17 +00:00
|
|
|
NGI_ARG1(item) = arg1;
|
|
|
|
NGI_ARG2(item) = arg2;
|
|
|
|
return(ng_snd_item(item, flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Send ng_item_fn2 function call to the specified node.
 *
 * If an optional pitem parameter is supplied, its apply
 * callback will be copied to the new item. If also NG_REUSE_ITEM
 * flag is set, no new item will be allocated, but pitem will
 * be used.
 */
int
ng_send_fn2(node_p node, hook_p hook, item_p pitem, ng_item_fn2 *fn, void *arg1,
	int arg2, int flags)
{
	item_p item;

	KASSERT((pitem != NULL || (flags & NG_REUSE_ITEM) == 0),
	    ("%s: NG_REUSE_ITEM but no pitem", __func__));

	/*
	 * Allocate a new item if no supplied or
	 * if we can't use supplied one.
	 */
	if (pitem == NULL || (flags & NG_REUSE_ITEM) == 0) {
		if ((item = ng_alloc_item(NGQF_FN2, flags)) == NULL)
			return (ENOMEM);
		/* Carry the apply callback over from the original item. */
		if (pitem != NULL)
			item->apply = pitem->apply;
	} else {
		/* Recycle pitem itself as an FN2 item. */
		if ((item = ng_realloc_item(pitem, NGQF_FN2, flags)) == NULL)
			return (ENOMEM);
	}

	/* Force writer semantics regardless of the item's previous mode. */
	item->el_flags = (item->el_flags & ~NGQF_RW) | NGQF_WRITER;
	NG_NODE_REF(node); /* and one for the item */
	NGI_SET_NODE(item, node);
	if (hook) {
		NG_HOOK_REF(hook);
		NGI_SET_HOOK(item, hook);
	}
	NGI_FN2(item) = fn;
	NGI_ARG1(item) = arg1;
	NGI_ARG2(item) = arg2;
	return(ng_snd_item(item, flags));
}
|
|
|
|
|
2005-11-15 10:54:20 +00:00
|
|
|
/*
 * Official timeout routines for Netgraph nodes.
 */

/*
 * callout(9) handler: set the vnet context of the item's target node,
 * then inject the pre-built item into netgraph.
 */
static void
ng_callout_trampoline(void *arg)
{
	item_p item = arg;

	CURVNET_SET(NGI_NODE(item)->nd_vnet);
	ng_snd_item(item, 0);
	CURVNET_RESTORE();
}
|
|
|
|
|
|
|
|
|
2004-11-02 21:24:30 +00:00
|
|
|
/*
 * Schedule 'fn' to be delivered to 'node' (optionally via 'hook')
 * after 'ticks'.  Builds a writer fn item and arms the callout with
 * ng_callout_trampoline.  If rearming displaced a still-pending item
 * (callout_reset returned 1), free that old item so it doesn't leak.
 * Returns ENOMEM on allocation failure, 0 otherwise.
 */
int
ng_callout(struct callout *c, node_p node, hook_p hook, int ticks,
    ng_item_fn *fn, void * arg1, int arg2)
{
	item_p item, oitem;

	if ((item = ng_alloc_item(NGQF_FN, NG_NOFLAGS)) == NULL)
		return (ENOMEM);

	item->el_flags |= NGQF_WRITER;
	NG_NODE_REF(node);		/* and one for the item */
	NGI_SET_NODE(item, node);
	if (hook) {
		NG_HOOK_REF(hook);
		NGI_SET_HOOK(item, hook);
	}
	NGI_FN(item) = fn;
	NGI_ARG1(item) = arg1;
	NGI_ARG2(item) = arg2;
	oitem = c->c_arg;
	if (callout_reset(c, ticks, &ng_callout_trampoline, item) == 1 &&
	    oitem != NULL)
		NG_FREE_ITEM(oitem);
	return (0);
}
|
|
|
|
|
|
|
|
/* A special modified version of untimeout() */

/*
 * Cancel a netgraph callout.  If the callout was stopped before
 * firing and it really carried one of our items for this node, free
 * the item (dropping the references it held).  Returns the
 * callout_stop() result.
 */
int
ng_uncallout(struct callout *c, node_p node)
{
	item_p item;
	int rval;

	KASSERT(c != NULL, ("ng_uncallout: NULL callout"));
	KASSERT(node != NULL, ("ng_uncallout: NULL node"));

	rval = callout_stop(c);
	item = c->c_arg;
	/* Do an extra check */
	if ((rval > 0) && (c->c_func == &ng_callout_trampoline) &&
	    (NGI_NODE(item) == node)) {
		/*
		 * We successfully removed it from the queue before it ran
		 * So now we need to unreference everything that was
		 * given extra references. (NG_FREE_ITEM does this).
		 */
		NG_FREE_ITEM(item);
	}
	/* Clear the stale item pointer so ng_callout() won't free it again. */
	c->c_arg = NULL;

	return (rval);
}
|
|
|
|
|
2001-01-06 00:46:47 +00:00
|
|
|
/*
|
|
|
|
* Set the address, if none given, give the node here.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
ng_replace_retaddr(node_p here, item_p item, ng_ID_t retaddr)
|
|
|
|
{
|
|
|
|
if (retaddr) {
|
|
|
|
NGI_RETADDR(item) = retaddr;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The old return address should be ok.
|
|
|
|
* If there isn't one, use the address here.
|
|
|
|
*/
|
|
|
|
NGI_RETADDR(item) = ng_node2ID(here);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#define TESTING
#ifdef TESTING
/* just test all the macros */
/*
 * Compile-time exerciser: instantiates every public send/forward
 * macro once so breakage in the macro definitions is caught at build
 * time.  Never meant to be called.
 */
void
ng_macro_test(item_p item);
void
ng_macro_test(item_p item)
{
	node_p node = NULL;
	hook_p hook = NULL;
	struct mbuf *m;
	struct ng_mesg *msg;
	ng_ID_t retaddr;
	int error;

	NGI_GET_M(item, m);
	NGI_GET_MSG(item, msg);
	retaddr = NGI_RETADDR(item);
	NG_SEND_DATA(error, hook, m, NULL);
	NG_SEND_DATA_ONLY(error, hook, m);
	NG_FWD_NEW_DATA(error, item, hook, m);
	NG_FWD_ITEM_HOOK(error, item, hook);
	NG_SEND_MSG_HOOK(error, node, msg, hook, retaddr);
	NG_SEND_MSG_ID(error, node, msg, retaddr, retaddr);
	NG_SEND_MSG_PATH(error, node, msg, ".:", retaddr);
	NG_FWD_MSG_HOOK(error, node, item, hook, retaddr);
}
#endif /* TESTING */
|
1999-10-21 09:06:11 +00:00
|
|
|
|