/*-
 * Copyright (c) 2010,2013 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology,
 * made possible in part by grants from the FreeBSD Foundation and Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by Lawrence Stewart under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/khelp.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/module_khelp.h>
#include <sys/osd.h>
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <net/vnet.h>

struct hhook {
        hhook_func_t            hhk_func;
        struct helper           *hhk_helper;
        void                    *hhk_udata;
        STAILQ_ENTRY(hhook)     hhk_next;
};

static MALLOC_DEFINE(M_HHOOK, "hhook", "Helper hooks are linked off hhook_head lists");

LIST_HEAD(hhookheadhead, hhook_head);
struct hhookheadhead hhook_head_list;
VNET_DEFINE(struct hhookheadhead, hhook_vhead_list);
#define V_hhook_vhead_list      VNET(hhook_vhead_list)
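
/*
 * Each registered hook point is represented by a hhook_head linked on the
 * global hhook_head_list (and additionally on the per-vnet V_hhook_vhead_list
 * if the hook point is virtualised). Each hhook_head carries a STAILQ of
 * hhook entries, one per helper hook function registered with it.
 */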

static struct mtx hhook_head_list_lock;
MTX_SYSINIT(hhookheadlistlock, &hhook_head_list_lock, "hhook_head list lock",
    MTX_DEF);

/* Protected by hhook_head_list_lock. */
static uint32_t n_hhookheads;

/* Private function prototypes. */
static void hhook_head_destroy(struct hhook_head *hhh);
void khelp_new_hhook_registered(struct hhook_head *hhh, uint32_t flags);

#define HHHLIST_LOCK()          mtx_lock(&hhook_head_list_lock)
#define HHHLIST_UNLOCK()        mtx_unlock(&hhook_head_list_lock)
#define HHHLIST_LOCK_ASSERT()   mtx_assert(&hhook_head_list_lock, MA_OWNED)

#define HHH_LOCK_INIT(hhh)      rm_init(&(hhh)->hhh_lock, "hhook_head rm lock")
#define HHH_LOCK_DESTROY(hhh)   rm_destroy(&(hhh)->hhh_lock)
#define HHH_WLOCK(hhh)          rm_wlock(&(hhh)->hhh_lock)
#define HHH_WUNLOCK(hhh)        rm_wunlock(&(hhh)->hhh_lock)
#define HHH_RLOCK(hhh, rmpt)    rm_rlock(&(hhh)->hhh_lock, (rmpt))
#define HHH_RUNLOCK(hhh, rmpt)  rm_runlock(&(hhh)->hhh_lock, (rmpt))

/*
 * Run all helper hook functions for a given hook point.
 */
void
hhook_run_hooks(struct hhook_head *hhh, void *ctx_data, struct osd *hosd)
{
        struct hhook *hhk;
        void *hdata;
        struct rm_priotracker rmpt;

        KASSERT(hhh->hhh_refcount > 0, ("hhook_head %p refcount is 0", hhh));

        HHH_RLOCK(hhh, &rmpt);
        STAILQ_FOREACH(hhk, &hhh->hhh_hooks, hhk_next) {
                if (hhk->hhk_helper != NULL &&
                    hhk->hhk_helper->h_flags & HELPER_NEEDS_OSD) {
                        hdata = osd_get(OSD_KHELP, hosd, hhk->hhk_helper->h_id);
                        if (hdata == NULL)
                                continue;
                } else
                        hdata = NULL;

                /*
                 * XXXLAS: We currently ignore the int returned by the hook,
                 * but will likely want to handle it in future to allow hhook
                 * to be used like pfil and effect changes at the hhook
                 * calling site e.g. we could define a new hook type of
                 * HHOOK_TYPE_PFIL and standardise what particular return
                 * values mean and set the context data to pass exactly the
                 * same information as pfil hooks currently receive, thus
                 * replicating pfil with hhook.
                 */
                hhk->hhk_func(hhh->hhh_type, hhh->hhh_id, hhk->hhk_udata,
                    ctx_data, hdata, hosd);
        }
        HHH_RUNLOCK(hhh, &rmpt);
}
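
/*
 * Illustrative sketch, not part of the original file: a subsystem that
 * exports a hook point would typically pair hhook_head_get() with
 * hhook_run_hooks() and hhook_head_release() as below. The TCP type/id
 * constants are examples only; a real caller uses whatever identifiers it
 * registered via hhook_head_register().
 *
 *      struct hhook_head *hhh;
 *
 *      hhh = hhook_head_get(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN);
 *      if (hhh != NULL) {
 *              hhook_run_hooks(hhh, &ctx_data, hosd);
 *              hhook_head_release(hhh);
 *      }
 */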

/*
 * Register a new helper hook function with a helper hook point.
 */
int
hhook_add_hook(struct hhook_head *hhh, struct hookinfo *hki, uint32_t flags)
{
        struct hhook *hhk, *tmp;
        int error;

        error = 0;

        if (hhh == NULL)
                return (ENOENT);

        hhk = malloc(sizeof(struct hhook), M_HHOOK,
            M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

        if (hhk == NULL)
                return (ENOMEM);

        hhk->hhk_helper = hki->hook_helper;
        hhk->hhk_func = hki->hook_func;
        hhk->hhk_udata = hki->hook_udata;

        HHH_WLOCK(hhh);
        STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
                if (tmp->hhk_func == hki->hook_func &&
                    tmp->hhk_udata == hki->hook_udata) {
                        /* The helper hook function is already registered. */
                        error = EEXIST;
                        break;
                }
        }

        if (!error) {
                STAILQ_INSERT_TAIL(&hhh->hhh_hooks, hhk, hhk_next);
                hhh->hhh_nhooks++;
        } else
                free(hhk, M_HHOOK);

        HHH_WUNLOCK(hhh);

        return (error);
}

/*
 * Register a helper hook function with a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 *
 * The logic is unfortunately far more complex than for
 * hhook_remove_hook_lookup() because hhook_add_hook() can call malloc() with
 * M_WAITOK and thus we cannot call hhook_add_hook() with the
 * hhook_head_list_lock held.
 *
 * The logic assembles an array of hhook_head structs that correspond to the
 * helper hook point being hooked and bumps the refcount on each (all done
 * with the hhook_head_list_lock held). The hhook_head_list_lock is then
 * dropped, and hhook_add_hook() is called and the refcount dropped for each
 * hhook_head struct in the array.
 */
int
hhook_add_hook_lookup(struct hookinfo *hki, uint32_t flags)
{
        struct hhook_head **heads_to_hook, *hhh;
        int error, i, n_heads_to_hook;

tryagain:
        error = i = 0;
        /*
         * Accessing n_hhookheads without hhook_head_list_lock held opens up a
         * race with hhook_head_register() which we are unlikely to lose, but
         * nonetheless have to cope with - hence the complex goto logic.
         */
        n_heads_to_hook = n_hhookheads;
        heads_to_hook = malloc(n_heads_to_hook * sizeof(struct hhook_head *),
            M_HHOOK, flags & HHOOK_WAITOK ? M_WAITOK : M_NOWAIT);
        if (heads_to_hook == NULL)
                return (ENOMEM);

        HHHLIST_LOCK();
        LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
                if (hhh->hhh_type == hki->hook_type &&
                    hhh->hhh_id == hki->hook_id) {
                        if (i < n_heads_to_hook) {
                                heads_to_hook[i] = hhh;
                                refcount_acquire(&heads_to_hook[i]->hhh_refcount);
                                i++;
                        } else {
                                /*
                                 * We raced with hhook_head_register() which
                                 * inserted a hhook_head that we need to hook
                                 * but did not malloc space for. Abort this
                                 * run and try again.
                                 */
                                for (i--; i >= 0; i--)
                                        refcount_release(&heads_to_hook[i]->hhh_refcount);
                                free(heads_to_hook, M_HHOOK);
                                HHHLIST_UNLOCK();
                                goto tryagain;
                        }
                }
        }
        HHHLIST_UNLOCK();

        for (i--; i >= 0; i--) {
                if (!error)
                        error = hhook_add_hook(heads_to_hook[i], hki, flags);
                refcount_release(&heads_to_hook[i]->hhh_refcount);
        }

        free(heads_to_hook, M_HHOOK);

        return (error);
}
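
/*
 * Illustrative sketch, not part of the original file: registering a helper
 * hook function against all instances of a hook point. example_hook and the
 * TCP type/id constants are hypothetical placeholders; the function must
 * match the hhook_func_t signature used by hhook_run_hooks() above, and a
 * NULL hook_helper is valid (see the hhk_helper NULL check in
 * hhook_run_hooks()).
 *
 *      static int
 *      example_hook(int32_t hhook_type, int32_t hhook_id, void *udata,
 *          void *ctx_data, void *hdata, struct osd *hosd)
 *      {
 *
 *              return (0);
 *      }
 *
 *      struct hookinfo hki = {
 *              .hook_func = &example_hook,
 *              .hook_helper = NULL,
 *              .hook_udata = NULL,
 *              .hook_id = HHOOK_TCP_EST_IN,
 *              .hook_type = HHOOK_TYPE_TCP,
 *      };
 *
 *      error = hhook_add_hook_lookup(&hki, HHOOK_WAITOK);
 */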

/*
 * Remove a helper hook function from a helper hook point.
 */
int
hhook_remove_hook(struct hhook_head *hhh, struct hookinfo *hki)
{
        struct hhook *tmp;

        if (hhh == NULL)
                return (ENOENT);

        HHH_WLOCK(hhh);
        STAILQ_FOREACH(tmp, &hhh->hhh_hooks, hhk_next) {
                if (tmp->hhk_func == hki->hook_func &&
                    tmp->hhk_udata == hki->hook_udata) {
                        STAILQ_REMOVE(&hhh->hhh_hooks, tmp, hhook, hhk_next);
                        free(tmp, M_HHOOK);
                        hhh->hhh_nhooks--;
                        break;
                }
        }
        HHH_WUNLOCK(hhh);

        return (0);
}

/*
 * Remove a helper hook function from a helper hook point (including all
 * virtual instances of the hook point if it is virtualised).
 */
int
hhook_remove_hook_lookup(struct hookinfo *hki)
{
        struct hhook_head *hhh;

        HHHLIST_LOCK();
        LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
                if (hhh->hhh_type == hki->hook_type &&
                    hhh->hhh_id == hki->hook_id)
                        hhook_remove_hook(hhh, hki);
        }
        HHHLIST_UNLOCK();

        return (0);
}

/*
 * Register a new helper hook point.
 */
int
hhook_head_register(int32_t hhook_type, int32_t hhook_id,
    struct hhook_head **hhh, uint32_t flags)
{
        struct hhook_head *tmphhh;

        tmphhh = hhook_head_get(hhook_type, hhook_id);

        if (tmphhh != NULL) {
                /* Hook point previously registered. */
                hhook_head_release(tmphhh);
                return (EEXIST);
        }

        tmphhh = malloc(sizeof(struct hhook_head), M_HHOOK,
            M_ZERO | ((flags & HHOOK_WAITOK) ? M_WAITOK : M_NOWAIT));

        if (tmphhh == NULL)
                return (ENOMEM);

        tmphhh->hhh_type = hhook_type;
        tmphhh->hhh_id = hhook_id;
        tmphhh->hhh_nhooks = 0;
        STAILQ_INIT(&tmphhh->hhh_hooks);
        HHH_LOCK_INIT(tmphhh);
        refcount_init(&tmphhh->hhh_refcount, 1);

        HHHLIST_LOCK();
        if (flags & HHOOK_HEADISINVNET) {
                tmphhh->hhh_flags |= HHH_ISINVNET;
#ifdef VIMAGE
                KASSERT(curvnet != NULL, ("curvnet is NULL"));
                tmphhh->hhh_vid = (uintptr_t)curvnet;
                LIST_INSERT_HEAD(&V_hhook_vhead_list, tmphhh, hhh_vnext);
#endif
        }
        LIST_INSERT_HEAD(&hhook_head_list, tmphhh, hhh_next);
        n_hhookheads++;
        HHHLIST_UNLOCK();

        khelp_new_hhook_registered(tmphhh, flags);

        if (hhh != NULL)
                *hhh = tmphhh;
        else
                refcount_release(&tmphhh->hhh_refcount);

        return (0);
}
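
/*
 * Illustrative sketch, not part of the original file: a subsystem exports a
 * per-vnet hook point by registering it during initialisation. The names and
 * type/id constants are hypothetical placeholders.
 *
 *      VNET_DEFINE(struct hhook_head *, example_hhh);
 *      #define V_example_hhh   VNET(example_hhh)
 *
 *      error = hhook_head_register(HHOOK_TYPE_TCP, HHOOK_TCP_EST_IN,
 *          &V_example_hhh, HHOOK_WAITOK | HHOOK_HEADISINVNET);
 */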

static void
hhook_head_destroy(struct hhook_head *hhh)
{
        struct hhook *tmp, *tmp2;

        HHHLIST_LOCK_ASSERT();
        KASSERT(n_hhookheads > 0, ("n_hhookheads should be > 0"));

        LIST_REMOVE(hhh, hhh_next);
#ifdef VIMAGE
        if (hhook_head_is_virtualised(hhh) == HHOOK_HEADISINVNET)
                LIST_REMOVE(hhh, hhh_vnext);
#endif
        HHH_WLOCK(hhh);
        STAILQ_FOREACH_SAFE(tmp, &hhh->hhh_hooks, hhk_next, tmp2)
                free(tmp, M_HHOOK);
        HHH_WUNLOCK(hhh);
        HHH_LOCK_DESTROY(hhh);
        free(hhh, M_HHOOK);
        n_hhookheads--;
}

/*
 * Remove a helper hook point.
 */
int
hhook_head_deregister(struct hhook_head *hhh)
{
        int error;

        error = 0;

        HHHLIST_LOCK();
        if (hhh == NULL)
                error = ENOENT;
        else if (hhh->hhh_refcount > 1)
                error = EBUSY;
        else
                hhook_head_destroy(hhh);
        HHHLIST_UNLOCK();

        return (error);
}

/*
 * Remove a helper hook point via a hhook_head lookup.
 */
int
hhook_head_deregister_lookup(int32_t hhook_type, int32_t hhook_id)
{
        struct hhook_head *hhh;
        int error;

        hhh = hhook_head_get(hhook_type, hhook_id);
        error = hhook_head_deregister(hhh);

        if (error == EBUSY)
                hhook_head_release(hhh);

        return (error);
}

/*
 * Lookup and return the hhook_head struct associated with the specified type
 * and id, or NULL if not found. If found, the hhook_head's refcount is bumped.
 */
struct hhook_head *
hhook_head_get(int32_t hhook_type, int32_t hhook_id)
{
        struct hhook_head *hhh;

        HHHLIST_LOCK();
        LIST_FOREACH(hhh, &hhook_head_list, hhh_next) {
                if (hhh->hhh_type == hhook_type && hhh->hhh_id == hhook_id) {
#ifdef VIMAGE
                        if (hhook_head_is_virtualised(hhh) ==
                            HHOOK_HEADISINVNET) {
                                KASSERT(curvnet != NULL, ("curvnet is NULL"));
                                if (hhh->hhh_vid != (uintptr_t)curvnet)
                                        continue;
                        }
#endif
                        refcount_acquire(&hhh->hhh_refcount);
                        break;
                }
        }
        HHHLIST_UNLOCK();

        return (hhh);
}
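
/*
 * Release a reference on a hhook_head obtained via hhook_head_get().
 */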
void
hhook_head_release(struct hhook_head *hhh)
{

        refcount_release(&hhh->hhh_refcount);
}

/*
 * Check the hhook_head private flags and return the appropriate public
 * representation of the flag to the caller. The function is implemented in a
 * way that allows us to cope with other subsystems becoming virtualised in
 * the future.
 */
uint32_t
hhook_head_is_virtualised(struct hhook_head *hhh)
{
        uint32_t ret;

        ret = 0;

        if (hhh != NULL) {
                if (hhh->hhh_flags & HHH_ISINVNET)
                        ret = HHOOK_HEADISINVNET;
        }

        return (ret);
}
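
/*
 * Look up a hook point by type/id and report whether it is virtualised,
 * acquiring and releasing the hhook_head reference internally.
 */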
uint32_t
hhook_head_is_virtualised_lookup(int32_t hook_type, int32_t hook_id)
{
        struct hhook_head *hhh;
        uint32_t ret;

        hhh = hhook_head_get(hook_type, hook_id);

        if (hhh == NULL)
                return (0);

        ret = hhook_head_is_virtualised(hhh);
        hhook_head_release(hhh);

        return (ret);
}

/*
 * Vnet created and being initialised.
 */
static void
hhook_vnet_init(const void *unused __unused)
{

        LIST_INIT(&V_hhook_vhead_list);
}

/*
 * Vnet being torn down and destroyed.
 */
static void
hhook_vnet_uninit(const void *unused __unused)
{
        struct hhook_head *hhh, *tmphhh;

        /*
         * If subsystems which export helper hook points use the hhook KPI
         * correctly, the loop below should have no work to do because the
         * subsystem should have already called hhook_head_deregister().
         */
        HHHLIST_LOCK();
        LIST_FOREACH_SAFE(hhh, &V_hhook_vhead_list, hhh_vnext, tmphhh) {
                printf("%s: hhook_head type=%d, id=%d cleanup required\n",
                    __func__, hhh->hhh_type, hhh->hhh_id);
                hhook_head_destroy(hhh);
        }
        HHHLIST_UNLOCK();
}

/*
 * When a vnet is created and being initialised, init the V_hhook_vhead_list.
 */
VNET_SYSINIT(hhook_vnet_init, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_init, NULL);

/*
 * The hhook KPI provides a mechanism for subsystems which export helper hook
 * points to clean up on vnet tear down, but in case the KPI is misused,
 * provide a function to clean up and free memory for a vnet being destroyed.
 */
VNET_SYSUNINIT(hhook_vnet_uninit, SI_SUB_INIT_IF, SI_ORDER_FIRST,
    hhook_vnet_uninit, NULL);