freebsd-dev/sys/security/mac/mac_framework.c

/*-
* Copyright (c) 1999-2002, 2006, 2009 Robert N. M. Watson
* Copyright (c) 2001 Ilmar S. Habibulin
* Copyright (c) 2001-2005 Networks Associates Technology, Inc.
* Copyright (c) 2005-2006 SPARTA, Inc.
* Copyright (c) 2008-2009 Apple Inc.
* All rights reserved.
*
* This software was developed by Robert Watson and Ilmar Habibulin for the
* TrustedBSD Project.
*
* This software was developed for the FreeBSD Project in part by Network
* Associates Laboratories, the Security Research Division of Network
* Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"),
* as part of the DARPA CHATS research program.
*
* This software was enhanced by SPARTA ISSO under SPAWAR contract
* N66001-04-C-6019 ("SEFOS").
*
* This software was developed at the University of Cambridge Computer
* Laboratory with support from a grant from Google, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*-
* Framework for extensible kernel access control. This file contains core
* kernel infrastructure for the TrustedBSD MAC Framework, including policy
* registration, versioning, locking, error composition operator, and system
* calls.
*
* The MAC Framework implements three programming interfaces:
*
* - The kernel MAC interface, defined in mac_framework.h, and invoked
* throughout the kernel to request security decisions, notify of security
* related events, etc.
*
* - The MAC policy module interface, defined in mac_policy.h, which is
* implemented by MAC policy modules and invoked by the MAC Framework to
* forward kernel security requests and notifications to policy modules.
*
* - The user MAC API, defined in mac.h, which allows user programs to query
* and set label state on objects.
*
* The majority of the MAC Framework implementation may be found in
* src/sys/security/mac. Sample policy modules may be found in
* src/sys/security/mac_*.
*/
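/*
 * Illustrative sketch (editorial, not part of the original file): a policy
 * module implements the mac_policy.h interface by filling out a
 * mac_policy_ops vector and registering it with MAC_POLICY_SET().  The
 * entry point shown and its signature are examples and may differ between
 * FreeBSD versions; "mac_example" is a hypothetical policy name.
 *
 *	static int
 *	example_vnode_check_open(struct ucred *cred, struct vnode *vp,
 *	    struct label *vplabel, accmode_t accmode)
 *	{
 *		return (0);	// 0 permits; an errno value denies
 *	}
 *
 *	static struct mac_policy_ops example_ops = {
 *		.mpo_vnode_check_open = example_vnode_check_open,
 *	};
 *
 *	MAC_POLICY_SET(&example_ops, mac_example, "Example policy",
 *	    MPC_LOADTIME_FLAG_UNLOADOK, NULL);
 */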
#include "opt_mac.h"
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/sdt.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <security/mac/mac_framework.h>
#include <security/mac/mac_internal.h>
#include <security/mac/mac_policy.h>
/*
* DTrace SDT providers for MAC.
*/
SDT_PROVIDER_DEFINE(mac);
SDT_PROVIDER_DEFINE(mac_framework);
SDT_PROBE_DEFINE2(mac, kernel, policy, modevent, "int",
"struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, kernel, policy, register,
"struct mac_policy_conf *");
SDT_PROBE_DEFINE1(mac, kernel, policy, unregister,
"struct mac_policy_conf *");
/*
* Root sysctl node for all MAC and MAC policy controls.
*/
SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW, 0,
"TrustedBSD MAC policy controls");
/*
* Declare that the kernel provides MAC support, version 3 (FreeBSD 7.x).
* This permits modules to refuse to be loaded if the necessary support isn't
* present, even if it's pre-boot.
*/
MODULE_VERSION(kernel_mac_support, MAC_VERSION);
static unsigned int mac_version = MAC_VERSION;
SYSCTL_UINT(_security_mac, OID_AUTO, version, CTLFLAG_RD, &mac_version, 0,
"");
/*
* Labels consist of an indexed set of "slots", which are allocated to
* policies
* as required. The MAC Framework maintains a bitmask of slots allocated so
* far to prevent reuse. Slots cannot be reused, as the MAC Framework
* guarantees that newly allocated slots in labels will be NULL unless
* otherwise initialized, and because we do not have a mechanism to garbage
* collect slots on policy unload. As labeled policies tend to be statically
* loaded during boot, and not frequently unloaded and reloaded, this is not
* generally an issue.
*/
#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif
static unsigned int mac_max_slots = MAC_MAX_SLOTS;
static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD, &mac_max_slots,
0, "");
/*
* Has the kernel started generating labeled objects yet? All read/write
* access to this variable is serialized during the boot process. Following
* the end of serialization, we don't update this flag; no locking.
*/
static int mac_late = 0;
/*
* Each policy declares a mask of object types requiring labels to be
* allocated for them. For convenience, we combine and cache the bitwise or
* of the per-policy object flags to track whether we will allocate a label
* for an object type at run-time.
*/
uint64_t mac_labeled;
SYSCTL_UQUAD(_security_mac, OID_AUTO, labeled, CTLFLAG_RD, &mac_labeled, 0,
"Mask of object types being labeled");
MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");
/*
* MAC policy modules are placed in one of two lists: mac_static_policy_list,
* for policies that are loaded early and cannot be unloaded, and
* mac_policy_list, which holds policies either loaded later in the boot
* cycle or that may be unloaded. The static policy list does not require
* locks to iterate over, but the dynamic list requires synchronization.
* Support for dynamic policy loading can be compiled out using the
* MAC_STATIC kernel option.
*
* The dynamic policy list is protected by two locks: modifying the list
* requires both locks to be held exclusively. One of the locks,
* mac_policy_rm, is acquired over policy entry points that will never sleep;
* the other, mac_policy_sx, is acquired over policy entry points that may
* sleep. The former category will be used when kernel locks may be held
* over calls to the MAC Framework, during network processing in ithreads,
* etc. The latter will tend to involve potentially blocking memory
* allocations, extended attribute I/O, etc.
*/
#ifndef MAC_STATIC
static struct rmlock mac_policy_rm; /* Non-sleeping entry points. */
static struct sx mac_policy_sx; /* Sleeping entry points. */
#endif
struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;
u_int mac_policy_count; /* Registered policy count. */
static void mac_policy_xlock(void);
static void mac_policy_xlock_assert(void);
static void mac_policy_xunlock(void);
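/*
 * Illustrative sketch (editorial): the composition macros in mac_internal.h
 * consult the static list without locking, then take the appropriate
 * sleepable or non-sleepable read lock (via the accessors below, which
 * short-circuit before mac_late) only if the dynamic list is non-empty.
 * The pattern is roughly the following; "mpo_foo_check" is a placeholder
 * entry point name:
 *
 *	error = 0;
 *	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
 *		if (mpc->mpc_ops->mpo_foo_check != NULL)
 *			error = mac_error_select(
 *			    mpc->mpc_ops->mpo_foo_check(args), error);
 *	}
 *	if (!LIST_EMPTY(&mac_policy_list)) {
 *		mac_policy_slock_sleep();
 *		LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
 *			if (mpc->mpc_ops->mpo_foo_check != NULL)
 *				error = mac_error_select(
 *				    mpc->mpc_ops->mpo_foo_check(args), error);
 *		}
 *		mac_policy_sunlock_sleep();
 *	}
 */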
void
mac_policy_slock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_rlock(&mac_policy_rm, tracker);
#endif
}
void
mac_policy_slock_sleep(void)
{

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_slock_sleep");

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	sx_slock(&mac_policy_sx);
#endif
}
void
mac_policy_sunlock_nosleep(struct rm_priotracker *tracker)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	rm_runlock(&mac_policy_rm, tracker);
#endif
}
void
mac_policy_sunlock_sleep(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	sx_sunlock(&mac_policy_sx);
#endif
}
static void
mac_policy_xlock(void)
{
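	/*
	 * Warn (under WITNESS) if the caller holds any non-sleepable
	 * locks other than Giant, since acquiring the exclusive policy
	 * locks below may sleep.
	 */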
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"mac_policy_xlock()");
#ifndef MAC_STATIC
if (!mac_late)
return;
sx_xlock(&mac_policy_sx);
rm_wlock(&mac_policy_rm);
#endif
}
static void
mac_policy_xunlock(void)
{
#ifndef MAC_STATIC
if (!mac_late)
return;
rm_wunlock(&mac_policy_rm);
sx_xunlock(&mac_policy_sx);
#endif
}
static void
mac_policy_xlock_assert(void)
{
#ifndef MAC_STATIC
if (!mac_late)
return;
/* XXXRW: rm_assert(&mac_policy_rm, RA_WLOCKED); */
sx_assert(&mac_policy_sx, SA_XLOCKED);
#endif
}
/*
* Initialize the MAC subsystem, including appropriate SMP locks.
*/
static void
mac_init(void)
{
LIST_INIT(&mac_static_policy_list);
LIST_INIT(&mac_policy_list);
mac_labelzone_init();
#ifndef MAC_STATIC
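	/*
	 * Note: both locks are created with witness checking disabled;
	 * they are read-acquired from many contexts throughout the
	 * kernel, which would otherwise likely produce false-positive
	 * lock-order reports.
	 */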
rm_init_flags(&mac_policy_rm, "mac_policy_rm", RM_NOWITNESS |
RM_RECURSE);
sx_init_flags(&mac_policy_sx, "mac_policy_sx", SX_NOWITNESS);
#endif
}
/*
* For the purposes of modules that want to know if they were loaded "early",
* set the mac_late flag once we've processed modules either linked into the
 * kernel, or loaded before kernel startup.
*/
static void
mac_late_init(void)
{
mac_late = 1;
}
/*
* Given a policy, derive from its set of non-NULL label init methods what
* object types the policy is interested in.
*/
static uint64_t
mac_policy_getlabeled(struct mac_policy_conf *mpc)
{
uint64_t labeled;
#define	MPC_FLAG(method, flag)						\
	if (mpc->mpc_ops->mpo_ ## method != NULL)			\
		labeled |= (flag);

	labeled = 0;
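	/*
	 * For example, the first invocation below expands (roughly) to:
	 *
	 *	if (mpc->mpc_ops->mpo_cred_init_label != NULL)
	 *		labeled |= (MPC_OBJECT_CRED);
	 */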
MPC_FLAG(cred_init_label, MPC_OBJECT_CRED);
MPC_FLAG(proc_init_label, MPC_OBJECT_PROC);
MPC_FLAG(vnode_init_label, MPC_OBJECT_VNODE);
MPC_FLAG(inpcb_init_label, MPC_OBJECT_INPCB);
MPC_FLAG(socket_init_label, MPC_OBJECT_SOCKET);
MPC_FLAG(devfs_init_label, MPC_OBJECT_DEVFS);
MPC_FLAG(mbuf_init_label, MPC_OBJECT_MBUF);
MPC_FLAG(ipq_init_label, MPC_OBJECT_IPQ);
MPC_FLAG(ifnet_init_label, MPC_OBJECT_IFNET);
MPC_FLAG(bpfdesc_init_label, MPC_OBJECT_BPFDESC);
MPC_FLAG(pipe_init_label, MPC_OBJECT_PIPE);
MPC_FLAG(mount_init_label, MPC_OBJECT_MOUNT);
MPC_FLAG(posixsem_init_label, MPC_OBJECT_POSIXSEM);
MPC_FLAG(posixshm_init_label, MPC_OBJECT_POSIXSHM);
MPC_FLAG(sysvmsg_init_label, MPC_OBJECT_SYSVMSG);
MPC_FLAG(sysvmsq_init_label, MPC_OBJECT_SYSVMSQ);
MPC_FLAG(sysvsem_init_label, MPC_OBJECT_SYSVSEM);
MPC_FLAG(sysvshm_init_label, MPC_OBJECT_SYSVSHM);
MPC_FLAG(syncache_init_label, MPC_OBJECT_SYNCACHE);
MPC_FLAG(ip6q_init_label, MPC_OBJECT_IP6Q);
#undef MPC_FLAG
return (labeled);
}
/*
* When policies are loaded or unloaded, walk the list of registered policies
 * and build mac_labeled, a bitmask representing the union of all objects
* requiring labels across all policies.
*/
static void
mac_policy_update(void)
{
struct mac_policy_conf *mpc;
mac_policy_xlock_assert();
mac_labeled = 0;
mac_policy_count = 0;
LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
mac_labeled |= mac_policy_getlabeled(mpc);
mac_policy_count++;
}
LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
mac_labeled |= mac_policy_getlabeled(mpc);
mac_policy_count++;
}
}
static int
mac_policy_register(struct mac_policy_conf *mpc)
{
struct mac_policy_conf *tmpc;
int error, slot, static_entry;
error = 0;
/*
* We don't technically need exclusive access while !mac_late, but
* hold it for assertion consistency.
*/
mac_policy_xlock();
/*
* If the module can potentially be unloaded, or we're loading late,
* we have to stick it in the non-static list and pay an extra
* performance overhead. Otherwise, we can pay a light locking cost
* and stick it in the static list.
*/
static_entry = (!mac_late &&
!(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));
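/*
 * Refuse duplicate registration: the target list is searched for an
 * existing policy with the same name before this one is accepted.
 */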
if (static_entry) {
LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
error = EEXIST;
goto out;
}
}
} else {
LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
error = EEXIST;
goto out;
}
}
}
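/*
 * Policies that want per-object label storage (mpc_field_off != NULL)
 * are assigned a label slot here: ffs() finds the lowest free bit in
 * mac_slot_offsets_free, that bit is claimed, and the zero-based slot
 * number is written back through mpc_field_off.  If no slots remain,
 * the registration fails with ENOMEM.
 */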
if (mpc->mpc_field_off != NULL) {
slot = ffs(mac_slot_offsets_free);
if (slot == 0) {
error = ENOMEM;
goto out;
}
slot--;
mac_slot_offsets_free &= ~(1 << slot);
*mpc->mpc_field_off = slot;
}
mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;
/*
* If we're loading a MAC module after the framework has initialized,
* it has to go into the dynamic list. If we're loading it before
* we've finished initializing, it can go into the static list with
* weaker locking requirements.
*/
if (static_entry)
LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list);
else
LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list);
/*
* Per-policy initialization. Currently, this takes place under the
* exclusive lock, so policies must not sleep in their init method.
* In the future, we may want to separate "init" from "start", with
* "init" occuring without the lock held. Likewise, on tear-down,
* breaking out "stop" from "destroy".
*/
if (mpc->mpc_ops->mpo_init != NULL)
(*(mpc->mpc_ops->mpo_init))(mpc);
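/*
 * Ask the framework to refresh whatever state it caches about the set
 * of registered policies, then announce the successful load.
 */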
mac_policy_update();
SDT_PROBE(mac, kernel, policy, register, mpc, 0, 0, 0, 0);
printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
mpc->mpc_name);
out:
mac_policy_xunlock();
return (error);
}
static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{
/*
* If we fail the load, we may get a request to unload. Check to see
* if we did the run-time registration, and if not, silently succeed.
*/
mac_policy_xlock();
if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
mac_policy_xunlock();
return (0);
}
#if 0
/*
* Don't allow unloading modules with private data.
*/
if (mpc->mpc_field_off != NULL) {
mac_policy_xunlock();
return (EBUSY);
}
#endif
/*
* Only allow the unload to proceed if the module is unloadable by
* its own definition.
*/
if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
mac_policy_xunlock();
return (EBUSY);
}
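/*
 * Tear the policy down: run its destroy method (if any), unlink it
 * from its policy list, clear the registration flag, and have the
 * framework refresh its cached state.
 */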
if (mpc->mpc_ops->mpo_destroy != NULL)
(*(mpc->mpc_ops->mpo_destroy))(mpc);
LIST_REMOVE(mpc, mpc_list);
mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
mac_policy_update();
mac_policy_xunlock();
SDT_PROBE(mac, kernel, policy, unregister, mpc, 0, 0, 0, 0);
printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
mpc->mpc_name);
return (0);
}
/*
* Allow MAC policy modules to register during boot, etc.
*/
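/*
 * Illustrative only (a sketch, not part of this file): policy modules
 * normally reach this handler through the MAC_POLICY_SET() macro in
 * mac_policy.h, which wraps a mac_policy_conf in a moduledata_t whose
 * event handler is mac_policy_modevent(), e.g.:
 *
 *	MAC_POLICY_SET(&mac_foo_ops, mac_foo, "Example Policy",
 *	    MPC_LOADTIME_FLAG_UNLOADOK, NULL);
 *
 * The mac_foo names and "Example Policy" string here are hypothetical.
 */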
int
mac_policy_modevent(module_t mod, int type, void *data)
{
struct mac_policy_conf *mpc;
int error;
error = 0;
mpc = (struct mac_policy_conf *) data;
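/*
 * Kernels built with MAC_STATIC reject policy module events once the
 * framework has gone mac_late.
 */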
#ifdef MAC_STATIC
if (mac_late) {
printf("mac_policy_modevent: MAC_STATIC and late\n");
return (EBUSY);
}
#endif
SDT_PROBE(mac, kernel, policy, modevent, type, mpc, 0, 0, 0);
switch (type) {
case MOD_LOAD:
if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
mac_late) {
printf("mac_policy_modevent: can't load %s policy "
"after booting\n", mpc->mpc_name);
error = EBUSY;
break;
}
error = mac_policy_register(mpc);
break;
case MOD_UNLOAD:
/* Don't unregister the module if it was never registered. */
if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
!= 0)
error = mac_policy_unregister(mpc);
else
error = 0;
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
/*
* Define an error value precedence and, given two arguments, select the
* value with the higher precedence.
*/
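/*
 * For example, composing EACCES from one policy with EPERM from another
 * yields EACCES, and composing success (0) with any error yields the
 * error.
 */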
int
mac_error_select(int error1, int error2)
{
/* Certain decision-making errors take top priority. */
if (error1 == EDEADLK || error2 == EDEADLK)
return (EDEADLK);
/* Invalid arguments should be reported where possible. */
if (error1 == EINVAL || error2 == EINVAL)
return (EINVAL);
/* Precedence goes to "visibility", with both process and file. */
if (error1 == ESRCH || error2 == ESRCH)
return (ESRCH);
if (error1 == ENOENT || error2 == ENOENT)
return (ENOENT);
/* Precedence goes to DAC/MAC protections. */
if (error1 == EACCES || error2 == EACCES)
return (EACCES);
/* Precedence goes to privilege. */
if (error1 == EPERM || error2 == EPERM)
return (EPERM);
/* Precedence goes to error over success; otherwise, arbitrary. */
if (error1 != 0)
return (error1);
return (error2);
}
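/*
 * Validate a caller-supplied struct mac: reject label buffer lengths
 * larger than MAC_MAX_LABEL_BUF_LEN.
 */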
int
mac_check_structmac_consistent(struct mac *mac)
{
if (mac->m_buflen > MAC_MAX_LABEL_BUF_LEN)
return (EINVAL);
return (0);
}
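/*
 * Schedule framework initialization during boot: mac_init() at
 * SI_SUB_MAC and, later, mac_late_init() at SI_SUB_MAC_LATE, after
 * which newly loading policies are treated as dynamic.
 */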
SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);