/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/multilist.h>
#include <sys/trace_zfs.h>

/* needed for spa_get_random() */
#include <sys/spa.h>

/*
 * This overrides the number of sublists in each multilist_t, which defaults
 * to the number of CPUs in the system (see multilist_create()).
 */
int zfs_multilist_num_sublists = 0;
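
/*
 * Illustration (not part of the original documentation): this tunable is
 * registered as a "zfs" module parameter via the ZFS_MODULE_PARAM()
 * declaration at the bottom of this file. On Linux builds it can
 * typically be set at module load time, e.g. in /etc/modprobe.d/zfs.conf:
 *
 *	options zfs zfs_multilist_num_sublists=32
 *
 * Since multilist_create() reads the value at creation time, only
 * multilists created after a change pick up the new setting.
 */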

/*
 * Given the object contained on the list, return a pointer to the
 * object's multilist_node_t structure it contains.
 */
#ifdef DEBUG
static multilist_node_t *
multilist_d2l(multilist_t *ml, void *obj)
{
	return ((multilist_node_t *)((char *)obj + ml->ml_offset));
}
#endif

/*
 * Initialize a new multilist using the parameters specified.
 *
 * - 'size' denotes the size of the structure containing the
 *   multilist_node_t.
 * - 'offset' denotes the byte offset of the multilist_node_t within
 *   the structure that contains it.
 * - 'num' specifies the number of internal sublists to create.
 * - 'index_func' is used to determine which sublist to insert into
 *   when the multilist_insert() function is called, as well as which
 *   sublist to remove from when multilist_remove() is called. The
 *   requirements this function must meet are the following:
 *
 *    - It must always return the same value when called on the same
 *      object (to ensure the object is removed from the list it was
 *      inserted into).
 *
 *    - It must return a value in the range [0, number of sublists).
 *      The multilist_get_num_sublists() function may be used to
 *      determine the number of sublists in the multilist.
 *
 *    Also, in order to reduce internal contention between the sublists
 *    during insertion and removal, this function should choose evenly
 *    between all available sublists when inserting. This isn't a hard
 *    requirement, but a general rule of thumb in order to garner the
 *    best multi-threaded performance out of the data structure.
 */
static multilist_t *
multilist_create_impl(size_t size, size_t offset,
    unsigned int num, multilist_sublist_index_func_t *index_func)
{
	ASSERT3U(size, >, 0);
	ASSERT3U(size, >=, offset + sizeof (multilist_node_t));
	ASSERT3U(num, >, 0);
	ASSERT3P(index_func, !=, NULL);

	multilist_t *ml = kmem_alloc(sizeof (*ml), KM_SLEEP);
	ml->ml_offset = offset;
	ml->ml_num_sublists = num;
	ml->ml_index_func = index_func;

	ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
	    ml->ml_num_sublists, KM_SLEEP);

	ASSERT3P(ml->ml_sublists, !=, NULL);

	for (int i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];
		mutex_init(&mls->mls_lock, NULL, MUTEX_NOLOCKDEP, NULL);
		list_create(&mls->mls_list, size, offset);
	}
	return (ml);
}

/*
 * Allocate a new multilist, using the default number of sublists
 * (the number of CPUs, or at least 4, or the tunable
 * zfs_multilist_num_sublists).
 */
multilist_t *
multilist_create(size_t size, size_t offset,
    multilist_sublist_index_func_t *index_func)
{
	int num_sublists;

	if (zfs_multilist_num_sublists > 0) {
		num_sublists = zfs_multilist_num_sublists;
	} else {
		num_sublists = MAX(boot_ncpus, 4);
	}

	return (multilist_create_impl(size, offset, num_sublists, index_func));
}
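
/*
 * Illustration only (a hypothetical sketch, not used anywhere in this
 * file): an index_func that satisfies the contract documented above
 * multilist_create_impl() might hash the object's address so that
 * insertions spread evenly across the sublists:
 *
 *	static unsigned int
 *	example_index_func(multilist_t *ml, void *obj)
 *	{
 *		return (((uintptr_t)obj >> 7) %
 *		    multilist_get_num_sublists(ml));
 *	}
 *
 *	multilist_t *ml = multilist_create(sizeof (example_obj_t),
 *	    offsetof(example_obj_t, eo_multilist_node),
 *	    example_index_func);
 *
 * (example_obj_t and eo_multilist_node are placeholders for a caller's
 * structure embedding a multilist_node_t.) The result is deterministic
 * for a given object and always falls in [0, number of sublists), as
 * required; the shift discards low-order address bits that tend to be
 * identical for same-sized allocations.
 */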

/*
 * Destroy the given multilist object, and free up any memory it holds.
 */
void
multilist_destroy(multilist_t *ml)
{
	ASSERT(multilist_is_empty(ml));

	for (int i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];

		ASSERT(list_is_empty(&mls->mls_list));

		list_destroy(&mls->mls_list);
		mutex_destroy(&mls->mls_lock);
	}

	ASSERT3P(ml->ml_sublists, !=, NULL);
	kmem_free(ml->ml_sublists,
	    sizeof (multilist_sublist_t) * ml->ml_num_sublists);

	ml->ml_num_sublists = 0;
	ml->ml_offset = 0;
	kmem_free(ml, sizeof (multilist_t));
}

/*
 * Insert the given object into the multilist.
 *
 * This function will insert the object specified into the sublist
 * determined using the function given at multilist creation time.
 *
 * The sublist locks are automatically acquired if not already held, to
 * ensure consistency when inserting and removing from multiple threads.
 */
void
multilist_insert(multilist_t *ml, void *obj)
{
	unsigned int sublist_idx = ml->ml_index_func(ml, obj);
	multilist_sublist_t *mls;
	boolean_t need_lock;

	DTRACE_PROBE3(multilist__insert, multilist_t *, ml,
	    unsigned int, sublist_idx, void *, obj);

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);

	mls = &ml->ml_sublists[sublist_idx];

	/*
	 * Note: Callers may already hold the sublist lock by calling
	 * multilist_sublist_lock(). Here we rely on MUTEX_HELD()
	 * returning TRUE if and only if the current thread holds the
	 * lock. While it's a little ugly to make the lock recursive in
	 * this way, it works and allows the calling code to be much
	 * simpler -- otherwise it would have to pass around a flag
	 * indicating that it already has the lock.
	 */
	need_lock = !MUTEX_HELD(&mls->mls_lock);

	if (need_lock)
		mutex_enter(&mls->mls_lock);

	ASSERT(!multilist_link_active(multilist_d2l(ml, obj)));

	multilist_sublist_insert_head(mls, obj);

	if (need_lock)
		mutex_exit(&mls->mls_lock);
}

/*
 * Remove the given object from the multilist.
 *
 * This function will remove the object specified from the sublist
 * determined using the function given at multilist creation time.
 *
 * The necessary sublist locks are automatically acquired, to ensure
 * consistency when inserting and removing from multiple threads.
 */
void
multilist_remove(multilist_t *ml, void *obj)
{
	unsigned int sublist_idx = ml->ml_index_func(ml, obj);
	multilist_sublist_t *mls;
	boolean_t need_lock;

	DTRACE_PROBE3(multilist__remove, multilist_t *, ml,
	    unsigned int, sublist_idx, void *, obj);

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);

	mls = &ml->ml_sublists[sublist_idx];
	/* See comment in multilist_insert(). */
	need_lock = !MUTEX_HELD(&mls->mls_lock);

	if (need_lock)
		mutex_enter(&mls->mls_lock);

	ASSERT(multilist_link_active(multilist_d2l(ml, obj)));

	multilist_sublist_remove(mls, obj);

	if (need_lock)
		mutex_exit(&mls->mls_lock);
}

/*
 * Check to see if this multilist object is empty.
 *
 * This will return TRUE if it finds all of the sublists of this
 * multilist to be empty, and FALSE otherwise. Each sublist lock will be
 * automatically acquired as necessary.
 *
 * If concurrent insertions and removals are occurring, the semantics
 * of this function become a little fuzzy. Instead of locking all
 * sublists for the entire call time of the function, each sublist is
 * only locked as it is individually checked for emptiness. Thus, it's
 * possible for this function to return TRUE with non-empty sublists at
 * the time the function returns. This would be due to another thread
 * inserting into a given sublist, after that specific sublist was
 * checked and deemed empty, but before all sublists have been checked.
 */
int
multilist_is_empty(multilist_t *ml)
{
	for (int i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];
		/* See comment in multilist_insert(). */
		boolean_t need_lock = !MUTEX_HELD(&mls->mls_lock);

		if (need_lock)
			mutex_enter(&mls->mls_lock);

		if (!list_is_empty(&mls->mls_list)) {
			if (need_lock)
				mutex_exit(&mls->mls_lock);

			return (FALSE);
		}

		if (need_lock)
			mutex_exit(&mls->mls_lock);
	}

	return (TRUE);
}

/* Return the number of sublists composing this multilist */
unsigned int
multilist_get_num_sublists(multilist_t *ml)
{
	return (ml->ml_num_sublists);
}

/* Return a randomly selected, valid sublist index for this multilist */
unsigned int
multilist_get_random_index(multilist_t *ml)
{
	return (spa_get_random(ml->ml_num_sublists));
}

/* Lock and return the sublist specified at the given index */
multilist_sublist_t *
multilist_sublist_lock(multilist_t *ml, unsigned int sublist_idx)
{
	multilist_sublist_t *mls;

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
	mls = &ml->ml_sublists[sublist_idx];
	mutex_enter(&mls->mls_lock);

	return (mls);
}

/* Lock and return the sublist that would be used to store the specified obj */
multilist_sublist_t *
multilist_sublist_lock_obj(multilist_t *ml, void *obj)
{
	return (multilist_sublist_lock(ml, ml->ml_index_func(ml, obj)));
}

void
multilist_sublist_unlock(multilist_sublist_t *mls)
{
	mutex_exit(&mls->mls_lock);
}
/*
|
|
|
|
* We're allowing any object to be inserted into this specific sublist,
|
|
|
|
* but this can lead to trouble if multilist_remove() is called to
|
|
|
|
* remove this object. Specifically, if calling ml_index_func on this
|
|
|
|
* object returns an index for sublist different than what is passed as
|
|
|
|
* a parameter here, any call to multilist_remove() with this newly
|
|
|
|
* inserted object is undefined! (the call to multilist_remove() will
|
|
|
|
* remove the object from a list that it isn't contained in)
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
multilist_sublist_insert_head(multilist_sublist_t *mls, void *obj)
|
|
|
|
{
|
|
|
|
ASSERT(MUTEX_HELD(&mls->mls_lock));
|
|
|
|
list_insert_head(&mls->mls_list, obj);
|
|
|
|
}
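
/*
 * Illustration only (hypothetical usage, not part of this file): callers
 * can avoid the pitfall described above by letting the multilist choose
 * the sublist, so the lock they hold and ml_index_func() always agree:
 *
 *	multilist_sublist_t *mls = multilist_sublist_lock_obj(ml, obj);
 *	multilist_sublist_insert_head(mls, obj);
 *	multilist_sublist_unlock(mls);
 *
 * Because the sublist was selected with ml_index_func(), a later
 * multilist_remove() of the same object will look on the correct
 * sublist.
 */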

/* please see comment above multilist_sublist_insert_head */
void
multilist_sublist_insert_tail(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	list_insert_tail(&mls->mls_list, obj);
}

/*
 * Move the object one element forward in the list.
 *
 * This function will move the given object forward in the list (towards
 * the head) by one object. So, in essence, it will swap its position in
 * the list with its "prev" pointer. If the given object is already at the
 * head of the list, it cannot be moved forward any more than it already
 * is, so no action is taken.
 *
 * NOTE: This function **must not** remove any object from the list other
 * than the object given as the parameter. This is relied upon in
 * arc_evict_state_impl().
 */
void
multilist_sublist_move_forward(multilist_sublist_t *mls, void *obj)
{
	void *prev = list_prev(&mls->mls_list, obj);

	ASSERT(MUTEX_HELD(&mls->mls_lock));
	ASSERT(!list_is_empty(&mls->mls_list));

	/* 'obj' must be at the head of the list, nothing to do */
	if (prev == NULL)
		return;

	list_remove(&mls->mls_list, obj);
	list_insert_before(&mls->mls_list, prev, obj);
}

void
multilist_sublist_remove(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	list_remove(&mls->mls_list, obj);
}

int
multilist_sublist_is_empty(multilist_sublist_t *mls)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_is_empty(&mls->mls_list));
}

int
multilist_sublist_is_empty_idx(multilist_t *ml, unsigned int sublist_idx)
{
	multilist_sublist_t *mls;
	int empty;

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
	mls = &ml->ml_sublists[sublist_idx];
	ASSERT(!MUTEX_HELD(&mls->mls_lock));
	mutex_enter(&mls->mls_lock);
	empty = list_is_empty(&mls->mls_list);
	mutex_exit(&mls->mls_lock);
	return (empty);
}

void *
multilist_sublist_head(multilist_sublist_t *mls)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_head(&mls->mls_list));
}

void *
multilist_sublist_tail(multilist_sublist_t *mls)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_tail(&mls->mls_list));
}

void *
multilist_sublist_next(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_next(&mls->mls_list, obj));
}

void *
multilist_sublist_prev(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_prev(&mls->mls_list, obj));
}

void
multilist_link_init(multilist_node_t *link)
{
	list_link_init(link);
}

int
multilist_link_active(multilist_node_t *link)
{
	return (list_link_active(link));
}

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, multilist_num_sublists, INT, ZMOD_RW,
	"Number of sublists used in each multilist");
/* END CSTYLED */