freebsd-nq/module/zfs/multilist.c
Prakash Surya ca0bf58d65 Illumos 5497 - lock contention on arcs_mtx
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Richard Elling <richard.elling@richardelling.com>
Approved by: Dan McDonald <danmcd@omniti.com>

Porting notes and other significant code changes:

The illumos 5368 patch (ARC should cache more metadata), which
was never picked up by ZoL, is mostly reverted by this patch.

Since ZoL relies on the kernel asynchronously calling the shrinker to
actually reap memory, the shrinker wakes up arc_reclaim_waiters_cv every
time it runs.

The arc_adapt_thread() function no longer calls arc_do_user_evicts()
since the newly-added arc_user_evicts_thread() calls it periodically.

Notable ZoL commits which conflicted with this patch or whose effects
are either duplicated or undone by this patch:

    302f753 - Integrate ARC more tightly with Linux
    39e055c - Adjust arc_p based on "bytes" in arc_shrink
    f521ce1 - Allow "arc_p" to drop to zero or grow to "arc_c"
    77765b5 - Remove "arc_meta_used" from arc_adjust calculation
    94520ca - Prune metadata from ghost lists in arc_adjust_meta

Trace support for multilist_insert() and multilist_remove() has been
added and produces the following output:

    fio-12498 [077] .... 112936.448324: zfs_multilist__insert: ml { offset 240 numsublists 80 sublistidx 63 }
    fio-12498 [077] .... 112936.448347: zfs_multilist__remove: ml { offset 240 numsublists 80 sublistidx 29 }

The following arcstats have been removed:

    recycle_miss - Used by arcstat.py and arc_summary.py, both of which
    have been updated appropriately.

    l2_writes_hdr_miss

The following arcstats have been added:

    evict_not_enough - Number of times arc_evict_state() was unable to
    evict enough buffers to reach its target amount.

    evict_l2_skip - Number of times arc_evict_hdr() skipped eviction
    because it was being written to the l2arc.

    l2_writes_lock_retry - Replaces l2_writes_hdr_miss.  Number of times
    l2arc_write_done() failed to acquire hash_lock (and re-tries).

    arc_meta_min - Shows the value of the zfs_arc_meta_min module
    parameter (see below).

The "index" column of the "dbuf" kstat has been removed since it doesn't
have a direct analog in the new multilist scheme.  Additional multilist-
related stats could be added in the future but would likely require
extensions to the multilist API.

The following module parameters have been added:

    zfs_arc_evict_batch_limit - Number of ARC headers to free per sub-list
    before moving on to the next sub-list.

    zfs_arc_meta_min - Enforce a floor on the amount of metadata in
    the ARC.

    zfs_arc_num_sublists_per_state - Number of multilist sub-lists per
    ARC state.

    zfs_arc_overflow_shift - Controls amount by which the ARC must exceed
    the target size to be considered "overflowing".

Ported-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>

/*
 * CDDL HEADER START
 *
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/multilist.h>
#include <sys/trace_multilist.h>

/* needed for spa_get_random() */
#include <sys/spa.h>

/*
 * Given the object contained on the list, return a pointer to the
 * object's multilist_node_t structure it contains.
 */
#ifdef DEBUG
static multilist_node_t *
multilist_d2l(multilist_t *ml, void *obj)
{
	return ((multilist_node_t *)((char *)obj + ml->ml_offset));
}
#endif

/*
 * Initialize a new multilist using the parameters specified.
 *
 *  - 'size' denotes the size of the structure containing the
 *    multilist_node_t.
 *  - 'offset' denotes the byte offset of the multilist_node_t within
 *    the structure that contains it.
 *  - 'num' specifies the number of internal sublists to create.
 *  - 'index_func' is used to determine which sublist to insert into
 *    when the multilist_insert() function is called; as well as which
 *    sublist to remove from when multilist_remove() is called. The
 *    requirements this function must meet are the following:
 *
 *      - It must always return the same value when called on the same
 *        object (to ensure the object is removed from the list it was
 *        inserted into).
 *
 *      - It must return a value in the range [0, number of sublists).
 *        The multilist_get_num_sublists() function may be used to
 *        determine the number of sublists in the multilist.
 *
 *    Also, in order to reduce internal contention between the sublists
 *    during insertion and removal, this function should choose evenly
 *    between all available sublists when inserting. This isn't a hard
 *    requirement, but a general rule of thumb in order to garner the
 *    best multi-threaded performance out of the data structure.
 */
void
multilist_create(multilist_t *ml, size_t size, size_t offset, unsigned int num,
    multilist_sublist_index_func_t *index_func)
{
	int i;

	ASSERT3P(ml, !=, NULL);
	ASSERT3U(size, >, 0);
	ASSERT3U(size, >=, offset + sizeof (multilist_node_t));
	ASSERT3U(num, >, 0);
	ASSERT3P(index_func, !=, NULL);

	ml->ml_offset = offset;
	ml->ml_num_sublists = num;
	ml->ml_index_func = index_func;

	ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
	    ml->ml_num_sublists, KM_SLEEP);
	ASSERT3P(ml->ml_sublists, !=, NULL);

	for (i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];
		mutex_init(&mls->mls_lock, NULL, MUTEX_DEFAULT, NULL);
		list_create(&mls->mls_list, size, offset);
	}
}
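
/*
 * Usage sketch (illustrative only, not part of the original source):
 * a caller might hash the object's address so that insertions spread
 * evenly across the sublists while the same object always maps to the
 * same sublist.  The names example_obj_t, eo_node, example_index_func()
 * and example_init() are hypothetical, chosen for this sketch:
 *
 *	typedef struct example_obj {
 *		multilist_node_t	eo_node;
 *		uint64_t		eo_id;
 *	} example_obj_t;
 *
 *	static multilist_t example_ml;
 *
 *	static unsigned int
 *	example_index_func(multilist_t *ml, void *obj)
 *	{
 *		return (((uintptr_t)obj >> 9) %
 *		    multilist_get_num_sublists(ml));
 *	}
 *
 *	static void
 *	example_init(void)
 *	{
 *		multilist_create(&example_ml, sizeof (example_obj_t),
 *		    offsetof(example_obj_t, eo_node), 64,
 *		    example_index_func);
 *	}
 */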

/*
 * Destroy the given multilist object, and free up any memory it holds.
 */
void
multilist_destroy(multilist_t *ml)
{
	int i;

	ASSERT(multilist_is_empty(ml));

	for (i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];

		ASSERT(list_is_empty(&mls->mls_list));

		list_destroy(&mls->mls_list);
		mutex_destroy(&mls->mls_lock);
	}

	ASSERT3P(ml->ml_sublists, !=, NULL);
	kmem_free(ml->ml_sublists,
	    sizeof (multilist_sublist_t) * ml->ml_num_sublists);

	ml->ml_num_sublists = 0;
	ml->ml_offset = 0;
}

/*
 * Insert the given object into the multilist.
 *
 * This function will insert the object specified into the sublist
 * determined using the function given at multilist creation time.
 *
 * The sublist locks are automatically acquired if not already held, to
 * ensure consistency when inserting and removing from multiple threads.
 */
void
multilist_insert(multilist_t *ml, void *obj)
{
	unsigned int sublist_idx = ml->ml_index_func(ml, obj);
	multilist_sublist_t *mls;
	boolean_t need_lock;

	DTRACE_PROBE3(multilist__insert, multilist_t *, ml,
	    unsigned int, sublist_idx, void *, obj);

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);

	mls = &ml->ml_sublists[sublist_idx];

	/*
	 * Note: Callers may already hold the sublist lock by calling
	 * multilist_sublist_lock().  Here we rely on MUTEX_HELD()
	 * returning TRUE if and only if the current thread holds the
	 * lock.  While it's a little ugly to make the lock recursive in
	 * this way, it works and allows the calling code to be much
	 * simpler -- otherwise it would have to pass around a flag
	 * indicating that it already has the lock.
	 */
	need_lock = !MUTEX_HELD(&mls->mls_lock);

	if (need_lock)
		mutex_enter(&mls->mls_lock);

	ASSERT(!multilist_link_active(multilist_d2l(ml, obj)));

	multilist_sublist_insert_head(mls, obj);

	if (need_lock)
		mutex_exit(&mls->mls_lock);
}

/*
 * Remove the given object from the multilist.
 *
 * This function will remove the object specified from the sublist
 * determined using the function given at multilist creation time.
 *
 * The necessary sublist locks are automatically acquired, to ensure
 * consistency when inserting and removing from multiple threads.
 */
void
multilist_remove(multilist_t *ml, void *obj)
{
	unsigned int sublist_idx = ml->ml_index_func(ml, obj);
	multilist_sublist_t *mls;
	boolean_t need_lock;

	DTRACE_PROBE3(multilist__remove, multilist_t *, ml,
	    unsigned int, sublist_idx, void *, obj);

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);

	mls = &ml->ml_sublists[sublist_idx];

	/* See comment in multilist_insert(). */
	need_lock = !MUTEX_HELD(&mls->mls_lock);

	if (need_lock)
		mutex_enter(&mls->mls_lock);

	ASSERT(multilist_link_active(multilist_d2l(ml, obj)));

	multilist_sublist_remove(mls, obj);

	if (need_lock)
		mutex_exit(&mls->mls_lock);
}
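
/*
 * Illustrative sketch of the locking convention noted in
 * multilist_insert(): a caller that already holds a sublist's lock via
 * multilist_sublist_lock() may call multilist_remove() on an object
 * belonging to that sublist; the MUTEX_HELD() check above prevents a
 * recursive mutex_enter().  The helper names walk_and_remove() and
 * should_remove() are hypothetical:
 *
 *	static void
 *	walk_and_remove(multilist_t *ml, unsigned int idx)
 *	{
 *		multilist_sublist_t *mls = multilist_sublist_lock(ml, idx);
 *		void *obj, *next;
 *
 *		for (obj = multilist_sublist_head(mls); obj != NULL;
 *		    obj = next) {
 *			next = multilist_sublist_next(mls, obj);
 *			if (should_remove(obj))
 *				multilist_remove(ml, obj);
 *		}
 *
 *		multilist_sublist_unlock(mls);
 *	}
 */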

/*
 * Check to see if this multilist object is empty.
 *
 * This will return TRUE if it finds all of the sublists of this
 * multilist to be empty, and FALSE otherwise. Each sublist lock will be
 * automatically acquired as necessary.
 *
 * If concurrent insertions and removals are occurring, the semantics
 * of this function become a little fuzzy. Instead of locking all
 * sublists for the entire call time of the function, each sublist is
 * only locked as it is individually checked for emptiness. Thus, it's
 * possible for this function to return TRUE with non-empty sublists at
 * the time the function returns. This would be due to another thread
 * inserting into a given sublist, after that specific sublist was
 * checked and deemed empty, but before all sublists have been checked.
 */
int
multilist_is_empty(multilist_t *ml)
{
	int i;

	for (i = 0; i < ml->ml_num_sublists; i++) {
		multilist_sublist_t *mls = &ml->ml_sublists[i];
		/* See comment in multilist_insert(). */
		boolean_t need_lock = !MUTEX_HELD(&mls->mls_lock);

		if (need_lock)
			mutex_enter(&mls->mls_lock);

		if (!list_is_empty(&mls->mls_list)) {
			if (need_lock)
				mutex_exit(&mls->mls_lock);

			return (FALSE);
		}

		if (need_lock)
			mutex_exit(&mls->mls_lock);
	}

	return (TRUE);
}

/* Return the number of sublists composing this multilist */
unsigned int
multilist_get_num_sublists(multilist_t *ml)
{
	return (ml->ml_num_sublists);
}

/* Return a randomly selected, valid sublist index for this multilist */
unsigned int
multilist_get_random_index(multilist_t *ml)
{
	return (spa_get_random(ml->ml_num_sublists));
}

/* Lock and return the sublist specified at the given index */
multilist_sublist_t *
multilist_sublist_lock(multilist_t *ml, unsigned int sublist_idx)
{
	multilist_sublist_t *mls;

	ASSERT3U(sublist_idx, <, ml->ml_num_sublists);
	mls = &ml->ml_sublists[sublist_idx];
	mutex_enter(&mls->mls_lock);

	return (mls);
}

void
multilist_sublist_unlock(multilist_sublist_t *mls)
{
	mutex_exit(&mls->mls_lock);
}

/*
 * We're allowing any object to be inserted into this specific sublist,
 * but this can lead to trouble if multilist_remove() is later called to
 * remove this object. Specifically, if calling ml_index_func on this
 * object returns an index for a sublist different from the one passed
 * in here, any call to multilist_remove() with this newly inserted
 * object is undefined! (the call to multilist_remove() will remove the
 * object from a list it isn't contained in)
 */
void
multilist_sublist_insert_head(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	list_insert_head(&mls->mls_list, obj);
}

/* please see comment above multilist_sublist_insert_head */
void
multilist_sublist_insert_tail(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	list_insert_tail(&mls->mls_list, obj);
}
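
/*
 * Illustrative sketch: when inserting into a sublist directly (rather
 * than through multilist_insert()), using the sublist chosen by the
 * object's own index function keeps a later multilist_remove() well
 * defined.  example_index_func() is the hypothetical index function
 * from the sketch above:
 *
 *	unsigned int idx = example_index_func(ml, obj);
 *	multilist_sublist_t *mls = multilist_sublist_lock(ml, idx);
 *	multilist_sublist_insert_tail(mls, obj);
 *	multilist_sublist_unlock(mls);
 */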

/*
 * Move the object one element forward in the list.
 *
 * This function will move the given object forward in the list (towards
 * the head) by one object. So, in essence, it will swap its position in
 * the list with its "prev" pointer. If the given object is already at the
 * head of the list, it cannot be moved forward any more than it already
 * is, so no action is taken.
 *
 * NOTE: This function **must not** remove any object from the list other
 *       than the object given as the parameter. This is relied upon in
 *       arc_evict_state_impl().
 */
void
multilist_sublist_move_forward(multilist_sublist_t *mls, void *obj)
{
	void *prev = list_prev(&mls->mls_list, obj);

	ASSERT(MUTEX_HELD(&mls->mls_lock));
	ASSERT(!list_is_empty(&mls->mls_list));

	/* 'obj' must be at the head of the list, nothing to do */
	if (prev == NULL)
		return;

	list_remove(&mls->mls_list, obj);
	list_insert_before(&mls->mls_list, prev, obj);
}
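
/*
 * Illustrative sketch of a marker-based scan in the spirit of what
 * arc_evict_state_impl() does, simplified here.  process() is a
 * hypothetical callback, and 'marker' is a hypothetical dummy object of
 * the multilist's element type whose multilist_node_t has been set up
 * with multilist_link_init().  The marker starts at the tail and is
 * nudged toward the head one position at a time, so the scan keeps its
 * place even if the visited object is removed:
 *
 *	multilist_sublist_t *mls = multilist_sublist_lock(ml, idx);
 *	void *obj;
 *
 *	multilist_sublist_insert_tail(mls, marker);
 *
 *	for (obj = multilist_sublist_prev(mls, marker); obj != NULL;
 *	    obj = multilist_sublist_prev(mls, marker)) {
 *		multilist_sublist_move_forward(mls, marker);
 *		process(obj);
 *	}
 *
 *	multilist_sublist_remove(mls, marker);
 *	multilist_sublist_unlock(mls);
 */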

void
multilist_sublist_remove(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	list_remove(&mls->mls_list, obj);
}

void *
multilist_sublist_head(multilist_sublist_t *mls)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_head(&mls->mls_list));
}

void *
multilist_sublist_tail(multilist_sublist_t *mls)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_tail(&mls->mls_list));
}

void *
multilist_sublist_next(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_next(&mls->mls_list, obj));
}

void *
multilist_sublist_prev(multilist_sublist_t *mls, void *obj)
{
	ASSERT(MUTEX_HELD(&mls->mls_lock));
	return (list_prev(&mls->mls_list, obj));
}

void
multilist_link_init(multilist_node_t *link)
{
	list_link_init(link);
}

int
multilist_link_active(multilist_node_t *link)
{
	return (list_link_active(link));
}