Remove skc_ref from alloc/free paths

As described in spl_kmem_cache_destroy(), the ->skc_ref count was
added to address the case of a cache reap or grow racing with a
destroy.  It is not strictly needed in the alloc/free paths
because consumers of the cache are responsible for not using it
while it's being destroyed.

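For illustration only (none of the following is code from the SPL tree), the pattern being dropped from the hot paths amounts to the sketch below, with hypothetical names and C11 atomics standing in for the kernel's atomic_t: alloc/free pin the cache with a reference so that destroy can wait out in-flight callers.

	/*
	 * Simplified sketch of the reference-counting pattern being removed.
	 * Hypothetical names; C11 atomics stand in for the kernel's atomic_t.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <sched.h>
	#include <stdlib.h>

	struct cache {
		atomic_int	ref;		/* plays the role of skc_ref */
		atomic_bool	destroying;	/* plays the role of KMC_BIT_DESTROY */
	};

	static void *
	cache_alloc(struct cache *c, size_t size)
	{
		atomic_fetch_add(&c->ref, 1);	/* hot-path increment this patch drops */
		void *obj = malloc(size);	/* stand-in for the real allocation */
		atomic_fetch_sub(&c->ref, 1);	/* hot-path decrement this patch drops */
		return (obj);
	}

	static void
	cache_destroy(struct cache *c)
	{
		atomic_store(&c->destroying, true);
		/* Wait for every in-flight caller to drop its reference. */
		while (atomic_load(&c->ref) != 0)
			sched_yield();
		/* ... tear down the cache ... */
	}

Only the alloc/free hot paths stop touching the counter here; the reap/grow cases that spl_kmem_cache_destroy() was written to handle are not changed by this patch.
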
Removing this code is desirable because there is some evidence that
contention on this atomic negatively impacts performance on large-scale
NUMA systems.

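As a rough illustration of that contention effect (not a benchmark from the SPL repository; the thread and iteration counts are arbitrary), a single shared atomic counter forces every increment to bounce one cache line between cores and NUMA nodes, while per-thread counters scale independently:

	/* Toy userspace illustration of shared-counter contention (hypothetical). */
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NTHREADS	8
	#define ITERS		10000000L

	static atomic_long shared_ctr;			/* every thread hits this cache line */
	static long private_ctr[NTHREADS][16];		/* padded: one cache line per thread */

	static void *
	shared_worker(void *arg)
	{
		(void)arg;
		for (long i = 0; i < ITERS; i++)
			atomic_fetch_add(&shared_ctr, 1);	/* contended RMW */
		return (NULL);
	}

	static void *
	private_worker(void *arg)
	{
		long id = (intptr_t)arg;
		for (long i = 0; i < ITERS; i++)
			private_ctr[id][0]++;			/* no sharing */
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t tid[NTHREADS];

		/* Time each phase separately, e.g. with clock_gettime() or perf. */
		for (intptr_t i = 0; i < NTHREADS; i++)
			pthread_create(&tid[i], NULL, shared_worker, NULL);
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(tid[i], NULL);

		for (intptr_t i = 0; i < NTHREADS; i++)
			pthread_create(&tid[i], NULL, private_worker, (void *)i);
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(tid[i], NULL);

		printf("shared=%ld private[0]=%ld\n",
		    atomic_load(&shared_ctr), private_ctr[0][0]);
		return (0);
	}

On a multi-socket machine the shared phase is typically far slower than the private one; that cross-node cache-line traffic is what this change removes from every cache allocation and free.
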
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Issue #463
Brian Behlendorf 2015-07-23 13:45:31 -07:00
parent 62aa81a577
commit 4699d76d19

@@ -1403,8 +1403,6 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-	atomic_inc(&skc->skc_ref);
-
 	/*
 	 * Allocate directly from a Linux slab. All optimizations are left
 	 * to the underlying cache we only need to guarantee that KM_SLEEP
@@ -1457,8 +1455,6 @@ ret:
 		prefetchw(obj);
 	}
 
-	atomic_dec(&skc->skc_ref);
-
 	return (obj);
 }
 EXPORT_SYMBOL(spl_kmem_cache_alloc);
@@ -1479,7 +1475,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
-	atomic_inc(&skc->skc_ref);
 
 	/*
 	 * Run the destructor
@@ -1492,7 +1487,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	 */
 	if (skc->skc_flags & KMC_SLAB) {
 		kmem_cache_free(skc->skc_linux_cache, obj);
-		goto out;
+		return;
 	}
 
 	/*
@@ -1507,7 +1502,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 		spin_unlock(&skc->skc_lock);
 
 		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
-			goto out;
+			return;
 	}
 
 	local_irq_save(flags);
@@ -1538,8 +1533,6 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	if (do_reclaim)
 		spl_slab_reclaim(skc);
 
-out:
-	atomic_dec(&skc->skc_ref);
 }
 EXPORT_SYMBOL(spl_kmem_cache_free);
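
A side effect visible in the hunks above: once the shared decrement at the out: label is gone, there is no common cleanup left, so the early exits in spl_kmem_cache_free() become plain returns. Schematically (hypothetical userspace code, not SPL):

	#include <stdatomic.h>

	static atomic_int ref;

	/* Before: every early exit had to funnel through one cleanup label. */
	static void
	release_old(int fast_path)
	{
		atomic_fetch_add(&ref, 1);
		if (fast_path)
			goto out;
		/* ... slow path ... */
	out:
		atomic_fetch_sub(&ref, 1);
	}

	/* After: with no shared cleanup, early exits can simply return. */
	static void
	release_new(int fast_path)
	{
		if (fast_path)
			return;
		/* ... slow path ... */
	}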