Optimize locking checks in mempool allocator

Avoid checking the whole array of objects on every allocation by removing
the self-organized memory reaping from the allocation path. Reaping is
handled instead by the global memory reap callback, which runs every 60
seconds. This significantly reduces the number of locking operations.

Reviewed-by: Kjeld Schouten <kjeld@schouten-lebbing.nl>
Reviewed-by: Mateusz Guzik <mjguzik@gmail.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Sebastian Gottschall <s.gottschall@dd-wrt.com>
Closes #11126
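
For illustration only, here is a minimal userspace sketch of the pattern the
patch adopts: expired buffers are released by a dedicated reap pass that walks
the fixed slot array under a try-lock, so the allocation path no longer has to
visit every slot itself. The struct, names, and pthread-based locking below
are simplified stand-ins, not the actual OpenZFS types or API.

#include <pthread.h>
#include <stdlib.h>
#include <time.h>

#define POOL_MAX	16	/* stand-in for ZSTD_POOL_MAX */
#define POOL_TIMEOUT	120	/* seconds; the allocator would stamp
				   p->timeout = time(NULL) + POOL_TIMEOUT */

/* Simplified stand-in for struct zstd_pool. */
struct pool_slot {
	pthread_mutex_t barrier;
	void *mem;
	size_t size;
	time_t timeout;
};

/*
 * Periodic reap pass, e.g. driven by a 60-second callback: free any cached
 * buffer whose timeout has expired.  Slots that are currently in use (the
 * try-lock fails) are simply skipped and picked up on a later pass, so the
 * hot allocation path never has to scan the whole array.
 */
static void
pool_reap(struct pool_slot *slots)
{
	for (int i = 0; i < POOL_MAX; i++) {
		struct pool_slot *p = &slots[i];

		if (p->mem != NULL && pthread_mutex_trylock(&p->barrier) == 0) {
			if (p->mem != NULL && time(NULL) > p->timeout) {
				free(p->mem);
				p->mem = NULL;
				p->size = 0;
				p->timeout = 0;
			}
			pthread_mutex_unlock(&p->barrier);
		}
	}
}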

@@ -202,6 +202,34 @@ static struct zstd_fallback_mem zstd_dctx_fallback;
 static struct zstd_pool *zstd_mempool_cctx;
 static struct zstd_pool *zstd_mempool_dctx;
+
+static void
+zstd_mempool_reap(struct zstd_pool *zstd_mempool)
+{
+	struct zstd_pool *pool;
+
+	if (!zstd_mempool || !ZSTDSTAT(zstd_stat_buffers)) {
+		return;
+	}
+
+	/* free obsolete slots */
+	for (int i = 0; i < ZSTD_POOL_MAX; i++) {
+		pool = &zstd_mempool[i];
+		if (pool->mem && mutex_tryenter(&pool->barrier)) {
+			/* Free memory if unused object older than 2 minutes */
+			if (pool->mem && gethrestime_sec() > pool->timeout) {
+				vmem_free(pool->mem, pool->size);
+				ZSTDSTAT_SUB(zstd_stat_buffers, 1);
+				ZSTDSTAT_SUB(zstd_stat_size, pool->size);
+				pool->mem = NULL;
+				pool->size = 0;
+				pool->timeout = 0;
+			}
+			mutex_exit(&pool->barrier);
+		}
+	}
+}
+
 /*
  * Try to get a cached allocated buffer from memory pool or allocate a new one
  * if necessary. If a object is older than 2 minutes and does not fit the
@@ -215,6 +243,7 @@ static struct zstd_pool *zstd_mempool_dctx;
  *
  * The scheduled release will be updated every time a object is reused.
  */
 static void *
 zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
 {
@@ -242,31 +271,16 @@ zstd_mempool_alloc(struct zstd_pool *zstd_mempool, size_t size)
 			 * Check if objects fits the size, if so we take it and
 			 * update the timestamp.
 			 */
-			if (size && !mem && pool->mem && size <= pool->size) {
+			if (pool->mem && size <= pool->size) {
 				pool->timeout = gethrestime_sec() +
 				    ZSTD_POOL_TIMEOUT;
 				mem = pool->mem;
-				continue;
+				return (mem);
 			}
-
-			/* Free memory if unused object older than 2 minutes */
-			if (pool->mem && gethrestime_sec() > pool->timeout) {
-				vmem_free(pool->mem, pool->size);
-				ZSTDSTAT_SUB(zstd_stat_buffers, 1);
-				ZSTDSTAT_SUB(zstd_stat_size, pool->size);
-				pool->mem = NULL;
-				pool->size = 0;
-				pool->timeout = 0;
-			}
-
 			mutex_exit(&pool->barrier);
 		}
 	}
 
-	if (!size || mem) {
-		return (mem);
-	}
-
 	/*
 	 * If no preallocated slot was found, try to fill in a new one.
 	 *
@@ -704,8 +718,8 @@ zfs_zstd_cache_reap_now(void)
 	 * calling alloc with zero size seeks
 	 * and releases old unused objects
 	 */
-	zstd_mempool_alloc(zstd_mempool_cctx, 0);
-	zstd_mempool_alloc(zstd_mempool_dctx, 0);
+	zstd_mempool_reap(zstd_mempool_cctx);
+	zstd_mempool_reap(zstd_mempool_dctx);
 }
 
 extern int __init