Linux 3.12 compat: shrinker semantics

The new shrinker API as of Linux 3.12 modifies "struct shrinker" by
replacing the @shrink callback with the pair of @count_objects and
@scan_objects.  It also requires @scan_objects to return the number of
objects actually freed, whereas the previous @shrink callback returned
the number of remaining freeable objects.

This patch adds support for the new @scan_objects return value semantics
and updates the splat shrinker test case appropriately.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Chase <tim@chase2k.com>
Closes #403
This commit is contained in:
Tim Chase 2014-10-02 07:40:05 -05:00 committed by Brian Behlendorf
parent 46c936756e
commit 802a4a2ad5
3 changed files with 57 additions and 25 deletions

View File

@ -199,4 +199,11 @@ fn ## _scan_objects(struct shrinker *shrink, struct shrink_control *sc) \
#error "Unknown shrinker callback"
#endif
#if defined(HAVE_SPLIT_SHRINKER_CALLBACK)
typedef unsigned long spl_shrinker_t;
#else
typedef int spl_shrinker_t;
#define SHRINK_STOP (-1)
#endif
#endif /* SPL_MM_COMPAT_H */

View File

@ -64,7 +64,7 @@ MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
* setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
* is reclaimed. This may increase the likelihood of out of memory events.
*/
unsigned int spl_kmem_cache_reclaim = 0;
unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
@ -2048,14 +2048,24 @@ EXPORT_SYMBOL(spl_kmem_cache_free);
* report that they contain unused objects. Because of this we only
* register one shrinker function in the shim layer for all slab caches.
* We always attempt to shrink all caches when this generic shrinker
* is called. The shrinker should return the number of free objects
* in the cache when called with nr_to_scan == 0 but not attempt to
* free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
* objects should be freed, which differs from Solaris semantics.
* Solaris semantics are to free all available objects which may (and
* probably will) be more objects than the requested nr_to_scan.
* is called.
*
* If sc->nr_to_scan is zero, the caller is requesting a query of the
* number of objects which can potentially be freed. If it is nonzero,
* the request is to free that many objects.
*
* Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
* in struct shrinker and also require the shrinker to return the number
* of objects freed.
*
* Older kernels require the shrinker to return the number of freeable
* objects remaining after freeing nr_to_scan objects.
*
* Linux semantics differ from those under Solaris, which are to
* free all available objects which may (and probably will) be more
* objects than the requested nr_to_scan.
*/
static int
static spl_shrinker_t
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
struct shrink_control *sc)
{
@ -2064,17 +2074,22 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
down_read(&spl_kmem_cache_sem);
list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
if (sc->nr_to_scan)
if (sc->nr_to_scan) {
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
uint64_t oldalloc = skc->skc_obj_alloc;
spl_kmem_cache_reap_now(skc,
MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
/*
 * Presume everything alloc'ed is reclaimable; this ensures
 * we are called again with nr_to_scan > 0 so we can try to
 * reclaim. The exact number is not important either, so
 * we forgo taking this already highly contended lock.
 */
alloc += skc->skc_obj_alloc;
if (oldalloc > skc->skc_obj_alloc)
alloc += oldalloc - skc->skc_obj_alloc;
#else
spl_kmem_cache_reap_now(skc,
MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
alloc += skc->skc_obj_alloc;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
} else {
/* Request to query number of freeable objects */
alloc += skc->skc_obj_alloc;
}
}
up_read(&spl_kmem_cache_sem);
@ -2085,7 +2100,7 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
* system to thrash.
*/
if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
return (-1);
return (SHRINK_STOP);
return (MAX(alloc, 0));
}
@ -2196,7 +2211,7 @@ spl_kmem_reap(void)
sc.nr_to_scan = KMC_REAP_CHUNK;
sc.gfp_mask = GFP_KERNEL;
__spl_kmem_cache_generic_shrinker(NULL, &sc);
(void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);

View File

@ -44,11 +44,13 @@ SPL_SHRINKER_DECLARE(splat_linux_shrinker, splat_linux_shrinker_fn, 1);
static unsigned long splat_linux_shrinker_size = 0;
static struct file *splat_linux_shrinker_file = NULL;
static int
static spl_shrinker_t
__splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
{
static int failsafe = 0;
static unsigned long last_splat_linux_shrinker_size = 0;
unsigned long size;
spl_shrinker_t count;
/*
* shrinker_size can only decrease or stay the same between callbacks
@ -61,13 +63,21 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
last_splat_linux_shrinker_size = splat_linux_shrinker_size;
if (sc->nr_to_scan) {
splat_linux_shrinker_size = splat_linux_shrinker_size -
MIN(sc->nr_to_scan, splat_linux_shrinker_size);
size = MIN(sc->nr_to_scan, splat_linux_shrinker_size);
splat_linux_shrinker_size -= size;
splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
"Reclaimed %lu objects, size now %lu\n",
sc->nr_to_scan, splat_linux_shrinker_size);
size, splat_linux_shrinker_size);
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
count = size;
#else
count = splat_linux_shrinker_size;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
} else {
count = splat_linux_shrinker_size;
splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
"Cache size is %lu\n", splat_linux_shrinker_size);
}
@ -77,7 +87,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
"Far more calls than expected (%d), size now %lu\n",
failsafe, splat_linux_shrinker_size);
return -1;
return (SHRINK_STOP);
} else {
/*
* We only increment failsafe if it doesn't trigger. This
@ -89,7 +99,7 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
/* Shrinker has run, so signal back to test. */
wake_up(&shrinker_wait);
return (int)splat_linux_shrinker_size;
return (count);
}
SPL_SHRINKER_CALLBACK_WRAPPER(splat_linux_shrinker_fn);