From 1de9724e55161501923951439059e5a9fcf96463 Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Tue, 22 Oct 2019 14:20:06 +0000
Subject: [PATCH] Avoid reloading bucket pointers in uma_vm_zone_stats().

The correctness of per-CPU cache accounting in that function is
dependent on reading per-CPU pointers exactly once.  Ensure that the
compiler does not emit multiple loads of those pointers.

Reported and tested by:	pho
Reviewed by:	kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D22081
---
 sys/vm/uma_core.c | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index e0ccce0efac0..5d001fee4fea 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -4055,6 +4055,7 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
     struct uma_percpu_stat *ups, bool internal)
 {
 	uma_zone_domain_t zdom;
+	uma_bucket_t bucket;
 	uma_cache_t cache;
 	int i;
 
@@ -4068,28 +4069,29 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
 	uth->uth_sleeps = z->uz_sleeps;
 	uth->uth_xdomain = z->uz_xdomain;
+
 	/*
-	 * While it is not normally safe to access the cache
-	 * bucket pointers while not on the CPU that owns the
-	 * cache, we only allow the pointers to be exchanged
-	 * without the zone lock held, not invalidated, so
-	 * accept the possible race associated with bucket
-	 * exchange during monitoring.
+	 * While it is not normally safe to access the cache bucket pointers
+	 * while not on the CPU that owns the cache, we only allow the pointers
+	 * to be exchanged without the zone lock held, not invalidated, so
+	 * accept the possible race associated with bucket exchange during
+	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
+	 * are loaded only once.
 	 */
 	for (i = 0; i < mp_maxid + 1; i++) {
 		bzero(&ups[i], sizeof(*ups));
 		if (internal || CPU_ABSENT(i))
 			continue;
 		cache = &z->uz_cpu[i];
-		if (cache->uc_allocbucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_allocbucket->ub_cnt;
-		if (cache->uc_freebucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_freebucket->ub_cnt;
-		if (cache->uc_crossbucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_crossbucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
 		ups[i].ups_allocs = cache->uc_allocs;
 		ups[i].ups_frees = cache->uc_frees;
 	}
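
Note for readers of this patch: the sketch below is a minimal userland
analogue of the single-load pattern the change adopts, assuming C11
atomics in place of the kernel's atomic_load_ptr().  The names here
(struct bucket, shared_bucket, count_free) are invented for
illustration and do not appear in the patch.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct bucket {
	int cnt;
};

/* Shared pointer that another thread may swap at any time. */
static _Atomic(struct bucket *) shared_bucket;

/*
 * Load the pointer exactly once into a local, then test and
 * dereference the local.  Testing and dereferencing the shared
 * location directly would permit the compiler to emit two loads,
 * so a concurrent swap to NULL between them could be dereferenced.
 */
static int
count_free(void)
{
	struct bucket *b;

	b = atomic_load_explicit(&shared_bucket, memory_order_relaxed);
	if (b != NULL)
		return (b->cnt);
	return (0);
}

int
main(void)
{
	static struct bucket b0 = { .cnt = 42 };

	atomic_store(&shared_bucket, &b0);
	printf("%d\n", count_free());
	return (0);
}

The point mirrors the patch: the NULL check and the dereference must
observe the same pointer value, which a single atomic load into a
local guarantees and two plain loads of the shared location do not.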