Avoid reloading bucket pointers in uma_vm_zone_stats().
The correctness of per-CPU cache accounting in that function is dependent on reading per-CPU pointers exactly once. Ensure that the compiler does not emit multiple loads of those pointers.

Reported and tested by:	pho
Reviewed by:	kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D22081
This commit is contained in:
parent
200abb43c0
commit
1de9724e55
@@ -4055,6 +4055,7 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
     struct uma_percpu_stat *ups, bool internal)
 {
 	uma_zone_domain_t zdom;
+	uma_bucket_t bucket;
 	uma_cache_t cache;
 	int i;
 
@@ -4068,28 +4069,29 @@ uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
 	uth->uth_fails = counter_u64_fetch(z->uz_fails);
 	uth->uth_sleeps = z->uz_sleeps;
 	uth->uth_xdomain = z->uz_xdomain;
 
 	/*
-	 * While it is not normally safe to access the cache
-	 * bucket pointers while not on the CPU that owns the
-	 * cache, we only allow the pointers to be exchanged
-	 * without the zone lock held, not invalidated, so
-	 * accept the possible race associated with bucket
-	 * exchange during monitoring.
+	 * While it is not normally safe to access the cache bucket pointers
+	 * while not on the CPU that owns the cache, we only allow the pointers
+	 * to be exchanged without the zone lock held, not invalidated, so
+	 * accept the possible race associated with bucket exchange during
+	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
+	 * are loaded only once.
 	 */
 	for (i = 0; i < mp_maxid + 1; i++) {
 		bzero(&ups[i], sizeof(*ups));
 		if (internal || CPU_ABSENT(i))
 			continue;
 		cache = &z->uz_cpu[i];
-		if (cache->uc_allocbucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_allocbucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
-		if (cache->uc_freebucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_freebucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
-		if (cache->uc_crossbucket != NULL)
-			ups[i].ups_cache_free +=
-			    cache->uc_crossbucket->ub_cnt;
+		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
+		if (bucket != NULL)
+			ups[i].ups_cache_free += bucket->ub_cnt;
 		ups[i].ups_allocs = cache->uc_allocs;
 		ups[i].ups_frees = cache->uc_frees;
 	}
Loading…
x
Reference in New Issue
Block a user