Move the unlocking of the zone mutex in sysctl_vm_zone_stats() so that

it covers the dereferencing of the uc_allocbucket/uc_freebucket cache pointers.
Originally, I felt that holding the mutex did not help with the race,
hence a comment in the code and the decision not to hold it across the
cache access.
However, holding the mutex does improve consistency: while it doesn't
prevent bucket exchange, it does prevent bucket pointer invalidation.
So with the mutex held, a race in gathering cache free-space statistics
can still occur, but not one that follows an invalid bucket pointer.

Submitted by:	yongari
MFC after:	1 week
This commit is contained in:
Robert Watson 2005-07-16 09:40:34 +00:00
parent c329d70b9a
commit 2450bbb872

View File

@ -2986,17 +2986,19 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
uth.uth_allocs = z->uz_allocs;
uth.uth_frees = z->uz_frees;
uth.uth_fails = z->uz_fails;
ZONE_UNLOCK(z);
if (sbuf_bcat(&sbuf, &uth, sizeof(uth)) < 0) {
ZONE_UNLOCK(z);
mtx_unlock(&uma_mtx);
error = ENOMEM;
goto out;
}
/*
* XXXRW: Should not access bucket fields from
* non-local CPU. Instead need to modify the caches
* to directly maintain these statistics so we don't
* have to.
* While it is not normally safe to access the cache
* bucket pointers while not on the CPU that owns the
* cache, we only allow the pointers to be exchanged
* without the zone lock held, not invalidated, so
* accept the possible race associated with bucket
* exchange during monitoring.
*/
for (i = 0; i < MAXCPU; i++) {
bzero(&ups, sizeof(ups));
@ -3013,11 +3015,13 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
ups.ups_frees = cache->uc_frees;
skip:
if (sbuf_bcat(&sbuf, &ups, sizeof(ups)) < 0) {
ZONE_UNLOCK(z);
mtx_unlock(&uma_mtx);
error = ENOMEM;
goto out;
}
}
ZONE_UNLOCK(z);
}
}
mtx_unlock(&uma_mtx);