Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones.  Unlike page_alloc(), it does not
honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.

r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous.  Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed.  The rest are zeroed by
virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.

Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO.  Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.

Reported and tested by:	pho (previous version)
Reviewed by:	kib (previous version)
Differential Revision:	https://reviews.freebsd.org/D16190
parent bdea3adca6
commit 013072f04c
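Before the diff, a minimal kernel-style sketch of the behaviour the commit message describes. It is illustrative only and not part of the change: the function name zero_counter_example() is invented, while counter_u64_t, CPU_FOREACH(), zpcpu_get_cpu(), and mp_maxid are the kernel interfaces the commit actually touches. It contrasts a CPU_FOREACH() loop, which skips CPUs whose bits are not set in all_cpus and therefore zeroes nothing before SI_SUB_CPU, with a loop over the full CPU ID range, which is the approach the commit adopts.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/pcpu.h>
#include <sys/smp.h>

/*
 * Illustrative sketch only (not committed code): zero every per-CPU
 * slot of a counter during early boot.
 */
static void
zero_counter_example(counter_u64_t c)
{
	int cpu;

	/*
	 * CPU_FOREACH() visits only CPUs present in all_cpus.  Before
	 * SI_SUB_CPU that set is still clear, so this loop does nothing
	 * and the counter keeps whatever the allocator left behind.
	 */
	CPU_FOREACH(cpu)
		*(uint64_t *)zpcpu_get_cpu(c, cpu) = 0;

	/*
	 * Walking every possible CPU ID, as the commit does in
	 * uma_zalloc_pcpu_arg(), zeroes all slots regardless of whether
	 * all_cpus has been populated yet.
	 */
	for (cpu = 0; cpu <= mp_maxid; cpu++)
		*(uint64_t *)zpcpu_get_cpu(c, cpu) = 0;
}

With counter_u64_alloc() now passing M_ZERO to uma_zalloc_pcpu(), the full-range loop is what guarantees that counters allocated before SI_SUB_CPU start out zeroed.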
sys/kern/subr_counter.c

@@ -43,15 +43,6 @@ __FBSDID("$FreeBSD$");
 #define IN_SUBR_COUNTER_C
 #include <sys/counter.h>
 
-static void
-counter_u64_zero_sync(counter_u64_t c)
-{
-	int cpu;
-
-	CPU_FOREACH(cpu)
-		*(uint64_t*)zpcpu_get_cpu(c, cpu) = 0;
-}
-
 void
 counter_u64_zero(counter_u64_t c)
 {
@@ -69,13 +60,8 @@ counter_u64_fetch(counter_u64_t c)
 counter_u64_t
 counter_u64_alloc(int flags)
 {
-	counter_u64_t r;
-
-	r = uma_zalloc_pcpu(pcpu_zone_64, flags);
-	if (r != NULL)
-		counter_u64_zero_sync(r);
-
-	return (r);
+	return (uma_zalloc_pcpu(pcpu_zone_64, flags | M_ZERO));
 }
 
 void
sys/vm/uma_core.c

@@ -1186,13 +1186,12 @@ pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
 	struct pcpu *pc;
 #endif
 
-	TAILQ_INIT(&alloctail);
-	MPASS(bytes == (mp_maxid+1)*PAGE_SIZE);
-	*pflag = UMA_SLAB_KERNEL;
+	MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
 
+	TAILQ_INIT(&alloctail);
 	flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
-	    ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
-	    VM_ALLOC_NOWAIT);
+	    malloc2vm_flags(wait);
+	*pflag = UMA_SLAB_KERNEL;
 	for (cpu = 0; cpu <= mp_maxid; cpu++) {
 		if (CPU_ABSENT(cpu)) {
 			p = vm_page_alloc(NULL, 0, flags);
@@ -2328,10 +2327,10 @@ uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
 
 	MPASS(zone->uz_flags & UMA_ZONE_PCPU);
 #endif
-	item = uma_zalloc_arg(zone, udata, flags &~ M_ZERO);
+	item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
 	if (item != NULL && (flags & M_ZERO)) {
 #ifdef SMP
-		CPU_FOREACH(i)
+		for (i = 0; i <= mp_maxid; i++)
 			bzero(zpcpu_get_cpu(item, i), zone->uz_size);
 #else
 		bzero(item, zone->uz_size);