diff --git a/sys/amd64/include/counter.h b/sys/amd64/include/counter.h
index b37a4b836a79..b571e70769a9 100644
--- a/sys/amd64/include/counter.h
+++ b/sys/amd64/include/counter.h
@@ -36,6 +36,44 @@ extern struct pcpu __pcpu[1];
 #define counter_enter() do {} while (0)
 #define counter_exit() do {} while (0)
 
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, i) counter_u64_add(c, i)
 
 static inline void
diff --git a/sys/arm/include/counter.h b/sys/arm/include/counter.h
index 68f89e2cb459..a6dfa8ef875b 100644
--- a/sys/arm/include/counter.h
+++ b/sys/arm/include/counter.h
@@ -37,6 +37,46 @@
 #define counter_enter() critical_enter()
 #define counter_exit() critical_exit()
 
+#ifdef IN_SUBR_COUNTER_C
+/* XXXKIB non-atomic 64bit read */
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+/* XXXKIB non-atomic 64bit store, might interrupt increment */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         CRITICAL_ASSERT(curthread);             \
         *(uint64_t *)zpcpu_get(c) += (inc);     \
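The amd64 and arm hunks above share one shape: a counter is one 64-bit slot per CPU, placed at a fixed sizeof(struct pcpu) stride from the base pointer, and a fetch is a plain unlocked sum over mp_ncpus slots. The standalone C sketch below reproduces that layout and summation in userspace; NSLOTS, STRIDE, slot_ptr() and fetch() are names invented for the example, not identifiers from this patch.

/*
 * Minimal userspace sketch of the per-CPU counter layout: slot i lives
 * at base + STRIDE * i, and fetch() is an unsynchronized sum over all
 * slots, mirroring counter_u64_fetch_inline() above.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS  4               /* stand-in for mp_ncpus */
#define STRIDE  64              /* stand-in for sizeof(struct pcpu) */

static _Alignas(uint64_t) char slots[NSLOTS * STRIDE];

static inline uint64_t *
slot_ptr(void *base, int cpu)
{
        return ((uint64_t *)((char *)base + (size_t)STRIDE * cpu));
}

static uint64_t
fetch(void *base)
{
        uint64_t r;
        int i;

        r = 0;
        for (i = 0; i < NSLOTS; i++)
                r += *slot_ptr(base, i);
        return (r);
}

int
main(void)
{
        *slot_ptr(slots, 0) = 40;       /* pretend CPU 0 counted 40 */
        *slot_ptr(slots, 2) = 2;        /* and CPU 2 counted 2 */
        printf("%ju\n", (uintmax_t)fetch(slots));       /* prints 42 */
        return (0);
}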
diff --git a/sys/i386/include/counter.h b/sys/i386/include/counter.h
index 3e93b3621144..01b09bb47bd2 100644
--- a/sys/i386/include/counter.h
+++ b/sys/i386/include/counter.h
@@ -67,6 +67,93 @@ counter_64_inc_8b(uint64_t *p, int64_t inc)
             : "memory", "cc", "eax", "edx", "ebx", "ecx");
 }
 
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one_8b(uint64_t *p)
+{
+        uint32_t res_lo, res_high;
+
+        __asm __volatile(
+        "movl   %%eax,%%ebx\n\t"
+        "movl   %%edx,%%ecx\n\t"
+        "cmpxchg8b      (%2)"
+        : "=a" (res_lo), "=d"(res_high)
+        : "SD" (p)
+        : "cc", "ebx", "ecx");
+        return (res_lo + ((uint64_t)res_high << 32));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t res;
+        int i;
+
+        res = 0;
+        if ((cpu_feature & CPUID_CX8) == 0) {
+                /*
+                 * The machines without cmpxchg8b are not SMP.
+                 * Disabling the preemption provides atomicity of the
+                 * counter reading, since update is done in the
+                 * critical section as well.
+                 */
+                critical_enter();
+                for (i = 0; i < mp_ncpus; i++) {
+                        res += *(uint64_t *)((char *)p +
+                            sizeof(struct pcpu) * i);
+                }
+                critical_exit();
+        } else {
+                for (i = 0; i < mp_ncpus; i++)
+                        res += counter_u64_read_one_8b((uint64_t *)((char *)p +
+                            sizeof(struct pcpu) * i));
+        }
+        return (res);
+}
+
+static inline void
+counter_u64_zero_one_8b(uint64_t *p)
+{
+
+        __asm __volatile(
+        "movl   (%0),%%eax\n\t"
+        "movl   4(%0),%%edx\n"
+        "xorl   %%ebx,%%ebx\n\t"
+        "xorl   %%ecx,%%ecx\n\t"
+"1:\n\t"
+        "cmpxchg8b      (%0)\n\t"
+        "jnz    1b"
+        :
+        : "SD" (p)
+        : "memory", "cc", "eax", "edx", "ebx", "ecx");
+}
+
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+        uint64_t *p;
+
+        p = (uint64_t *)((char *)arg + sizeof(struct pcpu) * PCPU_GET(cpuid));
+        counter_u64_zero_one_8b(p);
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+        int i;
+
+        if ((cpu_feature & CPUID_CX8) == 0) {
+                critical_enter();
+                for (i = 0; i < mp_ncpus; i++)
+                        *(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
+                critical_exit();
+        } else {
+                smp_rendezvous(smp_no_rendevous_barrier,
+                    counter_u64_zero_one_cpu, smp_no_rendevous_barrier, c);
+        }
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         if ((cpu_feature & CPUID_CX8) == 0) {   \
                 CRITICAL_ASSERT(curthread);     \
diff --git a/sys/ia64/include/counter.h b/sys/ia64/include/counter.h
index 68f89e2cb459..a3fe871d9693 100644
--- a/sys/ia64/include/counter.h
+++ b/sys/ia64/include/counter.h
@@ -37,6 +37,45 @@
 #define counter_enter() critical_enter()
 #define counter_exit() critical_exit()
 
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+/* XXXKIB might interrupt increment */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         CRITICAL_ASSERT(curthread);             \
         *(uint64_t *)zpcpu_get(c) += (inc);     \
diff --git a/sys/kern/subr_counter.c b/sys/kern/subr_counter.c
index a98ed405072d..2656ed781030 100644
--- a/sys/kern/subr_counter.c
+++ b/sys/kern/subr_counter.c
@@ -29,34 +29,32 @@ __FBSDID("$FreeBSD$");
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/counter.h>
 #include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/sysctl.h>
 #include <vm/uma.h>
+
+#define IN_SUBR_COUNTER_C
+#include <sys/counter.h>
 
 static uma_zone_t uint64_pcpu_zone;
 
 void
 counter_u64_zero(counter_u64_t c)
 {
-        int i;
 
-        for (i = 0; i < mp_ncpus; i++)
-                *(uint64_t *)((char *)c + sizeof(struct pcpu) * i) = 0;
+        counter_u64_zero_inline(c);
 }
 
 uint64_t
 counter_u64_fetch(counter_u64_t c)
 {
-        uint64_t r;
-        int i;
 
-        r = 0;
-        for (i = 0; i < mp_ncpus; i++)
-                r += *(uint64_t *)((char *)c + sizeof(struct pcpu) * i);
-
-        return (r);
+        return (counter_u64_fetch_inline(c));
 }
 
 counter_u64_t
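The i386 version is the subtle one. On CPUs with cmpxchg8b, counter_u64_read_one_8b() performs a compare-and-swap of the location against its own contents: the exchange either succeeds by writing back the identical value or fails, and in both cases eax:edx ends up holding an atomic 64-bit snapshot. The zeroing side, counter_u64_zero_one_8b(), retries the same CAS in a loop until it wins, so a zero never silently overwrites a concurrent increment. A C11 analogue of the read-via-CAS idiom is sketched below; it is illustrative only (the kernel cannot use <stdatomic.h>), and read64_via_cas() is a name invented for the example.

#include <stdatomic.h>
#include <stdint.h>

static uint64_t
read64_via_cas(_Atomic uint64_t *p)
{
        uint64_t expected = 0;

        /*
         * If *p == 0 the CAS "succeeds" and stores 0, changing nothing;
         * otherwise it fails and loads the current value into expected.
         * Either way we return an atomic 64-bit snapshot of *p, which
         * is what cmpxchg8b gives counter_u64_read_one_8b() above.
         */
        atomic_compare_exchange_strong(p, &expected, 0);
        return (expected);
}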
diff --git a/sys/mips/include/counter.h b/sys/mips/include/counter.h
index 68f89e2cb459..193c98e0a4a2 100644
--- a/sys/mips/include/counter.h
+++ b/sys/mips/include/counter.h
@@ -37,6 +37,46 @@
 #define counter_enter() critical_enter()
 #define counter_exit() critical_exit()
 
+#ifdef IN_SUBR_COUNTER_C
+/* XXXKIB non-atomic 64bit read on 32bit */
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+/* XXXKIB non-atomic 64bit store on 32bit, might interrupt increment */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         CRITICAL_ASSERT(curthread);             \
         *(uint64_t *)zpcpu_get(c) += (inc);     \
diff --git a/sys/powerpc/include/counter.h b/sys/powerpc/include/counter.h
index 81b6bbcaba6a..e876f62e7a95 100644
--- a/sys/powerpc/include/counter.h
+++ b/sys/powerpc/include/counter.h
@@ -39,6 +39,44 @@
 #define counter_enter() do {} while (0)
 #define counter_exit() do {} while (0)
 
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, i) counter_u64_add(c, i)
 
 extern struct pcpu __pcpu[MAXCPU];
@@ -65,6 +103,46 @@ counter_u64_add(counter_u64_t c, int64_t inc)
 #define counter_enter() critical_enter()
 #define counter_exit() critical_exit()
 
+#ifdef IN_SUBR_COUNTER_C
+/* XXXKIB non-atomic 64bit read */
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+/* XXXKIB non-atomic 64bit store, might interrupt increment */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         CRITICAL_ASSERT(curthread);             \
         *(uint64_t *)zpcpu_get(c) += (inc);     \
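On mips and 32-bit powerpc the 64-bit loads and stores are not atomic, hence the XXXKIB notes, but the zeroing scheme still guarantees that no CPU ever stores to another CPU's slot: smp_rendezvous() runs counter_u64_zero_one_cpu() on every CPU, and each CPU clears only the slot selected by its own PCPU_GET(cpuid), so a plain store cannot race a remote increment. A userspace sketch of that shape, with pthreads standing in for CPUs (all names invented for the example; build with -pthread):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS  4               /* stand-in for mp_ncpus */
#define STRIDE  64              /* stand-in for sizeof(struct pcpu) */

static _Alignas(uint64_t) char slots[NSLOTS * STRIDE];

/* "Rendezvous" callback: zero only the caller's own slot. */
static void *
zero_my_slot(void *arg)
{
        int self = (int)(intptr_t)arg;  /* stand-in for PCPU_GET(cpuid) */

        *(uint64_t *)(slots + (size_t)STRIDE * self) = 0;
        return (NULL);
}

int
main(void)
{
        pthread_t tid[NSLOTS];
        int i;

        /* Like smp_rendezvous(): run the callback once per CPU, wait. */
        for (i = 0; i < NSLOTS; i++)
                pthread_create(&tid[i], NULL, zero_my_slot,
                    (void *)(intptr_t)i);
        for (i = 0; i < NSLOTS; i++)
                pthread_join(tid[i], NULL);
        printf("all %d slots zeroed by their owners\n", NSLOTS);
        return (0);
}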
diff --git a/sys/sparc64/include/counter.h b/sys/sparc64/include/counter.h
index 68f89e2cb459..a3fe871d9693 100644
--- a/sys/sparc64/include/counter.h
+++ b/sys/sparc64/include/counter.h
@@ -37,6 +37,45 @@
 #define counter_enter() critical_enter()
 #define counter_exit() critical_exit()
 
+#ifdef IN_SUBR_COUNTER_C
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+        return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+        uint64_t r;
+        int i;
+
+        r = 0;
+        for (i = 0; i < mp_ncpus; i++)
+                r += counter_u64_read_one((uint64_t *)p, i);
+
+        return (r);
+}
+
+/* XXXKIB might interrupt increment */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+        *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+            PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+        smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+            smp_no_rendevous_barrier, c);
+}
+#endif
+
 #define counter_u64_add_protected(c, inc) do {  \
         CRITICAL_ASSERT(curthread);             \
         *(uint64_t *)zpcpu_get(c) += (inc);     \
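Nothing changes for consumers: counter_u64_fetch() and counter_u64_zero() keep their counter(9) interface and merely dispatch to the per-arch inline versions above. A consumer-side sketch under that assumption (kernel context, error handling omitted; pkt_count and the example_* functions are hypothetical):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/malloc.h>

static counter_u64_t pkt_count;

static void
example_init(void)
{
        pkt_count = counter_u64_alloc(M_WAITOK);
}

static void
example_input(void)
{
        counter_u64_add(pkt_count, 1);  /* lockless per-CPU increment */
}

static uint64_t
example_report(void)
{
        uint64_t v;

        v = counter_u64_fetch(pkt_count);       /* per-arch inline sum */
        counter_u64_zero(pkt_count);            /* rendezvous on SMP */
        return (v);
}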