Provide counter_ratecheck(), an MP-friendly substitute for ppsratecheck().

When the rated event happens at a very high rate, ppsratecheck() is not
only racy, but also becomes a performance bottleneck.

Together with:	rrs, jtl
Gleb Smirnoff 2016-12-09 17:58:34 +00:00
parent ebecdad811
commit 169170209c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=309745
3 changed files with 82 additions and 1 deletion
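
For context, a minimal sketch of the call-site difference between the two
interfaces. The 100 events-per-second limit and send_reply() are
illustrative, and "cr" is assumed to be a struct counter_rate whose cr_rate
counter has already been allocated (see the header diff below):

#include <sys/param.h>
#include <sys/time.h>
#include <sys/counter.h>

static struct timeval lasttime;		/* ppsratecheck() state */
static int curpps;
static struct counter_rate cr;		/* counter_ratecheck() state */

extern void send_reply(void);		/* hypothetical consumer */

static void
maybe_send_reply(void)
{

	/* Traditional check: one shared timeval/int pair, racy under SMP. */
	if (ppsratecheck(&lasttime, &curpps, 100))
		send_reply();

	/* The MP-friendly replacement introduced by this commit. */
	if (counter_ratecheck(&cr, 100) >= 0)
		send_reply();
}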

share/man/man9/counter.9

@@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd March 14, 2016
.Dd December 8, 2016
.Dt COUNTER 9
.Os
.Sh NAME
@@ -51,6 +51,8 @@
.Fn counter_u64_fetch "counter_u64_t c"
.Ft void
.Fn counter_u64_zero "counter_u64_t c"
.Ft int64_t
.Fn counter_ratecheck "struct counter_rate *cr" "int64_t limit"
.In sys/sysctl.h
.Fn SYSCTL_COUNTER_U64 parent nbr name access ptr descr
.Fn SYSCTL_ADD_COUNTER_U64 ctx parent nbr name access ptr descr
@@ -128,6 +130,18 @@ value for any moment.
Clear the counter
.Fa c
and set it to zero.
.It Fn counter_ratecheck cr limit
The function is a multiprocessor-friendly version of
.Fn ppsratecheck
and uses
.Nm
internally.
It returns a non-negative value if the rate has not yet been reached during
the current second, and a negative value otherwise.
If the limit was reached during the previous second and the counter has just
been reset, then
.Fn counter_ratecheck
returns the number of events counted since the previous reset.
.It Fn SYSCTL_COUNTER_U64 parent nbr name access ptr descr
Declare a static
.Xr sysctl
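
The return-value contract described in the hunk above maps onto three
cases; a hedged, hypothetical wrapper (log_limited() is a placeholder, not
a real API):

#include <sys/types.h>
#include <sys/counter.h>

extern void log_limited(int64_t);	/* hypothetical placeholder */

/* Map the three documented return cases onto an allow/deny decision. */
static int
event_allowed(struct counter_rate *cr, int64_t limit)
{
	int64_t r;

	r = counter_ratecheck(cr, limit);
	if (r < 0)
		return (0);	/* Limit reached during the current second. */
	if (r > 0)		/* Limit was hit during the previous second; */
		log_limited(r);	/* r events were counted before the reset. */
	return (1);
}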

sys/kern/subr_counter.c

@@ -119,3 +119,57 @@ sysctl_handle_counter_u64_array(SYSCTL_HANDLER_ARGS)
return (0);
}

/*
 * MP-friendly version of ppsratecheck().
 *
 * Returns non-negative if we are within the rate, negative otherwise:
 *   0 - rate limit not reached.
 *  -1 - rate limit reached.
 *  >0 - rate limit was reached before, and was just reset.  The return
 *       value is the number of events since the last reset.
 */
int64_t
counter_ratecheck(struct counter_rate *cr, int64_t limit)
{
	int64_t val;
	int now;

	val = cr->cr_over;
	now = ticks;

	if (now - cr->cr_ticks >= hz) {
		/*
		 * Time to clear the structure; we are in the next second.
		 * First try an unlocked read, and then proceed with the
		 * atomic operation.
		 */
		if ((cr->cr_lock == 0) &&
		    atomic_cmpset_int(&cr->cr_lock, 0, 1)) {
			/*
			 * Check whether another thread has just gone
			 * through the reset sequence before us.
			 */
			if (now - cr->cr_ticks >= hz) {
				val = counter_u64_fetch(cr->cr_rate);
				counter_u64_zero(cr->cr_rate);
				cr->cr_over = 0;
				cr->cr_ticks = now;
			}
			atomic_store_rel_int(&cr->cr_lock, 0);
		} else
			/*
			 * We failed to lock; another thread may be running
			 * counter_u64_zero(), so it is not safe to update,
			 * and we skip it.
			 */
			return (val);
	}

	counter_u64_add(cr->cr_rate, 1);
	if (cr->cr_over != 0)
		return (-1);
	if (counter_u64_fetch(cr->cr_rate) > limit)
		val = cr->cr_over = -1;

	return (val);
}
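
The reset path is a double-checked pattern: a cheap unlocked read of
cr_lock filters out contention, the CAS elects a single resetting thread,
and the re-check of cr_ticks after taking the lock handles the case where
another thread completed the reset first. A rough userspace analogue with
C11 atomics, using wall-clock seconds instead of ticks/hz and a plain
atomic counter instead of counter(9) (illustrative only; unlike the kernel
version, it still counts the event when it loses the reset race):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct rate {
	atomic_llong	count;	/* events counted in the current second */
	atomic_int	lock;	/* 1 while one thread performs the reset */
	atomic_long	last;	/* wall-clock second of the last reset */
};

/* Return true while fewer than "limit" events were seen this second. */
static bool
rate_ok(struct rate *r, long long limit)
{
	long now = (long)time(NULL);
	int expected = 0;

	if (now != atomic_load(&r->last) && atomic_load(&r->lock) == 0 &&
	    atomic_compare_exchange_strong(&r->lock, &expected, 1)) {
		/* Re-check: another thread may have reset before us. */
		if (now != atomic_load(&r->last)) {
			atomic_store(&r->count, 0);
			atomic_store(&r->last, now);
		}
		atomic_store(&r->lock, 0);
	}
	return (atomic_fetch_add(&r->count, 1) < limit);
}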

sys/sys/counter.h

@@ -59,5 +59,18 @@ uint64_t counter_u64_fetch(counter_u64_t);
for (int i = 0; i < (n); i++) \
counter_u64_zero((a)[i]); \
} while (0)

/*
 * counter(9) based rate checking.
 */
struct counter_rate {
	counter_u64_t	cr_rate;	/* Events since last second */
	volatile int	cr_lock;	/* Lock to clean the struct */
	int		cr_ticks;	/* Ticks on last clean */
	int		cr_over;	/* Over limit since cr_ticks? */
};

int64_t	counter_ratecheck(struct counter_rate *, int64_t);
#endif /* _KERNEL */
#endif /* ! __SYS_COUNTER_H__ */
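
Note that cr_rate is a counter_u64_t and must be allocated before the
first counter_ratecheck() call. A minimal, hypothetical setup and teardown
using the standard counter(9) allocation API (the my_rate consumer is
illustrative):

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/counter.h>

static struct counter_rate my_rate;	/* hypothetical consumer state */

static void
my_rate_init(void)
{

	my_rate.cr_rate = counter_u64_alloc(M_WAITOK);
	my_rate.cr_ticks = ticks;	/* treat "now" as the current second */
}

static void
my_rate_fini(void)
{

	counter_u64_free(my_rate.cr_rate);
}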