From ea4af9c09a7fa07fd84f459e8ce5ee9d2e9ec8dc Mon Sep 17 00:00:00 2001
From: Alexander Motin
Date: Tue, 24 Sep 2013 07:03:16 +0000
Subject: [PATCH] Make load average sampling asynchronous to hardclock ticks.

This improves measurement of load caused by time-related events that still
use hardclock.  For example, without this change dummynet, which schedules
events on each hardclock tick, was always miscounted as a load of 1.

There is still aliasing with events delayed by the new precision mechanism,
but that probably can't be avoided without moving this sampling from a
callout to some lower-level code, or handling it in some other special way.

Reviewed by:	davide
Approved by:	re (marius)
---
 sys/kern/kern_synch.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 0a400e9fe4e2..047fa46a630b 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -570,8 +570,8 @@ loadav(void *arg)
 	 * run at regular intervals.
 	 */
 	callout_reset_sbt(&loadav_callout,
-	    tick_sbt * (hz * 4 + (int)(random() % (hz * 2 + 1))), 0,
-	    loadav, NULL, C_DIRECT_EXEC | C_HARDCLOCK);
+	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
+	    loadav, NULL, C_DIRECT_EXEC | C_PREL(32));
 }
 
 /* ARGSUSED */
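
For context, below is a minimal sketch of the scheduling pattern the patch
switches to, assuming the standard FreeBSD callout(9) API.  The handler name
sample_handler, the callout variable sample_callout, and the sampling work
itself are illustrative placeholders, not part of the change.

/*
 * Illustrative sketch only: a self-rescheduling sampler that, like loadav()
 * after this patch, is no longer aligned to hardclock ticks.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/libkern.h>
#include <sys/time.h>

/* Assumes callout_init(&sample_callout, 1) was done once during setup. */
static struct callout sample_callout;

static void
sample_handler(void *arg)
{
	/* ... take the measurement here ... */

	/*
	 * The interval is built from SBT_1US (microseconds as sbintime_t)
	 * rather than tick_sbt * ticks, and C_HARDCLOCK is not passed, so
	 * firings are not aligned to hardclock.  The interval is randomized
	 * between 4 and 6 seconds; the explicit SBT_1US precision and the
	 * C_PREL(32) flag request tight precision instead of the default
	 * relative slop, limiting how far the callout may be deferred.
	 */
	callout_reset_sbt(&sample_callout,
	    SBT_1US * (4000000 + (int)(random() % 2000001)), SBT_1US,
	    sample_handler, NULL, C_DIRECT_EXEC | C_PREL(32));
}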