/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_callout_profiling.h"
#include "opt_kdtrace.h"
#if defined(__arm__)
#include "opt_timer.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

#ifndef NO_EVENTTIMERS
DPCPU_DECLARE(sbintime_t, hardclocktime);
#endif

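/*
 * DTrace probes fired immediately before and after each callout handler
 * is invoked.
 */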
SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

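/*
 * Callwheel statistics, collected only when the kernel is built with
 * CALLOUT_PROFILING and exported through the read-only sysctls below.
 */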
#ifdef CALLOUT_PROFILING
static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
static int avg_depth_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0,
    "Average number of direct callouts examined per callout_process call. "
    "Units = 1/1000");
static int avg_lockcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD,
    &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per "
    "callout_process call. Units = 1/1000");
static int avg_mpcalls_dir;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir,
    0, "Average number of MP direct callouts made per callout_process call. "
    "Units = 1/1000");
#endif

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
u_int callwheelsize, callwheelmask;

/*
 * The callout cpu exec entities represent the state of the callouts
 * currently running on each CPU and the information necessary for
 * migrating callouts to a new callout cpu. In particular, the first
 * entry of the array cc_exec_entity holds the state for the callout
 * running in SWI thread context, while the second one holds the state
 * for the callout running directly from hardware interrupt context.
 * The cached state is very important for deferring migration when the
 * migrating callout is already running.
 */
struct cc_exec {
	struct callout		*cc_next;
	struct callout		*cc_curr;
#ifdef SMP
	void			(*ce_migration_func)(void *);
	void			*ce_migration_arg;
	int			ce_migration_cpu;
	sbintime_t		ce_migration_time;
#endif
	boolean_t		cc_cancel;
	boolean_t		cc_waiting;
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;	/* Protects this CPU's callout state. */
	struct cc_exec 		cc_exec_entity[2];
	struct callout		*cc_callout;	/* Preallocated timeout() callouts. */
	struct callout_list	*cc_callwheel;	/* Hash wheel of pending callouts. */
	struct callout_tailq	cc_expireq;	/* Expired callouts awaiting the SWI. */
	struct callout_slist	cc_callfree;	/* Free list for timeout() callouts. */
	sbintime_t		cc_firstevent;	/* Time of the next scheduled event. */
	sbintime_t		cc_lastscan;	/* Time of the last callwheel scan. */
	void			*cc_cookie;	/* SWI cookie returned by swi_add(). */
	u_int			cc_bucket;	/* Callwheel bucket being processed. */
};
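/*
 * The first cc_exec_entity element tracks the callout handled in SWI
 * thread context; the second tracks the callout dispatched directly
 * from hardware interrupt context.
 */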
#define	cc_exec_curr		cc_exec_entity[0].cc_curr
#define	cc_exec_next		cc_exec_entity[0].cc_next
#define	cc_exec_cancel		cc_exec_entity[0].cc_cancel
#define	cc_exec_waiting		cc_exec_entity[0].cc_waiting
#define	cc_exec_curr_dir	cc_exec_entity[1].cc_curr
#define	cc_exec_next_dir	cc_exec_entity[1].cc_next
#define	cc_exec_cancel_dir	cc_exec_entity[1].cc_cancel
#define	cc_exec_waiting_dir	cc_exec_entity[1].cc_waiting

#ifdef SMP
#define	cc_migration_func	cc_exec_entity[0].ce_migration_func
#define	cc_migration_arg	cc_exec_entity[0].ce_migration_arg
#define	cc_migration_cpu	cc_exec_entity[0].ce_migration_cpu
#define	cc_migration_time	cc_exec_entity[0].ce_migration_time
#define	cc_migration_func_dir	cc_exec_entity[1].ce_migration_func
#define	cc_migration_arg_dir	cc_exec_entity[1].ce_migration_arg
#define	cc_migration_cpu_dir	cc_exec_entity[1].ce_migration_cpu
#define	cc_migration_time_dir	cc_exec_entity[1].ce_migration_time

struct callout_cpu cc_cpu[MAXCPU];
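/* Sentinel CPU id marking a callout whose CPU binding is in flux. */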
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

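/* CPU that owns the legacy timeout() callwheel; set to the boot CPU. */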
static int timeout_cpu;
static void softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
		    int *mpcalls, int *lockcalls, int *gcalls,
#endif
		    int direct);
static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");
/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and cc_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     cc_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the execution entity tied to a specific callout cpu.
 */
static void
cc_cce_cleanup(struct callout_cpu *cc, int direct)
{

	cc->cc_exec_entity[direct].cc_curr = NULL;
	cc->cc_exec_entity[direct].cc_next = NULL;
	cc->cc_exec_entity[direct].cc_cancel = FALSE;
	cc->cc_exec_entity[direct].cc_waiting = FALSE;
#ifdef SMP
	cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK;
	cc->cc_exec_entity[direct].ce_migration_time = 0;
	cc->cc_exec_entity[direct].ce_migration_func = NULL;
	cc->cc_exec_entity[direct].ce_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cce_migrating(struct callout_cpu *cc, int direct)
{

#ifdef SMP
	return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate the size of the callout wheel: the next power of two
	 * higher than 'ncallout' (e.g. ncallout == 1000 gives a wheel of
	 * 1024 buckets).
	 */
	callwheelsize = 1 << fls(ncallout);
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_list *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++)
		LIST_INIT(&cc->cc_callwheel[i]);
	TAILQ_INIT(&cc->cc_expireq);
	cc->cc_firstevent = INT64_MAX;
	for (i = 0; i < 2; i++)
		cc_cce_cleanup(cc, i);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may need to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_list) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

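/* Start the softclock threads once software interrupts can be registered. */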
SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
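/*
 * sbintime_t is a 32.32 fixed-point time, so shifting an expiration time
 * right by (32 - CC_HASH_SHIFT) buckets callouts with a granularity of
 * 1/2^CC_HASH_SHIFT of a second (1/256 s here) before masking with
 * callwheelmask.
 */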
#define CC_HASH_SHIFT 8

static inline u_int
callout_hash(sbintime_t sbt)
{

	return (sbt >> (32 - CC_HASH_SHIFT));
}

static inline u_int
callout_get_bucket(sbintime_t sbt)
{

	return (callout_hash(sbt) & callwheelmask);
}

void
callout_process(sbintime_t now)
{
	struct callout *tmp, *tmpn;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t first, last, max, tmp_max;
	uint32_t lookahead;
	u_int firstb, lastb, nowb;
#ifdef CALLOUT_PROFILING
	int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0;
#endif

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);

	/* Compute the buckets of the last scan and present times. */
	firstb = callout_hash(cc->cc_lastscan);
	cc->cc_lastscan = now;
	nowb = callout_hash(now);
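
	/*
	 * Heuristic (a likely rationale, not stated in the source): the
	 * more buckets have passed since the last scan, the more idle
	 * the CPU appears to be, so the further ahead we look for
	 * events that can be aggregated into a single wakeup.
	 */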
	/* Compute the last bucket and minimum time of the bucket after it. */
	if (nowb == firstb)
		lookahead = (SBT_1S / 16);
	else if (nowb - firstb == 1)
		lookahead = (SBT_1S / 8);
	else
		lookahead = (SBT_1S / 2);
	first = last = now;
	first += (lookahead / 2);
	last += lookahead;
	last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT));
	lastb = callout_hash(last) - 1;
	max = last;

	/*
	 * Check if we wrapped around the entire wheel from the last scan.
	 * If so, we need to scan the entire wheel for pending callouts.
	 */
	if (lastb - firstb >= callwheelsize) {
		lastb = firstb + callwheelsize - 1;
		if (nowb - firstb >= callwheelsize)
			nowb = lastb;
	}

	/* Iterate callwheel from firstb to nowb and then up to lastb. */
	do {
		sc = &cc->cc_callwheel[firstb & callwheelmask];
		tmp = LIST_FIRST(sc);
		while (tmp != NULL) {
			/* Run the callout if the present time is within its window. */
			if (tmp->c_time <= now) {
				/*
				 * Consumer told us the callout may be run
				 * directly from hardware interrupt context.
				 */
				if (tmp->c_flags & CALLOUT_DIRECT) {
#ifdef CALLOUT_PROFILING
					++depth_dir;
#endif
					cc->cc_exec_next_dir =
					    LIST_NEXT(tmp, c_links.le);
					cc->cc_bucket = firstb & callwheelmask;
					LIST_REMOVE(tmp, c_links.le);
					softclock_call_cc(tmp, cc,
#ifdef CALLOUT_PROFILING
					    &mpcalls_dir, &lockcalls_dir, NULL,
#endif
					    1);
					tmp = cc->cc_exec_next_dir;
				} else {
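					/*
					 * Not marked for direct dispatch;
					 * queue the callout for the
					 * softclock SWI thread instead.
					 */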
					tmpn = LIST_NEXT(tmp, c_links.le);
					LIST_REMOVE(tmp, c_links.le);
					TAILQ_INSERT_TAIL(&cc->cc_expireq,
					    tmp, c_links.tqe);
					tmp->c_flags |= CALLOUT_PROCESSED;
					tmp = tmpn;
				}
				continue;
			}
			/* Skip events from the distant future. */
			if (tmp->c_time >= max)
				goto next;
			/*
			 * The event's earliest time is later than the
			 * present maximal time, so it cannot be aggregated.
			 */
			if (tmp->c_time > last) {
				lastb = nowb;
				goto next;
			}
			/* Update first and last time, respecting this event. */
			if (tmp->c_time < first)
				first = tmp->c_time;
			tmp_max = tmp->c_time + tmp->c_precision;
			if (tmp_max < last)
				last = tmp_max;
next:
			tmp = LIST_NEXT(tmp, c_links.le);
		}
		/* Proceed with the next bucket. */
		firstb++;
		/*
		 * Stop if we looked past the present time and found some
		 * event we cannot execute right now.  Also stop if we
		 * looked far enough into the future.
		 */
	} while (((int)(firstb - lastb)) <= 0);
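
	/*
	 * Remember when the next timer interrupt is scheduled (used by
	 * callout_cc_add() to decide whether re-arming is needed) and,
	 * when eventtimers(4) drive the callwheel, pass the aggregation
	 * window [first, last] to the per-CPU event timer code.
	 */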
	cc->cc_firstevent = last;
#ifndef NO_EVENTTIMERS
	cpu_new_callout(curcpu, last, first);
#endif
#ifdef CALLOUT_PROFILING
	avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8;
	avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8;
	avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8;
#endif
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; that would be an incorrect lock order.
	 */
	if (!TAILQ_EMPTY(&cc->cc_expireq))
		swi_sched(cc->cc_cookie, 0);
}
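
/*
 * Lock the callout_cpu that currently owns 'c'.  The owner can change
 * while the callout migrates between CPUs, so retry until the CPU
 * read before taking the lock still matches once the lock is held.
 */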
static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}
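
/*
 * Insert a callout into the wheel of an already locked callout_cpu.
 * 'sbt' is the absolute expiration time and 'precision' the slack the
 * consumer tolerates; together they bound when the handler may run.
 */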
static void
callout_cc_add(struct callout *c, struct callout_cpu *cc,
    sbintime_t sbt, sbintime_t precision, void (*func)(void *),
    void *arg, int cpu, int flags)
{
	int bucket;

	CC_LOCK_ASSERT(cc);
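
	/*
	 * Clamp to the last scanned time: a bucket behind cc_lastscan
	 * would not be rescanned until the wheel wraps around.
	 */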
	if (sbt < cc->cc_lastscan)
		sbt = cc->cc_lastscan;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	if (flags & C_DIRECT_EXEC)
		c->c_flags |= CALLOUT_DIRECT;
	c->c_flags &= ~CALLOUT_PROCESSED;
	c->c_func = func;
	c->c_time = sbt;
	c->c_precision = precision;
	bucket = callout_get_bucket(c->c_time);
	CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x",
	    c, (int)(c->c_precision >> 32),
	    (u_int)(c->c_precision & 0xffffffff));
	LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le);
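	/*
	 * If callout_process() is scanning this bucket right now, make
	 * the new entry visible to the scan via cc_exec_next_dir.
	 */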
	if (cc->cc_bucket == bucket)
		cc->cc_exec_next_dir = c;
#ifndef NO_EVENTTIMERS
	/*
	 * Inform the eventtimers(4) subsystem there's a new callout
	 * that has been inserted, but only if really required.
	 */
	sbt = c->c_time + c->c_precision;
	if (sbt < cc->cc_firstevent) {
		cc->cc_firstevent = sbt;
		cpu_new_callout(cpu, sbt, c->c_time);
	}
#endif
}

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
		return;
	c->c_func = NULL;
	SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}
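
/*
 * Execute one callout: call the handler with the consumer's lock held
 * (if any), then resolve races with callout_stop()/callout_drain()
 * and complete any deferred CPU migration.
 */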
static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc,
#ifdef CALLOUT_PROFILING
    int *mpcalls, int *lockcalls, int *gcalls,
#endif
    int direct)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int flags, new_cpu;
	sbintime_t new_time;
#endif
#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
	sbintime_t bt1, bt2;
	struct timespec ts2;
	static sbintime_t maxdt = 2 * SBT_1MS;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
	    (CALLOUT_PENDING | CALLOUT_ACTIVE),
	    ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_exec_entity[direct].cc_curr = c;
	cc->cc_exec_entity[direct].cc_cancel = FALSE;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_exec_entity[direct].cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_exec_entity[direct].cc_cancel = TRUE;
		if (c_lock == &Giant.lock_object) {
#ifdef CALLOUT_PROFILING
			(*gcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
#ifdef CALLOUT_PROFILING
			(*lockcalls)++;
#endif
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
#ifdef CALLOUT_PROFILING
		(*mpcalls)++;
#endif
		CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	bt1 = sbinuptime();
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
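	/*
	 * Time the handler and warn about expensive functions: report a
	 * handler that runs longer than the worst case seen so far,
	 * suppressing repeats of the same function unless it at least
	 * doubles its previous record.
	 */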
	bt2 = sbinuptime();
	bt2 -= bt1;
	if (bt2 > maxdt) {
		if (lastfunc != c_func || bt2 > maxdt * 2) {
			ts2 = sbttots(bt2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr"));
	cc->cc_exec_entity[direct].cc_curr = NULL;
	if (cc->cc_exec_entity[direct].cc_waiting) {
		/*
		 * There is someone waiting for the callout to complete.
		 * If the callout was scheduled for migration, just
		 * cancel it.
		 */
		if (cc_cce_migrating(cc, direct)) {
			cc_cce_cleanup(cc, direct);

			/*
			 * It should be asserted here that the callout is
			 * not destroyed, but that is not easy.
			 */
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
		}
		cc->cc_exec_entity[direct].cc_waiting = FALSE;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_exec_entity[direct].cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cce_migrating(cc, direct)) {
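		/*
		 * The callout was rescheduled to another CPU while its
		 * handler was running; perform the deferred migration.
		 */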
		KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
		    ("Migrating legacy callout %p", c));
#ifdef SMP
		/*
		 * If the callout was scheduled for migration, just
		 * perform it now.
		 */
		new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu;
		new_time = cc->cc_exec_entity[direct].ce_migration_time;
		new_func = cc->cc_exec_entity[direct].ce_migration_func;
		new_arg = cc->cc_exec_entity[direct].ce_migration_arg;
		cc_cce_cleanup(cc, direct);
|
2012-05-03 20:00:30 +00:00
|
|
|
|
|
|
|
/*
|
2012-12-05 22:32:12 +00:00
|
|
|
* It should be asserted here that the callout is not destroyed
|
|
|
|
* but that is not easy.
|
|
|
|
*
|
|
|
|
* As a first step, handle deferred callout stops.
|
2012-05-03 20:00:30 +00:00
|
|
|
*/
|
|
|
|
if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
|
|
|
|
CTR3(KTR_CALLOUT,
|
|
|
|
"deferred cancelled %p func %p arg %p",
|
|
|
|
c, new_func, new_arg);
|
|
|
|
callout_cc_del(c, cc);
|
2012-12-05 19:02:22 +00:00
|
|
|
return;
|
2012-05-03 20:00:30 +00:00
|
|
|
}
|
|
|
|
c->c_flags &= ~CALLOUT_DFRMIGRATION;
|
|
|
|
|
|
|
|
new_cc = callout_cpu_switch(c, cc, new_cpu);
|
2013-03-04 11:09:56 +00:00
|
|
|
flags = (direct) ? C_DIRECT_EXEC : 0;
|
|
|
|
callout_cc_add(c, new_cc, new_time, c->c_precision, new_func,
|
|
|
|
new_arg, new_cpu, flags);
|
2012-05-03 20:00:30 +00:00
|
|
|
CC_UNLOCK(new_cc);
|
|
|
|
CC_LOCK(cc);
|
|
|
|
#else
|
|
|
|
panic("migration should not happen");
|
|
|
|
#endif
|
|
|
|
}
|
2012-12-05 19:02:22 +00:00
|
|
|
/*
|
|
|
|
* If the current callout is locally allocated (from
|
|
|
|
* timeout(9)) then put it on the freelist.
|
|
|
|
*
|
|
|
|
* Note: we need to check the cached copy of c_flags because
|
|
|
|
* if it was not local, then it's not safe to deref the
|
|
|
|
* callout pointer.
|
|
|
|
*/
|
|
|
|
KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
|
|
|
|
c->c_flags == CALLOUT_LOCAL_ALLOC,
|
|
|
|
("corrupted callout"));
|
2012-12-05 22:32:12 +00:00
|
|
|
if (c_flags & CALLOUT_LOCAL_ALLOC)
|
|
|
|
callout_cc_del(c, cc);
|
2012-05-03 20:00:30 +00:00
|
|
|
}
|
|
|
|
|
1997-09-21 22:00:25 +00:00
|
|
|
/*
|
2013-02-28 16:22:49 +00:00
|
|
|
* The callout mechanism is based on the work of Adam M. Costello and
|
1997-09-21 22:00:25 +00:00
|
|
|
* George Varghese, published in a technical report entitled "Redesigning
|
|
|
|
* the BSD Callout and Timer Facilities" and modified slightly for inclusion
|
|
|
|
* in FreeBSD by Justin T. Gibbs. The original work on the data structures
|
2004-04-25 04:10:17 +00:00
|
|
|
* used in this implementation was published by G. Varghese and T. Lauck in
|
1997-09-21 22:00:25 +00:00
|
|
|
* the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
|
|
|
|
* the Efficient Implementation of a Timer Facility" in the Proceedings of
|
|
|
|
* the 11th ACM Annual Symposium on Operating Systems Principles,
|
|
|
|
* Austin, Texas Nov 1987.
|
|
|
|
*/
|
1998-01-10 13:16:26 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Software (low priority) clock interrupt.
|
|
|
|
* Run periodic events from timeout queue.
|
|
|
|
*/
|
|
|
|
void
|
2008-04-02 11:20:30 +00:00
|
|
|
softclock(void *arg)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2008-04-02 11:20:30 +00:00
|
|
|
struct callout_cpu *cc;
|
2002-09-04 20:05:00 +00:00
|
|
|
struct callout *c;
|
2013-03-04 11:09:56 +00:00
|
|
|
#ifdef CALLOUT_PROFILING
|
|
|
|
int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0;
|
|
|
|
#endif
|
|
|
|
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = (struct callout_cpu *)arg;
|
|
|
|
CC_LOCK(cc);
|
2013-03-04 11:09:56 +00:00
|
|
|
while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) {
|
|
|
|
TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
|
|
|
|
softclock_call_cc(c, cc,
|
|
|
|
#ifdef CALLOUT_PROFILING
|
|
|
|
&mpcalls, &lockcalls, &gcalls,
|
|
|
|
#endif
|
|
|
|
0);
|
|
|
|
#ifdef CALLOUT_PROFILING
|
|
|
|
++depth;
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2013-03-04 11:09:56 +00:00
|
|
|
#ifdef CALLOUT_PROFILING
|
2003-06-04 05:25:58 +00:00
|
|
|
avg_depth += (depth * 1000 - avg_depth) >> 8;
|
|
|
|
avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
|
2007-11-20 00:37:45 +00:00
|
|
|
avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
|
2003-06-04 05:25:58 +00:00
|
|
|
avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
|
2013-03-04 11:09:56 +00:00
|
|
|
#endif
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
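An aside on the CALLOUT_PROFILING statistics above (an annotation of ours, not part of the original source): the avg_* updates are fixed-point exponential moving averages. Each per-run sample is scaled by 1000 and blended in with weight 1/256, so avg_depth / 1000 approximates the recent mean number of callouts serviced per softclock() run. A minimal sketch of the same update rule, with a helper name of our own choosing:

static int
ema_update(int avg, int sample)
{

	/* avg += (sample * 1000 - avg) / 256, in integer arithmetic */
	return (avg + ((sample * 1000 - avg) >> 8));
}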
|
|
|
|
|
|
|
|
/*
|
|
|
|
* timeout --
|
|
|
|
* Execute a function after a specified length of time.
|
|
|
|
*
|
|
|
|
* untimeout --
|
|
|
|
* Cancel previous timeout function call.
|
|
|
|
*
|
1997-09-21 22:00:25 +00:00
|
|
|
* callout_handle_init --
|
|
|
|
* Initialize a handle so that using it with untimeout is benign.
|
|
|
|
*
|
1994-05-24 10:09:53 +00:00
|
|
|
* See AT&T BCI Driver Reference Manual for specification. This
|
2013-03-03 09:11:24 +00:00
|
|
|
* implementation differs from that one in that although an
|
1997-09-21 22:00:25 +00:00
|
|
|
* identification value is returned from timeout, the original
|
|
|
|
* arguments to timeout as well as the identifier are used to
|
|
|
|
* identify entries for untimeout.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
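A hedged consumer-side sketch of the legacy interface described above (the handler and variable names are hypothetical, not part of this file). Note that untimeout() needs both the handle and the original function/argument pair:

static struct callout_handle expire_handle;

static void
expire_fn(void *arg)
{

	/* Runs from softclock() roughly hz ticks (about one second) later. */
}

static void
expire_attach(void)
{

	/* Make a never-armed handle safe to pass to untimeout(). */
	callout_handle_init(&expire_handle);
}

static void
expire_arm(void *softc)
{

	expire_handle = timeout(expire_fn, softc, hz);
}

static void
expire_disarm(void *softc)
{

	untimeout(expire_fn, softc, expire_handle);
}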
|
1997-09-21 22:00:25 +00:00
|
|
|
struct callout_handle
|
|
|
|
timeout(ftn, arg, to_ticks)
|
1998-02-25 06:13:32 +00:00
|
|
|
timeout_t *ftn;
|
1994-05-24 10:09:53 +00:00
|
|
|
void *arg;
|
2000-11-25 06:22:16 +00:00
|
|
|
int to_ticks;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2008-04-02 11:20:30 +00:00
|
|
|
struct callout_cpu *cc;
|
1997-09-21 22:00:25 +00:00
|
|
|
struct callout *new;
|
|
|
|
struct callout_handle handle;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = CC_CPU(timeout_cpu);
|
|
|
|
CC_LOCK(cc);
|
1994-05-24 10:09:53 +00:00
|
|
|
/* Fill in the next free callout structure. */
|
2008-04-02 11:20:30 +00:00
|
|
|
new = SLIST_FIRST(&cc->cc_callfree);
|
1997-09-21 22:00:25 +00:00
|
|
|
if (new == NULL)
|
|
|
|
/* XXX Attempt to malloc first */
|
1994-05-24 10:09:53 +00:00
|
|
|
panic("timeout table full");
|
2008-04-02 11:20:30 +00:00
|
|
|
SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
|
1999-03-06 04:46:20 +00:00
|
|
|
callout_reset(new, to_ticks, ftn, arg);
|
1997-09-21 22:00:25 +00:00
|
|
|
handle.callout = new;
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
|
|
|
|
1997-09-21 22:00:25 +00:00
|
|
|
return (handle);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
1997-09-21 22:00:25 +00:00
|
|
|
untimeout(ftn, arg, handle)
|
1998-02-25 06:13:32 +00:00
|
|
|
timeout_t *ftn;
|
1994-05-24 10:09:53 +00:00
|
|
|
void *arg;
|
1997-09-21 22:00:25 +00:00
|
|
|
struct callout_handle handle;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2008-04-02 11:20:30 +00:00
|
|
|
struct callout_cpu *cc;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-09-21 22:00:25 +00:00
|
|
|
/*
|
|
|
|
* Check for a handle that was initialized
|
|
|
|
* by callout_handle_init, but never used
|
|
|
|
* for a real timeout.
|
|
|
|
*/
|
|
|
|
if (handle.callout == NULL)
|
|
|
|
return;
|
|
|
|
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = callout_lock(handle.callout);
|
1999-03-06 04:46:20 +00:00
|
|
|
if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
|
|
|
|
callout_stop(handle.callout);
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1997-09-21 22:00:25 +00:00
|
|
|
void
|
|
|
|
callout_handle_init(struct callout_handle *handle)
|
|
|
|
{
|
|
|
|
handle->callout = NULL;
|
|
|
|
}
|
|
|
|
|
1999-03-06 04:46:20 +00:00
|
|
|
/*
|
|
|
|
* New interface; clients allocate their own callout structures.
|
|
|
|
*
|
|
|
|
* callout_reset() - establish or change a timeout
|
|
|
|
* callout_stop() - disestablish a timeout
|
|
|
|
* callout_init() - initialize a callout structure so that it can
|
|
|
|
* safely be passed to callout_reset() and callout_stop()
|
|
|
|
*
|
1999-08-30 21:17:07 +00:00
|
|
|
* <sys/callout.h> defines three convenience macros:
|
1999-03-06 04:46:20 +00:00
|
|
|
*
|
2005-01-19 19:46:35 +00:00
|
|
|
* callout_active() - returns truth if callout has not been stopped,
|
|
|
|
* drained, or deactivated since the last time the callout was
|
|
|
|
* reset.
|
1999-08-30 21:17:07 +00:00
|
|
|
* callout_pending() - returns truth if callout is still waiting for timeout
|
|
|
|
* callout_deactivate() - marks the callout as having been serviced
|
1999-03-06 04:46:20 +00:00
|
|
|
*/
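A hedged sketch of the self-managed interface summarized above (consumer names are hypothetical): initialize once, arm with callout_reset(), mark the callout serviced inside the handler so that callout_active() reads false, and disestablish with callout_stop():

static struct callout watchdog_co;

static void
watchdog_expire(void *arg)
{

	/* Serviced: callout_active() is false until the next reset. */
	callout_deactivate(&watchdog_co);
	/* ... react to the timeout ... */
}

static void
watchdog_attach(void *softc)
{

	callout_init(&watchdog_co, 1);	/* 1 == MP-safe handler */
	callout_reset(&watchdog_co, 5 * hz, watchdog_expire, softc);
}

static void
watchdog_detach(void)
{

	callout_stop(&watchdog_co);	/* cancel if still pending */
}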
|
2005-09-08 14:20:39 +00:00
|
|
|
int
|
2013-03-04 11:09:56 +00:00
|
|
|
callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision,
|
|
|
|
void (*ftn)(void *), void *arg, int cpu, int flags)
|
1999-03-06 04:46:20 +00:00
|
|
|
{
|
2013-03-04 11:09:56 +00:00
|
|
|
sbintime_t to_sbt, pr;
|
2008-04-02 11:20:30 +00:00
|
|
|
struct callout_cpu *cc;
|
2013-03-04 11:09:56 +00:00
|
|
|
int cancelled, direct;
|
1999-03-06 04:46:20 +00:00
|
|
|
|
2013-03-04 11:09:56 +00:00
|
|
|
cancelled = 0;
|
|
|
|
if (flags & C_ABSOLUTE) {
|
|
|
|
to_sbt = sbt;
|
|
|
|
} else {
|
|
|
|
if ((flags & C_HARDCLOCK) && (sbt < tick_sbt))
|
|
|
|
sbt = tick_sbt;
|
|
|
|
if ((flags & C_HARDCLOCK) ||
|
|
|
|
#ifdef NO_EVENTTIMERS
|
|
|
|
sbt >= sbt_timethreshold) {
|
|
|
|
to_sbt = getsbinuptime();
|
|
|
|
|
|
|
|
/* Add safety belt for the case of hz > 1000. */
|
|
|
|
to_sbt += tc_tick_sbt - tick_sbt;
|
|
|
|
#else
|
|
|
|
sbt >= sbt_tickthreshold) {
|
|
|
|
/*
|
|
|
|
* Obtain the time of the last hardclock() call on
|
|
|
|
* this CPU directly from kern_clocksource.c.
|
|
|
|
* This value is per-CPU, but it is equal for all
|
|
|
|
* active ones.
|
|
|
|
*/
|
|
|
|
#ifdef __LP64__
|
|
|
|
to_sbt = DPCPU_GET(hardclocktime);
|
|
|
|
#else
|
|
|
|
spinlock_enter();
|
|
|
|
to_sbt = DPCPU_GET(hardclocktime);
|
|
|
|
spinlock_exit();
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
if ((flags & C_HARDCLOCK) == 0)
|
|
|
|
to_sbt += tick_sbt;
|
|
|
|
} else
|
|
|
|
to_sbt = sbinuptime();
|
|
|
|
to_sbt += sbt;
|
|
|
|
pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp :
|
|
|
|
sbt >> C_PRELGET(flags));
|
|
|
|
if (pr > precision)
|
|
|
|
precision = pr;
|
|
|
|
}
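/*
 * Worked example (an annotation of ours, not original code): with a
 * relative sbt of 50 ms and flags containing C_PREL(3), the block
 * above computes pr = sbt >> 3 = 6.25 ms and then raises 'precision'
 * to pr if the caller asked for something tighter; with no C_PREL()
 * value the default tolerance is sbt >> tc_precexp.
 */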
|
2008-04-02 11:20:30 +00:00
|
|
|
/*
|
|
|
|
* Don't allow migration of pre-allocated callouts lest they
|
|
|
|
* become unbalanced.
|
|
|
|
*/
|
|
|
|
if (c->c_flags & CALLOUT_LOCAL_ALLOC)
|
|
|
|
cpu = c->c_cpu;
|
2013-03-04 11:09:56 +00:00
|
|
|
direct = (c->c_flags & CALLOUT_DIRECT) != 0;
|
|
|
|
KASSERT(!direct || c->c_lock == NULL,
|
|
|
|
("%s: direct callout %p has lock", __func__, c));
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = callout_lock(c);
|
2013-03-04 11:09:56 +00:00
|
|
|
if (cc->cc_exec_entity[direct].cc_curr == c) {
|
2004-04-06 23:08:49 +00:00
|
|
|
/*
|
|
|
|
* We're being asked to reschedule a callout which is
|
2007-11-20 00:37:45 +00:00
|
|
|
* currently in progress. If there is a lock then we
|
2005-02-07 02:47:33 +00:00
|
|
|
* can cancel the callout if it has not really started.
|
2004-04-06 23:08:49 +00:00
|
|
|
*/
|
2013-03-04 11:09:56 +00:00
|
|
|
if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel)
|
|
|
|
cancelled = cc->cc_exec_entity[direct].cc_cancel = TRUE;
|
|
|
|
if (cc->cc_exec_entity[direct].cc_waiting) {
|
2005-02-07 02:47:33 +00:00
|
|
|
/*
|
|
|
|
* Someone has called callout_drain to kill this
|
|
|
|
* callout. Don't reschedule.
|
|
|
|
*/
|
2006-10-11 14:57:03 +00:00
|
|
|
CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
|
|
|
|
cancelled ? "cancelled" : "failed to cancel",
|
|
|
|
c, c->c_func, c->c_arg);
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2005-09-08 14:20:39 +00:00
|
|
|
return (cancelled);
|
2005-02-07 02:47:33 +00:00
|
|
|
}
|
2004-04-08 02:03:49 +00:00
|
|
|
}
|
2004-08-06 02:44:58 +00:00
|
|
|
if (c->c_flags & CALLOUT_PENDING) {
|
2013-03-04 11:09:56 +00:00
|
|
|
if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
|
|
|
|
if (cc->cc_exec_next_dir == c)
|
|
|
|
cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
|
|
|
|
LIST_REMOVE(c, c_links.le);
|
|
|
|
} else
|
|
|
|
TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
|
2005-09-08 14:20:39 +00:00
|
|
|
cancelled = 1;
|
2008-04-02 11:20:30 +00:00
|
|
|
c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
|
2004-08-06 02:44:58 +00:00
|
|
|
}
|
2011-04-08 18:48:57 +00:00
|
|
|
|
|
|
|
#ifdef SMP
|
1999-03-06 04:46:20 +00:00
|
|
|
/*
|
2011-04-08 18:48:57 +00:00
|
|
|
* If the callout must migrate, try to perform it immediately.
|
|
|
|
* If the callout is currently running, just defer the migration
|
|
|
|
* to a more appropriate moment.
|
1999-03-06 04:46:20 +00:00
|
|
|
*/
|
2008-04-02 11:20:30 +00:00
|
|
|
if (c->c_cpu != cpu) {
|
2013-03-04 11:09:56 +00:00
|
|
|
if (cc->cc_exec_entity[direct].cc_curr == c) {
|
|
|
|
cc->cc_exec_entity[direct].ce_migration_cpu = cpu;
|
|
|
|
cc->cc_exec_entity[direct].ce_migration_time
|
|
|
|
= to_sbt;
|
|
|
|
cc->cc_exec_entity[direct].ce_migration_func = ftn;
|
|
|
|
cc->cc_exec_entity[direct].ce_migration_arg = arg;
|
When callout_reset_on() cannot immediately migrate a callout because it
is running on another CPU, the CALLOUT_PENDING flag is temporarily
cleared. A subsequent callout_stop() on this still-active callout then
fails because CALLOUT_PENDING is not set, and callout_stop() returns 0.
Now, in sleepq_check_timeout(), the failed callout_stop() causes the
sleepq code to execute mi_switch() without even setting the wmesg,
since the switch-out is supposed to be transient. In fact, the thread
is put off the CPU for the full timeout interval instead of being put
on the runq immediately. Until the timeout fires, the process is
unkillable for obvious reasons.
Fix this by marking migrating callouts with the CALLOUT_DFRMIGRATION
flag. The flag is cleared by callout_stop_safe() when the function
detects a migration, besides returning success. softclock() then
rechecks the flag for a migrating callout and cancels its execution if
the flag was cleared in the meantime.
PR: misc/166340
Reported, debugging traces provided and tested by:
Christian Esken <christian.esken trivago com>
Reviewed by: avg, jhb
MFC after: 1 week
2012-05-03 10:38:02 +00:00
|
|
|
c->c_flags |= CALLOUT_DFRMIGRATION;
|
2013-03-04 11:09:56 +00:00
|
|
|
CTR6(KTR_CALLOUT,
|
|
|
|
"migration of %p func %p arg %p in %d.%08x to %u deferred",
|
|
|
|
c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
|
|
|
|
(u_int)(to_sbt & 0xffffffff), cpu);
|
2011-04-08 18:48:57 +00:00
|
|
|
CC_UNLOCK(cc);
|
|
|
|
return (cancelled);
|
|
|
|
}
|
|
|
|
cc = callout_cpu_switch(c, cc, cpu);
|
2008-04-02 11:20:30 +00:00
|
|
|
}
|
2011-04-08 18:48:57 +00:00
|
|
|
#endif
|
2008-04-02 11:20:30 +00:00
|
|
|
|
2013-03-04 11:09:56 +00:00
|
|
|
callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags);
|
|
|
|
CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x",
|
|
|
|
cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32),
|
|
|
|
(u_int)(to_sbt & 0xffffffff));
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2005-09-08 14:20:39 +00:00
|
|
|
|
|
|
|
return (cancelled);
|
1999-03-06 04:46:20 +00:00
|
|
|
}
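A hedged sketch of the sbintime_t KPI implemented above (consumer names are hypothetical): a 50 ms timeout that grants the framework a coalescing tolerance of one eighth of the interval. Passing C_DIRECT_EXEC instead would dispatch the handler straight from hard interrupt context, subject to the restrictions noted in the annotations above:

static struct callout precise_co;

static void
precise_fn(void *arg)
{

	/* ... time-sensitive work ... */
}

static void
precise_start(void *softc)
{

	callout_init(&precise_co, 1);
	/* Fire in ~50 ms; C_PREL(3) allows up to interval >> 3 (6.25 ms)
	   of slack so the event can be coalesced with its neighbours. */
	callout_reset_sbt(&precise_co, 50 * SBT_1MS, 0, precise_fn, softc,
	    C_PREL(3));
}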
|
|
|
|
|
2008-08-02 17:42:38 +00:00
|
|
|
/*
|
|
|
|
* Common idioms that can be optimized in the future.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
|
|
|
|
{
|
|
|
|
return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
callout_schedule(struct callout *c, int to_ticks)
|
|
|
|
{
|
|
|
|
return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
|
|
|
|
}
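One hedged use of the idiom above (the softc type is ours): a periodic handler that re-arms itself, relying on callout_schedule() to reuse the c_func/c_arg pair recorded by the initial callout_reset():

struct tick_softc {
	struct callout	tick_co;	/* armed elsewhere via callout_reset() */
};

static void
tick_fn(void *arg)
{
	struct tick_softc *sc = arg;

	/* ... periodic work ... */
	callout_schedule(&sc->tick_co, hz);	/* same func/arg, ~1 s later */
}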
|
|
|
|
|
2004-04-06 23:08:49 +00:00
|
|
|
int
|
|
|
|
_callout_stop_safe(c, safe)
|
|
|
|
struct callout *c;
|
|
|
|
int safe;
|
|
|
|
{
|
2011-04-08 18:48:57 +00:00
|
|
|
struct callout_cpu *cc, *old_cc;
|
2007-11-20 00:37:45 +00:00
|
|
|
struct lock_class *class;
|
2013-03-04 11:09:56 +00:00
|
|
|
int direct, sq_locked, use_lock;
|
2007-11-20 00:37:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Some old subsystems don't hold Giant while running a callout_stop(),
|
|
|
|
* so just discard this check for the moment.
|
|
|
|
*/
|
|
|
|
if (!safe && c->c_lock != NULL) {
|
|
|
|
if (c->c_lock == &Giant.lock_object)
|
|
|
|
use_lock = mtx_owned(&Giant);
|
|
|
|
else {
|
|
|
|
use_lock = 1;
|
|
|
|
class = LOCK_CLASS(c->c_lock);
|
|
|
|
class->lc_assert(c->c_lock, LA_XLOCKED);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
use_lock = 0;
|
2013-03-04 11:09:56 +00:00
|
|
|
direct = (c->c_flags & CALLOUT_DIRECT) != 0;
|
2007-08-31 19:01:30 +00:00
|
|
|
sq_locked = 0;
|
2011-04-08 18:48:57 +00:00
|
|
|
old_cc = NULL;
|
2007-08-31 19:01:30 +00:00
|
|
|
again:
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = callout_lock(c);
|
2011-04-08 18:48:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the callout was migrating while the callout cpu lock was
|
|
|
|
* dropped, just drop the sleepqueue lock and check the states
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
if (sq_locked != 0 && cc != old_cc) {
|
|
|
|
#ifdef SMP
|
|
|
|
CC_UNLOCK(cc);
|
2013-03-04 11:09:56 +00:00
|
|
|
sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting);
|
2011-04-08 18:48:57 +00:00
|
|
|
sq_locked = 0;
|
|
|
|
old_cc = NULL;
|
|
|
|
goto again;
|
|
|
|
#else
|
|
|
|
panic("migration should not happen");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
1999-03-06 04:46:20 +00:00
|
|
|
/*
|
2006-02-23 19:13:12 +00:00
|
|
|
* If the callout isn't pending, it's not on the queue, so
|
|
|
|
* don't attempt to remove it from the queue. We can try to
|
|
|
|
* stop it by other means, however.
|
1999-03-06 04:46:20 +00:00
|
|
|
*/
|
|
|
|
if (!(c->c_flags & CALLOUT_PENDING)) {
|
1999-08-30 21:17:07 +00:00
|
|
|
c->c_flags &= ~CALLOUT_ACTIVE;
|
2006-02-23 19:13:12 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If it wasn't on the queue and it isn't the current
|
|
|
|
* callout, then we can't stop it, so just bail.
|
|
|
|
*/
|
2013-03-04 11:09:56 +00:00
|
|
|
if (cc->cc_exec_entity[direct].cc_curr != c) {
|
2006-10-11 14:57:03 +00:00
|
|
|
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
|
|
|
|
c, c->c_func, c->c_arg);
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2007-08-31 19:01:30 +00:00
|
|
|
if (sq_locked)
|
2013-03-04 11:09:56 +00:00
|
|
|
sleepq_release(
|
|
|
|
&cc->cc_exec_entity[direct].cc_waiting);
|
2005-02-07 02:47:33 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2004-04-08 02:03:49 +00:00
|
|
|
|
2006-02-23 19:13:12 +00:00
|
|
|
if (safe) {
|
2004-04-06 23:08:49 +00:00
|
|
|
/*
|
2006-02-23 19:13:12 +00:00
|
|
|
* The current callout is running (or just
|
|
|
|
* about to run) and blocking is allowed, so
|
|
|
|
* just wait for the current invocation to
|
|
|
|
* finish.
|
2004-04-06 23:08:49 +00:00
|
|
|
*/
|
2013-03-04 11:09:56 +00:00
|
|
|
while (cc->cc_exec_entity[direct].cc_curr == c) {
|
2007-06-26 21:42:01 +00:00
|
|
|
/*
|
|
|
|
* Use direct calls to sleepqueue interface
|
|
|
|
* instead of cv/msleep in order to avoid
|
2008-04-02 11:20:30 +00:00
|
|
|
* a LOR between cc_lock and sleepqueue
|
2007-06-26 21:42:01 +00:00
|
|
|
* chain spinlocks. This piece of code
|
|
|
|
* actually emulates a msleep_spin() call.
|
2007-08-31 19:01:30 +00:00
|
|
|
*
|
|
|
|
* If we already have the sleepqueue chain
|
|
|
|
* locked, then we can safely block. If we
|
|
|
|
* don't already have it locked, however,
|
2008-04-02 11:20:30 +00:00
|
|
|
* we have to drop the cc_lock to lock
|
2007-08-31 19:01:30 +00:00
|
|
|
* it. This opens several races, so we
|
|
|
|
* restart at the beginning once we have
|
|
|
|
* both locks. If nothing has changed, then
|
|
|
|
* we will end up back here with sq_locked
|
|
|
|
* set.
|
2007-06-26 21:42:01 +00:00
|
|
|
*/
|
2007-08-31 19:01:30 +00:00
|
|
|
if (!sq_locked) {
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2013-03-04 11:09:56 +00:00
|
|
|
sleepq_lock(
|
|
|
|
&cc->cc_exec_entity[direct].cc_waiting);
|
2007-08-31 19:01:30 +00:00
|
|
|
sq_locked = 1;
|
2011-04-08 18:48:57 +00:00
|
|
|
old_cc = cc;
|
2007-08-31 19:01:30 +00:00
|
|
|
goto again;
|
2007-06-26 21:42:01 +00:00
|
|
|
}
|
2011-04-08 18:48:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Migration could be cancelled here, but
|
|
|
|
* since it is not yet certain when it
|
|
|
|
* will be packed up, just let softclock()
|
|
|
|
* take care of it.
|
|
|
|
*/
|
2013-03-04 11:09:56 +00:00
|
|
|
cc->cc_exec_entity[direct].cc_waiting = TRUE;
|
2007-06-26 21:42:01 +00:00
|
|
|
DROP_GIANT();
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2013-03-04 11:09:56 +00:00
|
|
|
sleepq_add(
|
|
|
|
&cc->cc_exec_entity[direct].cc_waiting,
|
2008-04-02 11:20:30 +00:00
|
|
|
&cc->cc_lock.lock_object, "codrain",
|
2007-06-26 21:42:01 +00:00
|
|
|
SLEEPQ_SLEEP, 0);
|
2013-03-04 11:09:56 +00:00
|
|
|
sleepq_wait(
|
|
|
|
&cc->cc_exec_entity[direct].cc_waiting,
|
|
|
|
0);
|
2007-08-31 19:01:30 +00:00
|
|
|
sq_locked = 0;
|
2011-04-08 18:48:57 +00:00
|
|
|
old_cc = NULL;
|
2007-06-26 21:42:01 +00:00
|
|
|
|
|
|
|
/* Reacquire locks previously released. */
|
|
|
|
PICKUP_GIANT();
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_LOCK(cc);
|
2006-02-23 19:13:12 +00:00
|
|
|
}
|
2013-03-04 11:09:56 +00:00
|
|
|
} else if (use_lock &&
|
|
|
|
!cc->cc_exec_entity[direct].cc_cancel) {
|
2006-02-23 19:13:12 +00:00
|
|
|
/*
|
2007-11-20 00:37:45 +00:00
|
|
|
* The current callout is waiting for its
|
|
|
|
* lock which we hold. Cancel the callout
|
2006-02-23 19:13:12 +00:00
|
|
|
* and return. After our caller drops the
|
2007-11-20 00:37:45 +00:00
|
|
|
* lock, the callout will be skipped in
|
2006-02-23 19:13:12 +00:00
|
|
|
* softclock().
|
|
|
|
*/
|
2013-03-04 11:09:56 +00:00
|
|
|
			cc->cc_exec_entity[direct].cc_cancel = TRUE;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cce_migrating(cc, direct),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
When callout_reset_on() cannot immediately migrate a callout because it
is running on another CPU, the CALLOUT_PENDING flag is temporarily
cleared.  A subsequent callout_stop() on this still-active callout then
fails because CALLOUT_PENDING is not set, and callout_stop() returns 0.
In sleepq_check_timeout(), the failed callout_stop() causes the sleepq
code to execute mi_switch() without even setting the wmesg, since the
switch-out is supposed to be transient.  In fact, the thread is taken
off the CPU for the full timeout interval instead of being put back on
the run queue immediately; until the timeout fires, the process is
unkillable for obvious reasons.
Fix this by marking migrating callouts with the CALLOUT_DFRMIGRATION
flag.  The flag is cleared by callout_stop_safe() when the function
detects a migration, in addition to returning success.  softclock()
rechecks the flag for a migrating callout and cancels its execution if
the flag was cleared in the meantime.
PR: misc/166340
Reported, debugging traces provided and tested by:
Christian Esken <christian.esken trivago com>
Reviewed by: avg, jhb
MFC after: 1 week
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
|
|
|
|
c, c->c_func, c->c_arg);
|
2008-04-02 11:20:30 +00:00
|
|
|
CC_UNLOCK(cc);
|
2007-08-31 19:01:30 +00:00
|
|
|
KASSERT(!sq_locked, ("sleepqueue chain still locked"));
|
2001-08-10 21:06:59 +00:00
|
|
|
return (0);
|
1999-03-06 04:46:20 +00:00
|
|
|
}
|
2007-08-31 19:01:30 +00:00
|
|
|
if (sq_locked)
		sleepq_release(&cc->cc_exec_entity[direct].cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	/*
	 * The callout is still pending: unlink it either from its wheel
	 * bucket (not yet processed) or from the expiration queue.
	 */
	if ((c->c_flags & CALLOUT_PROCESSED) == 0) {
		if (cc->cc_exec_next_dir == c)
			cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le);
		LIST_REMOVE(c, c_links.le);
	} else
		TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}
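
A minimal consumer sketch of the contract implemented above: the function
returns 1 when the callout was cancelled before it could run and 0 when it
could not be stopped, in which case teardown paths typically fall back to
callout_drain(9), which sleeps until any in-flight handler has finished.
The softc and field names below are hypothetical, not from this file.

struct foo_softc {
	struct callout	foo_timeout_c;
};

static void
foo_teardown(struct foo_softc *sc)
{

	/* A failed stop means the handler may be running right now. */
	if (callout_stop(&sc->foo_timeout_c) == 0)
		callout_drain(&sc->foo_timeout_c);
}
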
void
callout_init(struct callout *c, int mpsafe)
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}
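
A typical arming sequence for an MPSAFE callout initialized this way might
look as follows (tick_c and tick_fn are illustrative names, not from this
file); with mpsafe set, the handler runs without any lock held, while the
!mpsafe case above is implicitly serialized by Giant.

static struct callout tick_c;

static void
tick_fn(void *arg)
{

	/* ... periodic work ... */
	callout_reset(&tick_c, hz, tick_fn, arg);	/* re-arm in ~1 s */
}

static void
tick_start(void)
{

	callout_init(&tick_c, 1);	/* 1: MPSAFE, no implicit Giant */
	callout_reset(&tick_c, hz, tick_fn, NULL);
}
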
void
_callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
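
Consumers normally reach _callout_init_lock() through wrapper macros such
as callout_init_mtx() and callout_init_rw() from sys/callout.h rather than
calling it directly.  A sketch with hypothetical names; note that MTX_DEF
satisfies the lock-class assertion above, which rejects spin locks and
sleepable locks:

static struct mtx foo_mtx;
static struct callout foo_c;

static void
foo_init(void)
{

	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
	/* The callout handler will run with foo_mtx held on entry. */
	callout_init_mtx(&foo_c, &foo_mtx, 0);
}
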
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.  - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(struct timeval *time_change)
{
	struct callout *p;
	struct callout_cpu *cc;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
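
	/*
	 * Worked example with illustrative values (not from this file):
	 * with hz = 1000, i.e. tick = 1000 us, a two-hour suspend has
	 * tv_sec = 7200 and tv_usec = 0, giving
	 *   delta_ticks = (7200 * 1000000 + 0 + 999) / 1000 + 1 = 7200001,
	 * comfortably below INT_MAX, so the clamp above does not fire.
	 */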

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* Don't collide with softclock(). */
	cc = CC_CPU(timeout_cpu);	/* assumption: the legacy calltodo list hangs off the boot CPU's callwheel */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks. */
		if (p->c_time > 0)
			break;

		/* Take back the ticks the timer didn't use (p->c_time <= 0). */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
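
The walk above relies on the calltodo entries storing differential tick
counts: each c_time is relative to its predecessor, so only the leading
entries must absorb the slept-away ticks.  A minimal userspace sketch of
the same arithmetic (illustrative only, not kernel code):

#include <stdio.h>

int
main(void)
{
	int deltas[] = { 5, 3, 10 };	/* fire at t = 5, 8, 18 */
	int slept = 9, i;

	for (i = 0; i < 3; i++) {
		deltas[i] -= slept;
		if (deltas[i] > 0)
			break;
		/* Take back the ticks this entry did not use. */
		slept = -deltas[i];
	}
	for (i = 0; i < 3; i++)
		printf("%d ", deltas[i]);	/* prints "-4 -1 9" */
	printf("\n");	/* non-positive entries are overdue and fire next */
	return (0);
}
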
static int
flssbt(sbintime_t sbt)
{

	/*
	 * Add half the value first so that fls() effectively rounds to
	 * the nearest power of two instead of truncating down.
	 */
	sbt += (uint64_t)sbt >> 1;
	if (sizeof(long) >= sizeof(sbintime_t))
		return (flsl(sbt));
	if (sbt >= SBT_1S)
		return (flsl(((uint64_t)sbt) >> 32) + 32);
	return (flsl(sbt));
}
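
flssbt() thus maps an sbintime_t to a power-of-two histogram bucket,
rounding to the nearest power of two rather than truncating.  A userspace
sketch of the same rounding (illustrative only):

#include <stdio.h>
#include <strings.h>	/* flsl() on FreeBSD */

int
main(void)
{
	long v[] = { 2, 3, 5, 6 };
	int i;

	for (i = 0; i < 4; i++)
		printf("%ld -> 2**%d\n", v[i], flsl(v[i] + v[i] / 2) - 1);
	/* 2 -> 2**1, 3 -> 2**2, 5 -> 2**2, 6 -> 2**3 */
	return (0);
}
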
/*
 * Dump an immediate statistic snapshot of the scheduled callouts.
 */
static int
sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS)
{
	struct callout *tmp;
	struct callout_cpu *cc;
	struct callout_list *sc;
	sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t;
	int ct[64], cpr[64], ccpbk[32];
	int error, val, i, count, tcum, pcum, maxc, c, medc;
#ifdef SMP
	int cpu;
#endif

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	count = maxc = 0;
	st = spr = maxt = maxpr = 0;
	bzero(ccpbk, sizeof(ccpbk));
	bzero(ct, sizeof(ct));
	bzero(cpr, sizeof(cpr));
	now = sbinuptime();
#ifdef SMP
	CPU_FOREACH(cpu) {
		cc = CC_CPU(cpu);
#else
		cc = CC_CPU(timeout_cpu);
#endif
		CC_LOCK(cc);
		for (i = 0; i < callwheelsize; i++) {
			sc = &cc->cc_callwheel[i];
			c = 0;
			LIST_FOREACH(tmp, sc, c_links.le) {
				c++;
				t = tmp->c_time - now;
				if (t < 0)
					t = 0;
				st += t / SBT_1US;
				spr += tmp->c_precision / SBT_1US;
				if (t > maxt)
					maxt = t;
				if (tmp->c_precision > maxpr)
					maxpr = tmp->c_precision;
				ct[flssbt(t)]++;
				cpr[flssbt(tmp->c_precision)]++;
			}
			if (c > maxc)
				maxc = c;
			ccpbk[fls(c + c / 2)]++;
			count += c;
		}
		CC_UNLOCK(cc);
#ifdef SMP
	}
#endif

	/*
	 * Estimate each median from the log2 histograms: each loop stops
	 * one bucket past the point where the cumulative count reaches
	 * half of the total, so 1 << (i - 2) is the representative value
	 * of the crossing bucket.
	 */
	for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++)
		tcum += ct[i];
	medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++)
		pcum += cpr[i];
	medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0;
	for (i = 0, c = 0; i < 32 && c < count / 2; i++)
		c += ccpbk[i];
	medc = (i >= 2) ? (1 << (i - 2)) : 0;

	printf("Scheduled callouts statistic snapshot:\n");
	printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n",
	    count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT);
	printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n",
	    medc,
	    count / callwheelsize / mp_ncpus,
	    (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000,
	    maxc);
	printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32,
	    (st / count) / 1000000, (st / count) % 1000000,
	    maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32);
	printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n",
	    medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32,
	    (spr / count) / 1000000, (spr / count) % 1000000,
	    maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32);
	printf(" Distribution: \tbuckets\t time\t tcum\t"
	    " prec\t pcum\n");
	for (i = 0, tcum = pcum = 0; i < 64; i++) {
		if (ct[i] == 0 && cpr[i] == 0)
			continue;
		t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0;
		tcum += ct[i];
		pcum += cpr[i];
		printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n",
		    t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32,
		    i - 1 - (32 - CC_HASH_SHIFT),
		    ct[i], tcum, cpr[i], pcum);
	}
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, callout_stat,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_callout_stat, "I",
    "Dump immediate statistic snapshot of the scheduled callouts");
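
Because the handler returns early unless req->newptr is set, the snapshot
is produced only on a write: running sysctl kern.callout_stat=1 from
userland triggers the printf dump to the console, while a plain read of
the OID just returns the placeholder integer 0.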