/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information necessary
 * to describe a migrating callout to the new callout cpu.
 * The cached information is very important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_cpu().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softclock is advanced in callout_cpu() to point to the
 *	first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softclock <= i <= cc_ticks .
 *	XXX maybe cc_softclock and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_cpu() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct cc_mig_ent	cc_migrating_entity;
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both callout_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_lock, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}
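
/*
 * Illustrative note (added for exposition): the loop above rounds the wheel
 * size up to the next power of two so that a wheel index can be computed
 * with a cheap mask instead of a modulo.  For example, if the ncallout
 * tunable happened to be 724, the loop would terminate with
 * callwheelsize == 1024, callwheelbits == 10 and callwheelmask == 0x3ff,
 * and a callout expiring at time t would live in bucket (t & 0x3ff).
 */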

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may be willing to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
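
/*
 * Illustrative sketch (added for exposition) of how the hashed timing
 * wheel is used here: a callout armed for to_ticks in the future gets
 * c_time = ticks + to_ticks and is appended to bucket
 * (c_time & callwheelmask).  Assuming callwheelsize == 256, a callout
 * armed at ticks == 1000 with to_ticks == 300 gets c_time == 1300 and
 * lands in bucket 1300 & 0xff == 20.  softclock() visits bucket 20 at
 * softticks 1044 but skips the entry because c_time != 1044; it finally
 * runs the handler one wheel revolution later, when softticks == 1300.
 */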

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {

					/*
					 * There is someone waiting for the
					 * callout to complete.
					 * If the callout was scheduled for
					 * migration just cancel it.
					 */
					if (cc_cme_migrating(cc))
						cc_cme_cleanup(cc);
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				} else if (cc_cme_migrating(cc)) {
#ifdef SMP
					struct callout_cpu *new_cc;
					void (*new_func)(void *);
					void *new_arg;
					int new_cpu, new_ticks;

					/*
					 * If the callout was scheduled for
					 * migration just perform it now.
					 */
					new_cpu = cc->cc_migration_cpu;
					new_ticks = cc->cc_migration_ticks;
					new_func = cc->cc_migration_func;
					new_arg = cc->cc_migration_arg;
					cc_cme_cleanup(cc);

					/*
					 * It should be asserted here that the
					 * callout is not destroyed but that
					 * is not easy.
					 */
					new_cc = callout_cpu_switch(c, cc,
					    new_cpu);
					callout_cc_add(c, new_cc, new_ticks,
					    new_func, new_arg, new_cpu);
					CC_UNLOCK(new_cc);
					CC_LOCK(cc);
#else
					panic("migration should not happen");
#endif
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
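
/*
 * Illustrative usage sketch (added for exposition; the foo_* names are
 * hypothetical).  A typical consumer embeds a callout in its softc,
 * associates it with its own lock and rearms it from the handler:
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_tick;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		... periodic work ...
 *		callout_reset(&sc->sc_tick, hz, foo_tick, sc);
 *	}
 *
 * with callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0) at attach time so
 * that softclock() takes sc_mtx before calling foo_tick(), and
 * callout_drain(&sc->sc_tick) at detach time to wait for any in-flight
 * invocation to finish.
 */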
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			CTR5(KTR_CALLOUT,
		    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}
2004-04-06 23:08:49 +00:00
|
|
|
int
|
|
|
|
_callout_stop_safe(c, safe)
|
|
|
|
struct callout *c;
|
|
|
|
int safe;
|
|
|
|
{
|
2011-04-08 18:48:57 +00:00
|
|
|
struct callout_cpu *cc, *old_cc;
|
2007-11-20 00:37:45 +00:00
|
|
|
struct lock_class *class;
|
|
|
|
int use_lock, sq_locked;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Some old subsystems don't hold Giant while running a callout_stop(),
|
|
|
|
* so just discard this check for the moment.
|
|
|
|
*/
|
|
|
|
if (!safe && c->c_lock != NULL) {
|
|
|
|
if (c->c_lock == &Giant.lock_object)
|
|
|
|
use_lock = mtx_owned(&Giant);
|
|
|
|
else {
|
|
|
|
use_lock = 1;
|
|
|
|
class = LOCK_CLASS(c->c_lock);
|
|
|
|
class->lc_assert(c->c_lock, LA_XLOCKED);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
use_lock = 0;
|
2004-04-06 23:08:49 +00:00
|
|
|
|
2007-08-31 19:01:30 +00:00
|
|
|
sq_locked = 0;
|
2011-04-08 18:48:57 +00:00
|
|
|
old_cc = NULL;
|
2007-08-31 19:01:30 +00:00
|
|
|
again:
|
2008-04-02 11:20:30 +00:00
|
|
|
cc = callout_lock(c);
|
2011-04-08 18:48:57 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the callout was migrating while the callout cpu lock was
|
|
|
|
* dropped, just drop the sleepqueue lock and check the states
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
if (sq_locked != 0 && cc != old_cc) {
|
|
|
|
#ifdef SMP
|
|
|
|
CC_UNLOCK(cc);
|
|
|
|
sleepq_release(&old_cc->cc_waiting);
|
|
|
|
sq_locked = 0;
|
|
|
|
old_cc = NULL;
|
|
|
|
goto again;
|
|
|
|
#else
|
|
|
|
panic("migration should not happen");
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to the sleepqueue
				 * interface instead of cv/msleep in order
				 * to avoid a LOR between cc_lock and the
				 * sleepqueue chain spinlocks.  This piece
				 * of code essentially emulates a
				 * msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * as long as it is still not certain when
				 * it will actually be performed, just let
				 * softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}
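
/*
 * Usage sketch: the public callout_stop() and callout_drain() interfaces
 * are expected to funnel into this function with safe == 0 and safe == 1
 * respectively (see <sys/callout.h>).  A typical teardown path for a
 * hypothetical softc "sc" whose callout lock is sc->mtx would be:
 *
 *	mtx_lock(&sc->mtx);
 *	callout_stop(&sc->tick_co);	// cancel a pending callout; a handler
 *					// already running may still complete
 *	mtx_unlock(&sc->mtx);
 *	callout_drain(&sc->tick_co);	// sleep until any running handler has
 *					// finished; must not hold sc->mtx here
 */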

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}
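
/*
 * Illustrative sketch (hypothetical callouts, not used by this file):
 * a nonzero mpsafe argument leaves the callout with no associated lock,
 * so the handler must do its own locking; zero associates Giant, the
 * historical default, and softclock() acquires it around the handler.
 *
 *	struct callout c_mpsafe, c_giant;
 *
 *	callout_init(&c_mpsafe, 1);	// MPSAFE handler, no lock taken
 *	callout_init(&c_giant, 0);	// handler runs with Giant held
 */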

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
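
/*
 * Usage sketch: drivers normally reach this function through the
 * callout_init_mtx() (or callout_init_rw()) wrappers in <sys/callout.h>,
 * which pass the embedded lock_object of the mutex or rwlock.  The softc
 * and field names below are hypothetical.
 *
 *	mtx_init(&sc->mtx, "my_softc", NULL, MTX_DEF);
 *	callout_init_mtx(&sc->tick_co, &sc->mtx, 0);
 *	// softclock() now takes sc->mtx around the handler, and a
 *	// callout_stop() done while holding sc->mtx either removes the
 *	// callout or makes softclock() skip the about-to-run handler.
 */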

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	struct callout_cpu *cc = CC_CPU(timeout_cpu);	/* assumed: boot CPU's
							   wheel guards this */
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
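
	/*
	 * Worked example (illustrative; assumes hz = 1000, so tick is
	 * 1000 microseconds): a suspend of 2.5 seconds gives
	 *
	 *	delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501
	 *
	 * i.e. the conversion deliberately rounds up and adds one extra
	 * tick so that no pending timer is left short.
	 */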

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */