remove opensolaris cyclic code, replace with high-precision callouts

In the old days callout(9) had 1 tick precision, and that was inadequate
for some uses, e.g. the DTrace profile module, so we had to emulate the
cyclic API and behavior.  Now we can directly use callout(9) in the very
few places where cyclic was used.

Differential Revision:	https://reviews.freebsd.org/D1161
Reviewed by:	gnn, jhb, markj
MFC after:	2 weeks
This commit is contained in:
Andriy Gapon 2014-12-07 11:21:41 +00:00
parent 4268212124
commit 036a8c5dac
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=275576
16 changed files with 171 additions and 2357 deletions

View File

@ -38,11 +38,8 @@ struct cyc_cpu;
typedef struct {
int cpuid;
struct cyc_cpu *cpu_cyclic;
uint32_t cpu_flags;
uint_t cpu_intr_actv;
uintptr_t cpu_profile_pc;
uintptr_t cpu_profile_upc;
uintptr_t cpu_dtrace_caller; /* DTrace: caller, if any */
hrtime_t cpu_dtrace_chillmark; /* DTrace: chill mark time */
hrtime_t cpu_dtrace_chilled; /* DTrace: total chill time */

View File

@ -1,79 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* $FreeBSD$
*
*/
/*
* Copyright (c) 1999-2001 by Sun Microsystems, Inc.
* All rights reserved.
*/
#ifndef _COMPAT_OPENSOLARIS_SYS_CYCLIC_H_
#define _COMPAT_OPENSOLARIS_SYS_CYCLIC_H_
#ifndef _KERNEL
typedef void cpu_t;
#endif
#ifndef _ASM
#include <sys/time.h>
#include <sys/cpuvar.h>
#endif /* !_ASM */
#ifndef _ASM
typedef uintptr_t cyclic_id_t;
typedef int cyc_index_t;
typedef uint16_t cyc_level_t;
typedef void (*cyc_func_t)(void *);
typedef void *cyb_arg_t;
#define CYCLIC_NONE ((cyclic_id_t)0)
typedef struct cyc_handler {
cyc_func_t cyh_func;
void *cyh_arg;
} cyc_handler_t;
typedef struct cyc_time {
hrtime_t cyt_when;
hrtime_t cyt_interval;
} cyc_time_t;
typedef struct cyc_omni_handler {
void (*cyo_online)(void *, cpu_t *, cyc_handler_t *, cyc_time_t *);
void (*cyo_offline)(void *, cpu_t *, void *);
void *cyo_arg;
} cyc_omni_handler_t;
#ifdef _KERNEL
cyclic_id_t cyclic_add(cyc_handler_t *, cyc_time_t *);
cyclic_id_t cyclic_add_omni(cyc_omni_handler_t *);
void cyclic_remove(cyclic_id_t);
#endif /* _KERNEL */
#endif /* !_ASM */
#endif

View File

@ -1,311 +0,0 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*
* $FreeBSD$
*
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _COMPAT_OPENSOLARIS_SYS_CYCLIC_IMPL_H_
#define _COMPAT_OPENSOLARIS_SYS_CYCLIC_IMPL_H_
#include <sys/cyclic.h>
/*
* Cyclic Subsystem Backend-supplied Interfaces
* --------------------------------------------
*
* 0 Background
*
* The design, implementation and interfaces of the cyclic subsystem are
* covered in detail in block comments in the implementation. This
* comment covers the interface from the cyclic subsystem into the cyclic
* backend. The backend is specified by a structure of function pointers
* defined below.
*
* 1 Overview
*
* cyb_configure() <-- Configures the backend on the specified CPU
* cyb_unconfigure() <-- Unconfigures the backend
* cyb_enable() <-- Enables the CY_HIGH_LEVEL interrupt source
* cyb_disable() <-- Disables the CY_HIGH_LEVEL interrupt source
* cyb_reprogram() <-- Reprograms the CY_HIGH_LEVEL interrupt source
* cyb_xcall() <-- Cross calls to the specified CPU
*
* 2 cyb_arg_t cyb_configure(cpu_t *)
*
* 2.1 Overview
*
* cyb_configure() should configure the specified CPU for cyclic operation.
*
* 2.2 Arguments and notes
*
* cyb_configure() should initialize any backend-specific per-CPU
* structures for the specified CPU. cyb_configure() will be called for
* each CPU (including the boot CPU) during boot. If the platform
* supports dynamic reconfiguration, cyb_configure() will be called for
* new CPUs as they are configured into the system.
*
* 2.3 Return value
*
* cyb_configure() is expected to return a cookie (a cyb_arg_t, which is
* of type void *) which will be used as the first argument for all future
* cyclic calls into the backend on the specified CPU.
*
* 2.4 Caller's context
*
* cpu_lock will be held. The caller's CPU is unspecified, and may or
* may not be the CPU specified to cyb_configure().
*
* 3 void cyb_unconfigure(cyb_arg_t arg)
*
* 3.1 Overview
*
* cyb_unconfigure() should unconfigure the specified backend.
*
* 3.2 Arguments and notes
*
* The only argument to cyb_unconfigure() is a cookie as returned from
* cyb_configure().
*
* cyb_unconfigure() should free any backend-specific per-CPU structures
* for the specified backend. cyb_unconfigure() will _only_ be called on
* platforms which support dynamic reconfiguration. If the platform does
* not support dynamic reconfiguration, cyb_unconfigure() may panic.
*
* After cyb_unconfigure() returns, the backend must not call cyclic_fire()
* on the corresponding CPU; doing so will result in a bad trap.
*
* 3.3 Return value
*
* None.
*
* 3.4 Caller's context
*
* cpu_lock will be held. The caller's CPU is unspecified, and may or
* may not be the CPU specified to cyb_unconfigure(). The specified
* CPU is guaranteed to exist at the time cyb_unconfigure() is called.
* The cyclic subsystem is guaranteed to be suspended when cyb_unconfigure()
* is called, and interrupts are guaranteed to be disabled.
*
* 4 void cyb_enable(cyb_arg_t arg)
*
* 4.1 Overview
*
* cyb_enable() should enable the CY_HIGH_LEVEL interrupt source on
* the specified backend.
*
* 4.2 Arguments and notes
*
* The only argument to cyb_enable() is a backend cookie as returned from
* cyb_configure().
*
* cyb_enable() will only be called if a) the specified backend has never
* been enabled or b) the specified backend has been explicitly disabled with
* cyb_disable(). In either case, cyb_enable() will only be called if
* the cyclic subsystem wishes to add a cyclic to the CPU corresponding
* to the specified backend. cyb_enable() will be called before
* cyb_reprogram() for a given backend.
*
* cyclic_fire() should not be called on a CPU which has not had its backend
* explicitly cyb_enable()'d, but to do so does not constitute fatal error.
*
* 4.3 Return value
*
* None.
*
* 4.4 Caller's context
*
* cyb_enable() will only be called from CY_HIGH_LEVEL context on the CPU
* corresponding to the specified backend.
*
* 5 void cyb_disable(cyb_arg_t arg)
*
* 5.1 Overview
*
* cyb_disable() should disable the CY_HIGH_LEVEL interrupt source on
* the specified backend.
*
* 5.2 Arguments and notes
*
* The only argument to cyb_disable() is a backend cookie as returned from
* cyb_configure().
*
* cyb_disable() will only be called on backends which have been previously
* been cyb_enable()'d. cyb_disable() will be called when all cyclics have
* been juggled away or removed from a cyb_enable()'d CPU.
*
* cyclic_fire() should not be called on a CPU which has had its backend
* explicitly cyb_disable()'d, but to do so does not constitute fatal
* error. cyb_disable() is thus not required to check for a pending
* CY_HIGH_LEVEL interrupt.
*
* 5.3 Return value
*
* None.
*
* 5.4 Caller's context
*
* cyb_disable() will only be called from CY_HIGH_LEVEL context on the CPU
* corresponding to the specified backend.
*
* 6 void cyb_reprogram(cyb_arg_t arg, hrtime_t time)
*
* 6.1 Overview
*
* cyb_reprogram() should reprogram the CY_HIGH_LEVEL interrupt source
* to fire at the absolute time specified.
*
* 6.2 Arguments and notes
*
* The first argument to cyb_reprogram() is a backend cookie as returned from
* cyb_configure().
*
* The second argument is an absolute time at which the CY_HIGH_LEVEL
* interrupt should fire. The specified time _may_ be in the past (albeit
* the very recent past). If this is the case, the backend should generate
* a CY_HIGH_LEVEL interrupt as soon as possible.
*
* The platform should not assume that cyb_reprogram() will be called with
* monotonically increasing values.
*
* If the platform does not allow for interrupts at arbitrary times in the
* future, cyb_reprogram() may do nothing -- as long as cyclic_fire() is
* called periodically at CY_HIGH_LEVEL. While this is clearly suboptimal
* (cyclic granularity will be bounded by the length of the period between
* cyclic_fire()'s), it allows the cyclic subsystem to be implemented on
* inferior hardware.
*
* 6.3 Return value
*
* None.
*
* 6.4 Caller's context
*
* cyb_reprogram() will only be called from CY_HIGH_LEVEL context on the CPU
* corresponding to the specified backend.
*
* 10 cyb_xcall(cyb_arg_t arg, cpu_t *, void(*func)(void *), void *farg)
*
* 10.1 Overview
*
* cyb_xcall() should execute the specified function on the specified CPU.
*
* 10.2 Arguments and notes
*
* The first argument to cyb_restore_level() is a backend cookie as returned
* from cyb_configure(). The second argument is a CPU on which the third
* argument, a function pointer, should be executed. The fourth argument,
* a void *, should be passed as the argument to the specified function.
*
* cyb_xcall() must provide exactly-once semantics. If the specified
* function is called more than once, or not at all, the cyclic subsystem
* will become internally inconsistent. The specified function must be
* be executed on the specified CPU, but may be executed in any context
* (any interrupt context or kernel context).
*
* cyb_xcall() cannot block. Any resources which cyb_xcall() needs to
* acquire must thus be protected by synchronization primitives which
* never require the caller to block.
*
* 10.3 Return value
*
* None.
*
* 10.4 Caller's context
*
* cpu_lock will be held and kernel preemption may be disabled. The caller
* may be unable to block, giving rise to the constraint outlined in
* 10.2, above.
*
*/
typedef struct cyc_backend {
cyb_arg_t (*cyb_configure)(cpu_t *);
void (*cyb_unconfigure)(cyb_arg_t);
void (*cyb_enable)(cyb_arg_t);
void (*cyb_disable)(cyb_arg_t);
void (*cyb_reprogram)(cyb_arg_t, hrtime_t);
void (*cyb_xcall)(cyb_arg_t, cpu_t *, cyc_func_t, void *);
cyb_arg_t cyb_arg;
} cyc_backend_t;
#define CYF_FREE 0x0001
typedef struct cyclic {
hrtime_t cy_expire;
hrtime_t cy_interval;
void (*cy_handler)(void *);
void *cy_arg;
uint16_t cy_flags;
} cyclic_t;
typedef struct cyc_cpu {
cpu_t *cyp_cpu;
cyc_index_t *cyp_heap;
cyclic_t *cyp_cyclics;
cyc_index_t cyp_nelems;
cyc_index_t cyp_size;
cyc_backend_t *cyp_backend;
struct mtx cyp_mtx;
} cyc_cpu_t;
typedef struct cyc_omni_cpu {
cyc_cpu_t *cyo_cpu;
cyc_index_t cyo_ndx;
void *cyo_arg;
struct cyc_omni_cpu *cyo_next;
} cyc_omni_cpu_t;
typedef struct cyc_id {
cyc_cpu_t *cyi_cpu;
cyc_index_t cyi_ndx;
struct cyc_id *cyi_prev;
struct cyc_id *cyi_next;
cyc_omni_handler_t cyi_omni_hdlr;
cyc_omni_cpu_t *cyi_omni_list;
} cyc_id_t;
typedef struct cyc_xcallarg {
cyc_cpu_t *cyx_cpu;
cyc_handler_t *cyx_hdlr;
cyc_time_t *cyx_when;
cyc_index_t cyx_ndx;
cyc_index_t *cyx_heap;
cyclic_t *cyx_cyclics;
cyc_index_t cyx_size;
uint16_t cyx_flags;
int cyx_wait;
} cyc_xcallarg_t;
#define CY_DEFAULT_PERCPU 1
#define CY_PASSIVE_LEVEL -1
#define CY_WAIT 0
#define CY_NOWAIT 1
#define CYC_HEAP_PARENT(ndx) (((ndx) - 1) >> 1)
#define CYC_HEAP_RIGHT(ndx) (((ndx) + 1) << 1)
#define CYC_HEAP_LEFT(ndx) ((((ndx) + 1) << 1) - 1)
#endif

View File

@ -17947,6 +17947,5 @@ SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init,
DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif

View File

@ -57,6 +57,7 @@ extern "C" {
#if defined(sun)
#include <sys/systm.h>
#else
#include <sys/cpuvar.h>
#include <sys/param.h>
#include <sys/linker.h>
#include <sys/ioccom.h>
@ -64,8 +65,8 @@ extern "C" {
typedef int model_t;
#endif
#include <sys/ctf_api.h>
#include <sys/cyclic.h>
#if defined(sun)
#include <sys/cyclic.h>
#include <sys/int_limits.h>
#else
#include <sys/stdint.h>

File diff suppressed because it is too large Load Diff

View File

@ -1,301 +0,0 @@
/*-
* Copyright 2007 John Birrell <jb@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include <sys/cdefs.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/cyclic.h>
#include <sys/time.h>
static struct timespec test_001_start;
/*
 * Handler for test 001: fires once per cyclic interval and logs the
 * elapsed time since the test started plus the CPU it ran on.
 */
static void
cyclic_test_001_func(void *arg)
{
struct timespec ts;
nanotime(&ts);
/* ts -= test_001_start: time elapsed since cyclic_test_001() began. */
timespecsub(&ts,&test_001_start);
printf("%s: called after %lu.%09lu on curcpu %d\n",__func__,(u_long) ts.tv_sec,(u_long) ts.tv_nsec, curcpu);
}
/*
 * Test 001: register a single 1 Hz cyclic, busy-wait ~1.2 s so it can
 * fire at least once, then remove it.  cyclic_add()/cyclic_remove()
 * must be called with cpu_lock held.
 * NOTE(review): 'error' is never modified, so this always prints
 * "passed"; success must be judged from the handler's log output.
 */
static void
cyclic_test_001(void)
{
int error = 0;
cyc_handler_t hdlr;
cyc_time_t when;
cyclic_id_t id;
printf("%s: starting\n",__func__);
hdlr.cyh_func = (cyc_func_t) cyclic_test_001_func;
hdlr.cyh_arg = 0;
/* cyt_when == 0: start as soon as possible. */
when.cyt_when = 0;
when.cyt_interval = 1000000000;	/* 1 s, in nanoseconds */
nanotime(&test_001_start);
mutex_enter(&cpu_lock);
id = cyclic_add(&hdlr, &when);
mutex_exit(&cpu_lock);
DELAY(1200000);	/* busy-wait 1.2 s (microseconds) */
mutex_enter(&cpu_lock);
cyclic_remove(id);
mutex_exit(&cpu_lock);
printf("%s: %s\n",__func__, error == 0 ? "passed":"failed");
}
static struct timespec test_002_start;
/*
 * Per-CPU handler for test 002 (omni-cyclic): logs elapsed time since
 * the test started and the CPU it fired on.
 */
static void
cyclic_test_002_func(void *arg)
{
struct timespec ts;
nanotime(&ts);
/* ts -= test_002_start: time elapsed since cyclic_test_002() began. */
timespecsub(&ts,&test_002_start);
printf("%s: called after %lu.%09lu on curcpu %d\n",__func__,(u_long) ts.tv_sec,(u_long) ts.tv_nsec, curcpu);
}
/*
 * Omni-cyclic online callback: invoked for each CPU brought under the
 * cyclic.  Fills in the per-CPU handler and a 1 s firing interval.
 */
static void
cyclic_test_002_online(void *arg, cpu_t *c, cyc_handler_t *hdlr, cyc_time_t *t)
{
printf("%s: online on curcpu %d\n",__func__, curcpu);
hdlr->cyh_func = cyclic_test_002_func;
hdlr->cyh_arg = NULL;
t->cyt_when = 0;	/* start as soon as possible */
t->cyt_interval = 1000000000;	/* 1 s, in nanoseconds */
}
/*
 * Omni-cyclic offline callback: log-only; no per-CPU state to tear down.
 */
static void
cyclic_test_002_offline(void *arg, cpu_t *c, void *arg1)
{
printf("%s: offline on curcpu %d\n",__func__, curcpu);
}
/*
 * Test 002: omni-cyclic variant of test 001 — the cyclic fires on every
 * online CPU (per-CPU setup happens in the online callback).
 * NOTE(review): as in test 001, 'error' is never set, so "passed" is
 * always printed; check the log output from the handlers instead.
 */
static void
cyclic_test_002(void)
{
int error = 0;
cyc_omni_handler_t hdlr;
cyclic_id_t id;
printf("%s: starting\n",__func__);
hdlr.cyo_online = cyclic_test_002_online;
hdlr.cyo_offline = cyclic_test_002_offline;
hdlr.cyo_arg = NULL;
nanotime(&test_002_start);
mutex_enter(&cpu_lock);
id = cyclic_add_omni(&hdlr);
mutex_exit(&cpu_lock);
DELAY(1200000);	/* busy-wait 1.2 s so each CPU's cyclic can fire */
mutex_enter(&cpu_lock);
cyclic_remove(id);
mutex_exit(&cpu_lock);
printf("%s: %s\n",__func__, error == 0 ? "passed":"failed");
}
static struct timespec test_003_start;
/*
 * Handler for test 003: logs elapsed time, CPU, and the small integer
 * id smuggled through the handler argument to tell the four concurrent
 * cyclics apart.
 */
static void
cyclic_test_003_func(void *arg)
{
struct timespec ts;
nanotime(&ts);
timespecsub(&ts,&test_003_start);
printf("%s: called after %lu.%09lu on curcpu %d id %ju\n",__func__,(u_long) ts.tv_sec,(u_long) ts.tv_nsec, curcpu, (uintmax_t)(uintptr_t) arg);
}
/*
 * Test 003: register four cyclics with intervals of 0.2, 0.4, 1.0 and
 * 1.3 s (ids 0-3 passed via cyh_arg), let them run ~1.2 s, then remove
 * them all.  Exercises multiple simultaneous cyclics on one CPU.
 * NOTE(review): 'error' is never set; "passed" is always printed.
 */
static void
cyclic_test_003(void)
{
int error = 0;
cyc_handler_t hdlr;
cyc_time_t when;
cyclic_id_t id;
cyclic_id_t id1;
cyclic_id_t id2;
cyclic_id_t id3;
printf("%s: starting\n",__func__);
hdlr.cyh_func = (cyc_func_t) cyclic_test_003_func;
when.cyt_when = 0;	/* all four start as soon as possible */
nanotime(&test_003_start);
mutex_enter(&cpu_lock);
when.cyt_interval = 200000000;	/* 0.2 s */
hdlr.cyh_arg = (void *) 0UL;
id = cyclic_add(&hdlr, &when);
when.cyt_interval = 400000000;	/* 0.4 s */
hdlr.cyh_arg = (void *) 1UL;
id1 = cyclic_add(&hdlr, &when);
hdlr.cyh_arg = (void *) 2UL;
when.cyt_interval = 1000000000;	/* 1.0 s */
id2 = cyclic_add(&hdlr, &when);
hdlr.cyh_arg = (void *) 3UL;
when.cyt_interval = 1300000000;	/* 1.3 s: may not fire within the 1.2 s window */
id3 = cyclic_add(&hdlr, &when);
mutex_exit(&cpu_lock);
DELAY(1200000);	/* busy-wait 1.2 s */
mutex_enter(&cpu_lock);
cyclic_remove(id);
cyclic_remove(id1);
cyclic_remove(id2);
cyclic_remove(id3);
mutex_exit(&cpu_lock);
printf("%s: %s\n",__func__, error == 0 ? "passed":"failed");
}
/*
 * Kernel thread command routine: runs the test selected by 'arg'
 * (1-3), or all tests for any other value, then exits the thread.
 * Spawned from the sysctl handler so the tests' DELAY() busy-waits do
 * not block the sysctl caller.
 */
static void
cyclic_run_tests(void *arg)
{
intptr_t cmd = (intptr_t) arg;
switch (cmd) {
case 1:
cyclic_test_001();
break;
case 2:
cyclic_test_002();
break;
case 3:
cyclic_test_003();
break;
default:
/* Any other command (e.g. -1) runs the full suite. */
cyclic_test_001();
cyclic_test_002();
cyclic_test_003();
break;
}
printf("%s: finished\n",__func__);
kthread_exit();
}
/*
 * Sysctl handler for debug.cyclic.test.  Accepts a test number (or -1
 * for all tests) and launches the tests in a separate kernel thread so
 * the DELAY() busy-waits do not block the sysctl caller; results appear
 * in the console log / syslog.
 *
 * Fix: accept cmd == 3 as well — cyclic_run_tests() dispatches it to
 * cyclic_test_003(), but the validity check previously rejected it.
 */
static int
cyclic_test(SYSCTL_HANDLER_ARGS)
{
int error, cmd = 0;
error = sysctl_wire_old_buffer(req, sizeof(int));
if (error == 0)
error = sysctl_handle_int(oidp, &cmd, 0, req);
/* Read-only access or handler error: nothing to launch. */
if (error != 0 || req->newptr == NULL)
return (error);
/* Check for command validity. */
switch (cmd) {
case 1:
case 2:
case 3:
case -1:
/*
 * Execute the tests in a kernel thread to avoid blocking
 * the sysctl. Look for the results in the syslog.
 */
error = kthread_add(cyclic_run_tests, (void *)(uintptr_t) cmd,
NULL, NULL, 0, 0, "cyctest%d", cmd);
break;
default:
printf("Usage: debug.cyclic.test=(1..9) or -1 for all tests\n");
error = EINVAL;
break;
}
return (error);
}
SYSCTL_NODE(_debug, OID_AUTO, cyclic, CTLFLAG_RW, NULL, "Cyclic nodes");
SYSCTL_PROC(_debug_cyclic, OID_AUTO, test, CTLTYPE_INT | CTLFLAG_RW, 0, 0,
cyclic_test, "I", "Enables a cyclic test. Use -1 for all tests.");
/*
 * Module event handler for the cyclic_test module.  No state to set up
 * or tear down; all known events succeed, anything else is rejected.
 */
static int
cyclic_test_modevent(module_t mod, int type, void *data)
{
int error = 0;
switch (type) {
case MOD_LOAD:
break;
case MOD_UNLOAD:
break;
case MOD_SHUTDOWN:
break;
default:
error = EOPNOTSUPP;
break;
}
return (error);
}
DEV_MODULE(cyclic_test, cyclic_test_modevent, NULL);
MODULE_VERSION(cyclic_test, 1);
MODULE_DEPEND(cyclic_test, cyclic, 1, 1, 1);
MODULE_DEPEND(cyclic_test, opensolaris, 1, 1, 1);

View File

@ -1,131 +0,0 @@
/*-
* Copyright 2006-2008 John Birrell <jb@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*
*/
static void enable(cyb_arg_t);
static void disable(cyb_arg_t);
static void reprogram(cyb_arg_t, hrtime_t);
static void xcall(cyb_arg_t, cpu_t *, cyc_func_t, void *);
static void cyclic_clock(struct trapframe *frame);
static cyc_backend_t be = {
NULL, /* cyb_configure */
NULL, /* cyb_unconfigure */
enable,
disable,
reprogram,
xcall,
NULL /* cyb_arg_t cyb_arg */
};
/*
 * SYSINIT hook (SI_SUB_SMP): once the APs are up, install cyclic_clock
 * as the high-resolution timer callback and initialise the cyclic
 * subsystem on the remaining CPUs.
 */
static void
cyclic_ap_start(void *dummy)
{
/* Initialise the rest of the CPUs. */
cyclic_clock_func = cyclic_clock;
cyclic_mp_init();
}
SYSINIT(cyclic_ap_start, SI_SUB_SMP, SI_ORDER_ANY, cyclic_ap_start, NULL);
/*
 * Machine dependent cyclic subsystem initialisation: registers this
 * file's backend (the 'be' function-pointer table) with the MI cyclic
 * code.
 */
static void
cyclic_machdep_init(void)
{
/* Register the cyclic backend. */
cyclic_init(&be);
}
/*
 * Machine dependent teardown: de-register the backend from the MI
 * cyclic code.
 */
static void
cyclic_machdep_uninit(void)
{
/* De-register the cyclic backend. */
cyclic_uninit();
}
/*
 * This function is the one registered by the machine dependent
 * initialiser as the callback for high speed timer events.
 * It records the interrupted PC (kernel or user, depending on the
 * trapframe mode) for the DTrace profile provider, marks the CPU as
 * being in cyclic interrupt context, and fires any due cyclics.
 */
static void
cyclic_clock(struct trapframe *frame)
{
cpu_t *c = &solaris_cpu[curcpu];
/* Only CPUs that have been configured for cyclics participate. */
if (c->cpu_cyclic != NULL) {
if (TRAPF_USERMODE(frame)) {
c->cpu_profile_pc = 0;
c->cpu_profile_upc = TRAPF_PC(frame);
} else {
c->cpu_profile_pc = TRAPF_PC(frame);
c->cpu_profile_upc = 0;
}
c->cpu_intr_actv = 1;
/* Fire any timers that are due. */
cyclic_fire(c);
c->cpu_intr_actv = 0;
}
}
/*
 * Backend cyb_enable hook: no-op — the FreeBSD event-timer interrupt
 * source needs no explicit enabling here.
 */
static void
enable(cyb_arg_t arg __unused)
{
}
/*
 * Backend cyb_disable hook: no-op counterpart of enable().
 */
static void
disable(cyb_arg_t arg __unused)
{
}
/*
 * Backend cyb_reprogram hook: convert the absolute expiry time 'exp'
 * (nanoseconds, hrtime_t) to a bintime and program the next cyclic
 * event via clocksource_cyc_set().
 */
static void
reprogram(cyb_arg_t arg __unused, hrtime_t exp)
{
struct bintime bt;
struct timespec ts;
/* Split nanoseconds into a timespec, then convert to bintime. */
ts.tv_sec = exp / 1000000000;
ts.tv_nsec = exp % 1000000000;
timespec2bintime(&ts, &bt);
clocksource_cyc_set(&bt);
}
/*
 * Backend cyb_xcall hook: run 'func(param)' exactly once on the CPU
 * identified by 'c', using a single-CPU smp_rendezvous with no
 * setup/teardown barriers.
 */
static void xcall(cyb_arg_t arg __unused, cpu_t *c, cyc_func_t func,
void *param)
{
cpuset_t cpus;
/* Target only the requested CPU. */
CPU_SETOF(c->cpuid, &cpus);
smp_rendezvous_cpus(cpus,
smp_no_rendevous_barrier, func, smp_no_rendevous_barrier, param);
}

View File

@ -144,13 +144,6 @@ fbt_provide_module(void *arg, modctl_t *lf)
if (strcmp(modname, "dtrace") == 0)
return;
/*
* The cyclic timer subsystem can be built as a module and DTrace
* depends on that, so it is ineligible too.
*/
if (strcmp(modname, "cyclic") == 0)
return;
/*
* To register with DTrace, a module must list 'dtrace' as a
* dependency in order for the kernel linker to resolve

View File

@ -52,9 +52,9 @@
#include <sys/smp.h>
#include <sys/uio.h>
#include <sys/unistd.h>
#include <machine/cpu.h>
#include <machine/stdarg.h>
#include <sys/cyclic.h>
#include <sys/dtrace.h>
#include <sys/dtrace_bsd.h>
@ -97,7 +97,7 @@
* allow for a manual override in case we get it completely wrong.
*/
#ifdef __amd64
#define PROF_ARTIFICIAL_FRAMES 7
#define PROF_ARTIFICIAL_FRAMES 10
#else
#ifdef __i386
#define PROF_ARTIFICIAL_FRAMES 6
@ -126,18 +126,30 @@
#define PROF_ARTIFICIAL_FRAMES 3
#endif
struct profile_probe_percpu;
typedef struct profile_probe {
char prof_name[PROF_NAMELEN];
dtrace_id_t prof_id;
int prof_kind;
#ifdef illumos
hrtime_t prof_interval;
cyclic_id_t prof_cyclic;
#else
sbintime_t prof_interval;
struct callout prof_cyclic;
sbintime_t prof_expected;
struct profile_probe_percpu **prof_pcpus;
#endif
} profile_probe_t;
typedef struct profile_probe_percpu {
hrtime_t profc_expected;
hrtime_t profc_interval;
profile_probe_t *profc_probe;
#ifdef __FreeBSD__
struct callout profc_cyclic;
#endif
} profile_probe_percpu_t;
static d_open_t profile_open;
@ -206,29 +218,92 @@ static dtrace_provider_id_t profile_id;
static hrtime_t profile_interval_min = NANOSEC / 5000; /* 5000 hz */
static int profile_aframes = 0; /* override */
/*
 * Convert a nanosecond count to sbintime_t (32.32 fixed-point seconds).
 */
static sbintime_t
nsec_to_sbt(hrtime_t nsec)
{
time_t sec;
/*
 * We need to calculate nsec * 2^32 / 10^9
 * Seconds and nanoseconds are split to avoid overflow.
 * After the split nsec < 10^9, so (nsec << 32) fits in 64 bits.
 */
sec = nsec / NANOSEC;
nsec = nsec % NANOSEC;
return (((sbintime_t)sec << 32) | ((sbintime_t)nsec << 32) / NANOSEC);
}
/*
 * Convert sbintime_t (32.32 fixed-point seconds) back to nanoseconds:
 * whole seconds scale by NANOSEC; the 32-bit fraction is multiplied by
 * NANOSEC and shifted back down.
 */
static hrtime_t
sbt_to_nsec(sbintime_t sbt)
{
return ((sbt >> 32) * NANOSEC +
(((uint32_t)sbt * (hrtime_t)NANOSEC) >> 32));
}
static void
profile_fire(void *arg)
{
profile_probe_percpu_t *pcpu = arg;
profile_probe_t *prof = pcpu->profc_probe;
hrtime_t late;
solaris_cpu_t *c = &solaris_cpu[curcpu];
struct trapframe *frame;
uintfptr_t pc, upc;
#ifdef illumos
late = gethrtime() - pcpu->profc_expected;
pcpu->profc_expected += pcpu->profc_interval;
#else
late = sbt_to_nsec(sbinuptime() - pcpu->profc_expected);
#endif
dtrace_probe(prof->prof_id, c->cpu_profile_pc,
c->cpu_profile_upc, late, 0, 0);
pc = 0;
upc = 0;
/*
* td_intr_frame can be unset if this is a catch up event
* after waking up from idle sleep.
* This can only happen on a CPU idle thread.
*/
frame = curthread->td_intr_frame;
if (frame != NULL) {
if (TRAPF_USERMODE(frame))
upc = TRAPF_PC(frame);
else
pc = TRAPF_PC(frame);
}
dtrace_probe(prof->prof_id, pc, upc, late, 0, 0);
pcpu->profc_expected += pcpu->profc_interval;
callout_schedule_sbt_curcpu(&pcpu->profc_cyclic,
pcpu->profc_expected, 0, C_DIRECT_EXEC | C_ABSOLUTE);
}
static void
profile_tick(void *arg)
{
profile_probe_t *prof = arg;
solaris_cpu_t *c = &solaris_cpu[curcpu];
struct trapframe *frame;
uintfptr_t pc, upc;
dtrace_probe(prof->prof_id, c->cpu_profile_pc,
c->cpu_profile_upc, 0, 0, 0);
pc = 0;
upc = 0;
/*
* td_intr_frame can be unset if this is a catch up event
* after waking up from idle sleep.
* This can only happen on a CPU idle thread.
*/
frame = curthread->td_intr_frame;
if (frame != NULL) {
if (TRAPF_USERMODE(frame))
upc = TRAPF_PC(frame);
else
pc = TRAPF_PC(frame);
}
dtrace_probe(prof->prof_id, pc, upc, 0, 0, 0);
prof->prof_expected += prof->prof_interval;
callout_schedule_sbt(&prof->prof_cyclic,
prof->prof_expected, 0, C_DIRECT_EXEC | C_ABSOLUTE);
}
static void
@ -250,8 +325,13 @@ profile_create(hrtime_t interval, char *name, int kind)
prof = kmem_zalloc(sizeof (profile_probe_t), KM_SLEEP);
(void) strcpy(prof->prof_name, name);
#ifdef illumos
prof->prof_interval = interval;
prof->prof_cyclic = CYCLIC_NONE;
#else
prof->prof_interval = nsec_to_sbt(interval);
callout_init(&prof->prof_cyclic, CALLOUT_MPSAFE);
#endif
prof->prof_kind = kind;
prof->prof_id = dtrace_probe_create(profile_id,
NULL, NULL, name,
@ -396,13 +476,18 @@ profile_destroy(void *arg, dtrace_id_t id, void *parg)
{
profile_probe_t *prof = parg;
#ifdef illumos
ASSERT(prof->prof_cyclic == CYCLIC_NONE);
#else
ASSERT(!callout_active(&prof->prof_cyclic) && prof->prof_pcpus == NULL);
#endif
kmem_free(prof, sizeof (profile_probe_t));
ASSERT(profile_total >= 1);
atomic_add_32(&profile_total, -1);
}
#ifdef illumos
/*ARGSUSED*/
static void
profile_online(void *arg, cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
@ -478,6 +563,81 @@ profile_disable(void *arg, dtrace_id_t id, void *parg)
prof->prof_cyclic = CYCLIC_NONE;
}
#else
/*
 * Arm a PROF_PROFILE probe: allocate a per-CPU state array (indexed up
 * to mp_maxid) and start one absolute-time callout per online CPU,
 * bound to that CPU, firing profile_fire() at prof_interval.
 */
static void
profile_enable_omni(profile_probe_t *prof)
{
profile_probe_percpu_t *pcpu;
int cpu;
prof->prof_pcpus = kmem_zalloc((mp_maxid + 1) * sizeof(pcpu), KM_SLEEP);
CPU_FOREACH(cpu) {
pcpu = kmem_zalloc(sizeof(profile_probe_percpu_t), KM_SLEEP);
prof->prof_pcpus[cpu] = pcpu;
pcpu->profc_probe = prof;
/* First expiry one interval from now, on this CPU's callout. */
pcpu->profc_expected = sbinuptime() + prof->prof_interval;
pcpu->profc_interval = prof->prof_interval;
callout_init(&pcpu->profc_cyclic, CALLOUT_MPSAFE);
callout_reset_sbt_on(&pcpu->profc_cyclic,
pcpu->profc_expected, 0, profile_fire, pcpu,
cpu, C_DIRECT_EXEC | C_ABSOLUTE);
}
}
/*
 * Disarm a PROF_PROFILE probe: stop and drain every per-CPU callout
 * (drain guarantees no handler is still running), then free the per-CPU
 * records and the array itself.
 */
static void
profile_disable_omni(profile_probe_t *prof)
{
profile_probe_percpu_t *pcpu;
int cpu;
ASSERT(prof->prof_pcpus != NULL);
CPU_FOREACH(cpu) {
pcpu = prof->prof_pcpus[cpu];
ASSERT(pcpu->profc_probe == prof);
ASSERT(callout_active(&pcpu->profc_cyclic));
callout_stop(&pcpu->profc_cyclic);
callout_drain(&pcpu->profc_cyclic);
kmem_free(pcpu, sizeof(profile_probe_percpu_t));
}
kmem_free(prof->prof_pcpus, (mp_maxid + 1) * sizeof(pcpu));
prof->prof_pcpus = NULL;
}
/* ARGSUSED */
/*
 * DTrace provider enable entry point.  PROF_TICK probes get a single
 * absolute-time callout on the current CPU; PROF_PROFILE probes get one
 * callout per CPU via profile_enable_omni().
 */
static void
profile_enable(void *arg, dtrace_id_t id, void *parg)
{
profile_probe_t *prof = parg;
if (prof->prof_kind == PROF_TICK) {
/* First expiry one interval from now. */
prof->prof_expected = sbinuptime() + prof->prof_interval;
callout_reset_sbt(&prof->prof_cyclic,
prof->prof_expected, 0, profile_tick, prof,
C_DIRECT_EXEC | C_ABSOLUTE);
} else {
ASSERT(prof->prof_kind == PROF_PROFILE);
profile_enable_omni(prof);
}
}
/* ARGSUSED */
/*
 * DTrace provider disable entry point: stop-and-drain the tick callout,
 * or tear down the per-CPU callouts for PROF_PROFILE probes.
 */
static void
profile_disable(void *arg, dtrace_id_t id, void *parg)
{
profile_probe_t *prof = parg;
if (prof->prof_kind == PROF_TICK) {
ASSERT(callout_active(&prof->prof_cyclic));
callout_stop(&prof->prof_cyclic);
/* Drain: wait for a concurrently-running handler to finish. */
callout_drain(&prof->prof_cyclic);
} else {
ASSERT(prof->prof_kind == PROF_PROFILE);
profile_disable_omni(prof);
}
}
#endif
static void
profile_load(void *dummy)
{
@ -541,5 +701,4 @@ SYSUNINIT(profile_unload, SI_SUB_DTRACE_PROVIDER, SI_ORDER_ANY, profile_unload,
DEV_MODULE(profile, profile_modevent, NULL);
MODULE_VERSION(profile, 1);
MODULE_DEPEND(profile, dtrace, 1, 1, 1);
MODULE_DEPEND(profile, cyclic, 1, 1, 1);
MODULE_DEPEND(profile, opensolaris, 1, 1, 1);

View File

@ -54,11 +54,6 @@ __FBSDID("$FreeBSD$");
#include <machine/cpu.h>
#include <machine/smp.h>
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t cyclic_clock_func = NULL;
#endif
int cpu_can_deep_sleep = 0; /* C3 state is available. */
int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */
@ -125,9 +120,6 @@ struct pcpu_state {
sbintime_t nextprof; /* Next profclock() event. */
sbintime_t nextcall; /* Next callout event. */
sbintime_t nextcallopt; /* Next optional callout event. */
#ifdef KDTRACE_HOOKS
sbintime_t nextcyc; /* Next OpenSolaris cyclics event. */
#endif
int ipi; /* This CPU needs IPI. */
int idle; /* This CPU is in idle mode. */
};
@ -219,13 +211,6 @@ handleevents(sbintime_t now, int fake)
callout_process(now);
}
#ifdef KDTRACE_HOOKS
if (fake == 0 && now >= state->nextcyc && cyclic_clock_func != NULL) {
state->nextcyc = SBT_MAX;
(*cyclic_clock_func)(frame);
}
#endif
t = getnextcpuevent(0);
ET_HW_LOCK(state);
if (!busy) {
@ -271,10 +256,6 @@ getnextcpuevent(int idle)
if (profiling && event > state->nextprof)
event = state->nextprof;
}
#ifdef KDTRACE_HOOKS
if (event > state->nextcyc)
event = state->nextcyc;
#endif
return (event);
}
@ -595,9 +576,6 @@ cpu_initclocks_bsp(void)
CPU_FOREACH(cpu) {
state = DPCPU_ID_PTR(cpu, timerstate);
mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
state->nextcyc = SBT_MAX;
#endif
state->nextcall = SBT_MAX;
state->nextcallopt = SBT_MAX;
}
@ -816,41 +794,6 @@ cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
ET_UNLOCK();
}
#ifdef KDTRACE_HOOKS
/*
 * (Removed by this commit.)  Program the next cyclic event on the
 * current CPU: record the absolute expiry 't' in nextcyc and, if it is
 * earlier than the CPU's next scheduled event, pull the hardware timer
 * in (one-shot mode only).
 */
void
clocksource_cyc_set(const struct bintime *bt)
{
sbintime_t now, t;
struct pcpu_state *state;
/* Do not touch anything if somebody reconfiguring timers. */
if (busy)
return;
t = bttosbt(*bt);
state = DPCPU_PTR(timerstate);
/* In periodic mode 'now' is the cached tick time, not the clock. */
if (periodic)
now = state->now;
else
now = sbinuptime();
CTR5(KTR_SPARE2, "set_cyc at %d: now %d.%08x t %d.%08x",
curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
(int)(t >> 32), (u_int)(t & 0xffffffff));
ET_HW_LOCK(state);
if (t == state->nextcyc)
goto done;
state->nextcyc = t;
if (t >= state->nextevent)
goto done;
/* New cyclic event is the earliest; reprogram the hardware. */
state->nextevent = t;
if (!periodic)
loadtimer(now, 0);
done:
ET_HW_UNLOCK(state);
}
#endif
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{

View File

@ -90,7 +90,6 @@ SUBDIR= \
cuse \
${_cxgb} \
${_cxgbe} \
${_cyclic} \
dc \
dcons \
dcons_crom \
@ -476,9 +475,6 @@ _cardbus= cardbus
_cbb= cbb
_cpuctl= cpuctl
_cpufreq= cpufreq
.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
_cyclic= cyclic
.endif
_dpms= dpms
_drm= drm
_drm2= drm2
@ -686,9 +682,6 @@ _cardbus= cardbus
_cbb= cbb
_cfi= cfi
_cpufreq= cpufreq
.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
_cyclic= cyclic
.endif
_drm= drm
.if ${MK_CDDL} != "no" || defined(ALL_MODULES)
_dtrace= dtrace

View File

@ -1,21 +0,0 @@
# $FreeBSD$
SYSDIR?= ${.CURDIR}/../..
.PATH: ${SYSDIR}/cddl/dev/cyclic
KMOD= cyclic
SRCS= cyclic.c
SRCS+= vnode_if.h
CFLAGS+= -I${SYSDIR}/cddl/compat/opensolaris \
-I${SYSDIR}/cddl/contrib/opensolaris/uts/common \
-I${SYSDIR} \
-I${SYSDIR}/cddl/dev/cyclic/i386
IGNORE_PRAGMA= 1
.include <bsd.kmod.mk>
CFLAGS+= -include ${SYSDIR}/cddl/compat/opensolaris/sys/debug_compat.h

View File

@ -3,7 +3,6 @@
IGNORE_PRAGMA= 1
load :
-kldload cyclic
-kldload dtrace
.if ${MACHINE_CPUARCH} == "i386"
-kldload sdt
@ -25,5 +24,4 @@ unload :
-kldunload sdt
.endif
-kldunload dtrace
-kldunload cyclic
kldstat

View File

@ -63,7 +63,6 @@ DEV_MODULE(dtraceall, dtraceall_modevent, NULL);
MODULE_VERSION(dtraceall, 1);
/* All the DTrace modules should be dependencies here: */
MODULE_DEPEND(dtraceall, cyclic, 1, 1, 1);
MODULE_DEPEND(dtraceall, opensolaris, 1, 1, 1);
MODULE_DEPEND(dtraceall, dtrace, 1, 1, 1);
MODULE_DEPEND(dtraceall, dtmalloc, 1, 1, 1);

View File

@ -39,15 +39,6 @@ struct vattr;
struct vnode;
struct reg;
/*
* Cyclic clock function type definition used to hook the cyclic
* subsystem into the appropriate timer interrupt.
*/
typedef void (*cyclic_clock_func_t)(struct trapframe *);
extern cyclic_clock_func_t cyclic_clock_func;
void clocksource_cyc_set(const struct bintime *t);
int dtrace_trap(struct trapframe *);
/*