/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz and Don Ahn.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)clock.c	7.2 (Berkeley) 5/12/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Routines to handle clock hardware.
 */

/*
 * inittodr, settodr and support routines written
 * by Christoph Robitschko <chmr@edvz.tu-graz.ac.at>
 *
 * reintroduced and updated by Chris Stenton <chris@gnome.co.uk> 8/10/94
 */
#include "opt_apic.h"
#include "opt_clock.h"
#include "opt_isa.h"
#include "opt_mca.h"
#include "opt_xbox.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/clock.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/cons.h>
#include <sys/power.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#ifdef DEV_APIC
#include <machine/apicvar.h>
#endif
#include <machine/specialreg.h>
#include <machine/ppireg.h>
#include <machine/timerreg.h>

#include <isa/rtc.h>
#ifdef DEV_ISA
#include <isa/isareg.h>
#include <isa/isavar.h>
#endif

#ifdef DEV_MCA
#include <i386/bios/mca_machdep.h>
#endif

#define	TIMER_DIV(x)	((timer_freq + (x) / 2) / (x))
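
/*
 * For example, with the default timer_freq of 1193182 Hz, TIMER_DIV(100)
 * evaluates to (1193182 + 50) / 100 == 11932, the reload count programmed
 * into timer 0 for hz == 100; the "+ (x) / 2" term rounds the divisor to
 * the nearest integer instead of truncating.
 */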

int	clkintr_pending;
int	pscnt = 1;
int	psdiv = 1;
int	statclock_disable;
#ifndef TIMER_FREQ
#define	TIMER_FREQ	1193182
#endif
u_int	timer_freq = TIMER_FREQ;
int	timer0_max_count;
int	timer0_real_max_count;

#define	RTC_LOCK	mtx_lock_spin(&clock_lock)
#define	RTC_UNLOCK	mtx_unlock_spin(&clock_lock)

static	int	beeping = 0;
static	struct mtx clock_lock;
static	struct intsrc *i8254_intsrc;
static	u_int32_t i8254_lastcount;
static	u_int32_t i8254_offset;
static	int	(*i8254_pending)(struct intsrc *);
static	int	i8254_ticked;
static	int	using_lapic_timer;
static	int	rtc_reg = -1;
static	u_char	rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
static	u_char	rtc_statusb = RTCSB_24HR;

/* Values for timerX_state: */
#define	RELEASED	0
#define	RELEASE_PENDING	1
#define	ACQUIRED	2
#define	ACQUIRE_PENDING	3

static	u_char	timer2_state;

static	unsigned i8254_get_timecount(struct timecounter *tc);
static	unsigned i8254_simple_get_timecount(struct timecounter *tc);
static	void	set_timer_freq(u_int freq, int intr_freq);

static struct timecounter i8254_timecounter = {
	i8254_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"i8254",		/* name */
	0			/* quality */
};

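/*
 * i8254 timer 0 interrupt filter; it drives hardclock() when the local
 * APIC timer is not in use (see cpu_initclocks() below).  The updates to
 * i8254_offset and i8254_lastcount account for each full counter period
 * so that i8254_get_timecount() can keep presenting the down-counting
 * 16-bit counter as a monotonically increasing timecount.
 */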
static int
clkintr(struct trapframe *frame)
{

	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += timer0_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	KASSERT(!using_lapic_timer, ("clk interrupt enabled with lapic timer"));
	hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
#ifdef DEV_MCA
	/* Reset clock interrupt by asserting bit 7 of port 0x61 */
	if (MCA_system)
		outb(0x61, inb(0x61) | 0x80);
#endif
	return (FILTER_HANDLED);
}

int
acquire_timer2(int mode)
{

	if (timer2_state != RELEASED)
		return (-1);
	timer2_state = ACQUIRED;

	/*
	 * This access to the timer registers is as atomic as possible
	 * because it is a single instruction.  We could do better if we
	 * knew the rate.  Use of splclock() limits glitches to 10-100us,
	 * and this is probably good enough for timer2, so we aren't as
	 * careful with it as with timer0.
	 */
	outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f));

	return (0);
}

int
release_timer2()
{

	if (timer2_state != ACQUIRED)
		return (-1);
	timer2_state = RELEASED;
	outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
	return (0);
}

/*
 * This routine receives statistical clock interrupts from the RTC.
 * As explained above, these occur at 128 interrupts per second.
 * When profiling, we receive interrupts at a rate of 1024 Hz.
 *
 * This does not actually add as much overhead as it sounds, because
 * when the statistical clock is active, the hardclock driver no longer
 * needs to keep (inaccurate) statistics on its own.  This decouples
 * statistics gathering from scheduling interrupts.
 *
 * The RTC chip requires that we read status register C (RTC_INTR)
 * to acknowledge an interrupt, before it will generate the next one.
 * Under high interrupt load, rtcintr() can be indefinitely delayed and
 * the clock can tick immediately after the read from RTC_INTR.  In this
 * case, the mc146818A interrupt signal will not drop for long enough
 * to register with the 8259 PIC.  If an interrupt is missed, the stat
 * clock will halt, considerably degrading system performance.  This is
 * why we use 'while' rather than a more straightforward 'if' below.
 * Stat clock ticks can still be lost, causing minor loss of accuracy
 * in the statistics, but the stat clock will no longer stop.
 */
static int
rtcintr(struct trapframe *frame)
{

	while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
		if (profprocs != 0) {
			if (--pscnt == 0)
				pscnt = psdiv;
			profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
		}
		if (pscnt == psdiv)
			statclock(TRAPF_USERMODE(frame));
	}
	return (FILTER_HANDLED);
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(rtc, rtc)
{
	printf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n",
	    rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY),
	    rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC),
	    rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR));
}
#endif /* DDB */

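/*
 * Read the current value of i8254 timer 0.  The counter counts down from
 * timer0_max_count and is reloaded automatically, so callers measure
 * elapsed ticks as (prev - cur), adding timer0_max_count back in when the
 * counter has wrapped; see the loop in DELAY() below for that pattern.
 */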
static int
getit(void)
{
	int high, low;

	mtx_lock_spin(&clock_lock);

	/* Select timer0 and latch counter value. */
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);

	low = inb(TIMER_CNTR0);
	high = inb(TIMER_CNTR0);

	mtx_unlock_spin(&clock_lock);
	return ((high << 8) | low);
}

/*
 * Wait "n" microseconds.
 * Relies on timer 0 counting down from (timer_freq / hz).
 * Note: timer had better have been programmed before this is first used!
 */
void
DELAY(int n)
{
	int delta, prev_tick, tick, ticks_left;

#ifdef DELAYDEBUG
	int getit_calls = 1;
	int n1;
	static int state = 0;
#endif

	if (tsc_freq != 0 && !tsc_is_broken) {
		uint64_t start, end, now;

		sched_pin();
		start = rdtsc();
		end = start + (tsc_freq * n) / 1000000;
		do {
			now = rdtsc();
		} while (now < end || (now > start && end < start));
		sched_unpin();
		return;
	}
#ifdef DELAYDEBUG
	if (state == 0) {
		state = 1;
		for (n1 = 1; n1 <= 10000000; n1 *= 10)
			DELAY(n1);
		state = 2;
	}
	if (state == 1)
		printf("DELAY(%d)...", n);
#endif
	/*
	 * Read the counter first, so that the rest of the setup overhead is
	 * counted.  Guess the initial overhead is 20 usec (on most systems it
	 * takes about 1.5 usec for each of the i/o's in getit().  The loop
	 * takes about 6 usec on a 486/33 and 13 usec on a 386/20.  The
	 * multiplications and divisions to scale the count take a while).
	 *
	 * However, if ddb is active then use a fake counter since reading
	 * the i8254 counter involves acquiring a lock.  ddb must not do
	 * locking for many reasons, but it calls here for at least atkbd
	 * input.
	 */
#ifdef KDB
	if (kdb_active)
		prev_tick = 1;
	else
#endif
		prev_tick = getit();
	n -= 0;			/* XXX actually guess no initial overhead */
	/*
	 * Calculate (n * (timer_freq / 1e6)) without using floating point
	 * and without any avoidable overflows.
	 */
	if (n <= 0)
		ticks_left = 0;
	else if (n < 256)
		/*
		 * Use fixed point to avoid a slow division by 1000000.
		 * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest.
		 * 2^15 is the first power of 2 that gives exact results
		 * for n between 0 and 256.
		 */
		ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15;
	else
		/*
		 * Don't bother using fixed point, although gcc-2.7.2
		 * generates particularly poor code for the long long
		 * division, since even the slow way will complete long
		 * before the delay is up (unless we're interrupted).
		 */
		ticks_left = ((u_int)n * (long long)timer_freq + 999999)
			     / 1000000;
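	/*
	 * For example, DELAY(100) takes the fixed point branch above and
	 * computes ticks_left = (100 * 39099 + 32767) >> 15 == 120, i.e.
	 * the 100 us * 1.193182 MHz = 119.3 ticks rounded up to a whole
	 * tick.
	 */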

	while (ticks_left > 0) {
#ifdef KDB
		if (kdb_active) {
			inb(0x84);
			tick = prev_tick - 1;
			if (tick <= 0)
				tick = timer0_max_count;
		} else
#endif
			tick = getit();
#ifdef DELAYDEBUG
		++getit_calls;
#endif
		delta = prev_tick - tick;
		prev_tick = tick;
		if (delta < 0) {
			delta += timer0_max_count;
			/*
			 * Guard against timer0_max_count being wrong.
			 * This shouldn't happen in normal operation,
			 * but it may happen if set_timer_freq() is
			 * traced.
			 */
			if (delta < 0)
				delta = 0;
		}
		ticks_left -= delta;
	}
#ifdef DELAYDEBUG
	if (state == 1)
		printf(" %d calls to getit() at %d usec each\n",
		    getit_calls, (n + 5) / getit_calls);
#endif
}

static void
sysbeepstop(void *chan)
{

	ppi_spkr_off();		/* disable counter2 output to speaker */
	timer_spkr_release();
	beeping = 0;
}

int
sysbeep(int pitch, int period)
{
	int x = splclock();

	if (timer_spkr_acquire())
		if (!beeping) {
			/* Something else owns it. */
			splx(x);
			return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
		}
	mtx_lock_spin(&clock_lock);
	spkr_set_pitch(pitch);
	mtx_unlock_spin(&clock_lock);
	if (!beeping) {
		/* enable counter2 output to speaker */
		ppi_spkr_on();
		beeping = period;
		timeout(sysbeepstop, (void *)NULL, period);
	}
	splx(x);
	return (0);
}

/*
 * RTC support routines
 */

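/*
 * The MC146818 is accessed through an index/data port pair: the register
 * number is written to IO_RTC and the value is then transferred through
 * IO_RTC + 1.  rtc_reg caches the last index written so repeated accesses
 * to the same register can skip the index write.  The inb(0x84) calls are
 * the traditional ISA "dummy" reads, used here as a short I/O recovery
 * delay between accesses.
 */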
int
rtcin(reg)
	int reg;
{
	u_char val;

	RTC_LOCK;
	if (rtc_reg != reg) {
		inb(0x84);
		outb(IO_RTC, reg);
		rtc_reg = reg;
		inb(0x84);
	}
	val = inb(IO_RTC + 1);
	RTC_UNLOCK;
	return (val);
}

static void
writertc(int reg, u_char val)
{

	RTC_LOCK;
	if (rtc_reg != reg) {
		inb(0x84);
		outb(IO_RTC, reg);
		rtc_reg = reg;
		inb(0x84);
	}
	outb(IO_RTC + 1, val);
	inb(0x84);
	RTC_UNLOCK;
}

static __inline int
readrtc(int port)
{
	return (bcd2bin(rtcin(port)));
}
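
/*
 * The RTC is run in BCD mode by this driver, so readrtc() converts each
 * field with bcd2bin() (e.g. a raw 0x59 in RTC_SEC becomes 59), and
 * resettodr() below converts back with bin2bcd() before writing the
 * fields out.
 */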
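/*
 * Estimate the true i8254 input frequency by counting how many timer 0
 * ticks elapse between two transitions of the mc146818A seconds register,
 * i.e. over one RTC second.  The accumulated tick count is returned as the
 * measured frequency in Hz; on any failure the compiled-in default
 * timer_freq is returned instead.
 */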
static u_int
calibrate_clocks(void)
{
	u_int count, prev_count, tot_count;
	int sec, start_sec, timeout;

	if (bootverbose)
		printf("Calibrating clock(s) ... ");
	if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
		goto fail;
	timeout = 100000000;

	/* Read the mc146818A seconds counter. */
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
			sec = rtcin(RTC_SEC);
			break;
		}
		if (--timeout == 0)
			goto fail;
	}

	/* Wait for the mc146818A seconds counter to change. */
	start_sec = sec;
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
			sec = rtcin(RTC_SEC);
			if (sec != start_sec)
				break;
		}
		if (--timeout == 0)
			goto fail;
	}

	/* Start keeping track of the i8254 counter. */
	prev_count = getit();
	if (prev_count == 0 || prev_count > timer0_max_count)
		goto fail;
	tot_count = 0;

	/*
	 * Wait for the mc146818A seconds counter to change.  Read the i8254
	 * counter for each iteration since this is convenient and only
	 * costs a few usec of inaccuracy.  The timing of the final reads
	 * of the counters almost matches the timing of the initial reads,
	 * so the main cause of inaccuracy is the varying latency from
	 * inside getit() or rtcin(RTC_STATUSA) to the beginning of the
	 * rtcin(RTC_SEC) that returns a changed seconds count.  The
	 * maximum inaccuracy from this cause is < 10 usec on 486's.
	 */
	start_sec = sec;
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP))
			sec = rtcin(RTC_SEC);
		count = getit();
		if (count == 0 || count > timer0_max_count)
			goto fail;
		if (count > prev_count)
			tot_count += prev_count - (count - timer0_max_count);
		else
			tot_count += prev_count - count;
		prev_count = count;
		if (sec != start_sec)
			break;
		if (--timeout == 0)
			goto fail;
	}

	if (bootverbose) {
		printf("i8254 clock: %u Hz\n", tot_count);
	}
	return (tot_count);

fail:
	if (bootverbose)
		printf("failed, using default i8254 clock of %u Hz\n",
		    timer_freq);
	return (timer_freq);
}
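/*
 * Reprogram timer 0 for a new input frequency and interrupt rate.  The
 * 8254 is placed in rate-generator mode and the 16-bit reload value is
 * written LSB first, then MSB.  When the lapic timer drives the clocks a
 * reload value of 0x10000 (written as two zero bytes) is used, giving the
 * longest possible period since the counter then only serves as a
 * timecounter.
 */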
static void
set_timer_freq(u_int freq, int intr_freq)
{
	int new_timer0_real_max_count;

	i8254_timecounter.tc_frequency = freq;
	mtx_lock_spin(&clock_lock);
	timer_freq = freq;
	if (using_lapic_timer)
		new_timer0_real_max_count = 0x10000;
	else
		new_timer0_real_max_count = TIMER_DIV(intr_freq);
	if (new_timer0_real_max_count != timer0_real_max_count) {
		timer0_real_max_count = new_timer0_real_max_count;
		if (timer0_real_max_count == 0x10000)
			timer0_max_count = 0xffff;
		else
			timer0_max_count = timer0_real_max_count;
		outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
		outb(TIMER_CNTR0, timer0_real_max_count & 0xff);
		outb(TIMER_CNTR0, timer0_real_max_count >> 8);
	}
	mtx_unlock_spin(&clock_lock);
}

static void
i8254_restore(void)
{

	mtx_lock_spin(&clock_lock);
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
	outb(TIMER_CNTR0, timer0_real_max_count & 0xff);
	outb(TIMER_CNTR0, timer0_real_max_count >> 8);
	mtx_unlock_spin(&clock_lock);
}

static void
rtc_restore(void)
{

	/* Restore all of the RTC's "status" (actually, control) registers. */
	/* XXX locking is needed for RTC access. */
	rtc_reg = -1;
	writertc(RTC_STATUSB, RTCSB_24HR);
	writertc(RTC_STATUSA, rtc_statusa);
	writertc(RTC_STATUSB, rtc_statusb);
	rtcin(RTC_INTR);
}

/*
 * Restore all the timers non-atomically (XXX: should be atomically).
 *
 * This function is called from pmtimer_resume() to restore all the timers.
 * This should not be necessary, but there are broken laptops that do not
 * restore all the timers on resume.
 */
void
timer_restore(void)
{

	i8254_restore();		/* restore timer_freq and hz */
	rtc_restore();			/* reenable RTC interrupts */
}

/* This is separate from startrtclock() so that it can be called early. */
void
i8254_init(void)
{

	mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_NOPROFILE);
	set_timer_freq(timer_freq, hz);
}

void
startrtclock()
{
	u_int delta, freq;

	writertc(RTC_STATUSA, rtc_statusa);
	writertc(RTC_STATUSB, RTCSB_24HR);

	freq = calibrate_clocks();
#ifdef CLK_CALIBRATION_LOOP
	if (bootverbose) {
		printf(
		"Press a key on the console to abort clock calibration\n");
		while (cncheckc() == -1)
			calibrate_clocks();
	}
#endif

	/*
	 * Use the calibrated i8254 frequency if it seems reasonable.
	 * Otherwise use the default, and don't use the calibrated i586
	 * frequency.
	 */
	delta = freq > timer_freq ? freq - timer_freq : timer_freq - freq;
	if (delta < timer_freq / 100) {
#ifndef CLK_USE_I8254_CALIBRATION
		if (bootverbose)
			printf(
"CLK_USE_I8254_CALIBRATION not specified - using default frequency\n");
		freq = timer_freq;
#endif
		timer_freq = freq;
	} else {
		if (bootverbose)
			printf(
		    "%d Hz differs from default of %d Hz by more than 1%%\n",
			    freq, timer_freq);
	}

	set_timer_freq(timer_freq, hz);
	tc_init(&i8254_timecounter);

	init_TSC();
}

/*
 * Initialize the time of day register, based on the time base which is, e.g.
 * from a filesystem.
 */
void
inittodr(time_t base)
{
	int s;
	struct timespec ts;
	struct clocktime ct;

	if (base) {
		s = splclock();
		ts.tv_sec = base;
		ts.tv_nsec = 0;
		tc_setclock(&ts);
		splx(s);
	}

	/* Look if we have an RTC present and the time is valid. */
	if (!(rtcin(RTC_STATUSD) & RTCSD_PWR)) {
		printf("Invalid time in clock: check and reset the date!\n");
		return;
	}

	/* Wait for the time update to complete. */
	/* If RTCSA_TUP is zero, we have at least 244us before next update. */
	s = splhigh();
	while (rtcin(RTC_STATUSA) & RTCSA_TUP) {
		splx(s);
		s = splhigh();
	}
	ct.nsec = 0;
	ct.sec = readrtc(RTC_SEC);
	ct.min = readrtc(RTC_MIN);
	ct.hour = readrtc(RTC_HRS);
	ct.day = readrtc(RTC_DAY);
	ct.dow = readrtc(RTC_WDAY) - 1;
	ct.mon = readrtc(RTC_MONTH);
	ct.year = readrtc(RTC_YEAR);
#ifdef USE_RTC_CENTURY
	ct.year += readrtc(RTC_CENTURY) * 100;
#else
	ct.year += 2000;
#endif
	/* Set dow = -1 because some clocks don't set it correctly. */
	ct.dow = -1;
	if (clock_ct_to_ts(&ct, &ts)) {
		printf("Invalid time in clock: check and reset the date!\n");
		return;
	}
	ts.tv_sec += utc_offset();
	tc_setclock(&ts);
}

/*
 * Write system time back to RTC
 */
void
resettodr()
{
	struct timespec ts;
	struct clocktime ct;

	if (disable_rtc_set)
		return;

	getnanotime(&ts);
	ts.tv_sec -= utc_offset();
	clock_ts_to_ct(&ts, &ct);

	/* Disable RTC updates and interrupts. */
	writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);

	writertc(RTC_SEC, bin2bcd(ct.sec));		/* Write back Seconds */
	writertc(RTC_MIN, bin2bcd(ct.min));		/* Write back Minutes */
	writertc(RTC_HRS, bin2bcd(ct.hour));		/* Write back Hours */

	writertc(RTC_WDAY, ct.dow + 1);			/* Write back Weekday */
	writertc(RTC_DAY, bin2bcd(ct.day));		/* Write back Day */
	writertc(RTC_MONTH, bin2bcd(ct.mon));		/* Write back Month */
	writertc(RTC_YEAR, bin2bcd(ct.year % 100));	/* Write back Year */
#ifdef USE_RTC_CENTURY
	writertc(RTC_CENTURY, bin2bcd(ct.year / 100));	/* ... and Century */
#endif

	/* Reenable RTC updates and interrupts. */
	writertc(RTC_STATUSB, rtc_statusb);
	rtcin(RTC_INTR);
}
|
|
|
|
|
1997-04-26 11:46:25 +00:00
|
|
|
|
1993-06-12 14:58:17 +00:00
|
|
|
/*
|
1994-12-30 12:43:35 +00:00
|
|
|
* Start both clocks running.
|
1993-06-12 14:58:17 +00:00
|
|
|
*/
|
1994-12-30 12:43:35 +00:00
|
|
|
void
|
|
|
|
cpu_initclocks()
|
1993-06-12 14:58:17 +00:00
|
|
|
{
|
1994-12-30 12:43:35 +00:00
|
|
|
int diag;
|
1993-06-12 14:58:17 +00:00
|
|
|
|
2005-02-08 20:25:07 +00:00
|
|
|
#ifdef DEV_APIC
|
|
|
|
using_lapic_timer = lapic_setup_clock();
|
|
|
|
#endif
|
2005-03-24 21:34:16 +00:00
|
|
|
/*
|
|
|
|
* If we aren't using the local APIC timer to drive the kernel
|
|
|
|
* clocks, setup the interrupt handler for the 8254 timer 0 so
|
2005-07-01 15:47:27 +00:00
|
|
|
* that it can drive hardclock(). Otherwise, change the 8254
|
|
|
|
* timecounter to user a simpler algorithm.
|
2005-03-24 21:34:16 +00:00
|
|
|
*/
|
2005-07-05 20:13:12 +00:00
|
|
|
if (!using_lapic_timer) {
|
2007-03-04 04:55:19 +00:00
|
|
|
intr_add_handler("clk", 0, (driver_filter_t *)clkintr, NULL,
|
|
|
|
NULL, INTR_TYPE_CLK, NULL);
|
2005-03-24 21:34:16 +00:00
|
|
|
i8254_intsrc = intr_lookup_source(0);
|
|
|
|
if (i8254_intsrc != NULL)
|
|
|
|
i8254_pending =
|
|
|
|
i8254_intsrc->is_pic->pic_source_pending;
|
2005-07-01 15:47:27 +00:00
|
|
|
} else {
|
|
|
|
i8254_timecounter.tc_get_timecount =
|
|
|
|
i8254_simple_get_timecount;
|
|
|
|
i8254_timecounter.tc_counter_mask = 0xffff;
|
|
|
|
set_timer_freq(timer_freq, hz);
|
2005-03-24 21:34:16 +00:00
|
|
|
}
|
1997-06-25 21:00:00 +00:00
|
|
|
|
1994-12-30 12:43:35 +00:00
|
|
|
/* Initialize RTC. */
|
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
|
|
|
writertc(RTC_STATUSB, RTCSB_24HR);
|
1996-04-22 19:40:28 +00:00
|
|
|
|
2005-03-24 21:34:16 +00:00
|
|
|
/*
|
|
|
|
* If the separate statistics clock hasn't been explicitly disabled
|
|
|
|
* and we aren't already using the local APIC timer to drive the
|
|
|
|
* kernel clocks, then set up the RTC to periodically interrupt to
|
|
|
|
* drive statclock() and profclock().
|
|
|
|
*/
|
2005-02-08 20:25:07 +00:00
|
|
|
if (!statclock_disable && !using_lapic_timer) {
|
2003-11-13 10:02:12 +00:00
|
|
|
diag = rtcin(RTC_DIAG);
|
|
|
|
if (diag != 0)
|
|
|
|
printf("RTC BIOS diagnostic error %b\n", diag, RTCDG_BITS);
|
1997-07-19 02:28:30 +00:00
|
|
|
|
2005-03-24 21:34:16 +00:00
|
|
|
/* Setting stathz to nonzero early helps avoid races. */
|
|
|
|
stathz = RTC_NOPROFRATE;
|
|
|
|
profhz = RTC_PROFRATE;
|
|
|
|
|
|
|
|
/* Enable periodic interrupts from the RTC. */
|
|
|
|
rtc_statusb |= RTCSB_PINTR;
|
2007-02-23 12:19:07 +00:00
|
|
|
intr_add_handler("rtc", 8, (driver_filter_t *)rtcintr, NULL, NULL,
|
|
|
|
INTR_TYPE_CLK, NULL);
|
2000-10-06 02:20:21 +00:00
|
|
|
|
2003-11-13 10:02:12 +00:00
|
|
|
writertc(RTC_STATUSB, rtc_statusb);
|
2005-02-03 19:06:03 +00:00
|
|
|
rtcin(RTC_INTR);
|
2003-11-13 10:02:12 +00:00
|
|
|
}
|
2000-10-06 02:20:21 +00:00
|
|
|
|
2003-08-06 15:05:27 +00:00
|
|
|
init_TSC_tc();
|
1998-03-14 03:11:50 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
void
|
2003-02-03 17:53:15 +00:00
|
|
|
cpu_startprofclock(void)
|
1994-05-25 09:21:21 +00:00
|
|
|
{
|
2003-02-03 17:53:15 +00:00
|
|
|
|
2005-02-08 20:25:07 +00:00
|
|
|
if (using_lapic_timer)
|
|
|
|
return;
|
2003-02-03 17:53:15 +00:00
|
|
|
rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
|
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
|
|
|
psdiv = pscnt = psratio;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
cpu_stopprofclock(void)
|
|
|
|
{
|
|
|
|
|
2005-02-08 20:25:07 +00:00
|
|
|
if (using_lapic_timer)
|
|
|
|
return;
|
2003-02-03 17:53:15 +00:00
|
|
|
rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
|
1994-12-30 12:43:35 +00:00
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
2003-02-03 17:53:15 +00:00
|
|
|
psdiv = pscnt = 1;
|
1994-05-25 09:21:21 +00:00
|
|
|
}
|
1996-05-01 08:39:02 +00:00
|
|
|
|
|
|
|
static int
|
2000-07-04 11:25:35 +00:00
|
|
|
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
|
1996-05-01 08:39:02 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
u_int freq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use `i8254' instead of `timer' in external names because `timer'
|
|
|
|
* is too generic. Should use it everywhere.
|
|
|
|
*/
|
|
|
|
freq = timer_freq;
|
2007-06-04 18:25:08 +00:00
|
|
|
error = sysctl_handle_int(oidp, &freq, 0, req);
|
2005-07-01 15:47:27 +00:00
|
|
|
if (error == 0 && req->newptr != NULL)
|
1996-05-01 08:39:02 +00:00
|
|
|
set_timer_freq(freq, hz);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW,
|
2002-06-22 16:30:18 +00:00
|
|
|
0, sizeof(u_int), sysctl_machdep_i8254_freq, "IU", "");
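/*
 * The value can be read or tuned at run time through the machdep.i8254_freq
 * sysctl.  Example (the value shown is the nominal i8254 frequency and is
 * illustrative only; a calibrated figure may differ slightly):
 *
 *	sysctl machdep.i8254_freq
 *	sysctl machdep.i8254_freq=1193182
 */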
|
1996-05-01 08:39:02 +00:00
|
|
|
|
2005-07-01 15:47:27 +00:00
|
|
|
static unsigned
|
|
|
|
i8254_simple_get_timecount(struct timecounter *tc)
|
|
|
|
{
|
|
|
|
|
2005-07-13 15:43:21 +00:00
|
|
|
return (timer0_max_count - getit());
|
2005-07-01 15:47:27 +00:00
|
|
|
}
|
|
|
|
|
1998-05-28 09:30:28 +00:00
|
|
|
static unsigned
|
1998-06-07 08:40:53 +00:00
|
|
|
i8254_get_timecount(struct timecounter *tc)
|
1998-02-20 16:36:17 +00:00
|
|
|
{
|
1998-05-19 18:48:30 +00:00
|
|
|
u_int count;
|
1998-02-20 16:36:17 +00:00
|
|
|
u_int high, low;
|
2000-10-06 02:20:21 +00:00
|
|
|
u_int eflags;
|
1998-02-20 16:36:17 +00:00
|
|
|
|
2000-10-06 02:20:21 +00:00
|
|
|
eflags = read_eflags();
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&clock_lock);
|
1998-02-20 16:36:17 +00:00
|
|
|
|
|
|
|
/* Select timer0 and latch counter value. */
|
|
|
|
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
|
|
|
|
|
|
|
|
low = inb(TIMER_CNTR0);
|
|
|
|
high = inb(TIMER_CNTR0);
|
1999-05-28 14:08:59 +00:00
|
|
|
count = timer0_max_count - ((high << 8) | low);
|
|
|
|
if (count < i8254_lastcount ||
|
|
|
|
(!i8254_ticked && (clkintr_pending ||
|
2000-10-06 02:20:21 +00:00
|
|
|
((count < 20 || (!(eflags & PSL_I) && count < timer0_max_count / 2u)) &&
|
2004-04-27 20:03:26 +00:00
|
|
|
i8254_pending != NULL && i8254_pending(i8254_intsrc))))) {
|
1998-02-20 16:36:17 +00:00
|
|
|
i8254_ticked = 1;
|
1999-05-28 14:08:59 +00:00
|
|
|
i8254_offset += timer0_max_count;
|
1998-02-20 16:36:17 +00:00
|
|
|
}
|
|
|
|
i8254_lastcount = count;
|
|
|
|
count += i8254_offset;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&clock_lock);
|
1998-02-20 16:36:17 +00:00
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
2002-01-30 12:41:12 +00:00
|
|
|
#ifdef DEV_ISA
|
2000-06-23 07:44:33 +00:00
|
|
|
/*
|
|
|
|
* Attach to the ISA PnP descriptors for the timer and realtime clock.
|
|
|
|
*/
|
|
|
|
static struct isa_pnp_id attimer_ids[] = {
|
|
|
|
{ 0x0001d041 /* PNP0100 */, "AT timer" },
|
|
|
|
{ 0x000bd041 /* PNP0B00 */, "AT realtime clock" },
|
|
|
|
{ 0 }
|
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
attimer_probe(device_t dev)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
|
|
|
if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids)) <= 0)
|
|
|
|
device_quiet(dev);
|
|
|
|
return (result);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
attimer_attach(device_t dev)
|
|
|
|
{
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static device_method_t attimer_methods[] = {
|
|
|
|
/* Device interface */
|
|
|
|
DEVMETHOD(device_probe, attimer_probe),
|
|
|
|
DEVMETHOD(device_attach, attimer_attach),
|
|
|
|
DEVMETHOD(device_detach, bus_generic_detach),
|
|
|
|
DEVMETHOD(device_shutdown, bus_generic_shutdown),
|
|
|
|
DEVMETHOD(device_suspend, bus_generic_suspend), /* XXX stop statclock? */
|
|
|
|
DEVMETHOD(device_resume, bus_generic_resume), /* XXX restart statclock? */
|
|
|
|
{ 0, 0 }
|
|
|
|
};
|
|
|
|
|
|
|
|
static driver_t attimer_driver = {
|
|
|
|
"attimer",
|
|
|
|
attimer_methods,
|
|
|
|
1, /* no softc */
|
|
|
|
};
|
|
|
|
|
|
|
|
static devclass_t attimer_devclass;
|
|
|
|
|
|
|
|
DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0);
|
2001-08-30 09:17:03 +00:00
|
|
|
DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0);
|
2007-06-15 22:58:14 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Linux-style /dev/nvram driver
|
|
|
|
*
|
|
|
|
* CMOS RAM occupies bytes 14 through 127, for a total of 114 bytes.
|
|
|
|
* bytes 16 through 31 are checksummed at byte 32.
|
|
|
|
* Unlike Linux, you have to take care of the checksums yourself.
|
|
|
|
* The driver exposes byte 14 as file offset 0.
|
|
|
|
*/
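/*
 * Illustrative userland sketch (not part of the driver) of the offset
 * mapping and checksum caveat described above.  Everything here is ordinary
 * libc usage and serves only as an example:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/nvram", O_RDONLY);
 *	unsigned char v;
 *	if (fd >= 0) {
 *		if (pread(fd, &v, 1, 32 - 14) == 1)	// CMOS byte 32 lives at file offset 18
 *			printf("checksum byte = 0x%02x\n", v);
 *		close(fd);
 *	}
 *
 * A writer that modifies CMOS bytes 16 through 31 must recompute the
 * checksum at byte 32 itself, since the driver does not maintain it.
 */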
|
|
|
|
|
|
|
|
#define NVRAM_FIRST RTC_DIAG /* 14 */
|
|
|
|
#define NVRAM_LAST 128
|
|
|
|
|
|
|
|
static d_open_t nvram_open;
|
|
|
|
static d_read_t nvram_read;
|
|
|
|
static d_write_t nvram_write;
|
|
|
|
|
|
|
|
static struct cdev *nvram_dev;
|
|
|
|
|
|
|
|
static struct cdevsw nvram_cdevsw = {
|
|
|
|
.d_version = D_VERSION,
|
|
|
|
.d_flags = D_NEEDGIANT,
|
|
|
|
.d_open = nvram_open,
|
|
|
|
.d_read = nvram_read,
|
|
|
|
.d_write = nvram_write,
|
|
|
|
.d_name = "nvram",
|
|
|
|
};
|
|
|
|
|
|
|
|
static int
|
|
|
|
nvram_open(struct cdev *dev __unused, int flags, int fmt __unused,
|
|
|
|
struct thread *td)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
if (flags & FWRITE)
|
|
|
|
error = securelevel_gt(td->td_ucred, 0);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nvram_read(struct cdev *dev, struct uio *uio, int flags)
|
|
|
|
{
|
|
|
|
int nv_off;
|
|
|
|
u_char v;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
while (uio->uio_resid > 0 && error == 0) {
|
|
|
|
nv_off = uio->uio_offset + NVRAM_FIRST;
|
|
|
|
if (nv_off < NVRAM_FIRST || nv_off >= NVRAM_LAST)
|
|
|
|
return (0); /* Signal EOF */
|
|
|
|
/* Single byte at a time */
|
|
|
|
v = rtcin(nv_off);
|
|
|
|
error = uiomove(&v, 1, uio);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nvram_write(struct cdev *dev, struct uio *uio, int flags)
|
|
|
|
{
|
|
|
|
int nv_off;
|
|
|
|
u_char v;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
while (uio->uio_resid > 0 && error == 0) {
|
|
|
|
nv_off = uio->uio_offset + NVRAM_FIRST;
|
|
|
|
if (nv_off < NVRAM_FIRST || nv_off >= NVRAM_LAST)
|
|
|
|
return (0); /* Signal EOF */
|
|
|
|
/* Single byte at a time */
|
|
|
|
error = uiomove(&v, 1, uio);
if (error != 0)
	break;	/* Don't write a stale byte if uiomove() failed. */
|
|
|
|
writertc(nv_off, v);
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nvram_modevent(module_t mod __unused, int type, void *data __unused)
|
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
nvram_dev = make_dev(&nvram_cdevsw, 0,
|
|
|
|
UID_ROOT, GID_KMEM, 0640, "nvram");
|
|
|
|
break;
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
case MOD_SHUTDOWN:
|
|
|
|
destroy_dev(nvram_dev);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (EOPNOTSUPP);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
DEV_MODULE(nvram, nvram_modevent, NULL);
|
|
|
|
|
2002-01-30 12:41:12 +00:00
|
|
|
#endif /* DEV_ISA */
|