/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz and Don Ahn.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)clock.c	7.2 (Berkeley) 5/12/91
 * $FreeBSD$
 */

/*
 * Routines to handle clock hardware.
 */

/*
 * inittodr, settodr and support routines written
 * by Christoph Robitschko <chmr@edvz.tu-graz.ac.at>
 *
 * reintroduced and updated by Chris Stenton <chris@gnome.co.uk> 8/10/94
 */

/*
 * modified for PC98 by Kakefuda
 */

#include "opt_clock.h"
#include "opt_apm.h"
#include "opt_mca.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/cons.h>

#include <machine/clock.h>
#ifdef CLK_CALIBRATION_LOOP
#endif
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/limits.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#ifdef APIC_IO
#include <machine/segments.h>
#endif
#if defined(SMP) || defined(APIC_IO)
#include <machine/smp.h>
#endif /* SMP || APIC_IO */
#include <machine/specialreg.h>

#include <i386/isa/icu.h>
#ifdef PC98
#include <pc98/pc98/pc98.h>
#include <pc98/pc98/pc98_machdep.h>
#include <i386/isa/isa_device.h>
#else
#include <i386/isa/isa.h>
#include <isa/rtc.h>
#endif
#include <isa/isavar.h>
#include <i386/isa/timerreg.h>

#include <i386/isa/intr_machdep.h>

#ifdef DEV_MCA
#include <i386/isa/mca_machdep.h>
#endif

#ifdef APIC_IO
#include <i386/isa/intr_machdep.h>
/* The interrupt triggered by the 8254 (timer) chip */
int apic_8254_intr;
static u_long read_intr_count __P((int vec));
static void setup_8254_mixed_mode __P((void));
#endif

/*
 * 32-bit time_t's can't reach leap years before 1904 or after 2036, so we
 * can use a simple formula for leap years.
 */
#define	LEAPYEAR(y) ((u_int)(y) % 4 == 0)
#define	DAYSPERYEAR   (31+28+31+30+31+30+31+31+30+31+30+31)
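
/*
 * Convert a frequency in Hz into an i8254 divisor, rounding to nearest
 * rather than truncating (hence the "+ (x) / 2").
 */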
#define TIMER_DIV(x) ((timer_freq + (x) / 2) / (x))

/*
 * Time in timer cycles that it takes for microtime() to disable interrupts
 * and latch the count.  microtime() currently uses "cli; outb ..." so it
 * normally takes less than 2 timer cycles.  Add a few for cache misses.
 * Add a few more to allow for latency in bogus calls to microtime() with
 * interrupts already disabled.
 */
#define	TIMER0_LATCH_COUNT	20

/*
 * Maximum frequency that we are willing to allow for timer0.  Must be
 * low enough to guarantee that the timer interrupt handler returns
 * before the next timer interrupt.
 */
#define	TIMER0_MAX_FREQ		20000

int	adjkerntz;		/* local offset from GMT in seconds */
int	clkintr_pending;
int	disable_rtc_set;	/* disable resettodr() if != 0 */
int	statclock_disable;
#ifndef TIMER_FREQ
#ifdef PC98
#define	TIMER_FREQ	2457600
#else /* IBM-PC */
#define	TIMER_FREQ	1193182
#endif /* PC98 */
#endif
u_int	timer_freq = TIMER_FREQ;
int	timer0_max_count;
u_int	tsc_freq;
int	tsc_is_broken;
u_int	tsc_present;
int	wall_cmos_clock;	/* wall CMOS clock assumed if != 0 */
struct mtx clock_lock;

static	int	beeping = 0;
static	const u_char daysinmonth[] = {31,28,31,30,31,30,31,31,30,31,30,31};
static	u_int	hardclock_max_count;
static	u_int32_t i8254_lastcount;
static	u_int32_t i8254_offset;
static	int	i8254_ticked;
/*
 * XXX new_function and timer_func should not handle clockframes, but
 * timer_func currently needs to hold hardclock to handle the
 * timer0_state == 0 case.  We should use inthand_add()/inthand_remove()
 * to switch between clkintr() and a slightly different timerintr().
 */
static	void	(*new_function) __P((struct clockframe *frame));
static	u_int	new_rate;
#ifndef PC98
static	u_char	rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
static	u_char	rtc_statusb = RTCSB_24HR | RTCSB_PINTR;
#endif
static	u_int	timer0_prescaler_count;

/* Values for timerX_state: */
#define	RELEASED	0
#define	RELEASE_PENDING	1
#define	ACQUIRED	2
#define	ACQUIRE_PENDING	3
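
/*
 * acquire_timer0() and release_timer0() normally just mark a transition
 * as pending; clkintr() completes it (and reprograms the hardware) at
 * the next clock interrupt.
 */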

static	u_char	timer0_state;
#ifdef PC98
static	u_char	timer1_state;
#endif
static	u_char	timer2_state;
static	void	(*timer_func) __P((struct clockframe *frame)) = hardclock;
#ifdef PC98
static void rtc_serialcombit __P((int));
static void rtc_serialcom __P((int));
static int rtc_inb __P((void));
static void rtc_outb __P((int));
#endif

static	unsigned i8254_get_timecount __P((struct timecounter *tc));
static	unsigned tsc_get_timecount __P((struct timecounter *tc));
static	void	set_timer_freq(u_int freq, int intr_freq);

static struct timecounter tsc_timecounter = {
	tsc_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"TSC"			/* name */
};

SYSCTL_OPAQUE(_debug, OID_AUTO, tsc_timecounter, CTLFLAG_RD,
	&tsc_timecounter, sizeof(tsc_timecounter), "S,timecounter", "");

static struct timecounter i8254_timecounter = {
	i8254_get_timecount,	/* get_timecount */
	0,			/* no poll_pps */
	~0u,			/* counter_mask */
	0,			/* frequency */
	"i8254"			/* name */
};

SYSCTL_OPAQUE(_debug, OID_AUTO, i8254_timecounter, CTLFLAG_RD,
	&i8254_timecounter, sizeof(i8254_timecounter), "S,timecounter", "");

static void
clkintr(struct clockframe frame)
{
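	/*
	 * The i8254 timecounter wraps every timer0_max_count ticks; note
	 * the rollover here unless i8254_get_timecount() already did so
	 * since the last interrupt (i8254_ticked).
	 */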
	if (timecounter->tc_get_timecount == i8254_get_timecount) {
		mtx_lock_spin(&clock_lock);
		if (i8254_ticked)
			i8254_ticked = 0;
		else {
			i8254_offset += timer0_max_count;
			i8254_lastcount = 0;
		}
		clkintr_pending = 0;
		mtx_unlock_spin(&clock_lock);
	}
	timer_func(&frame);
#ifdef SMP
	if (timer_func == hardclock)
		forward_hardclock();
#endif
	switch (timer0_state) {

	case RELEASED:
		break;

	case ACQUIRED:
		if ((timer0_prescaler_count += timer0_max_count)
		    >= hardclock_max_count) {
			timer0_prescaler_count -= hardclock_max_count;
			hardclock(&frame);
#ifdef SMP
			forward_hardclock();
#endif
		}
		break;

	case ACQUIRE_PENDING:
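		/*
		 * Reprogram timer 0 for the requested rate and resync the
		 * i8254 timecounter's offset so the switch does not
		 * perturb timekeeping.
		 */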
		mtx_lock_spin(&clock_lock);
		i8254_offset = i8254_get_timecount(NULL);
		i8254_lastcount = 0;
		timer0_max_count = TIMER_DIV(new_rate);
		outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
		outb(TIMER_CNTR0, timer0_max_count & 0xff);
		outb(TIMER_CNTR0, timer0_max_count >> 8);
		mtx_unlock_spin(&clock_lock);
		timer_func = new_function;
		timer0_state = ACQUIRED;
		break;

	case RELEASE_PENDING:
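		/*
		 * Keep running at the private rate until a full hardclock
		 * period has accumulated, then drop back to hz and hand
		 * timer 0 back to hardclock().
		 */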
		if ((timer0_prescaler_count += timer0_max_count)
		    >= hardclock_max_count) {
			mtx_lock_spin(&clock_lock);
			i8254_offset = i8254_get_timecount(NULL);
			i8254_lastcount = 0;
			timer0_max_count = hardclock_max_count;
			outb(TIMER_MODE,
			    TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
			outb(TIMER_CNTR0, timer0_max_count & 0xff);
			outb(TIMER_CNTR0, timer0_max_count >> 8);
			mtx_unlock_spin(&clock_lock);
			timer0_prescaler_count = 0;
			timer_func = hardclock;
			timer0_state = RELEASED;
			hardclock(&frame);
#ifdef SMP
			forward_hardclock();
#endif
		}
		break;
	}
#ifdef DEV_MCA
	/* Reset clock interrupt by asserting bit 7 of port 0x61 */
	if (MCA_system)
		outb(0x61, inb(0x61) | 0x80);
#endif
}

/*
 * The acquire and release functions must be called at ipl >= splclock().
 */
int
acquire_timer0(int rate, void (*function) __P((struct clockframe *frame)))
{
	static int old_rate;

	if (rate <= 0 || rate > TIMER0_MAX_FREQ)
		return (-1);
	switch (timer0_state) {

	case RELEASED:
		timer0_state = ACQUIRE_PENDING;
		break;

	case RELEASE_PENDING:
		if (rate != old_rate)
			return (-1);
		/*
		 * The timer has been released recently, but is being
		 * re-acquired before the release completed.  In this
		 * case, we simply reclaim it as if it had not been
		 * released at all.
		 */
		timer0_state = ACQUIRED;
		break;

	default:
		return (-1);	/* busy */
	}
	new_function = function;
	old_rate = new_rate = rate;
	return (0);
}

#ifdef PC98
int
acquire_timer1(int mode)
{

	if (timer1_state != RELEASED)
		return (-1);
	timer1_state = ACQUIRED;

	/*
	 * This access to the timer registers is as atomic as possible
	 * because it is a single instruction.  We could do better if we
	 * knew the rate.  Use of splclock() limits glitches to 10-100us,
	 * and this is probably good enough for timer1, so we aren't as
	 * careful with it as with timer0.
	 */
	outb(TIMER_MODE, TIMER_SEL1 | (mode & 0x3f));

	return (0);
}
#endif

int
acquire_timer2(int mode)
{

	if (timer2_state != RELEASED)
		return (-1);
	timer2_state = ACQUIRED;

	/*
	 * This access to the timer registers is as atomic as possible
	 * because it is a single instruction.  We could do better if we
	 * knew the rate.  Use of splclock() limits glitches to 10-100us,
	 * and this is probably good enough for timer2, so we aren't as
	 * careful with it as with timer0.
	 */
	outb(TIMER_MODE, TIMER_SEL2 | (mode & 0x3f));

	return (0);
}

int
release_timer0()
{
	switch (timer0_state) {

	case ACQUIRED:
		timer0_state = RELEASE_PENDING;
		break;

	case ACQUIRE_PENDING:
		/* Nothing happened yet, release quickly. */
		timer0_state = RELEASED;
		break;

	default:
		return (-1);
	}
	return (0);
}

#ifdef PC98
int
release_timer1()
{

	if (timer1_state != ACQUIRED)
		return (-1);
	timer1_state = RELEASED;
	outb(TIMER_MODE, TIMER_SEL1 | TIMER_SQWAVE | TIMER_16BIT);
	return (0);
}
#endif

int
release_timer2()
{

	if (timer2_state != ACQUIRED)
		return (-1);
	timer2_state = RELEASED;
	outb(TIMER_MODE, TIMER_SEL2 | TIMER_SQWAVE | TIMER_16BIT);
	return (0);
}

#ifndef PC98
/*
 * This routine receives statistical clock interrupts from the RTC.
 * As explained above, these occur at 128 interrupts per second.
 * When profiling, we receive interrupts at a rate of 1024 Hz.
 *
 * This does not actually add as much overhead as it sounds, because
 * when the statistical clock is active, the hardclock driver no longer
 * needs to keep (inaccurate) statistics on its own.  This decouples
 * statistics gathering from scheduling interrupts.
 *
 * The RTC chip requires that we read status register C (RTC_INTR)
 * to acknowledge an interrupt, before it will generate the next one.
 * Under high interrupt load, rtcintr() can be indefinitely delayed and
 * the clock can tick immediately after the read from RTC_INTR.  In this
 * case, the mc146818A interrupt signal will not drop for long enough
 * to register with the 8259 PIC.  If an interrupt is missed, the stat
 * clock will halt, considerably degrading system performance.  This is
 * why we use 'while' rather than a more straightforward 'if' below.
 * Stat clock ticks can still be lost, causing minor loss of accuracy
 * in the statistics, but the stat clock will no longer stop.
 */
static void
rtcintr(struct clockframe frame)
{
	while (rtcin(RTC_INTR) & RTCIR_PERIOD) {
		statclock(&frame);
#ifdef SMP
		forward_statclock();
#endif
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(rtc, rtc)
{
	printf("%02x/%02x/%02x %02x:%02x:%02x, A = %02x, B = %02x, C = %02x\n",
	    rtcin(RTC_YEAR), rtcin(RTC_MONTH), rtcin(RTC_DAY),
	    rtcin(RTC_HRS), rtcin(RTC_MIN), rtcin(RTC_SEC),
	    rtcin(RTC_STATUSA), rtcin(RTC_STATUSB), rtcin(RTC_INTR));
}
#endif /* DDB */
#endif /* !PC98 */
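
/*
 * Return the current i8254 timer 0 count; the counter counts down and
 * reloads every timer0_max_count ticks.
 */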
static int
getit(void)
{
	int high, low;

	mtx_lock_spin(&clock_lock);

	/* Select timer0 and latch counter value. */
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);

	low = inb(TIMER_CNTR0);
	high = inb(TIMER_CNTR0);

	mtx_unlock_spin(&clock_lock);
	return ((high << 8) | low);
}

/*
 * Wait "n" microseconds.
 * Relies on timer 0 counting down from (timer_freq / hz)
 * Note: timer had better have been programmed before this is first used!
 */
void
DELAY(int n)
{
	int delta, prev_tick, tick, ticks_left;

#ifdef DELAYDEBUG
	int getit_calls = 1;
	int n1;
	static int state = 0;

	if (state == 0) {
		state = 1;
		for (n1 = 1; n1 <= 10000000; n1 *= 10)
			DELAY(n1);
		state = 2;
	}
	if (state == 1)
		printf("DELAY(%d)...", n);
#endif
	/*
	 * Guard against the timer being uninitialized if we are called
	 * early for console i/o.
	 */
	if (timer0_max_count == 0)
		set_timer_freq(timer_freq, hz);

	/*
	 * Read the counter first, so that the rest of the setup overhead is
	 * counted.  Guess the initial overhead is 20 usec (on most systems it
	 * takes about 1.5 usec for each of the i/o's in getit().  The loop
	 * takes about 6 usec on a 486/33 and 13 usec on a 386/20.  The
	 * multiplications and divisions to scale the count take a while).
	 */
	prev_tick = getit();
	n -= 0;			/* XXX actually guess no initial overhead */
	/*
	 * Calculate (n * (timer_freq / 1e6)) without using floating point
	 * and without any avoidable overflows.
	 */
	if (n <= 0)
		ticks_left = 0;
	else if (n < 256)
		/*
		 * Use fixed point to avoid a slow division by 1000000.
		 * 39099 = 1193182 * 2^15 / 10^6 rounded to nearest.
		 * 2^15 is the first power of 2 that gives exact results
		 * for n between 0 and 256.
		 */
		ticks_left = ((u_int)n * 39099 + (1 << 15) - 1) >> 15;
	else
		/*
		 * Don't bother using fixed point, although gcc-2.7.2
		 * generates particularly poor code for the long long
		 * division, since even the slow way will complete long
		 * before the delay is up (unless we're interrupted).
		 */
		ticks_left = ((u_int)n * (long long)timer_freq + 999999)
			     / 1000000;

	while (ticks_left > 0) {
		tick = getit();
#ifdef DELAYDEBUG
		++getit_calls;
#endif
		delta = prev_tick - tick;
		prev_tick = tick;
		if (delta < 0) {
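			/*
			 * The counter counts down, so a negative delta
			 * means it reloaded (wrapped past 0) between the
			 * two reads; compensate by one full period.
			 */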
			delta += timer0_max_count;
			/*
			 * Guard against timer0_max_count being wrong.
			 * This shouldn't happen in normal operation,
			 * but it may happen if set_timer_freq() is
			 * traced.
			 */
			if (delta < 0)
				delta = 0;
		}
		ticks_left -= delta;
	}
#ifdef DELAYDEBUG
	if (state == 1)
		printf(" %d calls to getit() at %d usec each\n",
		    getit_calls, (n + 5) / getit_calls);
#endif
}

static void
sysbeepstop(void *chan)
{
#ifdef PC98	/* PC98 */
	outb(IO_PPI, inb(IO_PPI)|0x08);	/* disable counter1 output to speaker */
	release_timer1();
#else
	outb(IO_PPI, inb(IO_PPI)&0xFC);	/* disable counter2 output to speaker */
	release_timer2();
#endif
	beeping = 0;
}
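
/*
 * Start a speaker beep.  "pitch" is used directly as the 16-bit divisor
 * for the beep counter (on the i8254 side the tone frequency is
 * timer_freq / pitch); "period" is in clock ticks and is handed to
 * timeout() unchanged.
 */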
int
sysbeep(int pitch, int period)
{
	int x = splclock();

#ifdef PC98
	if (acquire_timer1(TIMER_SQWAVE|TIMER_16BIT))
		if (!beeping) {
			/* Something else owns it. */
			splx(x);
			return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
		}
	disable_intr();
	outb(0x3fdb, pitch);
	outb(0x3fdb, (pitch>>8));
	enable_intr();
	if (!beeping) {
		/* enable counter1 output to speaker */
		outb(IO_PPI, (inb(IO_PPI) & 0xf7));
		beeping = period;
		timeout(sysbeepstop, (void *)NULL, period);
	}
#else
	if (acquire_timer2(TIMER_SQWAVE|TIMER_16BIT))
		if (!beeping) {
			/* Something else owns it. */
			splx(x);
			return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */
		}
	mtx_lock_spin(&clock_lock);
	outb(TIMER_CNTR2, pitch);
	outb(TIMER_CNTR2, (pitch>>8));
	mtx_unlock_spin(&clock_lock);
	if (!beeping) {
		/* enable counter2 output to speaker */
		outb(IO_PPI, inb(IO_PPI) | 3);
		beeping = period;
		timeout(sysbeepstop, (void *)NULL, period);
	}
#endif
	splx(x);
	return (0);
}

#ifndef PC98
/*
 * RTC support routines
 */

int
rtcin(reg)
	int reg;
{
	int s;
	u_char val;

	s = splhigh();
	outb(IO_RTC, reg);
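	/* The dummy reads from port 0x84 serve only as a short I/O delay. */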
	inb(0x84);
	val = inb(IO_RTC + 1);
	inb(0x84);
	splx(s);
	return (val);
}

static __inline void
writertc(u_char reg, u_char val)
{
	int s;

	s = splhigh();
	inb(0x84);
	outb(IO_RTC, reg);
	inb(0x84);
	outb(IO_RTC + 1, val);
	inb(0x84);		/* XXX work around wrong order in rtcin() */
	splx(s);
}

static __inline int
readrtc(int port)
{
	return(bcd2bin(rtcin(port)));
}
#endif

#ifdef PC98
unsigned int delaycount;
#define FIRST_GUESS	0x2000
static void findcpuspeed(void)
{
	int i;
	int remainder;

	/* Put counter in count down mode */
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_16BIT | TIMER_RATEGEN);
	outb(TIMER_CNTR0, 0xff);
	outb(TIMER_CNTR0, 0xff);
	for (i = FIRST_GUESS; i; i--)
		;
	remainder = getit();
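	/*
	 * (0xffff - remainder) timer ticks elapsed while spinning
	 * FIRST_GUESS times; scale that to the number of loop iterations
	 * per millisecond (TIMER_DIV(1000) ticks).
	 */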
	delaycount = (FIRST_GUESS * TIMER_DIV(1000)) / (0xffff - remainder);
}
#endif

#ifdef PC98
static u_int
calibrate_clocks(void)
{
	int timeout;
	u_int count, prev_count, tot_count;
	u_short sec, start_sec;

	if (bootverbose)
		printf("Calibrating clock(s) ... ");
	/* Check ARTIC. */
	if (!(PC98_SYSTEM_PARAMETER(0x458) & 0x80) &&
	    !(PC98_SYSTEM_PARAMETER(0x45b) & 0x04))
		goto fail;
	timeout = 100000000;

	/* Read the ARTIC. */
	sec = inw(0x5e);

	/* Wait for the ARTIC to change. */
	start_sec = sec;
	for (;;) {
		sec = inw(0x5e);
		if (sec != start_sec)
			break;
		if (--timeout == 0)
			goto fail;
	}
	prev_count = getit();
	if (prev_count == 0 || prev_count > timer0_max_count)
		goto fail;
	tot_count = 0;

	if (tsc_present)
		wrmsr(0x10, 0LL);	/* XXX 0x10 is the MSR for the TSC */
	start_sec = sec;
	for (;;) {
		sec = inw(0x5e);
		count = getit();
		if (count == 0 || count > timer0_max_count)
			goto fail;
		if (count > prev_count)
			tot_count += prev_count - (count - timer0_max_count);
		else
			tot_count += prev_count - count;
		prev_count = count;
		if ((sec == start_sec + 1200) ||
		    (sec < start_sec &&
		     (u_int)sec + 0x10000 == (u_int)start_sec + 1200))
			break;
		if (--timeout == 0)
			goto fail;
	}
	/*
	 * Read the cpu cycle counter.  The timing considerations are
	 * similar to those for the i8254 clock.
	 */
	if (tsc_present)
		tsc_freq = rdtsc();

	if (bootverbose) {
		if (tsc_present)
			printf("TSC clock: %u Hz, ", tsc_freq);
		printf("i8254 clock: %u Hz\n", tot_count);
	}
	return (tot_count);

fail:
	if (bootverbose)
		printf("failed, using default i8254 clock of %u Hz\n",
		    timer_freq);
	return (timer_freq);
}
#else
static u_int
calibrate_clocks(void)
{
	u_int64_t old_tsc;
	u_int count, prev_count, tot_count;
	int sec, start_sec, timeout;

	if (bootverbose)
		printf("Calibrating clock(s) ... ");
	if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
		goto fail;
	timeout = 100000000;

	/* Read the mc146818A seconds counter. */
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
			sec = rtcin(RTC_SEC);
			break;
		}
		if (--timeout == 0)
			goto fail;
	}

	/* Wait for the mc146818A seconds counter to change. */
	start_sec = sec;
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP)) {
			sec = rtcin(RTC_SEC);
			if (sec != start_sec)
				break;
		}
		if (--timeout == 0)
			goto fail;
	}

	/* Start keeping track of the i8254 counter. */
	prev_count = getit();
	if (prev_count == 0 || prev_count > timer0_max_count)
		goto fail;
	tot_count = 0;

	if (tsc_present)
		old_tsc = rdtsc();
	else
		old_tsc = 0;		/* shut up gcc */

	/*
	 * Wait for the mc146818A seconds counter to change.  Read the i8254
	 * counter for each iteration since this is convenient and only
	 * costs a few usec of inaccuracy.  The timing of the final reads
	 * of the counters almost matches the timing of the initial reads,
	 * so the main cause of inaccuracy is the varying latency from
	 * inside getit() or rtcin(RTC_STATUSA) to the beginning of the
	 * rtcin(RTC_SEC) that returns a changed seconds count.  The
	 * maximum inaccuracy from this cause is < 10 usec on 486's.
	 */
	start_sec = sec;
	for (;;) {
		if (!(rtcin(RTC_STATUSA) & RTCSA_TUP))
			sec = rtcin(RTC_SEC);
		count = getit();
		if (count == 0 || count > timer0_max_count)
			goto fail;
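		/*
		 * The counter counts down and reloads at timer0_max_count,
		 * so count > prev_count means it wrapped since the last
		 * read; account for the extra period when accumulating.
		 */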
		if (count > prev_count)
			tot_count += prev_count - (count - timer0_max_count);
		else
			tot_count += prev_count - count;
		prev_count = count;
		if (sec != start_sec)
			break;
		if (--timeout == 0)
			goto fail;
	}

	/*
	 * Read the cpu cycle counter.  The timing considerations are
	 * similar to those for the i8254 clock.
	 */
	if (tsc_present)
		tsc_freq = rdtsc() - old_tsc;

	if (bootverbose) {
		if (tsc_present)
			printf("TSC clock: %u Hz, ", tsc_freq);
		printf("i8254 clock: %u Hz\n", tot_count);
	}
	return (tot_count);

fail:
	if (bootverbose)
		printf("failed, using default i8254 clock of %u Hz\n",
		    timer_freq);
	return (timer_freq);
}
#endif /* !PC98 */
|
|
|
|
|
|
|
|
static void
|
|
|
|
set_timer_freq(u_int freq, int intr_freq)
|
|
|
|
{
|
1998-02-13 09:32:17 +00:00
|
|
|
int new_timer0_max_count;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&clock_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
timer_freq = freq;
|
1998-02-13 09:32:17 +00:00
|
|
|
new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq);
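	/*
	 * Example of the arithmetic (assuming the usual AT defaults):
	 * with timer_freq = 1193182 Hz and intr_freq = hz = 100,
	 * TIMER_DIV yields a reload value of about 11932, i.e. a
	 * hardclock interrupt roughly every 10 ms.
	 */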
|
|
|
|
if (new_timer0_max_count != timer0_max_count) {
|
|
|
|
timer0_max_count = new_timer0_max_count;
|
|
|
|
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
|
|
|
|
outb(TIMER_CNTR0, timer0_max_count & 0xff);
|
|
|
|
outb(TIMER_CNTR0, timer0_max_count >> 8);
|
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&clock_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
1999-11-03 08:36:17 +00:00
|
|
|
/*
|
|
|
|
* i8254_restore is called from apm_default_resume() to reload
|
|
|
|
* the countdown register.
|
|
|
|
* this should not be necessary but there are broken laptops that
|
|
|
|
* do not restore the countdown register on resume.
|
|
|
|
* when it happens, it messes up the hardclock interval and system clock,
|
|
|
|
* which leads to the infamous "calcru: negative time" problem.
|
|
|
|
*/
|
2001-09-16 05:29:27 +00:00
|
|
|
static void
|
1999-11-03 08:36:17 +00:00
|
|
|
i8254_restore(void)
|
|
|
|
{
|
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&clock_lock);
|
1999-11-03 08:36:17 +00:00
|
|
|
outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
|
|
|
|
outb(TIMER_CNTR0, timer0_max_count & 0xff);
|
|
|
|
outb(TIMER_CNTR0, timer0_max_count >> 8);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&clock_lock);
|
1999-11-03 08:36:17 +00:00
|
|
|
}
|
|
|
|
|
2001-09-16 05:29:27 +00:00
|
|
|
#ifndef PC98
|
|
|
|
static void
|
|
|
|
rtc_restore(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
/* Reenable RTC updates and interrupts. */
|
|
|
|
/* XXX locking is needed for RTC access? */
|
|
|
|
writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);
|
|
|
|
writertc(RTC_STATUSB, rtc_statusb);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore all the timers atomically.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
timer_restore(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
i8254_restore(); /* restore timer_freq and hz */
|
|
|
|
#ifndef PC98
|
|
|
|
rtc_restore(); /* reenable RTC interrupts */
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
1998-02-21 15:52:40 +00:00
|
|
|
* Initialize 8254 timer 0 early so that it can be used in DELAY().
|
1996-06-14 10:04:54 +00:00
|
|
|
* XXX initialization of other timers is unintentionally left blank.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
startrtclock()
|
|
|
|
{
|
|
|
|
u_int delta, freq;
|
|
|
|
|
|
|
|
#ifdef PC98
|
|
|
|
findcpuspeed();
|
|
|
|
if (pc98_machine_type & M_8M)
|
|
|
|
timer_freq = 1996800L; /* 1.9968 MHz */
|
|
|
|
else
|
|
|
|
timer_freq = 2457600L; /* 2.4576 MHz */
|
|
|
|
#endif /* PC98 */
|
|
|
|
|
1997-12-29 16:15:57 +00:00
|
|
|
if (cpu_feature & CPUID_TSC)
|
|
|
|
tsc_present = 1;
|
|
|
|
else
|
|
|
|
tsc_present = 0;
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifndef PC98
|
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
|
|
|
writertc(RTC_STATUSB, RTCSB_24HR);
|
|
|
|
#endif
|
|
|
|
|
1996-07-23 07:46:59 +00:00
|
|
|
set_timer_freq(timer_freq, hz);
|
1996-06-14 10:04:54 +00:00
|
|
|
freq = calibrate_clocks();
|
|
|
|
#ifdef CLK_CALIBRATION_LOOP
|
|
|
|
if (bootverbose) {
|
|
|
|
printf(
|
|
|
|
"Press a key on the console to abort clock calibration\n");
|
1996-10-09 21:47:16 +00:00
|
|
|
while (cncheckc() == -1)
|
1996-06-14 10:04:54 +00:00
|
|
|
calibrate_clocks();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the calibrated i8254 frequency if it seems reasonable.
|
|
|
|
* Otherwise use the default, and don't use the calibrated i586
|
|
|
|
* frequency.
|
|
|
|
*/
|
|
|
|
delta = freq > timer_freq ? freq - timer_freq : timer_freq - freq;
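	/*
	 * With the usual AT default timer_freq of 1193182 Hz this accepts
	 * a calibrated value within roughly +/- 11931 Hz (1%) of the
	 * default.
	 */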
|
|
|
|
if (delta < timer_freq / 100) {
|
|
|
|
#ifndef CLK_USE_I8254_CALIBRATION
|
1996-07-23 07:46:59 +00:00
|
|
|
if (bootverbose)
|
1996-10-30 22:41:46 +00:00
|
|
|
printf(
|
1996-06-14 10:04:54 +00:00
|
|
|
"CLK_USE_I8254_CALIBRATION not specified - using default frequency\n");
|
|
|
|
freq = timer_freq;
|
|
|
|
#endif
|
|
|
|
timer_freq = freq;
|
|
|
|
} else {
|
1997-03-05 16:19:48 +00:00
|
|
|
if (bootverbose)
|
|
|
|
printf(
|
|
|
|
"%d Hz differs from default of %d Hz by more than 1%%\n",
|
|
|
|
freq, timer_freq);
|
1997-12-26 20:42:37 +00:00
|
|
|
tsc_freq = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
set_timer_freq(timer_freq, hz);
|
1998-10-23 13:13:43 +00:00
|
|
|
i8254_timecounter.tc_frequency = timer_freq;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_init(&i8254_timecounter);
|
1996-06-14 10:04:54 +00:00
|
|
|
|
1997-12-29 16:15:57 +00:00
|
|
|
#ifndef CLK_USE_TSC_CALIBRATION
|
1997-12-26 20:42:37 +00:00
|
|
|
if (tsc_freq != 0) {
|
1996-07-23 07:46:59 +00:00
|
|
|
if (bootverbose)
|
1996-10-30 22:41:46 +00:00
|
|
|
printf(
|
1997-12-29 16:15:57 +00:00
|
|
|
"CLK_USE_TSC_CALIBRATION not specified - using old calibration method\n");
|
1997-12-26 20:42:37 +00:00
|
|
|
tsc_freq = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#endif
|
1997-12-29 16:15:57 +00:00
|
|
|
if (tsc_present && tsc_freq == 0) {
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Calibration of the i586 clock relative to the mc146818A
|
|
|
|
* clock failed. Do a less accurate calibration relative
|
|
|
|
* to the i8254 clock.
|
|
|
|
*/
|
1999-06-24 08:32:52 +00:00
|
|
|
u_int64_t old_tsc = rdtsc();
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
DELAY(1000000);
|
1999-06-24 08:32:52 +00:00
|
|
|
tsc_freq = rdtsc() - old_tsc;
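		/*
		 * DELAY() busy-waits on the i8254, so the rdtsc() delta
		 * above covers about one second and approximates the CPU
		 * clock in Hz.  This is less accurate than the RTC-based
		 * calibration because it inherits any error in timer_freq.
		 */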
|
1997-12-29 16:15:57 +00:00
|
|
|
#ifdef CLK_USE_TSC_CALIBRATION
|
1997-03-05 16:19:48 +00:00
|
|
|
if (bootverbose)
|
1998-02-21 15:52:40 +00:00
|
|
|
printf("TSC clock: %u Hz (Method B)\n", tsc_freq);
|
1996-07-23 07:46:59 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1998-03-17 08:42:18 +00:00
|
|
|
|
|
|
|
#if !defined(SMP)
|
|
|
|
/*
|
|
|
|
* We can not use the TSC in SMP mode, until we figure out a
|
|
|
|
* cheap (impossible), reliable and precise (yeah right!) way
|
|
|
|
* to synchronize the TSCs of all the CPUs.
|
|
|
|
* Curse Intel for leaving the counter out of the I/O APIC.
|
|
|
|
*/
|
|
|
|
|
2001-01-19 14:09:54 +00:00
|
|
|
#ifdef DEV_APM
|
1998-03-17 08:42:18 +00:00
|
|
|
/*
|
1999-07-30 11:43:10 +00:00
|
|
|
* We can not use the TSC if we support APM. Precise timekeeping
|
|
|
|
* on an APM'ed machine is at best a fool's pursuit, since
|
1998-03-17 08:42:18 +00:00
|
|
|
* any and all of the time spent in various SMM code can't
|
|
|
|
* be reliably accounted for. Reading the RTC is your only
|
|
|
|
* source of reliable time info. The i8254 loses time too, of course,
|
|
|
|
* but we need to have some kind of time...
|
1999-07-30 11:43:10 +00:00
|
|
|
* We don't know at this point whether APM is going to be used
|
|
|
|
* or not, nor when it might be activated. Play it safe.
|
1998-03-17 08:42:18 +00:00
|
|
|
*/
|
2000-08-04 08:15:45 +00:00
|
|
|
{
|
|
|
|
int disabled = 0;
|
|
|
|
resource_int_value("apm", 0, "disabled", &disabled);
|
|
|
|
if (disabled == 0)
|
|
|
|
return;
|
|
|
|
}
|
2001-01-19 14:09:54 +00:00
|
|
|
#endif /* DEV_APM */
|
1998-03-17 08:42:18 +00:00
|
|
|
|
1999-06-01 12:32:54 +00:00
|
|
|
if (tsc_present && tsc_freq != 0 && !tsc_is_broken) {
|
1998-10-23 13:13:43 +00:00
|
|
|
tsc_timecounter.tc_frequency = tsc_freq;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_init(&tsc_timecounter);
|
1998-02-21 15:52:40 +00:00
|
|
|
}
|
1998-03-17 08:42:18 +00:00
|
|
|
|
|
|
|
#endif /* !defined(SMP) */
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef PC98
|
1997-01-10 17:11:09 +00:00
|
|
|
static void
|
1996-06-14 10:04:54 +00:00
|
|
|
rtc_serialcombit(int i)
|
|
|
|
{
|
|
|
|
outb(IO_RTC, ((i&0x01)<<5)|0x07);
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, ((i&0x01)<<5)|0x17);
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, ((i&0x01)<<5)|0x07);
|
|
|
|
DELAY(1);
|
|
|
|
}
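/*
 * rtc_serialcombit() shifts a single bit into the PC98 RTC: bit 5 of the
 * value written to IO_RTC carries the data and bit 4 is pulsed 0->1->0 as
 * the serial clock, with 1 us delays for the chip's timing (read off the
 * bit patterns above, not from a datasheet).
 */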
|
|
|
|
|
1997-01-10 17:11:09 +00:00
|
|
|
static void
|
1996-06-14 10:04:54 +00:00
|
|
|
rtc_serialcom(int i)
|
|
|
|
{
|
|
|
|
rtc_serialcombit(i&0x01);
|
|
|
|
rtc_serialcombit((i&0x02)>>1);
|
|
|
|
rtc_serialcombit((i&0x04)>>2);
|
|
|
|
rtc_serialcombit((i&0x08)>>3);
|
|
|
|
outb(IO_RTC, 0x07);
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, 0x0f);
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, 0x07);
|
|
|
|
DELAY(1);
|
|
|
|
}
|
|
|
|
|
1997-01-10 17:11:09 +00:00
|
|
|
static void
|
1996-06-14 10:04:54 +00:00
|
|
|
rtc_outb(int val)
|
|
|
|
{
|
|
|
|
int s;
|
|
|
|
int sa = 0;
|
|
|
|
|
|
|
|
for (s=0;s<8;s++) {
|
|
|
|
sa = ((val >> s) & 0x01) ? 0x27 : 0x07;
|
|
|
|
outb(IO_RTC, sa); /* set DI & CLK 0 */
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, sa | 0x10); /* CLK 1 */
|
|
|
|
DELAY(1);
|
1997-01-18 10:25:04 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(IO_RTC, sa & 0xef); /* CLK 0 */
|
|
|
|
}
|
|
|
|
|
1997-01-10 17:11:09 +00:00
|
|
|
static int
|
1996-06-14 10:04:54 +00:00
|
|
|
rtc_inb(void)
|
|
|
|
{
|
|
|
|
int s;
|
|
|
|
int sa = 0;
|
|
|
|
|
|
|
|
for (s=0;s<8;s++) {
|
|
|
|
sa |= ((inb(0x33) & 0x01) << s);
|
|
|
|
outb(IO_RTC, 0x17); /* CLK 1 */
|
|
|
|
DELAY(1);
|
|
|
|
outb(IO_RTC, 0x07); /* CLK 0 */
|
|
|
|
DELAY(2);
|
1997-01-18 10:25:04 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
return sa;
|
|
|
|
}
|
|
|
|
#endif /* PC-98 */
|
|
|
|
|
|
|
|
/*
|
1998-12-17 08:54:47 +00:00
|
|
|
* Initialize the time of day register, based on the time base which is, e.g.
|
|
|
|
* from a filesystem.
|
1996-06-14 10:04:54 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
inittodr(time_t base)
|
|
|
|
{
|
|
|
|
unsigned long sec, days;
|
|
|
|
int year, month;
|
|
|
|
int y, m, s;
|
1998-02-21 15:54:23 +00:00
|
|
|
struct timespec ts;
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
|
|
|
int second, min, hour;
|
|
|
|
#endif
|
|
|
|
|
1998-01-28 12:25:06 +00:00
|
|
|
if (base) {
|
|
|
|
s = splclock();
|
1998-02-21 15:52:40 +00:00
|
|
|
ts.tv_sec = base;
|
|
|
|
ts.tv_nsec = 0;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_setclock(&ts);
|
1998-01-28 12:25:06 +00:00
|
|
|
splx(s);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
#ifdef PC98
|
|
|
|
rtc_serialcom(0x03); /* Time Read */
|
|
|
|
rtc_serialcom(0x01); /* Register shift command. */
|
|
|
|
DELAY(20);
|
|
|
|
|
|
|
|
second = bcd2bin(rtc_inb() & 0xff); /* sec */
|
|
|
|
min = bcd2bin(rtc_inb() & 0xff); /* min */
|
|
|
|
hour = bcd2bin(rtc_inb() & 0xff); /* hour */
|
|
|
|
days = bcd2bin(rtc_inb() & 0xff) - 1; /* date */
|
|
|
|
|
|
|
|
month = (rtc_inb() >> 4) & 0x0f; /* month */
|
|
|
|
for (m = 1; m < month; m++)
|
|
|
|
days += daysinmonth[m-1];
|
|
|
|
year = bcd2bin(rtc_inb() & 0xff) + 1900; /* year */
|
|
|
|
/* 2000 year problem */
|
|
|
|
if (year < 1995)
|
|
|
|
year += 100;
|
|
|
|
if (year < 1970)
|
|
|
|
goto wrong_time;
|
|
|
|
for (y = 1970; y < year; y++)
|
|
|
|
days += DAYSPERYEAR + LEAPYEAR(y);
|
|
|
|
if ((month > 2) && LEAPYEAR(year))
|
|
|
|
days ++;
|
|
|
|
sec = ((( days * 24 +
|
|
|
|
hour) * 60 +
|
|
|
|
min) * 60 +
|
|
|
|
second);
|
|
|
|
/* sec now contains the number of seconds, since Jan 1 1970,
|
|
|
|
in the local time zone */
|
2000-10-15 04:54:17 +00:00
|
|
|
|
|
|
|
s = splhigh();
|
1996-06-14 10:04:54 +00:00
|
|
|
#else /* IBM-PC */
|
1998-12-17 08:54:47 +00:00
|
|
|
/* Look if we have an RTC present and the time is valid */
|
1996-06-14 10:04:54 +00:00
|
|
|
if (!(rtcin(RTC_STATUSD) & RTCSD_PWR))
|
|
|
|
goto wrong_time;
|
|
|
|
|
1998-12-17 08:54:47 +00:00
|
|
|
/* wait for time update to complete */
|
|
|
|
/* If RTCSA_TUP is zero, we have at least 244us before next update */
|
1999-12-27 13:56:54 +00:00
|
|
|
s = splhigh();
|
|
|
|
while (rtcin(RTC_STATUSA) & RTCSA_TUP) {
|
|
|
|
splx(s);
|
|
|
|
s = splhigh();
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
days = 0;
|
|
|
|
#ifdef USE_RTC_CENTURY
|
1998-12-17 08:54:47 +00:00
|
|
|
year = readrtc(RTC_YEAR) + readrtc(RTC_CENTURY) * 100;
|
1996-06-14 10:04:54 +00:00
|
|
|
#else
|
|
|
|
year = readrtc(RTC_YEAR) + 1900;
|
|
|
|
if (year < 1970)
|
|
|
|
year += 100;
|
|
|
|
#endif
|
1999-12-27 13:56:54 +00:00
|
|
|
if (year < 1970) {
|
|
|
|
splx(s);
|
1996-06-14 10:04:54 +00:00
|
|
|
goto wrong_time;
|
1999-12-27 13:56:54 +00:00
|
|
|
}
|
1998-12-17 08:54:47 +00:00
|
|
|
month = readrtc(RTC_MONTH);
|
|
|
|
for (m = 1; m < month; m++)
|
|
|
|
days += daysinmonth[m-1];
|
|
|
|
if ((month > 2) && LEAPYEAR(year))
|
1996-06-14 10:04:54 +00:00
|
|
|
days ++;
|
1998-12-17 08:54:47 +00:00
|
|
|
days += readrtc(RTC_DAY) - 1;
|
1996-06-14 10:04:54 +00:00
|
|
|
for (y = 1970; y < year; y++)
|
1998-12-17 08:54:47 +00:00
|
|
|
days += DAYSPERYEAR + LEAPYEAR(y);
|
1996-06-14 10:04:54 +00:00
|
|
|
sec = ((( days * 24 +
|
|
|
|
readrtc(RTC_HRS)) * 60 +
|
|
|
|
readrtc(RTC_MIN)) * 60 +
|
|
|
|
readrtc(RTC_SEC));
|
1998-12-17 08:54:47 +00:00
|
|
|
/* sec now contains the number of seconds, since Jan 1 1970,
|
|
|
|
in the local time zone */
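	/*
	 * Worked example of the conversion above (hypothetical values):
	 * days = 10, RTC_HRS = 5, RTC_MIN = 30, RTC_SEC = 0 gives
	 * ((10 * 24 + 5) * 60 + 30) * 60 = 883800 seconds, still
	 * expressed in local time at this point.
	 */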
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
sec += tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
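	/*
	 * tz_minuteswest is minutes west of UTC, so e.g. a value of 300
	 * (5 hours) adds 18000 seconds here, converting the local RTC
	 * reading towards UTC; the adjkerntz term applies an additional
	 * correction when the CMOS clock is kept in local wall time.
	 */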
|
|
|
|
|
1998-03-31 07:53:13 +00:00
|
|
|
y = time_second - sec;
|
1998-02-21 15:52:40 +00:00
|
|
|
if (y <= -2 || y >= 2) {
|
|
|
|
/* badly off, adjust it */
|
|
|
|
ts.tv_sec = sec;
|
|
|
|
ts.tv_nsec = 0;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_setclock(&ts);
|
1998-02-21 15:52:40 +00:00
|
|
|
}
|
1999-12-27 13:56:54 +00:00
|
|
|
splx(s);
|
1996-06-14 10:04:54 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
wrong_time:
|
1998-12-17 08:54:47 +00:00
|
|
|
printf("Invalid time in real time clock.\n");
|
|
|
|
printf("Check and reset the date immediately!\n");
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
1998-12-17 08:54:47 +00:00
|
|
|
* Write system time back to RTC
|
1996-06-14 10:04:54 +00:00
|
|
|
*/
|
|
|
|
void
|
|
|
|
resettodr()
|
|
|
|
{
|
|
|
|
unsigned long tm;
|
|
|
|
int y, m, s;
|
|
|
|
#ifdef PC98
|
|
|
|
int wd;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (disable_rtc_set)
|
|
|
|
return;
|
|
|
|
|
|
|
|
s = splclock();
|
1998-03-31 07:53:13 +00:00
|
|
|
tm = time_second;
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
|
|
|
|
|
|
|
#ifdef PC98
|
|
|
|
rtc_serialcom(0x01); /* Register shift command. */
|
|
|
|
|
|
|
|
/* Calculate local time to put in RTC */
|
|
|
|
|
|
|
|
tm -= tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
|
|
|
|
|
|
|
|
rtc_outb(bin2bcd(tm%60)); tm /= 60; /* Write back Seconds */
|
|
|
|
rtc_outb(bin2bcd(tm%60)); tm /= 60; /* Write back Minutes */
|
|
|
|
rtc_outb(bin2bcd(tm%24)); tm /= 24; /* Write back Hours */
|
|
|
|
|
|
|
|
/* We have now the days since 01-01-1970 in tm */
|
|
|
|
wd = (tm+4)%7;
|
|
|
|
for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y);
|
|
|
|
tm >= m;
|
|
|
|
y++, m = DAYSPERYEAR + LEAPYEAR(y))
|
|
|
|
tm -= m;
|
|
|
|
|
|
|
|
/* Now we have the years in y and the day-of-the-year in tm */
|
|
|
|
for (m = 0; ; m++) {
|
|
|
|
int ml;
|
|
|
|
|
|
|
|
ml = daysinmonth[m];
|
|
|
|
if (m == 1 && LEAPYEAR(y))
|
|
|
|
ml++;
|
|
|
|
if (tm < ml)
|
|
|
|
break;
|
|
|
|
tm -= ml;
|
|
|
|
}
|
|
|
|
|
|
|
|
m++;
|
|
|
|
rtc_outb(bin2bcd(tm+1)); /* Write back Day */
|
|
|
|
rtc_outb((m << 4) | wd); /* Write back Month & Weekday */
|
|
|
|
rtc_outb(bin2bcd(y%100)); /* Write back Year */
|
|
|
|
|
|
|
|
rtc_serialcom(0x02); /* Time set & Counter hold command. */
|
|
|
|
rtc_serialcom(0x00); /* Register hold command. */
|
|
|
|
#else
|
|
|
|
/* Disable RTC updates and interrupts. */
|
|
|
|
writertc(RTC_STATUSB, RTCSB_HALT | RTCSB_24HR);
|
|
|
|
|
1998-12-17 08:54:47 +00:00
|
|
|
/* Calculate local time to put in RTC */
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
tm -= tz.tz_minuteswest * 60 + (wall_cmos_clock ? adjkerntz : 0);
|
|
|
|
|
|
|
|
writertc(RTC_SEC, bin2bcd(tm%60)); tm /= 60; /* Write back Seconds */
|
|
|
|
writertc(RTC_MIN, bin2bcd(tm%60)); tm /= 60; /* Write back Minutes */
|
|
|
|
writertc(RTC_HRS, bin2bcd(tm%24)); tm /= 24; /* Write back Hours */
|
|
|
|
|
1998-12-17 08:54:47 +00:00
|
|
|
/* We have now the days since 01-01-1970 in tm */
|
1996-06-14 10:04:54 +00:00
|
|
|
writertc(RTC_WDAY, (tm+4)%7); /* Write back Weekday */
|
|
|
|
for (y = 1970, m = DAYSPERYEAR + LEAPYEAR(y);
|
|
|
|
tm >= m;
|
|
|
|
y++, m = DAYSPERYEAR + LEAPYEAR(y))
|
|
|
|
tm -= m;
|
|
|
|
|
|
|
|
/* Now we have the years in y and the day-of-the-year in tm */
|
|
|
|
writertc(RTC_YEAR, bin2bcd(y%100)); /* Write back Year */
|
|
|
|
#ifdef USE_RTC_CENTURY
|
|
|
|
writertc(RTC_CENTURY, bin2bcd(y/100)); /* ... and Century */
|
|
|
|
#endif
|
|
|
|
for (m = 0; ; m++) {
|
|
|
|
int ml;
|
|
|
|
|
|
|
|
ml = daysinmonth[m];
|
|
|
|
if (m == 1 && LEAPYEAR(y))
|
|
|
|
ml++;
|
|
|
|
if (tm < ml)
|
|
|
|
break;
|
|
|
|
tm -= ml;
|
|
|
|
}
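	/*
	 * The loop above peels whole months off the day-of-year, so at
	 * this point m is the zero-based month and tm the zero-based day
	 * of the month; both are biased by one when written back below.
	 */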
|
|
|
|
|
|
|
|
writertc(RTC_MONTH, bin2bcd(m + 1)); /* Write back Month */
|
|
|
|
writertc(RTC_DAY, bin2bcd(tm + 1)); /* Write back Month Day */
|
|
|
|
|
|
|
|
/* Reenable RTC updates and interrupts. */
|
|
|
|
writertc(RTC_STATUSB, rtc_statusb);
|
2000-09-07 13:32:59 +00:00
|
|
|
#endif /* PC98 */
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
1997-04-27 13:22:09 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Start both clocks running.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
cpu_initclocks()
|
|
|
|
{
|
2001-09-16 05:29:27 +00:00
|
|
|
#ifndef PC98
|
|
|
|
int diag;
|
|
|
|
#endif
|
1997-06-26 14:49:25 +00:00
|
|
|
#ifdef APIC_IO
|
2000-10-06 11:50:19 +00:00
|
|
|
int apic_8254_trial;
|
2001-02-13 10:35:15 +00:00
|
|
|
void *clkdesc;
|
1997-04-27 13:22:09 +00:00
|
|
|
#endif /* APIC_IO */
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2001-09-16 05:29:27 +00:00
|
|
|
#ifndef PC98
|
1996-06-14 10:04:54 +00:00
|
|
|
if (statclock_disable) {
|
|
|
|
/*
|
|
|
|
* The stat interrupt mask is different without the
|
|
|
|
* statistics clock. Also, don't set the interrupt
|
|
|
|
* flag which would normally cause the RTC to generate
|
|
|
|
* interrupts.
|
|
|
|
*/
|
|
|
|
rtc_statusb = RTCSB_24HR;
|
|
|
|
} else {
|
|
|
|
/* Setting stathz to nonzero early helps avoid races. */
|
|
|
|
stathz = RTC_NOPROFRATE;
|
|
|
|
profhz = RTC_PROFRATE;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Finish initializing 8253 timer 0. */
|
1997-06-26 14:49:25 +00:00
|
|
|
#ifdef APIC_IO
|
1997-07-21 13:12:01 +00:00
|
|
|
|
1998-09-08 09:47:46 +00:00
|
|
|
apic_8254_intr = isa_apic_irq(0);
|
1998-03-15 13:35:42 +00:00
|
|
|
apic_8254_trial = 0;
|
|
|
|
if (apic_8254_intr >= 0 ) {
|
|
|
|
if (apic_int_type(0, 0) == 3)
|
|
|
|
apic_8254_trial = 1;
|
|
|
|
} else {
|
|
|
|
/* look for ExtInt on pin 0 */
|
|
|
|
if (apic_int_type(0, 0) == 3) {
|
2000-01-05 12:35:03 +00:00
|
|
|
apic_8254_intr = apic_irq(0, 0);
|
1998-03-15 13:35:42 +00:00
|
|
|
setup_8254_mixed_mode();
|
|
|
|
} else
|
|
|
|
panic("APIC_IO: Cannot route 8254 interrupt to CPU");
|
1997-07-21 13:12:01 +00:00
|
|
|
}
|
2000-10-06 11:50:19 +00:00
|
|
|
|
2001-02-13 10:35:15 +00:00
|
|
|
inthand_add("clk", apic_8254_intr, (driver_intr_t *)clkintr, NULL,
|
|
|
|
INTR_TYPE_CLK | INTR_FAST, &clkdesc);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_lock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
INTREN(1 << apic_8254_intr);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_unlock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
|
1997-07-20 11:55:52 +00:00
|
|
|
#else /* APIC_IO */
|
1997-07-26 13:52:47 +00:00
|
|
|
|
2000-09-07 13:32:59 +00:00
|
|
|
/*
|
|
|
|
* XXX Check the priority of this interrupt handler. I
|
|
|
|
* couldn't find anything suitable in the BSD/OS code (grog,
|
|
|
|
* 19 July 2000).
|
|
|
|
*/
|
2001-02-13 10:35:15 +00:00
|
|
|
inthand_add("clk", 0, (driver_intr_t *)clkintr, NULL,
|
|
|
|
INTR_TYPE_CLK | INTR_FAST, NULL);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_lock_spin(&icu_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
INTREN(IRQ0);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_unlock_spin(&icu_lock);
|
1997-07-26 13:52:47 +00:00
|
|
|
|
1997-04-27 13:22:09 +00:00
|
|
|
#endif /* APIC_IO */
|
1997-06-26 14:49:25 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifndef PC98
|
|
|
|
/* Initialize RTC. */
|
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
|
|
|
writertc(RTC_STATUSB, RTCSB_24HR);
|
|
|
|
|
1996-10-23 07:25:35 +00:00
|
|
|
/* Don't bother enabling the statistics clock. */
|
2000-10-06 11:50:19 +00:00
|
|
|
if (statclock_disable)
|
1996-10-23 07:25:35 +00:00
|
|
|
return;
|
|
|
|
diag = rtcin(RTC_DIAG);
|
|
|
|
if (diag != 0)
|
|
|
|
printf("RTC BIOS diagnostic error %b\n", diag, RTCDG_BITS);
|
2000-09-07 13:32:59 +00:00
|
|
|
#endif /* !PC98 */
|
1997-07-20 11:55:52 +00:00
|
|
|
|
2000-09-07 13:32:59 +00:00
|
|
|
#ifndef PC98
|
2000-10-06 11:50:19 +00:00
|
|
|
#ifdef APIC_IO
|
1998-09-08 09:47:46 +00:00
|
|
|
if (isa_apic_irq(8) != 8)
|
1997-07-20 11:55:52 +00:00
|
|
|
panic("APIC RTC != 8");
|
2000-10-06 11:50:19 +00:00
|
|
|
#endif /* APIC_IO */
|
1997-08-21 08:23:52 +00:00
|
|
|
|
2001-02-13 10:35:15 +00:00
|
|
|
inthand_add("rtc", 8, (driver_intr_t *)rtcintr, NULL,
|
|
|
|
INTR_TYPE_CLK | INTR_FAST, NULL);
|
1997-07-20 11:55:52 +00:00
|
|
|
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_lock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
#ifdef APIC_IO
|
|
|
|
INTREN(APIC_IRQ8);
|
|
|
|
#else
|
|
|
|
INTREN(IRQ8);
|
|
|
|
#endif /* APIC_IO */
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_unlock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
|
|
|
|
writertc(RTC_STATUSB, rtc_statusb);
|
|
|
|
#endif /* PC98 */
|
|
|
|
|
|
|
|
#ifdef APIC_IO
|
|
|
|
if (apic_8254_trial) {
|
1998-03-15 13:35:42 +00:00
|
|
|
|
|
|
|
printf("APIC_IO: Testing 8254 interrupt delivery\n");
|
|
|
|
while (read_intr_count(8) < 6)
|
1998-04-06 03:38:18 +00:00
|
|
|
; /* nothing */
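		/*
		 * The busy-wait above lets the RTC (irq 8) tick 6 times;
		 * assuming the periodic interrupt is running at the usual
		 * 128 Hz that is roughly 47 ms, during which a working
		 * 8254 routing at hz = 100 should have delivered several
		 * clock interrupts.  Fewer than 3 is taken as evidence
		 * that the MP table lied.
		 */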
|
2000-10-06 11:50:19 +00:00
|
|
|
if (read_intr_count(apic_8254_intr) < 3) {
|
1998-03-15 13:35:42 +00:00
|
|
|
/*
|
|
|
|
* The MP table is broken.
|
|
|
|
* The 8254 was not connected to the specified pin
|
|
|
|
* on the IO APIC.
|
|
|
|
* Workaround: Limited variant of mixed mode.
|
|
|
|
*/
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_lock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
INTRDIS(1 << apic_8254_intr);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_unlock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
inthand_remove(clkdesc);
|
1998-03-15 13:35:42 +00:00
|
|
|
printf("APIC_IO: Broken MP table detected: "
|
2000-01-05 12:35:03 +00:00
|
|
|
"8254 is not connected to "
|
|
|
|
"IOAPIC #%d intpin %d\n",
|
|
|
|
int_to_apicintpin[apic_8254_intr].ioapic,
|
|
|
|
int_to_apicintpin[apic_8254_intr].int_pin);
|
|
|
|
/*
|
|
|
|
* Revoke current ISA IRQ 0 assignment and
|
|
|
|
* configure a fallback interrupt routing from
|
|
|
|
* the 8254 Timer via the 8259 PIC to the
|
|
|
|
* ExtInt interrupt line on IOAPIC #0 intpin 0.
|
|
|
|
* We reuse the low level interrupt handler number.
|
|
|
|
*/
|
|
|
|
if (apic_irq(0, 0) < 0) {
|
|
|
|
revoke_apic_irq(apic_8254_intr);
|
|
|
|
assign_apic_irq(0, 0, apic_8254_intr);
|
|
|
|
}
|
|
|
|
apic_8254_intr = apic_irq(0, 0);
|
1998-03-15 13:35:42 +00:00
|
|
|
setup_8254_mixed_mode();
|
2000-10-06 11:50:19 +00:00
|
|
|
inthand_add("clk", apic_8254_intr,
|
|
|
|
(driver_intr_t *)clkintr, NULL,
|
2001-02-13 10:35:15 +00:00
|
|
|
INTR_TYPE_CLK | INTR_FAST, NULL);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_lock_spin(&icu_lock);
|
2000-10-06 11:50:19 +00:00
|
|
|
INTREN(1 << apic_8254_intr);
|
2001-12-22 00:38:32 +00:00
|
|
|
mtx_unlock_spin(&icu_lock);
|
1998-03-15 13:35:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2000-01-05 12:35:03 +00:00
|
|
|
if (apic_int_type(0, 0) != 3 ||
|
|
|
|
int_to_apicintpin[apic_8254_intr].ioapic != 0 ||
|
|
|
|
int_to_apicintpin[apic_8254_intr].int_pin != 0)
|
|
|
|
printf("APIC_IO: routing 8254 via IOAPIC #%d intpin %d\n",
|
|
|
|
int_to_apicintpin[apic_8254_intr].ioapic,
|
|
|
|
int_to_apicintpin[apic_8254_intr].int_pin);
|
1998-03-15 13:35:42 +00:00
|
|
|
else
|
2000-01-05 12:35:03 +00:00
|
|
|
printf("APIC_IO: "
|
|
|
|
"routing 8254 via 8259 and IOAPIC #0 intpin 0\n");
|
1998-03-15 13:35:42 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef APIC_IO
|
|
|
|
static u_long
|
|
|
|
read_intr_count(int vec)
|
|
|
|
{
|
|
|
|
u_long *up;
|
|
|
|
up = intr_countp[vec];
|
|
|
|
if (up)
|
|
|
|
return *up;
|
|
|
|
return 0UL;
|
1996-10-23 07:25:35 +00:00
|
|
|
}
|
|
|
|
|
1998-03-15 13:35:42 +00:00
|
|
|
static void
|
|
|
|
setup_8254_mixed_mode()
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Allow 8254 timer to INTerrupt 8259:
|
|
|
|
* re-initialize master 8259:
|
|
|
|
* reset; prog 4 bytes, single ICU, edge triggered
|
|
|
|
*/
|
|
|
|
outb(IO_ICU1, 0x13);
|
|
|
|
#ifdef PC98
|
|
|
|
outb(IO_ICU1 + 2, NRSVIDT); /* start vector (unused) */
|
|
|
|
outb(IO_ICU1 + 2, 0x00); /* ignore slave */
|
|
|
|
outb(IO_ICU1 + 2, 0x03); /* auto EOI, 8086 */
|
|
|
|
outb(IO_ICU1 + 2, 0xfe); /* unmask INT0 */
|
|
|
|
#else
|
|
|
|
outb(IO_ICU1 + 1, NRSVIDT); /* start vector (unused) */
|
|
|
|
outb(IO_ICU1 + 1, 0x00); /* ignore slave */
|
|
|
|
outb(IO_ICU1 + 1, 0x03); /* auto EOI, 8086 */
|
|
|
|
outb(IO_ICU1 + 1, 0xfe); /* unmask INT0 */
|
|
|
|
#endif
|
2001-09-16 05:29:27 +00:00
|
|
|
|
1998-03-15 13:35:42 +00:00
|
|
|
/* program IO APIC for type 3 INT on INT0 */
|
|
|
|
if (ext_int_setup(0, 0) < 0)
|
|
|
|
panic("8254 redirect via APIC pin0 impossible!");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1996-10-23 07:25:35 +00:00
|
|
|
void
|
|
|
|
setstatclockrate(int newhz)
|
|
|
|
{
|
|
|
|
#ifndef PC98
|
|
|
|
if (newhz == RTC_PROFRATE)
|
|
|
|
rtc_statusa = RTCSA_DIVIDER | RTCSA_PROF;
|
|
|
|
else
|
|
|
|
rtc_statusa = RTCSA_DIVIDER | RTCSA_NOPROF;
|
|
|
|
writertc(RTC_STATUSA, rtc_statusa);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
static int
|
2000-07-04 11:25:35 +00:00
|
|
|
sysctl_machdep_i8254_freq(SYSCTL_HANDLER_ARGS)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
u_int freq;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use `i8254' instead of `timer' in external names because `timer'
|
|
|
|
* is too generic.  Should use it everywhere.
|
|
|
|
*/
|
|
|
|
freq = timer_freq;
|
1999-07-26 12:21:09 +00:00
|
|
|
error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
|
1996-08-30 10:43:14 +00:00
|
|
|
if (error == 0 && req->newptr != NULL) {
|
1998-02-21 15:52:40 +00:00
|
|
|
if (timer0_state != RELEASED)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (EBUSY); /* too much trouble to handle */
|
|
|
|
set_timer_freq(freq, hz);
|
1998-10-23 13:13:43 +00:00
|
|
|
i8254_timecounter.tc_frequency = freq;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_update(&i8254_timecounter);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
SYSCTL_PROC(_machdep, OID_AUTO, i8254_freq, CTLTYPE_INT | CTLFLAG_RW,
|
1999-07-26 12:21:09 +00:00
|
|
|
0, sizeof(u_int), sysctl_machdep_i8254_freq, "I", "");
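/*
 * Example usage (assuming the usual sysctl(8) interface):
 *	sysctl machdep.i8254_freq		# report the current value
 *	sysctl machdep.i8254_freq=1193182	# override the calibration
 * Writes are refused with EBUSY while timer 0 is allocated to a user
 * (timer0_state != RELEASED).
 */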
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
static int
|
2000-07-04 11:25:35 +00:00
|
|
|
sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
u_int freq;
|
|
|
|
|
1999-07-26 12:21:09 +00:00
|
|
|
if (tsc_timecounter.tc_frequency == 0)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (EOPNOTSUPP);
|
1997-12-26 20:42:37 +00:00
|
|
|
freq = tsc_freq;
|
1999-07-26 12:21:09 +00:00
|
|
|
error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
|
1998-02-21 15:52:40 +00:00
|
|
|
if (error == 0 && req->newptr != NULL) {
|
|
|
|
tsc_freq = freq;
|
1998-10-23 13:13:43 +00:00
|
|
|
tsc_timecounter.tc_frequency = tsc_freq;
|
2000-03-23 08:55:45 +00:00
|
|
|
tc_update(&tsc_timecounter);
|
1998-02-21 15:52:40 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1997-12-29 16:15:57 +00:00
|
|
|
SYSCTL_PROC(_machdep, OID_AUTO, tsc_freq, CTLTYPE_INT | CTLFLAG_RW,
|
1999-07-26 12:21:09 +00:00
|
|
|
0, sizeof(u_int), sysctl_machdep_tsc_freq, "I", "");
|
1998-02-21 15:52:40 +00:00
|
|
|
|
1998-05-28 13:51:39 +00:00
|
|
|
static unsigned
|
1998-06-07 09:51:08 +00:00
|
|
|
i8254_get_timecount(struct timecounter *tc)
|
1998-02-21 15:52:40 +00:00
|
|
|
{
|
1998-05-20 13:38:42 +00:00
|
|
|
u_int count;
|
1998-02-21 15:52:40 +00:00
|
|
|
u_int high, low;
|
2000-10-06 11:50:19 +00:00
|
|
|
u_int eflags;
|
1998-02-21 15:52:40 +00:00
|
|
|
|
2000-10-06 11:50:19 +00:00
|
|
|
eflags = read_eflags();
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&clock_lock);
|
1998-02-21 15:52:40 +00:00
|
|
|
|
|
|
|
/* Select timer0 and latch counter value. */
|
|
|
|
outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH);
|
|
|
|
|
|
|
|
low = inb(TIMER_CNTR0);
|
|
|
|
high = inb(TIMER_CNTR0);
|
1999-06-01 12:32:54 +00:00
|
|
|
count = timer0_max_count - ((high << 8) | low);
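	/*
	 * The i8254 counts down, so subtracting the latched value from
	 * timer0_max_count turns it into an up-count within the current
	 * period; the test below detects a rollover whose interrupt has
	 * not been serviced yet and credits a full period to i8254_offset
	 * so the timecount never goes backwards.
	 */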
|
|
|
|
if (count < i8254_lastcount ||
|
|
|
|
(!i8254_ticked && (clkintr_pending ||
|
2000-10-06 11:50:19 +00:00
|
|
|
((count < 20 || (!(eflags & PSL_I) && count < timer0_max_count / 2u)) &&
|
1999-06-01 12:32:54 +00:00
|
|
|
#ifdef APIC_IO
|
|
|
|
#define lapic_irr1 ((volatile u_int *)&lapic)[0x210 / 4] /* XXX XXX */
|
|
|
|
/* XXX this assumes that apic_8254_intr is < 24. */
|
|
|
|
(lapic_irr1 & (1 << apic_8254_intr))))
|
|
|
|
#else
|
|
|
|
(inb(IO_ICU1) & 1)))
|
|
|
|
#endif
|
|
|
|
)) {
|
1998-02-21 15:52:40 +00:00
|
|
|
i8254_ticked = 1;
|
1999-06-01 12:32:54 +00:00
|
|
|
i8254_offset += timer0_max_count;
|
1998-02-21 15:52:40 +00:00
|
|
|
}
|
|
|
|
i8254_lastcount = count;
|
|
|
|
count += i8254_offset;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&clock_lock);
|
1998-02-21 15:52:40 +00:00
|
|
|
return (count);
|
|
|
|
}
|
|
|
|
|
1998-05-28 13:51:39 +00:00
|
|
|
static unsigned
|
1998-06-07 09:51:08 +00:00
|
|
|
tsc_get_timecount(struct timecounter *tc)
|
1998-02-21 15:52:40 +00:00
|
|
|
{
|
1998-05-20 13:38:42 +00:00
|
|
|
return (rdtsc());
|
1998-02-21 15:52:40 +00:00
|
|
|
}
|
2000-06-28 03:17:51 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Attach to the ISA PnP descriptors for the timer and realtime clock.
|
|
|
|
*/
|
|
|
|
static struct isa_pnp_id attimer_ids[] = {
|
|
|
|
{ 0x0001d041 /* PNP0100 */, "AT timer" },
|
|
|
|
{ 0x000bd041 /* PNP0B00 */, "AT realtime clock" },
|
|
|
|
{ 0 }
|
|
|
|
};
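/*
 * The magic numbers are EISA/PnP IDs stored little-endian: the vendor
 * code "PNP" compresses to 0x41d0, so 0x0001d041 is PNP0100 (the AT
 * timer) and 0x000bd041 is PNP0B00 (the AT real-time clock).
 */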
|
|
|
|
|
|
|
|
static int
|
|
|
|
attimer_probe(device_t dev)
|
|
|
|
{
|
|
|
|
int result;
|
|
|
|
|
|
|
|
if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, attimer_ids)) <= 0)
|
|
|
|
device_quiet(dev);
|
|
|
|
return(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
attimer_attach(device_t dev)
|
|
|
|
{
|
|
|
|
return(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static device_method_t attimer_methods[] = {
|
|
|
|
/* Device interface */
|
|
|
|
DEVMETHOD(device_probe, attimer_probe),
|
|
|
|
DEVMETHOD(device_attach, attimer_attach),
|
|
|
|
DEVMETHOD(device_detach, bus_generic_detach),
|
|
|
|
DEVMETHOD(device_shutdown, bus_generic_shutdown),
|
|
|
|
DEVMETHOD(device_suspend, bus_generic_suspend), /* XXX stop statclock? */
|
|
|
|
DEVMETHOD(device_resume, bus_generic_resume), /* XXX restart statclock? */
|
|
|
|
{ 0, 0 }
|
|
|
|
};
|
|
|
|
|
|
|
|
static driver_t attimer_driver = {
|
|
|
|
"attimer",
|
|
|
|
attimer_methods,
|
|
|
|
1, /* no softc */
|
|
|
|
};
|
|
|
|
|
|
|
|
static devclass_t attimer_devclass;
|
|
|
|
|
|
|
|
DRIVER_MODULE(attimer, isa, attimer_driver, attimer_devclass, 0, 0);
|
2001-09-16 05:29:27 +00:00
|
|
|
#ifndef PC98
|
|
|
|
DRIVER_MODULE(attimer, acpi, attimer_driver, attimer_devclass, 0, 0);
|
|
|
|
#endif
|