/*-
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *      from: @(#)com.c 7.5 (Berkeley) 5/16/91
 *      from: i386/isa sio.c,v 1.234
 */

#include "opt_comconsole.h"
#include "opt_compat.h"
#include "opt_ddb.h"
#include "opt_sio.h"

/*
 * Serial driver, based on 386BSD-0.1 com driver.
 * Mostly rewritten to use pseudo-DMA.
 * Works for National Semiconductor NS8250-NS16550AF UARTs.
 * COM driver, based on HP dca driver.
 *
 * Changes for PC-Card integration:
 *      - Added PC-Card driver table and handlers
 */
/*===============================================================
 * 386BSD(98),FreeBSD-1.1x(98) com driver.
 * -----
 * modified for PC9801 by M.Ishii
 *      Kyoto University Microcomputer Club (KMC)
 * Chou "TEFUTEFU" Hirotomi
 *      Kyoto Univ. the faculty of medicine
 *===============================================================
 * FreeBSD-2.0.1(98) sio driver.
 * -----
 * modified for pc98 Internal i8251 and MICRO CORE MC16550II
 *      T.Koike(hfc01340@niftyserve.or.jp)
 * implement kernel device configuration
 *      aizu@orient.center.nitech.ac.jp
 *
 * Notes.
 * -----
 * PC98 localization based on 386BSD(98) com driver.  Using its PC98 local
 * functions.
 * This driver is still under debugging and has bugs.
 *
 * 1) config
 *    options COM_MULTIPORT    #if using MC16550II
 *    device sio0 at nec? port 0x30  tty irq 4              #internal
 *    device sio1 at nec? port 0xd2  tty irq 5 flags 0x101  #mc1
 *    device sio2 at nec? port 0x8d2 tty flags 0x101        #mc2
 *                        # ~~~~~iobase           ~~multi port flag
 *                        #                      ~ master device is sio1
 * 2) device
 *    cd /dev; MAKEDEV ttyd0 ttyd1 ..
 * 3) /etc/rc.serial
 *    57600bps is too fast for sio0(internal8251)
 *    my ex.
 *      #set default speed 9600
 *      modem()
 *      :
 *      stty </dev/ttyid$i crtscts 9600
 *      :                 # ~~~~ default speed(can change after init.)
 *      modem 0 1 2
 * 4) COMCONSOLE
 *    not changed.
 * 5) PC9861K,PIO9032B,B98_01
 *    not tested.
 */
/*
 * modified for AIWA B98-01
 * by T.Hatanou <hatanou@yasuda.comm.waseda.ac.jp>  last update: 15 Sep.1995
 *
 * How to configure...
 *   # options COM_MULTIPORT         # support for MICROCORE MC16550II
 *      ... comment-out this line, which will conflict with B98_01.
 *   options "B98_01"                # support for AIWA B98-01
 *   device sio1 at nec? port 0x00d1 tty irq ?
 *   device sio2 at nec? port 0x00d5 tty irq ?
 *      ... you can leave these lines `irq ?', irq will be autodetected.
 */
/*
 * Modified by Y.Takahashi of Kogakuin University.
 */
/*
 * modified for 8251(FIFO) by Seigo TANIMURA <tanimura@FreeBSD.org>
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/dkstat.h>
#include <sys/fcntl.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/tty.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/timetc.h>
#include <sys/timepps.h>

#include <isa/isavar.h>

#include <machine/limits.h>
#include <machine/resource.h>

#include <dev/sio/sioreg.h>
#include <dev/sio/siovar.h>

#ifdef PC98
#include <pc98/pc98/pc98.h>
#include <pc98/pc98/pc98_machdep.h>
#endif

#ifdef COM_ESP
#include <dev/ic/esp.h>
#endif
#include <dev/ic/ns16550.h>
#ifdef PC98
#include <dev/ic/i8251.h>
#include <dev/ic/rsa.h>
#endif

#define LOTS_OF_EVENTS 64 /* helps separate urgent events from input */

#define CALLOUT_MASK 0x80
#define CONTROL_MASK 0x60
#define CONTROL_INIT_STATE 0x20
#define CONTROL_LOCK_STATE 0x40
#define DEV_TO_UNIT(dev) (MINOR_TO_UNIT(minor(dev)))
#define MINOR_MAGIC_MASK (CALLOUT_MASK | CONTROL_MASK)
#define MINOR_TO_UNIT(mynor) ((mynor) & ~MINOR_MAGIC_MASK)
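
/*
 * Illustration (added, not from the original source): with the masks above,
 * minor number 0x83 has CALLOUT_MASK set and MINOR_TO_UNIT(0x83) == 3, so it
 * names the callout side of unit 3, while 0x23 selects the initial-state
 * control device of the same unit (CONTROL_INIT_STATE).
 */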

#ifdef COM_MULTIPORT
/* checks in flags for multiport and which is multiport "master chip"
 * for a given card
 */
#define COM_ISMULTIPORT(flags) ((flags) & 0x01)
#define COM_MPMASTER(flags) (((flags) >> 8) & 0x0ff)
#define COM_NOTAST4(flags) ((flags) & 0x04)
#endif /* COM_MULTIPORT */
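
/*
 * Worked example (added for clarity): the sample config lines near the top
 * of this file use "flags 0x101" for the MC16550II ports.  With the macros
 * above, COM_ISMULTIPORT(0x101) is true (bit 0) and COM_MPMASTER(0x101) == 1
 * ((0x101 >> 8) & 0xff), i.e. sio1 is the interrupt master for that card.
 */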

#define COM_CONSOLE(flags) ((flags) & 0x10)
#define COM_FORCECONSOLE(flags) ((flags) & 0x20)
#define COM_LLCONSOLE(flags) ((flags) & 0x40)
#define COM_DEBUGGER(flags) ((flags) & 0x80)
#define COM_LOSESOUTINTS(flags) ((flags) & 0x08)
#define COM_NOFIFO(flags) ((flags) & 0x02)
#define COM_ST16650A(flags) ((flags) & 0x20000)
#define COM_C_NOPROBE (0x40000)
#define COM_NOPROBE(flags) ((flags) & COM_C_NOPROBE)
#define COM_C_IIR_TXRDYBUG (0x80000)
#define COM_IIR_TXRDYBUG(flags) ((flags) & COM_C_IIR_TXRDYBUG)
#define COM_FIFOSIZE(flags) (((flags) & 0xff000000) >> 24)

#ifdef PC98
#define com_emr com_msr /* Extension mode register for RSB-2000/3000 */
#endif
#define com_scr 7 /* scratch register for 16450-16550 (R/W) */

#define sio_getreg(com, off) \
        (bus_space_read_1((com)->bst, (com)->bsh, (off)))
#define sio_setreg(com, off, value) \
        (bus_space_write_1((com)->bst, (com)->bsh, (off), (value)))
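
/*
 * Usage note (added): these wrappers issue bus_space accesses relative to
 * the port resource mapped at probe time, e.g. sio_setreg(com, com_ier, 0)
 * masks all UART interrupts and sio_getreg(com, com_iir) polls the interrupt
 * identification register, as done in sioprobe() below.
 */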

/*
 * com state bits.
 * (CS_BUSY | CS_TTGO) and (CS_BUSY | CS_TTGO | CS_ODEVREADY) must be higher
 * than the other bits so that they can be tested as a group without masking
 * off the low bits.
 *
 * The following com and tty flags correspond closely:
 *     CS_BUSY      = TS_BUSY (maintained by comstart(), siopoll() and
 *                             comstop())
 *     CS_TTGO      = ~TS_TTSTOP (maintained by comparam() and comstart())
 *     CS_CTS_OFLOW = CCTS_OFLOW (maintained by comparam())
 *     CS_RTS_IFLOW = CRTS_IFLOW (maintained by comparam())
 * TS_FLUSH is not used.
 * XXX I think TIOCSETA doesn't clear TS_TTSTOP when it clears IXON.
 * XXX CS_*FLOW should be CF_*FLOW in com->flags (control flags not state).
 */
#define CS_BUSY 0x80 /* output in progress */
#define CS_TTGO 0x40 /* output not stopped by XOFF */
#define CS_ODEVREADY 0x20 /* external device h/w ready (CTS) */
#define CS_CHECKMSR 1 /* check of MSR scheduled */
#define CS_CTS_OFLOW 2 /* use CTS output flow control */
#define CS_DTR_OFF 0x10 /* DTR held off */
#define CS_ODONE 4 /* output completed */
#define CS_RTS_IFLOW 8 /* use RTS input flow control */
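/*
 * Added note on the ordering trick described above: because CS_BUSY, CS_TTGO
 * and CS_ODEVREADY occupy the three highest bits of the state byte, a test
 * such as (com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) is true exactly
 * when all three bits are set, regardless of the low bits, so no masking is
 * needed in the output fast path.
 */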
#define CSE_BUSYCHECK 1 /* siobusycheck() scheduled */

static char const * const error_desc[] = {
#define CE_OVERRUN 0
        "silo overflow",
#define CE_INTERRUPT_BUF_OVERFLOW 1
        "interrupt-level buffer overflow",
#define CE_TTY_BUF_OVERFLOW 2
        "tty-level buffer overflow",
};

#define CE_NTYPES 3
#define CE_RECORD(com, errnum) (++(com)->delta_error_counts[errnum])

/* types.  XXX - should be elsewhere */
typedef u_int Port_t; /* hardware port */
typedef u_char bool_t; /* boolean */

/* queue of linear buffers */
struct lbq {
        u_char *l_head; /* next char to process */
        u_char *l_tail; /* one past the last char to process */
        struct lbq *l_next; /* next in queue */
        bool_t l_queued; /* nonzero if queued */
};

/* com device structure */
struct com_s {
        u_int flags; /* Copy isa device flags */
        u_char state; /* miscellaneous flag bits */
        bool_t active_out; /* nonzero if the callout device is open */
        u_char cfcr_image; /* copy of value written to CFCR */
#ifdef COM_ESP
        bool_t esp; /* is this unit a hayes esp board? */
#endif
        u_char extra_state; /* more flag bits, separate for order trick */
        u_char fifo_image; /* copy of value written to FIFO */
        bool_t hasfifo; /* nonzero for 16550 UARTs */
        bool_t st16650a; /* Is a Startech 16650A or RTS/CTS compat */
        bool_t loses_outints; /* nonzero if device loses output interrupts */
        u_char mcr_image; /* copy of value written to MCR */
#ifdef COM_MULTIPORT
        bool_t multiport; /* is this unit part of a multiport device? */
#endif /* COM_MULTIPORT */
        bool_t no_irq; /* nonzero if irq is not attached */
        bool_t gone; /* hardware disappeared */
        bool_t poll; /* nonzero if polling is required */
        bool_t poll_output; /* nonzero if polling for output is required */
        int unit; /* unit number */
        int dtr_wait; /* time to hold DTR down on close (* 1/hz) */
        u_int tx_fifo_size;
        u_int wopeners; /* # processes waiting for DCD in open() */

        /*
         * The high level of the driver never reads status registers directly
         * because there would be too many side effects to handle conveniently.
         * Instead, it reads copies of the registers stored here by the
         * interrupt handler.
         */
        u_char last_modem_status; /* last MSR read by intr handler */
        u_char prev_modem_status; /* last MSR handled by high level */

        u_char hotchar; /* ldisc-specific char to be handled ASAP */
        u_char *ibuf; /* start of input buffer */
        u_char *ibufend; /* end of input buffer */
        u_char *ibufold; /* old input buffer, to be freed */
        u_char *ihighwater; /* threshold in input buffer */
        u_char *iptr; /* next free spot in input buffer */
        int ibufsize; /* size of ibuf (does not include error bytes) */
        int ierroff; /* offset of error bytes in ibuf */

        struct lbq obufq; /* head of queue of output buffers */
        struct lbq obufs[2]; /* output buffers */

        bus_space_tag_t bst;
        bus_space_handle_t bsh;

#ifdef PC98
        Port_t cmd_port;
        Port_t sts_port;
        Port_t in_modem_port;
        Port_t intr_ctrl_port;
        Port_t rsabase; /* iobase address of an I/O-DATA RSA board */
        int intr_enable;
        int pc98_prev_modem_status;
        int pc98_modem_delta;
        int modem_car_chg_timer;
        int pc98_prev_siocmd;
        int pc98_prev_siomod;
        int modem_checking;
        int pc98_if_type;

        bool_t pc98_8251fifo;
        bool_t pc98_8251fifo_enable;
#endif /* PC98 */
        Port_t data_port; /* i/o ports */
#ifdef COM_ESP
        Port_t esp_port;
#endif
        Port_t int_id_port;
        Port_t modem_ctl_port;
        Port_t line_status_port;
        Port_t modem_status_port;
        Port_t intr_ctl_port; /* Ports of IIR register */

        struct tty *tp; /* cross reference */

        /* Initial state. */
        struct termios it_in; /* should be in struct tty */
        struct termios it_out;

        /* Lock state. */
        struct termios lt_in; /* should be in struct tty */
        struct termios lt_out;

        bool_t do_timestamp;
        bool_t do_dcd_timestamp;
        struct timeval timestamp;
        struct timeval dcd_timestamp;
        struct pps_state pps;

        u_long bytes_in; /* statistics */
        u_long bytes_out;
        u_int delta_error_counts[CE_NTYPES];
        u_long error_counts[CE_NTYPES];

        u_long rclk;

        struct resource *irqres;
        struct resource *ioportres;
        void *cookie;
        dev_t devs[6];

        /*
         * Data area for output buffers.  Someday we should build the output
         * buffer queue without copying data.
         */
#ifdef PC98
        int obufsize;
        u_char *obuf1;
        u_char *obuf2;
#else
        u_char obuf1[256];
        u_char obuf2[256];
#endif
};

#ifdef COM_ESP
static int espattach(struct com_s *com, Port_t esp_port);
#endif

static timeout_t siobusycheck;
static u_int siodivisor(u_long rclk, speed_t speed);
static timeout_t siodtrwakeup;
static void comhardclose(struct com_s *com);
static void sioinput(struct com_s *com);
static void siointr1(struct com_s *com);
static void siointr(void *arg);
static int commctl(struct com_s *com, int bits, int how);
static int comparam(struct tty *tp, struct termios *t);
static void siopoll(void *);
static void siosettimeout(void);
static int siosetwater(struct com_s *com, speed_t speed);
static void comstart(struct tty *tp);
static void comstop(struct tty *tp, int rw);
static timeout_t comwakeup;
static void disc_optim(struct tty *tp, struct termios *t,
    struct com_s *com);

char sio_driver_name[] = "sio";
static struct mtx sio_lock;
static int sio_inited;

/* table and macro for fast conversion from a unit number to its com struct */
devclass_t sio_devclass;
#define com_addr(unit) ((struct com_s *) \
        devclass_get_softc(sio_devclass, unit)) /* XXX */

static d_open_t sioopen;
static d_close_t sioclose;
static d_read_t sioread;
static d_write_t siowrite;
static d_ioctl_t sioioctl;

#define CDEV_MAJOR 28
static struct cdevsw sio_cdevsw = {
        /* open */      sioopen,
        /* close */     sioclose,
        /* read */      sioread,
        /* write */     siowrite,
        /* ioctl */     sioioctl,
        /* poll */      ttypoll,
        /* mmap */      nommap,
        /* strategy */  nostrategy,
        /* name */      sio_driver_name,
        /* maj */       CDEV_MAJOR,
        /* dump */      nodump,
        /* psize */     nopsize,
        /* flags */     D_TTY | D_KQFILTER,
        /* kqfilter */  ttykqfilter,
};

int comconsole = -1;
static volatile speed_t comdefaultrate = CONSPEED;
static u_long comdefaultrclk = DEFAULT_RCLK;
SYSCTL_ULONG(_machdep, OID_AUTO, conrclk, CTLFLAG_RW, &comdefaultrclk, 0, "");
#ifdef __alpha__
static volatile speed_t gdbdefaultrate = CONSPEED;
#endif
static u_int com_events; /* input chars + weighted output completions */
static Port_t siocniobase;
#ifndef __alpha__
static int siocnunit;
#endif
static Port_t siogdbiobase;
static int siogdbunit = -1;
static void *sio_slow_ih;
static void *sio_fast_ih;
static int sio_timeout;
static int sio_timeouts_until_log;
static struct callout_handle sio_timeout_handle
    = CALLOUT_HANDLE_INITIALIZER(&sio_timeout_handle);
static int sio_numunits;

#ifdef PC98
struct siodev {
        short if_type;
        short irq;
        Port_t cmd, sts, ctrl, mod;
};
static int sysclock;

#define COM_INT_DISABLE {int previpri; previpri=spltty();
#define COM_INT_ENABLE splx(previpri);}
#define IEN_TxFLAG IEN_Tx

#define COM_CARRIER_DETECT_EMULATE 0
#define PC98_CHECK_MODEM_INTERVAL (hz/10)
#define DCD_OFF_TOLERANCE 2
#define DCD_ON_RECOGNITION 2
#define IS_8251(if_type) (!(if_type & 0x10))
#define COM1_EXT_CLOCK 0x40000
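
/*
 * Added note: the PC98 interface type encodes the UART family in bit 0x10.
 * Types with that bit clear are i8251-style and index if_8251_type[] below;
 * types with it set are 16550-style and index if_16550a_type[], in both
 * cases via the low nibble (if_type & 0x0f), as used throughout this file.
 */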

static void commint(dev_t dev);
static void com_tiocm_set(struct com_s *com, int msr);
static void com_tiocm_bis(struct com_s *com, int msr);
static void com_tiocm_bic(struct com_s *com, int msr);
static int com_tiocm_get(struct com_s *com);
static int com_tiocm_get_delta(struct com_s *com);
static void pc98_msrint_start(dev_t dev);
static void com_cflag_and_speed_set(struct com_s *com, int cflag, int speed);
static int pc98_ttspeedtab(struct com_s *com, int speed, u_int *divisor);
static int pc98_get_modem_status(struct com_s *com);
static timeout_t pc98_check_msr;
static void pc98_set_baud_rate(struct com_s *com, u_int count);
static void pc98_i8251_reset(struct com_s *com, int mode, int command);
static void pc98_disable_i8251_interrupt(struct com_s *com, int mod);
static void pc98_enable_i8251_interrupt(struct com_s *com, int mod);
static int pc98_check_i8251_interrupt(struct com_s *com);
static int pc98_i8251_get_cmd(struct com_s *com);
static int pc98_i8251_get_mod(struct com_s *com);
static void pc98_i8251_set_cmd(struct com_s *com, int x);
static void pc98_i8251_or_cmd(struct com_s *com, int x);
static void pc98_i8251_clear_cmd(struct com_s *com, int x);
static void pc98_i8251_clear_or_cmd(struct com_s *com, int clr, int x);
static int pc98_check_if_type(device_t dev, struct siodev *iod);
static int pc98_check_8251vfast(void);
static int pc98_check_8251fifo(void);
static void pc98_check_sysclock(void);
static void pc98_set_ioport(struct com_s *com);

#define com_int_Tx_disable(com) \
        pc98_disable_i8251_interrupt(com,IEN_Tx|IEN_TxEMP)
#define com_int_Tx_enable(com) \
        pc98_enable_i8251_interrupt(com,IEN_TxFLAG)
#define com_int_Rx_disable(com) \
        pc98_disable_i8251_interrupt(com,IEN_Rx)
#define com_int_Rx_enable(com) \
        pc98_enable_i8251_interrupt(com,IEN_Rx)
#define com_int_TxRx_disable(com) \
        pc98_disable_i8251_interrupt(com,IEN_Tx|IEN_TxEMP|IEN_Rx)
#define com_int_TxRx_enable(com) \
        pc98_enable_i8251_interrupt(com,IEN_TxFLAG|IEN_Rx)
#define com_send_break_on(com) \
        pc98_i8251_or_cmd(com,CMD8251_SBRK)
#define com_send_break_off(com) \
        pc98_i8251_clear_cmd(com,CMD8251_SBRK)

static struct speedtab pc98speedtab[] = {       /* internal RS232C interface */
        { 0, 0, },
        { 50, 50, },
        { 75, 75, },
        { 150, 150, },
        { 200, 200, },
        { 300, 300, },
        { 600, 600, },
        { 1200, 1200, },
        { 2400, 2400, },
        { 4800, 4800, },
        { 9600, 9600, },
        { 19200, 19200, },
        { 38400, 38400, },
        { 51200, 51200, },
        { 76800, 76800, },
        { 20800, 20800, },
        { 31200, 31200, },
        { 41600, 41600, },
        { 62400, 62400, },
        { -1, -1 }
};
static struct speedtab pc98fast_speedtab[] = {
        { 9600, 0x80 | (DEFAULT_RCLK / (16 * (9600))), },
        { 19200, 0x80 | (DEFAULT_RCLK / (16 * (19200))), },
        { 38400, 0x80 | (DEFAULT_RCLK / (16 * (38400))), },
        { 57600, 0x80 | (DEFAULT_RCLK / (16 * (57600))), },
        { 115200, 0x80 | (DEFAULT_RCLK / (16 * (115200))), },
        { -1, -1 }
};
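/*
 * Worked example (added): assuming the conventional 1.8432 MHz reference
 * clock for DEFAULT_RCLK, DEFAULT_RCLK / (16 * 9600) == 12, so the 9600 bps
 * entry above encodes divisor 12; the 0x80 bit appears to tag these values
 * as high-speed divisor codes rather than the literal rates used in
 * pc98speedtab[] above.
 */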
static struct speedtab comspeedtab_pio9032b[] = {
        { 300, 6, },
        { 600, 5, },
        { 1200, 4, },
        { 2400, 3, },
        { 4800, 2, },
        { 9600, 1, },
        { 19200, 0, },
        { 38400, 7, },
        { -1, -1 }
};
static struct speedtab comspeedtab_b98_01[] = {
        { 75, 11, },
        { 150, 10, },
        { 300, 9, },
        { 600, 8, },
        { 1200, 7, },
        { 2400, 6, },
        { 4800, 5, },
        { 9600, 4, },
        { 19200, 3, },
        { 38400, 2, },
        { 76800, 1, },
        { 153600, 0, },
        { -1, -1 }
};
static struct speedtab comspeedtab_ind[] = {
        { 300, 1536, },
        { 600, 768, },
        { 1200, 384, },
        { 2400, 192, },
        { 4800, 96, },
        { 9600, 48, },
        { 19200, 24, },
        { 38400, 12, },
        { 57600, 8, },
        { 115200, 4, },
        { 153600, 3, },
        { 230400, 2, },
        { 460800, 1, },
        { -1, -1 }
};

struct {
        char *name;
        short port_table[7];
        short irr_mask;
        struct speedtab *speedtab;
        short check_irq;
} if_8251_type[] = {
        /* COM_IF_INTERNAL */
        { " (internal)", {0x30, 0x32, 0x32, 0x33, 0x35, -1, -1},
          -1, pc98speedtab, 1 },
        /* COM_IF_PC9861K_1 */
        { " (PC9861K)", {0xb1, 0xb3, 0xb3, 0xb0, 0xb0, -1, -1},
          3, NULL, 1 },
        /* COM_IF_PC9861K_2 */
        { " (PC9861K)", {0xb9, 0xbb, 0xbb, 0xb2, 0xb2, -1, -1},
          3, NULL, 1 },
        /* COM_IF_IND_SS_1 */
        { " (IND-SS)", {0xb1, 0xb3, 0xb3, 0xb0, 0xb0, 0xb3, -1},
          3, comspeedtab_ind, 1 },
        /* COM_IF_IND_SS_2 */
        { " (IND-SS)", {0xb9, 0xbb, 0xbb, 0xb2, 0xb2, 0xbb, -1},
          3, comspeedtab_ind, 1 },
        /* COM_IF_PIO9032B_1 */
        { " (PIO9032B)", {0xb1, 0xb3, 0xb3, 0xb0, 0xb0, 0xb8, -1},
          7, comspeedtab_pio9032b, 1 },
        /* COM_IF_PIO9032B_2 */
        { " (PIO9032B)", {0xb9, 0xbb, 0xbb, 0xb2, 0xb2, 0xba, -1},
          7, comspeedtab_pio9032b, 1 },
        /* COM_IF_B98_01_1 */
        { " (B98-01)", {0xb1, 0xb3, 0xb3, 0xb0, 0xb0, 0xd1, 0xd3},
          7, comspeedtab_b98_01, 0 },
        /* COM_IF_B98_01_2 */
        { " (B98-01)", {0xb9, 0xbb, 0xbb, 0xb2, 0xb2, 0xd5, 0xd7},
          7, comspeedtab_b98_01, 0 },
};
#define PC98SIO_data_port(type) (if_8251_type[type].port_table[0])
#define PC98SIO_cmd_port(type) (if_8251_type[type].port_table[1])
#define PC98SIO_sts_port(type) (if_8251_type[type].port_table[2])
#define PC98SIO_in_modem_port(type) (if_8251_type[type].port_table[3])
#define PC98SIO_intr_ctrl_port(type) (if_8251_type[type].port_table[4])
#define PC98SIO_baud_rate_port(type) (if_8251_type[type].port_table[5])
#define PC98SIO_func_port(type) (if_8251_type[type].port_table[6])

#define I8251F_data 0x130
#define I8251F_lsr 0x132
#define I8251F_msr 0x134
#define I8251F_iir 0x136
#define I8251F_fcr 0x138
#define I8251F_div 0x13a

static bus_addr_t port_table_0[] =
        {0x000, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006, 0x007};
static bus_addr_t port_table_1[] =
        {0x000, 0x002, 0x004, 0x006, 0x008, 0x00a, 0x00c, 0x00e};
static bus_addr_t port_table_8[] =
        {0x000, 0x100, 0x200, 0x300, 0x400, 0x500, 0x600, 0x700};
static bus_addr_t port_table_rsa[] = {
        0x008, 0x009, 0x00a, 0x00b, 0x00c, 0x00d, 0x00e, 0x00f,
        0x000, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006, 0x007
};

struct {
        char *name;
        short irr_read;
        short irr_write;
        bus_addr_t *iat;
        bus_size_t iatsz;
        u_long rclk;
} if_16550a_type[] = {
        /* COM_IF_RSA98 */
        {" (RSA-98)", -1, -1, port_table_0, IO_COMSIZE, DEFAULT_RCLK},
        /* COM_IF_NS16550 */
        {"", -1, -1, port_table_0, IO_COMSIZE, DEFAULT_RCLK},
        /* COM_IF_SECOND_CCU */
        {"", -1, -1, port_table_0, IO_COMSIZE, DEFAULT_RCLK},
        /* COM_IF_MC16550II */
        {" (MC16550II)", -1, 0x1000, port_table_8, IO_COMSIZE,
         DEFAULT_RCLK * 4},
        /* COM_IF_MCRS98 */
        {" (MC-RS98)", -1, 0x1000, port_table_8, IO_COMSIZE, DEFAULT_RCLK * 4},
        /* COM_IF_RSB3000 */
        {" (RSB-3000)", 0xbf, -1, port_table_1, IO_COMSIZE, DEFAULT_RCLK * 10},
        /* COM_IF_RSB384 */
        {" (RSB-384)", 0xbf, -1, port_table_1, IO_COMSIZE, DEFAULT_RCLK * 10},
        /* COM_IF_MODEM_CARD */
        {"", -1, -1, port_table_0, IO_COMSIZE, DEFAULT_RCLK},
        /* COM_IF_RSA98III */
        {" (RSA-98III)", -1, -1, port_table_rsa, 16, DEFAULT_RCLK * 8},
        /* COM_IF_ESP98 */
        {" (ESP98)", -1, -1, port_table_1, IO_COMSIZE, DEFAULT_RCLK * 4},
};
#endif /* PC98 */

#ifdef COM_ESP
#ifdef PC98

/* XXX configure this properly. */
/* XXX quite broken for new-bus. */
static Port_t likely_com_ports[] = { 0, 0xb0, 0xb1, 0 };
static Port_t likely_esp_ports[] = { 0xc0d0, 0 };

#define ESP98_CMD1 (ESP_CMD1 * 0x100)
#define ESP98_CMD2 (ESP_CMD2 * 0x100)
#define ESP98_STATUS1 (ESP_STATUS1 * 0x100)
#define ESP98_STATUS2 (ESP_STATUS2 * 0x100)

#else /* PC98 */

/* XXX configure this properly. */
static Port_t likely_com_ports[] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, };
static Port_t likely_esp_ports[] = { 0x140, 0x180, 0x280, 0 };

#endif /* PC98 */
#endif

/*
 * handle sysctl read/write requests for console speed
 *
 * In addition to setting comdefaultrate for I/O through /dev/console,
 * also set the initial and lock values for the /dev/ttyXX device
 * if there is one associated with the console.  Finally, if the /dev/tty
 * device has already been open, change the speed on the open running port
 * itself.
 */

static int
sysctl_machdep_comdefaultrate(SYSCTL_HANDLER_ARGS)
{
        int error, s;
        speed_t newspeed;
        struct com_s *com;
        struct tty *tp;

        newspeed = comdefaultrate;

        error = sysctl_handle_opaque(oidp, &newspeed, sizeof newspeed, req);
        if (error || !req->newptr)
                return (error);

        comdefaultrate = newspeed;

        if (comconsole < 0)             /* serial console not selected? */
                return (0);

        com = com_addr(comconsole);
        if (com == NULL)
                return (ENXIO);

        /*
         * set the initial and lock rates for /dev/ttydXX and /dev/cuaXX
         * (note, the lock rates really are boolean -- if non-zero, disallow
         *  speed changes)
         */
        com->it_in.c_ispeed  = com->it_in.c_ospeed =
        com->lt_in.c_ispeed  = com->lt_in.c_ospeed =
        com->it_out.c_ispeed = com->it_out.c_ospeed =
        com->lt_out.c_ispeed = com->lt_out.c_ospeed = comdefaultrate;

        /*
         * if we're open, change the running rate too
         */
        tp = com->tp;
        if (tp && (tp->t_state & TS_ISOPEN)) {
                tp->t_termios.c_ispeed =
                tp->t_termios.c_ospeed = comdefaultrate;
                s = spltty();
                error = comparam(tp, &tp->t_termios);
                splx(s);
        }
        return error;
}

SYSCTL_PROC(_machdep, OID_AUTO, conspeed, CTLTYPE_INT | CTLFLAG_RW,
            0, 0, sysctl_machdep_comdefaultrate, "I", "");
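
/*
 * Example (added): from userland the console speed registered above can be
 * read or changed with sysctl(8), e.g. "sysctl -w machdep.conspeed=115200";
 * per the handler above this also updates the initial and lock rates of the
 * console unit and, if it is open, the running termios speed.
 */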

/*
 * Unload the driver and clear the table.
 * XXX this is mostly wrong.
 * XXX TODO:
 * This is usually called when the card is ejected, but
 * can be caused by a modunload of a controller driver.
 * The idea is to reset the driver's view of the device
 * and ensure that any driver entry points such as
 * read and write do not hang.
 */
int
siodetach(dev)
        device_t dev;
{
        struct com_s *com;
        int i;

        com = (struct com_s *) device_get_softc(dev);
        if (com == NULL) {
                device_printf(dev, "NULL com in siounload\n");
                return (0);
        }
        com->gone = 1;
        for (i = 0 ; i < 6; i++)
                destroy_dev(com->devs[i]);
        if (com->irqres) {
                bus_teardown_intr(dev, com->irqres, com->cookie);
                bus_release_resource(dev, SYS_RES_IRQ, 0, com->irqres);
        }
        if (com->ioportres)
                bus_release_resource(dev, SYS_RES_IOPORT, 0, com->ioportres);
        if (com->tp && (com->tp->t_state & TS_ISOPEN)) {
                device_printf(dev, "still open, forcing close\n");
                (*linesw[com->tp->t_line].l_close)(com->tp, 0);
                com->tp->t_gen++;
                ttyclose(com->tp);
                ttwakeup(com->tp);
                ttwwakeup(com->tp);
        } else {
                if (com->ibuf != NULL)
                        free(com->ibuf, M_DEVBUF);
#ifdef PC98
                if (com->obuf1 != NULL)
                        free(com->obuf1, M_DEVBUF);
#endif
                device_set_softc(dev, NULL);
                free(com, M_DEVBUF);
        }
        return (0);
}

int
sioprobe(dev, xrid, rclk, noprobe)
        device_t dev;
        int xrid;
        u_long rclk;
        int noprobe;
{
#if 0
        static bool_t already_init;
        device_t xdev;
#endif
        struct com_s *com;
        u_int divisor;
        bool_t failures[10];
        int fn;
        device_t idev;
        Port_t iobase;
        intrmask_t irqmap[4];
        intrmask_t irqs;
        u_char mcr_image;
        int result;
        u_long xirq;
        u_int flags = device_get_flags(dev);
        int rid;
        struct resource *port;
#ifdef PC98
        int tmp;
        struct siodev iod;
#endif

#ifdef PC98
        iod.if_type = GET_IFTYPE(flags);
        if ((iod.if_type < 0 || iod.if_type > COM_IF_END1) &&
            (iod.if_type < 0x10 || iod.if_type > COM_IF_END2))
                return ENXIO;
#endif

        rid = xrid;
#ifdef PC98
        if (IS_8251(iod.if_type)) {
                port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
                                          0, ~0, 1, RF_ACTIVE);
        } else if (iod.if_type == COM_IF_MODEM_CARD ||
                   iod.if_type == COM_IF_RSA98III ||
                   isa_get_vendorid(dev)) {
                port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
                    if_16550a_type[iod.if_type & 0x0f].iatsz, RF_ACTIVE);
        } else {
                port = isa_alloc_resourcev(dev, SYS_RES_IOPORT, &rid,
                    if_16550a_type[iod.if_type & 0x0f].iat,
                    if_16550a_type[iod.if_type & 0x0f].iatsz, RF_ACTIVE);
        }
#else
        port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
                                  0, ~0, IO_COMSIZE, RF_ACTIVE);
#endif
        if (!port)
                return (ENXIO);
#ifdef PC98
        if (!IS_8251(iod.if_type)) {
                if (isa_load_resourcev(port,
                        if_16550a_type[iod.if_type & 0x0f].iat,
                        if_16550a_type[iod.if_type & 0x0f].iatsz) != 0) {
                        bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                        return ENXIO;
                }
        }
#endif

        com = malloc(sizeof(*com), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (com == NULL)
                return (ENOMEM);
        device_set_softc(dev, com);
        com->bst = rman_get_bustag(port);
        com->bsh = rman_get_bushandle(port);
#ifdef PC98
        if (!IS_8251(iod.if_type) && rclk == 0)
                rclk = if_16550a_type[iod.if_type & 0x0f].rclk;
#else
        if (rclk == 0)
                rclk = DEFAULT_RCLK;
#endif
        com->rclk = rclk;

        while (sio_inited != 2)
                if (atomic_cmpset_int(&sio_inited, 0, 1)) {
                        mtx_init(&sio_lock, sio_driver_name, (comconsole != -1) ?
                            MTX_SPIN | MTX_QUIET : MTX_SPIN);
                        atomic_store_rel_int(&sio_inited, 2);
                }

#if 0
        /*
         * XXX this is broken - when we are first called, there are no
         * previously configured IO ports.  We could hard code
         * 0x3f8, 0x2f8, 0x3e8, 0x2e8 etc but that's probably worse.
         * This code has been doing nothing since the conversion since
         * "count" is zero the first time around.
         */
        if (!already_init) {
                /*
                 * Turn off MCR_IENABLE for all likely serial ports.  An unused
                 * port with its MCR_IENABLE gate open will inhibit interrupts
                 * from any used port that shares the interrupt vector.
                 * XXX the gate enable is elsewhere for some multiports.
                 */
                device_t *devs;
                int count, i, xioport;
#ifdef PC98
                int xiftype;
#endif

                devclass_get_devices(sio_devclass, &devs, &count);
#ifdef PC98
                for (i = 0; i < count; i++) {
                        xdev = devs[i];
                        xioport = bus_get_resource_start(xdev, SYS_RES_IOPORT, 0);
                        xiftype = GET_IFTYPE(device_get_flags(xdev));
                        if (device_is_enabled(xdev) && xioport > 0) {
                                if (IS_8251(xiftype))
                                        outb((xioport & 0xff00) | PC98SIO_cmd_port(xiftype & 0x0f), 0xf2);
                                else
                                        outb(xioport + if_16550a_type[xiftype & 0x0f].iat[com_mcr], 0);
                        }
                }
#else
                for (i = 0; i < count; i++) {
                        xdev = devs[i];
                        if (device_is_enabled(xdev) &&
                            bus_get_resource(xdev, SYS_RES_IOPORT, 0, &xioport,
                                             NULL) == 0)
                                outb(xioport + com_mcr, 0);
                }
#endif
                free(devs, M_TEMP);
                already_init = TRUE;
        }
#endif

        if (COM_LLCONSOLE(flags)) {
                printf("sio%d: reserved for low-level i/o\n",
                       device_get_unit(dev));
                bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                device_set_softc(dev, NULL);
                free(com, M_DEVBUF);
                return (ENXIO);
        }

#ifdef PC98
        DELAY(10);

        /*
         * If the port is i8251 UART (internal, B98_01)
         */
        if (pc98_check_if_type(dev, &iod) == -1) {
                bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                device_set_softc(dev, NULL);
                free(com, M_DEVBUF);
                return (ENXIO);
        }
        if (iod.irq > 0)
                bus_set_resource(dev, SYS_RES_IRQ, 0, iod.irq, 1);
        if (IS_8251(iod.if_type)) {
                outb(iod.cmd, 0);
                DELAY(10);
                outb(iod.cmd, 0);
                DELAY(10);
                outb(iod.cmd, 0);
                DELAY(10);
                outb(iod.cmd, CMD8251_RESET);
                DELAY(1000);            /* for a while...*/
                outb(iod.cmd, 0xf2);    /* MODE (dummy) */
                DELAY(10);
                outb(iod.cmd, 0x01);    /* CMD (dummy) */
                DELAY(1000);            /* for a while...*/
                if (( inb(iod.sts) & STS8251_TxEMP ) == 0 ) {
                        result = (ENXIO);
                }
                if (if_8251_type[iod.if_type & 0x0f].check_irq) {
                        COM_INT_DISABLE
                        tmp = ( inb( iod.ctrl ) & ~(IEN_Rx|IEN_TxEMP|IEN_Tx));
                        outb( iod.ctrl, tmp|IEN_TxEMP );
                        DELAY(10);
                        result = isa_irq_pending() ? 0 : ENXIO;
                        outb( iod.ctrl, tmp );
                        COM_INT_ENABLE
                } else {
                        /*
                         * B98_01 doesn't activate TxEMP interrupt line
                         * when being reset, so we can't check irq pending.
                         */
                        result = 0;
                }
                if (epson_machine_id==0x20) {   /* XXX */
                        result = 0;
                }
                bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                if (result) {
                        device_set_softc(dev, NULL);
                        free(com, M_DEVBUF);
                }
                return result;
        }
#endif /* PC98 */
        /*
         * If the device is on a multiport card and has an AST/4
         * compatible interrupt control register, initialize this
         * register and prepare to leave MCR_IENABLE clear in the mcr.
         * Otherwise, prepare to set MCR_IENABLE in the mcr.
         * Point idev to the device struct giving the correct id_irq.
         * This is the struct for the master device if there is one.
         */
        idev = dev;
        mcr_image = MCR_IENABLE;
#ifdef COM_MULTIPORT
        if (COM_ISMULTIPORT(flags)) {
#ifndef PC98
                Port_t xiobase;
                u_long io;
#endif

                idev = devclass_get_device(sio_devclass, COM_MPMASTER(flags));
                if (idev == NULL) {
                        printf("sio%d: master device %d not configured\n",
                               device_get_unit(dev), COM_MPMASTER(flags));
                        idev = dev;
                }
#ifndef PC98
                if (!COM_NOTAST4(flags)) {
                        if (bus_get_resource(idev, SYS_RES_IOPORT, 0, &io,
                                             NULL) == 0) {
                                xiobase = io;
                                if (bus_get_resource(idev, SYS_RES_IRQ, 0,
                                    NULL, NULL) == 0)
                                        outb(xiobase + com_scr, 0x80);
                                else
                                        outb(xiobase + com_scr, 0);
                        }
                        mcr_image = 0;
                }
#endif
        }
#endif /* COM_MULTIPORT */
        if (bus_get_resource(idev, SYS_RES_IRQ, 0, NULL, NULL) != 0)
                mcr_image = 0;

        bzero(failures, sizeof failures);
        iobase = rman_get_start(port);

#ifdef PC98
        if (iod.if_type == COM_IF_RSA98III) {
                mcr_image = 0;

                outb(iobase + rsa_msr, 0x04);
                outb(iobase + rsa_frr, 0x00);
                if ((inb(iobase + rsa_srr) & 0x36) != 0x36) {
                        bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                        device_set_softc(dev, NULL);
                        free(com, M_DEVBUF);
                        return (ENXIO);
                }
                outb(iobase + rsa_ier, 0x00);
                outb(iobase + rsa_frr, 0x00);
                outb(iobase + rsa_tivsr, 0x00);
                outb(iobase + rsa_tcr, 0x00);
        }

        tmp = if_16550a_type[iod.if_type & 0x0f].irr_write;
        if (tmp != -1) {
                /* MC16550II */
                int irqout;
                switch (isa_get_irq(idev)) {
                case 3: irqout = 4; break;
                case 5: irqout = 5; break;
                case 6: irqout = 6; break;
                case 12: irqout = 7; break;
                default:
                        printf("sio%d: irq configuration error\n",
                               device_get_unit(dev));
                        bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
                        device_set_softc(dev, NULL);
                        free(com, M_DEVBUF);
                        return (ENXIO);
                }
                outb((iobase & 0x00ff) | tmp, irqout);
        }
#endif

        /*
         * We don't want to get actual interrupts, just masked ones.
         * Interrupts from this line should already be masked in the ICU,
         * but mask them in the processor as well in case there are some
         * (misconfigured) shared interrupts.
         */
        mtx_lock_spin(&sio_lock);
        /* EXTRA DELAY? */

        /*
         * Initialize the speed and the word size and wait long enough to
         * drain the maximum of 16 bytes of junk in device output queues.
         * The speed is undefined after a master reset and must be set
         * before relying on anything related to output.  There may be
         * junk after a (very fast) soft reboot and (apparently) after
         * master reset.
         * XXX what about the UART bug avoided by waiting in comparam()?
         * We don't want to wait long enough to drain at 2 bps.
         */
        if (iobase == siocniobase)
                DELAY((16 + 1) * 1000000 / (comdefaultrate / 10));
        else {
                sio_setreg(com, com_cfcr, CFCR_DLAB | CFCR_8BITS);
                divisor = siodivisor(rclk, SIO_TEST_SPEED);
                sio_setreg(com, com_dlbl, divisor & 0xff);
                sio_setreg(com, com_dlbh, divisor >> 8);
                sio_setreg(com, com_cfcr, CFCR_8BITS);
                DELAY((16 + 1) * 1000000 / (SIO_TEST_SPEED / 10));
        }
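
        /*
         * Added note on the delay above: one asynchronous character takes
         * roughly 10 bit times (start + 8 data + stop), so a wait of
         * (16 + 1) * 1000000 / (speed / 10) microseconds is just enough to
         * drain a full 16-byte FIFO plus the character in the shift register
         * at the chosen speed.
         */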

        /*
         * Enable the interrupt gate and disable device interrupts.  This
         * should leave the device driving the interrupt line low and
         * guarantee an edge trigger if an interrupt can be generated.
         */
        /* EXTRA DELAY? */
        sio_setreg(com, com_mcr, mcr_image);
        sio_setreg(com, com_ier, 0);
        DELAY(1000);            /* XXX */
        irqmap[0] = isa_irq_pending();

        /*
         * Attempt to set loopback mode so that we can send a null byte
         * without annoying any external device.
         */
        /* EXTRA DELAY? */
        sio_setreg(com, com_mcr, mcr_image | MCR_LOOPBACK);

        /*
         * Attempt to generate an output interrupt.  On 8250's, setting
         * IER_ETXRDY generates an interrupt independent of the current
         * setting and independent of whether the THR is empty.  On 16450's,
         * setting IER_ETXRDY generates an interrupt independent of the
         * current setting.  On 16550A's, setting IER_ETXRDY only
         * generates an interrupt when IER_ETXRDY is not already set.
         */
        sio_setreg(com, com_ier, IER_ETXRDY);
#ifdef PC98
        if (iod.if_type == COM_IF_RSA98III)
                outb(iobase + rsa_ier, 0x04);
#endif

        /*
         * On some 16x50 incompatibles, setting IER_ETXRDY doesn't generate
         * an interrupt.  They'd better generate one for actually doing
         * output.  Loopback may be broken on the same incompatibles but
         * it's unlikely to do more than allow the null byte out.
         */
        sio_setreg(com, com_data, 0);
        DELAY((1 + 2) * 1000000 / (SIO_TEST_SPEED / 10));

        /*
         * Turn off loopback mode so that the interrupt gate works again
         * (MCR_IENABLE was hidden).  This should leave the device driving
         * an interrupt line high.  It doesn't matter if the interrupt
         * line oscillates while we are not looking at it, since interrupts
         * are disabled.
         */
        /* EXTRA DELAY? */
        sio_setreg(com, com_mcr, mcr_image);

        /*
         * It seems my Xircom CBEM56G Cardbus modem wants to be reset
         * to 8 bits *again*, or else probe test 0 will fail.
         * gwk@sgi.com, 4/19/2001
         */
        sio_setreg(com, com_cfcr, CFCR_8BITS);
|
1996-06-14 10:04:54 +00:00
|
|
|
|
1999-09-12 13:44:54 +00:00
|
|
|
/*
|
1999-11-03 09:02:23 +00:00
|
|
|
* Some pcmcia cards have the "TXRDY bug", so we check everyone
|
1998-01-08 10:50:06 +00:00
|
|
|
* for IIR_TXRDY implementation ( Palido 321s, DC-1S... )
|
|
|
|
*/
|
2001-11-26 12:29:53 +00:00
|
|
|
if (noprobe) {
|
1999-11-18 12:22:09 +00:00
|
|
|
/* Reading IIR register twice */
|
|
|
|
for (fn = 0; fn < 2; fn ++) {
|
|
|
|
DELAY(10000);
|
2000-05-12 12:38:25 +00:00
|
|
|
failures[6] = sio_getreg(com, com_iir);
|
1999-11-18 12:22:09 +00:00
|
|
|
}
|
|
|
|
/* Check whether IIR_TXRDY is clear. */
|
|
|
|
result = 0;
|
|
|
|
if (failures[6] & IIR_TXRDY) {
|
2002-03-25 13:41:06 +00:00
|
|
|
/* Not clear; double-check after clearing the IER. */
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_ier, 0);
|
|
|
|
if (sio_getreg(com, com_iir) & IIR_NOPEND) {
|
2002-03-25 13:41:06 +00:00
|
|
|
/* OK, we have found the TXRDY bug. */
|
1999-11-18 12:22:09 +00:00
|
|
|
SET_FLAG(dev, COM_C_IIR_TXRDYBUG);
|
|
|
|
} else {
|
|
|
|
/* Unknown behaviour; just reject this chip. XXX */
|
|
|
|
result = ENXIO;
|
2001-09-16 05:33:07 +00:00
|
|
|
sio_setreg(com, com_mcr, 0);
|
1999-11-18 12:22:09 +00:00
|
|
|
}
|
1998-01-08 10:50:06 +00:00
|
|
|
} else {
|
1999-11-18 12:22:09 +00:00
|
|
|
/* OK, this chip behaves as expected. */
|
|
|
|
CLR_FLAG(dev, COM_C_IIR_TXRDYBUG);
|
1998-01-08 10:50:06 +00:00
|
|
|
}
|
2001-09-16 05:33:07 +00:00
|
|
|
sio_setreg(com, com_ier, 0);
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, CFCR_8BITS);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1999-11-18 12:22:09 +00:00
|
|
|
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
|
2001-11-26 12:29:53 +00:00
|
|
|
if (iobase == siocniobase)
|
|
|
|
result = 0;
|
|
|
|
if (result != 0) {
|
|
|
|
device_set_softc(dev, NULL);
|
|
|
|
free(com, M_DEVBUF);
|
|
|
|
}
|
|
|
|
return (result);
|
1999-11-18 12:22:09 +00:00
|
|
|
}
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Check that
|
|
|
|
* o the CFCR, IER and MCR in the UART hold the values written to them
|
|
|
|
* (the values happen to be all distinct - this is good for
|
|
|
|
* avoiding false positive tests from bus echoes).
|
|
|
|
* o an output interrupt is generated and its vector is correct.
|
|
|
|
* o the interrupt goes away when the IIR in the UART is read.
|
|
|
|
*/
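/* A zero failures[] entry means the corresponding check passed. */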
|
|
|
|
/* EXTRA DELAY? */
|
2000-05-12 12:38:25 +00:00
|
|
|
failures[0] = sio_getreg(com, com_cfcr) - CFCR_8BITS;
|
|
|
|
failures[1] = sio_getreg(com, com_ier) - IER_ETXRDY;
|
|
|
|
failures[2] = sio_getreg(com, com_mcr) - mcr_image;
|
1996-07-23 07:46:59 +00:00
|
|
|
DELAY(10000); /* Some internal modems need this time */
|
1998-06-01 12:40:24 +00:00
|
|
|
irqmap[1] = isa_irq_pending();
|
2000-05-12 12:38:25 +00:00
|
|
|
failures[4] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_TXRDY;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
1999-04-18 14:42:20 +00:00
|
|
|
if (iod.if_type == COM_IF_RSA98III)
|
2000-05-12 12:38:25 +00:00
|
|
|
inb(iobase + rsa_srr);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
DELAY(1000); /* XXX */
|
1998-06-01 12:40:24 +00:00
|
|
|
irqmap[2] = isa_irq_pending();
|
2000-05-12 12:38:25 +00:00
|
|
|
failures[6] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
1999-04-18 14:42:20 +00:00
|
|
|
if (iod.if_type == COM_IF_RSA98III)
|
2000-05-12 12:38:25 +00:00
|
|
|
inb(iobase + rsa_srr);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Turn off all device interrupts and check that they go off properly.
|
|
|
|
* Leave MCR_IENABLE alone. For ports without a master port, it gates
|
|
|
|
* the OUT2 output of the UART to
|
|
|
|
* the ICU input. Closing the gate would give a floating ICU input
|
1999-04-19 11:11:01 +00:00
|
|
|
* (unless there is another device driving it) and spurious interrupts.
|
1996-06-14 10:04:54 +00:00
|
|
|
* (On the system that this was first tested on, the input floats high
|
|
|
|
* and gives a (masked) interrupt as soon as the gate is closed.)
|
|
|
|
*/
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_ier, 0);
|
|
|
|
sio_setreg(com, com_cfcr, CFCR_8BITS); /* dummy to avoid bus echo */
|
|
|
|
failures[7] = sio_getreg(com, com_ier);
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
1999-04-18 14:42:20 +00:00
|
|
|
if (iod.if_type == COM_IF_RSA98III)
|
2000-05-12 12:38:25 +00:00
|
|
|
outb(iobase + rsa_ier, 0x00);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
DELAY(1000); /* XXX */
|
1998-06-01 12:40:24 +00:00
|
|
|
irqmap[3] = isa_irq_pending();
|
2000-05-12 12:38:25 +00:00
|
|
|
failures[9] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (iod.if_type == COM_IF_RSA98III) {
|
2000-05-12 12:38:25 +00:00
|
|
|
inb(iobase + rsa_srr);
|
|
|
|
outb(iobase + rsa_frr, 0x00);
|
1999-01-03 05:03:47 +00:00
|
|
|
}
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
|
1998-06-05 08:31:01 +00:00
|
|
|
irqs = irqmap[1] & ~irqmap[0];
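/* irqs now holds only the interrupts that the UART itself raised. */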
|
1999-12-07 09:29:15 +00:00
|
|
|
if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 &&
|
2002-01-18 03:30:22 +00:00
|
|
|
((1 << xirq) & irqs) == 0) {
|
1998-06-01 12:40:24 +00:00
|
|
|
printf(
|
1999-12-07 09:29:15 +00:00
|
|
|
"sio%d: configured irq %ld not in bitmap of probed irqs %#x\n",
|
1999-11-18 12:22:09 +00:00
|
|
|
device_get_unit(dev), xirq, irqs);
|
2002-01-18 03:30:22 +00:00
|
|
|
printf(
|
|
|
|
"sio%d: port may not be enabled\n",
|
|
|
|
device_get_unit(dev));
|
|
|
|
}
|
1998-06-01 12:40:24 +00:00
|
|
|
if (bootverbose)
|
1998-06-05 08:31:01 +00:00
|
|
|
printf("sio%d: irq maps: %#x %#x %#x %#x\n",
|
1999-04-18 14:42:20 +00:00
|
|
|
device_get_unit(dev),
|
|
|
|
irqmap[0], irqmap[1], irqmap[2], irqmap[3]);
|
1998-06-01 12:40:24 +00:00
|
|
|
|
1999-04-18 14:42:20 +00:00
|
|
|
result = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
for (fn = 0; fn < sizeof failures; ++fn)
|
|
|
|
if (failures[fn]) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_mcr, 0);
|
1999-04-18 14:42:20 +00:00
|
|
|
result = ENXIO;
|
1998-06-01 12:40:24 +00:00
|
|
|
if (bootverbose) {
|
|
|
|
printf("sio%d: probe failed test(s):",
|
1999-04-18 14:42:20 +00:00
|
|
|
device_get_unit(dev));
|
1998-06-01 12:40:24 +00:00
|
|
|
for (fn = 0; fn < sizeof failures; ++fn)
|
|
|
|
if (failures[fn])
|
|
|
|
printf(" %d", fn);
|
|
|
|
printf("\n");
|
|
|
|
}
|
|
|
|
break;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-09-12 13:44:54 +00:00
|
|
|
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
|
2001-11-26 12:29:53 +00:00
|
|
|
if (iobase == siocniobase)
|
|
|
|
result = 0;
|
|
|
|
if (result != 0) {
|
|
|
|
device_set_softc(dev, NULL);
|
|
|
|
free(com, M_DEVBUF);
|
|
|
|
}
|
|
|
|
return (result);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COM_ESP
|
|
|
|
static int
|
1999-05-10 09:14:40 +00:00
|
|
|
espattach(com, esp_port)
|
1996-06-14 10:04:54 +00:00
|
|
|
struct com_s *com;
|
|
|
|
Port_t esp_port;
|
|
|
|
{
|
|
|
|
u_char dips;
|
|
|
|
u_char val;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check the ESP-specific I/O port to see if we're an ESP
|
|
|
|
* card. If not, return failure immediately.
|
|
|
|
*/
|
|
|
|
if ((inb(esp_port) & 0xf3) == 0) {
|
|
|
|
printf(" port 0x%x is not an ESP board?\n", esp_port);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We've got something that claims to be a Hayes ESP card.
|
|
|
|
* Let's hope so.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Get the dip-switch configuration */
|
1999-01-03 15:57:02 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(esp_port + ESP98_CMD1, ESP_GETDIPS);
|
|
|
|
dips = inb(esp_port + ESP98_STATUS1);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(esp_port + ESP_CMD1, ESP_GETDIPS);
|
|
|
|
dips = inb(esp_port + ESP_STATUS1);
|
1999-01-03 15:57:02 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Bits 0,1 of dips say which COM port we are.
|
|
|
|
*/
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
if ((rman_get_start(com->ioportres) & 0xff) ==
|
|
|
|
likely_com_ports[dips & 0x03])
|
1999-01-03 05:03:47 +00:00
|
|
|
#else
|
2000-05-12 12:38:25 +00:00
|
|
|
if (rman_get_start(com->ioportres) == likely_com_ports[dips & 0x03])
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
printf(" : ESP");
|
|
|
|
else {
|
|
|
|
printf(" esp_port has com %d\n", dips & 0x03);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for ESP version 2.0 or later: bits 4,5,6 = 010.
|
|
|
|
*/
|
1999-01-03 15:57:02 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(esp_port + ESP98_CMD1, ESP_GETTEST);
|
|
|
|
val = inb(esp_port + ESP98_STATUS1); /* clear reg 1 */
|
|
|
|
val = inb(esp_port + ESP98_STATUS2);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(esp_port + ESP_CMD1, ESP_GETTEST);
|
|
|
|
val = inb(esp_port + ESP_STATUS1); /* clear reg 1 */
|
|
|
|
val = inb(esp_port + ESP_STATUS2);
|
1999-01-03 15:57:02 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if ((val & 0x70) < 0x20) {
|
|
|
|
printf("-old (%o)", val & 0x70);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check for ability to emulate 16550: bit 7 == 1
|
|
|
|
*/
|
|
|
|
if ((dips & 0x80) == 0) {
|
|
|
|
printf(" slave");
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Okay, we seem to be a Hayes ESP card. Whee.
|
|
|
|
*/
|
|
|
|
com->esp = TRUE;
|
|
|
|
com->esp_port = esp_port;
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
#endif /* COM_ESP */
|
|
|
|
|
2001-11-26 12:29:53 +00:00
|
|
|
int
|
2002-01-31 08:26:45 +00:00
|
|
|
sioattach(dev, xrid, rclk)
|
1999-04-18 14:42:20 +00:00
|
|
|
device_t dev;
|
2000-04-01 11:27:31 +00:00
|
|
|
int xrid;
|
2002-01-31 08:26:45 +00:00
|
|
|
u_long rclk;
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
#ifdef COM_ESP
|
|
|
|
Port_t *espp;
|
|
|
|
#endif
|
|
|
|
Port_t iobase;
|
|
|
|
int unit;
|
1999-11-18 12:22:09 +00:00
|
|
|
u_int flags;
|
1999-09-12 13:44:54 +00:00
|
|
|
int rid;
|
|
|
|
struct resource *port;
|
1999-11-18 12:22:09 +00:00
|
|
|
int ret;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
1999-04-18 14:42:20 +00:00
|
|
|
u_char *obuf;
|
1999-02-05 11:37:40 +00:00
|
|
|
u_long obufsize;
|
2000-05-12 12:38:25 +00:00
|
|
|
int if_type = GET_IFTYPE(device_get_flags(dev));
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2000-04-01 11:27:31 +00:00
|
|
|
rid = xrid;
|
1999-09-12 13:44:54 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
if (IS_8251(if_type)) {
|
|
|
|
port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
|
|
|
|
0, ~0, 1, RF_ACTIVE);
|
2001-02-25 08:55:07 +00:00
|
|
|
} else if (if_type == COM_IF_MODEM_CARD ||
|
|
|
|
if_type == COM_IF_RSA98III ||
|
2000-06-21 11:21:14 +00:00
|
|
|
isa_get_vendorid(dev)) {
|
|
|
|
port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0,
|
2001-01-27 13:02:06 +00:00
|
|
|
if_16550a_type[if_type & 0x0f].iatsz, RF_ACTIVE);
|
2000-05-12 12:38:25 +00:00
|
|
|
} else {
|
|
|
|
port = isa_alloc_resourcev(dev, SYS_RES_IOPORT, &rid,
|
2001-01-27 13:02:06 +00:00
|
|
|
if_16550a_type[if_type & 0x0f].iat,
|
|
|
|
if_16550a_type[if_type & 0x0f].iatsz, RF_ACTIVE);
|
2000-05-12 12:38:25 +00:00
|
|
|
}
|
1999-09-12 13:44:54 +00:00
|
|
|
#else
|
|
|
|
port = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
|
|
|
|
0, ~0, IO_COMSIZE, RF_ACTIVE);
|
|
|
|
#endif
|
|
|
|
if (!port)
|
2000-03-12 13:14:51 +00:00
|
|
|
return (ENXIO);
|
2000-05-12 12:38:25 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (!IS_8251(if_type)) {
|
|
|
|
if (isa_load_resourcev(port,
|
2001-01-27 13:02:06 +00:00
|
|
|
if_16550a_type[if_type & 0x0f].iat,
|
|
|
|
if_16550a_type[if_type & 0x0f].iatsz) != 0) {
|
|
|
|
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
|
|
|
|
return ENXIO;
|
2000-05-12 12:38:25 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
1999-09-12 13:44:54 +00:00
|
|
|
|
|
|
|
iobase = rman_get_start(port);
|
1999-04-18 14:42:20 +00:00
|
|
|
unit = device_get_unit(dev);
|
|
|
|
com = device_get_softc(dev);
|
1999-11-18 12:22:09 +00:00
|
|
|
flags = device_get_flags(dev);
|
|
|
|
|
|
|
|
if (unit >= sio_numunits)
|
|
|
|
sio_numunits = unit + 1;
|
|
|
|
|
1999-04-18 14:42:20 +00:00
|
|
|
#ifdef PC98
|
1999-02-05 11:37:40 +00:00
|
|
|
obufsize = 256;
|
2000-05-12 12:38:25 +00:00
|
|
|
if (if_type == COM_IF_RSA98III)
|
1999-02-05 11:37:40 +00:00
|
|
|
obufsize = 2048;
|
2000-04-22 15:12:52 +00:00
|
|
|
if ((obuf = malloc(obufsize * 2, M_DEVBUF, M_NOWAIT)) == NULL) {
|
|
|
|
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
|
2000-01-24 08:20:54 +00:00
|
|
|
return ENXIO;
|
2000-04-22 15:12:52 +00:00
|
|
|
}
|
1999-04-18 14:42:20 +00:00
|
|
|
bzero(obuf, obufsize * 2);
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* sioprobe() has initialized the device registers as follows:
|
|
|
|
* o cfcr = CFCR_8BITS.
|
|
|
|
* It is most important that CFCR_DLAB is off, so that the
|
|
|
|
* data port is not hidden when we enable interrupts.
|
|
|
|
* o ier = 0.
|
|
|
|
* Interrupts are only enabled when the line is open.
|
|
|
|
* o mcr = MCR_IENABLE, or 0 if the port has AST/4 compatible
|
|
|
|
* interrupt control register or the config specifies no irq.
|
|
|
|
* Keeping MCR_DTR and MCR_RTS off might stop the external
|
|
|
|
* device from sending before we are ready.
|
|
|
|
*/
|
|
|
|
bzero(com, sizeof *com);
|
|
|
|
com->unit = unit;
|
1999-09-12 13:44:54 +00:00
|
|
|
com->ioportres = port;
|
2000-05-12 12:38:25 +00:00
|
|
|
com->bst = rman_get_bustag(port);
|
|
|
|
com->bsh = rman_get_bushandle(port);
|
1996-06-14 10:04:54 +00:00
|
|
|
com->cfcr_image = CFCR_8BITS;
|
|
|
|
com->dtr_wait = 3 * hz;
|
1999-04-18 14:42:20 +00:00
|
|
|
com->loses_outints = COM_LOSESOUTINTS(flags) != 0;
|
2000-02-17 15:09:12 +00:00
|
|
|
com->no_irq = bus_get_resource(dev, SYS_RES_IRQ, 0, NULL, NULL) != 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
com->tx_fifo_size = 1;
|
2000-05-12 12:38:25 +00:00
|
|
|
#ifdef PC98
|
|
|
|
com->obufsize = obufsize;
|
|
|
|
com->obuf1 = obuf;
|
|
|
|
com->obuf2 = obuf + obufsize;
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
com->obufs[0].l_head = com->obuf1;
|
|
|
|
com->obufs[1].l_head = com->obuf2;
|
|
|
|
|
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
com->pc98_if_type = if_type;
|
|
|
|
|
|
|
|
if (IS_8251(if_type)) {
|
|
|
|
pc98_set_ioport(com);
|
1999-12-06 00:23:38 +00:00
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
if (if_type == COM_IF_INTERNAL && pc98_check_8251fifo()) {
|
|
|
|
com->pc98_8251fifo = 1;
|
|
|
|
com->pc98_8251fifo_enable = 0;
|
|
|
|
}
|
|
|
|
} else {
|
2000-06-21 11:21:14 +00:00
|
|
|
bus_addr_t *iat = if_16550a_type[if_type & 0x0f].iat;
|
1999-12-06 00:23:38 +00:00
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
com->data_port = iobase + iat[com_data];
|
|
|
|
com->int_id_port = iobase + iat[com_iir];
|
|
|
|
com->modem_ctl_port = iobase + iat[com_mcr];
|
1999-01-03 05:03:47 +00:00
|
|
|
com->mcr_image = inb(com->modem_ctl_port);
|
2000-05-12 12:38:25 +00:00
|
|
|
com->line_status_port = iobase + iat[com_lsr];
|
|
|
|
com->modem_status_port = iobase + iat[com_msr];
|
|
|
|
com->intr_ctl_port = iobase + iat[com_ier];
|
1999-12-06 00:23:38 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#else /* not PC98 */
|
|
|
|
com->data_port = iobase + com_data;
|
|
|
|
com->int_id_port = iobase + com_iir;
|
|
|
|
com->modem_ctl_port = iobase + com_mcr;
|
|
|
|
com->mcr_image = inb(com->modem_ctl_port);
|
|
|
|
com->line_status_port = iobase + com_lsr;
|
|
|
|
com->modem_status_port = iobase + com_msr;
|
1998-01-08 10:50:06 +00:00
|
|
|
com->intr_ctl_port = iobase + com_ier;
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
|
2002-01-31 08:26:45 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (!IS_8251(if_type) && rclk == 0)
|
|
|
|
rclk = if_16550a_type[if_type & 0x0f].rclk;
|
|
|
|
#else
|
|
|
|
if (rclk == 0)
|
|
|
|
rclk = DEFAULT_RCLK;
|
|
|
|
#endif
|
|
|
|
com->rclk = rclk;
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* We don't use all the flags from <sys/ttydefaults.h> since they
|
|
|
|
* are only relevant for logins. It's important to have echo off
|
|
|
|
* initially so that the line doesn't start blathering before the
|
|
|
|
* echo flag can be turned off.
|
|
|
|
*/
|
|
|
|
com->it_in.c_iflag = 0;
|
|
|
|
com->it_in.c_oflag = 0;
|
|
|
|
com->it_in.c_cflag = TTYDEF_CFLAG;
|
|
|
|
com->it_in.c_lflag = 0;
|
|
|
|
if (unit == comconsole) {
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
DELAY(100000);
|
|
|
|
#endif
|
|
|
|
com->it_in.c_iflag = TTYDEF_IFLAG;
|
|
|
|
com->it_in.c_oflag = TTYDEF_OFLAG;
|
|
|
|
com->it_in.c_cflag = TTYDEF_CFLAG | CLOCAL;
|
|
|
|
com->it_in.c_lflag = TTYDEF_LFLAG;
|
|
|
|
com->lt_out.c_cflag = com->lt_in.c_cflag = CLOCAL;
|
1997-06-04 10:27:53 +00:00
|
|
|
com->lt_out.c_ispeed = com->lt_out.c_ospeed =
|
|
|
|
com->lt_in.c_ispeed = com->lt_in.c_ospeed =
|
1996-11-02 10:41:28 +00:00
|
|
|
com->it_in.c_ispeed = com->it_in.c_ospeed = comdefaultrate;
|
1996-12-04 04:36:59 +00:00
|
|
|
} else
|
|
|
|
com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED;
|
1999-02-05 11:37:40 +00:00
|
|
|
if (siosetwater(com, com->it_in.c_ispeed) != 0) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
2000-01-29 04:47:22 +00:00
|
|
|
/*
|
|
|
|
* Leave i/o resources allocated if this is a `cn'-level
|
|
|
|
* console, so that other devices can't snarf them.
|
|
|
|
*/
|
|
|
|
if (iobase != siocniobase)
|
|
|
|
bus_release_resource(dev, SYS_RES_IOPORT, rid, port);
|
|
|
|
return (ENOMEM);
|
1999-02-05 11:37:40 +00:00
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
termioschars(&com->it_in);
|
|
|
|
com->it_out = com->it_in;
|
|
|
|
|
|
|
|
/* attempt to determine UART type */
|
|
|
|
printf("sio%d: type", unit);
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef PC98
|
|
|
|
#ifdef COM_MULTIPORT
|
1999-04-18 14:42:20 +00:00
|
|
|
if (!COM_ISMULTIPORT(flags) && !COM_IIR_TXRDYBUG(flags))
|
1998-01-08 10:50:06 +00:00
|
|
|
#else
|
1999-04-18 14:42:20 +00:00
|
|
|
if (!COM_IIR_TXRDYBUG(flags))
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
{
|
|
|
|
u_char scr;
|
|
|
|
u_char scr1;
|
|
|
|
u_char scr2;
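/*
 * Probe the scratch register: write two patterns and read them back.
 * An 8250 has no scratch register, so the patterns will not be read back.
 */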
|
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
scr = sio_getreg(com, com_scr);
|
|
|
|
sio_setreg(com, com_scr, 0xa5);
|
|
|
|
scr1 = sio_getreg(com, com_scr);
|
|
|
|
sio_setreg(com, com_scr, 0x5a);
|
|
|
|
scr2 = sio_getreg(com, com_scr);
|
|
|
|
sio_setreg(com, com_scr, scr);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (scr1 != 0xa5 || scr2 != 0x5a) {
|
2002-01-18 03:30:22 +00:00
|
|
|
printf(" 8250 or not responding");
|
1996-06-14 10:04:54 +00:00
|
|
|
goto determined_type;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* !PC98 */
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo && !COM_NOFIFO(flags))
|
|
|
|
com->tx_fifo_size = 16;
|
1999-01-03 05:03:47 +00:00
|
|
|
com_int_TxRx_disable( com );
|
|
|
|
com_cflag_and_speed_set( com, com->it_in.c_cflag, comdefaultrate );
|
|
|
|
com_tiocm_bic( com, TIOCM_DTR|TIOCM_RTS|TIOCM_LE );
|
|
|
|
com_send_break_off( com );
|
1999-12-06 00:23:38 +00:00
|
|
|
|
|
|
|
if (com->pc98_if_type == COM_IF_INTERNAL) {
|
|
|
|
printf(" (internal%s%s)",
|
|
|
|
com->pc98_8251fifo ? " fifo" : "",
|
|
|
|
PC98SIO_baud_rate_port(com->pc98_if_type) != -1 ?
|
|
|
|
" v-fast" : "");
|
|
|
|
} else {
|
|
|
|
printf(" 8251%s", if_8251_type[com->pc98_if_type & 0x0f].name);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
} else {
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif /* PC98 */
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo, FIFO_ENABLE | FIFO_RX_HIGH);
|
1996-06-14 10:04:54 +00:00
|
|
|
DELAY(100);
|
1997-07-17 10:35:43 +00:00
|
|
|
com->st16650a = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
switch (inb(com->int_id_port) & IIR_FIFO_MASK) {
|
|
|
|
case FIFO_RX_LOW:
|
|
|
|
printf(" 16450");
|
|
|
|
break;
|
|
|
|
case FIFO_RX_MEDL:
|
|
|
|
printf(" 16450?");
|
|
|
|
break;
|
|
|
|
case FIFO_RX_MEDH:
|
|
|
|
printf(" 16550?");
|
|
|
|
break;
|
|
|
|
case FIFO_RX_HIGH:
|
1999-04-18 14:42:20 +00:00
|
|
|
if (COM_NOFIFO(flags)) {
|
1997-05-19 12:39:42 +00:00
|
|
|
printf(" 16550A fifo disabled");
|
1996-06-14 10:04:54 +00:00
|
|
|
} else {
|
|
|
|
com->hasfifo = TRUE;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
com->tx_fifo_size = 0; /* XXX flag conflicts. */
|
|
|
|
printf(" 16550A");
|
|
|
|
#else
|
1999-04-18 14:42:20 +00:00
|
|
|
if (COM_ST16650A(flags)) {
|
1997-07-17 10:35:43 +00:00
|
|
|
com->st16650a = 1;
|
|
|
|
com->tx_fifo_size = 32;
|
|
|
|
printf(" ST16650A");
|
|
|
|
} else {
|
1999-04-18 14:42:20 +00:00
|
|
|
com->tx_fifo_size = COM_FIFOSIZE(flags);
|
1997-07-17 10:35:43 +00:00
|
|
|
printf(" 16550A");
|
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
|
|
|
com->tx_fifo_size = 2048;
|
2000-05-12 12:38:25 +00:00
|
|
|
com->rsabase = iobase;
|
1999-01-03 05:03:47 +00:00
|
|
|
outb(com->rsabase + rsa_ier, 0x00);
|
|
|
|
outb(com->rsabase + rsa_frr, 0x00);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1997-06-02 10:51:34 +00:00
|
|
|
#ifdef COM_ESP
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_ESP98)
|
|
|
|
#endif
|
1997-06-02 10:51:34 +00:00
|
|
|
for (espp = likely_esp_ports; *espp != 0; espp++)
|
1999-05-10 09:14:40 +00:00
|
|
|
if (espattach(com, *espp)) {
|
1997-06-02 10:51:34 +00:00
|
|
|
com->tx_fifo_size = 1024;
|
|
|
|
break;
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1997-07-17 10:35:43 +00:00
|
|
|
if (!com->st16650a) {
|
|
|
|
if (!com->tx_fifo_size)
|
|
|
|
com->tx_fifo_size = 16;
|
|
|
|
else
|
|
|
|
printf(" lookalike with %d bytes FIFO",
|
|
|
|
com->tx_fifo_size);
|
|
|
|
}
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
|
|
|
}
|
1997-06-02 10:51:34 +00:00
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSB3000) {
|
|
|
|
/* Set RSB-2000/3000 Extended Buffer mode. */
|
|
|
|
u_char lcr;
|
2000-05-12 12:38:25 +00:00
|
|
|
lcr = sio_getreg(com, com_cfcr);
|
|
|
|
sio_setreg(com, com_cfcr, lcr | CFCR_DLAB);
|
|
|
|
sio_setreg(com, com_emr, EMR_EXBUFF | EMR_EFMODE);
|
|
|
|
sio_setreg(com, com_cfcr, lcr);
|
1999-01-03 05:03:47 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef COM_ESP
|
|
|
|
if (com->esp) {
|
1997-01-30 10:48:06 +00:00
|
|
|
/*
|
|
|
|
* Set 16550 compatibility mode.
|
|
|
|
* We don't use the ESP_MODE_SCALE bit to increase the
|
|
|
|
* fifo trigger levels because we can't handle large
|
|
|
|
* bursts of input.
|
|
|
|
* XXX flow control should be set in comparam(), not here.
|
|
|
|
*/
|
1999-01-03 15:57:02 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(com->esp_port + ESP98_CMD1, ESP_SETMODE);
|
|
|
|
outb(com->esp_port + ESP98_CMD2, ESP_MODE_RTS | ESP_MODE_FIFO);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->esp_port + ESP_CMD1, ESP_SETMODE);
|
1997-01-30 10:48:06 +00:00
|
|
|
outb(com->esp_port + ESP_CMD2, ESP_MODE_RTS | ESP_MODE_FIFO);
|
1999-01-03 15:57:02 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/* Set RTS/CTS flow control. */
|
1999-01-03 15:57:02 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(com->esp_port + ESP98_CMD1, ESP_SETFLOWTYPE);
|
|
|
|
outb(com->esp_port + ESP98_CMD2, ESP_FLOW_RTS);
|
|
|
|
outb(com->esp_port + ESP98_CMD2, ESP_FLOW_CTS);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->esp_port + ESP_CMD1, ESP_SETFLOWTYPE);
|
|
|
|
outb(com->esp_port + ESP_CMD2, ESP_FLOW_RTS);
|
|
|
|
outb(com->esp_port + ESP_CMD2, ESP_FLOW_CTS);
|
1999-01-03 15:57:02 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/* Set flow-control levels. */
|
1999-01-03 15:57:02 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(com->esp_port + ESP98_CMD1, ESP_SETRXFLOW);
|
|
|
|
outb(com->esp_port + ESP98_CMD2, HIBYTE(768));
|
|
|
|
outb(com->esp_port + ESP98_CMD2, LOBYTE(768));
|
|
|
|
outb(com->esp_port + ESP98_CMD2, HIBYTE(512));
|
|
|
|
outb(com->esp_port + ESP98_CMD2, LOBYTE(512));
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->esp_port + ESP_CMD1, ESP_SETRXFLOW);
|
|
|
|
outb(com->esp_port + ESP_CMD2, HIBYTE(768));
|
|
|
|
outb(com->esp_port + ESP_CMD2, LOBYTE(768));
|
|
|
|
outb(com->esp_port + ESP_CMD2, HIBYTE(512));
|
|
|
|
outb(com->esp_port + ESP_CMD2, LOBYTE(512));
|
1999-01-03 15:57:02 +00:00
|
|
|
#endif
|
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
/* Set UART clock prescaler. */
|
1999-01-03 15:57:02 +00:00
|
|
|
outb(com->esp_port + ESP98_CMD1, ESP_SETCLOCK);
|
|
|
|
outb(com->esp_port + ESP98_CMD2, 2); /* 4 times */
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#endif /* COM_ESP */
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo, 0);
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
printf("%s", if_16550a_type[com->pc98_if_type & 0x0f].name);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
determined_type: ;
|
1999-01-28 11:24:36 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
#ifdef COM_MULTIPORT
|
1999-04-18 14:42:20 +00:00
|
|
|
if (COM_ISMULTIPORT(flags)) {
|
1999-11-18 12:22:09 +00:00
|
|
|
device_t masterdev;
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
com->multiport = TRUE;
|
|
|
|
printf(" (multiport");
|
1999-04-18 14:42:20 +00:00
|
|
|
if (unit == COM_MPMASTER(flags))
|
1996-06-14 10:04:54 +00:00
|
|
|
printf(" master");
|
|
|
|
printf(")");
|
2000-02-17 15:09:12 +00:00
|
|
|
masterdev = devclass_get_device(sio_devclass,
|
|
|
|
COM_MPMASTER(flags));
|
|
|
|
com->no_irq = (masterdev == NULL || bus_get_resource(masterdev,
|
|
|
|
SYS_RES_IRQ, 0, NULL, NULL) != 0);
|
1999-02-05 11:37:40 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif /* COM_MULTIPORT */
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
1997-06-04 10:27:53 +00:00
|
|
|
if (unit == comconsole)
|
|
|
|
printf(", console");
|
1999-11-18 12:22:09 +00:00
|
|
|
if (COM_IIR_TXRDYBUG(flags))
|
1998-01-08 10:50:06 +00:00
|
|
|
printf(" with a bogus IIR_TXRDY register");
|
1996-06-14 10:04:54 +00:00
|
|
|
printf("\n");
|
|
|
|
|
2000-10-25 05:19:40 +00:00
|
|
|
if (sio_fast_ih == NULL) {
|
2001-02-13 09:55:20 +00:00
|
|
|
swi_add(&tty_ithd, "tty:sio", siopoll, NULL, SWI_TTY, 0,
|
|
|
|
&sio_fast_ih);
|
|
|
|
swi_add(&clk_ithd, "tty:sio", siopoll, NULL, SWI_TTY, 0,
|
|
|
|
&sio_slow_ih);
|
1998-08-28 12:44:49 +00:00
|
|
|
}
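/*
 * Create the callin (ttyd), callout (cuaa) and the corresponding
 * initial-state and lock-state device nodes for this unit.
 */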
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[0] = make_dev(&sio_cdevsw, unit,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_ROOT, GID_WHEEL, 0600, "ttyd%r", unit);
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[1] = make_dev(&sio_cdevsw, unit | CONTROL_INIT_STATE,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_ROOT, GID_WHEEL, 0600, "ttyid%r", unit);
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[2] = make_dev(&sio_cdevsw, unit | CONTROL_LOCK_STATE,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_ROOT, GID_WHEEL, 0600, "ttyld%r", unit);
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[3] = make_dev(&sio_cdevsw, unit | CALLOUT_MASK,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_UUCP, GID_DIALER, 0660, "cuaa%r", unit);
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[4] = make_dev(&sio_cdevsw,
|
|
|
|
unit | CALLOUT_MASK | CONTROL_INIT_STATE,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_UUCP, GID_DIALER, 0660, "cuaia%r", unit);
|
2000-11-05 14:31:19 +00:00
|
|
|
com->devs[5] = make_dev(&sio_cdevsw,
|
|
|
|
unit | CALLOUT_MASK | CONTROL_LOCK_STATE,
|
1999-09-12 13:44:54 +00:00
|
|
|
UID_UUCP, GID_DIALER, 0660, "cuala%r", unit);
|
|
|
|
com->flags = flags;
|
1999-04-01 13:44:15 +00:00
|
|
|
com->pps.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
|
|
|
|
pps_init(&com->pps);
|
1999-04-18 14:42:20 +00:00
|
|
|
|
1999-09-12 13:44:54 +00:00
|
|
|
rid = 0;
|
|
|
|
com->irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1,
|
1999-11-18 12:22:09 +00:00
|
|
|
RF_ACTIVE);
|
|
|
|
if (com->irqres) {
|
|
|
|
ret = BUS_SETUP_INTR(device_get_parent(dev), dev, com->irqres,
|
2000-09-07 13:34:45 +00:00
|
|
|
INTR_TYPE_TTY | INTR_FAST,
|
1999-12-10 14:03:47 +00:00
|
|
|
siointr, com, &com->cookie);
|
1999-12-07 09:29:15 +00:00
|
|
|
if (ret) {
|
|
|
|
ret = BUS_SETUP_INTR(device_get_parent(dev), dev,
|
|
|
|
com->irqres, INTR_TYPE_TTY,
|
1999-12-10 14:03:47 +00:00
|
|
|
siointr, com, &com->cookie);
|
1999-12-07 09:29:15 +00:00
|
|
|
if (ret == 0)
|
2001-09-16 05:33:07 +00:00
|
|
|
device_printf(dev, "unable to activate interrupt in fast mode - using normal mode\n");
|
1999-12-07 09:29:15 +00:00
|
|
|
}
|
1999-11-18 12:22:09 +00:00
|
|
|
if (ret)
|
|
|
|
device_printf(dev, "could not activate interrupt\n");
|
2001-09-16 05:33:07 +00:00
|
|
|
#if defined(DDB) && (defined(BREAK_TO_DEBUGGER) || \
|
|
|
|
defined(ALT_BREAK_TO_DEBUGGER))
|
|
|
|
/*
|
|
|
|
* Enable interrupts for early break-to-debugger support
|
|
|
|
* on the console.
|
|
|
|
*/
|
|
|
|
if (ret == 0 && unit == comconsole)
|
|
|
|
outb(siocniobase + com_ier, IER_ERXRDY | IER_ERLS |
|
|
|
|
IER_EMSC);
|
|
|
|
#endif
|
1999-11-18 12:22:09 +00:00
|
|
|
}
|
1999-04-18 14:42:20 +00:00
|
|
|
|
|
|
|
return (0);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2001-09-14 05:05:08 +00:00
|
|
|
sioopen(dev, flag, mode, td)
|
1996-06-14 10:04:54 +00:00
|
|
|
dev_t dev;
|
|
|
|
int flag;
|
|
|
|
int mode;
|
2001-09-14 05:05:08 +00:00
|
|
|
struct thread *td;
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int error;
|
|
|
|
int mynor;
|
|
|
|
int s;
|
|
|
|
struct tty *tp;
|
|
|
|
int unit;
|
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
unit = MINOR_TO_UNIT(mynor);
|
1999-11-18 12:22:09 +00:00
|
|
|
com = com_addr(unit);
|
|
|
|
if (com == NULL)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (ENXIO);
|
|
|
|
if (com->gone)
|
|
|
|
return (ENXIO);
|
|
|
|
if (mynor & CONTROL_MASK)
|
|
|
|
return (0);
|
1999-09-12 13:44:54 +00:00
|
|
|
tp = dev->si_tty = com->tp = ttymalloc(com->tp);
|
1996-06-14 10:04:54 +00:00
|
|
|
s = spltty();
|
|
|
|
/*
|
|
|
|
* We jump to this label after all non-interrupted sleeps to pick
|
|
|
|
* up any changes of the device state.
|
|
|
|
*/
|
|
|
|
open_top:
|
|
|
|
while (com->state & CS_DTR_OFF) {
|
|
|
|
error = tsleep(&com->dtr_wait, TTIPRI | PCATCH, "siodtr", 0);
|
|
|
|
if (com_addr(unit) == NULL)
|
|
|
|
return (ENXIO);
|
|
|
|
if (error != 0 || com->gone)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (tp->t_state & TS_ISOPEN) {
|
|
|
|
/*
|
|
|
|
* The device is open, so everything has been initialized.
|
|
|
|
* Handle conflicts.
|
|
|
|
*/
|
|
|
|
if (mynor & CALLOUT_MASK) {
|
|
|
|
if (!com->active_out) {
|
|
|
|
error = EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (com->active_out) {
|
|
|
|
if (flag & O_NONBLOCK) {
|
|
|
|
error = EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
error = tsleep(&com->active_out,
|
|
|
|
TTIPRI | PCATCH, "siobi", 0);
|
|
|
|
if (com_addr(unit) == NULL)
|
|
|
|
return (ENXIO);
|
|
|
|
if (error != 0 || com->gone)
|
|
|
|
goto out;
|
|
|
|
goto open_top;
|
|
|
|
}
|
|
|
|
}
|
1999-01-30 12:17:38 +00:00
|
|
|
if (tp->t_state & TS_XCLUDE &&
|
2002-04-01 21:31:13 +00:00
|
|
|
suser(td)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
error = EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The device isn't open, so there are no conflicts.
|
|
|
|
* Initialize it. Initialization is done twice in many
|
|
|
|
* cases: to preempt sleeping callin opens if we are
|
|
|
|
* callout, and to complete a callin open after DCD rises.
|
|
|
|
*/
|
|
|
|
tp->t_oproc = comstart;
|
|
|
|
tp->t_param = comparam;
|
1999-11-03 09:02:23 +00:00
|
|
|
tp->t_stop = comstop;
|
1996-06-14 10:04:54 +00:00
|
|
|
tp->t_dev = dev;
|
|
|
|
tp->t_termios = mynor & CALLOUT_MASK
|
|
|
|
? com->it_out : com->it_in;
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (!IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
(void)commctl(com, TIOCM_DTR | TIOCM_RTS, DMSET);
|
|
|
|
com->poll = com->no_irq;
|
|
|
|
com->poll_output = com->loses_outints;
|
|
|
|
++com->wopeners;
|
|
|
|
error = comparam(tp, &tp->t_termios);
|
|
|
|
--com->wopeners;
|
|
|
|
if (error != 0)
|
|
|
|
goto out;
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
com_tiocm_bis(com, TIOCM_DTR|TIOCM_RTS);
|
|
|
|
pc98_msrint_start(dev);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo) {
|
|
|
|
com->pc98_8251fifo_enable = 1;
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE |
|
|
|
|
CTRL8251F_XMT_RST | CTRL8251F_RCV_RST);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* XXX we should goto open_top if comparam() slept.
|
|
|
|
*/
|
|
|
|
if (com->hasfifo) {
|
|
|
|
/*
|
|
|
|
* (Re)enable and drain fifos.
|
|
|
|
*
|
|
|
|
* Certain SMC chips cause problems if the fifos
|
|
|
|
* are enabled while input is ready. Turn off the
|
|
|
|
* fifo if necessary to clear the input. We test
|
|
|
|
* the input ready bit after enabling the fifos
|
|
|
|
* since we've already enabled them in comparam()
|
|
|
|
* and to handle races between enabling and fresh
|
|
|
|
* input.
|
|
|
|
*/
|
|
|
|
while (TRUE) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo,
|
|
|
|
FIFO_RCV_RST | FIFO_XMT_RST
|
|
|
|
| com->fifo_image);
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III)
|
2000-05-12 12:38:25 +00:00
|
|
|
outb(com->rsabase + rsa_frr , 0x00);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1997-02-02 08:09:41 +00:00
|
|
|
/*
|
|
|
|
* XXX the delays are for superstitious
|
|
|
|
* historical reasons. It must be less than
|
|
|
|
* the character time at the maximum
|
|
|
|
* supported speed (87 usec at 115200 bps
|
|
|
|
* 8N1). Otherwise we might loop endlessly
|
|
|
|
* if data is streaming in. We used to use
|
|
|
|
* delays of 100. That usually worked
|
|
|
|
* because DELAY(100) used to usually delay
|
|
|
|
* for about 85 usec instead of 100.
|
|
|
|
*/
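/* An 8N1 frame is 10 bits, so 10 bits / 115200 bps is about 87 usec. */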
|
|
|
|
DELAY(50);
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III ?
|
|
|
|
!(inb(com->rsabase + rsa_srr) & 0x08) :
|
|
|
|
!(inb(com->line_status_port) & LSR_RXRDY))
|
|
|
|
break;
|
1999-01-03 05:03:47 +00:00
|
|
|
#else
|
2000-05-12 12:38:25 +00:00
|
|
|
if (!(inb(com->line_status_port) & LSR_RXRDY))
|
|
|
|
break;
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo, 0);
|
1997-02-02 08:09:41 +00:00
|
|
|
DELAY(50);
|
1996-06-14 10:04:54 +00:00
|
|
|
(void) inb(com->data_port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
com_tiocm_bis(com, TIOCM_LE);
|
|
|
|
com->pc98_prev_modem_status = pc98_get_modem_status(com);
|
|
|
|
com_int_Rx_enable(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
} else {
|
|
|
|
#endif
|
|
|
|
(void) inb(com->line_status_port);
|
|
|
|
(void) inb(com->data_port);
|
|
|
|
com->prev_modem_status = com->last_modem_status
|
|
|
|
= inb(com->modem_status_port);
|
1999-04-18 14:42:20 +00:00
|
|
|
if (COM_IIR_TXRDYBUG(com->flags)) {
|
1998-01-08 10:50:06 +00:00
|
|
|
outb(com->intr_ctl_port, IER_ERXRDY | IER_ERLS
|
|
|
|
| IER_EMSC);
|
|
|
|
} else {
|
|
|
|
outb(com->intr_ctl_port, IER_ERXRDY | IER_ETXRDY
|
|
|
|
| IER_ERLS | IER_EMSC);
|
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
|
|
|
outb(com->rsabase + rsa_ier, 0x1d);
|
|
|
|
outb(com->intr_ctl_port, IER_ERLS | IER_EMSC);
|
|
|
|
}
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Handle initial DCD. Callout devices get a fake initial
|
|
|
|
* DCD (trapdoor DCD). If we are callout, then any sleeping
|
|
|
|
* callin opens get woken up and resume sleeping on "siobi"
|
|
|
|
* instead of "siodcd".
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
* XXX `mynor & CALLOUT_MASK' should be
|
|
|
|
* `tp->t_cflag & (SOFT_CARRIER | TRAPDOOR_CARRIER) where
|
|
|
|
* TRAPDOOR_CARRIER is the default initial state for callout
|
|
|
|
* devices and SOFT_CARRIER is like CLOCAL except it hides
|
|
|
|
* the true carrier.
|
|
|
|
*/
|
|
|
|
#ifdef PC98
|
|
|
|
if ((IS_8251(com->pc98_if_type) &&
|
|
|
|
(pc98_get_modem_status(com) & TIOCM_CAR)) ||
|
|
|
|
(!IS_8251(com->pc98_if_type) &&
|
|
|
|
(com->prev_modem_status & MSR_DCD)) ||
|
|
|
|
mynor & CALLOUT_MASK)
|
2000-05-12 12:38:25 +00:00
|
|
|
(*linesw[tp->t_line].l_modem)(tp, 1);
|
1996-06-14 10:04:54 +00:00
|
|
|
#else
|
|
|
|
if (com->prev_modem_status & MSR_DCD || mynor & CALLOUT_MASK)
|
|
|
|
(*linesw[tp->t_line].l_modem)(tp, 1);
|
2000-05-12 12:38:25 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Wait for DCD if necessary.
|
|
|
|
*/
|
|
|
|
if (!(tp->t_state & TS_CARR_ON) && !(mynor & CALLOUT_MASK)
|
|
|
|
&& !(tp->t_cflag & CLOCAL) && !(flag & O_NONBLOCK)) {
|
|
|
|
++com->wopeners;
|
|
|
|
error = tsleep(TSA_CARR_ON(tp), TTIPRI | PCATCH, "siodcd", 0);
|
|
|
|
if (com_addr(unit) == NULL)
|
|
|
|
return (ENXIO);
|
|
|
|
--com->wopeners;
|
|
|
|
if (error != 0 || com->gone)
|
|
|
|
goto out;
|
|
|
|
goto open_top;
|
|
|
|
}
|
|
|
|
error = (*linesw[tp->t_line].l_open)(dev, tp);
|
|
|
|
disc_optim(tp, &tp->t_termios, com);
|
|
|
|
if (tp->t_state & TS_ISOPEN && mynor & CALLOUT_MASK)
|
|
|
|
com->active_out = TRUE;
|
|
|
|
siosettimeout();
|
|
|
|
out:
|
|
|
|
splx(s);
|
|
|
|
if (!(tp->t_state & TS_ISOPEN) && com->wopeners == 0)
|
|
|
|
comhardclose(com);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2001-09-14 05:05:08 +00:00
|
|
|
sioclose(dev, flag, mode, td)
|
1996-06-14 10:04:54 +00:00
|
|
|
dev_t dev;
|
|
|
|
int flag;
|
|
|
|
int mode;
|
2001-09-14 05:05:08 +00:00
|
|
|
struct thread *td;
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int mynor;
|
|
|
|
int s;
|
|
|
|
struct tty *tp;
|
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
if (mynor & CONTROL_MASK)
|
|
|
|
return (0);
|
|
|
|
com = com_addr(MINOR_TO_UNIT(mynor));
|
2000-03-12 13:14:51 +00:00
|
|
|
if (com == NULL)
|
|
|
|
return (ENODEV);
|
1996-06-14 10:04:54 +00:00
|
|
|
tp = com->tp;
|
|
|
|
s = spltty();
|
|
|
|
(*linesw[tp->t_line].l_close)(tp, flag);
|
|
|
|
#ifdef PC98
|
|
|
|
com->modem_checking = 0;
|
|
|
|
#endif
|
|
|
|
disc_optim(tp, &tp->t_termios, com);
|
1999-09-25 16:21:39 +00:00
|
|
|
comstop(tp, FREAD | FWRITE);
|
1996-06-14 10:04:54 +00:00
|
|
|
comhardclose(com);
|
|
|
|
ttyclose(tp);
|
|
|
|
siosettimeout();
|
|
|
|
splx(s);
|
|
|
|
if (com->gone) {
|
|
|
|
printf("sio%d: gone\n", com->unit);
|
|
|
|
s = spltty();
|
1999-02-05 11:37:40 +00:00
|
|
|
if (com->ibuf != NULL)
|
|
|
|
free(com->ibuf, M_DEVBUF);
|
1999-04-19 11:11:01 +00:00
|
|
|
bzero(tp, sizeof *tp);
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
comhardclose(com)
|
|
|
|
struct com_s *com;
|
|
|
|
{
|
|
|
|
int s;
|
|
|
|
struct tty *tp;
|
|
|
|
int unit;
|
|
|
|
|
|
|
|
unit = com->unit;
|
|
|
|
s = spltty();
|
|
|
|
com->poll = FALSE;
|
|
|
|
com->poll_output = FALSE;
|
1997-02-02 08:09:41 +00:00
|
|
|
com->do_timestamp = FALSE;
|
|
|
|
com->do_dcd_timestamp = FALSE;
|
1999-04-01 13:44:15 +00:00
|
|
|
com->pps.ppsparam.mode = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
2000-05-12 12:38:25 +00:00
|
|
|
com_send_break_off(com);
|
|
|
|
else
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, com->cfcr_image &= ~CFCR_SBREAK);
|
2001-09-16 05:33:07 +00:00
|
|
|
tp = com->tp;
|
|
|
|
|
|
|
|
#if defined(DDB) && (defined(BREAK_TO_DEBUGGER) || \
|
|
|
|
defined(ALT_BREAK_TO_DEBUGGER))
|
|
|
|
/*
|
|
|
|
* Leave interrupts enabled and don't clear DTR if this is the
|
|
|
|
* console. This allows us to detect break-to-debugger events
|
|
|
|
* while the console device is closed.
|
|
|
|
*/
|
|
|
|
if (com->unit != comconsole)
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
#ifdef PC98
|
2001-09-16 05:33:07 +00:00
|
|
|
int tmp;
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
com_int_TxRx_disable(com);
|
|
|
|
else
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_ier, 0);
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III)
|
1999-01-03 05:03:47 +00:00
|
|
|
outb(com->rsabase + rsa_ier, 0x00);
|
|
|
|
if (IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
tmp = pc98_get_modem_status(com) & TIOCM_CAR;
|
|
|
|
else
|
|
|
|
tmp = com->prev_modem_status & MSR_DCD;
|
2001-09-16 05:33:07 +00:00
|
|
|
#else
|
|
|
|
sio_setreg(com, com_ier, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
if (tp->t_cflag & HUPCL
|
|
|
|
/*
|
|
|
|
* XXX we will miss any carrier drop between here and the
|
|
|
|
* next open. Perhaps we should watch DCD even when the
|
|
|
|
* port is closed; it is not sufficient to check it at
|
|
|
|
* the next open because it might go up and down while
|
|
|
|
* we're not watching.
|
|
|
|
*/
|
1999-05-09 05:00:54 +00:00
|
|
|
|| (!com->active_out
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
&& !(tmp)
|
1996-06-14 10:04:54 +00:00
|
|
|
#else
|
1999-05-09 05:00:54 +00:00
|
|
|
&& !(com->prev_modem_status & MSR_DCD)
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1999-05-09 05:00:54 +00:00
|
|
|
&& !(com->it_in.c_cflag & CLOCAL))
|
1996-06-14 10:04:54 +00:00
|
|
|
|| !(tp->t_state & TS_ISOPEN)) {
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
|
|
|
com_tiocm_bic(com, TIOCM_DTR|TIOCM_RTS|TIOCM_LE);
|
1996-06-14 10:04:54 +00:00
|
|
|
else
|
|
|
|
#endif
|
|
|
|
(void)commctl(com, TIOCM_DTR, DMBIC);
|
1997-04-19 14:54:32 +00:00
|
|
|
if (com->dtr_wait != 0 && !(com->state & CS_DTR_OFF)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
timeout(siodtrwakeup, com, com->dtr_wait);
|
|
|
|
com->state |= CS_DTR_OFF;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
else {
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bic(com, TIOCM_LE);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
1999-12-06 00:23:38 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_8251fifo) {
|
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_XMT_RST | CTRL8251F_RCV_RST);
|
|
|
|
com->pc98_8251fifo_enable = 0;
|
|
|
|
}
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if (com->hasfifo) {
|
|
|
|
/*
|
|
|
|
* Disable fifos so that they are off after controlled
|
|
|
|
* reboots. Some BIOSes fail to detect 16550s when the
|
|
|
|
* fifos are enabled.
|
|
|
|
*/
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
com->active_out = FALSE;
|
|
|
|
wakeup(&com->active_out);
|
|
|
|
wakeup(TSA_CARR_ON(tp)); /* restart any wopeners */
|
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
sioread(dev, uio, flag)
|
|
|
|
dev_t dev;
|
|
|
|
struct uio *uio;
|
|
|
|
int flag;
|
|
|
|
{
|
|
|
|
int mynor;
|
1999-05-10 09:14:40 +00:00
|
|
|
struct com_s *com;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
if (mynor & CONTROL_MASK)
|
|
|
|
return (ENODEV);
|
1999-05-10 09:14:40 +00:00
|
|
|
com = com_addr(MINOR_TO_UNIT(mynor));
|
2000-03-12 13:14:51 +00:00
|
|
|
if (com == NULL || com->gone)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (ENODEV);
|
1999-05-10 09:14:40 +00:00
|
|
|
return ((*linesw[com->tp->t_line].l_read)(com->tp, uio, flag));
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
siowrite(dev, uio, flag)
|
|
|
|
dev_t dev;
|
|
|
|
struct uio *uio;
|
|
|
|
int flag;
|
|
|
|
{
|
|
|
|
int mynor;
|
1999-05-10 09:14:40 +00:00
|
|
|
struct com_s *com;
|
1996-06-14 10:04:54 +00:00
|
|
|
int unit;
|
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
if (mynor & CONTROL_MASK)
|
|
|
|
return (ENODEV);
|
|
|
|
|
|
|
|
unit = MINOR_TO_UNIT(mynor);
|
1999-05-10 09:14:40 +00:00
|
|
|
com = com_addr(unit);
|
2000-03-12 13:14:51 +00:00
|
|
|
if (com == NULL || com->gone)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (ENODEV);
|
|
|
|
/*
|
|
|
|
* (XXX) We disallow virtual consoles if the physical console is
|
|
|
|
* a serial port. This is in case there is a display attached that
|
|
|
|
* is not the console. In that situation we don't need/want the X
|
|
|
|
* server taking over the console.
|
|
|
|
*/
|
|
|
|
if (constty != NULL && unit == comconsole)
|
|
|
|
constty = NULL;
|
1999-05-10 09:14:40 +00:00
|
|
|
return ((*linesw[com->tp->t_line].l_write)(com->tp, uio, flag));
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
1996-12-04 04:36:59 +00:00
|
|
|
static void
|
|
|
|
siobusycheck(chan)
|
|
|
|
void *chan;
|
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int s;
|
|
|
|
|
|
|
|
com = (struct com_s *)chan;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear TS_BUSY if low-level output is complete.
|
|
|
|
* spl locking is sufficient because siointr1() does not set CS_BUSY.
|
1997-04-19 14:54:32 +00:00
|
|
|
* If siointr1() clears CS_BUSY after we look at it, then we'll get
|
1996-12-04 04:36:59 +00:00
|
|
|
* called again. Reading the line status port outside of siointr1()
|
|
|
|
* is safe because CS_BUSY is clear so there are no output interrupts
|
|
|
|
* to lose.
|
|
|
|
*/
|
|
|
|
s = spltty();
|
|
|
|
if (com->state & CS_BUSY)
|
1997-04-19 14:54:32 +00:00
|
|
|
com->extra_state &= ~CSE_BUSYCHECK; /* False alarm. */
|
1997-03-06 15:07:04 +00:00
|
|
|
#ifdef PC98
|
1999-02-02 17:26:03 +00:00
|
|
|
else if ((IS_8251(com->pc98_if_type) &&
|
1999-12-06 00:23:38 +00:00
|
|
|
((com->pc98_8251fifo_enable &&
|
|
|
|
(inb(I8251F_lsr) & (STS8251F_TxRDY | STS8251F_TxEMP))
|
|
|
|
== (STS8251F_TxRDY | STS8251F_TxEMP)) ||
|
|
|
|
(!com->pc98_8251fifo_enable &&
|
|
|
|
(inb(com->sts_port) & (STS8251_TxRDY | STS8251_TxEMP))
|
|
|
|
== (STS8251_TxRDY | STS8251_TxEMP)))) ||
|
|
|
|
((inb(com->line_status_port) & (LSR_TSRE | LSR_TXRDY))
|
|
|
|
== (LSR_TSRE | LSR_TXRDY))) {
|
1997-03-06 15:07:04 +00:00
|
|
|
#else
|
1996-12-04 04:36:59 +00:00
|
|
|
else if ((inb(com->line_status_port) & (LSR_TSRE | LSR_TXRDY))
|
|
|
|
== (LSR_TSRE | LSR_TXRDY)) {
|
1997-03-06 15:07:04 +00:00
|
|
|
#endif
|
1996-12-04 04:36:59 +00:00
|
|
|
com->tp->t_state &= ~TS_BUSY;
|
|
|
|
ttwwakeup(com->tp);
|
1997-04-19 14:54:32 +00:00
|
|
|
com->extra_state &= ~CSE_BUSYCHECK;
|
1996-12-04 04:36:59 +00:00
|
|
|
} else
|
|
|
|
timeout(siobusycheck, com, hz / 100);
|
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
2002-01-31 08:26:45 +00:00
|
|
|
static u_int
|
|
|
|
siodivisor(rclk, speed)
|
|
|
|
u_long rclk;
|
|
|
|
speed_t speed;
|
|
|
|
{
|
|
|
|
long actual_speed;
|
|
|
|
u_int divisor;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (speed == 0 || speed > (ULONG_MAX - 1) / 8)
|
|
|
|
return (0);
|
|
|
|
divisor = (rclk / (8UL * speed) + 1) / 2;
|
|
|
|
if (divisor == 0 || divisor >= 65536)
|
|
|
|
return (0);
|
|
|
|
actual_speed = rclk / (16UL * divisor);
|
|
|
|
|
|
|
|
/* 10 times error in percent: */
|
|
|
|
error = ((actual_speed - (long)speed) * 2000 / (long)speed + 1) / 2;
|
|
|
|
|
|
|
|
/* 3.0% maximum error tolerance: */
|
|
|
|
if (error < -30 || error > 30)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
return (divisor);
|
|
|
|
}
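/*
 * Example (rclk value assumed for illustration): with the common
 * 1843200 Hz UART clock and speed = 9600 bps,
 *	divisor      = (1843200 / (8 * 9600) + 1) / 2 = (24 + 1) / 2 = 12
 *	actual_speed = 1843200 / (16 * 12) = 9600
 *	error        = 0 tenths of a percent, inside the +-30 limit,
 * so 12 is returned.  A speed the clock cannot approximate within
 * 3.0% makes siodivisor() return 0, signalling an unusable rate.
 */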
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
static void
|
|
|
|
siodtrwakeup(chan)
|
|
|
|
void *chan;
|
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
|
|
|
|
com = (struct com_s *)chan;
|
|
|
|
com->state &= ~CS_DTR_OFF;
|
|
|
|
wakeup(&com->dtr_wait);
|
|
|
|
}
|
|
|
|
|
2000-09-07 13:34:45 +00:00
|
|
|
/*
|
2001-01-27 13:02:06 +00:00
|
|
|
* Call this function with the sio_lock mutex held. It will return with the
|
|
|
|
* lock still held.
|
2000-09-07 13:34:45 +00:00
|
|
|
*/
|
1999-02-05 11:37:40 +00:00
|
|
|
static void
|
|
|
|
sioinput(com)
|
|
|
|
struct com_s *com;
|
|
|
|
{
|
|
|
|
u_char *buf;
|
|
|
|
int incc;
|
|
|
|
u_char line_status;
|
|
|
|
int recv_data;
|
|
|
|
struct tty *tp;
|
|
|
|
|
|
|
|
buf = com->ibuf;
|
|
|
|
tp = com->tp;
|
|
|
|
if (!(tp->t_state & TS_ISOPEN) || !(tp->t_cflag & CREAD)) {
|
|
|
|
com_events -= (com->iptr - com->ibuf);
|
|
|
|
com->iptr = com->ibuf;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (tp->t_state & TS_CAN_BYPASS_L_RINT) {
|
|
|
|
/*
|
|
|
|
* Avoid the grotesquely inefficient lineswitch routine
|
|
|
|
* (ttyinput) in "raw" mode. It usually takes about 450
|
|
|
|
* instructions (that's without canonical processing or echo!).
|
|
|
|
* slinput is reasonably fast (usually 40 instructions plus
|
|
|
|
* call overhead).
|
|
|
|
*/
|
|
|
|
do {
|
2000-09-07 13:34:45 +00:00
|
|
|
/*
|
|
|
|
* This may look odd, but it is using save-and-enable
|
|
|
|
* semantics instead of the save-and-disable semantics
|
|
|
|
* that are used everywhere else.
|
|
|
|
*/
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarily, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
incc = com->iptr - buf;
|
|
|
|
if (tp->t_rawq.c_cc + incc > tp->t_ihiwat
|
|
|
|
&& (com->state & CS_RTS_IFLOW
|
|
|
|
|| tp->t_iflag & IXOFF)
|
|
|
|
&& !(tp->t_state & TS_TBLOCK))
|
|
|
|
ttyblock(tp);
|
|
|
|
com->delta_error_counts[CE_TTY_BUF_OVERFLOW]
|
|
|
|
+= b_to_q((char *)buf, incc, &tp->t_rawq);
|
|
|
|
buf += incc;
|
|
|
|
tk_nin += incc;
|
|
|
|
tk_rawcc += incc;
|
|
|
|
tp->t_rawcc += incc;
|
|
|
|
ttwakeup(tp);
|
|
|
|
if (tp->t_state & TS_TTSTOP
|
|
|
|
&& (tp->t_iflag & IXANY
|
|
|
|
|| tp->t_cc[VSTART] == tp->t_cc[VSTOP])) {
|
|
|
|
tp->t_state &= ~TS_TTSTOP;
|
|
|
|
tp->t_lflag &= ~FLUSHO;
|
|
|
|
comstart(tp);
|
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
} while (buf < com->iptr);
|
|
|
|
} else {
|
|
|
|
do {
|
2000-09-07 13:34:45 +00:00
|
|
|
/*
|
|
|
|
* This may look odd, but it is using save-and-enable
|
|
|
|
* semantics instead of the save-and-disable semantics
|
|
|
|
* that are used everywhere else.
|
|
|
|
*/
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
line_status = buf[com->ierroff];
|
|
|
|
recv_data = *buf++;
|
|
|
|
if (line_status
|
|
|
|
& (LSR_BI | LSR_FE | LSR_OE | LSR_PE)) {
|
|
|
|
if (line_status & LSR_BI)
|
|
|
|
recv_data |= TTY_BI;
|
|
|
|
if (line_status & LSR_FE)
|
|
|
|
recv_data |= TTY_FE;
|
|
|
|
if (line_status & LSR_OE)
|
|
|
|
recv_data |= TTY_OE;
|
|
|
|
if (line_status & LSR_PE)
|
|
|
|
recv_data |= TTY_PE;
|
|
|
|
}
|
|
|
|
(*linesw[tp->t_line].l_rint)(recv_data, tp);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
} while (buf < com->iptr);
|
|
|
|
}
|
|
|
|
com_events -= (com->iptr - com->ibuf);
|
|
|
|
com->iptr = com->ibuf;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* There is now room for another low-level buffer full of input,
|
|
|
|
* so enable RTS if it is now disabled and there is room in the
|
|
|
|
* high-level buffer.
|
|
|
|
*/
|
|
|
|
#ifdef PC98
|
1999-11-29 13:20:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
if ((com->state & CS_RTS_IFLOW) &&
|
|
|
|
!(com_tiocm_get(com) & TIOCM_RTS) &&
|
|
|
|
!(tp->t_state & TS_TBLOCK))
|
1999-02-05 11:37:40 +00:00
|
|
|
com_tiocm_bis(com, TIOCM_RTS);
|
1999-11-29 13:20:47 +00:00
|
|
|
} else {
|
|
|
|
if ((com->state & CS_RTS_IFLOW) &&
|
|
|
|
!(com->mcr_image & MCR_RTS) &&
|
|
|
|
!(tp->t_state & TS_TBLOCK))
|
1999-02-05 11:37:40 +00:00
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
1999-11-29 13:20:47 +00:00
|
|
|
}
|
1999-02-05 11:37:40 +00:00
|
|
|
#else
|
|
|
|
if ((com->state & CS_RTS_IFLOW) && !(com->mcr_image & MCR_RTS) &&
|
|
|
|
!(tp->t_state & TS_TBLOCK))
|
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
1999-04-18 14:42:20 +00:00
|
|
|
void
|
|
|
|
siointr(arg)
|
|
|
|
void *arg;
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
1999-05-10 09:14:40 +00:00
|
|
|
struct com_s *com;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
u_char rsa_buf_status;
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2001-01-27 13:02:06 +00:00
|
|
|
#ifndef COM_MULTIPORT
|
|
|
|
com = (struct com_s *)arg;
|
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
2001-01-27 13:02:06 +00:00
|
|
|
siointr1(com);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
2001-01-27 13:02:06 +00:00
|
|
|
#else /* COM_MULTIPORT */
|
|
|
|
bool_t possibly_more_intrs;
|
|
|
|
int unit;
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Loop until there is no activity on any port. This is necessary
|
|
|
|
* to get an interrupt edge more than to avoid another interrupt.
|
|
|
|
* If the IRQ signal is just an OR of the IRQ signals from several
|
|
|
|
* devices, then the edge from one may be lost because another is
|
|
|
|
* on.
|
|
|
|
*/
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
do {
|
|
|
|
possibly_more_intrs = FALSE;
|
1999-11-18 12:22:09 +00:00
|
|
|
for (unit = 0; unit < sio_numunits; ++unit) {
|
1996-06-14 10:04:54 +00:00
|
|
|
com = com_addr(unit);
|
1997-08-30 15:47:49 +00:00
|
|
|
/*
|
1997-09-01 10:45:02 +00:00
|
|
|
* XXX COM_LOCK();
|
1997-08-30 15:47:49 +00:00
|
|
|
* would it work here, or be counter-productive?
|
|
|
|
*/
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com != NULL
|
|
|
|
&& !com->gone
|
2000-05-12 12:38:25 +00:00
|
|
|
&& IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
siointr1(com);
|
2000-05-12 12:38:25 +00:00
|
|
|
} else if (com != NULL
|
1999-01-03 05:03:47 +00:00
|
|
|
&& !com->gone
|
|
|
|
&& com->pc98_if_type == COM_IF_RSA98III) {
|
2000-05-12 12:38:25 +00:00
|
|
|
rsa_buf_status =
|
|
|
|
inb(com->rsabase + rsa_srr) & 0xc9;
|
|
|
|
if ((rsa_buf_status & 0xc8)
|
|
|
|
|| !(rsa_buf_status & 0x01)) {
|
|
|
|
siointr1(com);
|
|
|
|
if (rsa_buf_status !=
|
|
|
|
(inb(com->rsabase + rsa_srr) & 0xc9))
|
|
|
|
possibly_more_intrs = TRUE;
|
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
} else
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if (com != NULL
|
|
|
|
&& !com->gone
|
|
|
|
&& (inb(com->int_id_port) & IIR_IMASK)
|
|
|
|
!= IIR_NOPEND) {
|
|
|
|
siointr1(com);
|
|
|
|
possibly_more_intrs = TRUE;
|
|
|
|
}
|
1997-09-01 10:45:02 +00:00
|
|
|
/* XXX COM_UNLOCK(); */
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
} while (possibly_more_intrs);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif /* COM_MULTIPORT */
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
siointr1(com)
|
|
|
|
struct com_s *com;
|
|
|
|
{
|
|
|
|
u_char line_status;
|
|
|
|
u_char modem_status;
|
|
|
|
u_char *ioptr;
|
|
|
|
u_char recv_data;
|
1998-01-08 10:50:06 +00:00
|
|
|
u_char int_ctl;
|
|
|
|
u_char int_ctl_new;
|
1999-04-01 13:44:15 +00:00
|
|
|
struct timecounter *tc;
|
|
|
|
u_int count;
|
1998-01-08 10:50:06 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
u_char tmp = 0;
|
1999-01-03 05:03:47 +00:00
|
|
|
u_char rsa_buf_status = 0;
|
2000-05-12 12:38:25 +00:00
|
|
|
int rsa_tx_fifo_size = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif /* PC98 */
|
|
|
|
|
1998-01-08 10:50:06 +00:00
|
|
|
int_ctl = inb(com->intr_ctl_port);
|
|
|
|
int_ctl_new = int_ctl;
|
|
|
|
|
1998-02-15 11:18:47 +00:00
|
|
|
while (!com->gone) {
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
|
|
|
status_read:;
|
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
tmp = inb(I8251F_lsr);
|
|
|
|
else
|
|
|
|
tmp = inb(com->sts_port);
|
1996-06-14 10:04:54 +00:00
|
|
|
more_intr:
|
|
|
|
line_status = 0;
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable) {
|
|
|
|
if (tmp & STS8251F_TxRDY) line_status |= LSR_TXRDY;
|
|
|
|
if (tmp & STS8251F_RxRDY) line_status |= LSR_RXRDY;
|
|
|
|
if (tmp & STS8251F_TxEMP) line_status |= LSR_TSRE;
|
|
|
|
if (tmp & STS8251F_PE) line_status |= LSR_PE;
|
|
|
|
if (tmp & STS8251F_OE) line_status |= LSR_OE;
|
|
|
|
if (tmp & STS8251F_BD_SD) line_status |= LSR_BI;
|
|
|
|
} else {
|
|
|
|
if (tmp & STS8251_TxRDY) line_status |= LSR_TXRDY;
|
|
|
|
if (tmp & STS8251_RxRDY) line_status |= LSR_RXRDY;
|
|
|
|
if (tmp & STS8251_TxEMP) line_status |= LSR_TSRE;
|
|
|
|
if (tmp & STS8251_PE) line_status |= LSR_PE;
|
|
|
|
if (tmp & STS8251_OE) line_status |= LSR_OE;
|
|
|
|
if (tmp & STS8251_FE) line_status |= LSR_FE;
|
|
|
|
if (tmp & STS8251_BD_SD) line_status |= LSR_BI;
|
|
|
|
}
|
1999-05-05 01:53:43 +00:00
|
|
|
} else {
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif /* PC98 */
|
1999-04-01 13:44:15 +00:00
|
|
|
if (com->pps.ppsparam.mode & PPS_CAPTUREBOTH) {
|
|
|
|
modem_status = inb(com->modem_status_port);
|
|
|
|
if ((modem_status ^ com->last_modem_status) & MSR_DCD) {
|
|
|
|
tc = timecounter;
|
|
|
|
count = tc->tc_get_timecount(tc);
|
|
|
|
pps_event(&com->pps, tc, count,
|
|
|
|
(modem_status & MSR_DCD) ?
|
1999-09-12 13:44:54 +00:00
|
|
|
PPS_CAPTUREASSERT : PPS_CAPTURECLEAR);
|
1999-04-01 13:44:15 +00:00
|
|
|
}
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
line_status = inb(com->line_status_port);
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
1999-05-05 01:53:43 +00:00
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III)
|
|
|
|
rsa_buf_status = inb(com->rsabase + rsa_srr);
|
|
|
|
#endif /* PC98 */
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
/* input event? (check first to help avoid overruns) */
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifndef PC98
|
1996-06-14 10:04:54 +00:00
|
|
|
while (line_status & LSR_RCV_MASK) {
|
1999-01-03 05:03:47 +00:00
|
|
|
#else
|
|
|
|
while ((line_status & LSR_RCV_MASK)
|
|
|
|
|| (com->pc98_if_type == COM_IF_RSA98III
|
|
|
|
&& (rsa_buf_status & 0x08))) {
|
|
|
|
#endif /* PC98 */
|
1996-06-14 10:04:54 +00:00
|
|
|
/* break/unattached error bits or real input? */
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable) {
|
|
|
|
recv_data = inb(I8251F_data);
|
|
|
|
if (tmp & (STS8251F_PE | STS8251F_OE |
|
|
|
|
STS8251F_BD_SD)) {
|
|
|
|
pc98_i8251_or_cmd(com, CMD8251_ER);
|
|
|
|
recv_data = 0;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
recv_data = inb(com->data_port);
|
|
|
|
if (tmp & (STS8251_PE | STS8251_OE |
|
|
|
|
STS8251_FE | STS8251_BD_SD)) {
|
|
|
|
pc98_i8251_or_cmd(com, CMD8251_ER);
|
1996-06-14 10:04:54 +00:00
|
|
|
recv_data = 0;
|
1999-12-06 00:23:38 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
} else if (com->pc98_if_type == COM_IF_RSA98III) {
|
1999-12-06 00:23:38 +00:00
|
|
|
if (!(rsa_buf_status & 0x08))
|
|
|
|
recv_data = 0;
|
|
|
|
else
|
|
|
|
recv_data = inb(com->data_port);
|
1999-01-03 05:03:47 +00:00
|
|
|
} else
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if (!(line_status & LSR_RXRDY))
|
|
|
|
recv_data = 0;
|
|
|
|
else
|
|
|
|
recv_data = inb(com->data_port);
|
2001-09-16 05:33:07 +00:00
|
|
|
#if defined(DDB) && defined(ALT_BREAK_TO_DEBUGGER)
|
|
|
|
/*
|
|
|
|
* Solaris implements a new BREAK which is initiated
|
|
|
|
* by the character sequence CR ~ ^B, which is similar
|
|
|
|
* to a familiar pattern used on Sun servers by the
|
|
|
|
* Remote Console.
|
|
|
|
*/
|
|
|
|
#define KEY_CRTLB 2 /* ^B */
|
|
|
|
#define KEY_CR 13 /* CR '\r' */
|
|
|
|
#define KEY_TILDE 126 /* ~ */
|
|
|
|
|
|
|
|
if (com->unit == comconsole) {
|
|
|
|
static int brk_state1 = 0, brk_state2 = 0;
|
|
|
|
if (recv_data == KEY_CR) {
|
|
|
|
brk_state1 = recv_data;
|
|
|
|
brk_state2 = 0;
|
|
|
|
} else if (brk_state1 == KEY_CR && (recv_data == KEY_TILDE || recv_data == KEY_CRTLB)) {
|
|
|
|
if (recv_data == KEY_TILDE)
|
|
|
|
brk_state2 = recv_data;
|
|
|
|
else if (brk_state2 == KEY_TILDE && recv_data == KEY_CRTLB) {
|
|
|
|
breakpoint();
|
|
|
|
brk_state1 = brk_state2 = 0;
|
|
|
|
goto cont;
|
|
|
|
} else
|
|
|
|
brk_state2 = 0;
|
|
|
|
} else
|
|
|
|
brk_state1 = 0;
|
|
|
|
}
|
|
|
|
#endif
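			/*
			 * The state machine above recognizes the console
			 * byte sequence <CR> '~' ^B and then calls
			 * breakpoint(); a new CR restarts the match and any
			 * byte other than CR, '~' or ^B clears it
			 * (brk_state1/brk_state2 track the progress).
			 */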
|
1996-12-04 04:36:59 +00:00
|
|
|
if (line_status & (LSR_BI | LSR_FE | LSR_PE)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
1996-12-04 04:36:59 +00:00
|
|
|
* Don't store BI if IGNBRK or FE/PE if IGNPAR.
|
|
|
|
* Otherwise, push the work to a higher level
|
|
|
|
* (to handle PARMRK) if we're bypassing.
|
|
|
|
* Otherwise, convert BI/FE and PE+INPCK to 0.
|
|
|
|
*
|
|
|
|
* This makes bypassing work right in the
|
|
|
|
* usual "raw" case (IGNBRK set, and IGNPAR
|
|
|
|
* and INPCK clear).
|
|
|
|
*
|
|
|
|
* Note: BI together with FE/PE means just BI.
|
|
|
|
*/
|
|
|
|
if (line_status & LSR_BI) {
|
|
|
|
#if defined(DDB) && defined(BREAK_TO_DEBUGGER)
|
|
|
|
if (com->unit == comconsole) {
|
1996-12-24 11:47:52 +00:00
|
|
|
breakpoint();
|
1996-12-04 04:36:59 +00:00
|
|
|
goto cont;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (com->tp == NULL
|
|
|
|
|| com->tp->t_iflag & IGNBRK)
|
|
|
|
goto cont;
|
|
|
|
} else {
|
|
|
|
if (com->tp == NULL
|
|
|
|
|| com->tp->t_iflag & IGNPAR)
|
|
|
|
goto cont;
|
|
|
|
}
|
|
|
|
if (com->tp->t_state & TS_CAN_BYPASS_L_RINT
|
|
|
|
&& (line_status & (LSR_BI | LSR_FE)
|
|
|
|
|| com->tp->t_iflag & INPCK))
|
1996-06-14 10:04:54 +00:00
|
|
|
recv_data = 0;
|
|
|
|
}
|
|
|
|
++com->bytes_in;
|
|
|
|
if (com->hotchar != 0 && recv_data == com->hotchar)
|
Change the preemption code for software interrupt thread schedules and
mutex releases to not require flags for the cases when preemption is
not allowed:
The purpose of the MTX_NOSWITCH and SWI_NOSWITCH flags is to prevent
switching to a higher priority thread on mutex releease and swi schedule,
respectively when that switch is not safe. Now that the critical section
API maintains a per-thread nesting count, the kernel can easily check
whether or not it should switch without relying on flags from the
programmer. This fixes a few bugs in that all current callers of
swi_sched() used SWI_NOSWITCH, when in fact, only the ones called from
fast interrupt handlers and the swi_sched of softclock needed this flag.
Note that to ensure that swi_sched()'s in clock and fast interrupt
handlers do not switch, these handlers have to be explicitly wrapped
in critical_enter/exit pairs. Presently, just wrapping the handlers is
sufficient, but in the future with the fully preemptive kernel, the
interrupt must be EOI'd before critical_exit() is called. (critical_exit()
can switch due to a deferred preemption in a fully preemptive kernel.)
I've tested the changes to the interrupt code on i386 and alpha. I have
not tested ia64, but the interrupt code is almost identical to the alpha
code, so I expect it will work fine. PowerPC and ARM do not yet have
interrupt code in the tree so they shouldn't be broken. Sparc64 is
broken, but that's been ok'd by jake and tmm who will be fixing the
interrupt code for sparc64 shortly.
Reviewed by: peter
Tested on: i386, alpha
2002-01-05 08:47:13 +00:00
|
|
|
swi_sched(sio_fast_ih, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
ioptr = com->iptr;
|
|
|
|
if (ioptr >= com->ibufend)
|
|
|
|
CE_RECORD(com, CE_INTERRUPT_BUF_OVERFLOW);
|
|
|
|
else {
|
1996-07-23 07:46:59 +00:00
|
|
|
if (com->do_timestamp)
|
|
|
|
microtime(&com->timestamp);
|
1996-06-14 10:04:54 +00:00
|
|
|
++com_events;
|
2001-02-13 09:55:20 +00:00
|
|
|
swi_sched(sio_slow_ih, SWI_DELAY);
|
1996-06-14 10:04:54 +00:00
|
|
|
#if 0 /* for testing input latency vs efficiency */
|
|
|
|
if (com->iptr - com->ibuf == 8)
|
2002-01-05 08:47:13 +00:00
|
|
|
swi_sched(sio_fast_ih, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
ioptr[0] = recv_data;
|
1999-02-05 11:37:40 +00:00
|
|
|
ioptr[com->ierroff] = line_status;
|
1996-06-14 10:04:54 +00:00
|
|
|
com->iptr = ++ioptr;
|
|
|
|
if (ioptr == com->ihighwater
|
|
|
|
&& com->state & CS_RTS_IFLOW)
|
|
|
|
#ifdef PC98
|
1999-11-29 13:20:47 +00:00
|
|
|
IS_8251(com->pc98_if_type) ?
|
|
|
|
com_tiocm_bic(com, TIOCM_RTS) :
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
outb(com->modem_ctl_port,
|
|
|
|
com->mcr_image &= ~MCR_RTS);
|
|
|
|
if (line_status & LSR_OE)
|
|
|
|
CE_RECORD(com, CE_OVERRUN);
|
|
|
|
}
|
|
|
|
cont:
|
|
|
|
/*
|
|
|
|
* "& 0x7F" is to avoid the gcc-1.40 generating a slow
|
|
|
|
* jump from the top of the loop to here
|
|
|
|
*/
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
goto status_read;
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
line_status = inb(com->line_status_port) & 0x7F;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III)
|
|
|
|
rsa_buf_status = inb(com->rsabase + rsa_srr);
|
|
|
|
#endif /* PC98 */
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* modem status change? (always check before doing output) */
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (!IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
modem_status = inb(com->modem_status_port);
|
|
|
|
if (modem_status != com->last_modem_status) {
|
1996-07-23 07:46:59 +00:00
|
|
|
if (com->do_dcd_timestamp
|
|
|
|
&& !(com->last_modem_status & MSR_DCD)
|
|
|
|
&& modem_status & MSR_DCD)
|
|
|
|
microtime(&com->dcd_timestamp);
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Schedule high level to handle DCD changes. Note
|
|
|
|
* that we don't use the delta bits anywhere. Some
|
|
|
|
* UARTs mess them up, and it's easy to remember the
|
|
|
|
* previous bits and calculate the delta.
|
|
|
|
*/
|
|
|
|
com->last_modem_status = modem_status;
|
|
|
|
if (!(com->state & CS_CHECKMSR)) {
|
|
|
|
com_events += LOTS_OF_EVENTS;
|
|
|
|
com->state |= CS_CHECKMSR;
|
2002-01-05 08:47:13 +00:00
|
|
|
swi_sched(sio_fast_ih, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* handle CTS change immediately for crisp flow ctl */
|
|
|
|
if (com->state & CS_CTS_OFLOW) {
|
|
|
|
if (modem_status & MSR_CTS)
|
|
|
|
com->state |= CS_ODEVREADY;
|
|
|
|
else
|
|
|
|
com->state &= ~CS_ODEVREADY;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* output queued and everything ready? */
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifndef PC98
|
1996-06-14 10:04:54 +00:00
|
|
|
if (line_status & LSR_TXRDY
|
|
|
|
&& com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) {
|
1999-01-03 05:03:47 +00:00
|
|
|
#else
|
|
|
|
if (((com->pc98_if_type == COM_IF_RSA98III)
|
|
|
|
? (rsa_buf_status & 0x02)
|
|
|
|
: (line_status & LSR_TXRDY))
|
|
|
|
&& com->state >= (CS_BUSY | CS_TTGO | CS_ODEVREADY)) {
|
|
|
|
#endif
|
1999-12-06 00:23:38 +00:00
|
|
|
#ifdef PC98
|
|
|
|
Port_t tmp_data_port;
|
|
|
|
|
|
|
|
if (IS_8251(com->pc98_if_type) &&
|
|
|
|
com->pc98_8251fifo_enable)
|
|
|
|
tmp_data_port = I8251F_data;
|
|
|
|
else
|
|
|
|
tmp_data_port = com->data_port;
|
|
|
|
#endif
|
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
ioptr = com->obufq.l_head;
|
|
|
|
if (com->tx_fifo_size > 1) {
|
|
|
|
u_int ocount;
|
|
|
|
|
|
|
|
ocount = com->obufq.l_tail - ioptr;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
|
|
|
rsa_buf_status = inb(com->rsabase + rsa_srr);
|
|
|
|
rsa_tx_fifo_size = 1024;
|
|
|
|
if (!(rsa_buf_status & 0x01))
|
1999-12-06 00:23:38 +00:00
|
|
|
rsa_tx_fifo_size = 2048;
|
1999-01-03 05:03:47 +00:00
|
|
|
if (ocount > rsa_tx_fifo_size)
|
1999-12-06 00:23:38 +00:00
|
|
|
ocount = rsa_tx_fifo_size;
|
1999-01-03 05:03:47 +00:00
|
|
|
} else
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if (ocount > com->tx_fifo_size)
|
|
|
|
ocount = com->tx_fifo_size;
|
|
|
|
com->bytes_out += ocount;
|
|
|
|
do
|
1999-12-06 00:23:38 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(tmp_data_port, *ioptr++);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->data_port, *ioptr++);
|
1999-12-06 00:23:38 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
while (--ocount != 0);
|
|
|
|
} else {
|
1999-12-06 00:23:38 +00:00
|
|
|
#ifdef PC98
|
|
|
|
outb(tmp_data_port, *ioptr++);
|
|
|
|
#else
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->data_port, *ioptr++);
|
1999-12-06 00:23:38 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
++com->bytes_out;
|
|
|
|
}
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
|
|
|
if (!(pc98_check_i8251_interrupt(com) & IEN_TxFLAG))
|
2000-05-12 12:38:25 +00:00
|
|
|
com_int_Tx_enable(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
com->obufq.l_head = ioptr;
|
1999-04-18 14:42:20 +00:00
|
|
|
if (COM_IIR_TXRDYBUG(com->flags)) {
|
1998-01-08 10:50:06 +00:00
|
|
|
int_ctl_new = int_ctl | IER_ETXRDY;
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
if (ioptr >= com->obufq.l_tail) {
|
|
|
|
struct lbq *qp;
|
1996-07-23 07:46:59 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
qp = com->obufq.l_next;
|
|
|
|
qp->l_queued = FALSE;
|
|
|
|
qp = qp->l_next;
|
|
|
|
if (qp != NULL) {
|
|
|
|
com->obufq.l_head = qp->l_head;
|
|
|
|
com->obufq.l_tail = qp->l_tail;
|
|
|
|
com->obufq.l_next = qp;
|
|
|
|
} else {
|
|
|
|
/* output just completed */
|
1999-11-18 12:22:09 +00:00
|
|
|
if (COM_IIR_TXRDYBUG(com->flags)) {
|
1998-01-08 10:50:06 +00:00
|
|
|
int_ctl_new = int_ctl & ~IER_ETXRDY;
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
com->state &= ~CS_BUSY;
|
1996-07-23 07:46:59 +00:00
|
|
|
#if defined(PC98)
|
2000-05-12 12:38:25 +00:00
|
|
|
if (IS_8251(com->pc98_if_type) &&
|
|
|
|
pc98_check_i8251_interrupt(com) & IEN_TxFLAG)
|
1999-01-03 05:03:47 +00:00
|
|
|
com_int_Tx_disable(com);
|
1996-07-23 07:46:59 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
if (!(com->state & CS_ODONE)) {
|
|
|
|
com_events += LOTS_OF_EVENTS;
|
|
|
|
com->state |= CS_ODONE;
|
2000-10-25 05:19:40 +00:00
|
|
|
/* handle at high level ASAP */
|
2002-01-05 08:47:13 +00:00
|
|
|
swi_sched(sio_fast_ih, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
}
|
1999-11-18 12:22:09 +00:00
|
|
|
if (COM_IIR_TXRDYBUG(com->flags) && (int_ctl != int_ctl_new)) {
|
1999-02-05 11:37:40 +00:00
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
1999-11-18 12:22:09 +00:00
|
|
|
int_ctl_new &= ~(IER_ETXRDY | IER_ERXRDY);
|
|
|
|
outb(com->intr_ctl_port, int_ctl_new);
|
|
|
|
outb(com->rsabase + rsa_ier, 0x1d);
|
1999-01-03 05:03:47 +00:00
|
|
|
} else
|
1999-02-05 11:37:40 +00:00
|
|
|
#endif
|
1998-01-08 10:50:06 +00:00
|
|
|
outb(com->intr_ctl_port, int_ctl_new);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
else if (line_status & LSR_TXRDY) {
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
2000-05-12 12:38:25 +00:00
|
|
|
if (pc98_check_i8251_interrupt(com) & IEN_TxFLAG)
|
1999-01-03 05:03:47 +00:00
|
|
|
com_int_Tx_disable(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-12-06 00:23:38 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
if (com->pc98_8251fifo_enable) {
|
|
|
|
if ((tmp = inb(I8251F_lsr)) & STS8251F_RxRDY)
|
|
|
|
goto more_intr;
|
|
|
|
} else {
|
|
|
|
if ((tmp = inb(com->sts_port)) & STS8251_RxRDY)
|
|
|
|
goto more_intr;
|
|
|
|
}
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
/* finished? */
|
|
|
|
#ifndef COM_MULTIPORT
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
1996-06-14 10:04:54 +00:00
|
|
|
return;
|
|
|
|
#endif
|
|
|
|
if ((inb(com->int_id_port) & IIR_IMASK) == IIR_NOPEND)
|
|
|
|
#endif /* COM_MULTIPORT */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2001-09-14 05:05:08 +00:00
|
|
|
sioioctl(dev, cmd, data, flag, td)
|
1996-06-14 10:04:54 +00:00
|
|
|
dev_t dev;
|
1998-06-08 08:55:47 +00:00
|
|
|
u_long cmd;
|
1996-06-14 10:04:54 +00:00
|
|
|
caddr_t data;
|
|
|
|
int flag;
|
2001-09-14 05:05:08 +00:00
|
|
|
struct thread *td;
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int error;
|
|
|
|
int mynor;
|
|
|
|
int s;
|
|
|
|
struct tty *tp;
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
1999-04-18 14:42:20 +00:00
|
|
|
u_long oldcmd;
|
1996-06-14 10:04:54 +00:00
|
|
|
struct termios term;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
com = com_addr(MINOR_TO_UNIT(mynor));
|
2000-03-12 13:14:51 +00:00
|
|
|
if (com == NULL || com->gone)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (ENODEV);
|
|
|
|
if (mynor & CONTROL_MASK) {
|
|
|
|
struct termios *ct;
|
|
|
|
|
|
|
|
switch (mynor & CONTROL_MASK) {
|
|
|
|
case CONTROL_INIT_STATE:
|
|
|
|
ct = mynor & CALLOUT_MASK ? &com->it_out : &com->it_in;
|
|
|
|
break;
|
|
|
|
case CONTROL_LOCK_STATE:
|
|
|
|
ct = mynor & CALLOUT_MASK ? &com->lt_out : &com->lt_in;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return (ENODEV); /* /dev/nodev */
|
|
|
|
}
|
|
|
|
switch (cmd) {
|
|
|
|
case TIOCSETA:
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
*ct = *(struct termios *)data;
|
|
|
|
return (0);
|
|
|
|
case TIOCGETA:
|
|
|
|
*(struct termios *)data = *ct;
|
|
|
|
return (0);
|
|
|
|
case TIOCGETD:
|
|
|
|
*(int *)data = TTYDISC;
|
|
|
|
return (0);
|
|
|
|
case TIOCGWINSZ:
|
|
|
|
bzero(data, sizeof(struct winsize));
|
|
|
|
return (0);
|
|
|
|
default:
|
|
|
|
return (ENOTTY);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
tp = com->tp;
|
|
|
|
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
|
|
|
|
term = tp->t_termios;
|
|
|
|
oldcmd = cmd;
|
|
|
|
error = ttsetcompat(tp, &cmd, data, &term);
|
|
|
|
if (error != 0)
|
|
|
|
return (error);
|
|
|
|
if (cmd != oldcmd)
|
|
|
|
data = (caddr_t)&term;
|
|
|
|
#endif
|
|
|
|
if (cmd == TIOCSETA || cmd == TIOCSETAW || cmd == TIOCSETAF) {
|
|
|
|
int cc;
|
|
|
|
struct termios *dt = (struct termios *)data;
|
|
|
|
struct termios *lt = mynor & CALLOUT_MASK
|
|
|
|
? &com->lt_out : &com->lt_in;
|
|
|
|
|
|
|
|
dt->c_iflag = (tp->t_iflag & lt->c_iflag)
|
|
|
|
| (dt->c_iflag & ~lt->c_iflag);
|
|
|
|
dt->c_oflag = (tp->t_oflag & lt->c_oflag)
|
|
|
|
| (dt->c_oflag & ~lt->c_oflag);
|
|
|
|
dt->c_cflag = (tp->t_cflag & lt->c_cflag)
|
|
|
|
| (dt->c_cflag & ~lt->c_cflag);
|
|
|
|
dt->c_lflag = (tp->t_lflag & lt->c_lflag)
|
|
|
|
| (dt->c_lflag & ~lt->c_lflag);
|
|
|
|
for (cc = 0; cc < NCCS; ++cc)
|
|
|
|
if (lt->c_cc[cc] != 0)
|
|
|
|
dt->c_cc[cc] = tp->t_cc[cc];
|
|
|
|
if (lt->c_ispeed != 0)
|
|
|
|
dt->c_ispeed = tp->t_ispeed;
|
|
|
|
if (lt->c_ospeed != 0)
|
|
|
|
dt->c_ospeed = tp->t_ospeed;
|
|
|
|
}
|
2001-09-14 05:05:08 +00:00
|
|
|
error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td);
|
1997-12-06 13:25:01 +00:00
|
|
|
if (error != ENOIOCTL)
|
1996-06-14 10:04:54 +00:00
|
|
|
return (error);
|
|
|
|
s = spltty();
|
|
|
|
error = ttioctl(tp, cmd, data, flag);
|
|
|
|
disc_optim(tp, &tp->t_termios, com);
|
1997-12-06 13:25:01 +00:00
|
|
|
if (error != ENOIOCTL) {
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1998-02-02 07:59:05 +00:00
|
|
|
switch (cmd) {
|
|
|
|
case TIOCSBRK:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_send_break_on(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCCBRK:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_send_break_off(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCSDTR:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bis(com, TIOCM_DTR | TIOCM_RTS);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCCDTR:
|
1997-03-06 15:07:04 +00:00
|
|
|
com_tiocm_bic(com, TIOCM_DTR);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1996-12-04 04:21:30 +00:00
|
|
|
/*
|
|
|
|
* XXX should disallow changing MCR_RTS if CS_RTS_IFLOW is set. The
|
|
|
|
* changes get undone on the next call to comparam().
|
|
|
|
*/
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMSET:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_set(com, *(int *)data);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMBIS:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bis(com, *(int *)data);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMBIC:
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bic(com, *(int *)data);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMGET:
|
1997-03-06 15:07:04 +00:00
|
|
|
*(int *)data = com_tiocm_get(com);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMSDTRWAIT:
|
1996-06-14 10:04:54 +00:00
|
|
|
/* must be root since the wait applies to following logins */
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (error != 0) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
com->dtr_wait = *(int *)data * hz / 100;
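			/*
			 * The argument is in hundredths of a second and is
			 * stored in clock ticks; e.g. (assuming hz == 100) a
			 * value of 300 gives 300 * 100 / 100 = 300 ticks,
			 * i.e. a 3 second DTR-off interval.
			 */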
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCMGDTRWAIT:
|
1996-06-14 10:04:54 +00:00
|
|
|
*(int *)data = com->dtr_wait * 100 / hz;
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCTIMESTAMP:
|
1996-06-14 10:04:54 +00:00
|
|
|
com->do_timestamp = TRUE;
|
|
|
|
*(struct timeval *)data = com->timestamp;
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCDCDTIMESTAMP:
|
1996-07-23 07:46:59 +00:00
|
|
|
com->do_dcd_timestamp = TRUE;
|
|
|
|
*(struct timeval *)data = com->dcd_timestamp;
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
default:
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
2000-05-12 12:38:25 +00:00
|
|
|
error = pps_ioctl(cmd, data, &com->pps);
|
|
|
|
if (error == ENODEV)
|
|
|
|
error = ENOTTY;
|
|
|
|
return (error);
|
1998-02-02 07:59:05 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
} else {
|
|
|
|
#endif
|
|
|
|
switch (cmd) {
|
|
|
|
case TIOCSBRK:
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, com->cfcr_image |= CFCR_SBREAK);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
|
|
|
case TIOCCBRK:
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, com->cfcr_image &= ~CFCR_SBREAK);
|
1996-06-14 10:04:54 +00:00
|
|
|
break;
|
|
|
|
case TIOCSDTR:
|
|
|
|
(void)commctl(com, TIOCM_DTR, DMBIS);
|
|
|
|
break;
|
|
|
|
case TIOCCDTR:
|
|
|
|
(void)commctl(com, TIOCM_DTR, DMBIC);
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
/*
|
|
|
|
* XXX should disallow changing MCR_RTS if CS_RTS_IFLOW is set. The
|
|
|
|
* changes get undone on the next call to comparam().
|
|
|
|
*/
|
1996-06-14 10:04:54 +00:00
|
|
|
case TIOCMSET:
|
|
|
|
(void)commctl(com, *(int *)data, DMSET);
|
|
|
|
break;
|
|
|
|
case TIOCMBIS:
|
|
|
|
(void)commctl(com, *(int *)data, DMBIS);
|
|
|
|
break;
|
|
|
|
case TIOCMBIC:
|
|
|
|
(void)commctl(com, *(int *)data, DMBIC);
|
|
|
|
break;
|
|
|
|
case TIOCMGET:
|
|
|
|
*(int *)data = commctl(com, 0, DMGET);
|
|
|
|
break;
|
|
|
|
case TIOCMSDTRWAIT:
|
|
|
|
/* must be root since the wait applies to following logins */
|
2002-04-01 21:31:13 +00:00
|
|
|
error = suser(td);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (error != 0) {
|
|
|
|
splx(s);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
com->dtr_wait = *(int *)data * hz / 100;
|
|
|
|
break;
|
|
|
|
case TIOCMGDTRWAIT:
|
|
|
|
*(int *)data = com->dtr_wait * 100 / hz;
|
|
|
|
break;
|
|
|
|
case TIOCTIMESTAMP:
|
|
|
|
com->do_timestamp = TRUE;
|
|
|
|
*(struct timeval *)data = com->timestamp;
|
|
|
|
break;
|
1998-02-02 07:59:05 +00:00
|
|
|
case TIOCDCDTIMESTAMP:
|
|
|
|
com->do_dcd_timestamp = TRUE;
|
|
|
|
*(struct timeval *)data = com->dcd_timestamp;
|
|
|
|
break;
|
1996-06-14 10:04:54 +00:00
|
|
|
default:
|
|
|
|
splx(s);
|
1999-04-01 13:44:15 +00:00
|
|
|
error = pps_ioctl(cmd, data, &com->pps);
|
|
|
|
if (error == ENODEV)
|
|
|
|
error = ENOTTY;
|
|
|
|
return (error);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
splx(s);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2000-09-07 13:34:45 +00:00
|
|
|
/* software interrupt handler for SWI_TTY */
|
1998-08-13 07:36:40 +00:00
|
|
|
static void
|
2000-10-25 20:21:42 +00:00
|
|
|
siopoll(void *dummy)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
|
|
|
int unit;
|
|
|
|
|
|
|
|
if (com_events == 0)
|
|
|
|
return;
|
|
|
|
repeat:
|
1999-11-18 12:22:09 +00:00
|
|
|
for (unit = 0; unit < sio_numunits; ++unit) {
|
1996-06-14 10:04:54 +00:00
|
|
|
struct com_s *com;
|
|
|
|
int incc;
|
|
|
|
struct tty *tp;
|
|
|
|
|
|
|
|
com = com_addr(unit);
|
|
|
|
if (com == NULL)
|
|
|
|
continue;
|
|
|
|
tp = com->tp;
|
1998-06-17 09:27:15 +00:00
|
|
|
if (tp == NULL || com->gone) {
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
1998-06-17 09:27:15 +00:00
|
|
|
* Discard any events related to never-opened or
|
|
|
|
* going-away devices.
|
1996-06-14 10:04:54 +00:00
|
|
|
*/
|
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line. In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin. This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type. The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
incc = com->iptr - com->ibuf;
|
|
|
|
com->iptr = com->ibuf;
|
|
|
|
if (com->state & CS_CHECKMSR) {
|
|
|
|
incc += LOTS_OF_EVENTS;
|
|
|
|
com->state &= ~CS_CHECKMSR;
|
|
|
|
}
|
|
|
|
com_events -= incc;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
continue;
|
|
|
|
}
|
1999-02-05 11:37:40 +00:00
|
|
|
if (com->iptr != com->ibuf) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
sioinput(com);
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
if (com->state & CS_CHECKMSR) {
|
|
|
|
u_char delta_modem_status;
|
|
|
|
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (!IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
delta_modem_status = com->last_modem_status
|
|
|
|
^ com->prev_modem_status;
|
|
|
|
com->prev_modem_status = com->last_modem_status;
|
|
|
|
com_events -= LOTS_OF_EVENTS;
|
|
|
|
com->state &= ~CS_CHECKMSR;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (delta_modem_status & MSR_DCD)
|
|
|
|
(*linesw[tp->t_line].l_modem)
|
|
|
|
(tp, com->prev_modem_status & MSR_DCD);
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
if (com->state & CS_ODONE) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
com_events -= LOTS_OF_EVENTS;
|
|
|
|
com->state &= ~CS_ODONE;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1997-04-19 14:54:32 +00:00
|
|
|
if (!(com->state & CS_BUSY)
|
|
|
|
&& !(com->extra_state & CSE_BUSYCHECK)) {
|
1996-12-04 04:36:59 +00:00
|
|
|
timeout(siobusycheck, com, hz / 100);
|
1997-04-19 14:54:32 +00:00
|
|
|
com->extra_state |= CSE_BUSYCHECK;
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
(*linesw[tp->t_line].l_start)(tp);
|
|
|
|
}
|
|
|
|
if (com_events == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (com_events >= LOTS_OF_EVENTS)
|
|
|
|
goto repeat;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
comparam(tp, t)
|
|
|
|
struct tty *tp;
|
|
|
|
struct termios *t;
|
|
|
|
{
|
|
|
|
u_int cfcr;
|
|
|
|
int cflag;
|
|
|
|
struct com_s *com;
|
2002-01-31 08:26:45 +00:00
|
|
|
u_int divisor;
|
1996-10-09 21:47:16 +00:00
|
|
|
u_char dlbh;
|
|
|
|
u_char dlbl;
|
1996-06-14 10:04:54 +00:00
|
|
|
int s;
|
|
|
|
int unit;
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
u_char param = 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
|
|
|
|
unit = DEV_TO_UNIT(tp->t_dev);
|
|
|
|
com = com_addr(unit);
|
2002-01-31 08:26:45 +00:00
|
|
|
if (com == NULL)
|
|
|
|
return (ENODEV);
|
2000-05-12 12:38:25 +00:00
|
|
|
|
2002-01-31 08:26:45 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
cfcr = 0;
|
2002-01-31 08:26:45 +00:00
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
2002-03-08 12:12:46 +00:00
|
|
|
if (pc98_ttspeedtab(com, t->c_ospeed, &divisor) != 0)
|
2002-01-31 08:26:45 +00:00
|
|
|
return (EINVAL);
|
1999-01-03 05:03:47 +00:00
|
|
|
} else {
|
2002-01-31 08:26:45 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
/* do historical conversions */
|
|
|
|
if (t->c_ispeed == 0)
|
|
|
|
t->c_ispeed = t->c_ospeed;
|
|
|
|
|
|
|
|
/* check requested parameters */
|
2002-01-31 08:26:45 +00:00
|
|
|
if (t->c_ospeed == 0)
|
|
|
|
divisor = 0;
|
|
|
|
else {
|
|
|
|
if (t->c_ispeed != t->c_ospeed)
|
|
|
|
return (EINVAL);
|
|
|
|
divisor = siodivisor(com->rclk, t->c_ispeed);
|
|
|
|
if (divisor == 0)
|
|
|
|
return (EINVAL);
|
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
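/*
 * A rough sketch of what the divisor means (assuming the conventional
 * 1.8432 MHz UART clock, rclk = 1843200, and the 8250/16550's 16x
 * sampling): siodivisor() works out to approximately rclk / (16 * speed),
 * e.g. 12 at 9600 bps and 1 at 115200 bps.  A return of 0 marks a rate
 * the clock cannot produce, which the check above rejects with EINVAL.
 */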
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
/* parameters are OK, convert them to the com struct and the device */
|
1996-06-14 10:04:54 +00:00
|
|
|
s = spltty();
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
if (divisor == 0)
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bic(com, TIOCM_DTR|TIOCM_RTS|TIOCM_LE);
|
1997-12-29 16:08:48 +00:00
|
|
|
else
|
2000-05-12 12:38:25 +00:00
|
|
|
com_tiocm_bis(com, TIOCM_DTR|TIOCM_RTS|TIOCM_LE);
|
|
|
|
} else
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
if (divisor == 0)
|
|
|
|
(void)commctl(com, TIOCM_DTR, DMBIC); /* hang up line */
|
|
|
|
else
|
|
|
|
(void)commctl(com, TIOCM_DTR, DMBIS);
|
|
|
|
cflag = t->c_cflag;
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (!IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
switch (cflag & CSIZE) {
|
|
|
|
case CS5:
|
|
|
|
cfcr = CFCR_5BITS;
|
|
|
|
break;
|
|
|
|
case CS6:
|
|
|
|
cfcr = CFCR_6BITS;
|
|
|
|
break;
|
|
|
|
case CS7:
|
|
|
|
cfcr = CFCR_7BITS;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
cfcr = CFCR_8BITS;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (cflag & PARENB) {
|
|
|
|
cfcr |= CFCR_PENAB;
|
|
|
|
if (!(cflag & PARODD))
|
|
|
|
cfcr |= CFCR_PEVEN;
|
|
|
|
}
|
|
|
|
if (cflag & CSTOPB)
|
|
|
|
cfcr |= CFCR_STOPB;
|
|
|
|
|
|
|
|
if (com->hasfifo && divisor != 0) {
|
|
|
|
/*
|
|
|
|
* Use a fifo trigger level low enough so that the input
|
|
|
|
* latency from the fifo is less than about 16 msec and
|
|
|
|
* the total latency is less than about 30 msec. These
|
|
|
|
* latencies are reasonable for humans. Serial comms
|
|
|
|
* protocols shouldn't expect anything better since modem
|
|
|
|
* latencies are larger.
|
2002-01-06 09:54:50 +00:00
|
|
|
*
|
|
|
|
* We have to set the FIFO trigger point such that we
|
|
|
|
* don't overflow it accidentally if a serial interrupt
|
|
|
|
* is delayed. At high speeds, FIFO_RX_HIGH does not
|
|
|
|
* leave enough slots free.
|
1996-06-14 10:04:54 +00:00
|
|
|
*/
|
|
|
|
com->fifo_image = t->c_ospeed <= 4800
|
2002-01-06 09:54:50 +00:00
|
|
|
? FIFO_ENABLE : FIFO_ENABLE | FIFO_RX_MEDH;
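/*
 * Rough arithmetic behind the 4800 bps cutoff: an asynchronous character
 * is about 10 bit times, so at 4800 bps one character takes ~2.1 msec.
 * FIFO_RX_MEDH (nominally the 8-character receive trigger on a 16550)
 * would therefore add ~17 msec of latency, blowing the ~16 msec budget,
 * so the trigger is left at 1 character (plain FIFO_ENABLE) at 4800 bps
 * and below; at 9600 bps the 8-character trigger costs only ~8 msec.
 */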
|
1997-01-30 10:48:06 +00:00
|
|
|
#ifdef COM_ESP
|
|
|
|
/*
|
|
|
|
* The Hayes ESP card needs the fifo DMA mode bit set
|
|
|
|
* in compatibility mode. If not, it will interrupt
|
|
|
|
* for each character received.
|
|
|
|
*/
|
|
|
|
if (com->esp)
|
|
|
|
com->fifo_image |= FIFO_DMA_MODE;
|
|
|
|
#endif
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_fifo, com->fifo_image);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
#ifdef PC98
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2000-09-08 11:54:13 +00:00
|
|
|
/*
|
|
|
|
* This returns with interrupts disabled so that we can complete
|
|
|
|
* the speed change atomically. Keeping interrupts disabled is
|
|
|
|
* especially important while com_data is hidden.
|
|
|
|
*/
|
1999-02-05 11:37:40 +00:00
|
|
|
(void) siosetwater(com, t->c_ispeed);
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
2000-05-12 12:38:25 +00:00
|
|
|
com_cflag_and_speed_set(com, cflag, t->c_ospeed);
|
1999-01-03 05:03:47 +00:00
|
|
|
else {
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
if (divisor != 0) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, cfcr | CFCR_DLAB);
|
1996-10-09 21:47:16 +00:00
|
|
|
/*
|
|
|
|
* Only set the divisor registers if they would change,
|
|
|
|
* since on some 16550 incompatibles (UMC8669F), setting
|
|
|
|
* them while input is arriving loses sync until
|
|
|
|
* data stops arriving.
|
|
|
|
*/
|
|
|
|
dlbl = divisor & 0xFF;
|
2000-05-12 12:38:25 +00:00
|
|
|
if (sio_getreg(com, com_dlbl) != dlbl)
|
|
|
|
sio_setreg(com, com_dlbl, dlbl);
|
2002-01-31 08:26:45 +00:00
|
|
|
dlbh = divisor >> 8;
|
2000-05-12 12:38:25 +00:00
|
|
|
if (sio_getreg(com, com_dlbh) != dlbh)
|
|
|
|
sio_setreg(com, com_dlbh, dlbh);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1997-07-17 10:35:43 +00:00
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, com->cfcr_image = cfcr);
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1999-01-03 05:03:47 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
if (!(tp->t_state & TS_TTSTOP))
|
1998-01-16 11:20:22 +00:00
|
|
|
com->state |= CS_TTGO;
|
|
|
|
|
|
|
|
if (cflag & CRTS_IFLOW) {
|
2000-05-12 12:38:25 +00:00
|
|
|
#ifndef PC98
|
1997-07-17 10:35:43 +00:00
|
|
|
if (com->st16650a) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, 0xbf);
|
|
|
|
sio_setreg(com, com_fifo,
|
|
|
|
sio_getreg(com, com_fifo) | 0x40);
|
1997-07-17 10:35:43 +00:00
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
#endif
|
1996-12-04 04:21:30 +00:00
|
|
|
com->state |= CS_RTS_IFLOW;
|
|
|
|
/*
|
|
|
|
* If CS_RTS_IFLOW just changed from off to on, the change
|
|
|
|
* needs to be propagated to MCR_RTS. This isn't urgent,
|
|
|
|
* so do it later by calling comstart() instead of repeating
|
|
|
|
* a lot of code from comstart() here.
|
|
|
|
*/
|
|
|
|
} else if (com->state & CS_RTS_IFLOW) {
|
1996-06-14 10:04:54 +00:00
|
|
|
com->state &= ~CS_RTS_IFLOW;
|
1996-12-04 04:21:30 +00:00
|
|
|
/*
|
|
|
|
* CS_RTS_IFLOW just changed from on to off. Force MCR_RTS
|
|
|
|
* on here, since comstart() won't do it later.
|
|
|
|
*/
|
1997-03-06 15:07:04 +00:00
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type))
|
1997-03-06 15:07:04 +00:00
|
|
|
com_tiocm_bis(com, TIOCM_RTS);
|
|
|
|
else
|
2000-05-12 12:38:25 +00:00
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
|
|
|
#else
|
1996-12-04 04:21:30 +00:00
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
1997-07-17 10:35:43 +00:00
|
|
|
if (com->st16650a) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, 0xbf);
|
|
|
|
sio_setreg(com, com_fifo,
|
|
|
|
sio_getreg(com, com_fifo) & ~0x40);
|
1997-07-17 10:35:43 +00:00
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
#endif
|
1996-12-04 04:21:30 +00:00
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
|
1997-07-17 10:35:43 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/*
|
|
|
|
* Set up state to handle output flow control.
|
|
|
|
* XXX - worth handling MDMBUF (DCD) flow control at the lowest level?
|
|
|
|
* It now has 10+ msec of latency, while CTS flow has 50 usec or less.
|
|
|
|
*/
|
|
|
|
com->state |= CS_ODEVREADY;
|
|
|
|
com->state &= ~CS_CTS_OFLOW;
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
|
|
|
param = inb(com->rsabase + rsa_msr);
|
|
|
|
outb(com->rsabase + rsa_msr, param & 0x14);
|
|
|
|
}
|
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
if (cflag & CCTS_OFLOW) {
|
|
|
|
com->state |= CS_CTS_OFLOW;
|
|
|
|
#ifdef PC98
|
1999-01-03 05:03:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
1996-06-14 10:04:54 +00:00
|
|
|
if (!(pc98_get_modem_status(com) & TIOCM_CTS))
|
|
|
|
com->state &= ~CS_ODEVREADY;
|
|
|
|
} else {
|
2000-05-12 12:38:25 +00:00
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III) {
|
|
|
|
/* Set automatic flow control mode */
|
|
|
|
outb(com->rsabase + rsa_msr, param | 0x08);
|
|
|
|
} else
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
|
|
|
if (!(com->last_modem_status & MSR_CTS))
|
1998-01-16 11:20:22 +00:00
|
|
|
com->state &= ~CS_ODEVREADY;
|
1997-07-20 11:59:48 +00:00
|
|
|
#ifdef PC98
|
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
#else
|
|
|
|
if (com->st16650a) {
|
|
|
|
sio_setreg(com, com_cfcr, 0xbf);
|
|
|
|
sio_setreg(com, com_fifo,
|
|
|
|
sio_getreg(com, com_fifo) | 0x80);
|
|
|
|
}
|
1997-07-17 10:35:43 +00:00
|
|
|
} else {
|
|
|
|
if (com->st16650a) {
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, 0xbf);
|
|
|
|
sio_setreg(com, com_fifo,
|
|
|
|
sio_getreg(com, com_fifo) & ~0x80);
|
1997-07-17 10:35:43 +00:00
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1997-07-17 10:35:43 +00:00
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
#ifdef PC98
|
2000-05-12 12:38:25 +00:00
|
|
|
if (!IS_8251(com->pc98_if_type))
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
2000-05-12 12:38:25 +00:00
|
|
|
sio_setreg(com, com_cfcr, com->cfcr_image);
|
1997-07-17 10:35:43 +00:00
|
|
|
|
1996-06-14 10:04:54 +00:00
|
|
|
/* XXX shouldn't call functions while intrs are disabled. */
|
|
|
|
disc_optim(tp, t, com);
|
|
|
|
/*
|
|
|
|
* Recover from fiddling with CS_TTGO. We used to call siointr1()
|
|
|
|
* unconditionally, but that defeated the careful discarding of
|
|
|
|
* stale input in sioopen().
|
|
|
|
*/
|
|
|
|
if (com->state >= (CS_BUSY | CS_TTGO))
|
|
|
|
siointr1(com);
|
|
|
|
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
1996-12-04 04:21:30 +00:00
|
|
|
comstart(tp);
|
1999-02-05 11:37:40 +00:00
|
|
|
if (com->ibufold != NULL) {
|
|
|
|
free(com->ibufold, M_DEVBUF);
|
|
|
|
com->ibufold = NULL;
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2000-09-08 11:54:13 +00:00
|
|
|
/*
|
2001-01-27 13:02:06 +00:00
|
|
|
* This function must be called with the sio_lock mutex released and will
|
|
|
|
* return with it obtained.
|
2000-09-08 11:54:13 +00:00
|
|
|
*/
|
1999-02-05 11:37:40 +00:00
|
|
|
static int
|
|
|
|
siosetwater(com, speed)
|
|
|
|
struct com_s *com;
|
|
|
|
speed_t speed;
|
|
|
|
{
|
|
|
|
int cp4ticks;
|
|
|
|
u_char *ibuf;
|
|
|
|
int ibufsize;
|
|
|
|
struct tty *tp;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make the buffer size large enough to handle a softtty interrupt
|
|
|
|
* latency of about 2 ticks without loss of throughput or data
|
|
|
|
* (about 3 ticks if input flow control is not used or not honoured,
|
|
|
|
* but a bit less for CS5-CS7 modes).
|
|
|
|
*/
|
|
|
|
cp4ticks = speed / 10 / hz * 4;
|
|
|
|
for (ibufsize = 128; ibufsize < cp4ticks;)
|
|
|
|
ibufsize <<= 1;
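/*
 * Worked example of the sizing (assuming hz = 100): at 115200 bps about
 * 11520 characters arrive per second, so cp4ticks = 11520 / 100 * 4 = 460
 * and ibufsize doubles from 128 up to 512.  At 9600 bps cp4ticks is only
 * 36, so the 128-byte minimum is kept.
 */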
|
|
|
|
#ifdef PC98
|
|
|
|
if (com->pc98_if_type == COM_IF_RSA98III)
|
|
|
|
ibufsize = 2048;
|
|
|
|
#endif
|
2000-09-08 11:54:13 +00:00
|
|
|
if (ibufsize == com->ibufsize) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
return (0);
|
2000-09-08 11:54:13 +00:00
|
|
|
}
|
1999-02-05 11:37:40 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate input buffer. The extra factor of 2 in the size is
|
|
|
|
* to allow for an error byte for each input byte.
|
|
|
|
*/
|
|
|
|
ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT);
|
2000-09-08 11:54:13 +00:00
|
|
|
if (ibuf == NULL) {
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
return (ENOMEM);
|
2000-09-08 11:54:13 +00:00
|
|
|
}
|
1999-02-05 11:37:40 +00:00
|
|
|
|
|
|
|
/* Initialize non-critical variables. */
|
|
|
|
com->ibufold = com->ibuf;
|
|
|
|
com->ibufsize = ibufsize;
|
|
|
|
tp = com->tp;
|
|
|
|
if (tp != NULL) {
|
|
|
|
tp->t_ififosize = 2 * ibufsize;
|
|
|
|
tp->t_ispeedwat = (speed_t)-1;
|
|
|
|
tp->t_ospeedwat = (speed_t)-1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read current input buffer, if any. Continue with interrupts
|
|
|
|
* disabled.
|
|
|
|
*/
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1999-02-05 11:37:40 +00:00
|
|
|
if (com->iptr != com->ibuf)
|
|
|
|
sioinput(com);
|
|
|
|
|
|
|
|
/*-
|
|
|
|
* Initialize critical variables, including input buffer watermarks.
|
|
|
|
* The external device is asked to stop sending when the buffer
|
|
|
|
* exactly reaches high water, or when the high level requests it.
|
|
|
|
* The high level is notified immediately (rather than at a later
|
|
|
|
* clock tick) when this watermark is reached.
|
|
|
|
* The buffer size is chosen so the watermark should almost never
|
|
|
|
* be reached.
|
|
|
|
* The low watermark is invisibly 0 since the buffer is always
|
|
|
|
* emptied all at once.
|
|
|
|
*/
|
|
|
|
com->iptr = com->ibuf = ibuf;
|
|
|
|
com->ibufend = ibuf + ibufsize;
|
|
|
|
com->ierroff = ibufsize;
|
|
|
|
com->ihighwater = ibuf + 3 * ibufsize / 4;
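/*
 * With a 512-byte ibufsize, for example, the error bytes start at
 * ierroff = 512 within the doubled allocation and the high-water mark
 * sits at byte 384.  Since the buffer is always drained in one go, that
 * mark should almost never be reached.
 */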
|
1996-06-14 10:04:54 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
comstart(tp)
|
|
|
|
struct tty *tp;
|
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int s;
|
|
|
|
int unit;
|
|
|
|
|
|
|
|
unit = DEV_TO_UNIT(tp->t_dev);
|
|
|
|
com = com_addr(unit);
|
2000-03-12 13:14:51 +00:00
|
|
|
if (com == NULL)
|
|
|
|
return;
|
1996-06-14 10:04:54 +00:00
|
|
|
s = spltty();
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (tp->t_state & TS_TTSTOP)
|
|
|
|
com->state &= ~CS_TTGO;
|
|
|
|
else
|
|
|
|
com->state |= CS_TTGO;
|
|
|
|
if (tp->t_state & TS_TBLOCK) {
|
|
|
|
#ifdef PC98
|
1999-11-29 13:20:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
if ((com_tiocm_get(com) & TIOCM_RTS) &&
|
|
|
|
(com->state & CS_RTS_IFLOW))
|
|
|
|
com_tiocm_bic(com, TIOCM_RTS);
|
|
|
|
} else {
|
|
|
|
if ((com->mcr_image & MCR_RTS) &&
|
|
|
|
(com->state & CS_RTS_IFLOW))
|
|
|
|
outb(com->modem_ctl_port, com->mcr_image &= ~MCR_RTS);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#else
|
|
|
|
if (com->mcr_image & MCR_RTS && com->state & CS_RTS_IFLOW)
|
|
|
|
outb(com->modem_ctl_port, com->mcr_image &= ~MCR_RTS);
|
1999-11-29 13:20:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
} else {
|
|
|
|
#ifdef PC98
|
1999-11-29 13:20:47 +00:00
|
|
|
if (IS_8251(com->pc98_if_type)) {
|
|
|
|
if (!(com_tiocm_get(com) & TIOCM_RTS) &&
|
|
|
|
com->iptr < com->ihighwater &&
|
|
|
|
com->state & CS_RTS_IFLOW)
|
|
|
|
com_tiocm_bis(com, TIOCM_RTS);
|
|
|
|
} else {
|
|
|
|
if (!(com->mcr_image & MCR_RTS) &&
|
|
|
|
com->iptr < com->ihighwater &&
|
|
|
|
com->state & CS_RTS_IFLOW)
|
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#else
|
1996-12-04 04:21:30 +00:00
|
|
|
if (!(com->mcr_image & MCR_RTS) && com->iptr < com->ihighwater
|
|
|
|
&& com->state & CS_RTS_IFLOW)
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS);
|
1999-11-29 13:20:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
|
1997-12-29 16:08:48 +00:00
|
|
|
ttwwakeup(tp);
|
1996-06-14 10:04:54 +00:00
|
|
|
splx(s);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (tp->t_outq.c_cc != 0) {
|
|
|
|
struct lbq *qp;
|
|
|
|
struct lbq *next;
|
|
|
|
|
|
|
|
if (!com->obufs[0].l_queued) {
|
|
|
|
com->obufs[0].l_tail
|
|
|
|
= com->obuf1 + q_to_b(&tp->t_outq, com->obuf1,
|
2000-05-12 12:38:25 +00:00
|
|
|
#ifdef PC98
|
1999-02-05 11:37:40 +00:00
|
|
|
com->obufsize);
|
2000-05-12 12:38:25 +00:00
|
|
|
#else
|
|
|
|
sizeof com->obuf1);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
com->obufs[0].l_next = NULL;
|
|
|
|
com->obufs[0].l_queued = TRUE;
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock_spin(&sio_lock);
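/*
 * Chain the freshly filled obuf onto the interrupt-level output queue:
 * if a transfer is already in progress it is appended at the tail,
 * otherwise it becomes the head of the queue and CS_BUSY is set so the
 * transmit path starts draining it.
 */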
|
1996-06-14 10:04:54 +00:00
|
|
|
if (com->state & CS_BUSY) {
|
|
|
|
qp = com->obufq.l_next;
|
|
|
|
while ((next = qp->l_next) != NULL)
|
|
|
|
qp = next;
|
|
|
|
qp->l_next = &com->obufs[0];
|
|
|
|
} else {
|
|
|
|
com->obufq.l_head = com->obufs[0].l_head;
|
|
|
|
com->obufq.l_tail = com->obufs[0].l_tail;
|
|
|
|
com->obufq.l_next = &com->obufs[0];
|
|
|
|
com->state |= CS_BUSY;
|
|
|
|
}
|
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock_spin(&sio_lock);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) {
|
|
|
|
com->obufs[1].l_tail
|
|
|
|
= com->obuf2 + q_to_b(&tp->t_outq, com->obuf2,
|
2000-05-12 12:38:25 +00:00
|
|
|
#ifdef PC98
|
1999-02-05 11:37:40 +00:00
|
|
|
com->obufsize);
|
2000-05-12 12:38:25 +00:00
|
|
|
#else
|
|
|
|
sizeof com->obuf2);
|
1999-01-03 05:03:47 +00:00
|
|
|
#endif
|
1996-06-14 10:04:54 +00:00
|
|
|
com->obufs[1].l_next = NULL;
|
|
|
|
com->obufs[1].l_queued = TRUE;
|
2001-02-09 06:11:45 +00:00
|
|
|
			mtx_lock_spin(&sio_lock);
			if (com->state & CS_BUSY) {
				qp = com->obufq.l_next;
				while ((next = qp->l_next) != NULL)
					qp = next;
				qp->l_next = &com->obufs[1];
			} else {
				com->obufq.l_head = com->obufs[1].l_head;
				com->obufq.l_tail = com->obufs[1].l_tail;
				com->obufq.l_next = &com->obufs[1];
				com->state |= CS_BUSY;
			}
			mtx_unlock_spin(&sio_lock);
		}
		tp->t_state |= TS_BUSY;
	}
	mtx_lock_spin(&sio_lock);
	if (com->state >= (CS_BUSY | CS_TTGO))
		siointr1(com);	/* fake interrupt to start output */
	mtx_unlock_spin(&sio_lock);
	ttwwakeup(tp);
	splx(s);
}

static void
comstop(tp, rw)
	struct tty	*tp;
	int		rw;
{
	struct com_s	*com;
#ifdef PC98
	int		rsa98_tmp = 0;
#endif

	com = com_addr(DEV_TO_UNIT(tp->t_dev));
	if (com == NULL || com->gone)
		return;
	mtx_lock_spin(&sio_lock);
	if (rw & FWRITE) {
#ifdef PC98
		if (!IS_8251(com->pc98_if_type)) {
#endif
		if (com->hasfifo)
#ifdef COM_ESP
			/* XXX avoid h/w bug. */
			if (!com->esp)
#endif
				sio_setreg(com, com_fifo,
					   FIFO_XMT_RST | com->fifo_image);
#ifdef PC98
			if (com->pc98_if_type == COM_IF_RSA98III)
				for (rsa98_tmp = 0; rsa98_tmp < 2048; rsa98_tmp++)
					sio_setreg(com, com_fifo,
						   FIFO_XMT_RST | com->fifo_image);
		}
#endif
		com->obufs[0].l_queued = FALSE;
		com->obufs[1].l_queued = FALSE;
		if (com->state & CS_ODONE)
			com_events -= LOTS_OF_EVENTS;
		com->state &= ~(CS_ODONE | CS_BUSY);
		com->tp->t_state &= ~TS_BUSY;
	}
	if (rw & FREAD) {
#ifdef PC98
		if (!IS_8251(com->pc98_if_type)) {
		if (com->pc98_if_type == COM_IF_RSA98III)
			for (rsa98_tmp = 0; rsa98_tmp < 2048; rsa98_tmp++)
				sio_getreg(com, com_data);
#endif
		if (com->hasfifo)
#ifdef COM_ESP
			/* XXX avoid h/w bug. */
			if (!com->esp)
#endif
				sio_setreg(com, com_fifo,
					   FIFO_RCV_RST | com->fifo_image);
#ifdef PC98
		}
#endif
		com_events -= (com->iptr - com->ibuf);
		com->iptr = com->ibuf;
	}
	mtx_unlock_spin(&sio_lock);
	comstart(tp);
}

static int
commctl(com, bits, how)
	struct com_s	*com;
	int		bits;
	int		how;
{
	int	mcr;
	int	msr;

	if (how == DMGET) {
		bits = TIOCM_LE;	/* XXX - always enabled while open */
		mcr = com->mcr_image;
		if (mcr & MCR_DTR)
			bits |= TIOCM_DTR;
		if (mcr & MCR_RTS)
			bits |= TIOCM_RTS;
		msr = com->prev_modem_status;
		if (msr & MSR_CTS)
			bits |= TIOCM_CTS;
		if (msr & MSR_DCD)
			bits |= TIOCM_CD;
		if (msr & MSR_DSR)
			bits |= TIOCM_DSR;
		/*
		 * XXX - MSR_RI is naturally volatile, and we make MSR_TERI
		 * more volatile by reading the modem status a lot.  Perhaps
		 * we should latch both bits until the status is read here.
		 */
		if (msr & (MSR_RI | MSR_TERI))
			bits |= TIOCM_RI;
		return (bits);
	}
	mcr = 0;
	if (bits & TIOCM_DTR)
		mcr |= MCR_DTR;
	if (bits & TIOCM_RTS)
		mcr |= MCR_RTS;
	if (com->gone)
		return(0);
	mtx_lock_spin(&sio_lock);
	switch (how) {
	case DMSET:
		outb(com->modem_ctl_port,
		     com->mcr_image = mcr | (com->mcr_image & MCR_IENABLE));
		break;
	case DMBIS:
		outb(com->modem_ctl_port, com->mcr_image |= mcr);
		break;
	case DMBIC:
		outb(com->modem_ctl_port, com->mcr_image &= ~mcr);
		break;
	}
	mtx_unlock_spin(&sio_lock);
	return (0);
}
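
/*
 * Added usage sketch (not from the original source): a caller would use
 * commctl() roughly as follows:
 *
 *	commctl(com, TIOCM_DTR | TIOCM_RTS, DMBIS);	- assert DTR and RTS
 *	commctl(com, TIOCM_DTR, DMBIC);			- drop DTR only
 *	bits = commctl(com, 0, DMGET);			- read TIOCM_* state
 *
 * For DMGET the "bits" argument is ignored; the line state is rebuilt from
 * the cached MCR image and the most recently read modem status.
 */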

static void
siosettimeout()
{
	struct com_s	*com;
	bool_t		someopen;
	int		unit;

	/*
	 * Set our timeout period to 1 second if no polled devices are open.
	 * Otherwise set it to max(1/200, 1/hz).
	 * Enable timeouts iff some device is open.
	 */
	untimeout(comwakeup, (void *)NULL, sio_timeout_handle);
	sio_timeout = hz;
	someopen = FALSE;
	for (unit = 0; unit < sio_numunits; ++unit) {
		com = com_addr(unit);
		if (com != NULL && com->tp != NULL
		    && com->tp->t_state & TS_ISOPEN && !com->gone) {
			someopen = TRUE;
			if (com->poll || com->poll_output) {
				sio_timeout = hz > 200 ? hz / 200 : 1;
				break;
			}
		}
	}
	if (someopen) {
		sio_timeouts_until_log = hz / sio_timeout;
		sio_timeout_handle = timeout(comwakeup, (void *)NULL,
					     sio_timeout);
	} else {
		/* Flush error messages, if any. */
		sio_timeouts_until_log = 1;
		comwakeup((void *)NULL);
		untimeout(comwakeup, (void *)NULL, sio_timeout_handle);
	}
}
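
/*
 * Added note: with common hz values the polled period above works out to
 * about 1/200 second when a polled port is open, e.g. hz = 1000 gives
 * sio_timeout = 1000 / 200 = 5 ticks (5 ms), while hz = 100 falls back to
 * the 1-tick minimum (10 ms); with no polled port open it stays at hz
 * ticks, i.e. one second.
 */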

static void
comwakeup(chan)
	void	*chan;
{
	struct com_s	*com;
	int		unit;

	sio_timeout_handle = timeout(comwakeup, (void *)NULL, sio_timeout);

	/*
	 * Recover from lost output interrupts.
	 * Poll any lines that don't use interrupts.
	 */
	for (unit = 0; unit < sio_numunits; ++unit) {
		com = com_addr(unit);
		if (com != NULL && !com->gone
		    && (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) {
			mtx_lock_spin(&sio_lock);
			siointr1(com);
			mtx_unlock_spin(&sio_lock);
		}
	}

	/*
	 * Check for and log errors, but not too often.
	 */
	if (--sio_timeouts_until_log > 0)
		return;
	sio_timeouts_until_log = hz / sio_timeout;
	for (unit = 0; unit < sio_numunits; ++unit) {
		int	errnum;

		com = com_addr(unit);
		if (com == NULL)
			continue;
		if (com->gone)
			continue;
		for (errnum = 0; errnum < CE_NTYPES; ++errnum) {
			u_int	delta;
			u_long	total;

			mtx_lock_spin(&sio_lock);
			delta = com->delta_error_counts[errnum];
			com->delta_error_counts[errnum] = 0;
			mtx_unlock_spin(&sio_lock);
			if (delta == 0)
				continue;
			total = com->error_counts[errnum] += delta;
			log(LOG_ERR, "sio%d: %u more %s%s (total %lu)\n",
			    unit, delta, error_desc[errnum],
			    delta == 1 ? "" : "s", total);
		}
	}
}
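
/*
 * Added note: sio_timeouts_until_log = hz / sio_timeout means the error
 * summary above is printed at most about once per second no matter how
 * short the polling period is; e.g. sio_timeout = hz / 200 yields 200
 * timeouts between log messages, which again spans one second of ticks.
 */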

#ifdef PC98
/* commint is called when modem control line changes */
static void
commint(dev_t dev)
{
	register struct tty	*tp;
	int	stat, delta;
	struct com_s	*com;
	int	mynor, unit;

	mynor = minor(dev);
	unit = MINOR_TO_UNIT(mynor);
	com = com_addr(unit);
	tp = com->tp;

	stat = com_tiocm_get(com);
	delta = com_tiocm_get_delta(com);

	if (com->state & CS_CTS_OFLOW) {
		if (stat & TIOCM_CTS)
			com->state |= CS_ODEVREADY;
		else
			com->state &= ~CS_ODEVREADY;
	}
	if ((delta & TIOCM_CAR) && (mynor & CALLOUT_MASK) == 0) {
		if (stat & TIOCM_CAR)
			(void)(*linesw[tp->t_line].l_modem)(tp, 1);
		else if ((*linesw[tp->t_line].l_modem)(tp, 0) == 0) {
			/* negate DTR, RTS */
			com_tiocm_bic(com, (tp->t_cflag & HUPCL) ?
				TIOCM_DTR|TIOCM_RTS|TIOCM_LE : TIOCM_LE);
			/* disable IENABLE */
			com_int_TxRx_disable(com);
		}
	}
}
#endif

static void
disc_optim(tp, t, com)
	struct tty	*tp;
	struct termios	*t;
	struct com_s	*com;
{
	if (!(t->c_iflag & (ICRNL | IGNCR | IMAXBEL | INLCR | ISTRIP | IXON))
	    && (!(t->c_iflag & BRKINT) || (t->c_iflag & IGNBRK))
	    && (!(t->c_iflag & PARMRK)
		|| (t->c_iflag & (IGNPAR | IGNBRK)) == (IGNPAR | IGNBRK))
	    && !(t->c_lflag & (ECHO | ICANON | IEXTEN | ISIG | PENDIN))
	    && linesw[tp->t_line].l_rint == ttyinput)
		tp->t_state |= TS_CAN_BYPASS_L_RINT;
	else
		tp->t_state &= ~TS_CAN_BYPASS_L_RINT;
	com->hotchar = linesw[tp->t_line].l_hotchar;
}

/*
 * Following are all routines needed for SIO to act as console
 */
#include <sys/cons.h>

struct siocnstate {
	u_char	dlbl;
	u_char	dlbh;
	u_char	ier;
	u_char	cfcr;
	u_char	mcr;
};

#ifndef __alpha__
static speed_t siocngetspeed(Port_t, u_long rclk);
#endif
static void siocnclose(struct siocnstate *sp, Port_t iobase);
static void siocnopen(struct siocnstate *sp, Port_t iobase, int speed);
static void siocntxwait(Port_t iobase);

#ifdef __alpha__
int siocnattach(int port, int speed);
int siogdbattach(int port, int speed);
int siogdbgetc(void);
void siogdbputc(int c);
#else
static cn_probe_t siocnprobe;
static cn_init_t siocninit;
static cn_term_t siocnterm;
#endif
static cn_checkc_t siocncheckc;
static cn_getc_t siocngetc;
static cn_putc_t siocnputc;

#ifndef __alpha__
CONS_DRIVER(sio, siocnprobe, siocninit, siocnterm, siocngetc, siocncheckc,
	    siocnputc, NULL);
#endif

/* To get the GDB related variables */
#if DDB > 0
#include <ddb/ddb.h>
#endif

static void
siocntxwait(iobase)
	Port_t	iobase;
{
	int	timo;

	/*
	 * Wait for any pending transmission to finish.  Required to avoid
	 * the UART lockup bug when the speed is changed, and for normal
	 * transmits.
	 */
	timo = 100000;
	while ((inb(iobase + com_lsr) & (LSR_TSRE | LSR_TXRDY))
	       != (LSR_TSRE | LSR_TXRDY) && --timo != 0)
		;
}

#ifndef __alpha__

/*
 * Read the serial port specified and try to figure out what speed
 * it's currently running at.  We're assuming the serial port has
 * been initialized and is basically idle.  This routine is only intended
 * to be run at system startup.
 *
 * If the value read from the serial port doesn't make sense, return 0.
 */

static speed_t
siocngetspeed(iobase, rclk)
	Port_t	iobase;
	u_long	rclk;
{
	u_int	divisor;
	u_char	dlbh;
	u_char	dlbl;
	u_char	cfcr;

	cfcr = inb(iobase + com_cfcr);
	outb(iobase + com_cfcr, CFCR_DLAB | cfcr);

	dlbl = inb(iobase + com_dlbl);
	dlbh = inb(iobase + com_dlbh);

	outb(iobase + com_cfcr, cfcr);

	divisor = dlbh << 8 | dlbl;

	/* XXX there should be more sanity checking. */
	if (divisor == 0)
		return (CONSPEED);
	return (rclk / (16UL * divisor));
}
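
/*
 * Added worked example (assuming the common 1.8432 MHz UART reference
 * clock, i.e. rclk = 1843200): a divisor latch value of 12 yields
 * 1843200 / (16 * 12) = 9600 bps, and a divisor of 1 yields 115200 bps.
 * A divisor of 0 read back from the latch is treated as nonsense and
 * CONSPEED is returned instead.
 */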

#endif

static void
siocnopen(sp, iobase, speed)
	struct siocnstate	*sp;
	Port_t			iobase;
	int			speed;
{
	u_int	divisor;
	u_char	dlbh;
	u_char	dlbl;

	/*
	 * Save all the device control registers except the fifo register
	 * and set our default ones (cs8 -parenb speed=comdefaultrate).
	 * We can't save the fifo register since it is read-only.
	 */
	sp->ier = inb(iobase + com_ier);
	outb(iobase + com_ier, 0);	/* spltty() doesn't stop siointr() */
	siocntxwait(iobase);
	sp->cfcr = inb(iobase + com_cfcr);
	outb(iobase + com_cfcr, CFCR_DLAB | CFCR_8BITS);
	sp->dlbl = inb(iobase + com_dlbl);
	sp->dlbh = inb(iobase + com_dlbh);
	/*
	 * Only set the divisor registers if they would change, since on
	 * some 16550 incompatibles (Startech), setting them clears the
	 * data input register.  This also reduces the effects of the
	 * UMC8669F bug.
	 */
	divisor = siodivisor(comdefaultrclk, speed);
	dlbl = divisor & 0xFF;
	if (sp->dlbl != dlbl)
		outb(iobase + com_dlbl, dlbl);
	dlbh = divisor >> 8;
	if (sp->dlbh != dlbh)
		outb(iobase + com_dlbh, dlbh);
	outb(iobase + com_cfcr, CFCR_8BITS);
	sp->mcr = inb(iobase + com_mcr);
	/*
	 * We don't want interrupts, but must be careful not to "disable"
	 * them by clearing the MCR_IENABLE bit, since that might cause
	 * an interrupt by floating the IRQ line.
	 */
	outb(iobase + com_mcr, (sp->mcr & MCR_IENABLE) | MCR_DTR | MCR_RTS);
}
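
/*
 * Added usage sketch: the console I/O routines below bracket every polled
 * access with this save/program/restore pair, along the lines of
 *
 *	struct siocnstate sp;
 *
 *	siocnopen(&sp, iobase, comdefaultrate);
 *	... poll LSR, read or write com_data ...
 *	siocnclose(&sp, iobase);
 *
 * so console polling does not leave the UART in a state the interrupt
 * driven side of the driver does not expect.
 */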

static void
siocnclose(sp, iobase)
	struct siocnstate	*sp;
	Port_t			iobase;
{
	/*
	 * Restore the device control registers.
	 */
	siocntxwait(iobase);
	outb(iobase + com_cfcr, CFCR_DLAB | CFCR_8BITS);
	if (sp->dlbl != inb(iobase + com_dlbl))
		outb(iobase + com_dlbl, sp->dlbl);
	if (sp->dlbh != inb(iobase + com_dlbh))
		outb(iobase + com_dlbh, sp->dlbh);
	outb(iobase + com_cfcr, sp->cfcr);
	/*
	 * XXX damp oscillations of MCR_DTR and MCR_RTS by not restoring them.
	 */
	outb(iobase + com_mcr, sp->mcr | MCR_DTR | MCR_RTS);
	outb(iobase + com_ier, sp->ier);
}

#ifndef __alpha__

static void
siocnprobe(cp)
	struct consdev	*cp;
{
	speed_t			boot_speed;
	u_char			cfcr;
	u_int			divisor;
	int			s, unit;
	struct siocnstate	sp;

	/*
	 * Find our first enabled console, if any.  If it is a high-level
	 * console device, then initialize it and return successfully.
	 * If it is a low-level console device, then initialize it and
	 * return unsuccessfully.  It must be initialized in both cases
	 * for early use by console drivers and debuggers.  Initializing
	 * the hardware is not necessary in all cases, since the i/o
	 * routines initialize it on the fly, but it is necessary if
	 * input might arrive while the hardware is switched back to an
	 * uninitialized state.  We can't handle multiple console devices
	 * yet because our low-level routines don't take a device arg.
	 * We trust the user to set the console flags properly so that we
	 * don't need to probe.
	 */
	cp->cn_pri = CN_DEAD;

	for (unit = 0; unit < 16; unit++) { /* XXX need to know how many */
		int flags;
		int disabled;

		if (resource_int_value("sio", unit, "disabled", &disabled) == 0) {
			if (disabled)
				continue;
		}
		if (resource_int_value("sio", unit, "flags", &flags))
			continue;
		if (COM_CONSOLE(flags) || COM_DEBUGGER(flags)) {
			int port;
			Port_t iobase;

			if (resource_int_value("sio", unit, "port", &port))
				continue;
			iobase = port;
			s = spltty();
			if (boothowto & RB_SERIAL) {
				boot_speed =
				    siocngetspeed(iobase, comdefaultrclk);
				if (boot_speed)
					comdefaultrate = boot_speed;
			}

			/*
			 * Initialize the divisor latch.  We can't rely on
			 * siocnopen() to do this the first time, since it
			 * avoids writing to the latch if the latch appears
			 * to have the correct value.  Also, if we didn't
			 * just read the speed from the hardware, then we
			 * need to set the speed in hardware so that
			 * switching it later is null.
			 */
			cfcr = inb(iobase + com_cfcr);
			outb(iobase + com_cfcr, CFCR_DLAB | cfcr);
			divisor = siodivisor(comdefaultrclk, comdefaultrate);
			outb(iobase + com_dlbl, divisor & 0xff);
			outb(iobase + com_dlbh, divisor >> 8);
			outb(iobase + com_cfcr, cfcr);

			siocnopen(&sp, iobase, comdefaultrate);

			splx(s);
			if (COM_CONSOLE(flags) && !COM_LLCONSOLE(flags)) {
				cp->cn_dev = makedev(CDEV_MAJOR, unit);
				cp->cn_pri = COM_FORCECONSOLE(flags)
					     || boothowto & RB_SERIAL
					     ? CN_REMOTE : CN_NORMAL;
				siocniobase = iobase;
				siocnunit = unit;
			}
			if (COM_DEBUGGER(flags)) {
				printf("sio%d: gdb debugging port\n", unit);
				siogdbiobase = iobase;
				siogdbunit = unit;
#if DDB > 0
				gdbdev = makedev(CDEV_MAJOR, unit);
				gdb_getc = siocngetc;
				gdb_putc = siocnputc;
#endif
			}
		}
	}
#ifdef __i386__
#if DDB > 0
	/*
	 * XXX Ugly Compatibility.
	 * If no gdb port has been specified, set it to be the console
	 * as some configuration files don't specify the gdb port.
	 */
	if (gdbdev == NODEV && (boothowto & RB_GDB)) {
		printf("Warning: no GDB port specified. Defaulting to sio%d.\n",
			siocnunit);
		printf("Set flag 0x80 on desired GDB port in your\n");
		printf("configuration file (currently sio only).\n");
		siogdbiobase = siocniobase;
		siogdbunit = siocnunit;
		gdbdev = makedev(CDEV_MAJOR, siocnunit);
		gdb_getc = siocngetc;
		gdb_putc = siocnputc;
	}
#endif
#endif
}

static void
siocninit(cp)
	struct consdev	*cp;
{
	comconsole = DEV_TO_UNIT(cp->cn_dev);
}

static void
siocnterm(cp)
	struct consdev	*cp;
{
	comconsole = -1;
}

#endif

#ifdef __alpha__

CONS_DRIVER(sio, NULL, NULL, NULL, siocngetc, siocncheckc, siocnputc, NULL);

int
siocnattach(port, speed)
	int port;
	int speed;
{
	int			s;
	u_char			cfcr;
	u_int			divisor;
	struct siocnstate	sp;

	siocniobase = port;
	comdefaultrate = speed;
	sio_consdev.cn_pri = CN_NORMAL;
	sio_consdev.cn_dev = makedev(CDEV_MAJOR, 0);

	s = spltty();

	/*
	 * Initialize the divisor latch.  We can't rely on
	 * siocnopen() to do this the first time, since it
	 * avoids writing to the latch if the latch appears
	 * to have the correct value.  Also, if we didn't
	 * just read the speed from the hardware, then we
	 * need to set the speed in hardware so that
	 * switching it later is null.
	 */
	cfcr = inb(siocniobase + com_cfcr);
	outb(siocniobase + com_cfcr, CFCR_DLAB | cfcr);
	divisor = siodivisor(comdefaultrclk, comdefaultrate);
	outb(siocniobase + com_dlbl, divisor & 0xff);
	outb(siocniobase + com_dlbh, divisor >> 8);
	outb(siocniobase + com_cfcr, cfcr);

	siocnopen(&sp, siocniobase, comdefaultrate);
	splx(s);

	cnadd(&sio_consdev);
	return (0);
}

int
siogdbattach(port, speed)
	int port;
	int speed;
{
	int			s;
	u_char			cfcr;
	u_int			divisor;
	struct siocnstate	sp;
	int			unit = 1;	/* XXX !!! */

	siogdbiobase = port;
	gdbdefaultrate = speed;

	printf("sio%d: gdb debugging port\n", unit);
	siogdbunit = unit;
#if DDB > 0
	gdbdev = makedev(CDEV_MAJOR, unit);
	gdb_getc = siocngetc;
	gdb_putc = siocnputc;
#endif

	s = spltty();

	/*
	 * Initialize the divisor latch.  We can't rely on
	 * siocnopen() to do this the first time, since it
	 * avoids writing to the latch if the latch appears
	 * to have the correct value.  Also, if we didn't
	 * just read the speed from the hardware, then we
	 * need to set the speed in hardware so that
	 * switching it later is null.
	 */
	cfcr = inb(siogdbiobase + com_cfcr);
	outb(siogdbiobase + com_cfcr, CFCR_DLAB | cfcr);
	divisor = siodivisor(comdefaultrclk, gdbdefaultrate);
	outb(siogdbiobase + com_dlbl, divisor & 0xff);
	outb(siogdbiobase + com_dlbh, divisor >> 8);
	outb(siogdbiobase + com_cfcr, cfcr);

	siocnopen(&sp, siogdbiobase, gdbdefaultrate);
	splx(s);

	return (0);
}

#endif

static int
siocncheckc(dev)
	dev_t	dev;
{
	int	c;
	Port_t	iobase;
	int	s;
	struct siocnstate	sp;

	if (minor(dev) == siogdbunit)
		iobase = siogdbiobase;
	else
		iobase = siocniobase;
	s = spltty();
	siocnopen(&sp, iobase, comdefaultrate);
	if (inb(iobase + com_lsr) & LSR_RXRDY)
		c = inb(iobase + com_data);
	else
		c = -1;
	siocnclose(&sp, iobase);
	splx(s);
	return (c);
}


int
siocngetc(dev)
	dev_t	dev;
{
	int	c;
	Port_t	iobase;
	int	s;
	struct siocnstate	sp;

	if (minor(dev) == siogdbunit)
		iobase = siogdbiobase;
	else
		iobase = siocniobase;
	s = spltty();
	siocnopen(&sp, iobase, comdefaultrate);
	while (!(inb(iobase + com_lsr) & LSR_RXRDY))
		;
	c = inb(iobase + com_data);
	siocnclose(&sp, iobase);
	splx(s);
	return (c);
}

void
siocnputc(dev, c)
	dev_t	dev;
	int	c;
{
	int	need_unlock;
	int	s;
	struct siocnstate	sp;
	Port_t	iobase;

	if (minor(dev) == siogdbunit)
		iobase = siogdbiobase;
	else
		iobase = siocniobase;
	s = spltty();
	need_unlock = 0;
	if (sio_inited == 2 && !mtx_owned(&sio_lock)) {
		mtx_lock_spin(&sio_lock);
		need_unlock = 1;
	}
	siocnopen(&sp, iobase, comdefaultrate);
	siocntxwait(iobase);
	outb(iobase + com_data, c);
	siocnclose(&sp, iobase);
	if (need_unlock)
		mtx_unlock_spin(&sio_lock);
	splx(s);
}
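
/*
 * Added note: the sio_inited == 2 && !mtx_owned(&sio_lock) test above takes
 * the spin lock only once the lock appears to have been fully set up and the
 * current context does not already hold it, so console output issued from
 * very early boot or from inside the driver's own locked paths does not
 * recurse on or block against sio_lock.
 */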

#ifdef __alpha__
int
siogdbgetc()
{
	int	c;
	Port_t	iobase;
	int	s;
	struct siocnstate	sp;

	iobase = siogdbiobase;
	s = spltty();
	siocnopen(&sp, iobase, gdbdefaultrate);
	while (!(inb(iobase + com_lsr) & LSR_RXRDY))
		;
	c = inb(iobase + com_data);
	siocnclose(&sp, iobase);
	splx(s);
	return (c);
}

void
siogdbputc(c)
	int	c;
{
	int	s;
	struct siocnstate	sp;

	s = spltty();
	siocnopen(&sp, siogdbiobase, gdbdefaultrate);
	siocntxwait(siogdbiobase);
	outb(siogdbiobase + com_data, c);
	siocnclose(&sp, siogdbiobase);
	splx(s);
}
#endif

#ifdef PC98
/*
 *  pc98 local function
 */

static void
com_tiocm_set(struct com_s *com, int msr)
{
	int	s;
	int	tmp = 0;
	int	mask = CMD8251_TxEN|CMD8251_RxEN|CMD8251_DTR|CMD8251_RTS;

	s = spltty();
	com->pc98_prev_modem_status = ( msr & (TIOCM_LE|TIOCM_DTR|TIOCM_RTS) )
	   | ( com->pc98_prev_modem_status & ~(TIOCM_LE|TIOCM_DTR|TIOCM_RTS) );
	tmp |= (CMD8251_TxEN|CMD8251_RxEN);
	if ( msr & TIOCM_DTR ) tmp |= CMD8251_DTR;
	if ( msr & TIOCM_RTS ) tmp |= CMD8251_RTS;
	pc98_i8251_clear_or_cmd( com, mask, tmp );
	splx(s);
}

static void
com_tiocm_bis(struct com_s *com, int msr)
{
	int	s;
	int	tmp = 0;

	s = spltty();
	com->pc98_prev_modem_status |= ( msr & (TIOCM_LE|TIOCM_DTR|TIOCM_RTS) );
	tmp |= CMD8251_TxEN|CMD8251_RxEN;
	if ( msr & TIOCM_DTR ) tmp |= CMD8251_DTR;
	if ( msr & TIOCM_RTS ) tmp |= CMD8251_RTS;

	pc98_i8251_or_cmd( com, tmp );
	splx(s);
}

static void
com_tiocm_bic(struct com_s *com, int msr)
{
	int	s;
	int	tmp = msr;

	s = spltty();
	com->pc98_prev_modem_status &= ~( msr & (TIOCM_LE|TIOCM_DTR|TIOCM_RTS) );
	if ( msr & TIOCM_DTR ) tmp |= CMD8251_DTR;
	if ( msr & TIOCM_RTS ) tmp |= CMD8251_RTS;

	pc98_i8251_clear_cmd( com, tmp );
	splx(s);
}

static int
com_tiocm_get(struct com_s *com)
{
	return( com->pc98_prev_modem_status );
}

static int
com_tiocm_get_delta(struct com_s *com)
{
	int	tmp;

	tmp = com->pc98_modem_delta;
	com->pc98_modem_delta = 0;
	return( tmp );
}

/* convert to TIOCM_?? ( ioctl.h ) */
static int
pc98_get_modem_status(struct com_s *com)
{
	register int	msr;

	msr = com->pc98_prev_modem_status
	      & ~(TIOCM_CAR|TIOCM_RI|TIOCM_DSR|TIOCM_CTS);
	if (com->pc98_8251fifo_enable) {
		int	stat2;

		stat2 = inb(I8251F_msr);
		if ( stat2 & CICSCDF_CD ) msr |= TIOCM_CAR;
		if ( stat2 & CICSCDF_CI ) msr |= TIOCM_RI;
		if ( stat2 & CICSCDF_DR ) msr |= TIOCM_DSR;
		if ( stat2 & CICSCDF_CS ) msr |= TIOCM_CTS;
#if COM_CARRIER_DETECT_EMULATE
		if ( msr & (TIOCM_DSR|TIOCM_CTS) ) {
			msr |= TIOCM_CAR;
		}
#endif
	} else {
		int	stat, stat2;

		stat  = inb(com->sts_port);
		stat2 = inb(com->in_modem_port);
		if ( !(stat2 & CICSCD_CD) ) msr |= TIOCM_CAR;
		if ( !(stat2 & CICSCD_CI) ) msr |= TIOCM_RI;
		if ( stat & STS8251_DSR ) msr |= TIOCM_DSR;
		if ( !(stat2 & CICSCD_CS) ) msr |= TIOCM_CTS;
#if COM_CARRIER_DETECT_EMULATE
		if ( msr & (TIOCM_DSR|TIOCM_CTS) ) {
			msr |= TIOCM_CAR;
		}
#endif
	}
	return (msr);
}
|
|
|
|
|
|
|
|
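/*
 * Timeout handler that polls the modem-status lines, debounces carrier
 * changes, accumulates the delta bits and, while the port is open, calls
 * commint() and reschedules itself every PC98_CHECK_MODEM_INTERVAL.
 */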
static void
|
|
|
|
pc98_check_msr(void* chan)
|
|
|
|
{
|
|
|
|
int msr, delta;
|
|
|
|
int s;
|
|
|
|
register struct tty *tp;
|
|
|
|
struct com_s *com;
|
|
|
|
int mynor;
|
|
|
|
int unit;
|
|
|
|
dev_t dev;
|
|
|
|
|
|
|
|
	dev = (dev_t)chan;
|
|
|
|
mynor = minor(dev);
|
|
|
|
unit = MINOR_TO_UNIT(mynor);
|
|
|
|
com = com_addr(unit);
|
|
|
|
tp = com->tp;
|
|
|
|
|
|
|
|
s = spltty();
|
|
|
|
msr = pc98_get_modem_status(com);
|
|
|
|
	/* compute which modem-status bits changed */
|
|
|
|
delta = msr ^ com->pc98_prev_modem_status;
|
|
|
|
if ( delta & TIOCM_CAR ) {
|
|
|
|
if ( com->modem_car_chg_timer ) {
|
|
|
|
if ( -- com->modem_car_chg_timer )
|
|
|
|
msr ^= TIOCM_CAR;
|
|
|
|
} else {
|
1999-02-02 17:26:03 +00:00
|
|
|
if ((com->modem_car_chg_timer = (msr & TIOCM_CAR) ?
|
|
|
|
DCD_ON_RECOGNITION : DCD_OFF_TOLERANCE) != 0)
|
1996-06-14 10:04:54 +00:00
|
|
|
msr ^= TIOCM_CAR;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
com->modem_car_chg_timer = 0;
|
|
|
|
delta = ( msr ^ com->pc98_prev_modem_status ) &
|
|
|
|
(TIOCM_CAR|TIOCM_RI|TIOCM_DSR|TIOCM_CTS);
|
|
|
|
com->pc98_prev_modem_status = msr;
|
|
|
|
delta = ( com->pc98_modem_delta |= delta );
|
|
|
|
splx(s);
|
|
|
|
if ( com->modem_checking || (tp->t_state & (TS_ISOPEN)) ) {
|
|
|
|
if ( delta ) {
|
|
|
|
commint(dev);
|
|
|
|
}
|
|
|
|
timeout(pc98_check_msr, (caddr_t)dev,
|
|
|
|
PC98_CHECK_MODEM_INTERVAL);
|
|
|
|
} else {
|
|
|
|
com->modem_checking = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
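/*
 * Start the periodic modem-status poll for a port if it is not already
 * running; the i8251-style ports have no modem-status interrupt, so the
 * lines are sampled by timeout instead.
 */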
static void
|
|
|
|
pc98_msrint_start(dev_t dev)
|
|
|
|
{
|
|
|
|
struct com_s *com;
|
|
|
|
int mynor;
|
|
|
|
int unit;
|
|
|
|
int s = spltty();
|
|
|
|
|
|
|
|
mynor = minor(dev);
|
|
|
|
unit = MINOR_TO_UNIT(mynor);
|
|
|
|
com = com_addr(unit);
|
|
|
|
	/* the modem control line check routine runs every 1/10 second */
|
|
|
|
if ( com->modem_checking == 0 ) {
|
|
|
|
com->pc98_prev_modem_status = pc98_get_modem_status(com);
|
|
|
|
com->pc98_modem_delta = 0;
|
|
|
|
timeout(pc98_check_msr, (caddr_t)dev,
|
|
|
|
PC98_CHECK_MODEM_INTERVAL);
|
|
|
|
com->modem_checking = 1;
|
|
|
|
}
|
|
|
|
splx(s);
|
|
|
|
}
|
|
|
|
|
|
|
|
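/*
 * Disable (and, in the next function, enable) the given i8251 interrupt
 * sources, updating the cached enable mask and the interrupt control
 * port under COM_INT_DISABLE/COM_INT_ENABLE.
 */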
static void
|
|
|
|
pc98_disable_i8251_interrupt(struct com_s *com, int mod)
|
|
|
|
{
|
|
|
|
/* disable interrupt */
|
|
|
|
register int tmp;
|
|
|
|
|
|
|
|
mod |= ~(IEN_Tx|IEN_TxEMP|IEN_Rx);
|
|
|
|
COM_INT_DISABLE
|
|
|
|
tmp = inb( com->intr_ctrl_port ) & ~(IEN_Tx|IEN_TxEMP|IEN_Rx);
|
|
|
|
outb( com->intr_ctrl_port, (com->intr_enable&=~mod) | tmp );
|
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pc98_enable_i8251_interrupt(struct com_s *com, int mod)
|
|
|
|
{
|
|
|
|
register int tmp;
|
|
|
|
|
|
|
|
COM_INT_DISABLE
|
|
|
|
tmp = inb( com->intr_ctrl_port ) & ~(IEN_Tx|IEN_TxEMP|IEN_Rx);
|
|
|
|
outb( com->intr_ctrl_port, (com->intr_enable|=mod) | tmp );
|
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
pc98_check_i8251_interrupt(struct com_s *com)
|
|
|
|
{
|
|
|
|
return ( com->intr_enable & 0x07 );
|
|
|
|
}
|
|
|
|
|
|
|
|
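/*
 * Helpers for the write-only i8251 command register: each variant clears
 * and/or sets bits relative to the last value cached in pc98_prev_siocmd,
 * temporarily dropping the FIFO enable around the write when the
 * enhanced (FIFO) mode is active.
 */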
static void
|
|
|
|
pc98_i8251_clear_cmd(struct com_s *com, int x)
|
|
|
|
{
|
|
|
|
int tmp;
|
|
|
|
|
|
|
|
COM_INT_DISABLE
|
|
|
|
tmp = com->pc98_prev_siocmd & ~(x);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->cmd_port, tmp);
|
|
|
|
com->pc98_prev_siocmd = tmp & ~(CMD8251_ER|CMD8251_RESET|CMD8251_EH);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE);
|
1996-06-14 10:04:54 +00:00
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pc98_i8251_or_cmd(struct com_s *com, int x)
|
|
|
|
{
|
|
|
|
int tmp;
|
|
|
|
|
|
|
|
COM_INT_DISABLE
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
tmp = com->pc98_prev_siocmd | (x);
|
|
|
|
outb(com->cmd_port, tmp);
|
|
|
|
com->pc98_prev_siocmd = tmp & ~(CMD8251_ER|CMD8251_RESET|CMD8251_EH);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE);
|
1996-06-14 10:04:54 +00:00
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pc98_i8251_set_cmd(struct com_s *com, int x)
|
|
|
|
{
|
|
|
|
int tmp;
|
|
|
|
|
|
|
|
COM_INT_DISABLE
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
tmp = (x);
|
|
|
|
outb(com->cmd_port, tmp);
|
|
|
|
com->pc98_prev_siocmd = tmp & ~(CMD8251_ER|CMD8251_RESET|CMD8251_EH);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE);
|
1996-06-14 10:04:54 +00:00
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
pc98_i8251_clear_or_cmd(struct com_s *com, int clr, int x)
|
|
|
|
{
|
|
|
|
int tmp;
|
|
|
|
COM_INT_DISABLE
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
tmp = com->pc98_prev_siocmd & ~(clr);
|
|
|
|
tmp |= (x);
|
|
|
|
outb(com->cmd_port, tmp);
|
|
|
|
com->pc98_prev_siocmd = tmp & ~(CMD8251_ER|CMD8251_RESET|CMD8251_EH);
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE);
|
1996-06-14 10:04:54 +00:00
|
|
|
COM_INT_ENABLE
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
pc98_i8251_get_cmd(struct com_s *com)
|
|
|
|
{
|
|
|
|
return com->pc98_prev_siocmd;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
pc98_i8251_get_mod(struct com_s *com)
|
|
|
|
{
|
|
|
|
return com->pc98_prev_siomod;
|
|
|
|
}
|
|
|
|
|
|
|
|
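/*
 * Reset the i8251: three dummy command writes put the chip into a known
 * state, an internal reset follows, then the mode register and the
 * initial command (with error reset) are loaded.  If the FIFO extension
 * is enabled, its transmit and receive FIFOs are reset as well.
 */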
static void
|
|
|
|
pc98_i8251_reset(struct com_s *com, int mode, int command)
|
|
|
|
{
|
1999-12-06 00:23:38 +00:00
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, 0);
|
1996-06-14 10:04:54 +00:00
|
|
|
outb(com->cmd_port, 0); /* dummy */
|
|
|
|
DELAY(2);
|
|
|
|
outb(com->cmd_port, 0); /* dummy */
|
|
|
|
DELAY(2);
|
|
|
|
outb(com->cmd_port, 0); /* dummy */
|
|
|
|
DELAY(2);
|
|
|
|
outb(com->cmd_port, CMD8251_RESET); /* internal reset */
|
|
|
|
DELAY(2);
|
|
|
|
outb(com->cmd_port, mode ); /* mode register */
|
|
|
|
com->pc98_prev_siomod = mode;
|
|
|
|
DELAY(2);
|
|
|
|
pc98_i8251_set_cmd( com, (command|CMD8251_ER) );
|
1999-12-06 00:23:38 +00:00
|
|
|
DELAY(10);
|
|
|
|
if (com->pc98_8251fifo_enable)
|
|
|
|
outb(I8251F_fcr, CTRL8251F_ENABLE |
|
|
|
|
CTRL8251F_XMT_RST | CTRL8251F_RCV_RST);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
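/*
 * Record whether this machine runs on the 8 MHz or the 5 MHz system
 * clock family; the value is later used to derive baud-rate divisors
 * for the internal port.
 */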
static void
|
|
|
|
pc98_check_sysclock(void)
|
|
|
|
{
|
|
|
|
	/* determine the system clock from the machine type */
|
|
|
|
if ( pc98_machine_type & M_8M ) {
|
|
|
|
/* 8 MHz system & H98 */
|
|
|
|
sysclock = 8;
|
|
|
|
} else {
|
|
|
|
/* 5 MHz system */
|
|
|
|
sysclock = 5;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
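/*
 * Program the i8251 from a termios cflag and speed: compute the baud
 * divisor, build the mode-register value (character size, parity, stop
 * bits, x1/x16 clock), wait for the transmitter to drain (except on one
 * Epson model), set the baud rate, and reset the chip if the mode
 * actually changed.  The port's interrupts are masked for the duration
 * and restored afterwards.
 */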
static void
|
|
|
|
com_cflag_and_speed_set( struct com_s *com, int cflag, int speed)
|
|
|
|
{
|
2002-03-08 12:12:46 +00:00
|
|
|
	int	cfcr = 0;
|
1996-07-23 07:46:59 +00:00
|
|
|
int previnterrupt;
|
2002-03-08 12:12:46 +00:00
|
|
|
u_int count;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
2002-03-08 12:12:46 +00:00
|
|
|
if (pc98_ttspeedtab(com, speed, &count) != 0)
|
|
|
|
return;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
previnterrupt = pc98_check_i8251_interrupt(com);
|
|
|
|
pc98_disable_i8251_interrupt( com, IEN_Tx|IEN_TxEMP|IEN_Rx );
|
|
|
|
|
|
|
|
switch ( cflag&CSIZE ) {
|
|
|
|
case CS5:
|
|
|
|
cfcr = MOD8251_5BITS; break;
|
|
|
|
case CS6:
|
|
|
|
cfcr = MOD8251_6BITS; break;
|
|
|
|
case CS7:
|
|
|
|
cfcr = MOD8251_7BITS; break;
|
|
|
|
case CS8:
|
|
|
|
cfcr = MOD8251_8BITS; break;
|
|
|
|
}
|
|
|
|
if ( cflag&PARENB ) {
|
|
|
|
if ( cflag&PARODD )
|
|
|
|
cfcr |= MOD8251_PODD;
|
|
|
|
else
|
|
|
|
cfcr |= MOD8251_PEVEN;
|
|
|
|
} else
|
|
|
|
cfcr |= MOD8251_PDISAB;
|
|
|
|
|
|
|
|
if ( cflag&CSTOPB )
|
|
|
|
cfcr |= MOD8251_STOP2;
|
|
|
|
else
|
|
|
|
cfcr |= MOD8251_STOP1;
|
|
|
|
|
|
|
|
if ( count & 0x10000 )
|
|
|
|
cfcr |= MOD8251_CLKX1;
|
|
|
|
else
|
|
|
|
cfcr |= MOD8251_CLKX16;
|
|
|
|
|
|
|
|
if (epson_machine_id != 0x20) { /* XXX */
|
|
|
|
int tmp;
|
|
|
|
while (!((tmp = inb(com->sts_port)) & STS8251_TxEMP))
|
|
|
|
;
|
|
|
|
}
|
|
|
|
/* set baud rate from ospeed */
|
|
|
|
pc98_set_baud_rate( com, count );
|
|
|
|
|
|
|
|
if ( cfcr != pc98_i8251_get_mod(com) )
|
|
|
|
pc98_i8251_reset(com, cfcr, pc98_i8251_get_cmd(com) );
|
|
|
|
|
|
|
|
pc98_enable_i8251_interrupt( com, previnterrupt );
|
|
|
|
}
|
|
|
|
|
|
|
|
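/*
 * Translate a line speed into a divisor for the given interface type.
 * For the internal port the divisor is derived from the system clock;
 * bit 0x10000 in the result requests x1 (instead of x16) clocking and
 * COM1_EXT_CLOCK marks use of the external baud-rate generator.
 * Returns 0 and stores the divisor on success, negative on failure.
 */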
static int
|
2002-03-08 12:12:46 +00:00
|
|
|
pc98_ttspeedtab(struct com_s *com, int speed, u_int *divisor)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
1999-01-03 05:03:47 +00:00
|
|
|
int if_type, effect_sp, count = -1, mod;
|
|
|
|
|
|
|
|
if_type = com->pc98_if_type & 0x0f;
|
|
|
|
|
|
|
|
switch (com->pc98_if_type) {
|
|
|
|
case COM_IF_INTERNAL:
|
|
|
|
if (PC98SIO_baud_rate_port(if_type) != -1) {
|
|
|
|
count = ttspeedtab(speed, if_8251_type[if_type].speedtab);
|
|
|
|
if (count > 0) {
|
|
|
|
count |= COM1_EXT_CLOCK;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* for *1CLK asynchronous! mode, TEFUTEFU */
|
|
|
|
mod = (sysclock == 5) ? 2457600 : 1996800;
|
|
|
|
effect_sp = ttspeedtab( speed, pc98speedtab );
|
|
|
|
if ( effect_sp < 0 ) /* XXX */
|
|
|
|
effect_sp = ttspeedtab( (speed - 1), pc98speedtab );
|
|
|
|
if ( effect_sp <= 0 )
|
|
|
|
return effect_sp;
|
|
|
|
if ( effect_sp == speed )
|
|
|
|
mod /= 16;
|
|
|
|
if ( mod % effect_sp )
|
|
|
|
return(-1);
|
|
|
|
count = mod / effect_sp;
|
|
|
|
if ( count > 65535 )
|
|
|
|
return(-1);
|
|
|
|
if ( effect_sp != speed )
|
|
|
|
count |= 0x10000;
|
|
|
|
break;
|
|
|
|
case COM_IF_PC9861K_1:
|
|
|
|
case COM_IF_PC9861K_2:
|
|
|
|
count = 1;
|
|
|
|
break;
|
|
|
|
case COM_IF_IND_SS_1:
|
|
|
|
case COM_IF_IND_SS_2:
|
|
|
|
case COM_IF_PIO9032B_1:
|
|
|
|
case COM_IF_PIO9032B_2:
|
|
|
|
count = ttspeedtab( speed, if_8251_type[if_type].speedtab );
|
|
|
|
break;
|
|
|
|
case COM_IF_B98_01_1:
|
|
|
|
case COM_IF_B98_01_2:
|
|
|
|
count = ttspeedtab( speed, if_8251_type[if_type].speedtab );
|
|
|
|
#ifdef B98_01_OLD
|
|
|
|
if (count == 0 || count == 1) {
|
|
|
|
count += 4;
|
|
|
|
count |= 0x20000; /* x1 mode for 76800 and 153600 */
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1999-01-03 05:03:47 +00:00
|
|
|
break;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
|
2002-03-08 12:12:46 +00:00
|
|
|
if (count < 0)
|
|
|
|
return count;
|
|
|
|
|
|
|
|
*divisor = (u_int) count;
|
|
|
|
return 0;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
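/*
 * Load the divisor computed by pc98_ttspeedtab() into the baud-rate
 * generator for this interface: for the internal port the i8253 timer
 * is programmed through ports 0x77/0x75 (the writes to port 0x5f appear
 * to serve as short I/O delays); the other boards have their own
 * divisor registers.
 */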
static void
|
2002-03-08 12:12:46 +00:00
|
|
|
pc98_set_baud_rate( struct com_s *com, u_int count )
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
1999-01-03 05:03:47 +00:00
|
|
|
int if_type, io, s;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
if_type = com->pc98_if_type & 0x0f;
|
2000-05-12 12:38:25 +00:00
|
|
|
io = rman_get_start(com->ioportres) & 0xff00;
|
1999-01-03 05:03:47 +00:00
|
|
|
|
|
|
|
switch (com->pc98_if_type) {
|
|
|
|
case COM_IF_INTERNAL:
|
|
|
|
if (PC98SIO_baud_rate_port(if_type) != -1) {
|
|
|
|
if (count & COM1_EXT_CLOCK) {
|
|
|
|
outb((Port_t)PC98SIO_baud_rate_port(if_type), count & 0xff);
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
outb((Port_t)PC98SIO_baud_rate_port(if_type), 0x09);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-03-08 12:12:46 +00:00
|
|
|
if (count == 0)
|
1999-01-03 05:03:47 +00:00
|
|
|
return;
|
2002-03-08 12:12:46 +00:00
|
|
|
|
1999-01-03 05:03:47 +00:00
|
|
|
/* set i8253 */
|
|
|
|
s = splclock();
|
|
|
|
if (count != 3)
|
1996-06-14 10:04:54 +00:00
|
|
|
outb( 0x77, 0xb6 );
|
1999-01-03 05:03:47 +00:00
|
|
|
else
|
|
|
|
outb( 0x77, 0xb4 );
|
|
|
|
outb( 0x5f, 0);
|
|
|
|
outb( 0x75, count & 0xff );
|
|
|
|
outb( 0x5f, 0);
|
|
|
|
outb( 0x75, (count >> 8) & 0xff );
|
|
|
|
splx(s);
|
|
|
|
break;
|
|
|
|
case COM_IF_IND_SS_1:
|
|
|
|
case COM_IF_IND_SS_2:
|
|
|
|
outb(io | PC98SIO_intr_ctrl_port(if_type), 0);
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), 0);
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), 0xc0);
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), (count >> 8) | 0x80);
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), count & 0xff);
|
|
|
|
break;
|
|
|
|
case COM_IF_PIO9032B_1:
|
|
|
|
case COM_IF_PIO9032B_2:
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), count);
|
|
|
|
break;
|
|
|
|
case COM_IF_B98_01_1:
|
|
|
|
case COM_IF_B98_01_2:
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), count & 0x0f);
|
1996-06-14 10:04:54 +00:00
|
|
|
#ifdef B98_01_OLD
|
1999-01-03 05:03:47 +00:00
|
|
|
/*
|
|
|
|
		 * Some old B98_01 boards should be controlled
|
|
|
|
		 * in a different way, but this has not been tested yet.
|
|
|
|
*/
|
|
|
|
outb(io | PC98SIO_func_port(if_type),
|
|
|
|
(count & 0x20000) ? 0xf0 : 0xf2);
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1999-01-03 05:03:47 +00:00
|
|
|
break;
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
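/*
 * Probe the configured interface type: fill in the command/status/modem/
 * interrupt-control port addresses for i8251-style ports and derive the
 * IRQ, either from the fixed internal assignment (IRQ 4) or from the
 * board's interrupt routing register.  Returns -1 if no usable IRQ is
 * found, 0 otherwise.
 */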
static int
|
1999-04-18 14:42:20 +00:00
|
|
|
pc98_check_if_type(device_t dev, struct siodev *iod)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
1999-01-03 05:03:47 +00:00
|
|
|
int irr, io, if_type, tmp;
|
1996-06-14 10:04:54 +00:00
|
|
|
static short irq_tab[2][8] = {
|
|
|
|
{ 3, 5, 6, 9, 10, 12, 13, -1},
|
|
|
|
{ 3, 10, 12, 13, 5, 6, 9, -1}
|
|
|
|
};
|
1999-01-03 05:03:47 +00:00
|
|
|
|
2000-05-12 12:38:25 +00:00
|
|
|
if_type = iod->if_type & 0x0f;
|
1996-06-14 10:04:54 +00:00
|
|
|
iod->irq = 0;
|
1999-04-18 14:42:20 +00:00
|
|
|
io = isa_get_port(dev) & 0xff00;
|
1999-01-03 05:03:47 +00:00
|
|
|
|
|
|
|
if (IS_8251(iod->if_type)) {
|
|
|
|
if (PC98SIO_func_port(if_type) != -1) {
|
|
|
|
outb(io | PC98SIO_func_port(if_type), 0xf2);
|
|
|
|
tmp = ttspeedtab(9600, if_8251_type[if_type].speedtab);
|
|
|
|
if (tmp != -1 && PC98SIO_baud_rate_port(if_type) != -1)
|
|
|
|
outb(io | PC98SIO_baud_rate_port(if_type), tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
iod->cmd = io | PC98SIO_cmd_port(if_type);
|
|
|
|
iod->sts = io | PC98SIO_sts_port(if_type);
|
|
|
|
iod->mod = io | PC98SIO_in_modem_port(if_type);
|
|
|
|
iod->ctrl = io | PC98SIO_intr_ctrl_port(if_type);
|
|
|
|
|
|
|
|
if (iod->if_type == COM_IF_INTERNAL) {
|
|
|
|
iod->irq = 4;
|
|
|
|
|
1999-12-06 00:23:38 +00:00
|
|
|
if (pc98_check_8251vfast()) {
|
|
|
|
PC98SIO_baud_rate_port(if_type) = I8251F_div;
|
1999-01-03 05:03:47 +00:00
|
|
|
if_8251_type[if_type].speedtab = pc98fast_speedtab;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
tmp = inb( iod->mod ) & if_8251_type[if_type].irr_mask;
|
1999-04-18 14:42:20 +00:00
|
|
|
if ((isa_get_port(dev) & 0xff) == IO_COM2)
|
1999-01-03 05:03:47 +00:00
|
|
|
iod->irq = irq_tab[0][tmp];
|
|
|
|
else
|
|
|
|
iod->irq = irq_tab[1][tmp];
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
irr = if_16550a_type[if_type].irr_read;
|
|
|
|
#ifdef COM_MULTIPORT
|
1999-09-07 11:17:09 +00:00
|
|
|
if (!COM_ISMULTIPORT(device_get_flags(dev)) ||
|
|
|
|
device_get_unit(dev) == COM_MPMASTER(device_get_flags(dev)))
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif
|
1999-01-03 05:03:47 +00:00
|
|
|
if (irr != -1) {
|
|
|
|
tmp = inb(io | irr);
|
1999-04-18 14:42:20 +00:00
|
|
|
if (isa_get_port(dev) & 0x01) /* XXX depend on RSB-384 */
|
1999-01-03 05:03:47 +00:00
|
|
|
iod->irq = irq_tab[1][tmp >> 3];
|
|
|
|
else
|
|
|
|
iod->irq = irq_tab[0][tmp & 0x07];
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-01-03 05:03:47 +00:00
|
|
|
if ( iod->irq == -1 ) return -1;
|
1996-06-14 10:04:54 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2000-05-12 12:38:25 +00:00
|
|
|
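/*
 * Derive the i8251 data/command/status/modem/interrupt-control port
 * addresses for this unit from its base I/O address and interface type.
 */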
static void
|
1999-12-06 00:23:38 +00:00
|
|
|
pc98_set_ioport(struct com_s *com)
|
1996-06-14 10:04:54 +00:00
|
|
|
{
|
1999-12-06 00:23:38 +00:00
|
|
|
int if_type = com->pc98_if_type & 0x0f;
|
2000-05-12 12:38:25 +00:00
|
|
|
Port_t io = rman_get_start(com->ioportres) & 0xff00;
|
|
|
|
|
|
|
|
pc98_check_sysclock();
|
|
|
|
com->data_port = io | PC98SIO_data_port(if_type);
|
|
|
|
com->cmd_port = io | PC98SIO_cmd_port(if_type);
|
|
|
|
com->sts_port = io | PC98SIO_sts_port(if_type);
|
|
|
|
com->in_modem_port = io | PC98SIO_in_modem_port(if_type);
|
|
|
|
com->intr_ctrl_port = io | PC98SIO_intr_ctrl_port(if_type);
|
1996-06-14 10:04:54 +00:00
|
|
|
}
|
1999-12-06 00:23:38 +00:00
|
|
|
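/*
 * Probe for the high-speed ("vfast") divisor register (I8251F_div):
 * write to it and watch bit 0x80; if the bit follows the writes, the
 * register is present and the faster speed table can be used.
 * Returns non-zero when detected.
 */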
static int
|
|
|
|
pc98_check_8251vfast(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
outb(I8251F_div, 0x8c);
|
|
|
|
DELAY(10);
|
|
|
|
for (i = 0; i < 100; i++) {
|
|
|
|
if ((inb(I8251F_div) & 0x80) != 0) {
|
|
|
|
i = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
DELAY(1);
|
|
|
|
}
|
|
|
|
outb(I8251F_div, 0);
|
|
|
|
DELAY(10);
|
|
|
|
for (; i < 100; i++) {
|
|
|
|
if ((inb(I8251F_div) & 0x80) == 0)
|
|
|
|
return 1;
|
|
|
|
DELAY(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
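/*
 * Detect the FIFO-capable variant of the internal 8251: one bit in
 * I8251F_iir toggles between two reads spaced 10us apart while another
 * stays clear when the FIFO extension is present.  Returns non-zero on
 * detection.
 */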
static int
|
|
|
|
pc98_check_8251fifo(void)
|
|
|
|
{
|
|
|
|
u_char tmp1, tmp2;
|
|
|
|
|
|
|
|
tmp1 = inb(I8251F_iir);
|
|
|
|
DELAY(10);
|
|
|
|
tmp2 = inb(I8251F_iir);
|
|
|
|
if (((tmp1 ^ tmp2) & 0x40) != 0 && ((tmp1 | tmp2) & 0x20) == 0)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
1996-06-14 10:04:54 +00:00
|
|
|
#endif /* PC98 defined */
|