Add the ability to make real-mode BIOS calls from the kernel. Currently,
everything is contained inside #ifdef VM86, so this option must be
present in the config file to use this functionality.

Thanks to Tor Egge, these changes should work on SMP machines. However,
the code may not be thoroughly SMP-safe.

At present, the only BIOS calls made are the memory-sizing routines at
bootup; these replace reading the RTC values.
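
As a rough sketch of the new interface (not part of the commit itself; it
mirrors what the initial_bioscalls() routine in vm86.c below does), a
kernel caller built with "options VM86" could size base memory with a
BIOS INT 12h call:

	struct vm86frame vmf;
	u_int basemem;

	bzero(&vmf, sizeof(struct vm86frame));	/* start with clean registers */
	if (vm86_intcall(0x12, &vmf) == 0)	/* INT 12h: base memory size */
		basemem = vmf.vmf_ax;		/* BIOS returns KB in %ax */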
Commit 640c4313af by Jonathan Lemon, 1998-03-23 19:52:59 +00:00
Parent: 59088db3cb
19 changed files with 986 additions and 113 deletions

exception.s

@@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.46 1997/10/27 16:35:34 bde Exp $
* $Id: exception.s,v 1.47 1997/10/27 17:19:20 bde Exp $
*/
#include "npx.h"
@@ -223,6 +223,10 @@ calltrap:
*/
#ifndef SMP
subl %eax,%eax
#endif
#ifdef VM86
cmpl $1,_in_vm86call
je 2f /* keep kernel cpl */
#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
jne 1f
@@ -231,6 +235,7 @@ calltrap:
jne 1f
#endif /* VM86 */
2:
#ifdef SMP
ECPL_LOCK
#ifdef CPL_AND_CML
@@ -362,6 +367,13 @@ ENTRY(fork_trampoline)
jmp _doreti
#ifdef VM86
/*
* Include vm86 call routines, which want to call _doreti.
*/
#include "i386/i386/vm86bios.s"
#endif /* VM86 */
/*
* Include what was once config+isa-dependent code.
* XXX it should be in a stand-alone file. It's still icu-dependent and

locore.s

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.103 1998/01/09 03:20:58 eivind Exp $
* $Id: locore.s,v 1.104 1998/01/31 02:53:41 eivind Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@@ -185,6 +185,12 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
#ifdef VM86
.globl _vm86paddr, _vm86pa
_vm86paddr: .long 0 /* address of vm86 region */
_vm86pa: .long 0 /* phys addr of vm86 region */
#endif
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
_bdb_exists: .long 0
@@ -828,6 +834,13 @@ over_symalloc:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
#ifdef VM86
ALLOCPAGES(4) /* IOPAGES + ext + stack */
movl %esi,R(_vm86pa)
addl $KERNBASE, %esi
movl %esi, R(_vm86paddr)
#endif /* VM86 */
#ifdef SMP
/* Allocate cpu0's private data page */
ALLOCPAGES(1)
@@ -894,6 +907,25 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
#ifdef VM86
/* Map space for the vm86 region */
movl R(_vm86pa), %eax
movl $4, %ecx
fillkptphys($PG_RW)
/* Map page 0 into the vm86 page table */
movl $0, %eax
movl $0, %ebx
movl $1, %ecx
fillkpt(R(_vm86pa), $PG_RW|PG_U)
/* ...likewise for the ISA hole */
movl $ISA_HOLE_START, %eax
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
movl R(cpu0pp), %eax
@@ -922,6 +954,25 @@ map_read_write:
movl $MPPTDI, %ebx
movl $1, %ecx
fillkpt(R(_IdlePTD), $PG_RW)
/* Fakeup VA for the local apic to allow early traps. */
ALLOCPAGES(1)
movl %esi, %eax
movl $2, %ebx /* pte offset = 2 */
movl $1, %ecx /* one private pt coming right up */
fillkpt(R(cpu0pt), $PG_RW)
/* Initialize mp lock to allow early traps */
movl $1, R(_mp_lock)
/* Initialize curproc to &proc0 */
movl R(cpu0pp), %eax
movl $CNAME(proc0), 4(%eax)
/* Initialize my_idlePTD to IdlePTD */
movl R(_IdlePTD), %ecx
movl %ecx,32(%eax)
#endif /* SMP */
/* install a pde for temporary double map of bottom of VA */

machdep.c

@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.291 1998/03/05 19:37:03 tegge Exp $
* $Id: machdep.c,v 1.292 1998/03/07 20:16:47 tegge Exp $
*/
#include "apm.h"
@@ -645,8 +645,6 @@ sigreturn(p, uap)
if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
trapsignal(p, SIGBUS, 0);
#define VM_USERCHANGE (PSL_USERCHANGE | PSL_RF)
#define VME_USERCHANGE (VM_USERCHANGE | PSL_VIP | PSL_VIF)
if (vm86->vm86_has_vme) {
eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
(eflags & VME_USERCHANGE) | PSL_VM;
@@ -1239,11 +1237,43 @@ init386(first)
setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
common_tss.tss_ioopt = (sizeof common_tss) << 16;
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
ltr(gsel_tss);
#ifdef VM86
private_tss = 0;
my_tr = GPROC0_SEL;
#endif
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_cr3 = (int)IdlePTD;
dblfault_tss.tss_eip = (int) dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
#ifdef VM86
initial_bioscalls(&biosbasemem, &biosextmem);
#else
/* Use BIOS values stored in RTC CMOS RAM, since probing
* breaks certain 386 AT relics.
*/
biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
#endif
/*
* If BIOS tells us that it has more than 640k in the basemem,
@@ -1290,7 +1320,7 @@ init386(first)
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
* The part of it from 0 to
* The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
@@ -1505,33 +1535,6 @@ init386(first)
avail_end + off, VM_PROT_ALL, TRUE);
msgbufmapped = 1;
/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
common_tss.tss_ioopt = (sizeof common_tss) << 16;
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
ltr(gsel_tss);
#ifdef VM86
private_tss = 0;
my_tr = GPROC0_SEL;
#endif
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_cr3 = (int)IdlePTD;
dblfault_tss.tss_eip = (int) dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
/* make a call gate to reenter kernel with */
gdp = &ldt[LSYS5CALLS_SEL].gd;

sys_machdep.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
* $Id: sys_machdep.c,v 1.32 1998/02/09 06:08:18 eivind Exp $
* $Id: sys_machdep.c,v 1.33 1998/02/13 05:25:37 bde Exp $
*
*/
@@ -140,7 +140,7 @@ i386_extend_pcb(struct proc *p)
if (ext == 0)
return (ENOMEM);
p->p_addr->u_pcb.pcb_ext = ext;
bzero(&ext->ext_tss, sizeof(struct i386tss));
bzero(ext, sizeof(struct pcb_ext));
ext->ext_tss.tss_esp0 = (unsigned)p->p_addr + ctob(UPAGES) - 16;
ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
/*
@@ -153,7 +153,6 @@ i386_extend_pcb(struct proc *p)
(offset - ((unsigned)&ext->ext_tss - (unsigned)ext)) << 16;
ext->ext_iomap = (caddr_t)ext + offset;
ext->ext_vm86.vm86_intmap = (caddr_t)ext + offset - 32;
ext->ext_vm86.vm86_inited = 0;
addr = (u_long *)ext->ext_vm86.vm86_intmap;
for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)

trap.c

@@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.121 1998/02/04 22:32:12 eivind Exp $
* $Id: trap.c,v 1.122 1998/02/06 12:13:10 eivind Exp $
*/
/*
@@ -225,6 +225,35 @@ trap(frame)
type = frame.tf_trapno;
code = frame.tf_err;
#ifdef VM86
if (in_vm86call) {
if (frame.tf_eflags & PSL_VM &&
(type == T_PROTFLT || type == T_STKFLT)) {
i = vm86_emulate((struct vm86frame *)&frame);
if (i != 0)
/*
* returns to original process
*/
vm86_trap((struct vm86frame *)&frame);
return;
}
switch (type) {
/*
* these traps want either a process context, or
* assume a normal userspace trap.
*/
case T_PROTFLT:
case T_SEGNPFLT:
trap_fatal(&frame);
return;
case T_TRCTRAP:
type = T_BPTFLT; /* kernel breakpoint */
/* FALL THROUGH */
}
goto kernel_trap; /* normal kernel trap handling */
}
#endif
if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
/* user trap */
@@ -356,6 +385,9 @@ trap(frame)
break;
}
} else {
#ifdef VM86
kernel_trap:
#endif
/* kernel trap */
switch (type) {

vm_machdep.c

@@ -38,7 +38,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.103 1998/03/14 03:02:15 tegge Exp $
* $Id: vm_machdep.c,v 1.104 1998/03/17 09:10:05 kato Exp $
*/
#include "npx.h"
@@ -65,6 +65,10 @@
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef VM86
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -599,7 +603,11 @@ cpu_fork(p1, p2)
* syscall. This copies the user mode register values.
*/
p2->p_md.md_regs = (struct trapframe *)
#ifdef VM86
((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
#else
((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1;
#endif /* VM86 */
*p2->p_md.md_regs = *p1->p_md.md_regs;
/*

vm86.c

@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: vm86.c,v 1.7 1998/02/04 22:32:12 eivind Exp $
* $Id: vm86.c,v 1.8 1998/02/06 12:13:11 eivind Exp $
*/
#include "opt_vm86.h"
@@ -38,6 +38,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <sys/user.h>
@@ -47,13 +48,23 @@
#include <machine/specialreg.h>
extern int i386_extend_pcb __P((struct proc *));
extern struct segment_descriptor common_tssd;
extern int vm86paddr, vm86pa;
extern struct pcb *vm86pcb;
extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);
void vm86_prepcall(struct vm86frame);
#define HLT 0xf4
#define CLI 0xfa
#define STI 0xfb
#define PUSHF 0x9c
#define POPF 0x9d
#define INTn 0xcd
#define IRET 0xcf
#define CALLm 0xff
#define OPERAND_SIZE_PREFIX 0x66
#define ADDRESS_SIZE_PREFIX 0x67
#define PUSH_MASK ~(PSL_VM | PSL_RF | PSL_I)
@@ -323,6 +334,303 @@ vm86_emulate(vmf)
return (SIGBUS);
}
static void
vm86_initialize(void)
{
int i, offset;
u_long *addr;
struct pcb *pcb;
struct pcb_ext *ext;
struct segment_descriptor sd;
struct soft_segment_descriptor ssd = {
0, /* segment base address (overwritten) */
ctob(IOPAGES + 1) - 1, /* length */
SDT_SYS386TSS, /* segment type */
0, /* priority level */
1, /* descriptor present */
0, 0,
0, /* default 32 size */
0 /* granularity */
};
/*
* Below is the memory layout that we use for the vm86 region.
*
* The last byte of the i/o map must be followed by an 0xff byte.
* We arbitrarily allocate 16 bytes here, to keep the starting
* address on a doubleword boundary.
*
* If a ~2K stack is enough for interrupt handling, then
* it may be possible to get the page count down to 3 pages.
*
* +--------+ +--------+
* | | |Page Tbl| 1M + 64K = 272 entries = 1088 bytes
* | | +--------+
* | page 0 |
* | | +--------+
* | | | stack |
* +--------+ +--------+
* +--------+ +--------+
* | | | PCB | size: ~240 bytes
* | | |PCB Ext | size: ~140 bytes (includes TSS)
* | | +--------+
* | page 1 |
* | | +--------+
* | | |int map |
* | | +--------+ <-- &(PAGE 1) - 16
* +--------+ | |
* | page 2 | | I/O |
* +--------+ | bitmap |
* | page 3 | | |
* +--------+ +--------+
*/
/*
* A rudimentary PCB must be installed, in order to get to the
* PCB extension area. We use the PCB area as a scratchpad for
* data storage, the layout of which is shown below.
*
* pcb_esi = new PTD entry 0
* pcb_ebp = pointer to frame on vm86 stack
* pcb_esp = stack frame pointer at time of switch
* pcb_ebx = va of vm86 page table
* pcb_eip = argument pointer to initial call
* pcb_fs = saved TSS descriptor, word 0
* pcb_gs = saved TSS descriptor, word 1
*/
pcb = (struct pcb *)(vm86paddr + PAGE_SIZE);
bzero(pcb, sizeof(struct pcb));
pcb->pcb_esi = vm86pa | PG_V | PG_RW | PG_U;
pcb->pcb_ebp = vm86paddr + PAGE_SIZE - sizeof(struct vm86frame);
pcb->pcb_ebx = vm86paddr;
ext = (struct pcb_ext *)((u_int)pcb + sizeof(struct pcb));
pcb->pcb_ext = ext;
bzero(ext, sizeof(struct pcb_ext));
ext->ext_tss.tss_esp0 = vm86paddr + PAGE_SIZE;
ext->ext_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
offset = PAGE_SIZE - 16;
ext->ext_tss.tss_ioopt =
(offset - ((u_int)&ext->ext_tss & PAGE_MASK)) << 16;
ext->ext_iomap = (caddr_t)(offset + ((u_int)&ext->ext_tss & PG_FRAME));
ext->ext_vm86.vm86_intmap = ext->ext_iomap - 32;
ext->ext_vm86.vm86_has_vme = (rcr4() & CR4_VME ? 1 : 0);
addr = (u_long *)ext->ext_vm86.vm86_intmap;
for (i = 0; i < (ctob(IOPAGES) + 32 + 16) / sizeof(u_long); i++)
*addr++ = 0;
ssd.ssd_base = (u_int)&ext->ext_tss;
ssd.ssd_limit -= ((u_int)&ext->ext_tss & PAGE_MASK);
ssdtosd(&ssd, &ext->ext_tssd);
vm86pcb = pcb;
}
void
initial_bioscalls(u_int *basemem, u_int *extmem)
{
int i, method;
struct vm86frame vmf;
u_int64_t highwat = 0;
struct {
u_int64_t base;
u_int64_t length;
u_int32_t type;
} smap;
bzero(&vmf, sizeof(struct vm86frame)); /* safety */
vm86_initialize();
vm86_intcall(0x12, &vmf);
*basemem = vmf.vmf_ax;
*extmem = 0;
/*
* if basemem != 640, map pages r/w into vm86 page table so
* that the bios can scribble on it.
*/
for (i = *basemem / 4; i < 160; i++) {
u_int *pte = (u_int *)vm86paddr;
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
}
/*
* get memory map with INT 15:E820
*/
#define SMAPSIZ sizeof(smap)
#define SMAP_SIG 0x534D4150 /* 'SMAP' */
vmf.vmf_ebx = 0;
do {
vmf.vmf_eax = 0xE820;
vmf.vmf_edx = SMAP_SIG;
vmf.vmf_ecx = SMAPSIZ;
i = vm86_datacall(0x15, &vmf,
(char *)&smap, SMAPSIZ, &vmf.vmf_es, &vmf.vmf_di);
if (i || vmf.vmf_eax != SMAP_SIG)
break;
if (smap.type == 0x01 && smap.base >= highwat) {
*extmem += (smap.length / 1024);
highwat = smap.base + smap.length;
}
} while (vmf.vmf_ebx != 0);
if (*extmem != 0) {
if (*extmem > *basemem) {
*extmem -= *basemem;
method = 0xE820;
goto done;
}
printf("E820: extmem (%d) < basemem (%d)\n", *extmem, *basemem);
}
/*
* try memory map with INT 15:E801
*/
vmf.vmf_ax = 0xE801;
if (vm86_intcall(0x15, &vmf) == 0) {
*extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
method = 0xE801;
goto done;
}
vmf.vmf_ah = 0x88;
vm86_intcall(0x15, &vmf);
*extmem = vmf.vmf_ax;
method = 0x88;
done:
printf("BIOS basemem: %dK, extmem: %dK (from %p call)\n",
*basemem, *extmem, method);
#if 0
/* VESA setup -- ? */
vmf.vmf_ax = 0x4f02;
error = vm86_intcall(0x10, &vmf);
#endif
}
static void
vm86_initflags(struct vm86frame *vmf)
{
int eflags = vmf->vmf_eflags;
struct vm86_kernel *vm86 = &curpcb->pcb_ext->ext_vm86;
if (vm86->vm86_has_vme) {
eflags = (vmf->vmf_eflags & ~VME_USERCHANGE) |
(eflags & VME_USERCHANGE) | PSL_VM;
} else {
vm86->vm86_eflags = eflags; /* save VIF, VIP */
eflags = (vmf->vmf_eflags & ~VM_USERCHANGE) |
(eflags & VM_USERCHANGE) | PSL_VM;
}
vmf->vmf_eflags = eflags | PSL_VM;
}
/*
* called from vm86_bioscall, while in vm86 address space, to finalize setup.
*/
void
vm86_prepcall(struct vm86frame vmf)
{
u_long addr[] = { 0xA00, 0x1000 }; /* code, stack */
u_char intcall[] = {
CLI, INTn, 0x00, STI, HLT
};
if ((vmf.vmf_trapno & PAGE_MASK) <= 0xff) {
/* interrupt call requested */
intcall[2] = (u_char)(vmf.vmf_trapno & 0xff);
memcpy((void *)addr[0], (void *)intcall, sizeof(intcall));
vmf.vmf_ip = addr[0];
vmf.vmf_cs = 0;
}
vmf.vmf_sp = addr[1] - 2; /* keep aligned */
vmf.kernel_es = vmf.kernel_ds = 0;
vmf.vmf_ss = 0;
vmf.vmf_eflags = PSL_VIF | PSL_VM | PSL_USER;
vm86_initflags(&vmf);
}
/*
* vm86 trap handler; determines whether routine succeeded or not.
* Called while in vm86 space, returns to calling process.
*/
void
vm86_trap(struct vm86frame *vmf)
{
caddr_t addr;
/* "should not happen" */
if ((vmf->vmf_eflags & PSL_VM) == 0)
panic("vm86_trap called, but not in vm86 mode");
addr = MAKE_ADDR(vmf->vmf_cs, vmf->vmf_ip);
if (*(u_char *)addr == HLT)
vmf->vmf_trapno = vmf->vmf_eflags & PSL_C;
else
vmf->vmf_trapno = vmf->vmf_trapno << 16;
vm86_biosret(vmf);
}
int
vm86_intcall(int intnum, struct vm86frame *vmf)
{
if (intnum < 0 || intnum > 0xff)
return (EINVAL);
vmf->vmf_trapno = intnum;
return (vm86_bioscall(vmf));
}
/*
* buffer must be entirely contained in a wired down page in kernel memory,
* and is mapped into page 1 in vm86 space. segment/offset will be filled
* in to create a vm86 pointer to the buffer. If intnum is a valid
* interrupt number (0-255), then the "interrupt trampoline" will be
* used, otherwise we use the caller's cs:ip routine.
*
* a future revision may allow multiple pages to be mapped, or allow
* the caller to pass in a custom page table to use.
*/
int
vm86_datacall(intnum, vmf, buffer, buflen, segment, offset)
int intnum;
struct vm86frame *vmf;
char *buffer;
int buflen;
u_short *segment, *offset;
{
u_int page;
page = (u_int)buffer & PG_FRAME;
*offset = (u_int)buffer & PAGE_MASK;
if ((*offset + buflen) & PG_FRAME)
return (-1); /* XXX fixme! */
*segment = 0x100;
page = vtophys(page);
vmf->vmf_trapno = page | (intnum & PAGE_MASK);
return (vm86_bioscall(vmf));
}
#if 0
int
vm86_datacall(int intnum, u_int kpage, struct vm86frame *vmf)
{
if (kpage & PAGE_MASK)
return (EINVAL);
kpage = vtophys(kpage);
vmf->vmf_trapno = kpage | (intnum & PAGE_MASK);
return (vm86_bioscall(vmf));
}
#endif
int
vm86_sysarch(p, args)
struct proc *p;
@@ -381,6 +689,19 @@ vm86_sysarch(p, args)
}
break;
#if 0
case VM86_INTCALL: {
struct vm86_intcall_args sa;
if (error = copyin(ua.sub_args, &sa, sizeof(sa)))
return (error);
if (error = vm86_intcall(sa.intnum, &sa.vmf))
return (error);
error = copyout(&sa, ua.sub_args, sizeof(sa));
}
break;
#endif
default:
error = EINVAL;
}
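
To illustrate the vm86_datacall() contract described above (buffer wired
down, contained in a single kernel page, mapped at page 1 of the vm86
space), here is a hypothetical sketch of a VBE "return mode information"
call (INT 10h, AX=4F01h), which writes a 256-byte block through ES:DI;
the mode number and buffer are assumptions, not code from this commit:

	struct vm86frame vmf;
	/* buf: 256 bytes inside a single wired kernel page */

	bzero(&vmf, sizeof(struct vm86frame));
	vmf.vmf_eax = 0x4f01;			/* VBE: return mode information */
	vmf.vmf_cx = mode;			/* video mode to query */
	if (vm86_datacall(0x10, &vmf, buf, 256,
	    &vmf.vmf_es, &vmf.vmf_di) == 0 && vmf.vmf_ax == 0x004f) {
		/* mode info block is now in buf */
	}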

sys/i386/i386/vm86bios.s (new file, 209 lines)

@@ -0,0 +1,209 @@
/*-
* Copyright (c) 1998 Jonathan Lemon
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
*/
#include "opt_vm86.h"
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/trap.h>
#include "assym.s"
.data
ALIGN_DATA
.globl _in_vm86call, _vm86pcb
_in_vm86call: .long 0
_vm86pcb: .long 0
.text
/*
* vm86_bioscall(struct trapframe_vm86 *vm86)
*/
ENTRY(vm86_bioscall)
movl _vm86pcb,%edx /* data area, see vm86.c for layout */
movl 4(%esp),%eax
movl %eax,PCB_EIP(%edx) /* save argument pointer */
pushl %ebx
pushl %ebp
pushl %esi
pushl %edi
pushl %fs
pushl %gs
#ifdef SMP
pushl %edx
ALIGN_LOCK /* Get global lock */
popl %edx
#endif
movl _curproc,%ecx
pushl %ecx /* save _curproc value */
testl %ecx,%ecx
je 1f /* no process to save */
#if NNPX > 0
cmpl %ecx,_npxproc /* do we need to save fp? */
jne 1f
movl P_ADDR(%ecx),%ecx
addl $PCB_SAVEFPU,%ecx
pushl %edx
call _npxsave
popl %edx /* recover our pcb */
#endif
1:
movl PCB_EBP(%edx),%ebx /* target frame location */
movl %ebx,%edi /* destination */
movl PCB_EIP(%edx),%esi /* source (set on entry) */
movl $21,%ecx /* sizeof(struct vm86frame)/4 */
cld
rep
movsl /* copy frame to new stack */
movl TF_TRAPNO(%ebx),%ebx
cmpl $256,%ebx
jb 1f /* no page frame to map */
andl $~PAGE_MASK,%ebx
#if 0
orl $PG_V|PG_RW|PG_U,%ebx /* XXX assembler error?? */
#endif
orl $0x7,%ebx
movl PCB_EBX(%edx),%eax /* va of vm86 page table */
movl %ebx,4(%eax) /* set vm86 PTE entry 1 */
1:
movl _curpcb,%eax
pushl %eax /* save curpcb */
movl %edx,_curpcb /* set curpcb to vm86pcb */
movl $0,_curproc /* erase curproc */
movl _my_tr,%esi
leal _gdt(,%esi,8),%ebx /* entry in GDT */
movl 0(%ebx),%eax
movl %eax,PCB_FS(%edx) /* save first word */
movl 4(%ebx),%eax
andl $~0x200, %eax /* flip 386BSY -> 386TSS */
movl %eax,PCB_GS(%edx) /* save second word */
movl PCB_EXT(%edx),%edi /* vm86 tssd entry */
movl 0(%edi),%eax
movl %eax,0(%ebx)
movl 4(%edi),%eax
movl %eax,4(%ebx)
shll $3,%esi /* GSEL(entry, SEL_KPL) */
ltr %si
movl %cr3,%eax
pushl %eax /* save address space */
#ifdef SMP
movl _my_idlePTD,%ecx
#else
movl _IdlePTD,%ecx
#endif
movl %ecx,%ebx
addl $KERNBASE,%ebx /* va of Idle PTD */
movl 0(%ebx),%eax
pushl %eax /* old ptde != 0 when booting */
pushl %ebx /* keep for reuse */
movl %esp,PCB_ESP(%edx) /* save current stack location */
movl PCB_ESI(%edx),%eax /* mapping for vm86 page table */
movl %eax,0(%ebx) /* ... install as PTD entry 0 */
movl %ecx,%cr3 /* new page tables */
movl PCB_EBP(%edx),%esp /* switch to new stack */
call _vm86_prepcall /* finish setup */
movl $1,_in_vm86call /* set flag for trap() */
/*
* Return via _doreti
*/
#ifdef SMP
ECPL_LOCK
#ifdef CPL_AND_CML
#error Not ready for CPL_AND_CML
#endif
pushl _cpl /* cpl to restore */
ECPL_UNLOCK
#else
pushl _cpl /* cpl to restore */
#endif
subl $4,%esp /* dummy unit */
MPLOCKED incb _intr_nesting_level
MEXITCOUNT
jmp _doreti
/*
* vm86_biosret(struct trapframe_vm86 *vm86)
*/
ENTRY(vm86_biosret)
movl _vm86pcb,%edx /* data area */
movl 4(%esp),%esi /* source */
movl PCB_EIP(%edx),%edi /* destination */
movl $21,%ecx /* size */
cld
rep
movsl /* copy frame to original frame */
movl PCB_ESP(%edx),%esp /* back to old stack */
popl %ebx /* saved va of Idle PTD */
popl %eax
movl %eax,0(%ebx) /* restore old pte */
popl %eax
movl %eax,%cr3 /* install old page table */
movl $0,_in_vm86call /* reset trapflag */
movl PCB_EBX(%edx),%ebx /* va of vm86 page table */
movl $0,4(%ebx) /* ...clear entry 1 */
movl _my_tr,%esi
leal _gdt(,%esi,8),%ebx /* entry in GDT */
movl PCB_FS(%edx),%eax
movl %eax,0(%ebx) /* restore first word */
movl PCB_GS(%edx),%eax
movl %eax,4(%ebx) /* restore second word */
shll $3,%esi /* GSEL(entry, SEL_KPL) */
ltr %si
popl _curpcb /* restore curpcb/curproc */
popl _curproc
movl TF_TRAPNO(%edx),%eax /* return (trapno) */
popl %gs
popl %fs
popl %edi
popl %esi
popl %ebp
popl %ebx
ret /* back to our normal program */

vm86.h

@@ -26,39 +26,12 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: vm86.h,v 1.3 1997/08/28 14:36:56 jlemon Exp $
* $Id: vm86.h,v 1.4 1997/11/20 18:43:55 bde Exp $
*/
#ifndef _MACHINE_VM86_H_
#define _MACHINE_VM86_H_ 1
struct vm86_kernel {
caddr_t vm86_intmap; /* interrupt map */
u_long vm86_eflags; /* emulated flags */
int vm86_has_vme; /* VME support */
int vm86_inited; /* we were initialized */
int vm86_debug;
};
struct i386_vm86_args {
int sub_op; /* sub-operation to perform */
char *sub_args; /* args */
};
#define VM86_INIT 1
#define VM86_SET_VME 2
#define VM86_GET_VME 3
struct vm86_init_args {
int debug; /* debug flag */
int cpu_type; /* cpu type to emulate */
u_char int_map[32]; /* interrupt map */
};
struct vm86_vme_args {
int state; /* status */
};
/* standard register representation */
typedef union {
u_long r_ex;
@@ -76,8 +49,8 @@ typedef union {
/* layout must match definition of struct trapframe_vm86 in <machine/frame.h> */
struct vm86frame {
int :32; /* kernel ES */
int :32; /* kernel DS */
int kernel_es;
int kernel_ds;
reg86_t edi;
reg86_t esi;
reg86_t ebp;
@@ -86,8 +59,8 @@ struct vm86frame {
reg86_t edx;
reg86_t ecx;
reg86_t eax;
int :32; /* trapno */
int :32; /* err */
int vmf_trapno;
int vmf_err;
reg86_t eip;
reg86_t cs;
reg86_t eflags;
@@ -97,7 +70,21 @@
reg86_t ds;
reg86_t fs;
reg86_t gs;
#define vmf_ah eax.r_b.r_h
#define vmf_al eax.r_b.r_l
#define vmf_ax eax.r_w.r_x
#define vmf_eax eax.r_ex
#define vmf_bx ebx.r_w.r_x
#define vmf_ebx ebx.r_ex
#define vmf_cx ecx.r_w.r_x
#define vmf_ecx ecx.r_ex
#define vmf_dx edx.r_w.r_x
#define vmf_edx edx.r_ex
#define vmf_si esi.r_w.r_x
#define vmf_di edi.r_w.r_x
#define vmf_cs cs.r_w.r_x
#define vmf_ds ds.r_w.r_x
#define vmf_es es.r_w.r_x
#define vmf_ss ss.r_w.r_x
#define vmf_sp esp.r_w.r_x
#define vmf_ip eip.r_w.r_x
@@ -105,8 +92,52 @@ struct vm86frame {
#define vmf_eflags eflags.r_ex
};
#define VM_USERCHANGE (PSL_USERCHANGE | PSL_RF)
#define VME_USERCHANGE (VM_USERCHANGE | PSL_VIP | PSL_VIF)
struct vm86_kernel {
caddr_t vm86_intmap; /* interrupt map */
u_long vm86_eflags; /* emulated flags */
int vm86_has_vme; /* VME support */
int vm86_inited; /* we were initialized */
int vm86_debug;
caddr_t vm86_sproc; /* address of sproc */
};
struct i386_vm86_args {
int sub_op; /* sub-operation to perform */
char *sub_args; /* args */
};
#define VM86_INIT 1
#define VM86_SET_VME 2
#define VM86_GET_VME 3
#define VM86_INTCALL 4
struct vm86_init_args {
int debug; /* debug flag */
int cpu_type; /* cpu type to emulate */
u_char int_map[32]; /* interrupt map */
};
struct vm86_vme_args {
int state; /* status */
};
struct vm86_intcall_args {
int intnum;
struct vm86frame vmf;
};
extern int in_vm86call;
struct proc;
extern int vm86_emulate __P((struct vm86frame *));
extern int vm86_sysarch __P((struct proc *, char *));
extern void vm86_trap __P((struct vm86frame *));
extern int vm86_intcall __P((int, struct vm86frame *));
extern int vm86_datacall __P((int, struct vm86frame *, char *, int,
u_short *, u_short *));
extern void initial_bioscalls __P((u_int *, u_int *));
#endif /* _MACHINE_VM86_H_ */
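
The VM86_INTCALL sub-op is defined above, but its handler in
vm86_sysarch() is still compiled out (#if 0) in this commit. If it were
enabled, a userland program might drive it through sysarch(2) roughly as
follows; this is a hypothetical sketch, not a supported interface:

	#include <machine/sysarch.h>
	#include <machine/vm86.h>
	#include <strings.h>

	int
	get_base_memory(u_short *kb)
	{
		struct vm86_intcall_args va;
		struct i386_vm86_args ua;

		bzero(&va, sizeof(va));
		va.intnum = 0x12;		/* BIOS: report base memory */
		ua.sub_op = VM86_INTCALL;
		ua.sub_args = (char *)&va;
		if (sysarch(I386_VM86, (char *)&ua) != 0)
			return (-1);
		*kb = va.vmf.vmf_ax;		/* size in KB, from %ax */
		return (0);
	}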

ipl.s

@@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.19 1997/12/15 02:18:35 tegge Exp $
* $Id: ipl.s,v 1.20 1998/03/03 22:56:29 tegge Exp $
*/
@@ -159,6 +159,8 @@ doreti_exit:
/* XXX CPL_AND_CML needs work */
#error not ready for vm86
#endif
cmpl $1,_in_vm86call
je 1f /* want cpl == SWI_AST_PENDING */
/*
* XXX
* Sometimes when attempting to return to vm86 mode, cpl is not
@@ -342,6 +344,10 @@ doreti_swi:
ALIGN_TEXT
swi_ast:
addl $8,%esp /* discard raddr & cpl to get trap frame */
#ifdef VM86
cmpl $1,_in_vm86call
je 1f /* stay in kernel mode */
#endif
testb $SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
je swi_ast_phantom
swi_ast_user:
@@ -364,6 +370,7 @@ swi_ast_phantom:
*/
testl $PSL_VM,TF_EFLAGS(%esp)
jne swi_ast_user
1:
#endif /* VM86 */
/*
* These happen when there is an interrupt in a trap handler before
