Unifdef VM86.

Reviewed by:	silence on -current
jlemon 1999-06-01 18:20:36 +00:00
parent d4d2c87363
commit b5d4171ff6
50 changed files with 812 additions and 977 deletions
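
VM86 (virtual-8086 mode) support is now compiled in unconditionally, so every #ifdef VM86/#endif pair collapses to the formerly optional code, every #ifndef VM86 alternative disappears, and the "opt_vm86.h" option header goes away. As a sketch of the transformation, taken from the init386() hunk further down:

/* before: two variants, selected by `options VM86` */
#ifdef VM86
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */

/* after: the VM86 branch survives, unconditionally */
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;

The same reduction repeats across the 50 files below; machdep.c additionally replaces the old RTC-only memory sizing with a vm86-based getmemsize().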

View File

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.37 1999/04/28 01:04:12 luoqi Exp $
* $Id: apic_vector.s,v 1.38 1999/05/28 14:08:57 bde Exp $
*/
@ -628,10 +628,8 @@ _Xcpucheckstate:
andl $3, %eax
cmpl $3, %eax
je 1f
#ifdef VM86
testl $PSL_VM, 24(%esp)
jne 1f
#endif
incl %ebx /* system or interrupt */
#ifdef CPL_AND_CML
cmpl $0, _inside_intr
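
This hunk is _Xcpucheckstate deciding whether the interrupted context was user or kernel: RPL 3 in the saved %cs means user, and a task in virtual-8086 mode counts as user even though its saved %cs is a real-mode value, which is what the now-unconditional PSL_VM test catches. trap() below spells out the same predicate in C:

/* user context if CPL == 3 or the CPU was in virtual-8086 mode */
if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
	/* user trap */
}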

View File

@ -33,12 +33,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: swtch.s,v 1.80 1999/05/06 09:44:49 bde Exp $
* $Id: swtch.s,v 1.81 1999/05/12 21:38:45 luoqi Exp $
*/
#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include <sys/rtprio.h>
@ -277,7 +276,6 @@ _idle:
/* update common_tss.tss_esp0 pointer */
movl %ecx, _common_tss + TSS_ESP0
#ifdef VM86
movl _cpuid, %esi
btrl %esi, _private_tss
jae 1f
@ -294,7 +292,6 @@ _idle:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
1:
#endif /* VM86 */
sti
@ -397,7 +394,6 @@ idle_loop:
/* update common_tss.tss_esp0 pointer */
movl %esp, _common_tss + TSS_ESP0
#ifdef VM86
movl $0, %esi
btrl %esi, _private_tss
jae 1f
@ -413,7 +409,6 @@ idle_loop:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
1:
#endif /* VM86 */
sti
@ -630,7 +625,6 @@ swtch_com:
movl %ebx,%cr3
4:
#ifdef VM86
#ifdef SMP
movl _cpuid, %esi
#else
@ -642,18 +636,12 @@ swtch_com:
movl PCB_EXT(%edx), %edi /* new tss descriptor */
jmp 2f
1:
#endif
/* update common_tss.tss_esp0 pointer */
movl %edx, %ebx /* pcb */
#ifdef VM86
addl $(UPAGES * PAGE_SIZE - 16), %ebx
#else
addl $(UPAGES * PAGE_SIZE), %ebx
#endif /* VM86 */
movl %ebx, _common_tss + TSS_ESP0
#ifdef VM86
btrl %esi, _private_tss
jae 3f
#ifdef SMP
@ -672,7 +660,6 @@ swtch_com:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
3:
#endif /* VM86 */
movl P_VMSPACE(%ecx), %ebx
#ifdef SMP
movl _cpuid, %eax
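
The recurring "UPAGES * PAGE_SIZE - 16" above is the vm86 stack adjustment this commit makes unconditional: a trapframe taken out of virtual-8086 mode carries four extra segment-register words (the vm86 es/ds/fs/gs), so the ring-0 stack top in the TSS is lowered by 4 * 4 = 16 bytes to leave room for them. A sketch of the arithmetic in C, with ustack standing in for the process's u-area (p->p_addr):

/* the kernel stack grows down from the top of the UPAGES region */
char *stacktop = (char *)ustack + UPAGES * PAGE_SIZE;

/* reserve 4 longs for tf_vm86_es/ds/fs/gs pushed on vm86 traps */
common_tss.tss_esp0 = (int)(stacktop - 16);

/* the initial trapframe sits immediately below the reserved words */
struct trapframe *regs = (struct trapframe *)common_tss.tss_esp0 - 1;

cpu_fork() in the vm_machdep.c hunk further down performs the matching "- 16) - 1" computation when it places the child's trapframe.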

View File

@ -30,11 +30,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.59 1999/04/28 01:03:18 luoqi Exp $
* $Id: exception.s,v 1.60 1999/05/06 09:44:49 bde Exp $
*/
#include "npx.h"
#include "opt_vm86.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
@ -362,12 +361,10 @@ ENTRY(fork_trampoline)
jmp _doreti
#ifdef VM86
/*
* Include vm86 call routines, which want to call _doreti.
*/
#include "i386/i386/vm86bios.s"
#endif /* VM86 */
/*
* Include what was once config+isa-dependent code.

View File

@ -30,11 +30,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.59 1999/04/28 01:03:18 luoqi Exp $
* $Id: exception.s,v 1.60 1999/05/06 09:44:49 bde Exp $
*/
#include "npx.h"
#include "opt_vm86.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
@ -362,12 +361,10 @@ ENTRY(fork_trampoline)
jmp _doreti
#ifdef VM86
/*
* Include vm86 call routines, which want to call _doreti.
*/
#include "i386/i386/vm86bios.s"
#endif /* VM86 */
/*
* Include what was once config+isa-dependent code.

View File

@ -34,10 +34,9 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $Id: genassym.c,v 1.68 1999/05/12 21:30:49 luoqi Exp $
* $Id: genassym.c,v 1.69 1999/05/12 21:38:40 luoqi Exp $
*/
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include <stddef.h>
@ -68,9 +67,7 @@
#endif
#include <machine/segments.h>
#include <machine/globaldata.h>
#ifdef VM86
#include <machine/vm86.h>
#endif
#define OS(s, m) ((u_int)offsetof(struct s, m))
@ -128,9 +125,7 @@ main()
printf("#define\tTSS_ESP0 %#x\n", OS(i386tss, tss_esp0));
printf("#define\tPCB_USERLDT %#x\n", OS(pcb, pcb_ldt));
printf("#define\tPCB_GS %#x\n", OS(pcb, pcb_gs));
#ifdef VM86
printf("#define\tPCB_EXT %#x\n", OS(pcb, pcb_ext));
#endif
#ifdef SMP
printf("#define\tPCB_MPNEST %#x\n", OS(pcb, pcb_mpnest));
#endif
@ -201,10 +196,8 @@ main()
printf("#define\tGD_COMMON_TSS %#x\n", OS(globaldata, gd_common_tss));
printf("#define\tGD_SWITCHTIME %#x\n", OS(globaldata, gd_switchtime));
printf("#define\tGD_SWITCHTICKS %#x\n", OS(globaldata, gd_switchticks));
#ifdef VM86
printf("#define\tGD_COMMON_TSSD %#x\n", OS(globaldata, gd_common_tssd));
printf("#define\tGD_TSS_GDT %#x\n", OS(globaldata, gd_tss_gdt));
#endif
#ifdef USER_LDT
printf("#define\tGD_CURRENTLDT %#x\n", OS(globaldata, gd_currentldt));
#endif
@ -233,9 +226,7 @@ main()
printf("#define\tKPSEL %#x\n", GSEL(GPRIV_SEL, SEL_KPL));
#endif
printf("#define\tGPROC0_SEL %#x\n", GPROC0_SEL);
#ifdef VM86
printf("#define\tVM86_FRAMESIZE %#x\n", sizeof(struct vm86frame));
#endif
return (0);
}
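
genassym.c is how assembly files such as swtch.s learn structure offsets: it is a C program run at build time whose printf output becomes a generated header of #define lines that the .s files include. Dropping the #ifdefs means PCB_EXT, GD_COMMON_TSSD, GD_TSS_GDT and VM86_FRAMESIZE are now always emitted. A minimal, self-contained sketch of the pattern (the struct here is a stand-in, not the real pcb layout):

#include <stddef.h>
#include <stdio.h>

struct pcb { int pcb_cr3; void *pcb_ext; };	/* stand-in layout */
#define OS(s, m) ((unsigned)offsetof(struct s, m))

int
main(void)
{
	/* consumed by assembly as, e.g., movl PCB_EXT(%edx), %edi */
	printf("#define\tPCB_EXT %#x\n", OS(pcb, pcb_ext));
	return (0);
}

Because the constants come from the compiler's own structure layout, the assembly can never drift out of sync with the C headers.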

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $
* $Id: locore.s,v 1.122 1999/05/09 19:01:49 peter Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -48,7 +48,6 @@
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"
#include <sys/syscall.h>
#include <sys/reboot.h>
@ -134,13 +133,11 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
#ifdef VM86
vm86phystk: .long 0 /* PA of vm86/bios stack */
.globl _vm86paddr, _vm86pa
_vm86paddr: .long 0 /* address of vm86 region */
_vm86pa: .long 0 /* phys addr of vm86 region */
#endif
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
@ -311,18 +308,10 @@ NON_GPROF_ENTRY(btext)
stosb
#if NAPM > 0
#ifndef VM86
/*
* XXX it's not clear that APM can live in the current environment.
* Only pc-relative addressing works.
*/
call _apm_setup
#endif
#endif
call create_pagetables
#ifdef VM86
/*
* If the CPU has support for VME, turn it on.
*/
@ -332,7 +321,6 @@ NON_GPROF_ENTRY(btext)
orl $CR4_VME, %eax
movl %eax, %cr4
1:
#endif /* VM86 */
#ifdef BDE_DEBUGGER
/*
@ -786,7 +774,6 @@ no_kernend:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
#ifdef VM86
ALLOCPAGES(1) /* vm86/bios stack */
movl %esi,R(vm86phystk)
@ -794,7 +781,6 @@ no_kernend:
movl %esi,R(_vm86pa)
addl $KERNBASE, %esi
movl %esi, R(_vm86paddr)
#endif /* VM86 */
#ifdef SMP
/* Allocate cpu0's private data page */
@ -862,7 +848,6 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
#ifdef VM86
/* Map space for the vm86 region */
movl R(vm86phystk), %eax
movl $4, %ecx
@ -879,7 +864,6 @@ map_read_write:
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
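
The VME block that btext now runs unconditionally checks a CPUID feature bit and sets CR4_VME, enabling the Pentium's virtual-8086 mode extensions (hardware-assisted virtual interrupt flags, so vm86 code can toggle interrupts without trapping). The assembly open-codes it because no C environment exists this early; later in boot the same probe-and-enable would look roughly like this sketch:

/* cpu_feature holds the CPUID EDX feature word */
if (cpu_feature & CPUID_VME)
	load_cr4(rcr4() | CR4_VME);

CPUID_VME, CR4_VME, rcr4() and load_cr4() are the machine/specialreg.h and machine/cpufunc.h names of this era.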

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $
* $Id: locore.s,v 1.122 1999/05/09 19:01:49 peter Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -48,7 +48,6 @@
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"
#include <sys/syscall.h>
#include <sys/reboot.h>
@ -134,13 +133,11 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
#ifdef VM86
vm86phystk: .long 0 /* PA of vm86/bios stack */
.globl _vm86paddr, _vm86pa
_vm86paddr: .long 0 /* address of vm86 region */
_vm86pa: .long 0 /* phys addr of vm86 region */
#endif
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
@ -311,18 +308,10 @@ NON_GPROF_ENTRY(btext)
stosb
#if NAPM > 0
#ifndef VM86
/*
* XXX it's not clear that APM can live in the current environment.
* Only pc-relative addressing works.
*/
call _apm_setup
#endif
#endif
call create_pagetables
#ifdef VM86
/*
* If the CPU has support for VME, turn it on.
*/
@ -332,7 +321,6 @@ NON_GPROF_ENTRY(btext)
orl $CR4_VME, %eax
movl %eax, %cr4
1:
#endif /* VM86 */
#ifdef BDE_DEBUGGER
/*
@ -786,7 +774,6 @@ no_kernend:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
#ifdef VM86
ALLOCPAGES(1) /* vm86/bios stack */
movl %esi,R(vm86phystk)
@ -794,7 +781,6 @@ no_kernend:
movl %esi,R(_vm86pa)
addl $KERNBASE, %esi
movl %esi, R(_vm86paddr)
#endif /* VM86 */
#ifdef SMP
/* Allocate cpu0's private data page */
@ -862,7 +848,6 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
#ifdef VM86
/* Map space for the vm86 region */
movl R(vm86phystk), %eax
movl $4, %ecx
@ -879,7 +864,6 @@ map_read_write:
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.335 1999/05/12 21:38:42 luoqi Exp $
* $Id: machdep.c,v 1.336 1999/05/31 18:35:53 dfr Exp $
*/
#include "apm.h"
@ -53,7 +53,6 @@
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -124,9 +123,8 @@
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/intr_machdep.h>
#ifndef VM86
#include <isa/rtc.h>
#endif
#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>
@ -568,7 +566,6 @@ sendsig(catcher, sig, mask, code)
sf.sf_sc.sc_trapno = regs->tf_trapno;
sf.sf_sc.sc_err = regs->tf_err;
#ifdef VM86
/*
* If we're a vm86 process, we want to save the segment registers.
* We also change eflags to be our emulated eflags, not the actual
@ -600,7 +597,6 @@ sendsig(catcher, sig, mask, code)
*/
tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
}
#endif /* VM86 */
/*
* Copy the sigframe out to the user's stack.
@ -657,7 +653,6 @@ sigreturn(p, uap)
return(EFAULT);
eflags = scp->sc_ps;
#ifdef VM86
if (eflags & PSL_VM) {
struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
struct vm86_kernel *vm86;
@ -691,7 +686,6 @@ sigreturn(p, uap)
tf->tf_es = _udatasel;
tf->tf_fs = _udatasel;
} else {
#endif /* VM86 */
/*
* Don't allow users to change privileged or reserved flags.
*/
@ -729,9 +723,7 @@ sigreturn(p, uap)
regs->tf_ds = scp->sc_ds;
regs->tf_es = scp->sc_es;
regs->tf_fs = scp->sc_fs;
#ifdef VM86
}
#endif
/* restore scratch registers */
regs->tf_eax = scp->sc_eax;
@ -902,12 +894,10 @@ union descriptor ldt[NLDT]; /* local descriptor table */
struct region_descriptor r_gdt, r_idt;
#endif
#ifdef VM86
#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss; /* flag indicating private tss */
#endif /* VM86 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
@ -1138,26 +1128,375 @@ sdtossd(sd, ssd)
ssd->ssd_gran = sd->sd_gran;
}
#define PHYSMAP_SIZE (2 * 8)
static void
getmemsize(int first)
{
int i, physmap_idx, pa_indx;
u_int basemem, extmem;
int speculative_mprobe = FALSE;
struct vm86frame vmf;
struct vm86context vmc;
vm_offset_t pa, physmap[PHYSMAP_SIZE];
pt_entry_t pte;
struct {
u_int64_t base;
u_int64_t length;
u_int32_t type;
} *smap;
#if NNPX > 0
int msize;
#endif
bzero(&vmf, sizeof(struct vm86frame));
bzero(physmap, sizeof(physmap));
vm86_intcall(0x12, &vmf);
basemem = vmf.vmf_ax;
if (basemem > 640) {
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
basemem);
basemem = 640;
}
/*
* XXX if biosbasemem is now < 640, there is a `hole'
* between the end of base memory and the start of
* ISA memory. The hole may be empty or it may
* contain BIOS code or data. Map it read/write so
* that the BIOS can write to it. (Memory from 0 to
* the physical end of the kernel is mapped read-only
* to begin with and then parts of it are remapped.
* The parts that aren't remapped form holes that
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
* The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
* This code is similar to the code used in
* pmap_mapdev, but since no memory needs to be
* allocated we simply change the mapping.
*/
for (pa = trunc_page(basemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
pte = (pt_entry_t)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
/*
* if basemem != 640, map pages r/w into vm86 page table so
* that the bios can scribble on it.
*/
pte = (pt_entry_t)vm86paddr;
for (i = basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
/*
* map page 1 R/W into the kernel page table so we can use it
* as a buffer. The kernel will unmap this page later.
*/
pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
* get memory map with INT 15:E820
*/
#define SMAPSIZ sizeof(*smap)
#define SMAP_SIG 0x534D4150 /* 'SMAP' */
vmc.npages = 0;
smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
physmap_idx = 0;
vmf.vmf_ebx = 0;
do {
vmf.vmf_eax = 0xE820;
vmf.vmf_edx = SMAP_SIG;
vmf.vmf_ecx = SMAPSIZ;
i = vm86_datacall(0x15, &vmf, &vmc);
if (i || vmf.vmf_eax != SMAP_SIG)
break;
if (boothowto & RB_VERBOSE)
printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
smap->type,
*(u_int32_t *)((char *)&smap->base + 4),
(u_int32_t)smap->base,
*(u_int32_t *)((char *)&smap->length + 4),
(u_int32_t)smap->length);
if (smap->type != 0x01)
goto next_run;
if (smap->length == 0)
goto next_run;
for (i = 0; i <= physmap_idx; i += 2) {
if (smap->base < physmap[i + 1]) {
if (boothowto & RB_VERBOSE)
printf(
"Overlapping or non-montonic memory region, ignoring second region\n");
goto next_run;
}
}
if (smap->base == physmap[physmap_idx + 1]) {
physmap[physmap_idx + 1] += smap->length;
goto next_run;
}
physmap_idx += 2;
if (physmap_idx == PHYSMAP_SIZE) {
printf(
"Too many segments in the physical address map, giving up\n");
break;
}
physmap[physmap_idx] = smap->base;
physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
} while (vmf.vmf_ebx != 0);
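
physmap[] accumulates [base, end) pairs, two slots per usable region, with physmap_idx indexing the base slot of the last pair; E820 entries that continue the previous region are merged in place instead of opening a new pair. On a hypothetical 64MB machine with the usual hole under 1MB, the array might end up as (values invented for illustration):

physmap[0] = 0x00000000;	physmap[1] = 0x0009f000;	/* base memory */
physmap[2] = 0x00100000;	physmap[3] = 0x04000000;	/* extended */
/* physmap_idx == 2 */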
if (physmap[1] != 0)
goto physmap_done;
/*
* try memory map with INT 15:E801
*/
vmf.vmf_ax = 0xE801;
if (vm86_intcall(0x15, &vmf) == 0) {
extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
} else {
#if 0
vmf.vmf_ah = 0x88;
vm86_intcall(0x15, &vmf);
extmem = vmf.vmf_ax;
#else
/*
* Prefer the RTC value for extended memory.
*/
extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
}
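
The E801 arithmetic reflects that interface's split units: CX returns memory between 1MB and 16MB in 1KB blocks, and DX returns memory above 16MB in 64KB blocks, so cx + dx * 64 yields extended memory in kilobytes. A worked example with hypothetical register values:

extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	/* CX = 0x3c00 (15360K), DX = 0x0300 (768 * 64K = 49152K) */
	/* => extmem = 64512K, i.e. 63MB above the 1MB line */

The disabled AH=0x88 fallback can report at most 0xffff kilobytes, which is one reason the RTC CMOS value is preferred when E801 fails.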
/*
* Only perform calculations in this section if there is no system
* map; any system new enough to support SMAP probably does not
* need these workarounds.
*/
/*
* Special hack for chipsets that still remap the 384k hole when
* there's 16MB of memory - this really confuses people that
* are trying to use bus mastering ISA controllers with the
* "16MB limit"; they only have 16MB, but the remapping puts
* them beyond the limit.
*/
/*
* If extended memory is between 15-16MB (16-17MB phys address range),
* chop it to 15MB.
*/
if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
extmem = 15 * 1024;
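
The clamp above is the same 15MB cutoff that the old init386() code (further down) expressed in pages; the two unit systems agree:

/* getmemsize(), in KB:	15 * 1024 = 15360K .. 16 * 1024 = 16384K */
/* old init386(), in 4K pages:	3840 * 4K = 15360K .. 4096 * 4K = 16384K */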
physmap[0] = 0;
physmap[1] = basemem * 1024;
physmap_idx = 2;
physmap[physmap_idx] = 0x100000;
physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
/*
* Indicate that we wish to do a speculative search for memory
* beyond the end of the reported size if the indicated amount
* is 64M (or more).
*
* XXX we should only do this in the RTC / 0x88 case
*/
if (extmem >= 16 * 1024)
speculative_mprobe = TRUE;
physmap_done:
/*
* Now, physmap contains a map of physical memory.
*/
#ifdef SMP
/* make hole for AP bootstrap code */
physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif
/*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
*/
Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE;
/*
* If a specific amount of memory is indicated via the MAXMEM
* option or the npx0 "msize", then don't do the speculative
* memory probe.
*/
#ifdef MAXMEM
Maxmem = MAXMEM / 4;
speculative_mprobe = FALSE;
#endif
#if NNPX > 0
if (resource_int_value("npx", 0, "msize", &msize) == 0) {
if (msize != 0) {
Maxmem = msize / 4;
speculative_mprobe = FALSE;
}
}
#endif
/* XXX former point of mp_probe() and pmap_bootstrap() */
/*
* Size up each available chunk of physical memory.
*/
physmap[0] = PAGE_SIZE; /* mask off page 0 */
pa_indx = 0;
phys_avail[pa_indx++] = physmap[0];
phys_avail[pa_indx] = physmap[0];
pte = (pt_entry_t)vtopte(KERNBASE);
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
* physmap is in bytes, so when converting to page boundaries,
* round up the start address and round down the end address.
*/
for (i = 0; i <= physmap_idx; i += 2) {
int end;
end = ptoa(Maxmem);
if (physmap[i + 1] < end)
end = trunc_page(physmap[i + 1]);
for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
int tmp, page_bad;
int *ptr = 0;
/*
* block out kernel memory as not available.
*/
if (pa >= 0x100000 && pa < first)
continue;
page_bad = FALSE;
/*
* map page into kernel: valid, read/write, non-cacheable
*/
*pte = pa | PG_V | PG_RW | PG_N;
invltlb();
tmp = *(int *)ptr;
/*
* Test for alternating 1's and 0's
*/
*(volatile int *)ptr = 0xaaaaaaaa;
if (*(volatile int *)ptr != 0xaaaaaaaa) {
page_bad = TRUE;
}
/*
* Test for alternating 0's and 1's
*/
*(volatile int *)ptr = 0x55555555;
if (*(volatile int *)ptr != 0x55555555) {
page_bad = TRUE;
}
/*
* Test for all 1's
*/
*(volatile int *)ptr = 0xffffffff;
if (*(volatile int *)ptr != 0xffffffff) {
page_bad = TRUE;
}
/*
* Test for all 0's
*/
*(volatile int *)ptr = 0x0;
if (*(volatile int *)ptr != 0x0) {
page_bad = TRUE;
}
/*
* Restore original value.
*/
*(int *)ptr = tmp;
/*
* Adjust array of valid/good pages.
*/
if (page_bad == TRUE) {
continue;
}
/*
* If this good page is a continuation of the
* previous set of good pages, then just increase
* the end pointer. Otherwise start a new chunk.
* Note that "end" points one higher than end,
* making the range >= start and < end.
* If we're also doing a speculative memory
* test and we're at or past the end, bump up Maxmem
* so that we keep going. The first bad page
* will terminate the loop.
*/
if (phys_avail[pa_indx] == pa) {
phys_avail[pa_indx] += PAGE_SIZE;
if (speculative_mprobe == TRUE &&
phys_avail[pa_indx] >= (64*1024*1024))
end += PAGE_SIZE;
} else {
pa_indx++;
if (pa_indx == PHYS_AVAIL_ARRAY_END) {
printf("Too many holes in the physical address space, giving up\n");
pa_indx--;
break;
}
phys_avail[pa_indx++] = pa; /* start */
phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
}
physmem++;
}
}
*pte = 0;
invltlb();
/*
* XXX
* The last chunk must contain at least one page plus the message
* buffer to avoid complicating other code (message buffer address
* calculation, etc.).
*/
while (phys_avail[pa_indx - 1] + PAGE_SIZE +
round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
phys_avail[pa_indx--] = 0;
phys_avail[pa_indx--] = 0;
}
Maxmem = atop(phys_avail[pa_indx]);
/* Trim off space for the message buffer. */
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
}
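
The probe loop above performs a classic destructive pattern test on every candidate page: map it non-cacheable, save one word, write and read back 0xaaaaaaaa, 0x55555555, 0xffffffff and 0x0, then restore the word. Hoisted into a stand-alone helper, the test would look like this sketch (ptr must point into the temporary uncached mapping):

static int
page_ok(volatile int *ptr)
{
	static const int patterns[] =
	    { 0xaaaaaaaa, 0x55555555, 0xffffffff, 0x0 };
	int saved, ok, i;

	saved = *ptr;
	ok = 1;
	for (i = 0; i < 4; i++) {
		*ptr = patterns[i];
		if (*ptr != patterns[i])
			ok = 0;		/* stuck or aliased bits */
	}
	*ptr = saved;			/* leave memory as found */
	return (ok);
}

Mapping the page with PG_N (cache-disable) is what makes the read-back meaningful; through the cache, the test would only ever see its own cached line.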
void
init386(first)
int first;
{
int x;
unsigned biosbasemem, biosextmem;
struct gate_descriptor *gdp;
int gsel_tss;
#if NNPX > 0
int msize;
#endif
#ifndef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif
int pagesinbase, pagesinext;
vm_offset_t target_page;
int pa_indx, off;
int speculative_mprobe;
int off;
/*
* Prevent lowering of the ipl if we call tsleep() early.
@ -1286,20 +1625,14 @@ init386(first)
initializecpu(); /* Initialize CPU registers */
/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
common_tss.tss_ioopt = (sizeof common_tss) << 16;
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
ltr(gsel_tss);
#ifdef VM86
private_tss = 0;
tss_gdt = &gdt[GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
@ -1314,138 +1647,8 @@ init386(first)
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
#ifdef VM86
initial_bioscalls(&biosbasemem, &biosextmem);
#else
/* Use BIOS values stored in RTC CMOS RAM, since probing
* breaks certain 386 AT relics.
*/
biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
#endif
/*
* If BIOS tells us that it has more than 640k in the basemem,
* don't believe it - set it to 640k.
*/
if (biosbasemem > 640) {
printf("Preposterous RTC basemem of %uK, truncating to 640K\n",
biosbasemem);
biosbasemem = 640;
}
if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
bootinfo.bi_basemem);
bootinfo.bi_basemem = 640;
}
/*
* Warn if the official BIOS interface disagrees with the RTC
* interface used above about the amount of base memory or the
* amount of extended memory. Prefer the BIOS value for the base
* memory. This is necessary for machines that `steal' base
* memory for use as BIOS memory, at least if we are going to use
* the BIOS for apm. Prefer the RTC value for extended memory.
* Eventually the hackish interface shouldn't even be looked at.
*/
if (bootinfo.bi_memsizes_valid) {
if (bootinfo.bi_basemem != biosbasemem) {
vm_offset_t pa;
printf(
"BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n",
bootinfo.bi_basemem, biosbasemem);
biosbasemem = bootinfo.bi_basemem;
/*
* XXX if biosbasemem is now < 640, there is a `hole'
* between the end of base memory and the start of
* ISA memory. The hole may be empty or it may
* contain BIOS code or data. Map it read/write so
* that the BIOS can write to it. (Memory from 0 to
* the physical end of the kernel is mapped read-only
* to begin with and then parts of it are remapped.
* The parts that aren't remapped form holes that
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
* The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
* This code is similar to the code used in
* pmap_mapdev, but since no memory needs to be
* allocated we simply change the mapping.
*/
for (pa = trunc_page(biosbasemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
unsigned *pte;
pte = (unsigned *)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
}
if (bootinfo.bi_extmem != biosextmem)
printf("BIOS extmem (%uK) != RTC extmem (%uK)\n",
bootinfo.bi_extmem, biosextmem);
}
#ifdef SMP
/* make hole for AP bootstrap code */
pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
#else
pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
#endif
pagesinext = biosextmem * 1024 / PAGE_SIZE;
/*
* Special hack for chipsets that still remap the 384k hole when
* there's 16MB of memory - this really confuses people that
* are trying to use bus mastering ISA controllers with the
* "16MB limit"; they only have 16MB, but the remapping puts
* them beyond the limit.
*/
/*
* If extended memory is between 15-16MB (16-17MB phys address range),
* chop it to 15MB.
*/
if ((pagesinext > 3840) && (pagesinext < 4096))
pagesinext = 3840;
/*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
*/
Maxmem = pagesinext + 0x100000/PAGE_SIZE;
/*
* Indicate that we wish to do a speculative search for memory beyond
* the end of the reported size if the indicated amount is 64MB (0x4000
* pages) - which is the largest amount that the BIOS/bootblocks can
* currently report. If a specific amount of memory is indicated via
* the MAXMEM option or the npx0 "msize", then don't do the speculative
* memory probe.
*/
if (Maxmem >= 0x4000)
speculative_mprobe = TRUE;
else
speculative_mprobe = FALSE;
#ifdef MAXMEM
Maxmem = MAXMEM/4;
speculative_mprobe = FALSE;
#endif
#if NNPX > 0
if (resource_int_value("npx", 0, "msize", &msize) == 0) {
if (msize != 0) {
Maxmem = msize / 4;
speculative_mprobe = FALSE;
}
}
#endif
vm86_initialize();
getmemsize(first);
#ifdef SMP
/* look for the MP hardware - needed for apic addresses */
@ -1453,130 +1656,7 @@ init386(first)
#endif
/* call pmap initialization to make new kernel address space */
pmap_bootstrap (first, 0);
/*
* Size up each available chunk of physical memory.
*/
/*
* We currently don't bother testing base memory.
* XXX ...but we probably should.
*/
pa_indx = 0;
if (pagesinbase > 1) {
phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */
phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */
physmem = pagesinbase - 1;
} else {
/* point at first chunk end */
pa_indx++;
}
for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) {
int tmp, page_bad;
page_bad = FALSE;
/*
* map page into kernel: valid, read/write, non-cacheable
*/
*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
invltlb();
tmp = *(int *)CADDR1;
/*
* Test for alternating 1's and 0's
*/
*(volatile int *)CADDR1 = 0xaaaaaaaa;
if (*(volatile int *)CADDR1 != 0xaaaaaaaa) {
page_bad = TRUE;
}
/*
* Test for alternating 0's and 1's
*/
*(volatile int *)CADDR1 = 0x55555555;
if (*(volatile int *)CADDR1 != 0x55555555) {
page_bad = TRUE;
}
/*
* Test for all 1's
*/
*(volatile int *)CADDR1 = 0xffffffff;
if (*(volatile int *)CADDR1 != 0xffffffff) {
page_bad = TRUE;
}
/*
* Test for all 0's
*/
*(volatile int *)CADDR1 = 0x0;
if (*(volatile int *)CADDR1 != 0x0) {
/*
* test of page failed
*/
page_bad = TRUE;
}
/*
* Restore original value.
*/
*(int *)CADDR1 = tmp;
/*
* Adjust array of valid/good pages.
*/
if (page_bad == FALSE) {
/*
* If this good page is a continuation of the
* previous set of good pages, then just increase
* the end pointer. Otherwise start a new chunk.
* Note that "end" points one higher than end,
* making the range >= start and < end.
* If we're also doing a speculative memory
* test and we're at or past the end, bump up Maxmem
* so that we keep going. The first bad page
* will terminate the loop.
*/
if (phys_avail[pa_indx] == target_page) {
phys_avail[pa_indx] += PAGE_SIZE;
if (speculative_mprobe == TRUE &&
phys_avail[pa_indx] >= (64*1024*1024))
Maxmem++;
} else {
pa_indx++;
if (pa_indx == PHYS_AVAIL_ARRAY_END) {
printf("Too many holes in the physical address space, giving up\n");
pa_indx--;
break;
}
phys_avail[pa_indx++] = target_page; /* start */
phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */
}
physmem++;
}
}
*(int *)CMAP1 = 0;
invltlb();
/*
* XXX
* The last chunk must contain at least one page plus the message
* buffer to avoid complicating other code (message buffer address
* calculation, etc.).
*/
while (phys_avail[pa_indx - 1] + PAGE_SIZE +
round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
phys_avail[pa_indx--] = 0;
phys_avail[pa_indx--] = 0;
}
Maxmem = atop(phys_avail[pa_indx]);
/* Trim off space for the message buffer. */
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
pmap_bootstrap(first, 0);
/* now running on new page tables, configured, and u/iom is accessible */
@ -1614,9 +1694,7 @@ init386(first)
#ifdef SMP
proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
#ifdef VM86
proc0.p_addr->u_pcb.pcb_ext = 0;
#endif
/* Sigh, relocate physical addresses left from bootstrap */
if (bootinfo.bi_modulep) {

View File

@ -22,11 +22,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $
* $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $
*/
#include "opt_smp.h"
#include "opt_vm86.h"
#include "opt_cpu.h"
#include "opt_user_ldt.h"
@ -482,10 +481,8 @@ init_secondary(void)
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
#ifdef VM86
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
ltr(gsel_tss);
load_cr0(0x8005003b); /* XXX! */

View File

@ -31,11 +31,9 @@
* mpboot.s: FreeBSD machine support for the Intel MP Spec
* multiprocessor systems.
*
* $Id: mpboot.s,v 1.9 1999/04/10 22:58:29 tegge Exp $
* $Id: mpboot.s,v 1.10 1999/04/28 01:03:22 luoqi Exp $
*/
#include "opt_vm86.h"
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/apic.h>
#include <machine/specialreg.h>
@ -94,7 +92,6 @@ mp_begin: /* now running relocated at KERNBASE */
call _init_secondary /* load i386 tables */
CHECKPOINT(0x38, 5)
#ifdef VM86
/*
* If the [BSP] CPU has support for VME, turn it on.
*/
@ -104,7 +101,6 @@ mp_begin: /* now running relocated at KERNBASE */
orl $CR4_VME, %eax
movl %eax, %cr4
1:
#endif
/* disable the APIC, just to be SURE */
movl lapic_svr, %eax /* get spurious vector reg. */

View File

@ -22,11 +22,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $
* $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $
*/
#include "opt_smp.h"
#include "opt_vm86.h"
#include "opt_cpu.h"
#include "opt_user_ldt.h"
@ -482,10 +481,8 @@ init_secondary(void)
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
#ifdef VM86
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
ltr(gsel_tss);
load_cr0(0x8005003b); /* XXX! */

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.235 1999/05/18 06:01:49 alc Exp $
* $Id: pmap.c,v 1.236 1999/05/28 05:38:56 alc Exp $
*/
/*
@ -71,7 +71,6 @@
#include "opt_disable_pse.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include <sys/param.h>

View File

@ -33,12 +33,11 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: swtch.s,v 1.80 1999/05/06 09:44:49 bde Exp $
* $Id: swtch.s,v 1.81 1999/05/12 21:38:45 luoqi Exp $
*/
#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include <sys/rtprio.h>
@ -277,7 +276,6 @@ _idle:
/* update common_tss.tss_esp0 pointer */
movl %ecx, _common_tss + TSS_ESP0
#ifdef VM86
movl _cpuid, %esi
btrl %esi, _private_tss
jae 1f
@ -294,7 +292,6 @@ _idle:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
1:
#endif /* VM86 */
sti
@ -397,7 +394,6 @@ idle_loop:
/* update common_tss.tss_esp0 pointer */
movl %esp, _common_tss + TSS_ESP0
#ifdef VM86
movl $0, %esi
btrl %esi, _private_tss
jae 1f
@ -413,7 +409,6 @@ idle_loop:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
1:
#endif /* VM86 */
sti
@ -630,7 +625,6 @@ swtch_com:
movl %ebx,%cr3
4:
#ifdef VM86
#ifdef SMP
movl _cpuid, %esi
#else
@ -642,18 +636,12 @@ swtch_com:
movl PCB_EXT(%edx), %edi /* new tss descriptor */
jmp 2f
1:
#endif
/* update common_tss.tss_esp0 pointer */
movl %edx, %ebx /* pcb */
#ifdef VM86
addl $(UPAGES * PAGE_SIZE - 16), %ebx
#else
addl $(UPAGES * PAGE_SIZE), %ebx
#endif /* VM86 */
movl %ebx, _common_tss + TSS_ESP0
#ifdef VM86
btrl %esi, _private_tss
jae 3f
#ifdef SMP
@ -672,7 +660,6 @@ swtch_com:
movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */
ltr %si
3:
#endif /* VM86 */
movl P_VMSPACE(%ecx), %ebx
#ifdef SMP
movl _cpuid, %eax

View File

@ -31,12 +31,11 @@
* SUCH DAMAGE.
*
* from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
* $Id: sys_machdep.c,v 1.40 1999/04/27 11:14:33 phk Exp $
* $Id: sys_machdep.c,v 1.41 1999/04/28 01:03:25 luoqi Exp $
*
*/
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_smp.h"
#include <sys/param.h>
@ -71,11 +70,9 @@ void set_user_ldt __P((struct pcb *pcb));
static int i386_get_ldt __P((struct proc *, char *));
static int i386_set_ldt __P((struct proc *, char *));
#endif
#ifdef VM86
static int i386_get_ioperm __P((struct proc *, char *));
static int i386_set_ioperm __P((struct proc *, char *));
int i386_extend_pcb __P((struct proc *));
#endif
#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
@ -101,7 +98,6 @@ sysarch(p, uap)
error = i386_set_ldt(p, uap->parms);
break;
#endif
#ifdef VM86
case I386_GET_IOPERM:
error = i386_get_ioperm(p, uap->parms);
break;
@ -111,7 +107,6 @@ sysarch(p, uap)
case I386_VM86:
error = vm86_sysarch(p, uap->parms);
break;
#endif
default:
error = EINVAL;
break;
@ -119,7 +114,6 @@ sysarch(p, uap)
return (error);
}
#ifdef VM86
int
i386_extend_pcb(struct proc *p)
{
@ -251,7 +245,6 @@ i386_get_ioperm(p, args)
error = copyout(&ua, args, sizeof(struct i386_ioperm_args));
return (error);
}
#endif /* VM86 */
#ifdef USER_LDT
/*
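
With the #ifdefs gone, I386_GET_IOPERM, I386_SET_IOPERM and I386_VM86 are always reachable through sysarch(), so the corresponding libc wrappers no longer fail with EINVAL on kernels built without options VM86. A hypothetical userland use of the ioperm interface via the i386_set_ioperm(3) wrapper (the port range here is illustrative):

#include <machine/sysarch.h>
#include <err.h>

int
main(void)
{
	/* request direct access to I/O ports 0x80..0x87 (root only) */
	if (i386_set_ioperm(0x80, 8, 1) != 0)
		err(1, "i386_set_ioperm");
	return (0);
}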

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.136 1999/04/28 01:03:26 luoqi Exp $
* $Id: trap.c,v 1.137 1999/05/06 18:12:17 peter Exp $
*/
/*
@ -47,7 +47,6 @@
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"
#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -90,9 +89,7 @@
#include <machine/clock.h>
#endif
#ifdef VM86
#include <machine/vm86.h>
#endif
#ifdef DDB
extern int in_Debugger, debugger_on_panic;
@ -266,7 +263,6 @@ trap(frame)
type = frame.tf_trapno;
code = frame.tf_err;
#ifdef VM86
if (in_vm86call) {
if (frame.tf_eflags & PSL_VM &&
(type == T_PROTFLT || type == T_STKFLT)) {
@ -293,7 +289,6 @@ trap(frame)
}
goto kernel_trap; /* normal kernel trap handling */
}
#endif
if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
/* user trap */
@ -335,14 +330,12 @@ trap(frame)
*/
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
#ifdef VM86
if (frame.tf_eflags & PSL_VM) {
i = vm86_emulate((struct vm86frame *)&frame);
if (i == 0)
goto out;
break;
}
#endif /* VM86 */
/* FALL THROUGH */
case T_SEGNPFLT: /* segment not present fault */
@ -426,9 +419,7 @@ trap(frame)
break;
}
} else {
#ifdef VM86
kernel_trap:
#endif
/* kernel trap */
switch (type) {

View File

@ -38,12 +38,11 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.120 1999/02/19 14:25:33 luoqi Exp $
* $Id: vm_machdep.c,v 1.121 1999/04/19 14:14:13 peter Exp $
*/
#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
@ -64,10 +63,8 @@
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef VM86
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -133,11 +130,7 @@ cpu_fork(p1, p2)
* syscall. This copies the user mode register values.
*/
p2->p_md.md_regs = (struct trapframe *)
#ifdef VM86
((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
#else
((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1;
#endif /* VM86 */
*p2->p_md.md_regs = *p1->p_md.md_regs;
/*
@ -162,12 +155,10 @@ cpu_fork(p1, p2)
#ifdef SMP
pcb2->pcb_mpnest = 1;
#endif
#ifdef VM86
/*
* XXX don't copy the i/o pages. this should probably be fixed.
*/
pcb2->pcb_ext = 0;
#endif
#ifdef USER_LDT
/* Copy the LDT, if necessary. */
@ -216,14 +207,11 @@ void
cpu_exit(p)
register struct proc *p;
{
#if defined(USER_LDT) || defined(VM86)
struct pcb *pcb = &p->p_addr->u_pcb;
#endif
#if NNPX > 0
npxexit(p);
#endif /* NNPX */
#ifdef VM86
if (pcb->pcb_ext != 0) {
/*
* XXX do we need to move the TSS off the allocated pages
@ -233,7 +221,6 @@ cpu_exit(p)
ctob(IOPAGES + 1));
pcb->pcb_ext = 0;
}
#endif
#ifdef USER_LDT
if (pcb->pcb_ldt != 0) {
if (pcb == curpcb) {
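
The kmem_free() above sizes the pcb extension as ctob(IOPAGES + 1): one page for struct pcb_ext (the per-process TSS plus bookkeeping) followed by IOPAGES pages of I/O permission bitmap, one bit for each of the 65536 x86 I/O ports. The arithmetic, as a sketch:

/* 65536 ports / 8 bits = 8192 bytes of bitmap = 2 pages (IOPAGES) */
/* ctob(n) converts pages to bytes: n << PAGE_SHIFT */
size = ctob(IOPAGES + 1);	/* bitmap pages + 1 page for pcb_ext */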

View File

@ -22,11 +22,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $
* $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $
*/
#include "opt_smp.h"
#include "opt_vm86.h"
#include "opt_cpu.h"
#include "opt_user_ldt.h"
@ -482,10 +481,8 @@ init_secondary(void)
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
#ifdef VM86
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
ltr(gsel_tss);
load_cr0(0x8005003b); /* XXX! */

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)pcb.h 5.10 (Berkeley) 5/12/91
* $Id: pcb.h,v 1.26 1998/02/03 21:27:50 bde Exp $
* $Id: pcb.h,v 1.27 1999/04/28 01:04:05 luoqi Exp $
*/
#ifndef _I386_PCB_H_
@ -66,11 +66,7 @@ struct pcb {
u_long pcb_mpnest_dontuse;
#endif
int pcb_gs;
#ifdef VM86
struct pcb_ext *pcb_ext; /* optional pcb extension */
#else
struct pcb_ext *pcb_ext_dontuse;
#endif
u_long __pcb_spare[2]; /* adjust to avoid core dump size changes */
};

View File

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: globaldata.h,v 1.8 1999/04/28 01:04:00 luoqi Exp $
* $Id: globaldata.h,v 1.9 1999/05/12 21:39:00 luoqi Exp $
*/
/*
@ -46,10 +46,8 @@ struct globaldata {
struct timeval gd_switchtime;
struct i386tss gd_common_tss;
int gd_switchticks;
#ifdef VM86
struct segment_descriptor gd_common_tssd;
struct segment_descriptor *gd_tss_gdt;
#endif
#ifdef USER_LDT
int gd_currentldt;
#endif

View File

@ -1,6 +1,6 @@
/*
* from: vector.s, 386BSD 0.1 unknown origin
* $Id: apic_vector.s,v 1.37 1999/04/28 01:04:12 luoqi Exp $
* $Id: apic_vector.s,v 1.38 1999/05/28 14:08:57 bde Exp $
*/
@ -628,10 +628,8 @@ _Xcpucheckstate:
andl $3, %eax
cmpl $3, %eax
je 1f
#ifdef VM86
testl $PSL_VM, 24(%esp)
jne 1f
#endif
incl %ebx /* system or interrupt */
#ifdef CPL_AND_CML
cmpl $0, _inside_intr

View File

@ -30,11 +30,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: exception.s,v 1.59 1999/04/28 01:03:18 luoqi Exp $
* $Id: exception.s,v 1.60 1999/05/06 09:44:49 bde Exp $
*/
#include "npx.h"
#include "opt_vm86.h"
#include <machine/asmacros.h>
#include <machine/ipl.h>
@ -362,12 +361,10 @@ ENTRY(fork_trampoline)
jmp _doreti
#ifdef VM86
/*
* Include vm86 call routines, which want to call _doreti.
*/
#include "i386/i386/vm86bios.s"
#endif /* VM86 */
/*
* Include what was once config+isa-dependent code.

View File

@ -34,10 +34,9 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $Id: genassym.c,v 1.68 1999/05/12 21:30:49 luoqi Exp $
* $Id: genassym.c,v 1.69 1999/05/12 21:38:40 luoqi Exp $
*/
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include <stddef.h>
@ -68,9 +67,7 @@
#endif
#include <machine/segments.h>
#include <machine/globaldata.h>
#ifdef VM86
#include <machine/vm86.h>
#endif
#define OS(s, m) ((u_int)offsetof(struct s, m))
@ -128,9 +125,7 @@ main()
printf("#define\tTSS_ESP0 %#x\n", OS(i386tss, tss_esp0));
printf("#define\tPCB_USERLDT %#x\n", OS(pcb, pcb_ldt));
printf("#define\tPCB_GS %#x\n", OS(pcb, pcb_gs));
#ifdef VM86
printf("#define\tPCB_EXT %#x\n", OS(pcb, pcb_ext));
#endif
#ifdef SMP
printf("#define\tPCB_MPNEST %#x\n", OS(pcb, pcb_mpnest));
#endif
@ -201,10 +196,8 @@ main()
printf("#define\tGD_COMMON_TSS %#x\n", OS(globaldata, gd_common_tss));
printf("#define\tGD_SWITCHTIME %#x\n", OS(globaldata, gd_switchtime));
printf("#define\tGD_SWITCHTICKS %#x\n", OS(globaldata, gd_switchticks));
#ifdef VM86
printf("#define\tGD_COMMON_TSSD %#x\n", OS(globaldata, gd_common_tssd));
printf("#define\tGD_TSS_GDT %#x\n", OS(globaldata, gd_tss_gdt));
#endif
#ifdef USER_LDT
printf("#define\tGD_CURRENTLDT %#x\n", OS(globaldata, gd_currentldt));
#endif
@ -233,9 +226,7 @@ main()
printf("#define\tKPSEL %#x\n", GSEL(GPRIV_SEL, SEL_KPL));
#endif
printf("#define\tGPROC0_SEL %#x\n", GPROC0_SEL);
#ifdef VM86
printf("#define\tVM86_FRAMESIZE %#x\n", sizeof(struct vm86frame));
#endif
return (0);
}

View File

@ -23,10 +23,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: globals.s,v 1.10 1999/04/28 01:03:19 luoqi Exp $
* $Id: globals.s,v 1.11 1999/05/12 21:38:41 luoqi Exp $
*/
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include <machine/asmacros.h>
@ -71,11 +70,9 @@ globaldata:
.set gd_switchtime,globaldata + GD_SWITCHTIME
.set gd_switchticks,globaldata + GD_SWITCHTICKS
#ifdef VM86
.globl gd_common_tssd, gd_tss_gdt
.set gd_common_tssd,globaldata + GD_COMMON_TSSD
.set gd_tss_gdt,globaldata + GD_TSS_GDT
#endif
#ifdef USER_LDT
.globl gd_currentldt
@ -92,11 +89,9 @@ globaldata:
.set _switchtime,globaldata + GD_SWITCHTIME
.set _switchticks,globaldata + GD_SWITCHTICKS
#ifdef VM86
.globl _common_tssd, _tss_gdt
.set _common_tssd,globaldata + GD_COMMON_TSSD
.set _tss_gdt,globaldata + GD_TSS_GDT
#endif
#ifdef USER_LDT
.globl _currentldt

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.121 1999/04/28 01:03:20 luoqi Exp $
* $Id: locore.s,v 1.122 1999/05/09 19:01:49 peter Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -48,7 +48,6 @@
#include "opt_ddb.h"
#include "opt_nfsroot.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"
#include <sys/syscall.h>
#include <sys/reboot.h>
@ -134,13 +133,11 @@ _KPTphys: .long 0 /* phys addr of kernel page tables */
_proc0paddr: .long 0 /* address of proc 0 address space */
p0upa: .long 0 /* phys addr of proc0's UPAGES */
#ifdef VM86
vm86phystk: .long 0 /* PA of vm86/bios stack */
.globl _vm86paddr, _vm86pa
_vm86paddr: .long 0 /* address of vm86 region */
_vm86pa: .long 0 /* phys addr of vm86 region */
#endif
#ifdef BDE_DEBUGGER
.globl _bdb_exists /* flag to indicate BDE debugger is present */
@ -311,18 +308,10 @@ NON_GPROF_ENTRY(btext)
stosb
#if NAPM > 0
#ifndef VM86
/*
* XXX it's not clear that APM can live in the current environment.
* Only pc-relative addressing works.
*/
call _apm_setup
#endif
#endif
call create_pagetables
#ifdef VM86
/*
* If the CPU has support for VME, turn it on.
*/
@ -332,7 +321,6 @@ NON_GPROF_ENTRY(btext)
orl $CR4_VME, %eax
movl %eax, %cr4
1:
#endif /* VM86 */
#ifdef BDE_DEBUGGER
/*
@ -786,7 +774,6 @@ no_kernend:
addl $KERNBASE, %esi
movl %esi, R(_proc0paddr)
#ifdef VM86
ALLOCPAGES(1) /* vm86/bios stack */
movl %esi,R(vm86phystk)
@ -794,7 +781,6 @@ no_kernend:
movl %esi,R(_vm86pa)
addl $KERNBASE, %esi
movl %esi, R(_vm86paddr)
#endif /* VM86 */
#ifdef SMP
/* Allocate cpu0's private data page */
@ -862,7 +848,6 @@ map_read_write:
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys($PG_RW)
#ifdef VM86
/* Map space for the vm86 region */
movl R(vm86phystk), %eax
movl $4, %ecx
@ -879,7 +864,6 @@ map_read_write:
movl $ISA_HOLE_START>>PAGE_SHIFT, %ebx
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkpt(R(_vm86pa), $PG_RW|PG_U)
#endif /* VM86 */
#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.335 1999/05/12 21:38:42 luoqi Exp $
* $Id: machdep.c,v 1.336 1999/05/31 18:35:53 dfr Exp $
*/
#include "apm.h"
@ -53,7 +53,6 @@
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -124,9 +123,8 @@
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/intr_machdep.h>
#ifndef VM86
#include <isa/rtc.h>
#endif
#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>
@ -568,7 +566,6 @@ sendsig(catcher, sig, mask, code)
sf.sf_sc.sc_trapno = regs->tf_trapno;
sf.sf_sc.sc_err = regs->tf_err;
#ifdef VM86
/*
* If we're a vm86 process, we want to save the segment registers.
* We also change eflags to be our emulated eflags, not the actual
@ -600,7 +597,6 @@ sendsig(catcher, sig, mask, code)
*/
tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
}
#endif /* VM86 */
/*
* Copy the sigframe out to the user's stack.
@ -657,7 +653,6 @@ sigreturn(p, uap)
return(EFAULT);
eflags = scp->sc_ps;
#ifdef VM86
if (eflags & PSL_VM) {
struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
struct vm86_kernel *vm86;
@ -691,7 +686,6 @@ sigreturn(p, uap)
tf->tf_es = _udatasel;
tf->tf_fs = _udatasel;
} else {
#endif /* VM86 */
/*
* Don't allow users to change privileged or reserved flags.
*/
@ -729,9 +723,7 @@ sigreturn(p, uap)
regs->tf_ds = scp->sc_ds;
regs->tf_es = scp->sc_es;
regs->tf_fs = scp->sc_fs;
#ifdef VM86
}
#endif
/* restore scratch registers */
regs->tf_eax = scp->sc_eax;
@ -902,12 +894,10 @@ union descriptor ldt[NLDT]; /* local descriptor table */
struct region_descriptor r_gdt, r_idt;
#endif
#ifdef VM86
#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss; /* flag indicating private tss */
#endif /* VM86 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
@ -1138,26 +1128,375 @@ sdtossd(sd, ssd)
ssd->ssd_gran = sd->sd_gran;
}
#define PHYSMAP_SIZE (2 * 8)
static void
getmemsize(int first)
{
int i, physmap_idx, pa_indx;
u_int basemem, extmem;
int speculative_mprobe = FALSE;
struct vm86frame vmf;
struct vm86context vmc;
vm_offset_t pa, physmap[PHYSMAP_SIZE];
pt_entry_t pte;
struct {
u_int64_t base;
u_int64_t length;
u_int32_t type;
} *smap;
#if NNPX > 0
int msize;
#endif
bzero(&vmf, sizeof(struct vm86frame));
bzero(physmap, sizeof(physmap));
vm86_intcall(0x12, &vmf);
basemem = vmf.vmf_ax;
if (basemem > 640) {
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
basemem);
basemem = 640;
}
/*
* XXX if biosbasemem is now < 640, there is a `hole'
* between the end of base memory and the start of
* ISA memory. The hole may be empty or it may
* contain BIOS code or data. Map it read/write so
* that the BIOS can write to it. (Memory from 0 to
* the physical end of the kernel is mapped read-only
* to begin with and then parts of it are remapped.
* The parts that aren't remapped form holes that
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
* The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
* This code is similar to the code used in
* pmap_mapdev, but since no memory needs to be
* allocated we simply change the mapping.
*/
for (pa = trunc_page(basemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
pte = (pt_entry_t)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
/*
* if basemem != 640, map pages r/w into vm86 page table so
* that the bios can scribble on it.
*/
pte = (pt_entry_t)vm86paddr;
for (i = basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
/*
* map page 1 R/W into the kernel page table so we can use it
* as a buffer. The kernel will unmap this page later.
*/
pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
* get memory map with INT 15:E820
*/
#define SMAPSIZ sizeof(*smap)
#define SMAP_SIG 0x534D4150 /* 'SMAP' */
vmc.npages = 0;
smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
physmap_idx = 0;
vmf.vmf_ebx = 0;
do {
vmf.vmf_eax = 0xE820;
vmf.vmf_edx = SMAP_SIG;
vmf.vmf_ecx = SMAPSIZ;
i = vm86_datacall(0x15, &vmf, &vmc);
if (i || vmf.vmf_eax != SMAP_SIG)
break;
if (boothowto & RB_VERBOSE)
printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
smap->type,
*(u_int32_t *)((char *)&smap->base + 4),
(u_int32_t)smap->base,
*(u_int32_t *)((char *)&smap->length + 4),
(u_int32_t)smap->length);
if (smap->type != 0x01)
goto next_run;
if (smap->length == 0)
goto next_run;
for (i = 0; i <= physmap_idx; i += 2) {
if (smap->base < physmap[i + 1]) {
if (boothowto & RB_VERBOSE)
printf(
"Overlapping or non-montonic memory region, ignoring second region\n");
goto next_run;
}
}
if (smap->base == physmap[physmap_idx + 1]) {
physmap[physmap_idx + 1] += smap->length;
goto next_run;
}
physmap_idx += 2;
if (physmap_idx == PHYSMAP_SIZE) {
printf(
"Too many segments in the physical address map, giving up\n");
break;
}
physmap[physmap_idx] = smap->base;
physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
} while (vmf.vmf_ebx != 0);
if (physmap[1] != 0)
goto physmap_done;
/*
* try memory map with INT 15:E801
*/
vmf.vmf_ax = 0xE801;
if (vm86_intcall(0x15, &vmf) == 0) {
extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
} else {
#if 0
vmf.vmf_ah = 0x88;
vm86_intcall(0x15, &vmf);
extmem = vmf.vmf_ax;
#else
/*
* Prefer the RTC value for extended memory.
*/
extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
}
/*
* Only perform calculations in this section if there is no system
* map; any system new enough to support SMAP probably does not
* need these workarounds.
*/
/*
* Special hack for chipsets that still remap the 384k hole when
* there's 16MB of memory - this really confuses people that
* are trying to use bus mastering ISA controllers with the
* "16MB limit"; they only have 16MB, but the remapping puts
* them beyond the limit.
*/
/*
* If extended memory is between 15-16MB (16-17MB phys address range),
* chop it to 15MB.
*/
if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
extmem = 15 * 1024;
physmap[0] = 0;
physmap[1] = basemem * 1024;
physmap_idx = 2;
physmap[physmap_idx] = 0x100000;
physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;
/*
* Indicate that we wish to do a speculative search for memory
* beyond the end of the reported size if the indicated amount
* is 64M (or more).
*
* XXX we should only do this in the RTC / 0x88 case
*/
if (extmem >= 16 * 1024)
speculative_mprobe = TRUE;
physmap_done:
/*
* Now, physmap contains a map of physical memory.
*/
#ifdef SMP
/* make hole for AP bootstrap code */
physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif
/*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
*/
Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE;
/*
* If a specific amount of memory is indicated via the MAXMEM
* option or the npx0 "msize", then don't do the speculative
* memory probe.
*/
#ifdef MAXMEM
Maxmem = MAXMEM / 4;
speculative_mprobe = FALSE;
#endif
#if NNPX > 0
if (resource_int_value("npx", 0, "msize", &msize) == 0) {
if (msize != 0) {
Maxmem = msize / 4;
speculative_mprobe = FALSE;
}
}
#endif
/* XXX former point of mp_probe() and pmap_bootstrap() */
/*
* Size up each available chunk of physical memory.
*/
physmap[0] = PAGE_SIZE; /* mask off page 0 */
pa_indx = 0;
phys_avail[pa_indx++] = physmap[0];
phys_avail[pa_indx] = physmap[0];
pte = (pt_entry_t)vtopte(KERNBASE);
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
* physmap is in bytes, so when converting to page boundaries,
* round up the start address and round down the end address.
*/
for (i = 0; i <= physmap_idx; i += 2) {
int end;
end = ptoa(Maxmem);
if (physmap[i + 1] < end)
end = trunc_page(physmap[i + 1]);
for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
int tmp, page_bad;
int *ptr = 0;
/*
* block out kernel memory as not available.
*/
if (pa >= 0x100000 && pa < first)
continue;
page_bad = FALSE;
/*
* map page into kernel: valid, read/write, non-cacheable
*/
*pte = pa | PG_V | PG_RW | PG_N;
invltlb();
tmp = *(int *)ptr;
/*
* Test for alternating 1's and 0's
*/
*(volatile int *)ptr = 0xaaaaaaaa;
if (*(volatile int *)ptr != 0xaaaaaaaa) {
page_bad = TRUE;
}
/*
* Test for alternating 0's and 1's
*/
*(volatile int *)ptr = 0x55555555;
if (*(volatile int *)ptr != 0x55555555) {
page_bad = TRUE;
}
/*
* Test for all 1's
*/
*(volatile int *)ptr = 0xffffffff;
if (*(volatile int *)ptr != 0xffffffff) {
page_bad = TRUE;
}
/*
* Test for all 0's
*/
*(volatile int *)ptr = 0x0;
if (*(volatile int *)ptr != 0x0) {
page_bad = TRUE;
}
/*
* Restore original value.
*/
*(int *)ptr = tmp;
/*
* Adjust array of valid/good pages.
*/
if (page_bad == TRUE) {
continue;
}
/*
* If this good page is a continuation of the
* previous set of good pages, then just increase
* the end pointer. Otherwise start a new chunk.
* Note that "end" points one higher than end,
* making the range >= start and < end.
* If we're also doing a speculative memory
* test and we're at or past the end, bump up Maxmem
* so that we keep going. The first bad page
* will terminate the loop.
*/
if (phys_avail[pa_indx] == pa) {
phys_avail[pa_indx] += PAGE_SIZE;
if (speculative_mprobe == TRUE &&
phys_avail[pa_indx] >= (64*1024*1024))
end += PAGE_SIZE;
} else {
pa_indx++;
if (pa_indx == PHYS_AVAIL_ARRAY_END) {
printf("Too many holes in the physical address space, giving up\n");
pa_indx--;
break;
}
phys_avail[pa_indx++] = pa; /* start */
phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
}
physmem++;
}
}
*pte = 0;
invltlb();
/*
* XXX
* The last chunk must contain at least one page plus the message
* buffer to avoid complicating other code (message buffer address
* calculation, etc.).
*/
while (phys_avail[pa_indx - 1] + PAGE_SIZE +
round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
phys_avail[pa_indx--] = 0;
phys_avail[pa_indx--] = 0;
}
Maxmem = atop(phys_avail[pa_indx]);
/* Trim off space for the message buffer. */
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
}
void
init386(first)
int first;
{
int x;
unsigned biosbasemem, biosextmem;
struct gate_descriptor *gdp;
int gsel_tss;
#if NNPX > 0
int msize;
#endif
#ifndef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif
int pagesinbase, pagesinext;
vm_offset_t target_page;
int pa_indx, off;
int speculative_mprobe;
int off;
/*
* Prevent lowering of the ipl if we call tsleep() early.
@ -1286,20 +1625,14 @@ init386(first)
initializecpu(); /* Initialize CPU registers */
/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
common_tss.tss_ioopt = (sizeof common_tss) << 16;
gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
ltr(gsel_tss);
#ifdef VM86
private_tss = 0;
tss_gdt = &gdt[GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
@ -1314,138 +1647,8 @@ init386(first)
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
#ifdef VM86
initial_bioscalls(&biosbasemem, &biosextmem);
#else
/* Use BIOS values stored in RTC CMOS RAM, since probing
* breaks certain 386 AT relics.
*/
biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);
#endif
/*
* If BIOS tells us that it has more than 640k in the basemem,
* don't believe it - set it to 640k.
*/
if (biosbasemem > 640) {
printf("Preposterous RTC basemem of %uK, truncating to 640K\n",
biosbasemem);
biosbasemem = 640;
}
if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
bootinfo.bi_basemem);
bootinfo.bi_basemem = 640;
}
/*
* Warn if the official BIOS interface disagrees with the RTC
* interface used above about the amount of base memory or the
* amount of extended memory. Prefer the BIOS value for the base
* memory. This is necessary for machines that `steal' base
* memory for use as BIOS memory, at least if we are going to use
* the BIOS for apm. Prefer the RTC value for extended memory.
* Eventually the hackish interface shouldn't even be looked at.
*/
if (bootinfo.bi_memsizes_valid) {
if (bootinfo.bi_basemem != biosbasemem) {
vm_offset_t pa;
printf(
"BIOS basemem (%uK) != RTC basemem (%uK), setting to BIOS value\n",
bootinfo.bi_basemem, biosbasemem);
biosbasemem = bootinfo.bi_basemem;
/*
* XXX if biosbasemem is now < 640, there is a `hole'
* between the end of base memory and the start of
* ISA memory. The hole may be empty or it may
* contain BIOS code or data. Map it read/write so
* that the BIOS can write to it. (Memory from 0 to
* the physical end of the kernel is mapped read-only
* to begin with and then parts of it are remapped.
* The parts that aren't remapped form holes that
* remain read-only and are unused by the kernel.
* The base memory area is below the physical end of
* the kernel and right now forms a read-only hole.
* The part of it from PAGE_SIZE to
* (trunc_page(biosbasemem * 1024) - 1) will be
* remapped and used by the kernel later.)
*
* This code is similar to the code used in
* pmap_mapdev, but since no memory needs to be
* allocated we simply change the mapping.
*/
for (pa = trunc_page(biosbasemem * 1024);
pa < ISA_HOLE_START; pa += PAGE_SIZE) {
unsigned *pte;
pte = (unsigned *)vtopte(pa + KERNBASE);
*pte = pa | PG_RW | PG_V;
}
}
if (bootinfo.bi_extmem != biosextmem)
printf("BIOS extmem (%uK) != RTC extmem (%uK)\n",
bootinfo.bi_extmem, biosextmem);
}
#ifdef SMP
/* make hole for AP bootstrap code */
pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
#else
pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
#endif
pagesinext = biosextmem * 1024 / PAGE_SIZE;
/*
* Special hack for chipsets that still remap the 384k hole when
* there's 16MB of memory - this really confuses people that
* are trying to use bus mastering ISA controllers with the
* "16MB limit"; they only have 16MB, but the remapping puts
* them beyond the limit.
*/
/*
* If extended memory is between 15-16MB (16-17MB phys address range),
* chop it to 15MB.
*/
if ((pagesinext > 3840) && (pagesinext < 4096))
pagesinext = 3840;
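/* (3840 pages * 4K = 15MB; 4096 pages * 4K = 16MB) */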
/*
* Maxmem isn't the "maximum memory", it's one larger than the
* highest page of the physical address space. It should be
* called something like "Maxphyspage".
*/
Maxmem = pagesinext + 0x100000/PAGE_SIZE;
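/* (0x100000/PAGE_SIZE is 256 pages, the 1MB of address space below extended memory) */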
/*
* Indicate that we wish to do a speculative search for memory beyond
* the end of the reported size if the indicated amount is 64MB (0x4000
* pages) - which is the largest amount that the BIOS/bootblocks can
* currently report. If a specific amount of memory is indicated via
* the MAXMEM option or the npx0 "msize", then don't do the speculative
* memory probe.
*/
if (Maxmem >= 0x4000)
speculative_mprobe = TRUE;
else
speculative_mprobe = FALSE;
#ifdef MAXMEM
Maxmem = MAXMEM/4;
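/* (the MAXMEM option is in KB; /4 converts it to 4K pages, as with npx0's "msize" below) */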
speculative_mprobe = FALSE;
#endif
#if NNPX > 0
if (resource_int_value("npx", 0, "msize", &msize) == 0) {
if (msize != 0) {
Maxmem = msize / 4;
speculative_mprobe = FALSE;
}
}
#endif
vm86_initialize();
getmemsize(first);
#ifdef SMP
/* look for the MP hardware - needed for apic addresses */
@ -1453,130 +1656,7 @@ init386(first)
#endif
/* call pmap initialization to make new kernel address space */
pmap_bootstrap (first, 0);
/*
* Size up each available chunk of physical memory.
*/
/*
* We currently don't bother testing base memory.
* XXX ...but we probably should.
*/
pa_indx = 0;
if (pagesinbase > 1) {
phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */
phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */
physmem = pagesinbase - 1;
} else {
/* point at first chunk end */
pa_indx++;
}
for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) {
int tmp, page_bad;
page_bad = FALSE;
/*
* map page into kernel: valid, read/write, non-cacheable
*/
*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
invltlb();
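/*
 * (CMAP1 is a kernel PTE reserved by pmap for temporary mappings and
 * CADDR1 is its VA; PG_N disables caching so the patterns below
 * exercise the DRAM itself rather than a cache line.)
 */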
tmp = *(int *)CADDR1;
/*
* Test for alternating 1's and 0's
*/
*(volatile int *)CADDR1 = 0xaaaaaaaa;
if (*(volatile int *)CADDR1 != 0xaaaaaaaa) {
page_bad = TRUE;
}
/*
* Test for alternating 0's and 1's
*/
*(volatile int *)CADDR1 = 0x55555555;
if (*(volatile int *)CADDR1 != 0x55555555) {
page_bad = TRUE;
}
/*
* Test for all 1's
*/
*(volatile int *)CADDR1 = 0xffffffff;
if (*(volatile int *)CADDR1 != 0xffffffff) {
page_bad = TRUE;
}
/*
* Test for all 0's
*/
*(volatile int *)CADDR1 = 0x0;
if (*(volatile int *)CADDR1 != 0x0) {
/*
* test of page failed
*/
page_bad = TRUE;
}
/*
* Restore original value.
*/
*(int *)CADDR1 = tmp;
/*
* Adjust array of valid/good pages.
*/
if (page_bad == FALSE) {
/*
* If this good page is a continuation of the
* previous set of good pages, then just increase
* the end pointer. Otherwise start a new chunk.
* Note that "end" points one page beyond the last
* valid page, making the range >= start and < end.
* If we're also doing a speculative memory
* test and we're at or past the end, bump up Maxmem
* so that we keep going. The first bad page
* will terminate the loop.
*/
if (phys_avail[pa_indx] == target_page) {
phys_avail[pa_indx] += PAGE_SIZE;
if (speculative_mprobe == TRUE &&
phys_avail[pa_indx] >= (64*1024*1024))
Maxmem++;
} else {
pa_indx++;
if (pa_indx == PHYS_AVAIL_ARRAY_END) {
printf("Too many holes in the physical address space, giving up\n");
pa_indx--;
break;
}
phys_avail[pa_indx++] = target_page; /* start */
phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */
}
physmem++;
}
}
*(int *)CMAP1 = 0;
invltlb();
/*
* XXX
* The last chunk must contain at least one page plus the message
* buffer to avoid complicating other code (message buffer address
* calculation, etc.).
*/
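/*
 * (phys_avail[] holds (start, end) pairs, so backing pa_indx off by
 * two entries drops the entire last chunk.)
 */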
while (phys_avail[pa_indx - 1] + PAGE_SIZE +
round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
phys_avail[pa_indx--] = 0;
phys_avail[pa_indx--] = 0;
}
Maxmem = atop(phys_avail[pa_indx]);
/* Trim off space for the message buffer. */
phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);
avail_end = phys_avail[pa_indx];
pmap_bootstrap(first, 0);
/* now running on new page tables, configured, and u/iom is accessible */
@ -1614,9 +1694,7 @@ init386(first)
#ifdef SMP
proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
#ifdef VM86
proc0.p_addr->u_pcb.pcb_ext = 0;
#endif
/* Sigh, relocate physical addresses left from bootstrap */
if (bootinfo.bi_modulep) {

@ -22,11 +22,10 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: mp_machdep.c,v 1.100 1999/05/08 17:48:22 peter Exp $
* $Id: mp_machdep.c,v 1.101 1999/05/12 21:38:43 luoqi Exp $
*/
#include "opt_smp.h"
#include "opt_vm86.h"
#include "opt_cpu.h"
#include "opt_user_ldt.h"
@ -482,10 +481,8 @@ init_secondary(void)
common_tss.tss_esp0 = 0; /* not used until after switch */
common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
common_tss.tss_ioopt = (sizeof common_tss) << 16;
#ifdef VM86
tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd;
common_tssd = *tss_gdt;
#endif
ltr(gsel_tss);
load_cr0(0x8005003b); /* XXX! */

@ -31,11 +31,9 @@
* mpboot.s: FreeBSD machine support for the Intel MP Spec
* multiprocessor systems.
*
* $Id: mpboot.s,v 1.9 1999/04/10 22:58:29 tegge Exp $
* $Id: mpboot.s,v 1.10 1999/04/28 01:03:22 luoqi Exp $
*/
#include "opt_vm86.h"
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/apic.h>
#include <machine/specialreg.h>
@ -94,7 +92,6 @@ mp_begin: /* now running relocated at KERNBASE */
call _init_secondary /* load i386 tables */
CHECKPOINT(0x38, 5)
#ifdef VM86
/*
* If the [BSP] CPU has support for VME, turn it on.
*/
@ -104,7 +101,6 @@ mp_begin: /* now running relocated at KERNBASE */
orl $CR4_VME, %eax
movl %eax, %cr4
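/* (CR4_VME enables the virtual-8086 mode extensions: hardware handling of v86 interrupt flags) */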
1:
#endif
/* disable the APIC, just to be SURE */
movl lapic_svr, %eax /* get spurious vector reg. */

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.235 1999/05/18 06:01:49 alc Exp $
* $Id: pmap.c,v 1.236 1999/05/28 05:38:56 alc Exp $
*/
/*
@ -71,7 +71,6 @@
#include "opt_disable_pse.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"
#include "opt_vm86.h"
#include "opt_user_ldt.h"
#include <sys/param.h>

@ -31,12 +31,11 @@
* SUCH DAMAGE.
*
* from: @(#)sys_machdep.c 5.5 (Berkeley) 1/19/91
* $Id: sys_machdep.c,v 1.40 1999/04/27 11:14:33 phk Exp $
* $Id: sys_machdep.c,v 1.41 1999/04/28 01:03:25 luoqi Exp $
*
*/
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#include "opt_smp.h"
#include <sys/param.h>
@ -71,11 +70,9 @@ void set_user_ldt __P((struct pcb *pcb));
static int i386_get_ldt __P((struct proc *, char *));
static int i386_set_ldt __P((struct proc *, char *));
#endif
#ifdef VM86
static int i386_get_ioperm __P((struct proc *, char *));
static int i386_set_ioperm __P((struct proc *, char *));
int i386_extend_pcb __P((struct proc *));
#endif
#ifndef _SYS_SYSPROTO_H_
struct sysarch_args {
@ -101,7 +98,6 @@ sysarch(p, uap)
error = i386_set_ldt(p, uap->parms);
break;
#endif
#ifdef VM86
case I386_GET_IOPERM:
error = i386_get_ioperm(p, uap->parms);
break;
@ -111,7 +107,6 @@ sysarch(p, uap)
case I386_VM86:
error = vm86_sysarch(p, uap->parms);
break;
#endif
default:
error = EINVAL;
break;
@ -119,7 +114,6 @@ sysarch(p, uap)
return (error);
}
#ifdef VM86
int
i386_extend_pcb(struct proc *p)
{
@ -251,7 +245,6 @@ i386_get_ioperm(p, args)
error = copyout(&ua, args, sizeof(struct i386_ioperm_args));
return (error);
}
#endif /* VM86 */
#ifdef USER_LDT
/*

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.136 1999/04/28 01:03:26 luoqi Exp $
* $Id: trap.c,v 1.137 1999/05/06 18:12:17 peter Exp $
*/
/*
@ -47,7 +47,6 @@
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"
#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
@ -90,9 +89,7 @@
#include <machine/clock.h>
#endif
#ifdef VM86
#include <machine/vm86.h>
#endif
#ifdef DDB
extern int in_Debugger, debugger_on_panic;
@ -266,7 +263,6 @@ trap(frame)
type = frame.tf_trapno;
code = frame.tf_err;
#ifdef VM86
if (in_vm86call) {
if (frame.tf_eflags & PSL_VM &&
(type == T_PROTFLT || type == T_STKFLT)) {
@ -293,7 +289,6 @@ trap(frame)
}
goto kernel_trap; /* normal kernel trap handling */
}
#endif
if ((ISPL(frame.tf_cs) == SEL_UPL) || (frame.tf_eflags & PSL_VM)) {
/* user trap */
@ -335,14 +330,12 @@ trap(frame)
*/
case T_PROTFLT: /* general protection fault */
case T_STKFLT: /* stack fault */
#ifdef VM86
if (frame.tf_eflags & PSL_VM) {
i = vm86_emulate((struct vm86frame *)&frame);
if (i == 0)
goto out;
break;
}
#endif /* VM86 */
/* FALL THROUGH */
case T_SEGNPFLT: /* segment not present fault */
@ -426,9 +419,7 @@ trap(frame)
break;
}
} else {
#ifdef VM86
kernel_trap:
#endif
/* kernel trap */
switch (type) {

@ -23,11 +23,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: vm86.c,v 1.24 1999/04/28 01:03:27 luoqi Exp $
* $Id: vm86.c,v 1.25 1999/05/12 21:38:45 luoqi Exp $
*/
#include "opt_vm86.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -41,6 +39,7 @@
#include <vm/vm_page.h>
#include <vm/vm_param.h>
#include <sys/reboot.h>
#include <sys/user.h>
#include <machine/md_var.h>
@ -49,14 +48,20 @@
#include <machine/specialreg.h>
extern int i386_extend_pcb __P((struct proc *));
extern int vm86paddr, vm86pa;
extern int vm86pa;
extern struct pcb *vm86pcb;
extern int vm86_bioscall(struct vm86frame *);
extern void vm86_biosret(struct vm86frame *);
void vm86_prepcall(struct vm86frame);
struct system_map {
int type;
vm_offset_t start;
vm_offset_t end;
};
#define HLT 0xf4
#define CLI 0xfa
#define STI 0xfb
@ -350,7 +355,7 @@ struct vm86_layout {
char vml_iomap_trailer;
};
static void
void
vm86_initialize(void)
{
int i;
@ -447,6 +452,13 @@ vm86_initialize(void)
ssdtosd(&ssd, &ext->ext_tssd);
vm86pcb = pcb;
/*
* use whatever is left over in the vm86 page layout as a
* message buffer so we can capture early output.
*/
msgbufinit((vm_offset_t)vm86paddr + sizeof(struct vm86_layout),
ctob(3) - sizeof(struct vm86_layout));
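/*
 * (ctob(3) is three pages in bytes; the buffer is whatever the fixed
 * vm86_layout leaves unused within them.)
 */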
}
vm_offset_t
@ -486,97 +498,6 @@ vm86_addpage(struct vm86context *vmc, int pagenum, vm_offset_t kva)
panic("vm86_addpage: not enough room, or overlap");
}
void
initial_bioscalls(u_int *basemem, u_int *extmem)
{
int i, method;
struct vm86frame vmf;
struct vm86context vmc;
u_int64_t highwat = 0;
pt_entry_t pte;
struct {
u_int64_t base;
u_int64_t length;
u_int32_t type;
} *smap;
bzero(&vmf, sizeof(struct vm86frame)); /* safety */
vm86_initialize();
#ifndef PC98
vm86_intcall(0x12, &vmf);
*basemem = vmf.vmf_ax;
*extmem = 0;
/*
* if basemem != 640, map pages r/w into vm86 page table so
* that the bios can scribble on it.
*/
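/*
 * (*basemem / 4 converts the KB count to 4K page frames; page 160 is
 * the 640K boundary.)
 */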
pte = (pt_entry_t)vm86paddr;
for (i = *basemem / 4; i < 160; i++)
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
/*
* map page 1 R/W into the kernel page table so we can use it
* as a buffer. The kernel will unmap this page later.
*/
pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;
/*
* get memory map with INT 15:E820
*/
#define SMAPSIZ sizeof(*smap)
#define SMAP_SIG 0x534D4150 /* 'SMAP' */
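/*
 * %ebx carries the BIOS continuation cookie: zero to start, and zero
 * again from the BIOS once the map is exhausted. Each successful call
 * returns the 'SMAP' signature in %eax and one base/length/type
 * descriptor; type 1 is usable RAM.
 */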
vmc.npages = 0;
smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);
vmf.vmf_ebx = 0;
do {
vmf.vmf_eax = 0xE820;
vmf.vmf_edx = SMAP_SIG;
vmf.vmf_ecx = SMAPSIZ;
i = vm86_datacall(0x15, &vmf, &vmc);
if (i || vmf.vmf_eax != SMAP_SIG)
break;
if (smap->type == 0x01 && smap->base >= highwat) {
*extmem += (smap->length / 1024);
highwat = smap->base + smap->length;
}
} while (vmf.vmf_ebx != 0);
if (*extmem != 0) {
if (*extmem > *basemem) {
*extmem -= *basemem;
method = 0xE820;
goto done;
}
printf("E820: extmem (%d) < basemem (%d)\n", *extmem, *basemem);
}
/*
* try memory map with INT 15:E801
*/
vmf.vmf_ax = 0xE801;
if (vm86_intcall(0x15, &vmf) == 0) {
*extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
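/* (E801 returns %cx = KB between 1MB and 16MB, %dx = 64KB blocks above 16MB) */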
method = 0xE801;
goto done;
}
vmf.vmf_ah = 0x88;
vm86_intcall(0x15, &vmf);
*extmem = vmf.vmf_ax;
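/* (the legacy AH=0x88 call reports extended memory in KB in %ax, commonly capped at 15MB or 64MB) */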
method = 0x88;
done:
printf("BIOS basemem: %dK, extmem: %dK (from %#x call)\n",
*basemem, *extmem, method);
#endif /* !PC98 */
}
static void
vm86_initflags(struct vm86frame *vmf)
{

@ -23,11 +23,9 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: vm86bios.s,v 1.12 1999/05/12 21:30:51 luoqi Exp $
* $Id: vm86bios.s,v 1.13 1999/05/12 21:38:46 luoqi Exp $
*/
#include "opt_vm86.h"
#include <machine/asmacros.h> /* miscellaneous asm macros */
#include <machine/trap.h>

@ -38,12 +38,11 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.120 1999/02/19 14:25:33 luoqi Exp $
* $Id: vm_machdep.c,v 1.121 1999/04/19 14:14:13 peter Exp $
*/
#include "npx.h"
#include "opt_user_ldt.h"
#include "opt_vm86.h"
#ifdef PC98
#include "opt_pc98.h"
#endif
@ -64,10 +63,8 @@
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef VM86
#include <machine/pcb_ext.h>
#include <machine/vm86.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
@ -133,11 +130,7 @@ cpu_fork(p1, p2)
* syscall. This copies the user mode register values.
*/
p2->p_md.md_regs = (struct trapframe *)
#ifdef VM86
((int)p2->p_addr + UPAGES * PAGE_SIZE - 16) - 1;
#else
((int)p2->p_addr + UPAGES * PAGE_SIZE) - 1;
#endif /* VM86 */
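/* (the -16 again skips the vm86 segment register slots above the trapframe) */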
*p2->p_md.md_regs = *p1->p_md.md_regs;
/*
@ -162,12 +155,10 @@ cpu_fork(p1, p2)
#ifdef SMP
pcb2->pcb_mpnest = 1;
#endif
#ifdef VM86
/*
* XXX don't copy the i/o pages. this should probably be fixed.
*/
pcb2->pcb_ext = 0;
#endif
#ifdef USER_LDT
/* Copy the LDT, if necessary. */
@ -216,14 +207,11 @@ void
cpu_exit(p)
register struct proc *p;
{
#if defined(USER_LDT) || defined(VM86)
struct pcb *pcb = &p->p_addr->u_pcb;
#endif
#if NNPX > 0
npxexit(p);
#endif /* NNPX */
#ifdef VM86
if (pcb->pcb_ext != 0) {
/*
* XXX do we need to move the TSS off the allocated pages
@ -233,7 +221,6 @@ cpu_exit(p)
ctob(IOPAGES + 1));
pcb->pcb_ext = 0;
}
#endif
#ifdef USER_LDT
if (pcb->pcb_ldt != 0) {
if (pcb == curpcb) {

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: globaldata.h,v 1.8 1999/04/28 01:04:00 luoqi Exp $
* $Id: globaldata.h,v 1.9 1999/05/12 21:39:00 luoqi Exp $
*/
/*
@ -46,10 +46,8 @@ struct globaldata {
struct timeval gd_switchtime;
struct i386tss gd_common_tss;
int gd_switchticks;
#ifdef VM86
struct segment_descriptor gd_common_tssd;
struct segment_descriptor *gd_tss_gdt;
#endif
#ifdef USER_LDT
int gd_currentldt;
#endif

@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: globals.h,v 1.1 1999/04/28 01:04:01 luoqi Exp $
* $Id: globals.h,v 1.2 1999/05/12 21:39:01 luoqi Exp $
*/
#ifndef _MACHINE_GLOBALS_H_
@ -88,10 +88,8 @@
#define switchtime GLOBAL_LVALUE(switchtime, struct timeval)
#define switchticks GLOBAL_LVALUE(switchticks, int)
#ifdef VM86
#define common_tssd GLOBAL_LVALUE(common_tssd, struct segment_descriptor)
#define tss_gdt GLOBAL_LVALUE(tss_gdt, struct segment_descriptor *)
#endif
#ifdef USER_LDT
#define currentldt GLOBAL_LVALUE(currentldt, int)
@ -119,10 +117,8 @@ GLOBAL_FUNC(common_tss)
GLOBAL_FUNC(switchtime)
GLOBAL_FUNC(switchticks)
#ifdef VM86
GLOBAL_FUNC(common_tssd)
GLOBAL_FUNC(tss_gdt)
#endif
#ifdef USER_LDT
GLOBAL_FUNC(currentldt)

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)pcb.h 5.10 (Berkeley) 5/12/91
* $Id: pcb.h,v 1.26 1998/02/03 21:27:50 bde Exp $
* $Id: pcb.h,v 1.27 1999/04/28 01:04:05 luoqi Exp $
*/
#ifndef _I386_PCB_H_
@ -66,11 +66,7 @@ struct pcb {
u_long pcb_mpnest_dontuse;
#endif
int pcb_gs;
#ifdef VM86
struct pcb_ext *pcb_ext; /* optional pcb extension */
#else
struct pcb_ext *pcb_ext_dontuse;
#endif
u_long __pcb_spare[2]; /* adjust to avoid core dump size changes */
};

@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: vm86.h,v 1.9 1999/03/18 04:37:35 jlemon Exp $
* $Id: vm86.h,v 1.10 1999/04/28 01:04:09 luoqi Exp $
*/
#ifndef _MACHINE_VM86_H_
@ -150,6 +150,7 @@ struct vm86_intcall_args {
};
extern int in_vm86call;
extern int vm86paddr;
struct proc;
extern int vm86_emulate __P((struct vm86frame *));
@ -157,7 +158,7 @@ extern int vm86_sysarch __P((struct proc *, char *));
extern void vm86_trap __P((struct vm86frame *));
extern int vm86_intcall __P((int, struct vm86frame *));
extern int vm86_datacall __P((int, struct vm86frame *, struct vm86context *));
extern void initial_bioscalls __P((u_int *, u_int *));
extern void vm86_initialize __P((void));
extern vm_offset_t vm86_getpage __P((struct vm86context *, int));
extern vm_offset_t vm86_addpage __P((struct vm86context *, int, vm_offset_t));
extern int vm86_getptr __P((struct vm86context *, vm_offset_t,

@ -36,7 +36,7 @@
*
* @(#)ipl.s
*
* $Id: ipl.s,v 1.26 1999/04/28 01:04:14 luoqi Exp $
* $Id: ipl.s,v 1.27 1999/05/06 09:44:54 bde Exp $
*/
@ -155,7 +155,6 @@ doreti_exit:
FAST_ICPL_UNLOCK /* preserves %eax */
MPLOCKED decb _intr_nesting_level
MEXITCOUNT
#ifdef VM86
#ifdef CPL_AND_CML
/* XXX CPL_AND_CML needs work */
#error not ready for vm86
@ -181,7 +180,6 @@ doreti_stop:
nop
1:
FAST_ICPL_UNLOCK /* preserves %eax */
#endif /* VM86 */
#ifdef SMP
#ifdef INTR_SIMPLELOCK
@ -346,10 +344,8 @@ doreti_swi:
ALIGN_TEXT
swi_ast:
addl $8,%esp /* discard raddr & cpl to get trap frame */
#ifdef VM86
cmpl $1,_in_vm86call
je 1f /* stay in kernel mode */
#endif
testb $SEL_RPL_MASK,TF_CS(%esp)
je swi_ast_phantom
swi_ast_user:
@ -365,7 +361,6 @@ swi_ast_user:
ALIGN_TEXT
swi_ast_phantom:
#ifdef VM86
/*
* check for ast from vm86 mode. Placed down here so the jumps do
* not get taken for mainline code.
@ -373,7 +368,6 @@ swi_ast_phantom:
testl $PSL_VM,TF_EFLAGS(%esp)
jne swi_ast_user
1:
#endif /* VM86 */
/*
* These happen when there is an interrupt in a trap handler before
* ASTs can be masked or in an lcall handler before they can be

@ -23,20 +23,19 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: vesa.c,v 1.22 1999/03/31 15:27:00 yokota Exp $
* $Id: vesa.c,v 1.23 1999/05/09 15:57:52 peter Exp $
*/
#include "vga.h"
#include "opt_vga.h"
#include "opt_vesa.h"
#include "opt_vm86.h"
#include "opt_fb.h"
#ifdef VGA_NO_MODE_CHANGE
#undef VESA
#endif
#if (NVGA > 0 && defined(VESA) && defined(VM86)) || defined(KLD_MODULE)
#if (NVGA > 0 && defined(VESA)) || defined(KLD_MODULE)
#include <sys/param.h>
#include <sys/systm.h>
@ -1153,4 +1152,4 @@ static moduledata_t vesa_mod = {
DECLARE_MODULE(vesa, vesa_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
#endif /* (NVGA > 0 && VESA && VM86) || KLD_MODULE */
#endif /* (NVGA > 0 && VESA) || KLD_MODULE */

@ -8,10 +8,8 @@ NON_GPROF_ENTRY(svr4_sigcode)
leal SVR4_SIGF_UC(%esp),%eax # ucp (the call may have clobbered the
# copy at SIGF_UCP(%esp))
#if defined(NOTYET)
#ifdef VM86
testl $PSL_VM,SVR4_UC_EFLAGS(%eax)
jnz 1f
#endif
#endif
movl SVR4_UC_GS(%eax),%edx
movl %dx,%gs

@ -117,7 +117,6 @@ svr4_getcontext(p, uc, mask, oonstack)
/*
* Set the general purpose registers
*/
#ifdef VM86
if (tf->tf_eflags & PSL_VM) {
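/*
 * (in v86 mode the user segment registers live in the trapframe's
 * vm86 extension, and get_vflags() presumably merges the virtual
 * interrupt state into the returned eflags)
 */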
r[SVR4_X86_GS] = tf->tf_vm86_gs;
r[SVR4_X86_FS] = tf->tf_vm86_fs;
@ -125,7 +124,6 @@ svr4_getcontext(p, uc, mask, oonstack)
r[SVR4_X86_DS] = tf->tf_vm86_ds;
r[SVR4_X86_EFL] = get_vflags(p);
} else
#endif
{
#if defined(__NetBSD__)
__asm("movl %%gs,%w0" : "=r" (r[SVR4_X86_GS]));
@ -215,7 +213,6 @@ svr4_setcontext(p, uc)
/*
* Restore register context.
*/
#ifdef VM86
if (r[SVR4_X86_EFL] & PSL_VM) {
tf->tf_vm86_gs = r[SVR4_X86_GS];
tf->tf_vm86_fs = r[SVR4_X86_FS];
@ -223,7 +220,6 @@ svr4_setcontext(p, uc)
tf->tf_vm86_ds = r[SVR4_X86_DS];
set_vflags(p, r[SVR4_X86_EFL]);
} else
#endif
{
/*
* Check for security violations. If we're returning to

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)subr_prf.c 8.3 (Berkeley) 1/21/94
* $Id: subr_prf.c,v 1.50 1998/09/06 06:25:04 ache Exp $
* $Id: subr_prf.c,v 1.51 1998/12/03 04:45:56 archie Exp $
*/
#include <sys/param.h>
@ -674,10 +674,24 @@ msglogchar(int c, void *dummyarg)
}
}
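/*
 * Replay whatever is still queued in the old message buffer into the
 * current one, so output captured before the buffer moved (e.g. into
 * the early vm86 buffer) survives the switch.
 */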
static void
msgbufcopy(struct msgbuf *oldp)
{
int pos;
pos = oldp->msg_bufr;
while (pos != oldp->msg_bufx) {
msglogchar(oldp->msg_ptr[pos], NULL);
if (++pos >= oldp->msg_size)
pos = 0;
}
}
void
msgbufinit(void *ptr, size_t size)
{
char *cp;
static struct msgbuf *oldp = NULL;
cp = (char *)ptr;
msgbufp = (struct msgbuf *) (cp + size - sizeof(*msgbufp));
@ -687,7 +701,10 @@ msgbufinit(void *ptr, size_t size)
msgbufp->msg_size = (char *)msgbufp - cp;
msgbufp->msg_ptr = cp;
}
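/* if an earlier buffer was already live, carry its contents over */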
if (msgbufmapped && oldp != msgbufp)
msgbufcopy(oldp);
msgbufmapped = 1;
oldp = msgbufp;
}
#include "opt_ddb.h"

@ -1,12 +1,11 @@
# $Id: Makefile,v 1.4 1998/12/30 11:21:07 yokota Exp $
# $Id: Makefile,v 1.5 1999/01/11 03:18:56 yokota Exp $
.PATH: ${.CURDIR}/../../dev/syscons ${.CURDIR}/../../i386/isa
KMOD = vesa
SRCS = vesa.c scvesactl.c sc.h vga.h opt_syscons.h opt_vga.h \
opt_vesa.h opt_vm86.h opt_fb.h
opt_vesa.h opt_fb.h
NOMAN =
CLEANFILES += sc.h vga.h opt_syscons.h opt_vga.h opt_vesa.h opt_vm86.h \
opt_fb.h
CLEANFILES += sc.h vga.h opt_syscons.h opt_vga.h opt_vesa.h opt_fb.h
sc.h:
echo "#define NSC 1" > sc.h
@ -23,9 +22,6 @@ opt_vga.h:
opt_vesa.h:
echo "#define VESA 1" > opt_vesa.h
opt_vm86.h:
echo "#define VM86 1" > opt_vm86.h
opt_fb.h:
touch opt_fb.h