"New" VM system from John Dyson & myself. For a run-down of the

major changes, see the log of any affected file in the sys/vm
directory (swap_pager.c for instance).
This commit is contained in:
David Greenman 1994-01-14 16:25:31 +00:00
parent 9d4389fddc
commit 7f8cb36869
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=974
36 changed files with 3213 additions and 2890 deletions

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
*/
#include "npx.h" /* for NNPX */
@ -137,6 +137,9 @@ sw0: .asciz "swtch"
*/
ALIGN_TEXT
Idle:
movl _IdlePTD,%ecx
movl %ecx,%cr3
movl $tmpstk-4,%esp
sti
SHOW_STI

View File

@ -23,10 +23,15 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: db_trace.c,v 1.3 1993/12/19 00:50:01 wollman Exp $
* $Id: db_trace.c,v 1.4 1994/01/03 07:55:19 davidg Exp $
*/
#include "param.h"
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm_statistics.h>
#include <machine/pmap.h>
#include "systm.h"
#include "proc.h"
#include "ddb/ddb.h"

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $Id: genassym.c,v 1.5 1993/10/15 10:34:17 rgrimes Exp $
* $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
*/
#include "sys/param.h"
@ -99,6 +99,7 @@ main()
printf("#define\tPDESIZE %d\n", PDESIZE);
printf("#define\tPTESIZE %d\n", PTESIZE);
printf("#define\tNKPDE %d\n", NKPDE);
printf("#define\tNKPT %d\n", NKPT);
printf("#define\tKPTDI 0x%x\n", KPTDI);
printf("#define\tKSTKPTDI 0x%x\n", KSTKPTDI);
printf("#define\tKSTKPTEOFF 0x%x\n", KSTKPTEOFF);
@ -113,6 +114,7 @@ main()
printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
#endif
printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
printf("#define\tVM_MAXUSER_ADDRESS 0x%x\n", VM_MAXUSER_ADDRESS);
printf("#define\tKERNBASE 0x%x\n", KERNBASE);
printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
@ -189,3 +191,4 @@ main()
printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG);
exit(0);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.10 1993/11/13 02:25:00 davidg Exp $
* $Id: locore.s,v 1.11 1993/11/14 02:27:22 rgrimes Exp $
*/
/*
@ -132,7 +132,7 @@ _bde_exists: .long 0
#endif
.globl tmpstk
.space 512
.space 0x1000
tmpstk:
@ -208,7 +208,7 @@ ENTRY(btext)
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
* pages: 1 UPAGES (2) 1 NKPDE (7)
* pages: 1 UPAGES (2) 1 NKPT (7)
*/
/* find end of kernel image */
@ -227,29 +227,18 @@ ENTRY(btext)
stosb
/*
* If we are loaded at 0x0 check to see if we have space for the
* page dir/tables and stack area after the kernel and before the 640K
* ISA memory hole. If we do not have space relocate the page directory,
* UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
* that ends up in esi, which points to the kernel page directory, is
* used by the rest of locore to build the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
* The value in esi is both the end of the kernel bss and a pointer to
* the kernel page directory, and is used by the rest of locore to build
* the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
* page table pages) is then passed on the stack to init386(first) as
* the value first. esi should ALWAYS be page aligned!!
*/
movl %esi,%ecx /* Get current first availiable address */
cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
jge 1f /* yep, don't need to check for room */
addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* XXX the 4 is for kstack */
/* space for kstack, PTD and PTE's */
cmpl $(640*1024),%ecx /* see if it fits in low memory */
jle 1f /* yep, don't need to relocate it */
movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
/* clear pagetables, page directory, stack, etc... */
movl %esi,%edi /* base (page directory) */
movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
movl $((1+UPAGES+1+NKPT)*NBPG),%ecx /* amount to clear */
xorl %eax,%eax /* specify zero fill */
cld
rep
@ -309,7 +298,7 @@ ENTRY(btext)
/* now initialize the page dir, upages, p0stack PT, and page tables */
movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
@ -348,7 +337,7 @@ ENTRY(btext)
movl %eax,(%esi) /* which is where temp maps! */
/* initialize kernel pde's */
movl $(NKPDE),%ecx /* for this many PDEs */
movl $(NKPT),%ecx /* for this many PDEs */
lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
@ -468,7 +457,7 @@ reloc_gdt:
/*
* Skip over the page tables and the kernel stack
*/
lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
lea ((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
@ -561,3 +550,4 @@ NON_GPROF_ENTRY(sigcode)
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.10 1993/11/13 02:25:00 davidg Exp $
* $Id: locore.s,v 1.11 1993/11/14 02:27:22 rgrimes Exp $
*/
/*
@ -132,7 +132,7 @@ _bde_exists: .long 0
#endif
.globl tmpstk
.space 512
.space 0x1000
tmpstk:
@ -208,7 +208,7 @@ ENTRY(btext)
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
* pages: 1 UPAGES (2) 1 NKPDE (7)
* pages: 1 UPAGES (2) 1 NKPT (7)
*/
/* find end of kernel image */
@ -227,29 +227,18 @@ ENTRY(btext)
stosb
/*
* If we are loaded at 0x0 check to see if we have space for the
* page dir/tables and stack area after the kernel and before the 640K
* ISA memory hole. If we do not have space relocate the page directory,
* UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
* that ends up in esi, which points to the kernel page directory, is
* used by the rest of locore to build the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
* The value in esi is both the end of the kernel bss and a pointer to
* the kernel page directory, and is used by the rest of locore to build
* the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
* page table pages) is then passed on the stack to init386(first) as
* the value first. esi should ALWAYS be page aligned!!
*/
movl %esi,%ecx /* Get current first availiable address */
cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
jge 1f /* yep, don't need to check for room */
addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* XXX the 4 is for kstack */
/* space for kstack, PTD and PTE's */
cmpl $(640*1024),%ecx /* see if it fits in low memory */
jle 1f /* yep, don't need to relocate it */
movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
/* clear pagetables, page directory, stack, etc... */
movl %esi,%edi /* base (page directory) */
movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
movl $((1+UPAGES+1+NKPT)*NBPG),%ecx /* amount to clear */
xorl %eax,%eax /* specify zero fill */
cld
rep
@ -309,7 +298,7 @@ ENTRY(btext)
/* now initialize the page dir, upages, p0stack PT, and page tables */
movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
@ -348,7 +337,7 @@ ENTRY(btext)
movl %eax,(%esi) /* which is where temp maps! */
/* initialize kernel pde's */
movl $(NKPDE),%ecx /* for this many PDEs */
movl $(NKPT),%ecx /* for this many PDEs */
lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
@ -468,7 +457,7 @@ reloc_gdt:
/*
* Skip over the page tables and the kernel stack
*/
lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
lea ((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
@ -561,3 +550,4 @@ NON_GPROF_ENTRY(sigcode)
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.23 1993/12/22 13:12:04 davidg Exp $
* $Id: machdep.c,v 1.24 1994/01/03 07:55:21 davidg Exp $
*/
#include "npx.h"
@ -71,14 +71,7 @@
#include "sys/exec.h"
#include "sys/vnode.h"
#ifndef MACHINE_NONCONTIG
extern vm_offset_t avail_end;
#else
extern vm_offset_t avail_start, avail_end;
static vm_offset_t hole_start, hole_end;
static vm_offset_t avail_next;
static unsigned int avail_remaining;
#endif /* MACHINE_NONCONTIG */
#include "machine/cpu.h"
#include "machine/reg.h"
@ -130,6 +123,8 @@ extern int forcemaxmem;
#endif
int biosmem;
vm_offset_t phys_avail[6];
extern cyloffset;
int cpu_class;
@ -156,14 +151,9 @@ cpu_startup()
/* avail_end was pre-decremented in pmap_bootstrap to compensate */
for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
#ifndef MACHINE_NONCONTIG
pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
avail_end + i * NBPG,
VM_PROT_ALL, TRUE);
#else
pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp + i * NBPG,
avail_end + i * NBPG, VM_PROT_ALL, TRUE);
#endif
msgbufmapped = 1;
/*
@ -280,7 +270,7 @@ cpu_startup()
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
/*
* Initialize callouts
@ -556,7 +546,7 @@ boot(arghowto)
extern int cold;
int nomsg = 1;
if(cold) {
if (cold) {
printf("hit reset please");
for(;;);
}
@ -1068,13 +1058,13 @@ init386(first)
/*
* 15 Aug 92 Terry Lambert The real fix for the CMOS bug
*/
if( biosbasemem != EXPECT_BASEMEM) {
printf( "Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
if (biosbasemem != EXPECT_BASEMEM) {
printf("Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
biosbasemem = EXPECT_BASEMEM; /* assume base*/
}
if( biosextmem > 65536) {
printf( "Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
if (biosextmem > 65536) {
printf("Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
biosextmem = 0; /* assume none*/
}
@ -1093,34 +1083,36 @@ init386(first)
Maxmem = 640/4;
else {
Maxmem = pagesinext + 0x100000/NBPG;
if (first < 0x100000)
first = 0x100000; /* skip hole */
}
/* This used to explode, since Maxmem used to be 0 for bas CMOS*/
#ifdef MAXMEM
if (MAXMEM/4 < Maxmem)
Maxmem = MAXMEM/4;
#endif
maxmem = Maxmem - 1; /* highest page of usable memory */
physmem = maxmem; /* number of pages of physmem addr space */
/*printf("using first 0x%x to 0x%x\n ", first, maxmem*NBPG);*/
if (maxmem < 2048/4) {
printf("Too little RAM memory. Warning, running in degraded mode.\n");
#ifdef INFORM_WAIT
/*
* People with less than 2 Meg have to hit return; this way
* we see the messages and can tell them why they blow up later.
* If they get working well enough to recompile, they can unset
* the flag; otherwise, it's a toy and they have to lump it.
*/
cngetc();
#endif /* !INFORM_WAIT*/
panic("Too little RAM memory.\n");
/* NOT REACHED */
}
/* call pmap initialization to make new kernel address space */
#ifndef MACHINCE_NONCONTIG
pmap_bootstrap (first, 0);
#else
pmap_bootstrap ((vm_offset_t)atdevbase + IOM_SIZE);
#endif /* MACHINE_NONCONTIG */
/*
* Initialize pointers to the two chunks of memory; for use
* later in vm_page_startup.
*/
/* avail_start and avail_end are initialized in pmap_bootstrap */
phys_avail[0] = 0x1000; /* memory up to the ISA hole */
phys_avail[1] = 0xa0000;
phys_avail[2] = avail_start; /* memory up to the end */
phys_avail[3] = avail_end;
phys_avail[4] = 0; /* no more chunks */
phys_avail[5] = 0;
/* now running on new page tables, configured,and u/iom is accessible */
/* make a initial tss so microp can get interrupt stack on syscall! */
@ -1139,7 +1131,7 @@ init386(first)
x = (int) &IDTVEC(syscall);
gdp->gd_looffset = x++;
gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
gdp->gd_stkcpy = 1; /* Leaves room for eflags like a trap */
gdp->gd_stkcpy = 1;
gdp->gd_type = SDT_SYS386CGT;
gdp->gd_dpl = SEL_UPL;
gdp->gd_p = 1;
@ -1170,9 +1162,7 @@ clearseg(n)
*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
load_cr3(rcr3());
bzero(CADDR2,NBPG);
#ifndef MACHINE_NONCONTIG
*(int *) CADDR2 = 0;
#endif /* MACHINE_NONCONTIG */
}
/*

File diff suppressed because it is too large Load Diff

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: support.s,v 1.1 1993/11/13 02:25:05 davidg Exp $
*/
#include "assym.s" /* system definitions */
@ -385,8 +385,7 @@ ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
movl %edi,%eax
addl %ebx,%eax
jc copyout_fault
#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
cmpl $VM_END_USER_ADDRESS,%eax
cmpl $VM_MAXUSER_ADDRESS,%eax
ja copyout_fault
#ifndef USE_486_WRITE_PROTECT
@ -708,7 +707,7 @@ ENTRY(copyoutstr)
* XXX - however, it would be faster to rewrite this function to use
* strlen() and copyout().
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
lodsb
gs
@ -742,7 +741,7 @@ ENTRY(copyoutstr)
* we look at a page at a time and the end address is on a page
* boundary.
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
movl %edi,%eax
shrl $IDXSHIFT,%eax

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: support.s,v 1.1 1993/11/13 02:25:05 davidg Exp $
*/
#include "assym.s" /* system definitions */
@ -385,8 +385,7 @@ ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
movl %edi,%eax
addl %ebx,%eax
jc copyout_fault
#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
cmpl $VM_END_USER_ADDRESS,%eax
cmpl $VM_MAXUSER_ADDRESS,%eax
ja copyout_fault
#ifndef USE_486_WRITE_PROTECT
@ -708,7 +707,7 @@ ENTRY(copyoutstr)
* XXX - however, it would be faster to rewrite this function to use
* strlen() and copyout().
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
lodsb
gs
@ -742,7 +741,7 @@ ENTRY(copyoutstr)
* we look at a page at a time and the end address is on a page
* boundary.
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
movl %edi,%eax
shrl $IDXSHIFT,%eax

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
*/
#include "npx.h" /* for NNPX */
@ -137,6 +137,9 @@ sw0: .asciz "swtch"
*/
ALIGN_TEXT
Idle:
movl _IdlePTD,%ecx
movl %ecx,%cr3
movl $tmpstk-4,%esp
sti
SHOW_STI

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.12 1993/12/19 00:50:09 wollman Exp $
* $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
*/
/*
@ -60,6 +60,7 @@
#include "vm/pmap.h"
#include "vm/vm_map.h"
#include "vm/vm_user.h"
#include "vm/vm_page.h"
#include "sys/vmmeter.h"
#include "machine/trap.h"
@ -152,7 +153,8 @@ trap(frame)
/*pg("trap type %d code = %x eip = %x cs = %x eva = %x esp %x",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, rcr2(), frame.tf_esp);*/
if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (curpcb == 0 || curproc == 0)
goto skiptoswitch;
if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
extern int _udatasel;
@ -181,9 +183,14 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
p->p_regs = (int *)&frame;
}
skiptoswitch:
ucode=0;
eva = rcr2();
code = frame.tf_err;
if ((type & ~T_USER) == T_PAGEFLT)
goto pfault;
switch (type) {
default:
@ -280,17 +287,18 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (code & PGEX_P) goto we_re_toast;
#endif
pfault:
/* fall into */
case T_PAGEFLT|T_USER: /* page fault */
{
register vm_offset_t va;
register struct vmspace *vm = p->p_vmspace;
register struct vmspace *vm;
register vm_map_t map;
int rv = 0;
int rv=0;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss;
char *v;
unsigned nss,v;
int oldflags;
va = trunc_page((vm_offset_t)eva);
/*
@ -301,10 +309,15 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* The last can occur during an exec() copyin where the
* argument space is lazy-allocated.
*/
if (type == T_PAGEFLT && va >= KERNBASE)
if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
vm = 0;
map = kernel_map;
else
} else {
vm = p->p_vmspace;
map = &vm->vm_map;
}
if (code & PGEX_W)
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
@ -317,16 +330,27 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
}
#endif
/*
* keep swapout from messing with us during this
* critical time.
*/
oldflags = p->p_flag;
if (map != kernel_map) {
p->p_flag |= SLOCK;
}
/*
* XXX: rude hack to make stack limits "work"
*/
nss = 0;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK
&& map != kernel_map) {
if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
caddr_t v;
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
rv = KERN_FAILURE;
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
@ -341,7 +365,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) - grow_amount;
/*
@ -355,42 +379,57 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE) !=
KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
}
}
/* check if page table is mapped, if not, fault it first */
if (!pde_v(va)) {
v = (char *)trunc_page(vtopte(va));
rv = vm_fault(map, (vm_offset_t)v, ftype, FALSE);
if (rv != KERN_SUCCESS) goto nogo;
/* check if page table fault, increment wiring */
vm_map_pageable(map, (vm_offset_t)v,
round_page(v+1), FALSE);
} else v=0;
rv = vm_fault(map, va, ftype, FALSE);
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
{
vm_offset_t v = trunc_page(vtopte(va));
if (map != kernel_map) {
vm_offset_t pa;
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
/* Get the physical address: */
pa = pmap_extract(vm_map_pmap(map), v);
/* And wire the page at system vm level: */
vm_page_wire(PHYS_TO_VM_PAGE(pa));
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
/* Unwire the pte page */
vm_page_unwire(PHYS_TO_VM_PAGE(pa));
} else {
rv = vm_fault(map, va, ftype, FALSE);
}
}
if (map != kernel_map) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
}
if (rv == KERN_SUCCESS) {
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
if (vm && nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
/*
* va could be a page table address, if the fault
* occurred from within copyout. In that case,
* we have to wire it. (EWS 12/11/93)
*/
if (ispt(va))
vm_map_pageable(map, va, round_page(va+1), FALSE);
va = trunc_page(vtopte(va));
/*
* for page table, increment wiring
* as long as not a page table fault as well
*/
if (!v && type != T_PAGEFLT)
vm_map_pageable(map, va, round_page(va+1), FALSE);
if (type == T_PAGEFLT)
return;
goto out;
@ -439,7 +478,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
return;
#endif
/* machine/parity/power fail/"kitchen sink" faults */
if(isa_nmi(code) == 0) return;
if (isa_nmi(code) == 0) return;
else goto we_re_toast;
#endif
}
@ -501,7 +540,8 @@ int trapwrite(addr)
struct proc *p;
vm_offset_t va;
struct vmspace *vm;
char *v;
int oldflags;
int rv;
va = trunc_page((vm_offset_t)addr);
/*
@ -515,14 +555,22 @@ int trapwrite(addr)
nss = 0;
p = curproc;
vm = p->p_vmspace;
oldflags = p->p_flag;
p->p_flag |= SLOCK;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
nss = roundup(((unsigned)USRSTACK - (unsigned)va), PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return (1);
}
if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) < nss) {
caddr_t v;
int grow_amount;
/*
* If necessary, grow the VM that the stack occupies
@ -532,7 +580,7 @@ int trapwrite(addr)
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
grow_amount;
/*
@ -546,22 +594,38 @@ int trapwrite(addr)
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE)
!= KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return(1);
}
printf("new stack growth: %lx, %d\n", v, grow_amount);
}
}
if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
return (1);
{
vm_offset_t v;
v = trunc_page(vtopte(va));
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
}
rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
if (rv != KERN_SUCCESS)
return 1;
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
nss >>= PGSHIFT;
if (nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
return (0);
}

View File

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.7 1993/11/25 01:31:02 wollman Exp $
* $Id: vm_machdep.c,v 1.8 1993/12/19 00:50:10 wollman Exp $
*/
#include "npx.h"
@ -92,10 +92,11 @@ cpu_fork(p1, p2)
* Wire top of address space of child to it's kstack.
* First, fault in a page of pte's to map it.
*/
#if 0
addr = trunc_page((u_int)vtopte(kstack));
vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
for (i=0; i < UPAGES; i++)
pmap_enter(&p2->p_vmspace->vm_pmap, (vm_offset_t)kstack+i*NBPG,
pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
/*
* The user area has to be mapped writable because
@ -105,6 +106,7 @@ cpu_fork(p1, p2)
* by the segment limits.
*/
VM_PROT_READ|VM_PROT_WRITE, TRUE);
#endif
pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
/*
@ -169,6 +171,7 @@ cpu_exit(p)
npxexit(p);
#endif /* NNPX */
splclock();
curproc = 0;
swtch();
/*
* This is to shutup the compiler, and if swtch() failed I suppose
@ -179,13 +182,15 @@ cpu_exit(p)
}
void
cpu_wait(p)
struct proc *p;
{
cpu_wait(p) struct proc *p; {
/* extern vm_map_t upages_map; */
extern char kstack[];
/* drop per-process resources */
vmspace_free(p->p_vmspace);
pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
((vm_offset_t) p->p_addr) + ctob(UPAGES));
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
vmspace_free(p->p_vmspace);
}
#endif
@ -237,15 +242,14 @@ pagemove(from, to, size)
* Convert kernel VA to physical address
*/
u_long
kvtop(addr)
register void *addr;
kvtop(void *addr)
{
vm_offset_t va;
va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((u_long)va);
return((int)va);
}
#ifdef notdef
@ -287,6 +291,7 @@ probew(addr)
* NB: assumes a physically contiguous kernel page table
* (makes life a LOT simpler).
*/
int
kernacc(addr, count, rw)
register u_int addr;
int count, rw;
@ -316,6 +321,7 @@ kernacc(addr, count, rw)
return(1);
}
int
useracc(addr, count, rw)
register u_int addr;
int count, rw;
@ -413,7 +419,7 @@ vunmapbuf(bp)
/*
* Force reset the processor by invalidating the entire address space!
*/
void /* XXX should be __dead too */
void
cpu_reset() {
/* force a shutdown by unmapping entire address space ! */
@ -422,5 +428,5 @@ cpu_reset() {
/* "good night, sweet prince .... <THUNK!>" */
tlbflush();
/* NOTREACHED */
while(1); /* to fool compiler... */
while(1);
}

View File

@ -42,12 +42,13 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.6 1993/11/13 02:25:16 davidg Exp $
* $Id: pmap.h,v 1.7 1993/12/19 00:50:18 wollman Exp $
*/
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
#include "vm/vm_prot.h"
/*
* 386 page table entry and page table directory
* W.Jolitz, 8/89
@ -121,8 +122,19 @@ typedef struct pte pt_entry_t; /* Mach page table entry */
* NKPDE controls the virtual space of the kernel, what ever is left, minus
* the alternate page table area is given to the user (NUPDE)
*/
#define NKPDE 7 /* number of kernel pde's */
#define NUPDE (NPTEPG-NKPDE-1)/* number of user pde's */
/*
* NKPDE controls the virtual space of the kernel, what ever is left is
* given to the user (NUPDE)
*/
#ifndef NKPT
#define NKPT 15 /* actual number of kernel pte's */
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of kpte's */
#endif
#define NUPDE (NPTEPG-NKPDE) /* number of user pde's */
/*
* The *PTDI values control the layout of virtual memory
*
@ -215,7 +227,7 @@ typedef struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
int pv_flags; /* flags */
int pv_wire; /* wire count */
} *pv_entry_t;
#define PV_ENTRY_NULL ((pv_entry_t) 0)

View File

@ -1,6 +1,8 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 1994 John S. Dyson
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
@ -34,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
* $Id: vmparam.h,v 1.9 1993/12/19 00:50:19 wollman Exp $
* $Id: vmparam.h,v 1.10 1994/01/03 16:00:52 davidg Exp $
*/
@ -57,7 +59,7 @@
* kernal address space.
*/
#define USRTEXT 0UL
#define USRSTACK 0xFDBFE000UL
/* #define USRSTACK 0xFDBFE000UL */
#define BTOPUSRSTACK (0xFDC00-(UPAGES)) /* btop(USRSTACK) */
#define LOWPAGES 0UL
#define HIGHPAGES UPAGES
@ -104,7 +106,7 @@
/*
* Size of User Raw I/O map
*/
#define USRIOSIZE 300
#define USRIOSIZE 1024
/*
* The size of the clock loop.
@ -210,16 +212,23 @@
*/
/* user/kernel map constants */
#define KERNBASE (0-(NKPDE+1)*(NBPG*NPTEPG))
#define KERNSIZE (NKPDE*NBPG*NPTEPG)
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0xFDBFE000UL)
#define UPT_MIN_ADDRESS ((vm_offset_t)0xFDC00000UL)
#define UPT_MAX_ADDRESS ((vm_offset_t)0xFDFF7000UL)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NPTEPG+UPAGES)))
#define USRSTACK VM_MAXUSER_ADDRESS
#define UPT_MIN_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*NPTEPG))
#define UPT_MAX_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
#define VM_MAX_ADDRESS UPT_MAX_ADDRESS
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xFDFF7000UL)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
#define UPDT VM_MIN_KERNEL_ADDRESS
#define KPT_MIN_ADDRESS ((vm_offset_t)0xFDFF8000UL)
#define KPT_MAX_ADDRESS ((vm_offset_t)0xFDFFF000UL)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFF7FF000UL)
#define KPT_MIN_ADDRESS ((vm_offset_t)(KERNBASE) - (NBPG*(NKPDE+1)))
#define KPT_MAX_ADDRESS ((vm_offset_t)(KERNBASE) - NBPG)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)ALT_MIN_ADDRESS - NBPG)
#define ALT_MIN_ADDRESS ((vm_offset_t)((APTDPTDI) << 22))
#define HIGHPAGES UPAGES
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)

View File

@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
# $Id: Makefile.i386,v 1.17 1993/12/12 12:22:28 davidg Exp $
# $Id: Makefile.i386,v 1.18 1993/12/19 00:49:53 wollman Exp $
#
# Makefile for FreeBSD
#
@ -43,7 +43,7 @@ INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
CFLAGS= ${COPTFLAGS} ${CWARNFLAGS} ${DEBUG} ${COPTS}
LOAD_ADDRESS?= FE000000
LOAD_ADDRESS?= F0100000
NORMAL_C= ${CC} -c ${CFLAGS} ${PROF} $<
NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<

View File

@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
# $Id: Makefile.i386,v 1.17 1993/12/12 12:22:28 davidg Exp $
# $Id: Makefile.i386,v 1.18 1993/12/19 00:49:53 wollman Exp $
#
# Makefile for FreeBSD
#
@ -43,7 +43,7 @@ INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
CFLAGS= ${COPTFLAGS} ${CWARNFLAGS} ${DEBUG} ${COPTS}
LOAD_ADDRESS?= FE000000
LOAD_ADDRESS?= F0100000
NORMAL_C= ${CC} -c ${CFLAGS} ${PROF} $<
NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<

View File

@ -23,7 +23,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: db_aout.c,v 1.4 1993/11/25 01:30:02 wollman Exp $
* $Id: db_aout.c,v 1.5 1994/01/03 07:54:08 davidg Exp $
*/
/*
@ -68,7 +68,7 @@
ep = (struct nlist *)((char *)sp + *(symtab)))
#ifndef SYMTAB_SPACE
#define SYMTAB_SPACE 63000
#define SYMTAB_SPACE 73000
#endif /*SYMTAB_SPACE*/
int db_symtabsize = SYMTAB_SPACE;

View File

@ -35,9 +35,9 @@
*
* from: @(#)pccons.c 5.11 (Berkeley) 5/21/91
* from: @(#)syscons.c 1.1 931021
* $Id: syscons.c,v 1.23 1993/12/21 03:27:26 rich Exp $
* $Id: syscons.c,v 1.24 1994/01/03 07:55:47 davidg Exp $
*
* Heavily modified by Søren Schmidt (sos@login.dkuug.dk) to provide:
* Heavily modified by Sxren Schmidt (sos@login.dkuug.dk) to provide:
*
* virtual consoles, SYSV ioctl's, ANSI emulation ....
*/
@ -91,9 +91,9 @@
/* virtual video memory addresses */
#if !defined(NetBSD)
#define MONO_BUF 0xFE0B0000
#define CGA_BUF 0xFE0B8000
#define VGA_BUF 0xFE0A0000
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
#define VGA_BUF (KERNBASE+0xA0000)
#endif
#define VIDEOMEM 0x000A0000

View File

@ -1,6 +1,6 @@
# Copyright 1990 W. Jolitz
# from: @(#)Makefile.i386 7.1 5/10/91
# $Id: Makefile.i386,v 1.17 1993/12/12 12:22:28 davidg Exp $
# $Id: Makefile.i386,v 1.18 1993/12/19 00:49:53 wollman Exp $
#
# Makefile for FreeBSD
#
@ -43,7 +43,7 @@ INCLUDES= -I. -I$S -I$S/sys
COPTS= ${INCLUDES} ${IDENT} -DKERNEL -Di386 -DNPX
ASFLAGS=
CFLAGS= ${COPTFLAGS} ${CWARNFLAGS} ${DEBUG} ${COPTS}
LOAD_ADDRESS?= FE000000
LOAD_ADDRESS?= F0100000
NORMAL_C= ${CC} -c ${CFLAGS} ${PROF} $<
NORMAL_C_C= ${CC} -c ${CFLAGS} ${PROF} ${PARAM} $<

View File

@ -23,10 +23,15 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: db_trace.c,v 1.3 1993/12/19 00:50:01 wollman Exp $
* $Id: db_trace.c,v 1.4 1994/01/03 07:55:19 davidg Exp $
*/
#include "param.h"
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <vm/vm_statistics.h>
#include <machine/pmap.h>
#include "systm.h"
#include "proc.h"
#include "ddb/ddb.h"

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $Id: genassym.c,v 1.5 1993/10/15 10:34:17 rgrimes Exp $
* $Id: genassym.c,v 1.6 1993/11/13 02:24:59 davidg Exp $
*/
#include "sys/param.h"
@ -99,6 +99,7 @@ main()
printf("#define\tPDESIZE %d\n", PDESIZE);
printf("#define\tPTESIZE %d\n", PTESIZE);
printf("#define\tNKPDE %d\n", NKPDE);
printf("#define\tNKPT %d\n", NKPT);
printf("#define\tKPTDI 0x%x\n", KPTDI);
printf("#define\tKSTKPTDI 0x%x\n", KSTKPTDI);
printf("#define\tKSTKPTEOFF 0x%x\n", KSTKPTEOFF);
@ -113,6 +114,7 @@ main()
printf("#define\tSHMMAXPGS %d\n", SHMMAXPGS);
#endif
printf("#define\tUSRSTACK 0x%x\n", USRSTACK);
printf("#define\tVM_MAXUSER_ADDRESS 0x%x\n", VM_MAXUSER_ADDRESS);
printf("#define\tKERNBASE 0x%x\n", KERNBASE);
printf("#define\tMSGBUFPTECNT %d\n", btoc(sizeof (struct msgbuf)));
printf("#define\tNMBCLUSTERS %d\n", NMBCLUSTERS);
@ -189,3 +191,4 @@ main()
printf("#define\tENAMETOOLONG %d\n", ENAMETOOLONG);
exit(0);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.10 1993/11/13 02:25:00 davidg Exp $
* $Id: locore.s,v 1.11 1993/11/14 02:27:22 rgrimes Exp $
*/
/*
@ -132,7 +132,7 @@ _bde_exists: .long 0
#endif
.globl tmpstk
.space 512
.space 0x1000
tmpstk:
@ -208,7 +208,7 @@ ENTRY(btext)
* Virtual address space of kernel:
*
* text | data | bss | [syms] | page dir | proc0 kernel stack | usr stk map | Sysmap
* pages: 1 UPAGES (2) 1 NKPDE (7)
* pages: 1 UPAGES (2) 1 NKPT (7)
*/
/* find end of kernel image */
@ -227,29 +227,18 @@ ENTRY(btext)
stosb
/*
* If we are loaded at 0x0 check to see if we have space for the
* page dir/tables and stack area after the kernel and before the 640K
* ISA memory hole. If we do not have space relocate the page directory,
* UPAGES, proc 0 stack, and page table pages to start at 1MB. The value
* that ends up in esi, which points to the kernel page directory, is
* used by the rest of locore to build the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPDE(number of kernel
* The value in esi is both the end of the kernel bss and a pointer to
* the kernel page directory, and is used by the rest of locore to build
* the tables.
* esi + 1(page dir) + 2(UPAGES) + 1(p0stack) + NKPT(number of kernel
* page table pages) is then passed on the stack to init386(first) as
* the value first. esi should ALWAYS be page aligned!!
*/
movl %esi,%ecx /* Get current first availiable address */
cmpl $0x100000,%ecx /* Lets see if we are already above 1MB */
jge 1f /* yep, don't need to check for room */
addl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* XXX the 4 is for kstack */
/* space for kstack, PTD and PTE's */
cmpl $(640*1024),%ecx /* see if it fits in low memory */
jle 1f /* yep, don't need to relocate it */
movl $0x100000,%esi /* won't fit, so start it at 1MB */
1:
/* clear pagetables, page directory, stack, etc... */
movl %esi,%edi /* base (page directory) */
movl $((1+UPAGES+1+NKPDE)*NBPG),%ecx /* amount to clear */
movl $((1+UPAGES+1+NKPT)*NBPG),%ecx /* amount to clear */
xorl %eax,%eax /* specify zero fill */
cld
rep
@ -309,7 +298,7 @@ ENTRY(btext)
/* now initialize the page dir, upages, p0stack PT, and page tables */
movl $(1+UPAGES+1+NKPDE),%ecx /* number of PTEs */
movl $(1+UPAGES+1+NKPT),%ecx /* number of PTEs */
movl %esi,%eax /* phys address of PTD */
andl $PG_FRAME,%eax /* convert to PFN, should be a NOP */
orl $PG_V|PG_KW,%eax /* valid, kernel read/write */
@ -348,7 +337,7 @@ ENTRY(btext)
movl %eax,(%esi) /* which is where temp maps! */
/* initialize kernel pde's */
movl $(NKPDE),%ecx /* for this many PDEs */
movl $(NKPT),%ecx /* for this many PDEs */
lea (KPTDI*PDESIZE)(%esi),%ebx /* offset of pde for kernel */
fillkpt
@ -468,7 +457,7 @@ reloc_gdt:
/*
* Skip over the page tables and the kernel stack
*/
lea ((1+UPAGES+1+NKPDE)*NBPG)(%esi),%esi
lea ((1+UPAGES+1+NKPT)*NBPG)(%esi),%esi
pushl %esi /* value of first for init386(first) */
call _init386 /* wire 386 chip for unix operation */
@ -561,3 +550,4 @@ NON_GPROF_ENTRY(sigcode)
.globl _szsigcode
_szsigcode:
.long _szsigcode-_sigcode

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.23 1993/12/22 13:12:04 davidg Exp $
* $Id: machdep.c,v 1.24 1994/01/03 07:55:21 davidg Exp $
*/
#include "npx.h"
@ -71,14 +71,7 @@
#include "sys/exec.h"
#include "sys/vnode.h"
#ifndef MACHINE_NONCONTIG
extern vm_offset_t avail_end;
#else
extern vm_offset_t avail_start, avail_end;
static vm_offset_t hole_start, hole_end;
static vm_offset_t avail_next;
static unsigned int avail_remaining;
#endif /* MACHINE_NONCONTIG */
#include "machine/cpu.h"
#include "machine/reg.h"
@ -130,6 +123,8 @@ extern int forcemaxmem;
#endif
int biosmem;
vm_offset_t phys_avail[6];
extern cyloffset;
int cpu_class;
@ -156,14 +151,9 @@ cpu_startup()
/* avail_end was pre-decremented in pmap_bootstrap to compensate */
for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
#ifndef MACHINE_NONCONTIG
pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp,
avail_end + i * NBPG,
VM_PROT_ALL, TRUE);
#else
pmap_enter(pmap_kernel(), (vm_offset_t)msgbufp + i * NBPG,
avail_end + i * NBPG, VM_PROT_ALL, TRUE);
#endif
msgbufmapped = 1;
/*
@ -280,7 +270,7 @@ cpu_startup()
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
M_MBUF, M_NOWAIT);
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
mb_map = kmem_suballoc(kernel_map, (vm_offset_t)&mbutl, &maxaddr,
mb_map = kmem_suballoc(kmem_map, (vm_offset_t)&mbutl, &maxaddr,
VM_MBUF_SIZE, FALSE);
/*
* Initialize callouts
@ -556,7 +546,7 @@ boot(arghowto)
extern int cold;
int nomsg = 1;
if(cold) {
if (cold) {
printf("hit reset please");
for(;;);
}
@ -1068,13 +1058,13 @@ init386(first)
/*
* 15 Aug 92 Terry Lambert The real fix for the CMOS bug
*/
if( biosbasemem != EXPECT_BASEMEM) {
printf( "Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
if (biosbasemem != EXPECT_BASEMEM) {
printf("Warning: Base memory %dK, assuming %dK\n", biosbasemem, EXPECT_BASEMEM);
biosbasemem = EXPECT_BASEMEM; /* assume base*/
}
if( biosextmem > 65536) {
printf( "Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
if (biosextmem > 65536) {
printf("Warning: Extended memory %dK(>64M), assuming 0K\n", biosextmem);
biosextmem = 0; /* assume none*/
}
@ -1093,34 +1083,36 @@ init386(first)
Maxmem = 640/4;
else {
Maxmem = pagesinext + 0x100000/NBPG;
if (first < 0x100000)
first = 0x100000; /* skip hole */
}
/* This used to explode, since Maxmem used to be 0 for bas CMOS*/
#ifdef MAXMEM
if (MAXMEM/4 < Maxmem)
Maxmem = MAXMEM/4;
#endif
maxmem = Maxmem - 1; /* highest page of usable memory */
physmem = maxmem; /* number of pages of physmem addr space */
/*printf("using first 0x%x to 0x%x\n ", first, maxmem*NBPG);*/
if (maxmem < 2048/4) {
printf("Too little RAM memory. Warning, running in degraded mode.\n");
#ifdef INFORM_WAIT
/*
* People with less than 2 Meg have to hit return; this way
* we see the messages and can tell them why they blow up later.
* If they get working well enough to recompile, they can unset
* the flag; otherwise, it's a toy and they have to lump it.
*/
cngetc();
#endif /* !INFORM_WAIT*/
panic("Too little RAM memory.\n");
/* NOT REACHED */
}
/* call pmap initialization to make new kernel address space */
#ifndef MACHINCE_NONCONTIG
pmap_bootstrap (first, 0);
#else
pmap_bootstrap ((vm_offset_t)atdevbase + IOM_SIZE);
#endif /* MACHINE_NONCONTIG */
/*
* Initialize pointers to the two chunks of memory; for use
* later in vm_page_startup.
*/
/* avail_start and avail_end are initialized in pmap_bootstrap */
phys_avail[0] = 0x1000; /* memory up to the ISA hole */
phys_avail[1] = 0xa0000;
phys_avail[2] = avail_start; /* memory up to the end */
phys_avail[3] = avail_end;
phys_avail[4] = 0; /* no more chunks */
phys_avail[5] = 0;
/* now running on new page tables, configured,and u/iom is accessible */
/* make a initial tss so microp can get interrupt stack on syscall! */
@ -1139,7 +1131,7 @@ init386(first)
x = (int) &IDTVEC(syscall);
gdp->gd_looffset = x++;
gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
gdp->gd_stkcpy = 1; /* Leaves room for eflags like a trap */
gdp->gd_stkcpy = 1;
gdp->gd_type = SDT_SYS386CGT;
gdp->gd_dpl = SEL_UPL;
gdp->gd_p = 1;
@ -1170,9 +1162,7 @@ clearseg(n)
*(int *)CMAP2 = PG_V | PG_KW | ctob(n);
load_cr3(rcr3());
bzero(CADDR2,NBPG);
#ifndef MACHINE_NONCONTIG
*(int *) CADDR2 = 0;
#endif /* MACHINE_NONCONTIG */
}
/*

File diff suppressed because it is too large Load Diff

View File

@ -30,7 +30,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: support.s,v 1.1 1993/11/13 02:25:05 davidg Exp $
*/
#include "assym.s" /* system definitions */
@ -385,8 +385,7 @@ ENTRY(copyout) /* copyout(from_kernel, to_user, len) */
movl %edi,%eax
addl %ebx,%eax
jc copyout_fault
#define VM_END_USER_ADDRESS 0xFDBFE000 /* XXX */
cmpl $VM_END_USER_ADDRESS,%eax
cmpl $VM_MAXUSER_ADDRESS,%eax
ja copyout_fault
#ifndef USE_486_WRITE_PROTECT
@ -708,7 +707,7 @@ ENTRY(copyoutstr)
* XXX - however, it would be faster to rewrite this function to use
* strlen() and copyout().
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
lodsb
gs
@ -742,7 +741,7 @@ ENTRY(copyoutstr)
* we look at a page at a time and the end address is on a page
* boundary.
*/
cmpl $VM_END_USER_ADDRESS,%edi
cmpl $VM_MAXUSER_ADDRESS,%edi
jae cpystrflt
movl %edi,%eax
shrl $IDXSHIFT,%eax

View File

@ -33,7 +33,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id$
* $Id: swtch.s,v 1.1 1993/11/13 02:25:06 davidg Exp $
*/
#include "npx.h" /* for NNPX */
@ -137,6 +137,9 @@ sw0: .asciz "swtch"
*/
ALIGN_TEXT
Idle:
movl _IdlePTD,%ecx
movl %ecx,%cr3
movl $tmpstk-4,%esp
sti
SHOW_STI

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.12 1993/12/19 00:50:09 wollman Exp $
* $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
*/
/*
@ -60,6 +60,7 @@
#include "vm/pmap.h"
#include "vm/vm_map.h"
#include "vm/vm_user.h"
#include "vm/vm_page.h"
#include "sys/vmmeter.h"
#include "machine/trap.h"
@ -152,7 +153,8 @@ trap(frame)
/*pg("trap type %d code = %x eip = %x cs = %x eva = %x esp %x",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, rcr2(), frame.tf_esp);*/
if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (curpcb == 0 || curproc == 0)
goto skiptoswitch;
if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
extern int _udatasel;
@ -181,9 +183,14 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
p->p_regs = (int *)&frame;
}
skiptoswitch:
ucode=0;
eva = rcr2();
code = frame.tf_err;
if ((type & ~T_USER) == T_PAGEFLT)
goto pfault;
switch (type) {
default:
@ -280,17 +287,18 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (code & PGEX_P) goto we_re_toast;
#endif
pfault:
/* fall into */
case T_PAGEFLT|T_USER: /* page fault */
{
register vm_offset_t va;
register struct vmspace *vm = p->p_vmspace;
register struct vmspace *vm;
register vm_map_t map;
int rv = 0;
int rv=0;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss;
char *v;
unsigned nss,v;
int oldflags;
va = trunc_page((vm_offset_t)eva);
/*
@ -301,10 +309,15 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* The last can occur during an exec() copyin where the
* argument space is lazy-allocated.
*/
if (type == T_PAGEFLT && va >= KERNBASE)
if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
vm = 0;
map = kernel_map;
else
} else {
vm = p->p_vmspace;
map = &vm->vm_map;
}
if (code & PGEX_W)
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
@ -317,16 +330,27 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
}
#endif
/*
* keep swapout from messing with us during this
* critical time.
*/
oldflags = p->p_flag;
if (map != kernel_map) {
p->p_flag |= SLOCK;
}
/*
* XXX: rude hack to make stack limits "work"
*/
nss = 0;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK
&& map != kernel_map) {
if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
caddr_t v;
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
rv = KERN_FAILURE;
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
@ -341,7 +365,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) - grow_amount;
/*
@ -355,42 +379,57 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE) !=
KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
}
}
/* check if page table is mapped, if not, fault it first */
if (!pde_v(va)) {
v = (char *)trunc_page(vtopte(va));
rv = vm_fault(map, (vm_offset_t)v, ftype, FALSE);
if (rv != KERN_SUCCESS) goto nogo;
/* check if page table fault, increment wiring */
vm_map_pageable(map, (vm_offset_t)v,
round_page(v+1), FALSE);
} else v=0;
rv = vm_fault(map, va, ftype, FALSE);
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
{
vm_offset_t v = trunc_page(vtopte(va));
if (map != kernel_map) {
vm_offset_t pa;
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
/* Get the physical address: */
pa = pmap_extract(vm_map_pmap(map), v);
/* And wire the page at system vm level: */
vm_page_wire(PHYS_TO_VM_PAGE(pa));
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
/* Unwire the pte page */
vm_page_unwire(PHYS_TO_VM_PAGE(pa));
} else {
rv = vm_fault(map, va, ftype, FALSE);
}
}
if (map != kernel_map) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
}
if (rv == KERN_SUCCESS) {
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
if (vm && nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
/*
* va could be a page table address, if the fault
* occurred from within copyout. In that case,
* we have to wire it. (EWS 12/11/93)
*/
if (ispt(va))
vm_map_pageable(map, va, round_page(va+1), FALSE);
va = trunc_page(vtopte(va));
/*
* for page table, increment wiring
* as long as not a page table fault as well
*/
if (!v && type != T_PAGEFLT)
vm_map_pageable(map, va, round_page(va+1), FALSE);
if (type == T_PAGEFLT)
return;
goto out;
@ -439,7 +478,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
return;
#endif
/* machine/parity/power fail/"kitchen sink" faults */
if(isa_nmi(code) == 0) return;
if (isa_nmi(code) == 0) return;
else goto we_re_toast;
#endif
}
@ -501,7 +540,8 @@ int trapwrite(addr)
struct proc *p;
vm_offset_t va;
struct vmspace *vm;
char *v;
int oldflags;
int rv;
va = trunc_page((vm_offset_t)addr);
/*
@ -515,14 +555,22 @@ int trapwrite(addr)
nss = 0;
p = curproc;
vm = p->p_vmspace;
oldflags = p->p_flag;
p->p_flag |= SLOCK;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
nss = roundup(((unsigned)USRSTACK - (unsigned)va), PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return (1);
}
if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) < nss) {
caddr_t v;
int grow_amount;
/*
* If necessary, grow the VM that the stack occupies
@ -532,7 +580,7 @@ int trapwrite(addr)
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
grow_amount;
/*
@ -546,22 +594,38 @@ int trapwrite(addr)
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE)
!= KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return(1);
}
printf("new stack growth: %lx, %d\n", v, grow_amount);
}
}
if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
return (1);
{
vm_offset_t v;
v = trunc_page(vtopte(va));
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
}
rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
if (rv != KERN_SUCCESS)
return 1;
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
nss >>= PGSHIFT;
if (nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
return (0);
}

View File

@ -37,7 +37,7 @@
*
* from: @(#)vm_machdep.c 7.3 (Berkeley) 5/13/91
* Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
* $Id: vm_machdep.c,v 1.7 1993/11/25 01:31:02 wollman Exp $
* $Id: vm_machdep.c,v 1.8 1993/12/19 00:50:10 wollman Exp $
*/
#include "npx.h"
@ -92,10 +92,11 @@ cpu_fork(p1, p2)
* Wire top of address space of child to it's kstack.
* First, fault in a page of pte's to map it.
*/
#if 0
addr = trunc_page((u_int)vtopte(kstack));
vm_map_pageable(&p2->p_vmspace->vm_map, addr, addr+NBPG, FALSE);
for (i=0; i < UPAGES; i++)
pmap_enter(&p2->p_vmspace->vm_pmap, (vm_offset_t)kstack+i*NBPG,
pmap_enter(&p2->p_vmspace->vm_pmap, kstack+i*NBPG,
pmap_extract(kernel_pmap, ((int)p2->p_addr)+i*NBPG),
/*
* The user area has to be mapped writable because
@ -105,6 +106,7 @@ cpu_fork(p1, p2)
* by the segment limits.
*/
VM_PROT_READ|VM_PROT_WRITE, TRUE);
#endif
pmap_activate(&p2->p_vmspace->vm_pmap, &up->u_pcb);
/*
@ -169,6 +171,7 @@ cpu_exit(p)
npxexit(p);
#endif /* NNPX */
splclock();
curproc = 0;
swtch();
/*
* This is to shutup the compiler, and if swtch() failed I suppose
@ -179,13 +182,15 @@ cpu_exit(p)
}
void
cpu_wait(p)
struct proc *p;
{
cpu_wait(p) struct proc *p; {
/* extern vm_map_t upages_map; */
extern char kstack[];
/* drop per-process resources */
vmspace_free(p->p_vmspace);
pmap_remove(vm_map_pmap(kernel_map), (vm_offset_t) p->p_addr,
((vm_offset_t) p->p_addr) + ctob(UPAGES));
kmem_free(kernel_map, (vm_offset_t)p->p_addr, ctob(UPAGES));
vmspace_free(p->p_vmspace);
}
#endif
@ -237,15 +242,14 @@ pagemove(from, to, size)
* Convert kernel VA to physical address
*/
u_long
kvtop(addr)
register void *addr;
kvtop(void *addr)
{
vm_offset_t va;
va = pmap_extract(kernel_pmap, (vm_offset_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((u_long)va);
return((int)va);
}
#ifdef notdef
@ -287,6 +291,7 @@ probew(addr)
* NB: assumes a physically contiguous kernel page table
* (makes life a LOT simpler).
*/
int
kernacc(addr, count, rw)
register u_int addr;
int count, rw;
@ -316,6 +321,7 @@ kernacc(addr, count, rw)
return(1);
}
int
useracc(addr, count, rw)
register u_int addr;
int count, rw;
@ -413,7 +419,7 @@ vunmapbuf(bp)
/*
* Force reset the processor by invalidating the entire address space!
*/
void /* XXX should be __dead too */
void
cpu_reset() {
/* force a shutdown by unmapping entire address space ! */
@ -422,5 +428,5 @@ cpu_reset() {
/* "good night, sweet prince .... <THUNK!>" */
tlbflush();
/* NOTREACHED */
while(1); /* to fool compiler... */
while(1);
}

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)param.h 5.8 (Berkeley) 6/28/91
* $Id: param.h,v 1.10 1993/11/18 05:02:05 rgrimes Exp $
* $Id: param.h,v 1.11 1993/12/19 00:50:17 wollman Exp $
*/
#ifndef _MACHINE_PARAM_H_
@ -71,7 +71,6 @@
* defined in pmap.h which is included after this we can't do that
* (YET!)
*/
#define KERNBASE 0xFE000000UL /* start of kernel virtual */
#define BTOPKERNBASE (KERNBASE >> PGSHIFT)
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */

View File

@ -42,12 +42,13 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.6 1993/11/13 02:25:16 davidg Exp $
* $Id: pmap.h,v 1.7 1993/12/19 00:50:18 wollman Exp $
*/
#ifndef _PMAP_MACHINE_
#define _PMAP_MACHINE_ 1
#include "vm/vm_prot.h"
/*
* 386 page table entry and page table directory
* W.Jolitz, 8/89
@ -121,8 +122,19 @@ typedef struct pte pt_entry_t; /* Mach page table entry */
* NKPDE controls the virtual space of the kernel, what ever is left, minus
* the alternate page table area is given to the user (NUPDE)
*/
#define NKPDE 7 /* number of kernel pde's */
#define NUPDE (NPTEPG-NKPDE-1)/* number of user pde's */
/*
* NKPDE controls the virtual space of the kernel, what ever is left is
* given to the user (NUPDE)
*/
#ifndef NKPT
#define NKPT 15 /* actual number of kernel pte's */
#endif
#ifndef NKPDE
#define NKPDE 63 /* addressable number of kpte's */
#endif
#define NUPDE (NPTEPG-NKPDE) /* number of user pde's */
/*
* The *PTDI values control the layout of virtual memory
*
@ -215,7 +227,7 @@ typedef struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
pmap_t pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
int pv_flags; /* flags */
int pv_wire; /* wire count */
} *pv_entry_t;
#define PV_ENTRY_NULL ((pv_entry_t) 0)

View File

@ -1,6 +1,8 @@
/*-
* Copyright (c) 1990 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 1994 John S. Dyson
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
* William Jolitz.
@ -34,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91
* $Id: vmparam.h,v 1.9 1993/12/19 00:50:19 wollman Exp $
* $Id: vmparam.h,v 1.10 1994/01/03 16:00:52 davidg Exp $
*/
@ -57,7 +59,7 @@
* kernal address space.
*/
#define USRTEXT 0UL
#define USRSTACK 0xFDBFE000UL
/* #define USRSTACK 0xFDBFE000UL */
#define BTOPUSRSTACK (0xFDC00-(UPAGES)) /* btop(USRSTACK) */
#define LOWPAGES 0UL
#define HIGHPAGES UPAGES
@ -104,7 +106,7 @@
/*
* Size of User Raw I/O map
*/
#define USRIOSIZE 300
#define USRIOSIZE 1024
/*
* The size of the clock loop.
@ -210,16 +212,23 @@
*/
/* user/kernel map constants */
#define KERNBASE (0-(NKPDE+1)*(NBPG*NPTEPG))
#define KERNSIZE (NKPDE*NBPG*NPTEPG)
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0xFDBFE000UL)
#define UPT_MIN_ADDRESS ((vm_offset_t)0xFDC00000UL)
#define UPT_MAX_ADDRESS ((vm_offset_t)0xFDFF7000UL)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NPTEPG+UPAGES)))
#define USRSTACK VM_MAXUSER_ADDRESS
#define UPT_MIN_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*NPTEPG))
#define UPT_MAX_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
#define VM_MAX_ADDRESS UPT_MAX_ADDRESS
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)0xFDFF7000UL)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNBASE - (NBPG*(NKPDE+2)))
#define UPDT VM_MIN_KERNEL_ADDRESS
#define KPT_MIN_ADDRESS ((vm_offset_t)0xFDFF8000UL)
#define KPT_MAX_ADDRESS ((vm_offset_t)0xFDFFF000UL)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFF7FF000UL)
#define KPT_MIN_ADDRESS ((vm_offset_t)(KERNBASE) - (NBPG*(NKPDE+1)))
#define KPT_MAX_ADDRESS ((vm_offset_t)(KERNBASE) - NBPG)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)ALT_MIN_ADDRESS - NBPG)
#define ALT_MIN_ADDRESS ((vm_offset_t)((APTDPTDI) << 22))
#define HIGHPAGES UPAGES
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)

View File

@ -35,9 +35,9 @@
*
* from: @(#)pccons.c 5.11 (Berkeley) 5/21/91
* from: @(#)syscons.c 1.1 931021
* $Id: syscons.c,v 1.23 1993/12/21 03:27:26 rich Exp $
* $Id: syscons.c,v 1.24 1994/01/03 07:55:47 davidg Exp $
*
* Heavily modified by Søren Schmidt (sos@login.dkuug.dk) to provide:
* Heavily modified by Sxren Schmidt (sos@login.dkuug.dk) to provide:
*
* virtual consoles, SYSV ioctl's, ANSI emulation ....
*/
@ -91,9 +91,9 @@
/* virtual video memory addresses */
#if !defined(NetBSD)
#define MONO_BUF 0xFE0B0000
#define CGA_BUF 0xFE0B8000
#define VGA_BUF 0xFE0A0000
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
#define VGA_BUF (KERNBASE+0xA0000)
#endif
#define VIDEOMEM 0x000A0000

View File

@ -35,9 +35,9 @@
*
* from: @(#)pccons.c 5.11 (Berkeley) 5/21/91
* from: @(#)syscons.c 1.1 931021
* $Id: syscons.c,v 1.23 1993/12/21 03:27:26 rich Exp $
* $Id: syscons.c,v 1.24 1994/01/03 07:55:47 davidg Exp $
*
* Heavily modified by Søren Schmidt (sos@login.dkuug.dk) to provide:
* Heavily modified by Sxren Schmidt (sos@login.dkuug.dk) to provide:
*
* virtual consoles, SYSV ioctl's, ANSI emulation ....
*/
@ -91,9 +91,9 @@
/* virtual video memory addresses */
#if !defined(NetBSD)
#define MONO_BUF 0xFE0B0000
#define CGA_BUF 0xFE0B8000
#define VGA_BUF 0xFE0A0000
#define MONO_BUF (KERNBASE+0xB0000)
#define CGA_BUF (KERNBASE+0xB8000)
#define VGA_BUF (KERNBASE+0xA0000)
#endif
#define VIDEOMEM 0x000A0000

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.12 1993/12/19 00:50:09 wollman Exp $
* $Id: trap.c,v 1.13 1994/01/03 07:55:24 davidg Exp $
*/
/*
@ -60,6 +60,7 @@
#include "vm/pmap.h"
#include "vm/vm_map.h"
#include "vm/vm_user.h"
#include "vm/vm_page.h"
#include "sys/vmmeter.h"
#include "machine/trap.h"
@ -152,7 +153,8 @@ trap(frame)
/*pg("trap type %d code = %x eip = %x cs = %x eva = %x esp %x",
frame.tf_trapno, frame.tf_err, frame.tf_eip,
frame.tf_cs, rcr2(), frame.tf_esp);*/
if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (curpcb == 0 || curproc == 0)
goto skiptoswitch;
if (curpcb->pcb_onfault && frame.tf_trapno != T_PAGEFLT) {
extern int _udatasel;
@ -181,9 +183,14 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
p->p_regs = (int *)&frame;
}
skiptoswitch:
ucode=0;
eva = rcr2();
code = frame.tf_err;
if ((type & ~T_USER) == T_PAGEFLT)
goto pfault;
switch (type) {
default:
@ -280,17 +287,18 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (code & PGEX_P) goto we_re_toast;
#endif
pfault:
/* fall into */
case T_PAGEFLT|T_USER: /* page fault */
{
register vm_offset_t va;
register struct vmspace *vm = p->p_vmspace;
register struct vmspace *vm;
register vm_map_t map;
int rv = 0;
int rv=0;
vm_prot_t ftype;
extern vm_map_t kernel_map;
unsigned nss;
char *v;
unsigned nss,v;
int oldflags;
va = trunc_page((vm_offset_t)eva);
/*
@ -301,10 +309,15 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* The last can occur during an exec() copyin where the
* argument space is lazy-allocated.
*/
if (type == T_PAGEFLT && va >= KERNBASE)
if ((p == 0) || (type == T_PAGEFLT && va >= KERNBASE)) {
vm = 0;
map = kernel_map;
else
} else {
vm = p->p_vmspace;
map = &vm->vm_map;
}
if (code & PGEX_W)
ftype = VM_PROT_READ | VM_PROT_WRITE;
else
@ -317,16 +330,27 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
}
#endif
/*
* keep swapout from messing with us during this
* critical time.
*/
oldflags = p->p_flag;
if (map != kernel_map) {
p->p_flag |= SLOCK;
}
/*
* XXX: rude hack to make stack limits "work"
*/
nss = 0;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK
&& map != kernel_map) {
if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
caddr_t v;
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
rv = KERN_FAILURE;
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
@ -341,7 +365,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) - grow_amount;
/*
@ -355,42 +379,57 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE) !=
KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
goto nogo;
}
}
}
/* check if page table is mapped, if not, fault it first */
if (!pde_v(va)) {
v = (char *)trunc_page(vtopte(va));
rv = vm_fault(map, (vm_offset_t)v, ftype, FALSE);
if (rv != KERN_SUCCESS) goto nogo;
/* check if page table fault, increment wiring */
vm_map_pageable(map, (vm_offset_t)v,
round_page(v+1), FALSE);
} else v=0;
rv = vm_fault(map, va, ftype, FALSE);
#define pde_v(v) (PTD[((v)>>PD_SHIFT)&1023].pd_v)
{
vm_offset_t v = trunc_page(vtopte(va));
if (map != kernel_map) {
vm_offset_t pa;
/* Fault the pte only if needed: */
*(volatile char *)v += 0;
/* Get the physical address: */
pa = pmap_extract(vm_map_pmap(map), v);
/* And wire the page at system vm level: */
vm_page_wire(PHYS_TO_VM_PAGE(pa));
/* Fault in the user page: */
rv = vm_fault(map, va, ftype, FALSE);
/* Unwire the pte page */
vm_page_unwire(PHYS_TO_VM_PAGE(pa));
} else {
rv = vm_fault(map, va, ftype, FALSE);
}
}
if (map != kernel_map) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
}
if (rv == KERN_SUCCESS) {
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
if (vm && nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
/*
* va could be a page table address, if the fault
* occurred from within copyout. In that case,
* we have to wire it. (EWS 12/11/93)
*/
if (ispt(va))
vm_map_pageable(map, va, round_page(va+1), FALSE);
va = trunc_page(vtopte(va));
/*
* for page table, increment wiring
* as long as not a page table fault as well
*/
if (!v && type != T_PAGEFLT)
vm_map_pageable(map, va, round_page(va+1), FALSE);
if (type == T_PAGEFLT)
return;
goto out;
@ -439,7 +478,7 @@ if(curpcb == 0 || curproc == 0) goto we_re_toast;
return;
#endif
/* machine/parity/power fail/"kitchen sink" faults */
if(isa_nmi(code) == 0) return;
if (isa_nmi(code) == 0) return;
else goto we_re_toast;
#endif
}
@ -501,7 +540,8 @@ int trapwrite(addr)
struct proc *p;
vm_offset_t va;
struct vmspace *vm;
char *v;
int oldflags;
int rv;
va = trunc_page((vm_offset_t)addr);
/*
@ -515,14 +555,22 @@ int trapwrite(addr)
nss = 0;
p = curproc;
vm = p->p_vmspace;
oldflags = p->p_flag;
p->p_flag |= SLOCK;
if ((caddr_t)va >= vm->vm_maxsaddr
&& (caddr_t)va < (caddr_t)USRSTACK) {
nss = roundup(USRSTACK - (unsigned)va, PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur)
nss = roundup(((unsigned)USRSTACK - (unsigned)va), PAGE_SIZE);
if (nss > p->p_rlimit[RLIMIT_STACK].rlim_cur) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return (1);
}
if (vm->vm_ssize && roundup(vm->vm_ssize << PGSHIFT,
DFLSSIZ) < nss) {
caddr_t v;
int grow_amount;
/*
* If necessary, grow the VM that the stack occupies
@ -532,7 +580,7 @@ int trapwrite(addr)
* Grow the VM by the amount requested rounded up to
* the nearest DFLSSIZ to provide for some hysteresis.
*/
grow_amount = roundup(nss, DFLSSIZ);
grow_amount = roundup((nss - (vm->vm_ssize << PGSHIFT)), DFLSSIZ);
v = (char *)USRSTACK - roundup(vm->vm_ssize << PGSHIFT, DFLSSIZ) -
grow_amount;
/*
@ -546,22 +594,38 @@ int trapwrite(addr)
if (vm_allocate(&vm->vm_map, (vm_offset_t *)&v,
grow_amount, FALSE)
!= KERN_SUCCESS) {
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
return(1);
}
printf("new stack growth: %lx, %d\n", v, grow_amount);
}
}
if (vm_fault(&vm->vm_map, va, VM_PROT_READ | VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
return (1);
{
vm_offset_t v;
v = trunc_page(vtopte(va));
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), FALSE);
}
rv = vm_fault(&vm->vm_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE);
if (va < USRSTACK) {
vm_map_pageable(&vm->vm_map, v, round_page(v+1), TRUE);
}
}
p->p_flag &= ~SLOCK;
p->p_flag |= (oldflags & SLOCK);
if (rv != KERN_SUCCESS)
return 1;
/*
* XXX: continuation of rude stack hack
*/
nss = nss >> PGSHIFT;
if (nss > vm->vm_ssize)
nss >>= PGSHIFT;
if (nss > vm->vm_ssize) {
vm->vm_ssize = nss;
}
return (0);
}

View File

@ -8,12 +8,19 @@
* file.
*
* Written by Julian Elischer (julian@dialix.oz.au)
* $Id: scsi_base.c,v 1.2 1993/11/25 06:30:58 davidg Exp $
* $Id: scsi_base.c,v 1.3 1993/12/19 00:54:50 wollman Exp $
*/
#define SPLSD splbio
#define ESUCCESS 0
#include <sys/types.h>
#include <sys/param.h>
#include <machine/param.h>
#include <vm/vm_statistics.h>
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include "systm.h"
#include <sys/buf.h>
#include <sys/uio.h>
@ -411,7 +418,7 @@ scsi_scsi_cmd(sc_link, scsi_cmd, cmdlen, data_addr, datalen,
xs->resid = datalen;
xs->bp = bp;
/*XXX*/ /*use constant not magic number */
if (datalen && ((caddr_t) data_addr < (caddr_t) 0xfe000000UL)) {
if (datalen && ((caddr_t) data_addr < (caddr_t) KERNBASE)) {
if (bp) {
printf("Data buffered space not in kernel context\n");
#ifdef SCSIDEBUG
@ -439,7 +446,7 @@ scsi_scsi_cmd(sc_link, scsi_cmd, cmdlen, data_addr, datalen,
retry:
xs->error = XS_NOERROR;
#ifdef PARANOID
if (datalen && ((caddr_t) xs->data < (caddr_t) 0xfe000000)) {
if (datalen && ((caddr_t) xs->data < (caddr_t) KERNBASE)) {
printf("It's still wrong!\n");
}
#endif /*PARANOID*/

View File

@ -6,7 +6,14 @@
*
*
*/
#include <sys/types.h>
#include <sys/param.h>
#include <machine/param.h>
#include <vm/vm_statistics.h>
#include <vm/vm_param.h>
#include <vm/lock.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#include "systm.h"
#include <sys/errno.h>
#include <sys/malloc.h>
@ -238,7 +245,7 @@ errval scsi_do_ioctl(struct scsi_link *sc_link, int cmd, caddr_t addr, int f)
caddr_t d_addr;
int len;
if((unsigned int)screq < (unsigned int)0xfe000000UL)
if((unsigned int)screq < (unsigned int)KERNBASE)
{
screq = malloc(sizeof(scsireq_t),M_TEMP,M_WAITOK);
bcopy(screq2,screq,sizeof(scsireq_t));
@ -269,7 +276,7 @@ errval scsi_do_ioctl(struct scsi_link *sc_link, int cmd, caddr_t addr, int f)
ret = bp->b_error;
}
free(bp,M_TEMP);
if((unsigned int)screq2 < (unsigned int)0xfe000000UL)
if((unsigned int)screq2 < (unsigned int)KERNBASE)
{
bcopy(screq,screq2,sizeof(scsireq_t));
free(screq,M_TEMP);