Move atdevbase out of locore.s and into machdep.c

Macroize locore.s' page table setup even more; now it's almost readable.
Rename PG_U to PG_A (so that I can...)
Rename PG_u to PG_U.  "PG_u" was just too ugly...
Remove some unused vars in pmap.c
Remove PG_KR and PG_KW
Remove SSIZE
Remove SINCR
Remove BTOPKERNBASE

This concludes my spring cleaning, modulo any bug fixes for messes I
have made on the way.

(Funny to be back here in pmap.c, that's where my first significant
contribution to 386BSD was... :-)
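In terms of the new i386/include/pmap.h further down, the renames work out as below. This fragment is only an editor's summary of the new values, not code from the commit:

/* Summary of the pte-bit renames (values taken from the new pmap.h). */
#define PG_U   0x004   /* formerly PG_u: U/S bit, user-level access allowed */
#define PG_A   0x020   /* formerly PG_U: Accessed, "page was accessed" */
#define PG_RW  0x002   /* used wherever PG_KW was; PG_KR (0x0) is dropped outright */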
Poul-Henning Kamp 1996-05-02 22:25:18 +00:00
parent 031ce85e23
commit 5084d10dd0
12 changed files with 282 additions and 341 deletions

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)genassym.c 5.11 (Berkeley) 5/10/91
* $Id: genassym.c,v 1.34 1996/04/13 11:22:57 bde Exp $
* $Id: genassym.c,v 1.35 1996/05/02 14:19:40 phk Exp $
*/
#include <stdio.h>
@ -111,6 +111,7 @@ main()
printf("#define\tNKPDE %d\n", NKPDE);
printf("#define\tNKPT %d\n", NKPT);
printf("#define\tPAGE_SHIFT %d\n", PAGE_SHIFT);
printf("#define\tPAGE_MASK %d\n", PAGE_MASK);
printf("#define\tPDRSHIFT %d\n", PDRSHIFT);
printf("#define\tUSRSTACK 0x%lx\n", USRSTACK);
printf("#define\tVM_MAXUSER_ADDRESS 0x%lx\n", VM_MAXUSER_ADDRESS);

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.68 1996/04/30 11:58:56 phk Exp $
* $Id: locore.s,v 1.69 1996/05/02 14:19:43 phk Exp $
*
* originally from: locore.s, by William F. Jolitz
*
@ -104,7 +104,7 @@ tmpstk:
.globl _boothowto,_bootdev
.globl _cpu,_atdevbase,_cpu_vendor,_cpu_id,_bootinfo
.globl _cpu,_cpu_vendor,_cpu_id,_bootinfo
.globl _cpu_high, _cpu_feature
_cpu: .long 0 /* are we 386, 386sx, or 486 */
@ -113,7 +113,6 @@ _cpu_high: .long 0 /* highest arg to CPUID */
_cpu_feature: .long 0 /* features */
_cpu_vendor: .space 20 /* CPU origin code */
_bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */
_atdevbase: .long 0 /* location of start of iomem in virtual */
_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
physfree: .long 0 /* phys addr of next free page */
@ -156,16 +155,32 @@ _bdb_exists: .long 0
/*
* fillkpt
* eax = (page frame address | control | status) == pte
* ebx = address of page table
* eax = page frame address
* ebx = index into page table
* ecx = how many pages to map
* base = base address of page dir/table
* prot = protection bits
*/
#define fillkpt \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $4,%ebx ; /* next pte */ \
#define fillkpt(base, prot) \
shll $2, %ebx ; \
addl base, %ebx ; \
orl $PG_V+prot, %eax ; \
1: movl %eax,(%ebx) ; \
addl $PAGE_SIZE,%eax ; /* increment physical address */ \
addl $4,%ebx ; /* next pte */ \
loop 1b
/*
* fillkptphys(prot)
* eax = physical address
* ecx = how many pages to map
* prot = protection bits
*/
#define fillkptphys(prot) \
movl %eax, %ebx ; \
shrl $PAGE_SHIFT, %ebx ; \
fillkpt(R(_KPTphys), prot)
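A rough C model of what the two assembler macros compute, following the register conventions documented above; the *_model names are invented for this sketch and none of this is kernel code:

#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define PG_V       0x001

/* fillkpt(base, prot): starting at entry 'index' of the table at 'base',
 * enter 'count' consecutive page frames beginning at physical address
 * 'frame', each marked valid plus the caller's protection bits. */
static void
fillkpt_model(uint32_t *base, uint32_t index, uint32_t frame,
              uint32_t count, uint32_t prot)
{
        while (count--) {
                base[index++] = frame | PG_V | prot;
                frame += PAGE_SIZE;             /* next physical page */
        }
}

/* fillkptphys(prot): the pte index is simply the physical address shifted
 * down by PAGE_SHIFT, and the table is always KPTphys. */
static void
fillkptphys_model(uint32_t *kptphys, uint32_t pa, uint32_t count,
                  uint32_t prot)
{
        fillkpt_model(kptphys, pa >> PAGE_SHIFT, pa, count, prot);
}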
.text
/**********************************************************************
*
@ -688,8 +703,8 @@ create_pagetables:
over_symalloc:
#endif
addl $PAGE_SIZE-1,%esi
andl $~(PAGE_SIZE-1),%esi
addl $PAGE_MASK,%esi
andl $~PAGE_MASK,%esi
movl %esi,R(_KERNend) /* save end of kernel */
movl %esi,R(physfree) /* next free page is at end of kernel */
@ -712,105 +727,74 @@ over_symalloc:
movl %esi,R(p0upt)
/* Map read-only from zero to the end of the kernel text section */
movl R(_KPTphys), %esi
movl $R(_etext),%ecx
addl $PAGE_SIZE-1,%ecx
shrl $PAGE_SHIFT,%ecx
movl $PG_V|PG_KR,%eax
movl %esi, %ebx
xorl %eax, %eax
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
cmpl $0,R(_bdb_exists)
jne map_read_write
#endif
fillkpt
movl $R(_etext),%ecx
addl $PAGE_MASK,%ecx
shrl $PAGE_SHIFT,%ecx
fillkptphys(0)
/* Map read-write, data, bss and symbols */
movl $R(_etext),%eax
map_read_write:
andl $PG_FRAME,%eax
movl R(_KERNend),%ecx
subl %eax,%ecx
shrl $PAGE_SHIFT,%ecx
orl $PG_V|PG_KW,%eax
fillkpt
fillkptphys(PG_RW)
/* Map page directory. */
movl R(_IdlePTD), %eax
movl $1, %ecx
movl %eax, %ebx
shrl $PAGE_SHIFT-2, %ebx
addl R(_KPTphys), %ebx
orl $PG_V|PG_KW, %eax
fillkpt
fillkptphys(PG_RW)
/* Map proc0's page table for the UPAGES the physical way. */
movl R(p0upt), %eax
movl $1, %ecx
movl %eax, %ebx
shrl $PAGE_SHIFT-2, %ebx
addl R(_KPTphys), %ebx
orl $PG_V|PG_KW, %eax
fillkpt
fillkptphys(PG_RW)
/* Map proc0s UPAGES the physical way */
movl R(upa), %eax
movl $UPAGES, %ecx
movl %eax, %ebx
shrl $PAGE_SHIFT-2, %ebx
addl R(_KPTphys), %ebx
orl $PG_V|PG_KW, %eax
fillkpt
fillkptphys(PG_RW)
/* ... and in the special page table for this purpose. */
/* Map ISA hole */
movl $ISA_HOLE_START, %eax
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
fillkptphys(PG_RW|PG_N)
/* Map proc0s UPAGES in the special page table for this purpose. */
movl R(upa), %eax
movl $KSTKPTEOFF, %ebx
movl $UPAGES, %ecx
orl $PG_V|PG_KW, %eax
movl R(p0upt), %ebx
addl $(KSTKPTEOFF * PTESIZE), %ebx
fillkpt
fillkpt(R(p0upt), PG_RW)
/* and put the page table in the pde. */
movl R(p0upt), %eax
movl R(_IdlePTD), %esi
orl $PG_V|PG_KW,%eax
movl %eax,KSTKPTDI*PDESIZE(%esi)
/* Map ISA hole */
#define ISA_HOLE_START 0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
movl $ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
movl $ISA_HOLE_START, %eax
movl %eax, %ebx
/* XXX 2 is magic for log2(PTESIZE). */
shrl $PAGE_SHIFT-2, %ebx
addl R(_KPTphys), %ebx
/* XXX could load %eax directly with $ISA_HOLE_START|PG_V|PG_KW_PG_N. */
orl $PG_V|PG_KW|PG_N, %eax
fillkpt
/* XXX could load %eax directly with $ISA_HOLE_START+KERNBASE. */
movl $ISA_HOLE_START, %eax
addl $KERNBASE, %eax
movl %eax, R(_atdevbase)
movl $KSTKPTDI, %ebx
movl $1, %ecx
fillkpt(R(_IdlePTD), PG_RW)
/* install a pde for temporary double map of bottom of VA */
movl R(_IdlePTD), %esi
movl R(_KPTphys), %eax
orl $PG_V|PG_KW, %eax
movl %eax, (%esi)
xorl %ebx, %ebx
movl $1, %ecx
fillkpt(R(_IdlePTD), PG_RW)
/* install pde's for pt's */
movl R(_IdlePTD), %esi
movl R(_KPTphys), %eax
orl $PG_V|PG_KW, %eax
movl $(NKPT), %ecx
lea (KPTDI*PDESIZE)(%esi), %ebx
fillkpt
movl $KPTDI, %ebx
movl $NKPT, %ecx
fillkpt(R(_IdlePTD), PG_RW)
/* install a pde recursively mapping page directory as a page table */
movl R(_IdlePTD), %esi
movl %esi,%eax
orl $PG_V|PG_KW,%eax
movl %eax,PTDPTDI*PDESIZE(%esi)
movl R(_IdlePTD), %eax
movl $PTDPTDI, %ebx
movl $1,%ecx
fillkpt(R(_IdlePTD), PG_RW)
ret

View File

@ -35,7 +35,7 @@
* SUCH DAMAGE.
*
* from: @(#)machdep.c 7.4 (Berkeley) 6/3/91
* $Id: machdep.c,v 1.186 1996/05/01 08:38:36 bde Exp $
* $Id: machdep.c,v 1.187 1996/05/02 14:19:47 phk Exp $
*/
#include "npx.h"
@ -168,6 +168,7 @@ int bouncepages = 0;
extern int freebufspace;
int msgbufmapped = 0; /* set when safe to use msgbuf */
int _udatasel, _ucodesel;
u_int atdevbase;
int physmem = 0;
@ -1313,6 +1314,8 @@ init386(first)
proc0.p_addr = proc0paddr;
atdevbase = ISA_HOLE_START + KERNBASE;
/*
* Initialize the console before we print anything out.
*/
@ -1498,7 +1501,7 @@ init386(first)
/*
* map page into kernel: valid, read/write, non-cacheable
*/
*(int *)CMAP1 = PG_V | PG_KW | PG_N | target_page;
*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
pmap_update();
tmp = *(int *)CADDR1;
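atdevbase, now set here instead of in locore.s, is just the kernel virtual address at which the ISA hole (physical 0xa0000-0xfffff) shows up, since the hole is mapped at KERNBASE + physical. A hedged sketch of the kind of conversion it enables (isa_hole_va is an invented name, not a kernel function):

/* Map a physical address inside the ISA hole, e.g. the VGA text buffer
 * at 0xb8000, to the kernel virtual address it is visible at.  Relies on
 * atdevbase == ISA_HOLE_START + KERNBASE as assigned above. */
static char *
isa_hole_va(unsigned pa)
{
        extern unsigned atdevbase;      /* u_int in machdep.c above */

        return (char *)(atdevbase + (pa - 0xa0000 /* ISA_HOLE_START */));
}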

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.86 1996/04/22 05:23:08 dyson Exp $
* $Id: pmap.c,v 1.87 1996/05/02 14:19:52 phk Exp $
*/
/*
@ -121,7 +121,7 @@ static void init_pv_entries __P((int));
#define pmap_pde_v(pte) ((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte) ((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte) ((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte) ((*(int *)pte & PG_U) != 0)
#define pmap_pte_u(pte) ((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte) ((*(int *)pte & PG_V) != 0)
#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
@ -177,8 +177,6 @@ static __inline void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
vm_offset_t va));
static void pmap_remove_pte __P((struct pmap *pmap, pt_entry_t *ptq,
vm_offset_t sva));
static vm_page_t
pmap_pte_vm_page __P((pmap_t pmap, vm_offset_t pt));
static boolean_t
pmap_testbit __P((vm_offset_t pa, int bit));
static void * pmap_getpdir __P((void));
@ -608,7 +606,7 @@ pmap_pinit(pmap)
/* install self-referential address mapping entry */
*(int *) (pmap->pm_pdir + PTDPTDI) =
((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW;
((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_RW;
pmap->pm_count = 1;
}
@ -651,7 +649,7 @@ pmap_growkernel(vm_offset_t addr)
vm_page_remove(nkpg);
pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
}
pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW);
pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW);
nkpg = NULL;
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
@ -986,7 +984,6 @@ pmap_remove(pmap, sva, eva)
register vm_offset_t eva;
{
register pt_entry_t *ptbase;
vm_offset_t va;
vm_offset_t pdnxt;
vm_offset_t ptpaddr;
vm_offset_t sindex, eindex;
@ -1161,7 +1158,6 @@ pmap_protect(pmap, sva, eva, prot)
vm_prot_t prot;
{
register pt_entry_t *pte;
register vm_offset_t va;
register pt_entry_t *ptbase;
vm_offset_t pdnxt;
vm_offset_t ptpaddr;
@ -1188,7 +1184,6 @@ pmap_protect(pmap, sva, eva, prot)
eindex = i386_btop(eva);
for (; sindex < eindex; sindex = pdnxt) {
int pprot;
int pbits;
pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
@ -1254,7 +1249,6 @@ pmap_enter(pmap, va, pa, prot, wired)
register pt_entry_t *pte;
vm_offset_t opa;
register pv_entry_t pv, npv;
int ptevalid;
vm_offset_t origpte, newpte;
if (pmap == NULL)
@ -1389,15 +1383,15 @@ validate:
if (wired)
newpte |= PG_W;
if (va < UPT_MIN_ADDRESS)
newpte |= PG_u;
newpte |= PG_U;
else if (va < UPT_MAX_ADDRESS)
newpte |= PG_u | PG_RW;
newpte |= PG_U | PG_RW;
/*
* if the mapping or permission bits are different, we need
* to update the pte.
*/
if ((origpte & ~(PG_M|PG_U)) != newpte) {
if ((origpte & ~(PG_M|PG_A)) != newpte) {
*pte = (pt_entry_t) newpte;
if (origpte)
pmap_update_1pg(va);
@ -1426,7 +1420,6 @@ pmap_qenter(va, m, count)
int count;
{
int i;
int anyvalid = 0;
register pt_entry_t *pte;
for (i = 0; i < count; i++) {
@ -1559,7 +1552,7 @@ pmap_enter_quick(pmap, va, pa)
/*
* Now validate mapping with RO protection
*/
*pte = (pt_entry_t) ((int) (pa | PG_V | PG_u));
*pte = (pt_entry_t) ((int) (pa | PG_V | PG_U));
return;
}
@ -1678,7 +1671,6 @@ pmap_prefault(pmap, addra, entry, object)
vm_offset_t addr;
vm_pindex_t pindex;
vm_page_t m;
int pageorder_index;
if (entry->object.vm_object != object)
return;
@ -1815,7 +1807,7 @@ pmap_zero_page(phys)
if (*(int *) CMAP2)
panic("pmap_zero_page: CMAP busy");
*(int *) CMAP2 = PG_V | PG_KW | (phys & PG_FRAME);
*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
bzero(CADDR2, PAGE_SIZE);
*(int *) CMAP2 = 0;
@ -1836,8 +1828,8 @@ pmap_copy_page(src, dst)
if (*(int *) CMAP1 || *(int *) CMAP2)
panic("pmap_copy_page: CMAP busy");
*(int *) CMAP1 = PG_V | PG_KW | (src & PG_FRAME);
*(int *) CMAP2 = PG_V | PG_KW | (dst & PG_FRAME);
*(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
#if __GNUC__ > 1
memcpy(CADDR2, CADDR1, PAGE_SIZE);
@ -1935,7 +1927,7 @@ pmap_testbit(pa, bit)
* mark UPAGES as always modified, and ptes as never
* modified.
*/
if (bit & (PG_U|PG_M)) {
if (bit & (PG_A|PG_M)) {
if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
continue;
}
@ -1967,9 +1959,8 @@ pmap_changebit(pa, bit, setem)
boolean_t setem;
{
register pv_entry_t pv;
register pt_entry_t *pte, npte;
register pt_entry_t *pte;
vm_offset_t va;
int changed;
int s;
if (!pmap_is_managed(pa))
@ -2057,7 +2048,7 @@ pmap_phys_address(ppn)
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
return pmap_testbit((pa), PG_U);
return pmap_testbit((pa), PG_A);
}
/*
@ -2089,7 +2080,7 @@ pmap_clear_modify(vm_offset_t pa)
void
pmap_clear_reference(vm_offset_t pa)
{
pmap_changebit((pa), PG_U, FALSE);
pmap_changebit((pa), PG_A, FALSE);
}
/*

View File

@ -42,28 +42,36 @@
*
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* $Id: pmap.h,v 1.36 1996/04/30 12:02:11 phk Exp $
* $Id: pmap.h,v 1.37 1996/05/02 14:20:04 phk Exp $
*/
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
#define PG_V 0x00000001
#define PG_RW 0x00000002
#define PG_u 0x00000004
#define PG_PROT 0x00000006 /* all protection bits . */
#define PG_NC_PWT 0x00000008 /* page cache write through */
#define PG_NC_PCD 0x00000010 /* page cache disable */
#define PG_N 0x00000018 /* Non-cacheable */
#define PG_U 0x00000020 /* page was accessed */
#define PG_M 0x00000040 /* page was modified */
#define PG_PS 0x00000080 /* page is big size */
#define PG_G 0x00000100 /* page is global */
#define PG_W 0x00000200 /* "Wired" pseudoflag */
#define PG_FRAME 0xfffff000
/*
* Page-directory and page-table entries follow this format, with a few
* of the fields not present here and there, depending on a lot of things.
*/
/* ---- Intel Nomenclature ---- */
#define PG_V 0x001 /* P Valid */
#define PG_RW 0x002 /* R/W Read/Write */
#define PG_U 0x004 /* U/S User/Supervisor */
#define PG_NC_PWT 0x008 /* PWT Write through */
#define PG_NC_PCD 0x010 /* PCD Cache disable */
#define PG_A 0x020 /* A Accessed */
#define PG_M 0x040 /* D Dirty */
#define PG_PS 0x080 /* PS Page size (0=4k,1=4M) */
#define PG_G 0x100 /* G Global */
#define PG_AVAIL1 0x200 /* / Available for system */
#define PG_AVAIL2 0x400 /* < programmers use */
#define PG_AVAIL3 0x800 /* \ */
#define PG_KR 0x00000000
#define PG_KW 0x00000002
/* Our various interpretations of the above */
#define PG_W PG_AVAIL1 /* "Wired" pseudoflag */
#define PG_FRAME (~PAGE_MASK)
#define PG_PROT (PG_RW|PG_U) /* all protection bits . */
#define PG_N (PG_NC_PWT|PG_NC_PCD) /* Non-cacheable */
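With the Intel-style names in place, building and inspecting a pte is just bit arithmetic on these constants. A small self-contained illustration (the defines are repeated here with the values from above; the two functions are invented for the example):

#define PG_V      0x001
#define PG_RW     0x002
#define PG_U      0x004
#define PG_A      0x020
#define PAGE_MASK 0xfff                 /* 4k pages, as in param.h */
#define PG_FRAME  (~PAGE_MASK)

/* A pte for an ordinary user-visible, writable page: frame bits plus
 * Valid, Read/Write and User/Supervisor. */
unsigned
make_user_rw_pte(unsigned pa)
{
        return (pa & PG_FRAME) | PG_V | PG_RW | PG_U;
}

/* The hardware sets PG_A the first time the page is touched; this is what
 * pmap_is_referenced() in pmap.c now tests. */
int
pte_was_accessed(unsigned pte)
{
        return (pte & PG_A) != 0;
}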
/*
* Page Protection Exception bits
@ -78,14 +86,6 @@
*/
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
/*
* NKPDE controls the virtual space of the kernel, what ever is left, minus
* the alternate page table area is given to the user (NUPDE)
*/
/*
* NKPDE controls the virtual space of the kernel, what ever is left is
* given to the user (NUPDE)
*/
#ifndef NKPT
#if 0
#define NKPT 26 /* actual number of kernel page tables */
@ -109,6 +109,12 @@
#define KSTKPTDI (PTDPTDI-1) /* ptd entry for u./kernel&user stack */
#define KSTKPTEOFF (NPTEPG-UPAGES) /* pte entry for kernel stack */
/*
* XXX doesn't really belong here I guess...
*/
#define ISA_HOLE_START 0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)
#ifndef LOCORE
typedef unsigned int *pd_entry_t;
typedef unsigned int *pt_entry_t;
@ -151,7 +157,7 @@ static __inline vm_offset_t
pmap_kextract(vm_offset_t va)
{
vm_offset_t pa = *(int *)vtopte(va);
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
pa = (pa & PG_FRAME) | (va & PAGE_MASK);
return pa;
}
#endif

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)param.h 5.8 (Berkeley) 6/28/91
* $Id: param.h,v 1.19 1995/05/25 07:41:27 davidg Exp $
* $Id: param.h,v 1.20 1996/05/02 14:20:02 phk Exp $
*/
#ifndef _MACHINE_PARAM_H_
@ -56,33 +56,20 @@
#define ALIGN(p) (((unsigned)(p) + ALIGNBYTES) & ~ALIGNBYTES)
#define PAGE_SHIFT 12 /* LOG2(PAGE_SIZE) */
#define PAGE_SIZE (1 << PAGE_SHIFT) /* bytes/page */
#define PAGE_SIZE (1<<PAGE_SHIFT) /* bytes/page */
#define PAGE_MASK (PAGE_SIZE-1)
#define NPTEPG (PAGE_SIZE/(sizeof (pt_entry_t)))
#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
/* XXX PDRSHIFT and PD_SHIFT are two names for the same thing */
#define PDRSHIFT 22 /* LOG2(NBPDR) */
#define NBPDR (1 << PDRSHIFT) /* bytes/page dir */
#define PDROFSET (NBPDR-1) /* byte offset into page dir */
/*
* XXX This should really be KPTDPTDI << PDRSHIFT, but since KPTDPTDI is
* defined in pmap.h which is included after this we can't do that
* (YET!)
*/
#define BTOPKERNBASE (KERNBASE >> PAGE_SHIFT)
#define NBPDR (1<<PDRSHIFT) /* bytes/page dir */
#define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */
#define DEV_BSIZE (1 << DEV_BSHIFT)
#define DEV_BSIZE (1<<DEV_BSHIFT)
#define BLKDEV_IOSIZE 2048
#define MAXPHYS (64 * 1024) /* max raw I/O transfer size */
/* NOTE: SSIZE, SINCR and UPAGES must be multiples of CLSIZE */
#define SSIZE 1 /* initial stack size/PAGE_SIZE */
#define SINCR 1 /* increment of stack/PAGE_SIZE */
#define UPAGES 2 /* pages of u-area */
/*
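The constants above pair with the VADDR() macro and the directory/table indices in pmap.h in the usual two-level fashion; a small standalone illustration of the arithmetic (the example address and variable names are made up):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1 << PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE - 1)
#define PDRSHIFT   22
#define NPTEPG     (PAGE_SIZE / 4)      /* 4-byte ptes: 1024 per page table */

int
main(void)
{
        unsigned va = 0xfe012345;       /* arbitrary example address */

        unsigned pdi = va >> PDRSHIFT;                    /* page directory index */
        unsigned pti = (va >> PAGE_SHIFT) & (NPTEPG - 1); /* page table index */
        unsigned off = va & PAGE_MASK;                    /* offset within page */

        /* VADDR(pdi, pti) in pmap.h reassembles the first two pieces. */
        printf("pdi=%u pti=%u off=0x%x\n", pdi, pti, off);
        return (0);
}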
