MFamd64: Remove i386_protection_init() and the protection_codes[] array
and replace them with a simple if test to turn on PG_RW. i386 != vax.
commit 27c5291e94
parent 5c3fbcd37c
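The rationale is visible in the deleted code below: across all eight VM_PROT_READ/WRITE/EXECUTE combinations, protection_codes[] only ever held 0 or PG_RW, and it held PG_RW exactly when VM_PROT_WRITE was set, so the table lookup collapses to a single bit test. The standalone C sketch below demonstrates that equivalence; it is illustrative only, not part of the commit, and the VM_PROT_* and PG_RW constant values are assumptions mirroring the FreeBSD headers.

/*
 * Standalone sketch (not kernel code): check that the removed
 * protection_codes[] table and the new PG_RW test agree for all
 * eight protection combinations.  Constant values are assumptions
 * mirroring <vm/vm.h> and the i386/amd64 pmap headers.
 */
#include <assert.h>
#include <stdio.h>

#define VM_PROT_NONE    0x00
#define VM_PROT_READ    0x01
#define VM_PROT_WRITE   0x02
#define VM_PROT_EXECUTE 0x04
#define PG_RW           0x002   /* PTE read/write bit */

static int protection_codes[8];

/* Build the table the same way the removed switch statement did. */
static void
i386_protection_init(void)
{
        int *kp, prot;

        kp = protection_codes;
        for (prot = 0; prot < 8; prot++) {
                switch (prot) {
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
                        *kp++ = 0;
                        break;
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
                        *kp++ = PG_RW;
                        break;
                }
        }
}

int
main(void)
{
        int prot, newbits;

        i386_protection_init();
        for (prot = 0; prot < 8; prot++) {
                /* The replacement logic from the new pmap_enter(). */
                newbits = 0;
                if ((prot & VM_PROT_WRITE) != 0)
                        newbits |= PG_RW;
                assert(newbits == protection_codes[prot]);
        }
        printf("table lookup and PG_RW test agree for all 8 cases\n");
        return (0);
}

Compiled and run, the assertion holds for every case, which is the point of the "i386 != vax" remark: the table-driven conversion was a holdover from the VAX-style pmap (the removed comment still says "convert to a vax protection code"), and it buys nothing on x86.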
@@ -172,13 +172,6 @@ __FBSDID("$FreeBSD$");
 #define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
 
-/*
- * Given a map and a machine independent protection code,
- * convert to a vax protection code.
- */
-#define pte_prot(m, p) (protection_codes[p])
-static int protection_codes[8];
-
 struct pmap kernel_pmap_store;
 LIST_HEAD(pmaplist, pmap);
 static struct pmaplist allpmaps;
@@ -236,7 +229,6 @@ static pt_entry_t *PADDR1 = 0, *PADDR2;
 
 static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
 static pv_entry_t get_pv_entry(void);
-static void i386_protection_init(void);
 static void pmap_clear_ptes(vm_page_t m, int bit)
     __always_inline;
 
@@ -313,11 +305,6 @@ pmap_bootstrap(firstaddr, loadaddr)
 
         virtual_end = VM_MAX_KERNEL_ADDRESS;
 
-        /*
-         * Initialize protection array.
-         */
-        i386_protection_init();
-
         /*
          * Initialize the kernel pmap (which is statically allocated).
          */
@@ -2017,8 +2004,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         /*
          * Now validate mapping with desired protection/wiring.
          */
-        newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) | PG_V);
-
+        newpte = (pt_entry_t)(pa | PG_V);
+        if ((prot & VM_PROT_WRITE) != 0)
+                newpte |= PG_RW;
         if (wired)
                 newpte |= PG_W;
         if (va < VM_MAXUSER_ADDRESS)
@@ -2884,34 +2872,6 @@ pmap_clear_reference(vm_page_t m)
  * Miscellaneous support routines follow
  */
 
-static void
-i386_protection_init()
-{
-        register int *kp, prot;
-
-        kp = protection_codes;
-        for (prot = 0; prot < 8; prot++) {
-                switch (prot) {
-                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
-                        /*
-                         * Read access is also 0. There isn't any execute bit,
-                         * so just make it readable.
-                         */
-                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
-                case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
-                case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
-                        *kp++ = 0;
-                        break;
-                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
-                case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
-                case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
-                        *kp++ = PG_RW;
-                        break;
-                }
-        }
-}
-
 /*
  * Map a set of physical memory pages into the kernel virtual
  * address space.  Return a pointer to where it is mapped.  This