Remove everything related to __ARM_ARCH >= 6 from the pmap.h header except
for the <machine/pmap-v6.h> include. It was used by the old pmap-v6 code.
This commit is contained in:
parent
8714daf023
commit
0924d1687c
@@ -60,21 +60,10 @@
|
||||
/*
|
||||
* Pte related macros
|
||||
*/
|
||||
#if ARM_ARCH_6 || ARM_ARCH_7A
|
||||
#ifdef SMP
|
||||
#define PTE_NOCACHE 2
|
||||
#else
|
||||
#define PTE_NOCACHE 1
|
||||
#endif
|
||||
#define PTE_CACHE 6
|
||||
#define PTE_DEVICE 2
|
||||
#define PTE_PAGETABLE 6
|
||||
#else
|
||||
#define PTE_NOCACHE 1
|
||||
#define PTE_CACHE 2
|
||||
#define PTE_DEVICE PTE_NOCACHE
|
||||
#define PTE_PAGETABLE 3
|
||||
#endif
|
||||
|
||||
enum mem_type {
|
||||
STRONG_ORD = 0,
|
||||
@@ -104,11 +93,7 @@ enum mem_type {
|
||||
|
||||
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
|
||||
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
|
||||
boolean_t pmap_page_is_mapped(vm_page_t);
|
||||
#else
|
||||
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
|
||||
#endif
|
||||
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
|
||||
|
||||
/*
|
||||
@@ -131,9 +116,7 @@ struct pv_chunk;
|
||||
struct md_page {
|
||||
int pvh_attrs;
|
||||
vm_memattr_t pv_memattr;
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
vm_offset_t pv_kva; /* first kernel VA mapping */
|
||||
#endif
|
||||
TAILQ_HEAD(,pv_entry) pv_list;
|
||||
};
|
||||
|
||||
@@ -164,11 +147,7 @@ struct pmap {
|
||||
struct l2_dtable *pm_l2[L2_SIZE];
|
||||
cpuset_t pm_active; /* active on cpus */
|
||||
struct pmap_statistics pm_stats; /* pmap statictics */
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
TAILQ_HEAD(,pv_chunk) pm_pvchunk; /* list of mappings in pmap */
|
||||
#else
|
||||
TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
|
||||
#endif
|
||||
};
|
||||
|
||||
typedef struct pmap *pmap_t;
|
||||
@@ -198,10 +177,8 @@ typedef struct pv_entry {
|
||||
vm_offset_t pv_va; /* virtual address for mapping */
|
||||
TAILQ_ENTRY(pv_entry) pv_list;
|
||||
int pv_flags; /* flags (wired, etc...) */
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
pmap_t pv_pmap; /* pmap where mapping lies */
|
||||
TAILQ_ENTRY(pv_entry) pv_plist;
|
||||
#endif
|
||||
} *pv_entry_t;
|
||||
|
||||
/*
|
||||
@@ -270,9 +247,7 @@ void *pmap_mapdev(vm_offset_t, vm_size_t);
|
||||
void pmap_unmapdev(vm_offset_t, vm_size_t);
|
||||
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
|
||||
void pmap_debug(int);
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
|
||||
#endif
|
||||
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
|
||||
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
|
||||
void
|
||||
@@ -340,119 +315,9 @@ extern int pmap_needs_pte_sync;
|
||||
/*
|
||||
* User-visible names for the ones that vary with MMU class.
|
||||
*/
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
#define L2_AP(x) (L2_AP0(x))
|
||||
#else
|
||||
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
|
||||
#endif
|
||||
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
||||
/*
|
||||
* AP[2:1] access permissions model:
|
||||
*
|
||||
* AP[2](APX) - Write Disable
|
||||
* AP[1] - User Enable
|
||||
* AP[0] - Reference Flag
|
||||
*
|
||||
* AP[2] AP[1] Kernel User
|
||||
* 0 0 R/W N
|
||||
* 0 1 R/W R/W
|
||||
* 1 0 R N
|
||||
* 1 1 R R
|
||||
*
|
||||
*/
|
||||
#define L2_S_PROT_R (0) /* kernel read */
|
||||
#define L2_S_PROT_U (L2_AP0(2)) /* user read */
|
||||
#define L2_S_REF (L2_AP0(1)) /* reference flag */
|
||||
|
||||
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R|L2_APX)
|
||||
#define L2_S_EXECUTABLE(pte) (!(pte & L2_XN))
|
||||
#define L2_S_WRITABLE(pte) (!(pte & L2_APX))
|
||||
#define L2_S_REFERENCED(pte) (!!(pte & L2_S_REF))
|
||||
|
||||
#ifndef SMP
|
||||
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C)
|
||||
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C)
|
||||
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C)
|
||||
#else
|
||||
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
|
||||
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
||||
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
||||
#endif /* SMP */
|
||||
|
||||
#define L1_S_PROTO (L1_TYPE_S)
|
||||
#define L1_C_PROTO (L1_TYPE_C)
|
||||
#define L2_S_PROTO (L2_TYPE_S)
|
||||
|
||||
/*
|
||||
* Promotion to a 1MB (SECTION) mapping requires that the corresponding
|
||||
* 4KB (SMALL) page mappings have identical settings for the following fields:
|
||||
*/
|
||||
#define L2_S_PROMOTE (L2_S_REF | L2_SHARED | L2_S_PROT_MASK | \
|
||||
L2_XN | L2_S_PROTO)
|
||||
|
||||
/*
|
||||
* In order to compare 1MB (SECTION) entry settings with the 4KB (SMALL)
|
||||
* page mapping it is necessary to read and shift appropriate bits from
|
||||
* L1 entry to positions of the corresponding bits in the L2 entry.
|
||||
*/
|
||||
#define L1_S_DEMOTE(l1pd) ((((l1pd) & L1_S_PROTO) >> 0) | \
|
||||
(((l1pd) & L1_SHARED) >> 6) | \
|
||||
(((l1pd) & L1_S_REF) >> 6) | \
|
||||
(((l1pd) & L1_S_PROT_MASK) >> 6) | \
|
||||
(((l1pd) & L1_S_XN) >> 4))
|
||||
|
||||
#ifndef SMP
|
||||
#define ARM_L1S_STRONG_ORD (0)
|
||||
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
||||
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
||||
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1))
|
||||
#define ARM_L1S_NRML_IWT_OWT (L1_S_C)
|
||||
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B)
|
||||
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B)
|
||||
|
||||
#define ARM_L2L_STRONG_ORD (0)
|
||||
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
||||
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1))
|
||||
#define ARM_L2L_NRML_IWT_OWT (L2_C)
|
||||
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B)
|
||||
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B)
|
||||
|
||||
#define ARM_L2S_STRONG_ORD (0)
|
||||
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
||||
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1))
|
||||
#define ARM_L2S_NRML_IWT_OWT (L2_C)
|
||||
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B)
|
||||
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B)
|
||||
#else
|
||||
#define ARM_L1S_STRONG_ORD (0)
|
||||
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
||||
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
||||
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED)
|
||||
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)
|
||||
|
||||
#define ARM_L2L_STRONG_ORD (0)
|
||||
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
||||
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
||||
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)
|
||||
|
||||
#define ARM_L2S_STRONG_ORD (0)
|
||||
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
||||
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
||||
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
||||
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
|
||||
#endif /* SMP */
|
||||
|
||||
#elif ARM_NMMUS > 1
|
||||
#if ARM_NMMUS > 1
|
||||
/* More than one MMU class configured; use variables. */
|
||||
#define L2_S_PROT_U pte_l2_s_prot_u
|
||||
#define L2_S_PROT_W pte_l2_s_prot_w
|
||||
@@ -494,7 +359,7 @@ extern int pmap_needs_pte_sync;
|
||||
|
||||
#endif /* ARM_NMMUS > 1 */
|
||||
|
||||
#if defined(CPU_XSCALE_81342) || ARM_ARCH_6 || ARM_ARCH_7A
|
||||
#if defined(CPU_XSCALE_81342)
|
||||
#define PMAP_NEEDS_PTE_SYNC 1
|
||||
#define PMAP_INCLUDE_PTE_SYNC
|
||||
#else
|
||||
@@ -505,8 +370,6 @@ extern int pmap_needs_pte_sync;
|
||||
* These macros return various bits based on kernel/user and protection.
|
||||
* Note that the compiler will usually fold these at compile time.
|
||||
*/
|
||||
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
||||
|
||||
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
||||
#define L1_S_PROT_W (L1_S_AP(AP_W))
|
||||
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
|
||||
@@ -524,27 +387,6 @@ extern int pmap_needs_pte_sync;
|
||||
|
||||
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
|
||||
#else
|
||||
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
||||
#define L1_S_PROT_W (L1_S_APX) /* Write disable */
|
||||
#define L1_S_PROT_MASK (L1_S_PROT_W|L1_S_PROT_U)
|
||||
#define L1_S_REF (L1_S_AP(AP_REF)) /* Reference flag */
|
||||
#define L1_S_WRITABLE(pd) (!((pd) & L1_S_PROT_W))
|
||||
#define L1_S_EXECUTABLE(pd) (!((pd) & L1_S_XN))
|
||||
#define L1_S_REFERENCED(pd) ((pd) & L1_S_REF)
|
||||
|
||||
#define L1_S_PROT(ku, pr) (((((ku) == PTE_KERNEL) ? 0 : L1_S_PROT_U) | \
|
||||
(((pr) & VM_PROT_WRITE) ? 0 : L1_S_PROT_W) | \
|
||||
(((pr) & VM_PROT_EXECUTE) ? 0 : L1_S_XN)))
|
||||
|
||||
#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3))
|
||||
#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
||||
|
||||
#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
||||
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Macros to test if a mapping is mappable with an L1 Section mapping
|
||||
@@ -619,12 +461,12 @@ extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
|
||||
vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
|
||||
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
|
||||
|
||||
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 || defined(CPU_XSCALE_81342)
|
||||
#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342)
|
||||
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
|
||||
void pmap_zero_page_generic(vm_paddr_t, int, int);
|
||||
|
||||
void pmap_pte_init_generic(void);
|
||||
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
|
||||
#endif /* ARM_MMU_GENERIC != 0 */
|
||||
|
||||
#if ARM_MMU_XSCALE == 1
|
||||
void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
|
||||
|
Loading…
x
Reference in New Issue
Block a user