2005-01-05 21:58:49 +00:00
|
|
|
/*-
|
2004-05-14 11:46:45 +00:00
|
|
|
* Copyright (c) 1991 Regents of the University of California.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* the Systems Programming Group of the University of Utah Computer
|
|
|
|
* Science Department and William Jolitz of UUNET Technologies Inc.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
|
|
* must display the following acknowledgement:
|
|
|
|
* This product includes software developed by the University of
|
|
|
|
* California, Berkeley and its contributors.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* Derived from hp300 version by Mike Hibler, this version by William
|
|
|
|
* Jolitz uses a recursive map [a pde points to the page directory] to
|
|
|
|
* map the page tables using the pagetables themselves. This is done to
|
|
|
|
* reduce the impact on kernel virtual memory for lots of sparse address
|
|
|
|
* space, and to reduce the cost of memory to each process.
|
|
|
|
*
|
|
|
|
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
|
|
|
|
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
|
|
|
|
* from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
|
|
|
|
*
|
|
|
|
* $FreeBSD$
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _MACHINE_PMAP_H_
|
|
|
|
#define _MACHINE_PMAP_H_
|
|
|
|
|
|
|
|
#include <machine/pte.h>
|
2006-05-31 11:57:37 +00:00
|
|
|
#include <machine/cpuconf.h>
|
2004-05-14 11:46:45 +00:00
|
|
|
/*
|
|
|
|
* Pte related macros
|
|
|
|
*/
|
2012-08-15 03:03:03 +00:00
|
|
|
#if ARM_ARCH_6 || ARM_ARCH_7A
|
|
|
|
#ifdef SMP
|
|
|
|
#define PTE_NOCACHE 2
|
|
|
|
#else
|
|
|
|
#define PTE_NOCACHE 1
|
|
|
|
#endif
|
|
|
|
#define PTE_CACHE 4
|
|
|
|
#define PTE_DEVICE 2
|
|
|
|
#define PTE_PAGETABLE 4
|
|
|
|
#else
|
|
|
|
#define PTE_NOCACHE 1
|
|
|
|
#define PTE_CACHE 2
|
|
|
|
#define PTE_PAGETABLE 3
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
 * Memory attribute classes used when choosing page-table cacheability
 * encodings (abbreviations: I = inner, O = outer, WT = write-through,
 * WB = write-back, WBA = write-back write-allocate).
 */
enum mem_type {
	STRONG_ORD = 0,		/* strongly ordered */
	DEVICE_NOSHARE,		/* device memory, non-shareable */
	DEVICE_SHARE,		/* device memory, shareable */
	NRML_NOCACHE,		/* normal memory, non-cacheable */
	NRML_IWT_OWT,		/* normal, inner/outer write-through */
	NRML_IWB_OWB,		/* normal, inner/outer write-back */
	NRML_IWBA_OWBA		/* normal, inner/outer write-back write-allocate */
};
|
2012-06-13 05:02:51 +00:00
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
#ifndef LOCORE
|
|
|
|
|
|
|
|
#include <sys/queue.h>
|
Commit the support for removing cpumask_t and replacing it directly with
cpuset_t objects.
That is going to offer the underlying support for a simple bump of
MAXCPU and then support for number of cpus > 32 (as it is today).
Right now, cpumask_t is an int, 32 bits on all our supported architecture.
cpumask_t on the other side is implemented as an array of longs, and
easilly extendible by definition.
The architectures touched by this commit are the following:
- amd64
- i386
- pc98
- arm
- ia64
- XEN
while the others are still missing.
Userland is believed to be fully converted with the changes contained
here.
Some technical notes:
- This commit may be considered an ABI nop for all the architectures
different from amd64 and ia64 (and sparc64 in the future)
- per-cpu members, which are now converted to cpuset_t, needs to be
accessed avoiding migration, because the size of cpuset_t should be
considered unknown
- size of cpuset_t objects is different from kernel and userland (this is
primirally done in order to leave some more space in userland to cope
with KBI extensions). If you need to access kernel cpuset_t from the
userland please refer to example in this patch on how to do that
correctly (kgdb may be a good source, for example).
- Support for other architectures is going to be added soon
- Only MAXCPU for amd64 is bumped now
The patch has been tested by sbruno and Nicholas Esborn on opteron
4 x 12 pack CPUs. More testing on big SMP is expected to came soon.
pluknet tested the patch with his 8-ways on both amd64 and i386.
Tested by: pluknet, sbruno, gianni, Nicholas Esborn
Reviewed by: jeff, jhb, sbruno
2011-05-05 14:39:14 +00:00
|
|
|
#include <sys/_cpuset.h>
|
2006-06-06 04:32:20 +00:00
|
|
|
#include <sys/_lock.h>
|
|
|
|
#include <sys/_mutex.h>
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#define PDESIZE sizeof(pd_entry_t) /* for assembly files */
|
|
|
|
#define PTESIZE sizeof(pt_entry_t) /* for assembly files */
|
|
|
|
|
|
|
|
#ifdef _KERNEL
|
|
|
|
|
2012-09-27 05:39:42 +00:00
|
|
|
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2012-12-19 00:24:31 +00:00
|
|
|
#define pmap_page_get_memattr(m) ((m)->md.pv_memattr)
|
2004-09-23 21:54:25 +00:00
|
|
|
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
|
2012-06-16 18:56:19 +00:00
|
|
|
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
|
2012-12-19 00:24:31 +00:00
|
|
|
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
|
2009-07-12 23:31:20 +00:00
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
/*
|
2004-11-07 23:01:36 +00:00
|
|
|
* Pmap stuff
|
2004-05-14 11:46:45 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This structure is used to hold a virtual<->physical address
|
|
|
|
* association and is used mostly by bootstrap code
|
|
|
|
*/
|
|
|
|
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;	/* list linkage */
	vm_offset_t pv_va;		/* virtual address */
	vm_paddr_t pv_pa;		/* physical address */
};
|
|
|
|
|
|
|
|
struct pv_entry;
|
|
|
|
|
|
|
|
/* Machine-dependent per-vm_page data. */
struct md_page {
	int pvh_attrs;			/* page attributes (PVF_* flags) */
	vm_memattr_t pv_memattr;	/* memory attribute for mappings */
	vm_offset_t pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry) pv_list;	/* all valid mappings of this page */
};
|
|
|
|
|
|
|
|
struct l1_ttable;
|
|
|
|
struct l2_dtable;
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The number of L2 descriptor tables which can be tracked by an l2_dtable.
|
|
|
|
* A bucket size of 16 provides for 16MB of contiguous virtual address
|
|
|
|
* space per l2_dtable. Most processes will, therefore, require only two or
|
|
|
|
* three of these to map their whole working set.
|
|
|
|
*/
|
|
|
|
#define L2_BUCKET_LOG2 4
|
|
|
|
#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2)
|
|
|
|
/*
|
|
|
|
* Given the above "L2-descriptors-per-l2_dtable" constant, the number
|
|
|
|
* of l2_dtable structures required to track all possible page descriptors
|
|
|
|
* mappable by an L1 translation table is given by the following constants:
|
|
|
|
*/
|
|
|
|
#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
|
|
|
|
#define L2_SIZE (1 << L2_LOG2)
|
|
|
|
|
|
|
|
struct pmap {
	struct mtx pm_mtx;		/* lock, see PMAP_LOCK() below */
	u_int8_t pm_domain;		/* MMU domain, see PMAP_DOMAINS below */
	struct l1_ttable *pm_l1;	/* L1 translation table */
	struct l2_dtable *pm_l2[L2_SIZE]; /* L2 descriptor tables */
	pd_entry_t *pm_pdir;		/* KVA of page directory */
	cpuset_t pm_active;		/* active on cpus */
	struct pmap_statistics pm_stats; /* pmap statistics */
	TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */
};
|
|
|
|
|
|
|
|
typedef struct pmap *pmap_t;
|
|
|
|
|
|
|
|
#ifdef _KERNEL
|
2009-05-07 05:42:13 +00:00
|
|
|
extern struct pmap kernel_pmap_store;
|
|
|
|
#define kernel_pmap (&kernel_pmap_store)
|
2004-05-14 11:46:45 +00:00
|
|
|
#define pmap_kernel() kernel_pmap
|
2004-11-07 23:01:36 +00:00
|
|
|
|
2006-06-06 04:32:20 +00:00
|
|
|
#define PMAP_ASSERT_LOCKED(pmap) \
|
|
|
|
mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
|
|
|
|
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
|
|
|
|
#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx)
|
|
|
|
#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \
|
|
|
|
NULL, MTX_DEF | MTX_DUPOK)
|
|
|
|
#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx)
|
|
|
|
#define PMAP_MTX(pmap) (&(pmap)->pm_mtx)
|
|
|
|
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
|
|
|
|
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
|
2004-05-14 11:46:45 +00:00
|
|
|
#endif
|
|
|
|
|
2004-09-23 21:54:25 +00:00
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
/*
|
|
|
|
* For each vm_page_t, there is a list of all currently valid virtual
|
2006-11-13 06:26:57 +00:00
|
|
|
* mappings of that page. An entry is a pv_entry_t, the list is pv_list.
|
2004-05-14 11:46:45 +00:00
|
|
|
*/
|
|
|
|
typedef struct pv_entry {
	pmap_t pv_pmap;			/* pmap where mapping lies */
	vm_offset_t pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;	/* linkage on the page's pv_list */
	TAILQ_ENTRY(pv_entry) pv_plist;	/* linkage on the pmap's pm_pvlist */
	int pv_flags;			/* flags (wired, etc...) */
} *pv_entry_t;
|
|
|
|
|
|
|
|
#ifdef _KERNEL
|
|
|
|
|
|
|
|
boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* virtual address to page table entry and
|
|
|
|
* to physical address. Likewise for alternate address space.
|
|
|
|
* Note: these work recursively, thus vtopte of a pte will give
|
|
|
|
* the corresponding pde that in turn maps it.
|
|
|
|
*/
|
|
|
|
|
2004-09-23 21:54:25 +00:00
|
|
|
/*
|
|
|
|
* The current top of kernel VM.
|
|
|
|
*/
|
|
|
|
extern vm_offset_t pmap_curmaxkvaddr;
|
|
|
|
|
2004-07-12 21:22:40 +00:00
|
|
|
struct pcb;
|
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
void pmap_set_pcb_pagedir(pmap_t, struct pcb *);
|
|
|
|
/* Virtual address to page table entry */
|
|
|
|
static __inline pt_entry_t *
|
|
|
|
vtopte(vm_offset_t va)
|
|
|
|
{
|
|
|
|
pd_entry_t *pdep;
|
|
|
|
pt_entry_t *ptep;
|
|
|
|
|
|
|
|
if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
|
|
|
|
return (NULL);
|
|
|
|
return (ptep);
|
|
|
|
}
|
|
|
|
|
2011-02-05 03:36:34 +00:00
|
|
|
extern vm_paddr_t phys_avail[];
|
2004-05-14 11:46:45 +00:00
|
|
|
extern vm_offset_t virtual_avail;
|
|
|
|
extern vm_offset_t virtual_end;
|
|
|
|
|
|
|
|
void pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
|
2012-08-15 03:03:03 +00:00
|
|
|
int pmap_change_attr(vm_offset_t, vm_size_t, int);
|
2004-05-14 11:46:45 +00:00
|
|
|
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
|
2006-03-01 23:04:25 +00:00
|
|
|
void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
|
2008-11-06 16:20:27 +00:00
|
|
|
void *pmap_kenter_temp(vm_paddr_t pa, int i);
|
Instead of using sysarch() to store-retrieve the tp, add a magic address,
ARM_TP_ADDRESS, where the tp will be stored. On CPUs that support it, a cache
line will be allocated and locked for this address, so that it will never go
to RAM. On CPUs that does not, a page is allocated for it (it will be a bit
slower, and is wrong for SMP, but should be fine for UP).
The tp is still stored in the mdthread struct, and at each context switch,
ARM_TP_ADDRESS gets updated.
Suggested by: davidxu
2005-02-26 18:59:01 +00:00
|
|
|
void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
|
2012-09-27 05:39:42 +00:00
|
|
|
vm_paddr_t pmap_kextract(vm_offset_t va);
|
2004-05-14 11:46:45 +00:00
|
|
|
void pmap_kremove(vm_offset_t);
|
|
|
|
void *pmap_mapdev(vm_offset_t, vm_size_t);
|
|
|
|
void pmap_unmapdev(vm_offset_t, vm_size_t);
|
|
|
|
vm_page_t pmap_use_pt(pmap_t, vm_offset_t);
|
|
|
|
void pmap_debug(int);
|
|
|
|
void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
|
|
|
|
void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
|
|
|
|
vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
|
|
|
|
void
|
|
|
|
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
|
|
|
|
int cache);
|
|
|
|
int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
|
2012-08-15 03:03:03 +00:00
|
|
|
int pmap_dmap_iscurrent(pmap_t pmap);
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Definitions for MMU domains
|
|
|
|
*/
|
2007-05-19 12:47:34 +00:00
|
|
|
#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */
|
|
|
|
#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The new pmap ensures that page-tables are always mapping Write-Thru.
|
|
|
|
* Thus, on some platforms we can run fast and loose and avoid syncing PTEs
|
|
|
|
* on every change.
|
|
|
|
*
|
|
|
|
* Unfortunately, not all CPUs have a write-through cache mode. So we
|
|
|
|
* define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
|
|
|
|
* and if there is the chance for PTE syncs to be needed, we define
|
|
|
|
* PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
|
|
|
|
* the code.
|
|
|
|
*/
|
|
|
|
extern int pmap_needs_pte_sync;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These macros define the various bit masks in the PTE.
|
|
|
|
*
|
|
|
|
* We use these macros since we use different bits on different processor
|
|
|
|
* models.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C)
|
2007-07-27 14:45:04 +00:00
|
|
|
#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
|
|
|
|
L1_S_XSCALE_TEX(TEX_XSCALE_T))
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#define L2_L_CACHE_MASK_generic (L2_B|L2_C)
|
2007-07-27 14:45:04 +00:00
|
|
|
#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
|
|
|
|
L2_XSCALE_L_TEX(TEX_XSCALE_T))
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#define L2_S_PROT_U_generic (L2_AP(AP_U))
|
|
|
|
#define L2_S_PROT_W_generic (L2_AP(AP_W))
|
|
|
|
#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W)
|
|
|
|
|
|
|
|
#define L2_S_PROT_U_xscale (L2_AP0(AP_U))
|
|
|
|
#define L2_S_PROT_W_xscale (L2_AP0(AP_W))
|
|
|
|
#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W)
|
|
|
|
|
|
|
|
#define L2_S_CACHE_MASK_generic (L2_B|L2_C)
|
2007-07-27 14:45:04 +00:00
|
|
|
#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
|
|
|
|
L2_XSCALE_T_TEX(TEX_XSCALE_X))
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP)
|
|
|
|
#define L1_S_PROTO_xscale (L1_TYPE_S)
|
|
|
|
|
|
|
|
#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2)
|
|
|
|
#define L1_C_PROTO_xscale (L1_TYPE_C)
|
|
|
|
|
|
|
|
#define L2_L_PROTO (L2_TYPE_L)
|
|
|
|
|
|
|
|
#define L2_S_PROTO_generic (L2_TYPE_S)
|
|
|
|
#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* User-visible names for the ones that vary with MMU class.
|
|
|
|
*/
|
2012-08-15 03:03:03 +00:00
|
|
|
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
|
|
|
#define L2_AP(x) (L2_AP0(x))
|
|
|
|
#else
|
|
|
|
#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
|
|
|
|
#endif
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
#if ARM_NMMUS > 1
|
|
|
|
/* More than one MMU class configured; use variables. */
|
|
|
|
#define L2_S_PROT_U pte_l2_s_prot_u
|
|
|
|
#define L2_S_PROT_W pte_l2_s_prot_w
|
|
|
|
#define L2_S_PROT_MASK pte_l2_s_prot_mask
|
|
|
|
|
|
|
|
#define L1_S_CACHE_MASK pte_l1_s_cache_mask
|
|
|
|
#define L2_L_CACHE_MASK pte_l2_l_cache_mask
|
|
|
|
#define L2_S_CACHE_MASK pte_l2_s_cache_mask
|
|
|
|
|
|
|
|
#define L1_S_PROTO pte_l1_s_proto
|
|
|
|
#define L1_C_PROTO pte_l1_c_proto
|
|
|
|
#define L2_S_PROTO pte_l2_s_proto
|
|
|
|
|
|
|
|
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
|
|
|
|
#define L2_S_PROT_U L2_S_PROT_U_generic
|
|
|
|
#define L2_S_PROT_W L2_S_PROT_W_generic
|
|
|
|
#define L2_S_PROT_MASK L2_S_PROT_MASK_generic
|
|
|
|
|
|
|
|
#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic
|
|
|
|
#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic
|
|
|
|
#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic
|
|
|
|
|
|
|
|
#define L1_S_PROTO L1_S_PROTO_generic
|
|
|
|
#define L1_C_PROTO L1_C_PROTO_generic
|
|
|
|
#define L2_S_PROTO L2_S_PROTO_generic
|
|
|
|
|
|
|
|
#elif ARM_MMU_XSCALE == 1
|
|
|
|
#define L2_S_PROT_U L2_S_PROT_U_xscale
|
|
|
|
#define L2_S_PROT_W L2_S_PROT_W_xscale
|
|
|
|
#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale
|
|
|
|
|
|
|
|
#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale
|
|
|
|
#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale
|
|
|
|
#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale
|
|
|
|
|
|
|
|
#define L1_S_PROTO L1_S_PROTO_xscale
|
|
|
|
#define L1_C_PROTO L1_C_PROTO_xscale
|
|
|
|
#define L2_S_PROTO L2_S_PROTO_xscale
|
|
|
|
|
2012-08-15 03:03:03 +00:00
|
|
|
#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
|
|
|
|
|
|
|
#define L2_S_PROT_U (L2_AP0(2)) /* user access */
|
|
|
|
#define L2_S_PROT_R (L2_APX|L2_AP0(1)) /* read access */
|
|
|
|
|
|
|
|
#define L2_S_PROT_MASK (L2_S_PROT_U|L2_S_PROT_R)
|
|
|
|
#define L2_S_WRITABLE(pte) (!(pte & L2_APX))
|
|
|
|
|
|
|
|
#ifndef SMP
|
|
|
|
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C)
|
|
|
|
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C)
|
|
|
|
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C)
|
|
|
|
#else
|
|
|
|
#define L1_S_CACHE_MASK (L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
|
|
|
|
#define L2_L_CACHE_MASK (L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
|
|
|
#define L2_S_CACHE_MASK (L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
|
|
|
|
#endif /* SMP */
|
|
|
|
|
|
|
|
#define L1_S_PROTO (L1_TYPE_S)
|
|
|
|
#define L1_C_PROTO (L1_TYPE_C)
|
|
|
|
#define L2_S_PROTO (L2_TYPE_S)
|
|
|
|
|
|
|
|
#ifndef SMP
|
|
|
|
#define ARM_L1S_STRONG_ORD (0)
|
|
|
|
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
|
|
|
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
|
|
|
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1))
|
|
|
|
#define ARM_L1S_NRML_IWT_OWT (L1_S_C)
|
|
|
|
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B)
|
|
|
|
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B)
|
|
|
|
|
|
|
|
#define ARM_L2L_STRONG_ORD (0)
|
|
|
|
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
|
|
|
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
|
|
|
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1))
|
|
|
|
#define ARM_L2L_NRML_IWT_OWT (L2_C)
|
|
|
|
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B)
|
|
|
|
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B)
|
|
|
|
|
|
|
|
#define ARM_L2S_STRONG_ORD (0)
|
|
|
|
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
|
|
|
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
|
|
|
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1))
|
|
|
|
#define ARM_L2S_NRML_IWT_OWT (L2_C)
|
|
|
|
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B)
|
|
|
|
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B)
|
|
|
|
#else
|
|
|
|
#define ARM_L1S_STRONG_ORD (0)
|
|
|
|
#define ARM_L1S_DEVICE_NOSHARE (L1_S_TEX(2))
|
|
|
|
#define ARM_L1S_DEVICE_SHARE (L1_S_B)
|
|
|
|
#define ARM_L1S_NRML_NOCACHE (L1_S_TEX(1)|L1_SHARED)
|
|
|
|
#define ARM_L1S_NRML_IWT_OWT (L1_S_C|L1_SHARED)
|
|
|
|
#define ARM_L1S_NRML_IWB_OWB (L1_S_C|L1_S_B|L1_SHARED)
|
|
|
|
#define ARM_L1S_NRML_IWBA_OWBA (L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)
|
|
|
|
|
|
|
|
#define ARM_L2L_STRONG_ORD (0)
|
|
|
|
#define ARM_L2L_DEVICE_NOSHARE (L2_L_TEX(2))
|
|
|
|
#define ARM_L2L_DEVICE_SHARE (L2_B)
|
|
|
|
#define ARM_L2L_NRML_NOCACHE (L2_L_TEX(1)|L2_SHARED)
|
|
|
|
#define ARM_L2L_NRML_IWT_OWT (L2_C|L2_SHARED)
|
|
|
|
#define ARM_L2L_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
|
|
|
#define ARM_L2L_NRML_IWBA_OWBA (L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)
|
|
|
|
|
|
|
|
#define ARM_L2S_STRONG_ORD (0)
|
|
|
|
#define ARM_L2S_DEVICE_NOSHARE (L2_S_TEX(2))
|
|
|
|
#define ARM_L2S_DEVICE_SHARE (L2_B)
|
|
|
|
#define ARM_L2S_NRML_NOCACHE (L2_S_TEX(1)|L2_SHARED)
|
|
|
|
#define ARM_L2S_NRML_IWT_OWT (L2_C|L2_SHARED)
|
|
|
|
#define ARM_L2S_NRML_IWB_OWB (L2_C|L2_B|L2_SHARED)
|
|
|
|
#define ARM_L2S_NRML_IWBA_OWBA (L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
|
|
|
|
#endif /* SMP */
|
2004-05-14 11:46:45 +00:00
|
|
|
#endif /* ARM_NMMUS > 1 */
|
|
|
|
|
|
|
|
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
|
|
|
|
#define PMAP_NEEDS_PTE_SYNC 1
|
|
|
|
#define PMAP_INCLUDE_PTE_SYNC
|
2007-07-27 14:45:04 +00:00
|
|
|
#elif defined(CPU_XSCALE_81342)
|
|
|
|
#define PMAP_NEEDS_PTE_SYNC 1
|
|
|
|
#define PMAP_INCLUDE_PTE_SYNC
|
2004-05-14 11:46:45 +00:00
|
|
|
#elif (ARM_MMU_SA1 == 0)
|
|
|
|
#define PMAP_NEEDS_PTE_SYNC 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These macros return various bits based on kernel/user and protection.
|
|
|
|
* Note that the compiler will usually fold these at compile time.
|
|
|
|
*/
|
2012-08-15 03:03:03 +00:00
|
|
|
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0
|
|
|
|
|
|
|
|
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
|
|
|
#define L1_S_PROT_W (L1_S_AP(AP_W))
|
|
|
|
#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W)
|
|
|
|
#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W)
|
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))
|
|
|
|
|
2012-08-15 03:03:03 +00:00
|
|
|
#define L2_L_PROT_U (L2_AP(AP_U))
|
|
|
|
#define L2_L_PROT_W (L2_AP(AP_W))
|
|
|
|
#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W)
|
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))
|
|
|
|
|
|
|
|
#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
|
2012-08-15 03:03:03 +00:00
|
|
|
#else
|
|
|
|
#define L1_S_PROT_U (L1_S_AP(AP_U))
|
|
|
|
#define L1_S_PROT_MASK (L1_S_APX|L1_S_AP(0x3))
|
|
|
|
#define L1_S_WRITABLE(pd) (!((pd) & L1_S_APX))
|
|
|
|
|
|
|
|
#define L1_S_PROT(ku, pr) (L1_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L1_S_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L1_S_APX : 0)))
|
|
|
|
|
|
|
|
#define L2_L_PROT_MASK (L2_APX|L2_AP0(0x3))
|
|
|
|
#define L2_L_PROT(ku, pr) (L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
|
|
|
|
|
|
|
#define L2_S_PROT(ku, pr) (L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
|
|
|
|
(((pr) & VM_PROT_WRITE) ? L2_APX : 0)))
|
|
|
|
|
|
|
|
#endif
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Macros to test if a mapping is mappable with an L1 Section mapping
|
|
|
|
* or an L2 Large Page mapping.
|
|
|
|
*/
|
|
|
|
#define L1_S_MAPPABLE_P(va, pa, size) \
|
|
|
|
((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
|
|
|
|
|
|
|
|
#define L2_L_MAPPABLE_P(va, pa, size) \
|
|
|
|
((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Provide a fallback in case we were not able to determine it at
|
|
|
|
* compile-time.
|
|
|
|
*/
|
|
|
|
#ifndef PMAP_NEEDS_PTE_SYNC
|
|
|
|
#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync
|
|
|
|
#define PMAP_INCLUDE_PTE_SYNC
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#define PTE_SYNC(pte) \
|
|
|
|
do { \
|
2007-07-27 14:45:04 +00:00
|
|
|
if (PMAP_NEEDS_PTE_SYNC) { \
|
2004-05-14 11:46:45 +00:00
|
|
|
cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
|
2007-07-27 14:45:04 +00:00
|
|
|
cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
|
2011-12-15 12:14:15 +00:00
|
|
|
} else \
|
|
|
|
cpu_drain_writebuf(); \
|
2004-05-14 11:46:45 +00:00
|
|
|
} while (/*CONSTCOND*/0)
|
|
|
|
|
|
|
|
#define PTE_SYNC_RANGE(pte, cnt) \
|
|
|
|
do { \
|
|
|
|
if (PMAP_NEEDS_PTE_SYNC) { \
|
|
|
|
cpu_dcache_wb_range((vm_offset_t)(pte), \
|
|
|
|
(cnt) << 2); /* * sizeof(pt_entry_t) */ \
|
2007-07-27 14:45:04 +00:00
|
|
|
cpu_l2cache_wb_range((vm_offset_t)(pte), \
|
|
|
|
(cnt) << 2); /* * sizeof(pt_entry_t) */ \
|
2011-12-15 12:14:15 +00:00
|
|
|
} else \
|
|
|
|
cpu_drain_writebuf(); \
|
2004-05-14 11:46:45 +00:00
|
|
|
} while (/*CONSTCOND*/0)
|
|
|
|
|
|
|
|
extern pt_entry_t pte_l1_s_cache_mode;
|
|
|
|
extern pt_entry_t pte_l1_s_cache_mask;
|
|
|
|
|
|
|
|
extern pt_entry_t pte_l2_l_cache_mode;
|
|
|
|
extern pt_entry_t pte_l2_l_cache_mask;
|
|
|
|
|
|
|
|
extern pt_entry_t pte_l2_s_cache_mode;
|
|
|
|
extern pt_entry_t pte_l2_s_cache_mask;
|
|
|
|
|
|
|
|
extern pt_entry_t pte_l1_s_cache_mode_pt;
|
|
|
|
extern pt_entry_t pte_l2_l_cache_mode_pt;
|
|
|
|
extern pt_entry_t pte_l2_s_cache_mode_pt;
|
|
|
|
|
|
|
|
extern pt_entry_t pte_l2_s_prot_u;
|
|
|
|
extern pt_entry_t pte_l2_s_prot_w;
|
|
|
|
extern pt_entry_t pte_l2_s_prot_mask;
|
2012-06-13 05:02:51 +00:00
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
extern pt_entry_t pte_l1_s_proto;
|
|
|
|
extern pt_entry_t pte_l1_c_proto;
|
|
|
|
extern pt_entry_t pte_l2_s_proto;
|
|
|
|
|
|
|
|
extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
|
|
|
|
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);
|
|
|
|
|
2012-08-15 03:03:03 +00:00
|
|
|
#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
|
2004-05-14 11:46:45 +00:00
|
|
|
void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
|
|
|
|
void pmap_zero_page_generic(vm_paddr_t, int, int);
|
|
|
|
|
|
|
|
void pmap_pte_init_generic(void);
|
|
|
|
#if defined(CPU_ARM8)
|
|
|
|
void pmap_pte_init_arm8(void);
|
|
|
|
#endif
|
|
|
|
#if defined(CPU_ARM9)
|
|
|
|
void pmap_pte_init_arm9(void);
|
|
|
|
#endif /* CPU_ARM9 */
|
|
|
|
#if defined(CPU_ARM10)
|
|
|
|
void pmap_pte_init_arm10(void);
|
|
|
|
#endif /* CPU_ARM10 */
|
2012-08-15 03:03:03 +00:00
|
|
|
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
|
|
|
|
void pmap_pte_init_mmu_v6(void);
|
|
|
|
#endif /* CPU_ARM11 */
|
2004-05-14 11:46:45 +00:00
|
|
|
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */
|
|
|
|
|
|
|
|
#if /* ARM_MMU_SA1 == */1
|
|
|
|
void pmap_pte_init_sa1(void);
|
|
|
|
#endif /* ARM_MMU_SA1 == 1 */
|
|
|
|
|
|
|
|
#if ARM_MMU_XSCALE == 1
|
|
|
|
void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
|
|
|
|
void pmap_zero_page_xscale(vm_paddr_t, int, int);
|
|
|
|
|
|
|
|
void pmap_pte_init_xscale(void);
|
|
|
|
|
|
|
|
void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);
|
|
|
|
|
2004-09-23 21:54:25 +00:00
|
|
|
void pmap_use_minicache(vm_offset_t, vm_size_t);
|
2004-05-14 11:46:45 +00:00
|
|
|
#endif /* ARM_MMU_XSCALE == 1 */
|
2007-07-27 14:45:04 +00:00
|
|
|
#if defined(CPU_XSCALE_81342)
|
|
|
|
#define ARM_HAVE_SUPERSECTIONS
|
|
|
|
#endif
|
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
#define PTE_KERNEL 0
|
|
|
|
#define PTE_USER 1
|
|
|
|
#define l1pte_valid(pde) ((pde) != 0)
|
|
|
|
#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S)
|
|
|
|
#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C)
|
|
|
|
#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F)
|
|
|
|
|
|
|
|
#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
|
|
|
|
#define l2pte_valid(pte) ((pte) != 0)
|
|
|
|
#define l2pte_pa(pte) ((pte) & L2_S_FRAME)
|
|
|
|
#define l2pte_minidata(pte) (((pte) & \
|
|
|
|
(L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
|
|
|
|
== (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
|
|
|
|
|
|
|
|
/* L1 and L2 page table macros */
|
|
|
|
#define pmap_pde_v(pde) l1pte_valid(*(pde))
|
|
|
|
#define pmap_pde_section(pde) l1pte_section_p(*(pde))
|
|
|
|
#define pmap_pde_page(pde) l1pte_page_p(*(pde))
|
|
|
|
#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde))
|
|
|
|
|
|
|
|
#define pmap_pte_v(pte) l2pte_valid(*(pte))
|
|
|
|
#define pmap_pte_pa(pte) l2pte_pa(*(pte))
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flags that indicate attributes of pages or mappings of pages.
|
|
|
|
*
|
|
|
|
* The PVF_MOD and PVF_REF flags are stored in the mdpage for each
|
|
|
|
* page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
|
|
|
|
* pv_entry's for each page. They live in the same "namespace" so
|
|
|
|
* that we can clear multiple attributes at a time.
|
|
|
|
*
|
|
|
|
* Note the "non-cacheable" flag generally means the page has
|
|
|
|
* multiple mappings in a given address space.
|
|
|
|
*/
|
|
|
|
#define PVF_MOD 0x01 /* page is modified */
|
|
|
|
#define PVF_REF 0x02 /* page is referenced */
|
|
|
|
#define PVF_WIRED 0x04 /* mapping is wired */
|
|
|
|
#define PVF_WRITE 0x08 /* mapping is writable */
|
|
|
|
#define PVF_EXEC 0x10 /* mapping is executable */
|
Bring in the nice work from Mark Tinguely on arm pmap.
The only downside is that it renames pmap_vac_me_harder() to pmap_fix_cache().
From Mark's email on -arm :
pmap_get_vac_flags(), pmap_vac_me_harder(), pmap_vac_me_kpmap(), and
pmap_vac_me_user() has been rewritten as pmap_fix_cache() to be more
efficient in the kernel map case. I also removed the reference to
the md.kro_mappings, md.krw_mappings, md.uro_mappings, and md.urw_mappings
counts.
In pmap_clearbit(), we can also skip over tests and writeback/invalidations
in the PVF_MOD and PVF_REF cases if those bits are not set in the pv_flag.
PVF_WRITE will turn caching back on and remove the PV_MOD bit.
In pmap_nuke_pv(), the vm_page_flag_clear(pg, PG_WRITEABLE) has been moved
to the pmap_fix_cache().
We can be more agressive in attempting to turn caching back on by calling
pmap_fix_cache() at times that may be appropriate to turn cache on
(a kernel mapping has been removed, a write has been removed or a read
has been removed and we know the mapping does not have multiple write
mappings to a page).
In pmap_remove_pages() the cpu_idcache_wbinv_all() is moved to happen
before the page tables are NULLed because the caches are virtually
indexed and virtually tagged.
In pmap_remove_all(), the pmap_remove_write(m) is added before the
page tables are NULLed because the caches are virtually indexed and
virtually tagged. This also removes the need for the caches fixing routine
(whichever is being used pmap_vac_me_harder() or pmap_fix_cache()) to be
called on any of these mappings.
In pmap_remove(), I simplified the cache cleaning process and removed
extra TLB removals. Basically if more than PMAP_REMOVE_CLEAN_LIST_SIZE
are removed, then just flush the entire cache.
2008-01-31 00:05:40 +00:00
|
|
|
#define PVF_NC 0x20 /* mapping is non-cacheable */
|
|
|
|
#define PVF_MWC 0x40 /* mapping is used multiple times in userland */
|
2009-06-18 20:42:37 +00:00
|
|
|
#define PVF_UNMAN 0x80 /* mapping is unmanaged */
|
2004-05-14 11:46:45 +00:00
|
|
|
|
|
|
|
void vector_page_setprot(int);
|
2004-09-23 21:54:25 +00:00
|
|
|
|
2004-05-14 11:46:45 +00:00
|
|
|
/*
|
2004-09-23 21:54:25 +00:00
|
|
|
* This structure is used by machine-dependent code to describe
|
|
|
|
* static mappings of devices, created at bootstrap time.
|
2004-05-14 11:46:45 +00:00
|
|
|
*/
|
2004-09-23 21:54:25 +00:00
|
|
|
struct pmap_devmap {
	vm_offset_t pd_va;		/* virtual address */
	vm_paddr_t pd_pa;		/* physical address */
	vm_size_t pd_size;		/* size of region */
	vm_prot_t pd_prot;		/* protection code */
	int pd_cache;			/* cache attributes */
};
|
2004-05-14 11:46:45 +00:00
|
|
|
|
2004-09-23 21:54:25 +00:00
|
|
|
const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
|
|
|
|
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);
|
2004-05-14 11:46:45 +00:00
|
|
|
|
2004-09-23 21:54:25 +00:00
|
|
|
void pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
|
|
|
|
void pmap_devmap_register(const struct pmap_devmap *);
|
2004-11-07 23:01:36 +00:00
|
|
|
|
2005-06-07 23:04:24 +00:00
|
|
|
#define SECTION_CACHE 0x1
|
|
|
|
#define SECTION_PT 0x2
|
|
|
|
void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
|
2007-07-27 14:45:04 +00:00
|
|
|
#ifdef ARM_HAVE_SUPERSECTIONS
|
2007-06-11 21:29:26 +00:00
|
|
|
void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
|
2007-07-27 14:45:04 +00:00
|
|
|
#endif
|
2005-06-07 23:04:24 +00:00
|
|
|
|
2004-11-07 23:01:36 +00:00
|
|
|
extern char *_tmppt;
|
|
|
|
|
2005-11-06 16:10:28 +00:00
|
|
|
void pmap_postinit(void);
|
|
|
|
|
2005-06-07 23:04:24 +00:00
|
|
|
#ifdef ARM_USE_SMALL_ALLOC
|
|
|
|
void arm_add_smallalloc_pages(void *, void *, int, int);
|
2006-08-08 20:59:38 +00:00
|
|
|
vm_offset_t arm_ptovirt(vm_paddr_t);
|
|
|
|
void arm_init_smallalloc(void);
|
2005-06-07 23:04:24 +00:00
|
|
|
struct arm_small_page {
	void *addr;			/* page address */
	TAILQ_ENTRY(arm_small_page) pg_list;	/* list linkage */
};
|
2005-10-03 14:15:50 +00:00
|
|
|
|
2005-06-07 23:04:24 +00:00
|
|
|
#endif
|
2006-03-01 23:04:25 +00:00
|
|
|
|
2007-01-17 00:53:05 +00:00
|
|
|
#define ARM_NOCACHE_KVA_SIZE 0x1000000
|
2006-03-01 23:04:25 +00:00
|
|
|
extern vm_offset_t arm_nocache_startaddr;
|
|
|
|
void *arm_remap_nocache(void *, vm_size_t);
|
|
|
|
void arm_unmap_nocache(void *, vm_size_t);
|
|
|
|
|
2005-10-04 16:29:31 +00:00
|
|
|
extern vm_paddr_t dump_avail[];
|
2004-05-14 11:46:45 +00:00
|
|
|
#endif /* _KERNEL */
|
|
|
|
|
|
|
|
#endif /* !LOCORE */
|
|
|
|
|
|
|
|
#endif /* !_MACHINE_PMAP_H_ */
|