Start to remove XScale support from the ARMv4/v5 pmap.

Support for XScale has been removed from the kernel, so we can remove it
from here to help simplify the code.

Sponsored by:	DARPA, AFRL
Andrew Turner 2018-08-15 13:40:16 +00:00
parent 916e7b1252
commit daa5b12a0a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=337843
3 changed files with 0 additions and 332 deletions

View File

@@ -406,10 +406,6 @@ static struct rwlock pvh_global_lock;
void	pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
	    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
	    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#endif
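(For context: these per-MMU prototypes exist because the pmap copy and zero entry points are dispatched through function pointers that the pmap_pte_init_* routines fill in at boot, as seen in pmap_pte_init_xscale() below. A reduced, self-contained sketch of that dispatch pattern, with stand-in type and function names that are not part of the original file:

#include <stdint.h>

/* Stand-in types so the sketch compiles on its own. */
typedef uint32_t vm_paddr_t;
typedef uint32_t vm_offset_t;

static void
copy_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{
	/* CPU-specific copy work would happen here. */
	(void)a_phys; (void)a_offs; (void)b_phys; (void)b_offs; (void)cnt;
}

/* Pointer set once at init time; all callers go through it afterwards. */
static void (*copy_offs_func)(vm_paddr_t, vm_offset_t,
    vm_paddr_t, vm_offset_t, int);

static void
pte_init_example(void)
{
	/* Select the implementation for the MMU actually present. */
	copy_offs_func = copy_offs_generic;
}

int
main(void)
{
	pte_init_example();
	copy_offs_func(0x1000, 0, 0x2000, 0, 4096);
	return (0);
}

Removing the XScale implementations lets every such pointer collapse to the generic version.)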
/*
* This list exists for the benefit of pmap_map_chunk(). It keeps track
@@ -501,176 +497,6 @@ pmap_pte_init_generic(void)
#endif /* ARM_MMU_GENERIC != 0 */
#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA2X0)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpu_ident();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

#ifdef CPU_XSCALE_CORE3
	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
	xscale_use_minidata = 0;
	/* Make sure it is L2-cachable */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T);
	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P;
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T);
	pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode;
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T);
	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;
#else
	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;
#endif

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
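(The ECC disable at the end of this function is the usual CP15 read-modify-write sequence: read the XScale auxiliary control register, clear the bit, write it back. A hypothetical helper, not part of the original file, showing that pattern in isolation; it assumes FreeBSD's __asm/__volatile spellings and the XSCALE_AUXCTL_* bit definitions used above, and is only meaningful on an XScale core:

static __inline void
xscale_auxctl_clearbits(uint32_t bits)
{
	uint32_t auxctl;

	/* Read the auxiliary control register (cp15, c1, c0, 1). */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~bits;
	/* Write the modified value back. */
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

With such a helper, the sequence above would read xscale_auxctl_clearbits(XSCALE_AUXCTL_P).)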
/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */

void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vm_size_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	    va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08x", (u_int32_t) va);
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif
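(The `(size + L2_S_OFFSET) & L2_S_FRAME` expression in xscale_setup_minidata() is the standard round-up-to-page idiom. A self-contained illustration with 4 KiB small-page constants; the values are assumed here rather than taken from the ARM headers:

#include <assert.h>
#include <stdint.h>

#define L2_S_SIZE	0x1000u			/* 4 KiB small page (assumed) */
#define L2_S_OFFSET	(L2_S_SIZE - 1)		/* 0x0fff */
#define L2_S_FRAME	(~L2_S_OFFSET)		/* 0xfffff000 */

int
main(void)
{
	/* 0x1800 bytes (1.5 pages) rounds up to two full pages. */
	uint32_t size = (0x1800u + L2_S_OFFSET) & L2_S_FRAME;

	assert(size == 0x2000u);
	return (0);
}

Adding the offset before masking guarantees the loop over the clean area advances in whole L2_S_SIZE steps without undershooting.)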
/*
* Allocate an L1 translation table for the specified pmap.
* This is called at pmap creation time.
@@ -3984,77 +3810,6 @@ pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
}
#endif /* ARM_MMU_GENERIC != 0 */
#if ARM_MMU_XSCALE == 1
void
pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
{

	if (_arm_bzero && size >= _min_bzero_size &&
	    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
		return;

	mtx_lock(&cmtx);
	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	if (off || size != PAGE_SIZE)
		bzero((void *)(cdstp + off), size);
	else
		bzero_page(cdstp);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}

/*
 * Change the PTEs for the specified kernel mappings such that they
 * will use the mini data cache instead of the main data cache.
 */
void
pmap_use_minicache(vm_offset_t va, vm_size_t size)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, pte;
	vm_offset_t next_bucket, eva;

#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + size;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(kernel_pmap, va);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];

		while (va < next_bucket) {
			pte = *ptep;
			if (!l2pte_minidata(pte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				*ptep = pte & ~L2_B;
			}
			ptep++;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
#endif /* ARM_MMU_XSCALE == 1 */
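(pmap_use_minicache() above walks the VA range one L2 bucket at a time, clamping the last step to the end address so a partial final bucket is handled correctly. The traversal pattern, reduced to a self-contained sketch; the bucket size and the printed chunks are placeholders, not the real L2_NEXT_BUCKET() definition:

#include <stdio.h>

#define PAGE_SIZE	0x1000u
#define BUCKET_SIZE	0x100000u	/* stand-in for the L2 bucket granularity */

/* Round va up to the start of the next bucket. */
static unsigned int
next_bucket(unsigned int va)
{
	return ((va + BUCKET_SIZE) & ~(BUCKET_SIZE - 1));
}

static void
walk_range(unsigned int va, unsigned int size)
{
	unsigned int eva = va + size;

	while (va < eva) {
		unsigned int nb = next_bucket(va);

		if (nb > eva)
			nb = eva;	/* clamp the final, partial bucket */
		printf("bucket chunk: 0x%08x - 0x%08x\n", va, nb);
		for (; va < nb; va += PAGE_SIZE)
			;		/* per-page work would go here */
	}
}

int
main(void)
{
	/* A range that starts and ends mid-bucket. */
	walk_range(0x000ff000u, 0x00202000u);
	return (0);
}

Batching per-bucket lets the real code fetch each l2_bucket once and sync the touched PTE range in a single PTE_SYNC_RANGE() call.)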
/*
* pmap_zero_page zeros the specified hardware page by mapping
* the page into KVM and using bzero to clear its contents.
@@ -4252,72 +4007,6 @@ pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
}
#endif /* ARM_MMU_GENERIC != 0 */
#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
{
#if 0
	/* XXX: Only needed for pmap_clean_page(), which is commented out. */
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#endif

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
#if 0
	/*
	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
	 * pmap_copy_page().
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
#endif

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page.  Invalidate the TLB
	 * as required.
	 */
	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}

void
pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */
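(Both copy routines above construct their temporary mappings by OR-ing together a small-page descriptor: the L2 proto bits, the page's physical address, the kernel protection bits, and the cache/TEX attributes. A standalone illustration of that composition; all bit values here are placeholders for the sake of the example, not the authoritative ARM definitions:

#include <assert.h>
#include <stdint.h>

/* Placeholder PTE field values, for illustration only. */
#define L2_S_PROTO	0x00000002u	/* small-page descriptor type */
#define L2_S_PROT_W	0x00000030u	/* kernel read/write AP bits */
#define L2_B		0x00000004u	/* bufferable */
#define L2_C		0x00000008u	/* cacheable */
#define L2_S_FRAME	0xfffff000u	/* physical frame mask */

static uint32_t
make_kernel_pte(uint32_t pa, uint32_t prot, uint32_t cache)
{
	/* Frame number and attribute fields occupy disjoint bit ranges. */
	return ((pa & L2_S_FRAME) | L2_S_PROTO | prot | cache);
}

int
main(void)
{
	uint32_t pte = make_kernel_pte(0x20001000u, L2_S_PROT_W, L2_B | L2_C);

	assert((pte & L2_S_FRAME) == 0x20001000u);	/* address preserved */
	assert((pte & L2_S_PROTO) != 0);		/* descriptor type set */
	return (0);
}

Because the fields are disjoint, a plain OR is sufficient; the PTE_SYNC() and TLB flush that follow in the real code are what make the new mapping visible to the MMU.)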
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

View File

@@ -105,11 +105,6 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
	/* Point the pcb to the top of the stack */
	pcb2 = (struct pcb *)
	    (td2->td_kstack + td2->td_kstack_pages * PAGE_SIZE) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td2->td_kstack, td2->td_kstack_pages * PAGE_SIZE);
#endif
#endif

#ifdef VFP
	/* Store actual state of VFP */
	if (curthread == td1) {
@@ -311,12 +306,6 @@ cpu_thread_alloc(struct thread *td)
	 * the ARM EABI.
	 */
	td->td_frame = (struct trapframe *)((caddr_t)td->td_pcb) - 1;
#ifdef __XSCALE__
#ifndef CPU_XSCALE_CORE3
	pmap_use_minicache(td->td_kstack, td->td_kstack_pages * PAGE_SIZE);
#endif
#endif
}
void

View File

@@ -464,16 +464,6 @@ void pmap_zero_page_generic(vm_paddr_t, int, int);
void	pmap_pte_init_generic(void);
#endif /* ARM_MMU_GENERIC != 0 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);
void	pmap_pte_init_xscale(void);
void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);
void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif