50e3ab6bcf
Utilize ASIDs to reduce both the direct and indirect costs of context
switching.  The indirect costs are the unnecessary TLB misses that are
incurred when ASIDs are not used.  In fact, currently, when we perform a
context switch on one processor, we issue a broadcast TLB invalidation
that flushes the TLB contents on every processor.

Mark all user-space ("ttbr0") page table entries with the non-global flag
so that they are cached in the TLB under their ASID.

Correct an error in pmap_pinit0().  The pointer to the root of the page
table was being initialized to the root of the kernel-space page table
rather than a user-space page table.  However, the root of the page table
that was being cached in process 0's md_l0addr field correctly pointed to
a user-space page table.  As long as ASIDs weren't being used, this was
harmless, except that it led to some unnecessary page table switches in
pmap_switch().  Specifically, other kernel processes besides process 0
would have their md_l0addr field set to the root of the kernel-space page
table, and so pmap_switch() would actually change page tables when
switching between process 0 and other kernel processes.

Implement a workaround for Cavium erratum 27456 affecting ThunderX
machines.  (I would like to thank andrew@ for providing the code to
detect the affected machines.)

Address integer overflow in the definition of TCR_ASID_16.

Set up TCR according to the PARange and ASIDBits fields from
ID_AA64MMFR0_EL1.  Previously, TCR_ASID_16 was unconditionally set.

Modify build_l1_block_pagetable so that lower attributes, such as
ATTR_nG, can be specified as a parameter.

Eliminate some unused code.

Earlier versions were tested to varying degrees by: andrew, emaste, markj

MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D21922
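To illustrate the TCR change described above, the following is a minimal C
sketch of deriving the ASID-size bit from ID_AA64MMFR0_EL1 instead of setting
it unconditionally.  The field encodings come from the ARM Architecture
Reference Manual; the macro names and the tcr_asid_field() helper are
illustrative assumptions, not the code from this commit.  The UL suffix on
the bit-36 shift also shows the kind of 32-bit integer overflow fix mentioned
for TCR_ASID_16.

	#include <stdint.h>

	/* ID_AA64MMFR0_EL1.ASIDBits occupies bits [7:4] (ARMv8 ARM). */
	#define	ID_AA64MMFR0_ASIDBITS_SHIFT	4
	#define	ID_AA64MMFR0_ASIDBITS_MASK	\
		(0xfUL << ID_AA64MMFR0_ASIDBITS_SHIFT)
	#define	ID_AA64MMFR0_ASIDBITS_16	0x2UL	/* 16-bit ASIDs */

	/* TCR_EL1.AS is bit 36; "UL" keeps the shift from overflowing an int. */
	#define	TCR_ASID_16			(0x1UL << 36)

	/*
	 * Return the TCR_EL1 ASID-size bit to use, based on how many
	 * ASID bits the CPU implements, instead of setting TCR_ASID_16
	 * blindly.
	 */
	static uint64_t
	tcr_asid_field(uint64_t id_aa64mmfr0)
	{
		uint64_t asidbits;

		asidbits = (id_aa64mmfr0 & ID_AA64MMFR0_ASIDBITS_MASK) >>
		    ID_AA64MMFR0_ASIDBITS_SHIFT;
		return (asidbits == ID_AA64MMFR0_ASIDBITS_16 ?
		    TCR_ASID_16 : 0);
	}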
231 lines · 4.8 KiB · C
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}

#include <machine/armreg.h>

void pan_enable(void);
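/*
 * The daifset/daifclr immediates below select bits of the PSTATE DAIF
 * mask: bit 3 = D (debug), bit 2 = A (SError), bit 1 = I (IRQ), and
 * bit 0 = F (FIQ).  Hence "#8" masks debug exceptions and "#2" masks
 * IRQs.
 */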
static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}
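/*
 * Typical usage brackets a critical section so that the previous
 * interrupt state, not an unconditional enable, is restored:
 *
 *	register_t s;
 *
 *	s = intr_disable();
 *	... code that must run with interrupts masked ...
 *	intr_restore(s);
 */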
static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}
static __inline void
clrex(void)
{

	/*
	 * The "memory" clobber acts as a compiler barrier; without it
	 * the monitor clear could be reordered and occur too late.
	 */
	__asm __volatile("clrex" : : : "memory");
}
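/*
 * The bare 64-bit value written below is more than a page table root:
 * TTBR0_EL1 carries the ASID in its upper bits (bits [63:48] when
 * TCR_EL1.AS enables 16-bit ASIDs), so the caller selects both the
 * user page table and the address space identifier under which its
 * non-global TLB entries are tagged.
 */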
static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}
static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu \n"
	    "dsb nsh  \n"
	    "isb      \n");
}

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

#define	cpu_idcache_wbinv_range(a, s)	arm64_idcache_wbinv_range((a), (s))
#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s) \
	arm64_icache_sync_range_checked((a), (s))
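/*
 * cpu_icache_sync_range() makes newly written instructions (e.g.,
 * debugger breakpoints or freshly loaded code) visible to instruction
 * fetch; the _checked variant reports failure through its return
 * value rather than assuming the range is safely accessible.
 */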
void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif /* _KERNEL */

#endif /* _MACHINE_CPUFUNC_H_ */