2cb0e95f48
Add minimal support for creating stage 2 IPA -> PA mappings.

For this we need to:

 - Create a new vmid set to allocate a vmid for each Virtual Machine
 - Add the missing stage 2 attributes
 - Use these in pmap_enter to create a new mapping
 - Handle stage 2 faults

The vmid set is based on the current asid set that was generalised in
r358328. It adds a function pointer for bhyve to use when the kernel
needs to reset the vmid set. This will need to call into EL2 and
invalidate the TLB.

The stage 2 attributes have been added. To simplify setting these
fields two new functions are added to get the memory type and
protection fields. These are slightly different on stage 1 and stage 2
tables. We then use them in pmap_enter to set the new level 3 entry to
be stored.

The D-cache on all entries is cleaned to the point of coherency. This
is to allow the data to be visible to the VM. To allow userspace to
load code, an invalid entry is created when a new executable mapping is
entered. When the VM tries to use it the I-cache is invalidated. As the
D-cache has already been cleaned this ensures the I-cache is
synchronised with the D-cache.

When the hardware implements a VPIPT I-cache we need to either have the
correct VMID set or invalidate it from EL2. As the host kernel will
have the wrong VMID set we need to call into EL2 to perform the
invalidation. For this a second function pointer is added that is
called when this invalidation is needed.

Sponsored by:	Innovate UK
Differential Revision:	https://reviews.freebsd.org/D23875
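As a rough illustration of the stage 1 versus stage 2 attribute split described above, the sketch below shows the shape such a memory type helper could take. Everything in it (the pte_memattr name, the pmap_stage enum and the bit encodings) is an assumption made for this note, not the kernel's definitions; the real helpers live in the arm64 pmap code.

#include <stdint.h>

typedef uint64_t pt_entry_t;

/* Hypothetical: whether a pmap backs a stage 1 (host) or stage 2 (VM) table. */
enum pmap_stage { PM_STAGE1, PM_STAGE2 };

/*
 * Hypothetical encodings: a stage 1 descriptor stores an index into
 * MAIR_EL1, while a stage 2 descriptor encodes the memory type directly.
 */
#define	ATTR_S1_IDX(x)		(((pt_entry_t)(x) & 0x7) << 2)
#define	ATTR_S2_MEMATTR(x)	(((pt_entry_t)(x) & 0xf) << 2)

static pt_entry_t
pte_memattr(enum pmap_stage stage, unsigned int memattr)
{

	/*
	 * The same caller-visible attribute maps to different descriptor
	 * bits depending on the translation stage.
	 */
	if (stage == PM_STAGE1)
		return (ATTR_S1_IDX(memattr));
	return (ATTR_S2_MEMATTR(memattr));
}

With a helper like this, pmap_enter can simply OR the returned bits into the new level 3 entry without caring which stage the pmap manages.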
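The two EL2 callbacks mentioned above follow the same pattern as the arm64_icache_sync_range pointer declared in the header below: the kernel exports a function pointer with a harmless default, and a hypervisor such as bhyve repoints it at a handler that traps into EL2. A minimal sketch of that hook-style design, with hypothetical names and stub defaults:

#include <stddef.h>
#include <stdint.h>

typedef uintptr_t vm_offset_t;	/* stand-ins for the kernel's types */
typedef size_t vm_size_t;

/* Defaults: with no hypervisor loaded there is nothing to reset or flush. */
static void
stage2_vmid_reset_stub(void)
{
}

static void
stage2_vpipt_icache_inval_stub(vm_offset_t va, vm_size_t size)
{
	(void)va;
	(void)size;
}

/*
 * Hook pointers the host pmap would call; a hypervisor module can install
 * handlers that call into EL2 to invalidate the TLB (when the vmid set is
 * reset) or the VPIPT I-cache.  Names here are illustrative only.
 */
void (*stage2_vmid_reset_hook)(void) = stage2_vmid_reset_stub;
void (*stage2_vpipt_icache_inval_hook)(vm_offset_t, vm_size_t) =
    stage2_vpipt_icache_inval_stub;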
/*-
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _MACHINE_CPUFUNC_H_
#define	_MACHINE_CPUFUNC_H_

static __inline void
breakpoint(void)
{

	__asm("brk #0");
}

#ifdef _KERNEL

#define	HAVE_INLINE_FFS

static __inline __pure2 int
ffs(int mask)
{

	return (__builtin_ffs(mask));
}

#define	HAVE_INLINE_FFSL

static __inline __pure2 int
ffsl(long mask)
{

	return (__builtin_ffsl(mask));
}

#define	HAVE_INLINE_FFSLL

static __inline __pure2 int
ffsll(long long mask)
{

	return (__builtin_ffsll(mask));
}

#define	HAVE_INLINE_FLS

static __inline __pure2 int
fls(int mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clz((u_int)mask));
}

#define	HAVE_INLINE_FLSL

static __inline __pure2 int
flsl(long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzl((u_long)mask));
}

#define	HAVE_INLINE_FLSLL

static __inline __pure2 int
flsll(long long mask)
{

	return (mask == 0 ? 0 :
	    8 * sizeof(mask) - __builtin_clzll((unsigned long long)mask));
}

#include <machine/armreg.h>

void pan_enable(void);

static __inline register_t
dbg_disable(void)
{
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #8 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
dbg_enable(void)
{

	__asm __volatile("msr daifclr, #8");
}

static __inline register_t
intr_disable(void)
{
	/* DAIF is a 32-bit register */
	uint32_t ret;

	__asm __volatile(
	    "mrs %x0, daif   \n"
	    "msr daifset, #2 \n"
	    : "=&r" (ret));

	return (ret);
}

static __inline void
intr_restore(register_t s)
{

	WRITE_SPECIALREG(daif, s);
}

static __inline void
intr_enable(void)
{

	__asm __volatile("msr daifclr, #2");
}

static __inline register_t
get_midr(void)
{
	uint64_t midr;

	midr = READ_SPECIALREG(midr_el1);

	return (midr);
}

static __inline register_t
get_mpidr(void)
{
	uint64_t mpidr;

	mpidr = READ_SPECIALREG(mpidr_el1);

	return (mpidr);
}

static __inline void
clrex(void)
{

	/*
	 * Ensure compiler barrier, otherwise the monitor clear might
	 * occur too late for us ?
	 */
	__asm __volatile("clrex" : : : "memory");
}

static __inline void
set_ttbr0(uint64_t ttbr0)
{

	__asm __volatile(
	    "msr ttbr0_el1, %0 \n"
	    "isb               \n"
	    :
	    : "r" (ttbr0));
}

static __inline void
invalidate_icache(void)
{

	__asm __volatile(
	    "ic ialluis \n"
	    "dsb ish    \n"
	    "isb        \n");
}

static __inline void
invalidate_local_icache(void)
{

	__asm __volatile(
	    "ic iallu   \n"
	    "dsb nsh    \n"
	    "isb        \n");
}

extern bool icache_aliasing;
extern bool icache_vmid;

extern int64_t dcache_line_size;
extern int64_t icache_line_size;
extern int64_t idcache_line_size;
extern int64_t dczva_line_size;

#define	cpu_nullop()			arm64_nullop()
#define	cpufunc_nullop()		arm64_nullop()

#define	cpu_tlb_flushID()		arm64_tlb_flushID()

#define	cpu_dcache_wbinv_range(a, s)	arm64_dcache_wbinv_range((a), (s))
#define	cpu_dcache_inv_range(a, s)	arm64_dcache_inv_range((a), (s))
#define	cpu_dcache_wb_range(a, s)	arm64_dcache_wb_range((a), (s))

extern void (*arm64_icache_sync_range)(vm_offset_t, vm_size_t);

#define	cpu_icache_sync_range(a, s)	arm64_icache_sync_range((a), (s))
#define	cpu_icache_sync_range_checked(a, s)	arm64_icache_sync_range_checked((a), (s))

void arm64_nullop(void);
void arm64_tlb_flushID(void);
void arm64_dic_idc_icache_sync_range(vm_offset_t, vm_size_t);
void arm64_aliasing_icache_sync_range(vm_offset_t, vm_size_t);
int arm64_icache_sync_range_checked(vm_offset_t, vm_size_t);
void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
void arm64_dcache_wb_range(vm_offset_t, vm_size_t);

#endif	/* _KERNEL */
#endif	/* _MACHINE_CPUFUNC_H_ */