Add arm64 pointer authentication support

Pointer authentication provides instructions that insert a Pointer
Authentication Code (PAC) into a register, based on an address and a
modifier, and that later check whether the PAC is correct. If the check
fails, the instruction either returns an invalid address or faults to
the kernel.

Many of these instructions are NOPs when the feature is disabled or on
earlier revisions of the architecture, so they can be used
unconditionally, for example to sign the return address before pushing
it to the stack, making Return-Oriented Programming (ROP) attacks more
difficult on hardware that supports them.
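
As an illustration only (this code is not part of the commit and the
function names are made up), signing and authenticating an arbitrary
pointer with the ia key looks roughly as follows, using the same
inline-assembly style as ptrauth.c below. The general-register forms
used here require the extension to be present; the HINT-space forms the
compiler emits for return addresses (paciasp/autiasp) are the ones that
execute as NOPs on older CPUs.

#include <stdint.h>

static inline uintptr_t
pac_sign_ia(uintptr_t ptr, uint64_t modifier)
{
        __asm __volatile(
            ".arch_extension pauth              \n"
            "pacia %0, %1                       \n"
            ".arch_extension nopauth            \n"
            : "+r"(ptr) : "r"(modifier));
        return (ptr);
}

static inline uintptr_t
pac_auth_ia(uintptr_t ptr, uint64_t modifier)
{
        /*
         * On failure the result is unusable: either a non-canonical
         * address, or an immediate fault on CPUs with FEAT_FPAC.
         */
        __asm __volatile(
            ".arch_extension pauth              \n"
            "autia %0, %1                       \n"
            ".arch_extension nopauth            \n"
            : "+r"(ptr) : "r"(modifier));
        return (ptr);
}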

The kernel manages five 128-bit signing keys: two instruction keys, two
data keys, and a generic key. Each instruction uses one of these keys
when signing a register. Instructions that use the first four keys store
the PAC in the register being signed; instructions that use the generic
key store the PAC in a separate register.
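
A minimal sketch of the generic-key case, again illustrative only and
with a made-up function name: pacga writes the PAC into a separate
destination register instead of folding it into the pointer, with the
code in the upper 32 bits and the lower 32 bits zero, so it can act as
a keyed MAC over an arbitrary 64-bit value.

static inline uint64_t
pac_compute_generic(uint64_t data, uint64_t modifier)
{
        uint64_t pac;

        __asm __volatile(
            ".arch_extension pauth              \n"
            "pacga %0, %1, %2                   \n"
            ".arch_extension nopauth            \n"
            : "=r"(pac) : "r"(data), "r"(modifier));
        return (pac);
}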

Currently all userspace threads within a process share the same keys,
and a new set of userspace keys is generated when a new process image
is executed. This means a forked child shares its keys with its parent
until it calls one of the exec system calls.

In the kernel we allow the use of one of the instruction keys, the ia
key, which is used to sign return addresses in function calls. Unlike
userspace, each kernel thread has its own randomly generated key.

Thread0 has a static key, as does the early boot code on secondary
CPUs. This should be safe as there is minimal user interaction with
these threads; in the future we could generate random keys when the
Armv8.5 random number generation instructions are present.
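
If it lived in ptrauth.c, a sketch of that possible improvement could
look roughly like the following. The function name is hypothetical, and
it assumes the ID_AA64ISAR0 RNDR field accessors from armreg.h and the
struct ptrauth_key type added by this commit.

static bool
ptrauth_rndr_key(struct ptrauth_key *key)
{
        uint64_t isar0;

        isar0 = READ_SPECIALREG(id_aa64isar0_el1);
        if (ID_AA64ISAR0_RNDR_VAL(isar0) == ID_AA64ISAR0_RNDR_NONE)
                return (false);

        /*
         * RNDR is S3_3_C2_C4_0; using the encoded name avoids needing
         * +rng assembler support.  A complete version would also check
         * PSTATE.Z for a failed read and retry.
         */
        __asm __volatile("mrs %0, s3_3_c2_c4_0" : "=r"(key->pa_key_lo));
        __asm __volatile("mrs %0, s3_3_c2_c4_0" : "=r"(key->pa_key_hi));
        return (true);
}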

Sponsored by:	The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D31261
Author: Andrew Turner
Date:   2021-07-08 13:15:55 +00:00
Commit: 85b7c566f1 (parent a3cea15680)
20 changed files, 422 additions(+), 19 deletions(-)


@ -122,7 +122,7 @@ db_stack_trace_cmd(struct thread *td, struct unwind_state *frame)
}
frame->fp = tf->tf_x[29];
frame->pc = tf->tf_elr;
frame->pc = ADDR_MAKE_CANONICAL(tf->tf_elr);
if (!INKERNEL(frame->fp))
break;
} else {


@ -88,6 +88,9 @@ __FBSDID("$FreeBSD$");
blr x1
1:
ldr x0, [x18, #PC_CURTHREAD]
bl ptrauth_exit_el0
ldr x0, [x18, #(PC_CURTHREAD)]
bl dbg_monitor_enter
@ -114,6 +117,9 @@ __FBSDID("$FreeBSD$");
mov x1, sp
bl dbg_monitor_exit
ldr x0, [x18, #PC_CURTHREAD]
bl ptrauth_enter_el0
/* Remove the SSBD (CVE-2018-3639) workaround if needed */
ldr x1, [x18, #PC_SSBD]
cbz x1, 1f


@ -397,6 +397,9 @@ exec_setregs(struct thread *td, struct image_params *imgp, uintptr_t stack)
* Clear debug register state. It is not applicable to the new process.
*/
bzero(&pcb->pcb_dbg_regs, sizeof(pcb->pcb_dbg_regs));
/* Generate new pointer authentication keys */
ptrauth_exec(td);
}
/* Sanity check these are the same size, they will be memcpy'd to and from */


@ -654,11 +654,21 @@ static struct mrs_field_value id_aa64isar1_gpi[] = {
MRS_FIELD_VALUE_END,
};
static struct mrs_field_hwcap id_aa64isar1_gpi_caps[] = {
MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPI_IMPL),
MRS_HWCAP_END
};
static struct mrs_field_value id_aa64isar1_gpa[] = {
MRS_FIELD_VALUE_NONE_IMPL(ID_AA64ISAR1, GPA, NONE, IMPL),
MRS_FIELD_VALUE_END,
};
static struct mrs_field_hwcap id_aa64isar1_gpa_caps[] = {
MRS_HWCAP(&elf_hwcap, HWCAP_PACG, ID_AA64ISAR1_GPA_IMPL),
MRS_HWCAP_END
};
static struct mrs_field_value id_aa64isar1_lrcpc[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_LRCPC_RCPC_8_3, "RCPC-8.3"),
@ -699,6 +709,11 @@ static struct mrs_field_value id_aa64isar1_api[] = {
MRS_FIELD_VALUE_END,
};
static struct mrs_field_hwcap id_aa64isar1_api_caps[] = {
MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_API_PAC),
MRS_HWCAP_END
};
static struct mrs_field_value id_aa64isar1_apa[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_APA_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_APA_PAC, "APA PAC"),
@ -706,6 +721,11 @@ static struct mrs_field_value id_aa64isar1_apa[] = {
MRS_FIELD_VALUE_END,
};
static struct mrs_field_hwcap id_aa64isar1_apa_caps[] = {
MRS_HWCAP(&elf_hwcap, HWCAP_PACA, ID_AA64ISAR1_APA_PAC),
MRS_HWCAP_END
};
static struct mrs_field_value id_aa64isar1_dpb[] = {
MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_NONE, ""),
MRS_FIELD_VALUE(ID_AA64ISAR1_DPB_DCCVAP, "DCPoP"),
@ -732,16 +752,20 @@ static struct mrs_field id_aa64isar1_fields[] = {
id_aa64isar1_sb_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, FRINTTS, false, MRS_LOWER,
id_aa64isar1_frintts, id_aa64isar1_frintts_caps),
MRS_FIELD(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi),
MRS_FIELD(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa),
MRS_FIELD_HWCAP(ID_AA64ISAR1, GPI, false, MRS_EXACT, id_aa64isar1_gpi,
id_aa64isar1_gpi_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, GPA, false, MRS_EXACT, id_aa64isar1_gpa,
id_aa64isar1_gpa_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, LRCPC, false, MRS_LOWER,
id_aa64isar1_lrcpc, id_aa64isar1_lrcpc_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, FCMA, false, MRS_LOWER,
id_aa64isar1_fcma, id_aa64isar1_fcma_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, JSCVT, false, MRS_LOWER,
id_aa64isar1_jscvt, id_aa64isar1_jscvt_caps),
MRS_FIELD(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api),
MRS_FIELD(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa),
MRS_FIELD_HWCAP(ID_AA64ISAR1, API, false, MRS_EXACT, id_aa64isar1_api,
id_aa64isar1_api_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, APA, false, MRS_EXACT, id_aa64isar1_apa,
id_aa64isar1_apa_caps),
MRS_FIELD_HWCAP(ID_AA64ISAR1, DPB, false, MRS_LOWER, id_aa64isar1_dpb,
id_aa64isar1_dpb_caps),
MRS_FIELD_END,


@ -152,6 +152,16 @@ virtdone:
bl initarm
/* We are done with the boot params */
add sp, sp, #BOOTPARAMS_SIZE
/*
* Enable pointer authentication in the kernel. We set the keys for
* thread0 in initarm, so we have to wait until it returns to enable it.
* If we were to enable it in initarm then any authentication when
* returning would fail as it was called with pointer authentication
* disabled.
*/
bl ptrauth_start
bl mi_startup
/* We should not get here */
@ -239,7 +249,7 @@ LENTRY(drop_to_el1)
ret
1:
/* Configure the Hypervisor */
mov x2, #(HCR_RW)
ldr x2, =(HCR_RW | HCR_APK | HCR_API)
msr hcr_el2, x2
/* Load the Virtualization Process ID Register */


@ -374,6 +374,7 @@ init_proc0(vm_offset_t kstack)
thread0.td_pcb->pcb_fpusaved = &thread0.td_pcb->pcb_fpustate;
thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
thread0.td_frame = &proc0_tf;
ptrauth_thread0(&thread0);
pcpup->pc_curpcb = thread0.td_pcb;
/*
@ -832,6 +833,13 @@ initarm(struct arm64_bootparams *abp)
panic("Invalid bus configuration: %s",
kern_getenv("kern.cfg.order"));
/*
* Check if pointer authentication is available on this system, and
* if so enable its use. This needs to be called before init_proc0
* as that will configure the thread0 pointer authentication keys.
*/
ptrauth_init();
/*
* Dump the boot metadata. We have to wait for cninit() since console
* output is required. If it's grossly incorrect the kernel will never


@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_map.h>
#include <machine/machdep.h>
#include <machine/cpu.h>
#include <machine/debug_monitor.h>
#include <machine/intr.h>
#include <machine/smp.h>
@ -208,6 +209,8 @@ init_secondary(uint64_t cpu)
pmap_t pmap0;
u_int mpidr;
ptrauth_mp_start(cpu);
/*
* Verify that the value passed in 'cpu' argument (aka context_id) is
* valid. Some older U-Boot based PSCI implementations are buggy,


@ -6668,11 +6668,11 @@ pmap_activate(struct thread *td)
}
/*
* To eliminate the unused parameter "old", we would have to add an instruction
* to cpu_switch().
* Activate the thread we are switching to.
* To simplify the assembly in cpu_throw return the new thread's pcb.
*/
struct pcb *
pmap_switch(struct thread *old __unused, struct thread *new)
pmap_switch(struct thread *new)
{
pcpu_bp_harden bp_harden;
struct pcb *pcb;

sys/arm64/arm64/ptrauth.c (new file, 262 lines)

@ -0,0 +1,262 @@
/*-
* Copyright (c) 2021 The FreeBSD Foundation
*
* This software was developed by Andrew Turner under sponsorship from
* the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This manages pointer authentication. As it needs to enable the use of
* pointer authentication and change the keys, we must build this file with
* pointer authentication disabled.
*/
#ifdef __ARM_FEATURE_PAC_DEFAULT
#error Must be built with pointer authentication disabled
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <machine/armreg.h>
#include <machine/cpu.h>
#define SCTLR_PTRAUTH (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)
static bool __read_mostly enable_ptrauth = false;
/* Functions called from assembly. */
void ptrauth_start(void);
struct thread *ptrauth_switch(struct thread *);
void ptrauth_exit_el0(struct thread *);
void ptrauth_enter_el0(struct thread *);
void
ptrauth_init(void)
{
uint64_t isar1;
int pac_enable;
/*
* Allow the sysadmin to disable pointer authentication globally,
* e.g. on broken hardware.
*/
pac_enable = 1;
TUNABLE_INT_FETCH("hw.pac.enable", &pac_enable);
if (!pac_enable) {
if (boothowto & RB_VERBOSE)
printf("Pointer authentication is disabled\n");
return;
}
get_kernel_reg(ID_AA64ISAR1_EL1, &isar1);
/*
* This assumes if there is pointer authentication on the boot CPU
* it will also be available on any non-boot CPUs. If this is ever
* not the case we will have to add a quirk.
*/
if (ID_AA64ISAR1_APA_VAL(isar1) > 0 || ID_AA64ISAR1_API_VAL(isar1) > 0)
enable_ptrauth = true;
}
/* Copy the keys when forking a new process */
void
ptrauth_fork(struct thread *new_td, struct thread *orig_td)
{
if (!enable_ptrauth)
return;
memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
sizeof(new_td->td_md.md_ptrauth_user));
}
/* Generate new userspace keys when executing a new process */
void
ptrauth_exec(struct thread *td)
{
if (!enable_ptrauth)
return;
arc4rand(&td->td_md.md_ptrauth_user, sizeof(td->td_md.md_ptrauth_user),
0);
}
/*
* Copy the user keys when creating a new userspace thread until it's clear
* how the ABI expects the various keys to be assigned.
*/
void
ptrauth_copy_thread(struct thread *new_td, struct thread *orig_td)
{
if (!enable_ptrauth)
return;
memcpy(&new_td->td_md.md_ptrauth_user, &orig_td->td_md.md_ptrauth_user,
sizeof(new_td->td_md.md_ptrauth_user));
}
/* Generate new kernel keys when executing a new kernel thread */
void
ptrauth_thread_alloc(struct thread *td)
{
if (!enable_ptrauth)
return;
arc4rand(&td->td_md.md_ptrauth_kern, sizeof(td->td_md.md_ptrauth_kern),
0);
}
/*
* Load the userspace keys. We can't use WRITE_SPECIALREG as we need
* to set the architecture extension.
*/
#define LOAD_KEY(space, name) \
__asm __volatile( \
".arch_extension pauth \n" \
"msr "#name"keylo_el1, %0 \n" \
"msr "#name"keyhi_el1, %1 \n" \
".arch_extension nopauth \n" \
:: "r"(td->td_md.md_ptrauth_##space.name.pa_key_lo), \
"r"(td->td_md.md_ptrauth_##space.name.pa_key_hi))
void
ptrauth_thread0(struct thread *td)
{
if (!enable_ptrauth)
return;
/* TODO: Generate a random number here */
memset(&td->td_md.md_ptrauth_kern, 0,
sizeof(td->td_md.md_ptrauth_kern));
LOAD_KEY(kern, apia);
/*
* No isb as this is called before ptrauth_start so can rely on
* the instruction barrier there.
*/
}
/*
* Enable pointer authentication. After this point userspace and the kernel
* can sign return addresses, etc. based on their keys
*
* This assumes either all or no CPUs have pointer authentication support,
* and, if supported, all CPUs have the same algorithm.
*/
void
ptrauth_start(void)
{
uint64_t sctlr;
if (!enable_ptrauth)
return;
/* Enable pointer authentication */
sctlr = READ_SPECIALREG(sctlr_el1);
sctlr |= SCTLR_PTRAUTH;
WRITE_SPECIALREG(sctlr_el1, sctlr);
isb();
}
#ifdef SMP
void
ptrauth_mp_start(uint64_t cpu)
{
struct ptrauth_key start_key;
uint64_t sctlr;
if (!enable_ptrauth)
return;
/*
* We need a key until we call sched_throw, however we don't have
* a thread until then. Create a key just for use within
* init_secondary and whatever it calls. As init_secondary never
* returns it is safe to do so from within it.
*
* As it's only used for a short length of time just use the cpu
* as the key.
*/
start_key.pa_key_lo = cpu;
start_key.pa_key_hi = ~cpu;
__asm __volatile(
".arch_extension pauth \n"
"msr apiakeylo_el1, %0 \n"
"msr apiakeyhi_el1, %1 \n"
".arch_extension nopauth \n"
:: "r"(start_key.pa_key_lo), "r"(start_key.pa_key_hi));
/* Enable pointer authentication */
sctlr = READ_SPECIALREG(sctlr_el1);
sctlr |= SCTLR_PTRAUTH;
WRITE_SPECIALREG(sctlr_el1, sctlr);
isb();
}
#endif
struct thread *
ptrauth_switch(struct thread *td)
{
if (enable_ptrauth) {
LOAD_KEY(kern, apia);
isb();
}
return (td);
}
/* Called when we are exiting userspace and entering the kernel */
void
ptrauth_exit_el0(struct thread *td)
{
if (!enable_ptrauth)
return;
LOAD_KEY(kern, apia);
isb();
}
/* Called when we are about to exit the kernel and enter userspace */
void
ptrauth_enter_el0(struct thread *td)
{
if (!enable_ptrauth)
return;
LOAD_KEY(user, apia);
LOAD_KEY(user, apib);
LOAD_KEY(user, apda);
LOAD_KEY(user, apdb);
LOAD_KEY(user, apga);
/*
* No isb as this is called from the exception handler so can rely
* on the eret instruction to be the needed context synchronizing event.
*/
}


@ -69,7 +69,7 @@ stack_save_td(struct stack *st, struct thread *td)
return (EOPNOTSUPP);
frame.fp = td->td_pcb->pcb_x[29];
frame.pc = td->td_pcb->pcb_lr;
frame.pc = ADDR_MAKE_CANONICAL(td->td_pcb->pcb_lr);
stack_capture(td, st, &frame);
return (0);


@ -71,13 +71,16 @@ ENTRY(cpu_throw)
#ifdef VFP
/* Backup the new thread pointer around a call to C code */
mov x19, x0
mov x20, x1
mov x19, x1
bl vfp_discard
mov x1, x20
mov x0, x19
#else
mov x0, x1
#endif
/* This returns the thread pointer so no need to save it */
bl ptrauth_switch
/* This returns the thread pcb */
bl pmap_switch
mov x4, x0
@ -153,10 +156,14 @@ ENTRY(cpu_switch)
/* Load the pcb address */
mov x1, x4
bl vfp_save_state
mov x1, x20
mov x0, x19
mov x0, x20
#else
mov x0, x1
#endif
/* This returns the thread pointer so no need to save it */
bl ptrauth_switch
/* This returns the thread pcb */
bl pmap_switch
/* Move the new pcb out of the way */
mov x4, x0
@ -213,11 +220,15 @@ ENTRY(fork_trampoline)
bl _C_LABEL(fork_exit)
/*
* Disable interrupts to avoid
* overwriting spsr_el1 and sp_el0 by an IRQ exception.
* Disable interrupts as we are setting userspace specific
* state that we won't handle correctly in an interrupt while
* in the kernel.
*/
msr daifset, #(DAIF_D | DAIF_INTR)
ldr x0, [x18, #PC_CURTHREAD]
bl ptrauth_enter_el0
/* Restore sp, lr, elr, and spsr */
ldp x18, lr, [sp, #TF_SP]
ldp x10, x11, [sp, #TF_ELR]


@ -483,6 +483,12 @@ do_el1h_sync(struct thread *td, struct trapframe *frame)
panic("No debugger in kernel.");
#endif
break;
case EXCP_FPAC:
/* We can see this if PAC authentication fails */
print_registers(frame);
printf(" far: %16lx\n", READ_SPECIALREG(far_el1));
panic("FPAC kernel exception");
break;
case EXCP_UNKNOWN:
if (undef_insn(1, frame))
break;
@ -573,6 +579,11 @@ do_el0_sync(struct thread *td, struct trapframe *frame)
exception);
userret(td, frame);
break;
case EXCP_FPAC:
call_trapsignal(td, SIGILL, ILL_ILLOPN, (void *)frame->tf_elr,
exception);
userret(td, frame);
break;
case EXCP_SP_ALIGN:
call_trapsignal(td, SIGBUS, BUS_ADRALN, (void *)frame->tf_sp,
exception);


@ -47,7 +47,7 @@ unwind_frame(struct thread *td, struct unwind_state *frame)
/* FP to previous frame (X29) */
frame->fp = ((uintptr_t *)fp)[0];
/* LR (X30) */
frame->pc = ((uintptr_t *)fp)[1] - 4;
frame->pc = ADDR_MAKE_CANONICAL(((uintptr_t *)fp)[1] - 4);
return (true);
}


@ -94,6 +94,8 @@ cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
/* Clear the debug register state. */
bzero(&pcb2->pcb_dbg_regs, sizeof(pcb2->pcb_dbg_regs));
ptrauth_fork(td2, td1);
tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
bcopy(td1->td_frame, tf, sizeof(*tf));
tf->tf_x[0] = 0;
@ -197,6 +199,9 @@ cpu_copy_thread(struct thread *td, struct thread *td0)
/* Set the new canary */
arc4random_buf(&td->td_md.md_canary, sizeof(td->td_md.md_canary));
#endif
/* Copy the pointer authentication keys. */
ptrauth_copy_thread(td, td0);
}
/*
@ -259,6 +264,7 @@ cpu_thread_alloc(struct thread *td)
td->td_kstack_pages * PAGE_SIZE) - 1;
td->td_frame = (struct trapframe *)STACKALIGN(
(struct trapframe *)td->td_pcb - 1);
ptrauth_thread_alloc(td);
}
void


@ -225,6 +225,7 @@
#define EXCP_SVC64 0x15 /* SVC trap for AArch64 */
#define EXCP_HVC 0x16 /* HVC trap */
#define EXCP_MSR 0x18 /* MSR/MRS trap */
#define EXCP_FPAC 0x1c /* Faulting PAC trap */
#define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */
#define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */
#define EXCP_PC_ALIGN 0x22 /* PC alignment fault */


@ -171,6 +171,28 @@ void identify_cache(uint64_t);
void identify_cpu(u_int);
void install_cpu_errata(void);
/* Pointer Authentication Code (PAC) support */
void ptrauth_init(void);
void ptrauth_fork(struct thread *, struct thread *);
void ptrauth_exec(struct thread *);
void ptrauth_copy_thread(struct thread *, struct thread *);
void ptrauth_thread_alloc(struct thread *);
void ptrauth_thread0(struct thread *);
#ifdef SMP
void ptrauth_mp_start(uint64_t);
#endif
/* Functions to read the sanitised view of the special registers */
void update_special_regs(u_int);
bool extract_user_id_field(u_int, u_int, uint8_t *);


@ -192,7 +192,7 @@ bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
int pmap_fault(pmap_t, uint64_t, uint64_t);
struct pcb *pmap_switch(struct thread *, struct thread *);
struct pcb *pmap_switch(struct thread *);
extern void (*pmap_clean_stage2_tlbi)(void);
extern void (*pmap_invalidate_vpipt_icache)(void);


@ -34,10 +34,35 @@
#ifndef _MACHINE_PROC_H_
#define _MACHINE_PROC_H_
struct ptrauth_key {
uint64_t pa_key_lo;
uint64_t pa_key_hi;
};
struct mdthread {
int md_spinlock_count; /* (k) */
register_t md_saved_daif; /* (k) */
uintptr_t md_canary;
/*
* The pointer authentication keys. These are shared within a process,
* however this may change for some keys as the PAuth ABI Extension to
* ELF for the Arm 64-bit Architecture [1] is currently (July 2021) at
* an Alpha release quality so may change.
*
* [1] https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst
*/
struct {
struct ptrauth_key apia;
struct ptrauth_key apib;
struct ptrauth_key apda;
struct ptrauth_key apdb;
struct ptrauth_key apga;
} md_ptrauth_user;
struct {
struct ptrauth_key apia;
} md_ptrauth_kern;
};
struct mdproc {


@ -162,6 +162,15 @@
#define ADDR_IS_CANONICAL(addr) \
(((addr) & 0xffff000000000000UL) == 0 || \
((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
#define ADDR_MAKE_CANONICAL(addr) ({ \
__typeof(addr) _tmp_addr = (addr); \
\
_tmp_addr &= ~0xffff000000000000UL; \
if (ADDR_IS_KERNEL(addr)) \
_tmp_addr |= 0xffff000000000000UL; \
\
_tmp_addr; \
})
/* 95 TiB maximum for the direct map region */
#define DMAP_MIN_ADDRESS (0xffffa00000000000UL)
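
To show why the unwinders above wrap saved link registers in
ADDR_MAKE_CANONICAL(), here is a standalone sketch with made-up values.
ADDR_IS_KERNEL is a simplified stand-in for the real vmparam.h
definition; it relies on the fact that signing preserves bit 55, the
kernel/user select bit.

#include <stdint.h>
#include <stdio.h>

#define ADDR_IS_KERNEL(addr)    (((addr) & (1UL << 55)) != 0)
#define ADDR_MAKE_CANONICAL(addr) ({                    \
        __typeof(addr) _tmp_addr = (addr);              \
                                                        \
        _tmp_addr &= ~0xffff000000000000UL;             \
        if (ADDR_IS_KERNEL(addr))                       \
                _tmp_addr |= 0xffff000000000000UL;      \
                                                        \
        _tmp_addr;                                      \
})

int
main(void)
{
        /* A saved kernel LR with a PAC folded into its top sixteen bits. */
        uint64_t signed_lr = 0x0080ffff008c1234UL;

        /* Prints ffffffff008c1234, a canonical kernel address again. */
        printf("%jx\n", (uintmax_t)ADDR_MAKE_CANONICAL(signed_lr));
        return (0);
}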


@ -66,6 +66,8 @@ arm64/arm64/minidump_machdep.c standard
arm64/arm64/mp_machdep.c optional smp
arm64/arm64/nexus.c standard
arm64/arm64/ofw_machdep.c optional fdt
arm64/arm64/ptrauth.c standard \
compile-with "${NORMAL_C:N-mbranch-protection*}"
arm64/arm64/pmap.c standard
arm64/arm64/ptrace_machdep.c standard
arm64/arm64/sigtramp.S standard