diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a8e53647adbf..672e12303865 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2853,6 +2853,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
 void
 pmap_pinit0(pmap_t pmap)
 {
+	struct proc *p;
 	int i;
 
 	PMAP_LOCK_INIT(pmap);
@@ -2871,6 +2872,12 @@ pmap_pinit0(pmap_t pmap)
 		pmap->pm_pcids[i].pm_gen = 1;
 	}
 	pmap_activate_boot(pmap);
+	if (pti) {
+		p = curproc;
+		PROC_LOCK(p);
+		p->p_md.md_flags |= P_MD_KPTI;
+		PROC_UNLOCK(p);
+	}
 
 	if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
 		pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
@@ -2957,7 +2964,7 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
 	if (pm_type == PT_X86) {
 		pmap->pm_cr3 = pml4phys;
 		pmap_pinit_pml4(pml4pg);
-		if (pti) {
+		if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
 			pml4pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 			    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
 			pmap->pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index aed84e2ca8a7..0cf9eecc6d45 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -369,6 +369,14 @@ cpu_thread_free(struct thread *td)
 	cpu_thread_clean(td);
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map)
+{
+
+	return (((curproc->p_md.md_flags & P_MD_KPTI) != 0) ==
+	    (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3));
+}
+
 void
 cpu_set_syscall_retval(struct thread *td, int error)
 {
diff --git a/sys/amd64/include/proc.h b/sys/amd64/include/proc.h
index 2aebcf0da55a..c063849972fc 100644
--- a/sys/amd64/include/proc.h
+++ b/sys/amd64/include/proc.h
@@ -40,7 +40,8 @@
 
 /*
  * List of locks
- *	k - only accessed by curthread
+ *	c  - proc lock
+ *	k  - only accessed by curthread
  *	pp - pmap.c:invl_gen_mtx
  */
 
@@ -69,8 +70,11 @@ struct mdthread {
 struct mdproc {
 	struct proc_ldt *md_ldt;	/* (t) per-process ldt */
 	struct system_segment_descriptor md_ldt_sd;
+	u_int md_flags;			/* (c) md process flags P_MD */
 };
 
+#define	P_MD_KPTI	0x00000001	/* Enable KPTI on exec */
+
 #define	KINFO_PROC_SIZE 1088
 #define	KINFO_PROC32_SIZE 768
 
diff --git a/sys/arm/arm/vm_machdep.c b/sys/arm/arm/vm_machdep.c
index 31527619a610..a2b457c2a253 100644
--- a/sys/arm/arm/vm_machdep.c
+++ b/sys/arm/arm/vm_machdep.c
@@ -345,3 +345,10 @@ cpu_exit(struct thread *td)
 {
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
diff --git a/sys/arm64/arm64/vm_machdep.c b/sys/arm64/arm64/vm_machdep.c
index d74041f0bcb0..083061b25846 100644
--- a/sys/arm64/arm64/vm_machdep.c
+++ b/sys/arm64/arm64/vm_machdep.c
@@ -279,6 +279,13 @@ cpu_exit(struct thread *td)
 {
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
 void
 swi_vm(void *v)
 {
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 0255f1216008..acc5a439ab0c 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -382,6 +382,13 @@ cpu_thread_free(struct thread *td)
 	cpu_thread_clean(td);
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
 void
 cpu_set_syscall_retval(struct thread *td, int error)
 {
diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 6bef3f092e11..324e647c5a31 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -1100,7 +1100,8 @@ exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
 	else
 		sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
 	if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
-	    vm_map_max(map) == sv->sv_maxuser) {
+	    vm_map_max(map) == sv->sv_maxuser &&
+	    cpu_exec_vmspace_reuse(p, map)) {
 		shmexit(vmspace);
 		pmap_remove_pages(vmspace_pmap(vmspace));
 		vm_map_remove(map, vm_map_min(map), vm_map_max(map));
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index 69635f58f67e..da224aad228c 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -94,7 +94,7 @@ _Static_assert(offsetof(struct proc, p_filemon) == 0x3d0,
     "struct proc KBI p_filemon");
 _Static_assert(offsetof(struct proc, p_comm) == 0x3e8,
     "struct proc KBI p_comm");
-_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c0,
+_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
     "struct proc KBI p_emuldata");
 #endif
 #ifdef __i386__
diff --git a/sys/mips/mips/vm_machdep.c b/sys/mips/mips/vm_machdep.c
index e13edbaf66c1..a224f81c4a68 100644
--- a/sys/mips/mips/vm_machdep.c
+++ b/sys/mips/mips/vm_machdep.c
@@ -453,6 +453,13 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
 	 */
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
 /*
  * Software interrupt handler for queued VM system processing.
  */
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index c1b56544f29d..7649711cef0a 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -249,3 +249,10 @@ cpu_thread_swapout(struct thread *td)
 
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
diff --git a/sys/riscv/riscv/vm_machdep.c b/sys/riscv/riscv/vm_machdep.c
index c1801f01f2f1..39119a07f581 100644
--- a/sys/riscv/riscv/vm_machdep.c
+++ b/sys/riscv/riscv/vm_machdep.c
@@ -264,6 +264,13 @@ cpu_exit(struct thread *td)
 {
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
 void
 swi_vm(void *v)
 {
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index d3966910888f..a4a316e0bc86 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -373,6 +373,13 @@ cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
 	fp->fr_local[1] = (u_long)arg;
 }
 
+bool
+cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
+{
+
+	return (true);
+}
+
 int
 is_physical_memory(vm_paddr_t addr)
 {
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index b2f75f921ec5..2d71a72e4d2c 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -1093,6 +1093,7 @@ void	userret(struct thread *, struct trapframe *);
 void	cpu_exit(struct thread *);
 void	exit1(struct thread *, int, int) __dead2;
 void	cpu_copy_thread(struct thread *td, struct thread *td0);
+bool	cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
 int	cpu_fetch_syscall_args(struct thread *td);
 void	cpu_fork(struct thread *, struct proc *, struct thread *, int);
 void	cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);
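---

Note on the amd64 hook (not part of the patch): exec_new_vmspace() may now recycle a single-reference vmspace only when the process's KPTI setting (P_MD_KPTI in md_flags) agrees with the pmap it would inherit, i.e. whether pm_ucr3 holds a user page-table root or PMAP_NO_CR3. The boolean-equality form covers both mismatch directions in one test: a process with KPTI disabled must not keep a KPTI pmap, and vice versa. Below is a minimal userspace sketch of that predicate; vmspace_reuse_ok() and the PMAP_NO_CR3 stand-in value are illustrative, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define P_MD_KPTI	0x00000001		/* mirrors the new amd64 md_flags bit */
#define PMAP_NO_CR3	(~(uint64_t)0)		/* stand-in sentinel: pmap has no user CR3 */

/*
 * Model of the amd64 cpu_exec_vmspace_reuse() predicate: reuse is allowed
 * only when the process's KPTI preference matches the pmap layout, i.e. a
 * KPTI process must own a user CR3 and a non-KPTI process must not.
 */
static bool
vmspace_reuse_ok(uint32_t md_flags, uint64_t pm_ucr3)
{
	return (((md_flags & P_MD_KPTI) != 0) == (pm_ucr3 != PMAP_NO_CR3));
}

int
main(void)
{
	/* KPTI process, pmap with a user CR3: layouts agree, reuse. */
	printf("%d\n", vmspace_reuse_ok(P_MD_KPTI, 0x12345000));	/* 1 */
	/* KPTI process, pmap without a user CR3: mismatch, rebuild. */
	printf("%d\n", vmspace_reuse_ok(P_MD_KPTI, PMAP_NO_CR3));	/* 0 */
	/* Non-KPTI process, pmap without a user CR3: agree, reuse. */
	printf("%d\n", vmspace_reuse_ok(0, PMAP_NO_CR3));		/* 1 */
	return (0);
}

When the predicate fails, exec_new_vmspace() falls into its existing slow path and builds a fresh vmspace, whose pmap_pinit() then consults the (possibly changed) P_MD_KPTI flag of curproc.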