amd64: Add md process flags and first P_MD_PTI flag.

PTI mode for the process pmap on exec is activated iff P_MD_PTI is set.

On exec, the existing vmspace can be reused only if the pti mode of the
pmap matches the P_MD_PTI flag of the process.  Add an MD
cpu_exec_vmspace_reuse() callback for exec_new_vmspace() which can
veto reuse of the existing vmspace.

MFC note: md_flags change struct proc KBI.

Reviewed by:	jhb, markj
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D19514
This commit is contained in:
Konstantin Belousov 2019-03-16 11:31:01 +00:00
parent c1c120b2cb
commit 6f1fe3305a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=345227
13 changed files with 74 additions and 4 deletions

View File

@ -2853,6 +2853,7 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
void
pmap_pinit0(pmap_t pmap)
{
struct proc *p;
int i;
PMAP_LOCK_INIT(pmap);
@ -2871,6 +2872,12 @@ pmap_pinit0(pmap_t pmap)
pmap->pm_pcids[i].pm_gen = 1;
}
pmap_activate_boot(pmap);
if (pti) {
p = curproc;
PROC_LOCK(p);
p->p_md.md_flags |= P_MD_KPTI;
PROC_UNLOCK(p);
}
if ((cpu_stdext_feature2 & CPUID_STDEXT2_PKU) != 0) {
pmap_pkru_ranges_zone = uma_zcreate("pkru ranges",
@ -2957,7 +2964,7 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags)
if (pm_type == PT_X86) {
pmap->pm_cr3 = pml4phys;
pmap_pinit_pml4(pml4pg);
if (pti) {
if ((curproc->p_md.md_flags & P_MD_KPTI) != 0) {
pml4pgu = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
pmap->pm_pml4u = (pml4_entry_t *)PHYS_TO_DMAP(

View File

@ -369,6 +369,14 @@ cpu_thread_free(struct thread *td)
cpu_thread_clean(td);
}
/*
 * MD veto for exec_new_vmspace(): the existing vmspace/pmap may be
 * reused only if its PTI mode matches the process's P_MD_KPTI flag.
 * A PTI pmap has a valid user CR3 (pm_ucr3 != PMAP_NO_CR3); a
 * non-PTI pmap does not.
 *
 * Use the proc passed by the MI caller rather than curproc; the
 * original read curproc, which is the same process at the exec call
 * site, but the interface hands us the proc explicitly.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p, vm_map_t map)
{

	return (((p->p_md.md_flags & P_MD_KPTI) != 0) ==
	    (vm_map_pmap(map)->pm_ucr3 != PMAP_NO_CR3));
}
void
cpu_set_syscall_retval(struct thread *td, int error)
{

View File

@ -40,7 +40,8 @@
/*
* List of locks
* c - proc lock
* k - only accessed by curthread
* pp - pmap.c:invl_gen_mtx
*/
@ -69,8 +70,11 @@ struct mdthread {
/* Machine-dependent part of struct proc (amd64). */
struct mdproc {
struct proc_ldt *md_ldt; /* (t) per-process ldt */
struct system_segment_descriptor md_ldt_sd; /* LDT segment descriptor (presumably cached for context switch — confirm) */
u_int md_flags; /* (c) md process flags P_MD */
};
#define P_MD_KPTI 0x00000001 /* Enable KPTI on exec */
#define KINFO_PROC_SIZE 1088
#define KINFO_PROC32_SIZE 768

View File

@ -345,3 +345,10 @@ cpu_exit(struct thread *td)
{
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}

View File

@ -279,6 +279,13 @@ cpu_exit(struct thread *td)
{
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}
void
swi_vm(void *v)
{

View File

@ -382,6 +382,13 @@ cpu_thread_free(struct thread *td)
cpu_thread_clean(td);
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}
void
cpu_set_syscall_retval(struct thread *td, int error)
{

View File

@ -1100,7 +1100,8 @@ exec_new_vmspace(struct image_params *imgp, struct sysentvec *sv)
else
sv_minuser = MAX(sv->sv_minuser, PAGE_SIZE);
if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv_minuser &&
vm_map_max(map) == sv->sv_maxuser) {
vm_map_max(map) == sv->sv_maxuser &&
cpu_exec_vmspace_reuse(p, map)) {
shmexit(vmspace);
pmap_remove_pages(vmspace_pmap(vmspace));
vm_map_remove(map, vm_map_min(map), vm_map_max(map));

View File

@ -94,7 +94,7 @@ _Static_assert(offsetof(struct proc, p_filemon) == 0x3d0,
"struct proc KBI p_filemon");
_Static_assert(offsetof(struct proc, p_comm) == 0x3e8,
"struct proc KBI p_comm");
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c0,
_Static_assert(offsetof(struct proc, p_emuldata) == 0x4c8,
"struct proc KBI p_emuldata");
#endif
#ifdef __i386__

View File

@ -453,6 +453,13 @@ cpu_set_upcall(struct thread *td, void (*entry)(void *), void *arg,
*/
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}
/*
* Software interrupt handler for queued VM system processing.
*/

View File

@ -249,3 +249,10 @@ cpu_thread_swapout(struct thread *td)
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}

View File

@ -264,6 +264,13 @@ cpu_exit(struct thread *td)
{
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}
void
swi_vm(void *v)
{

View File

@ -373,6 +373,13 @@ cpu_fork_kthread_handler(struct thread *td, void (*func)(void *), void *arg)
fp->fr_local[1] = (u_long)arg;
}
/*
 * No MD restriction on vmspace reuse on this architecture: exec may
 * always reuse the existing vmspace.
 */
bool
cpu_exec_vmspace_reuse(struct proc *p __unused, vm_map_t map __unused)
{
return (true);
}
int
is_physical_memory(vm_paddr_t addr)
{

View File

@ -1093,6 +1093,7 @@ void userret(struct thread *, struct trapframe *);
void cpu_exit(struct thread *);
void exit1(struct thread *, int, int) __dead2;
void cpu_copy_thread(struct thread *td, struct thread *td0);
bool cpu_exec_vmspace_reuse(struct proc *p, struct vm_map *map);
int cpu_fetch_syscall_args(struct thread *td);
void cpu_fork(struct thread *, struct proc *, struct thread *, int);
void cpu_fork_kthread_handler(struct thread *, void (*)(void *), void *);