Apply mapping protections to preloaded kernel modules on amd64.

With an upcoming change, the amd64 kernel will map preloaded files RW
instead of RWX, so the kernel linker must adjust protections
appropriately using pmap_change_prot().

Reviewed by:	kib
MFC after:	1 month
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21860
commit f822c9e287
parent 1d9eae9fb2
Author:	Mark Johnston
Date:	2019-10-18 13:56:45 +00:00

Notes:
	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=353730
3 changed files with 72 additions and 4 deletions
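
At the core of the change is a per-segment protection computation: the
protection requested by the caller is OR'd with VM_PROT_READ, plus write and
execute permission taken from the segment's ELF flags. The standalone sketch
below mirrors the nprot logic of the preload_protect() hunk that follows;
segment_prot() and the userspace scaffolding are illustrative additions, with
the flag values matching the standard ELF and FreeBSD vm definitions.

	#include <stdio.h>

	/* ELF segment flag bits (as in <sys/elf_common.h>). */
	#define PF_X	0x1
	#define PF_W	0x2
	#define PF_R	0x4

	/* VM protection bits (as in <vm/vm.h>). */
	#define VM_PROT_NONE	0x00
	#define VM_PROT_READ	0x01
	#define VM_PROT_WRITE	0x02
	#define VM_PROT_EXECUTE	0x04
	#define VM_PROT_ALL	(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE)

	/*
	 * Userspace sketch, not kernel code: mirrors preload_protect()'s
	 * per-segment computation.  The requested protection is a floor,
	 * segments are always readable, and W/X come from the ELF flags.
	 */
	static int
	segment_prot(int prot, int p_flags)
	{
		int nprot;

		nprot = prot | VM_PROT_READ;
		if ((p_flags & PF_W) != 0)
			nprot |= VM_PROT_WRITE;
		if ((p_flags & PF_X) != 0)
			nprot |= VM_PROT_EXECUTE;
		return (nprot);
	}

	int
	main(void)
	{
		/* A text segment (R|X): RWX while relocating, R|X after. */
		printf("during relocation: %#x\n",
		    segment_prot(VM_PROT_ALL, PF_R | PF_X));	/* 0x7 */
		printf("after relocation:  %#x\n",
		    segment_prot(VM_PROT_NONE, PF_R | PF_X));	/* 0x5 */
		return (0);
	}

Passing VM_PROT_ALL thus forces RWX while relocations are written, while
VM_PROT_NONE yields exactly the protections the segment's own flags call for.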

--- a/sys/kern/link_elf.c
+++ b/sys/kern/link_elf.c

@@ -736,9 +736,46 @@ parse_vnet(elf_file_t ef)
 #endif
 #undef LS_PADDING
 
+/*
+ * Apply the specified protection to the loadable segments of a preloaded linker
+ * file.
+ */
 static int
-link_elf_link_preload(linker_class_t cls,
-    const char* filename, linker_file_t *result)
+preload_protect(elf_file_t ef, vm_prot_t prot)
 {
+#ifdef __amd64__
+	Elf_Ehdr *hdr;
+	Elf_Phdr *phdr, *phlimit;
+	vm_prot_t nprot;
+	int error;
+
+	error = 0;
+	hdr = (Elf_Ehdr *)ef->address;
+	phdr = (Elf_Phdr *)(ef->address + hdr->e_phoff);
+	phlimit = phdr + hdr->e_phnum;
+	for (; phdr < phlimit; phdr++) {
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		nprot = prot | VM_PROT_READ;
+		if ((phdr->p_flags & PF_W) != 0)
+			nprot |= VM_PROT_WRITE;
+		if ((phdr->p_flags & PF_X) != 0)
+			nprot |= VM_PROT_EXECUTE;
+		error = pmap_change_prot((vm_offset_t)ef->address +
+		    phdr->p_vaddr, round_page(phdr->p_memsz), nprot);
+		if (error != 0)
+			break;
+	}
+	return (error);
+#else
+	return (0);
+#endif
+}
+
+static int
+link_elf_link_preload(linker_class_t cls, const char *filename,
+    linker_file_t *result)
+{
 	Elf_Addr *ctors_addrp;
 	Elf_Size *ctors_sizep;
@@ -798,6 +835,8 @@ link_elf_link_preload(linker_class_t cls,
 	if (error == 0)
 		error = parse_vnet(ef);
 #endif
+	if (error == 0)
+		error = preload_protect(ef, VM_PROT_ALL);
 	if (error != 0) {
 		linker_file_unload(lf, LINKER_UNLOAD_FORCE);
 		return (error);
@@ -815,6 +854,8 @@ link_elf_link_preload_finish(linker_file_t lf)
 	ef = (elf_file_t) lf;
 	error = relocate_file(ef);
+	if (error == 0)
+		error = preload_protect(ef, VM_PROT_NONE);
 	if (error != 0)
 		return (error);
 
 	(void)link_elf_preload_parse_symbols(ef);
@@ -1274,6 +1315,7 @@ link_elf_unload_file(linker_file_t file)
 static void
 link_elf_unload_preload(linker_file_t file)
 {
+
 	if (file->pathname != NULL)
 		preload_delete_name(file->pathname);
 }
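
For the ordering above: link_elf_link_preload() relaxes the preloaded mappings
with preload_protect(ef, VM_PROT_ALL) so relocations can be written, and
link_elf_link_preload_finish() tightens them to each segment's own flags with
preload_protect(ef, VM_PROT_NONE) once relocation processing is done. A
minimal userspace analogue of that relax-then-tighten sequence, using
mprotect(2); this is an illustration, not part of the commit:

	#include <sys/mman.h>

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		long pagesz = sysconf(_SC_PAGESIZE);
		uint8_t *seg;

		/* Stand-in for a preloaded module's text segment. */
		seg = mmap(NULL, (size_t)pagesz, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (seg == MAP_FAILED)
			return (1);

		/* "Relocation": the segment is patched while writable. */
		memset(seg, 0xc3, 16);	/* x86 RET opcodes, illustrative */

		/* Tighten to final protections before any code can run. */
		if (mprotect(seg, (size_t)pagesz, PROT_READ | PROT_EXEC) != 0)
			return (1);

		printf("segment at %p is now read/execute\n", (void *)seg);
		return (0);
	}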

--- a/sys/kern/link_elf_obj.c
+++ b/sys/kern/link_elf_obj.c

@@ -193,7 +193,6 @@ link_elf_init(void *arg)
 	linker_add_class(&link_elf_class);
 }
 SYSINIT(link_elf_obj, SI_SUB_KLD, SI_ORDER_SECOND, link_elf_init, NULL);
-
 
 static void
 link_elf_protect_range(elf_file_t ef, vm_offset_t start, vm_offset_t end,
@@ -209,6 +208,15 @@ link_elf_protect_range(elf_file_t ef, vm_offset_t start, vm_offset_t end,
 	if (start == end)
 		return;
 
+	if (ef->preloaded) {
+#ifdef __amd64__
+		error = pmap_change_prot(start, end - start, prot);
+		KASSERT(error == 0,
+		    ("link_elf_protect_range: pmap_change_prot() returned %d",
+		    error));
+#endif
+		return;
+	}
 	error = vm_map_protect(kernel_map, start, end, prot, FALSE);
 	KASSERT(error == KERN_SUCCESS,
 	    ("link_elf_protect_range: vm_map_protect() returned %d", error));
@@ -564,6 +572,14 @@ link_elf_link_preload(linker_class_t cls, const char *filename,
 		goto out;
 	}
 
+	/*
+	 * The file needs to be writeable and executable while applying
+	 * relocations.  Mapping protections are applied once relocation
+	 * processing is complete.
+	 */
+	link_elf_protect_range(ef, (vm_offset_t)ef->address,
+	    round_page((vm_offset_t)ef->address + ef->lf.size), VM_PROT_ALL);
+
 	/* Local intra-module relocations */
 	error = link_elf_reloc_local(lf, false);
 	if (error != 0)
@@ -616,7 +632,9 @@ link_elf_link_preload_finish(linker_file_t lf)
 		return (error);
 #endif
 
-	/* Invoke .ctors */
+	/* Apply protections now that relocation processing is complete. */
+	link_elf_protect(ef);
+
 	link_elf_invoke_ctors(lf->ctors_addr, lf->ctors_size);
 	return (0);
 }
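
A note on granularity: pmap_change_prot() operates on whole pages, so
link_elf_protect_range() above rounds the object's end address up with
round_page(), while kmem_bootstrap_free() in the next file rounds inward
(round_page() on the start, trunc_page() on the end) so that only pages wholly
contained in the freed range are touched. A standalone sketch of the two
rounding conventions, using simplified macros that assume amd64's 4 KB base
page size:

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified forms of the kernel macros, assuming 4 KB pages. */
	#define PAGE_SIZE	4096UL
	#define round_page(x)	(((uintptr_t)(x) + PAGE_SIZE - 1) & \
				    ~(PAGE_SIZE - 1))
	#define trunc_page(x)	((uintptr_t)(x) & ~(PAGE_SIZE - 1))

	int
	main(void)
	{
		uintptr_t addr = 0x1240;	/* hypothetical, unaligned */
		size_t size = 0x5e37;		/* hypothetical object size */

		/* Protecting rounds outward so every byte is covered. */
		printf("protect:        [%#lx, %#lx)\n",
		    (unsigned long)trunc_page(addr),
		    (unsigned long)round_page(addr + size));

		/* Freeing rounds inward so only whole pages are released. */
		printf("bootstrap free: [%#lx, %#lx)\n",
		    (unsigned long)round_page(addr),
		    (unsigned long)trunc_page(addr + size));
		return (0);
	}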

--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c

@@ -834,6 +834,14 @@ kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
 	end = trunc_page(start + size);
 	start = round_page(start);
+#ifdef __amd64__
+	/*
+	 * Preloaded files do not have execute permissions by default on amd64.
+	 * Restore the default permissions to ensure that the direct map alias
+	 * is updated.
+	 */
+	pmap_change_prot(start, end - start, VM_PROT_RW);
+#endif
 	for (va = start; va < end; va += PAGE_SIZE) {
 		pa = pmap_kextract(va);
 		m = PHYS_TO_VM_PAGE(pa);