Port sysctl kern.elf32.read_exec from amd64 to i386.

Make it more comprehensive on i386: instead of only adding PF_X to all
kernel-loaded ELF segments, do not set the NX bit for any mapping.  This
is needed for compatibility with older i386 programs that assume that
read access implies exec, e.g. old X servers with hand-rolled module
loaders.

Reported and tested by:	bde
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
kib 2019-02-07 02:17:34 +00:00
parent 1ff881a6a6
commit 219b0b3431
3 changed files with 8 additions and 11 deletions
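
For reference, the new knob can be inspected and toggled from userland; below is a minimal sketch (not part of this commit) using sysctlbyname(3), assuming a FreeBSD host where the kern.elf32.read_exec OID is present:

/*
 * Illustrative only: read and then enable kern.elf32.read_exec.
 * Writing the OID requires root; error handling is minimal.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
	int cur, on = 1;
	size_t len = sizeof(cur);

	if (sysctlbyname("kern.elf32.read_exec", &cur, &len, NULL, 0) == -1)
		err(1, "sysctlbyname(read)");
	printf("kern.elf32.read_exec = %d\n", cur);
	if (sysctlbyname("kern.elf32.read_exec", NULL, NULL, &on, sizeof(on)) == -1)
		err(1, "sysctlbyname(write)");
	return (0);
}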

sys/i386/i386/pmap.c

@@ -3247,7 +3247,7 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 	if ((prot & VM_PROT_WRITE) == 0)
 		newpde &= ~(PG_RW | PG_M);
 #ifdef PMAP_PAE_COMP
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 		newpde |= pg_nx;
 #endif
 	if (newpde != oldpde) {
@@ -3389,7 +3389,7 @@ __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 				pbits &= ~(PG_RW | PG_M);
 			}
 #ifdef PMAP_PAE_COMP
-			if ((prot & VM_PROT_EXECUTE) == 0)
+			if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 				pbits |= pg_nx;
 #endif
 
@@ -3604,7 +3604,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	KASSERT((newpte & (PG_M | PG_RW)) != PG_M,
 	    ("pmap_enter: flags includes VM_PROT_WRITE but prot doesn't"));
 #ifdef PMAP_PAE_COMP
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 		newpte |= pg_nx;
 #endif
 	if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -3841,7 +3841,7 @@ pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		newpde |= PG_MANAGED;
 #ifdef PMAP_PAE_COMP
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 		newpde |= pg_nx;
 #endif
 	if (pmap != kernel_pmap)
@@ -4099,7 +4099,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		newpte |= PG_MANAGED;
 #ifdef PMAP_PAE_COMP
-	if ((prot & VM_PROT_EXECUTE) == 0)
+	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 		newpte |= pg_nx;
 #endif
 	if (pmap != kernel_pmap)
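
All five pmap.c hunks apply the same guard: under PAE paging, pg_nx is set only when the mapping lacks execute permission and the read_exec knob is off.  The following stand-alone model of that decision is illustrative only; VM_PROT_EXECUTE, i386_read_exec and wants_nx() are local stand-ins, not the kernel symbols:

/* Userland sketch of the pg_nx decision added above. */
#include <stdbool.h>
#include <stdio.h>

#define	VM_PROT_EXECUTE	0x04		/* stand-in value */

static int i386_read_exec;		/* models kern.elf32.read_exec */

static bool
wants_nx(int prot)
{
	/* Mirror the condition used in the pmap.c hunks. */
	return ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec);
}

int
main(void)
{
	i386_read_exec = 0;
	printf("read_exec=0: nx for read-only mapping = %d\n", wants_nx(0));
	i386_read_exec = 1;
	printf("read_exec=1: nx for read-only mapping = %d\n", wants_nx(0));
	return (0);
}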

sys/kern/imgact_elf.c

@@ -130,13 +130,11 @@ SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
     nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
     __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable non-executable stack");
 
-#if __ELF_WORD_SIZE == 32
-#if defined(__amd64__)
+#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
 int i386_read_exec = 0;
 SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
     "enable execution from readable segments");
 #endif
-#endif
 
 static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
 
@@ -2516,11 +2514,9 @@ __elfN(trans_prot)(Elf_Word flags)
 		prot |= VM_PROT_WRITE;
 	if (flags & PF_R)
 		prot |= VM_PROT_READ;
-#if __ELF_WORD_SIZE == 32
-#if defined(__amd64__)
+#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
 	if (i386_read_exec && (flags & PF_R))
 		prot |= VM_PROT_EXECUTE;
 #endif
-#endif
 	return (prot);
 }
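
The imgact_elf.c change collapses the nested #if blocks into one condition and extends it to __i386__; it is what turns the sysctl into a policy for kernel-loaded ELF segments: with read_exec enabled, a readable segment is also mapped executable.  A self-contained sketch of the resulting translation follows; the ELF p_flags and vm_prot constants are defined locally for illustration and are not taken from the kernel headers:

/* Userland model of __elfN(trans_prot) with read_exec enabled. */
#include <stdio.h>

#define	PF_X	0x1
#define	PF_W	0x2
#define	PF_R	0x4

#define	VM_PROT_READ	0x01
#define	VM_PROT_WRITE	0x02
#define	VM_PROT_EXECUTE	0x04

static int i386_read_exec = 1;		/* as if kern.elf32.read_exec=1 */

static int
trans_prot(int flags)
{
	int prot = 0;

	if (flags & PF_X)
		prot |= VM_PROT_EXECUTE;
	if (flags & PF_W)
		prot |= VM_PROT_WRITE;
	if (flags & PF_R)
		prot |= VM_PROT_READ;
	if (i386_read_exec && (flags & PF_R))
		prot |= VM_PROT_EXECUTE;
	return (prot);
}

int
main(void)
{
	/* A PF_R-only segment now maps as read+execute (0x5). */
	printf("PF_R -> 0x%x\n", trans_prot(PF_R));
	return (0);
}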

sys/x86/include/x86_var.h

@@ -83,6 +83,7 @@ extern int _ugssel;
 extern int use_xsave;
 extern uint64_t xsave_mask;
 extern u_int max_apic_id;
+extern int i386_read_exec;
 extern int pti;
 extern int hw_ibrs_active;
 extern int hw_ssb_active;