/* freebsd-skq/sys/kern/imgact_elf.c */

/*-
* Copyright (c) 2000 David O'Brien
* Copyright (c) 1995-1996 Søren Schmidt
* Copyright (c) 1996 Peter Wemm
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer
* in this position and unchanged.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/exec.h>
#include <sys/fcntl.h>
#include <sys/imgact.h>
#include <sys/imgact_elf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/mman.h>
#include <sys/namei.h>
#include <sys/pioctl.h>
#include <sys/proc.h>
#include <sys/procfs.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/stat.h>
#include <sys/sx.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#define OLD_EI_BRAND 8
static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(const Elf_Ehdr *hdr,
const char *interp);
static int __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
u_long *entry, size_t pagesize);
static int __elfN(load_section)(struct proc *p,
struct vmspace *vmspace, struct vnode *vp, vm_object_t object,
vm_offset_t offset, caddr_t vmaddr, size_t memsz, size_t filsz,
vm_prot_t prot, size_t pagesize);
static int __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp);
SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE), CTLFLAG_RW, 0,
"");
int __elfN(fallback_brand) = -1;
SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
fallback_brand, CTLFLAG_RW, &__elfN(fallback_brand), 0,
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
TUNABLE_INT("kern.elf" __XSTRING(__ELF_WORD_SIZE) ".fallback_brand",
&__elfN(fallback_brand));
static int elf_trace = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(trace), CTLFLAG_RW, &elf_trace, 0, "");
static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
&elf_legacy_coredump, 0, "");
static Elf_Brandinfo *elf_brand_list[MAX_BRANDS];
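
/*
 * Register an ELF brand in the first free slot of the brand table.
 * Returns 0 on success, or -1 if the table is full.
 */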
int
__elfN(insert_brand_entry)(Elf_Brandinfo *entry)
{
int i;
for (i = 0; i < MAX_BRANDS; i++) {
if (elf_brand_list[i] == NULL) {
elf_brand_list[i] = entry;
break;
}
}
if (i == MAX_BRANDS)
return (-1);
return (0);
}
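
/*
 * Remove a previously registered brand from the brand table.
 * Returns 0 on success, or -1 if the entry was not found.
 */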
int
__elfN(remove_brand_entry)(Elf_Brandinfo *entry)
{
int i;
for (i = 0; i < MAX_BRANDS; i++) {
if (elf_brand_list[i] == entry) {
elf_brand_list[i] = NULL;
break;
}
}
if (i == MAX_BRANDS)
return (-1);
return (0);
}
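
/*
 * Return TRUE if any process is currently executing under the given
 * brand's sysentvec, e.g. so a still-used emulation module can refuse
 * to unload.
 */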
int
__elfN(brand_inuse)(Elf_Brandinfo *entry)
{
struct proc *p;
int rval = FALSE;
sx_slock(&allproc_lock);
LIST_FOREACH(p, &allproc, p_list) {
if (p->p_sysent == entry->sysvec) {
rval = TRUE;
break;
}
}
sx_sunlock(&allproc_lock);
return (rval);
}
static Elf_Brandinfo *
__elfN(get_brandinfo)(const Elf_Ehdr *hdr, const char *interp)
{
Elf_Brandinfo *bi;
int i;
/*
* We support three types of branding -- (1) the ELF EI_OSABI field
* that SCO added to the ELF spec, (2) FreeBSD 3.x's traditional string
* branding within the ELF header, and (3) the path in the `interp_path'
* field. We should also look for an ".note.ABI-tag" ELF section now
* in all Linux ELF binaries, FreeBSD 4.1+, and some NetBSD ones.
*/
/* If the executable has a brand, search for it in the brand list. */
for (i = 0; i < MAX_BRANDS; i++) {
bi = elf_brand_list[i];
if (bi != NULL && hdr->e_machine == bi->machine &&
(hdr->e_ident[EI_OSABI] == bi->brand ||
strncmp((const char *)&hdr->e_ident[OLD_EI_BRAND],
bi->compat_3_brand, strlen(bi->compat_3_brand)) == 0))
return (bi);
}
/* Lacking a known brand, search for a recognized interpreter. */
if (interp != NULL) {
for (i = 0; i < MAX_BRANDS; i++) {
bi = elf_brand_list[i];
if (bi != NULL && hdr->e_machine == bi->machine &&
strcmp(interp, bi->interp_path) == 0)
return (bi);
}
}
/* Lacking a recognized interpreter, try the default brand */
for (i = 0; i < MAX_BRANDS; i++) {
bi = elf_brand_list[i];
if (bi != NULL && hdr->e_machine == bi->machine &&
__elfN(fallback_brand) == bi->brand)
return (bi);
}
return (NULL);
}
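
/*
 * Sanity-check an ELF header: the magic, class, data encoding, and
 * version must match this kernel, and at least one registered brand
 * must support the target machine.  Returns 0 or ENOEXEC.
 */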
static int
__elfN(check_header)(const Elf_Ehdr *hdr)
{
Elf_Brandinfo *bi;
int i;
if (!IS_ELF(*hdr) ||
hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
hdr->e_ident[EI_DATA] != ELF_TARG_DATA ||
hdr->e_ident[EI_VERSION] != EV_CURRENT)
return (ENOEXEC);
/*
* Make sure we have at least one brand for this machine.
*/
for (i = 0; i < MAX_BRANDS; i++) {
bi = elf_brand_list[i];
if (bi != NULL && bi->machine == hdr->e_machine)
break;
}
if (i == MAX_BRANDS)
return (ENOEXEC);
if (hdr->e_version != ELF_TARG_VER)
return (ENOEXEC);
return (0);
}
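
/*
 * Map a sub-page fragment [start, end): insert anonymous memory
 * covering the page in the target map, temporarily map the backing
 * object's page into exec_map, and copyout() the file data into place.
 */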
static int
__elfN(map_partial)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start, vm_offset_t end, vm_prot_t prot,
vm_prot_t max)
{
int error, rv;
vm_offset_t off;
vm_offset_t data_buf = 0;
/*
* Create the page if it doesn't exist yet. Ignore errors.
*/
vm_map_lock(map);
vm_map_insert(map, NULL, 0, trunc_page(start), round_page(end), max,
max, 0);
vm_map_unlock(map);
/*
* Find the page from the underlying object.
*/
if (object) {
vm_object_reference(object);
rv = vm_map_find(exec_map,
object,
trunc_page(offset),
&data_buf,
PAGE_SIZE,
TRUE,
VM_PROT_READ,
VM_PROT_ALL,
MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
return (rv);
}
off = offset - trunc_page(offset);
error = copyout((caddr_t)data_buf + off, (caddr_t)start,
end - start);
vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
if (error) {
return (KERN_FAILURE);
}
}
return (KERN_SUCCESS);
}
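
/*
 * Insert [start, end) into the target map.  Unaligned head and tail
 * fragments are handled by __elfN(map_partial)().  If the file offset
 * is page aligned, the object can be mapped directly; otherwise the
 * data must be copied in, a page at a time, through exec_map.
 */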
static int
__elfN(map_insert)(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
vm_offset_t start, vm_offset_t end, vm_prot_t prot,
vm_prot_t max, int cow)
{
vm_offset_t data_buf, off;
vm_size_t sz;
int error, rv;
if (start != trunc_page(start)) {
rv = __elfN(map_partial)(map, object, offset, start,
round_page(start), prot, max);
if (rv)
return (rv);
offset += round_page(start) - start;
start = round_page(start);
}
if (end != round_page(end)) {
rv = __elfN(map_partial)(map, object, offset +
trunc_page(end) - start, trunc_page(end), end, prot, max);
if (rv)
return (rv);
end = trunc_page(end);
}
if (end > start) {
if (offset & PAGE_MASK) {
/*
* The mapping is not page aligned. This means we have
* to copy the data. Sigh.
*/
rv = vm_map_find(map, 0, 0, &start, end - start,
FALSE, prot, max, 0);
if (rv)
return (rv);
data_buf = 0;
while (start < end) {
vm_object_reference(object);
rv = vm_map_find(exec_map,
object,
trunc_page(offset),
&data_buf,
2 * PAGE_SIZE,
TRUE,
VM_PROT_READ,
VM_PROT_ALL,
(MAP_COPY_ON_WRITE
| MAP_PREFAULT_PARTIAL));
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
return (rv);
}
off = offset - trunc_page(offset);
sz = end - start;
if (sz > PAGE_SIZE)
sz = PAGE_SIZE;
error = copyout((caddr_t)data_buf + off,
(caddr_t)start, sz);
vm_map_remove(exec_map, data_buf,
data_buf + 2 * PAGE_SIZE);
if (error) {
return (KERN_FAILURE);
}
start += sz;
}
rv = KERN_SUCCESS;
} else {
vm_map_lock(map);
rv = vm_map_insert(map, object, offset, start, end,
prot, max, cow);
vm_map_unlock(map);
}
return (rv);
} else {
return (KERN_SUCCESS);
}
}
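
/*
 * Map one loadable segment into the process.  The file-backed portion
 * is mapped copy-on-write from the executable's VM object; any bss
 * (memsz > filsz) is backed by anonymous memory, with the partial page
 * at the end of the file data copied in by hand.
 */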
static int
__elfN(load_section)(struct proc *p, struct vmspace *vmspace,
struct vnode *vp, vm_object_t object, vm_offset_t offset,
caddr_t vmaddr, size_t memsz, size_t filsz, vm_prot_t prot,
size_t pagesize)
{
size_t map_len;
vm_offset_t map_addr;
int error, rv, cow;
size_t copy_len;
vm_offset_t file_addr;
vm_offset_t data_buf = 0;
GIANT_REQUIRED;
error = 0;
/*
* It's necessary to fail if the filsz + offset taken from the
* header is greater than the actual file pager object's size.
* If we were to allow this, then the vm_map_find() below would
* walk right off the end of the file object and into the ether.
*
* While I'm here, might as well check for something else that
* is invalid: filsz cannot be greater than memsz.
*/
if ((off_t)filsz + offset > object->un_pager.vnp.vnp_size ||
filsz > memsz) {
uprintf("elf_load_section: truncated ELF file\n");
return (ENOEXEC);
}
#define trunc_page_ps(va, ps) ((va) & ~((ps) - 1))
#define round_page_ps(va, ps) (((va) + ((ps) - 1)) & ~((ps) - 1))
map_addr = trunc_page_ps((vm_offset_t)vmaddr, pagesize);
file_addr = trunc_page_ps(offset, pagesize);
/*
* We have two choices. We can either clear the data in the last page
* of an oversized mapping, or we can start the anon mapping a page
* early and copy the initialized data into that first page. We
* choose the second.
*/
if (memsz > filsz)
map_len = trunc_page_ps(offset + filsz, pagesize) - file_addr;
else
map_len = round_page_ps(offset + filsz, pagesize) - file_addr;
if (map_len != 0) {
vm_object_reference(object);
/* cow flags: don't dump readonly sections in core */
cow = MAP_COPY_ON_WRITE | MAP_PREFAULT |
(prot & VM_PROT_WRITE ? 0 : MAP_DISABLE_COREDUMP);
rv = __elfN(map_insert)(&vmspace->vm_map,
object,
file_addr, /* file offset */
map_addr, /* virtual start */
map_addr + map_len,/* virtual end */
prot,
VM_PROT_ALL,
cow);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
return (EINVAL);
}
/* we can stop now if we've covered it all */
if (memsz == filsz) {
return (0);
}
}
/*
* We have to get the remaining bit of the file into the first part
* of the oversized map segment. This is normally because the .data
* segment in the file is extended to provide bss. It's a neat idea
* to try and save a page, but it's a pain in the behind to implement.
*/
copy_len = (offset + filsz) - trunc_page_ps(offset + filsz, pagesize);
map_addr = trunc_page_ps((vm_offset_t)vmaddr + filsz, pagesize);
map_len = round_page_ps((vm_offset_t)vmaddr + memsz, pagesize) -
map_addr;
/* This had damn well better be true! */
if (map_len != 0) {
rv = __elfN(map_insert)(&vmspace->vm_map, NULL, 0, map_addr,
map_addr + map_len, VM_PROT_ALL, VM_PROT_ALL, 0);
if (rv != KERN_SUCCESS) {
return (EINVAL);
}
}
if (copy_len != 0) {
vm_offset_t off;
vm_object_reference(object);
rv = vm_map_find(exec_map,
object,
trunc_page(offset + filsz),
&data_buf,
PAGE_SIZE,
TRUE,
VM_PROT_READ,
VM_PROT_ALL,
MAP_COPY_ON_WRITE | MAP_PREFAULT_PARTIAL);
if (rv != KERN_SUCCESS) {
vm_object_deallocate(object);
return (EINVAL);
}
/* send the page fragment to user space */
off = trunc_page_ps(offset + filsz, pagesize) -
trunc_page(offset + filsz);
error = copyout((caddr_t)data_buf + off, (caddr_t)map_addr,
copy_len);
vm_map_remove(exec_map, data_buf, data_buf + PAGE_SIZE);
if (error) {
return (error);
}
}
/*
* set it to the specified protection.
* XXX had better undo the damage from pasting over the cracks here!
*/
vm_map_protect(&vmspace->vm_map, trunc_page(map_addr),
round_page(map_addr + map_len), prot, FALSE);
return (error);
}
/*
* Load the file "file" into memory. It may be either a shared object
* or an executable.
*
* The "addr" reference parameter is in/out. On entry, it specifies
* the address where a shared object should be loaded. If the file is
* an executable, this value is ignored. On exit, "addr" specifies
* where the file was actually loaded.
*
* The "entry" reference parameter is out only. On exit, it specifies
* the entry point for the loaded file.
*/
static int
__elfN(load_file)(struct proc *p, const char *file, u_long *addr,
u_long *entry, size_t pagesize)
{
struct {
struct nameidata nd;
struct vattr attr;
struct image_params image_params;
} *tempdata;
const Elf_Ehdr *hdr = NULL;
const Elf_Phdr *phdr = NULL;
struct nameidata *nd;
struct vmspace *vmspace = p->p_vmspace;
struct vattr *attr;
struct image_params *imgp;
vm_prot_t prot;
u_long rbase;
u_long base_addr = 0;
int error, i, numsegs;
if (curthread->td_proc != p)
panic("elf_load_file - thread"); /* XXXKSE DIAGNOSTIC */
tempdata = malloc(sizeof(*tempdata), M_TEMP, M_WAITOK);
nd = &tempdata->nd;
attr = &tempdata->attr;
imgp = &tempdata->image_params;
/*
* Initialize part of the common data
*/
imgp->proc = p;
imgp->userspace_argv = NULL;
imgp->userspace_envv = NULL;
imgp->attr = attr;
imgp->firstpage = NULL;
imgp->image_header = (char *)kmem_alloc_wait(exec_map, PAGE_SIZE);
imgp->object = NULL;
imgp->execlabel = NULL;
if (imgp->image_header == NULL) {
nd->ni_vp = NULL;
error = ENOMEM;
goto fail;
}
/* XXXKSE */
NDINIT(nd, LOOKUP, LOCKLEAF|FOLLOW, UIO_SYSSPACE, file, curthread);
if ((error = namei(nd)) != 0) {
nd->ni_vp = NULL;
goto fail;
}
NDFREE(nd, NDF_ONLY_PNBUF);
imgp->vp = nd->ni_vp;
/*
* Check permissions, modes, uid, etc on the file, and "open" it.
*/
error = exec_check_permissions(imgp);
if (error) {
VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
goto fail;
}
error = exec_map_first_page(imgp);
/*
* Also make certain that the interpreter stays the same, so set
* its VV_TEXT flag, too.
*/
if (error == 0)
nd->ni_vp->v_vflag |= VV_TEXT;
VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
vm_object_reference(imgp->object);
VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
if (error)
goto fail;
hdr = (const Elf_Ehdr *)imgp->image_header;
if ((error = __elfN(check_header)(hdr)) != 0)
goto fail;
if (hdr->e_type == ET_DYN)
rbase = *addr;
else if (hdr->e_type == ET_EXEC)
rbase = 0;
else {
error = ENOEXEC;
goto fail;
}
/* Only support headers that fit within the first page for now */
if ((hdr->e_phoff > PAGE_SIZE) ||
(hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
error = ENOEXEC;
goto fail;
}
phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
for (i = 0, numsegs = 0; i < hdr->e_phnum; i++) {
if (phdr[i].p_type == PT_LOAD) { /* Loadable segment */
prot = 0;
if (phdr[i].p_flags & PF_X)
prot |= VM_PROT_EXECUTE;
if (phdr[i].p_flags & PF_W)
prot |= VM_PROT_WRITE;
if (phdr[i].p_flags & PF_R)
prot |= VM_PROT_READ;
if ((error = __elfN(load_section)(p, vmspace,
nd->ni_vp, imgp->object, phdr[i].p_offset,
(caddr_t)(uintptr_t)phdr[i].p_vaddr + rbase,
phdr[i].p_memsz, phdr[i].p_filesz, prot,
pagesize)) != 0)
goto fail;
/*
* Establish the base address if this is the
* first segment.
*/
if (numsegs == 0)
base_addr = trunc_page(phdr[i].p_vaddr +
rbase);
numsegs++;
}
}
*addr = base_addr;
*entry = (unsigned long)hdr->e_entry + rbase;
fail:
if (imgp->firstpage)
exec_unmap_first_page(imgp);
if (imgp->image_header)
kmem_free_wakeup(exec_map, (vm_offset_t)imgp->image_header,
PAGE_SIZE);
if (imgp->object)
vm_object_deallocate(imgp->object);
if (nd->ni_vp)
vrele(nd->ni_vp);
free(tempdata, M_TEMP);
return (error);
}
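
/*
 * The ELF image activator: validate the header, map the PT_LOAD
 * segments, check them against the resource limits, load the
 * interpreter if one is named, and build the auxargs consumed by
 * __elfN(freebsd_fixup)().
 */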
static int
__CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
{
const Elf_Ehdr *hdr = (const Elf_Ehdr *)imgp->image_header;
const Elf_Phdr *phdr;
Elf_Auxargs *elf_auxargs = NULL;
struct vmspace *vmspace;
vm_prot_t prot;
u_long text_size = 0, data_size = 0, total_size = 0;
u_long text_addr = 0, data_addr = 0;
u_long seg_size, seg_addr;
u_long addr, entry = 0, proghdr = 0;
int error, i;
const char *interp = NULL;
Elf_Brandinfo *brand_info;
char *path;
struct thread *td = curthread;
struct sysentvec *sv;
GIANT_REQUIRED;
/*
* Do we have a valid ELF header?
*/
if (__elfN(check_header)(hdr) != 0 || hdr->e_type != ET_EXEC)
return (-1);
/*
* From here on down, we return an errno, not -1, as we've
* detected an ELF file.
*/
if ((hdr->e_phoff > PAGE_SIZE) ||
(hdr->e_phoff + hdr->e_phentsize * hdr->e_phnum) > PAGE_SIZE) {
/* Only support headers in the first page for now */
return (ENOEXEC);
}
phdr = (const Elf_Phdr *)(imgp->image_header + hdr->e_phoff);
/*
* From this point on, we may have resources that need to be freed.
*/
VOP_UNLOCK(imgp->vp, 0, td);
for (i = 0; i < hdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_INTERP: /* Path to interpreter */
if (phdr[i].p_filesz > MAXPATHLEN ||
phdr[i].p_offset + phdr[i].p_filesz > PAGE_SIZE) {
error = ENOEXEC;
goto fail;
}
interp = imgp->image_header + phdr[i].p_offset;
break;
default:
break;
}
}
brand_info = __elfN(get_brandinfo)(hdr, interp);
if (brand_info == NULL) {
uprintf("ELF binary type \"%u\" not known.\n",
hdr->e_ident[EI_OSABI]);
error = ENOEXEC;
goto fail;
}
sv = brand_info->sysvec;
if (interp != NULL && brand_info->interp_newpath != NULL)
interp = brand_info->interp_newpath;
if ((error = exec_extract_strings(imgp)) != 0)
goto fail;
exec_new_vmspace(imgp, sv);
vmspace = imgp->proc->p_vmspace;
for (i = 0; i < hdr->e_phnum; i++) {
switch (phdr[i].p_type) {
case PT_LOAD: /* Loadable segment */
prot = 0;
if (phdr[i].p_flags & PF_X)
prot |= VM_PROT_EXECUTE;
if (phdr[i].p_flags & PF_W)
prot |= VM_PROT_WRITE;
if (phdr[i].p_flags & PF_R)
prot |= VM_PROT_READ;
#if defined(__ia64__) && __ELF_WORD_SIZE == 32 && defined(IA32_ME_HARDER)
/*
* Some x86 binaries assume read == executable,
* notably the M3 runtime and therefore cvsup
*/
if (prot & VM_PROT_READ)
prot |= VM_PROT_EXECUTE;
#endif
if ((error = __elfN(load_section)(imgp->proc, vmspace,
imgp->vp, imgp->object, phdr[i].p_offset,
(caddr_t)(uintptr_t)phdr[i].p_vaddr,
phdr[i].p_memsz, phdr[i].p_filesz, prot,
sv->sv_pagesize)) != 0)
goto fail;
seg_addr = trunc_page(phdr[i].p_vaddr);
seg_size = round_page(phdr[i].p_memsz +
phdr[i].p_vaddr - seg_addr);
/*
* Is this .text or .data? We can't use
* VM_PROT_WRITE or VM_PROT_EXEC, it breaks the
* alpha terribly and possibly does other bad
* things so we stick to the old way of figuring
* it out: If the segment contains the program
* entry point, it's a text segment, otherwise it
* is a data segment.
*
* Note that obreak() assumes that data_addr +
* data_size == end of data load area, and the ELF
* file format expects segments to be sorted by
* address. If multiple data segments exist, the
* last one will be used.
*/
if (hdr->e_entry >= phdr[i].p_vaddr &&
hdr->e_entry < (phdr[i].p_vaddr +
phdr[i].p_memsz)) {
text_size = seg_size;
text_addr = seg_addr;
entry = (u_long)hdr->e_entry;
} else {
data_size = seg_size;
data_addr = seg_addr;
}
total_size += seg_size;
break;
case PT_PHDR: /* Program header table info */
proghdr = phdr[i].p_vaddr;
break;
default:
break;
}
}
if (data_addr == 0 && data_size == 0) {
data_addr = text_addr;
data_size = text_size;
}
/*
* Check limits. It should be safe to check the
* limits after loading the segments since we do
* not actually fault in all the segments' pages.
*/
PROC_LOCK(imgp->proc);
if (data_size > lim_cur(imgp->proc, RLIMIT_DATA) ||
text_size > maxtsiz ||
total_size > lim_cur(imgp->proc, RLIMIT_VMEM)) {
PROC_UNLOCK(imgp->proc);
error = ENOMEM;
goto fail;
}
vmspace->vm_tsize = text_size >> PAGE_SHIFT;
vmspace->vm_taddr = (caddr_t)(uintptr_t)text_addr;
vmspace->vm_dsize = data_size >> PAGE_SHIFT;
vmspace->vm_daddr = (caddr_t)(uintptr_t)data_addr;
/*
* We load the dynamic linker where a userland call
* to mmap(0, ...) would put it. The rationale behind this
* calculation is that it leaves room for the heap to grow to
* its maximum allowed size.
*/
addr = round_page((vm_offset_t)imgp->proc->p_vmspace->vm_daddr +
lim_max(imgp->proc, RLIMIT_DATA));
PROC_UNLOCK(imgp->proc);
imgp->entry_addr = entry;
imgp->proc->p_sysent = sv;
if (interp != NULL && brand_info->emul_path != NULL &&
brand_info->emul_path[0] != '\0') {
path = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
snprintf(path, MAXPATHLEN, "%s%s", brand_info->emul_path,
interp);
error = __elfN(load_file)(imgp->proc, path, &addr,
&imgp->entry_addr, sv->sv_pagesize);
free(path, M_TEMP);
if (error == 0)
interp = NULL;
}
if (interp != NULL) {
error = __elfN(load_file)(imgp->proc, interp, &addr,
&imgp->entry_addr, sv->sv_pagesize);
if (error != 0) {
uprintf("ELF interpreter %s not found\n", interp);
goto fail;
}
}
/*
* Construct auxargs table (used by the fixup routine)
*/
elf_auxargs = malloc(sizeof(Elf_Auxargs), M_TEMP, M_WAITOK);
elf_auxargs->execfd = -1;
elf_auxargs->phdr = proghdr;
elf_auxargs->phent = hdr->e_phentsize;
elf_auxargs->phnum = hdr->e_phnum;
elf_auxargs->pagesz = PAGE_SIZE;
elf_auxargs->base = addr;
elf_auxargs->flags = 0;
elf_auxargs->entry = entry;
elf_auxargs->trace = elf_trace;
imgp->auxargs = elf_auxargs;
imgp->interpreted = 0;
fail:
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
return (error);
}
#define suword __CONCAT(suword, __ELF_WORD_SIZE)
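
/*
 * Place the ELF auxiliary argument vector (the AT_* entries) on the
 * new process's stack just above the argument and environment
 * pointers, then push argc below the stack base.
 */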
int
__elfN(freebsd_fixup)(register_t **stack_base, struct image_params *imgp)
{
Elf_Auxargs *args = (Elf_Auxargs *)imgp->auxargs;
Elf_Addr *base;
Elf_Addr *pos;
base = (Elf_Addr *)*stack_base;
pos = base + (imgp->argc + imgp->envc + 2);
if (args->trace) {
AUXARGS_ENTRY(pos, AT_DEBUG, 1);
}
if (args->execfd != -1) {
AUXARGS_ENTRY(pos, AT_EXECFD, args->execfd);
}
AUXARGS_ENTRY(pos, AT_PHDR, args->phdr);
AUXARGS_ENTRY(pos, AT_PHENT, args->phent);
AUXARGS_ENTRY(pos, AT_PHNUM, args->phnum);
AUXARGS_ENTRY(pos, AT_PAGESZ, args->pagesz);
AUXARGS_ENTRY(pos, AT_FLAGS, args->flags);
AUXARGS_ENTRY(pos, AT_ENTRY, args->entry);
AUXARGS_ENTRY(pos, AT_BASE, args->base);
AUXARGS_ENTRY(pos, AT_NULL, 0);
free(imgp->auxargs, M_TEMP);
imgp->auxargs = NULL;
base--;
suword(base, (long)imgp->argc);
*stack_base = (register_t *)base;
return (0);
}
/*
* Code for generating ELF core dumps.
*/
typedef void (*segment_callback)(vm_map_entry_t, void *);
/* Closure for cb_put_phdr(). */
struct phdr_closure {
Elf_Phdr *phdr; /* Program header to fill in */
Elf_Off offset; /* Offset of segment in core file */
};
/* Closure for cb_size_segment(). */
struct sseg_closure {
int count; /* Count of writable segments. */
size_t size; /* Total size of all writable segments. */
};
static void cb_put_phdr(vm_map_entry_t, void *);
static void cb_size_segment(vm_map_entry_t, void *);
static void each_writable_segment(struct proc *, segment_callback, void *);
static int __elfN(corehdr)(struct thread *, struct vnode *, struct ucred *,
int, void *, size_t);
static void __elfN(puthdr)(struct proc *, void *, size_t *,
const prstatus_t *, const prfpregset_t *, const prpsinfo_t *, int);
static void __elfN(putnote)(void *, size_t *, const char *, int,
const void *, size_t);
extern int osreldate;
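
/*
 * Write an ELF core dump of td's process to vp: size the writable
 * segments, emit the header and note area, then stream out each
 * segment's memory, refusing up front if the result would exceed the
 * coredump size limit.
 */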
int
__elfN(coredump)(td, vp, limit)
struct thread *td;
register struct vnode *vp;
off_t limit;
{
register struct proc *p = td->td_proc;
register struct ucred *cred = td->td_ucred;
int error = 0;
struct sseg_closure seginfo;
void *hdr;
size_t hdrsize;
/* Size the program segments. */
seginfo.count = 0;
seginfo.size = 0;
each_writable_segment(p, cb_size_segment, &seginfo);
/*
* Calculate the size of the core file header area by making
* a dry run of generating it. Nothing is written, but the
* size is calculated.
*/
hdrsize = 0;
__elfN(puthdr)((struct proc *)NULL, (void *)NULL, &hdrsize,
(const prstatus_t *)NULL, (const prfpregset_t *)NULL,
(const prpsinfo_t *)NULL, seginfo.count);
if (hdrsize + seginfo.size >= limit)
return (EFAULT);
/*
* Allocate memory for building the header, fill it up,
* and write it out.
*/
hdr = malloc(hdrsize, M_TEMP, M_WAITOK);
if (hdr == NULL) {
return (EINVAL);
}
error = __elfN(corehdr)(td, vp, cred, seginfo.count, hdr, hdrsize);
/* Write the contents of all of the writable segments. */
if (error == 0) {
Elf_Phdr *php;
off_t offset;
int i;
php = (Elf_Phdr *)((char *)hdr + sizeof(Elf_Ehdr)) + 1;
offset = hdrsize;
for (i = 0; i < seginfo.count; i++) {
error = vn_rdwr_inchunks(UIO_WRITE, vp,
(caddr_t)(uintptr_t)php->p_vaddr,
php->p_filesz, offset, UIO_USERSPACE,
IO_UNIT | IO_DIRECT, cred, NOCRED, (int *)NULL,
curthread); /* XXXKSE */
if (error != 0)
break;
offset += php->p_filesz;
php++;
}
}
free(hdr, M_TEMP);
return (error);
}
/*
* A callback for each_writable_segment() to write out the segment's
* program header entry.
*/
static void
cb_put_phdr(entry, closure)
vm_map_entry_t entry;
void *closure;
{
struct phdr_closure *phc = (struct phdr_closure *)closure;
Elf_Phdr *phdr = phc->phdr;
phc->offset = round_page(phc->offset);
phdr->p_type = PT_LOAD;
phdr->p_offset = phc->offset;
phdr->p_vaddr = entry->start;
phdr->p_paddr = 0;
phdr->p_filesz = phdr->p_memsz = entry->end - entry->start;
phdr->p_align = PAGE_SIZE;
phdr->p_flags = 0;
if (entry->protection & VM_PROT_READ)
phdr->p_flags |= PF_R;
if (entry->protection & VM_PROT_WRITE)
phdr->p_flags |= PF_W;
if (entry->protection & VM_PROT_EXECUTE)
phdr->p_flags |= PF_X;
phc->offset += phdr->p_filesz;
phc->phdr++;
}
/*
* A callback for each_writable_segment() to gather information about
* the number of segments and their total size.
*/
static void
cb_size_segment(entry, closure)
vm_map_entry_t entry;
void *closure;
{
struct sseg_closure *ssc = (struct sseg_closure *)closure;
ssc->count++;
ssc->size += entry->end - entry->start;
}
/*
* For each writable segment in the process's memory map, call the given
* function with a pointer to the map entry and some arbitrary
* caller-supplied data.
*/
static void
each_writable_segment(p, func, closure)
struct proc *p;
segment_callback func;
void *closure;
{
vm_map_t map = &p->p_vmspace->vm_map;
vm_map_entry_t entry;
for (entry = map->header.next; entry != &map->header;
entry = entry->next) {
vm_object_t obj;
/*
* Don't dump inaccessible mappings, deal with legacy
* coredump mode.
*
* Note that read-only segments related to the elf binary
* are marked MAP_ENTRY_NOCOREDUMP now so we no longer
* need to arbitrarily ignore such segments.
*/
if (elf_legacy_coredump) {
if ((entry->protection & VM_PROT_RW) != VM_PROT_RW)
continue;
} else {
if ((entry->protection & VM_PROT_ALL) == 0)
continue;
}
/*
* Don't include a memory segment in the coredump if
* MAP_NOCORE is set in mmap(2) or MADV_NOCORE in
* madvise(2). Do not dump submaps (i.e. parts of the
* kernel map).
*/
if (entry->eflags & (MAP_ENTRY_NOCOREDUMP|MAP_ENTRY_IS_SUB_MAP))
continue;
if ((obj = entry->object.vm_object) == NULL)
continue;
/* Find the deepest backing object. */
while (obj->backing_object != NULL)
obj = obj->backing_object;
/* Ignore memory-mapped devices and such things. */
if (obj->type != OBJT_DEFAULT &&
obj->type != OBJT_SWAP &&
obj->type != OBJT_VNODE)
continue;
(*func)(entry, closure);
}
}
/*
* Write the core file header to the file, including padding up to
* the page boundary.
*/
static int
__elfN(corehdr)(td, vp, cred, numsegs, hdr, hdrsize)
struct thread *td;
struct vnode *vp;
struct ucred *cred;
int numsegs;
size_t hdrsize;
void *hdr;
{
struct {
prstatus_t status;
prfpregset_t fpregset;
prpsinfo_t psinfo;
} *tempdata;
struct proc *p = td->td_proc;
size_t off;
prstatus_t *status;
prfpregset_t *fpregset;
prpsinfo_t *psinfo;
tempdata = malloc(sizeof(*tempdata), M_TEMP, M_ZERO | M_WAITOK);
status = &tempdata->status;
fpregset = &tempdata->fpregset;
psinfo = &tempdata->psinfo;
/* Gather the information for the header. */
status->pr_version = PRSTATUS_VERSION;
status->pr_statussz = sizeof(prstatus_t);
status->pr_gregsetsz = sizeof(gregset_t);
status->pr_fpregsetsz = sizeof(fpregset_t);
status->pr_osreldate = osreldate;
status->pr_cursig = p->p_sig;
status->pr_pid = p->p_pid;
fill_regs(td, &status->pr_reg);
fill_fpregs(td, fpregset);
psinfo->pr_version = PRPSINFO_VERSION;
psinfo->pr_psinfosz = sizeof(prpsinfo_t);
strlcpy(psinfo->pr_fname, p->p_comm, sizeof(psinfo->pr_fname));
/* XXX - We don't fill in the command line arguments properly yet. */
strlcpy(psinfo->pr_psargs, p->p_comm, sizeof(psinfo->pr_psargs));
/* Fill in the header. */
bzero(hdr, hdrsize);
off = 0;
__elfN(puthdr)(p, hdr, &off, status, fpregset, psinfo, numsegs);
free(tempdata, M_TEMP);
/* Write it to the core file. */
return (vn_rdwr_inchunks(UIO_WRITE, vp, hdr, hdrsize, (off_t)0,
UIO_SYSSPACE, IO_UNIT | IO_DIRECT, cred, NOCRED, NULL,
td)); /* XXXKSE */
}
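
/*
 * Lay out the core file header: the ELF header, one program header per
 * writable segment plus one for the note segment, and the NT_PRSTATUS,
 * NT_FPREGSET, and NT_PRPSINFO notes.  When dst is NULL only the sizes
 * are computed; *off tracks the running file offset either way.
 */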
static void
__elfN(puthdr)(struct proc *p, void *dst, size_t *off, const prstatus_t *status,
const prfpregset_t *fpregset, const prpsinfo_t *psinfo, int numsegs)
{
size_t ehoff;
size_t phoff;
size_t noteoff;
size_t notesz;
ehoff = *off;
*off += sizeof(Elf_Ehdr);
phoff = *off;
*off += (numsegs + 1) * sizeof(Elf_Phdr);
noteoff = *off;
__elfN(putnote)(dst, off, "FreeBSD", NT_PRSTATUS, status,
sizeof *status);
__elfN(putnote)(dst, off, "FreeBSD", NT_FPREGSET, fpregset,
sizeof *fpregset);
__elfN(putnote)(dst, off, "FreeBSD", NT_PRPSINFO, psinfo,
sizeof *psinfo);
notesz = *off - noteoff;
/* Align up to a page boundary for the program segments. */
*off = round_page(*off);
if (dst != NULL) {
Elf_Ehdr *ehdr;
Elf_Phdr *phdr;
struct phdr_closure phc;
/*
* Fill in the ELF header.
*/
ehdr = (Elf_Ehdr *)((char *)dst + ehoff);
ehdr->e_ident[EI_MAG0] = ELFMAG0;
ehdr->e_ident[EI_MAG1] = ELFMAG1;
ehdr->e_ident[EI_MAG2] = ELFMAG2;
ehdr->e_ident[EI_MAG3] = ELFMAG3;
ehdr->e_ident[EI_CLASS] = ELF_CLASS;
ehdr->e_ident[EI_DATA] = ELF_DATA;
ehdr->e_ident[EI_VERSION] = EV_CURRENT;
ehdr->e_ident[EI_OSABI] = ELFOSABI_FREEBSD;
ehdr->e_ident[EI_ABIVERSION] = 0;
ehdr->e_ident[EI_PAD] = 0;
ehdr->e_type = ET_CORE;
ehdr->e_machine = ELF_ARCH;
ehdr->e_version = EV_CURRENT;
ehdr->e_entry = 0;
ehdr->e_phoff = phoff;
ehdr->e_flags = 0;
ehdr->e_ehsize = sizeof(Elf_Ehdr);
ehdr->e_phentsize = sizeof(Elf_Phdr);
ehdr->e_phnum = numsegs + 1;
ehdr->e_shentsize = sizeof(Elf_Shdr);
ehdr->e_shnum = 0;
ehdr->e_shstrndx = SHN_UNDEF;
/*
* Fill in the program header entries.
*/
phdr = (Elf_Phdr *)((char *)dst + phoff);
/* The note segment. */
phdr->p_type = PT_NOTE;
phdr->p_offset = noteoff;
phdr->p_vaddr = 0;
phdr->p_paddr = 0;
phdr->p_filesz = notesz;
phdr->p_memsz = 0;
phdr->p_flags = 0;
phdr->p_align = 0;
phdr++;
/* All the writable segments from the program. */
phc.phdr = phdr;
phc.offset = *off;
each_writable_segment(p, cb_put_phdr, &phc);
}
}
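
/*
 * Emit a single ELF note (header, name, descriptor) at *off, padding
 * the name and descriptor to Elf_Size alignment.  As in puthdr(), a
 * NULL dst means only the size is accumulated.
 */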
static void
__elfN(putnote)(void *dst, size_t *off, const char *name, int type,
const void *desc, size_t descsz)
{
Elf_Note note;
note.n_namesz = strlen(name) + 1;
note.n_descsz = descsz;
note.n_type = type;
if (dst != NULL)
bcopy(&note, (char *)dst + *off, sizeof note);
*off += sizeof note;
if (dst != NULL)
bcopy(name, (char *)dst + *off, note.n_namesz);
*off += roundup2(note.n_namesz, sizeof(Elf_Size));
if (dst != NULL)
bcopy(desc, (char *)dst + *off, note.n_descsz);
*off += roundup2(note.n_descsz, sizeof(Elf_Size));
}
/*
* Tell kern_execve.c about it, with a little help from the linker.
*/
static struct execsw __elfN(execsw) = {
__CONCAT(exec_, __elfN(imgact)),
__XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
};
EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));