/*-
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *);
static int convert_prot(int);   /* Elf flags -> mmap protection */
static int convert_flags(int);  /* Elf flags -> mmap flags */

/*
 * Map a shared object into memory.  The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object.  Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Off base_offset;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;

    hdr = get_elf_header(fd, path);
    if (hdr == NULL)
        return (NULL);

    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
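    /*
     * Default stack permissions; overridden below if the object carries a
     * PT_GNU_STACK program header.
     */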
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
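    /*
     * nsegs ends up as the index of the last PT_LOAD entry, so the number
     * of loadable segments is nsegs + 1; segs[] collects the PT_LOAD
     * headers in file order.
     */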
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                return NULL;
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;
        }

        ++phdr;
    }
    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        return NULL;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        return NULL;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_offset = trunc_page(segs[0]->p_offset);
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = hdr->e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL;
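
    /*
     * Reserve the entire region with a single PROT_NONE anonymous mapping;
     * the individual segments are then mapped over it below.  This keeps
     * the reservation contiguous and free of unrelated mappings.
     */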
    mapbase = mmap(base_addr, mapsize, PROT_NONE, MAP_ANON | MAP_PRIVATE |
      MAP_NOCORE, -1, 0);
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
          path, strerror(errno));
        return NULL;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
          path, base_addr, mapbase);
        munmap(mapbase, mapsize);
        return NULL;
    }

    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
          data_flags, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path, strerror(errno));
            return NULL;
        }

        /* Do BSS setup */
        if (segs[i]->p_filesz != segs[i]->p_memsz) {

            /* Clear any BSS in the last page of the segment. */
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

            if ((nclear = data_vlimit - clear_vaddr) > 0) {
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 && -1 ==
                  mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        strerror(errno));
                    return NULL;
                }

                memset(clear_addr, 0, nclear);

                /* Reset the data protection back */
                if ((data_prot & PROT_WRITE) == 0)
                    mprotect(clear_page, PAGE_SIZE, data_prot);
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) {       /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                  data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        strerror(errno));
                    return NULL;
                }
            }
        }

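        /*
         * If no PT_PHDR entry was seen, check whether this segment's file
         * range covers the program header table; if so, record where the
         * headers appear in the mapped image.
         */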
        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
          (data_vlimit - data_vaddr + data_offset) >=
          (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
      base_vaddr;
    obj->vaddrbase = base_vaddr;
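    /*
     * relocbase is the difference between the run-time and link-time
     * addresses; adding it to a link-time virtual address gives the
     * corresponding address within the mapped image.
     */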
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry);
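    /*
     * Use the program headers inside the mapped image when they are there;
     * otherwise keep a private copy taken from the file.
     */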
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *) (obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            return NULL;
        }
        memcpy((char *)obj->phdr, (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr);
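    /*
     * The object has a PT_TLS segment: bump the DTV generation, assign a
     * new TLS module index, and record the block's size, alignment, and
     * initialization image.
     */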
    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    return obj;
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path)
{
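    /*
     * The header is read into static storage, so the pointer returned by
     * this function is only valid until the next call.
     */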
    static union {
        Elf_Ehdr hdr;
        char buf[PAGE_SIZE];
    } u;
    ssize_t nbytes;

    if ((nbytes = pread(fd, u.buf, PAGE_SIZE, 0)) == -1) {
        _rtld_error("%s: read error: %s", path, strerror(errno));
        return NULL;
    }

    /* Make sure the file is valid */
    if (nbytes < (ssize_t)sizeof(Elf_Ehdr) || !IS_ELF(u.hdr)) {
        _rtld_error("%s: invalid file format", path);
        return NULL;
    }
    if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS
      || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        return NULL;
    }
    if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT
      || u.hdr.e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        return NULL;
    }
    if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        return NULL;
    }
    if (u.hdr.e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        return NULL;
    }

    /*
     * We rely on the program header being in the first page.  This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice.  And, it simplifies things considerably.
     */
    if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
            "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        return NULL;
    }
    if (u.hdr.e_phoff + u.hdr.e_phnum * sizeof(Elf_Phdr) > (size_t)nbytes) {
        _rtld_error("%s: program header too large", path);
        return NULL;
    }

    return (&u.hdr);
}

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
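    /*
     * obj->rpath is a separate allocation only when it was rewritten for
     * $ORIGIN expansion; otherwise it points into the mapped string table.
     */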
    if (obj->z_origin)
        free(obj->rpath);
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free((void *)obj->phdr);
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for MMAP.
 */
static int
convert_prot(int elfflags)
{
    int prot = 0;
    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE; /* All mappings are private */

    /*
     * Readonly mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}