/*-
 * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra.
 * Copyright 2003 Alexander Kabaev <kan@FreeBSD.ORG>.
 * Copyright 2009-2013 Konstantin Belousov <kib@FreeBSD.ORG>.
 * Copyright 2012 John Marino <draco@marino.st>.
 * Copyright 2014-2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Dynamic linker for ELF.
 *
 * John Polstra <jdp@polstra.com>.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/mount.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/ktrace.h>

#include <dlfcn.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"
#include "libmap.h"
#include "paths.h"
#include "rtld_tls.h"
#include "rtld_printf.h"
#include "rtld_utrace.h"
#include "notes.h"

/* Types. */
typedef void (*func_ptr_type)();
typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg);

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void digest_dynamic1(Obj_Entry *, int, const Elf_Dyn **,
    const Elf_Dyn **, const Elf_Dyn **);
static void digest_dynamic2(Obj_Entry *, const Elf_Dyn *, const Elf_Dyn *,
    const Elf_Dyn *);
static void digest_dynamic(Obj_Entry *, int);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static int dlclose_locked(void *, RtldLockState *);
static Obj_Entry *dlopen_object(const char *name, int fd, Obj_Entry *refobj,
    int lo_flags, int mode, RtldLockState *lockstate);
static Obj_Entry *do_load_object(int, const char *, char *, struct stat *, int);
static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static void *fill_search_info(const char *, size_t, void *);
static char *find_library(const char *, const Obj_Entry *, int *);
static const char *gethints(bool);
static void hold_object(Obj_Entry *);
static void unhold_object(Obj_Entry *);
static void init_dag(Obj_Entry *);
static void init_marker(Obj_Entry *);
static void init_pagesizes(Elf_Auxinfo **aux_info);
static void init_rtld(caddr_t, Elf_Auxinfo **);
static void initlist_add_neededs(Needed_Entry *, Objlist *);
static void initlist_add_objects(Obj_Entry *, Obj_Entry *, Objlist *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static void load_filtees(Obj_Entry *, int flags, RtldLockState *);
static void unload_filtees(Obj_Entry *, RtldLockState *);
static int load_needed_objects(Obj_Entry *, int);
static int load_preload_objects(void);
static Obj_Entry *load_object(const char *, int fd, const Obj_Entry *, int);
static void map_stacks_exec(RtldLockState *);
static int obj_enforce_relro(Obj_Entry *);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *, Obj_Entry *, RtldLockState *);
static void objlist_call_init(Objlist *, RtldLockState *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_put_after(Objlist *, Obj_Entry *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static int open_binary_fd(const char *argv0, bool search_in_path);
static int parse_args(char* argv[], int argc, bool *use_pathp, int *fdp);
static int parse_integer(const char *);
static void *path_enumerate(const char *, path_enum_proc, void *);
static void print_usage(const char *argv0);
static void release_object(Obj_Entry *);
static int relocate_object_dag(Obj_Entry *root, bool bind_now,
    Obj_Entry *rtldobj, int flags, RtldLockState *lockstate);
static int relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
    int flags, RtldLockState *lockstate);
static int relocate_objects(Obj_Entry *, bool, Obj_Entry *, int,
    RtldLockState *);
static int resolve_objects_ifunc(Obj_Entry *first, bool bind_now,
    int flags, RtldLockState *lockstate);
static int rtld_dirname(const char *, char *);
static int rtld_dirname_abs(const char *, char *);
static void *rtld_dlopen(const char *name, int fd, int mode);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static char *search_library_pathfds(const char *, const char *, int *);
static const void **get_program_var_addr(const char *, RtldLockState *);
static void set_program_var(const char *, const void *);
static int symlook_default(SymLook *, const Obj_Entry *refobj);
static int symlook_global(SymLook *, DoneList *);
static void symlook_init_from_req(SymLook *, const SymLook *);
static int symlook_list(SymLook *, const Objlist *, DoneList *);
static int symlook_needed(SymLook *, const Needed_Entry *, DoneList *);
static int symlook_obj1_sysv(SymLook *, const Obj_Entry *);
static int symlook_obj1_gnu(SymLook *, const Obj_Entry *);
static void trace_loaded_objects(Obj_Entry *);
static void unlink_object(Obj_Entry *);
static void unload_object(Obj_Entry *, RtldLockState *lockstate);
static void unref_dag(Obj_Entry *);
static void ref_dag(Obj_Entry *);
static char *origin_subst_one(Obj_Entry *, char *, const char *,
    const char *, bool);
static char *origin_subst(Obj_Entry *, char *);
static bool obj_resolve_origin(Obj_Entry *obj);
static void preinit_main(void);
static int rtld_verify_versions(const Objlist *);
static int rtld_verify_object_versions(Obj_Entry *);
static void object_add_name(Obj_Entry *, const char *);
static int object_match_name(const Obj_Entry *, const char *);
static void ld_utrace_log(int, void *, void *, size_t, int, const char *);
static void rtld_fill_dl_phdr_info(const Obj_Entry *obj,
    struct dl_phdr_info *phdr_info);
static uint32_t gnu_hash(const char *);
static bool matched_symbol(SymLook *, const Obj_Entry *, Sym_Match_Result *,
    const unsigned long);

void r_debug_state(struct r_debug *, struct link_map *) __noinline __exported;
void _r_debug_postinit(struct link_map *) __noinline __exported;

int __sys_openat(int, const char *, int, ...);

/*
 * Data declarations.
 */
static char *error_message;     /* Message for dlerror(), or NULL */
struct r_debug r_debug __exported;      /* for GDB; */
static bool libmap_disable;     /* Disable libmap */
static bool ld_loadfltr;        /* Immediate filters processing */
static char *libmap_override;   /* Maps to use in addition to libmap.conf */
static bool trust;              /* False for setuid and setgid programs */
static bool dangerous_ld_env;   /* True if environment variables have been
                                   used to affect the libraries loaded */
bool ld_bind_not;               /* Disable PLT update */
static char *ld_bind_now;       /* Environment variable for immediate binding */
static char *ld_debug;          /* Environment variable for debugging */
static char *ld_library_path;   /* Environment variable for search path */
static char *ld_library_dirs;   /* Environment variable for library descriptors */
static char *ld_preload;        /* Environment variable for libraries to
                                   load first */
static char *ld_elf_hints_path; /* Environment variable for alternative hints path */
static char *ld_tracing;        /* Called from ldd to print libs */
static char *ld_utrace;         /* Use utrace() to log events. */
static struct obj_entry_q obj_list;     /* Queue of all loaded objects */
static Obj_Entry *obj_main;     /* The main program shared object */
static Obj_Entry obj_rtld;      /* The dynamic linker shared object */
static unsigned int obj_count;  /* Number of objects in obj_list */
static unsigned int obj_loads;  /* Number of loads of objects (gen count) */

static Objlist list_global =    /* Objects dlopened with RTLD_GLOBAL */
  STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =      /* Objects loaded at program startup */
  STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =      /* Objects needing fini() calls */
  STAILQ_HEAD_INITIALIZER(list_fini);

Elf_Sym sym_zero;               /* For resolving undefined weak refs. */
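
/*
 * GDB_STATE() records a link-map state transition for the debugger: it
 * updates r_debug.r_state and then calls r_debug_state(), the hook on
 * which a debugger sets a breakpoint to learn that the set of loaded
 * objects has changed.
 */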
#define GDB_STATE(s,m)  r_debug.r_state = s; r_debug_state(&r_debug,m);

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC

int dlclose(void *) __exported;
char *dlerror(void) __exported;
void *dlopen(const char *, int) __exported;
void *fdlopen(int, int) __exported;
void *dlsym(void *, const char *) __exported;
dlfunc_t dlfunc(void *, const char *) __exported;
void *dlvsym(void *, const char *, const char *) __exported;
int dladdr(const void *, Dl_info *) __exported;
void dllockinit(void *, void *(*)(void *), void (*)(void *), void (*)(void *),
    void (*)(void *), void (*)(void *), void (*)(void *)) __exported;
int dlinfo(void *, int , void *) __exported;
int dl_iterate_phdr(__dl_iterate_hdr_callback, void *) __exported;
int _rtld_addr_phdr(const void *, struct dl_phdr_info *) __exported;
int _rtld_get_stack_prot(void) __exported;
int _rtld_is_dlopened(void *) __exported;
void _rtld_error(const char *, ...) __exported;

int npagesizes, osreldate;
size_t *pagesizes;

long __stack_chk_guard[8] = {0, 0, 0, 0, 0, 0, 0, 0};

static int stack_prot = PROT_READ | PROT_WRITE | RTLD_DEFAULT_STACK_EXEC;
static int max_stack_flags;

/*
 * Global declarations normally provided by crt1.  The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Used to pass argc, argv to init functions.
 */
int main_argc;
char **main_argv;

/*
 * Globals to control TLS allocation.
 */
size_t tls_last_offset;         /* Static TLS offset of last module */
size_t tls_last_size;           /* Static TLS size of last module */
size_t tls_static_space;        /* Static TLS space allocated */
size_t tls_static_max_align;
int tls_dtv_generation = 1;     /* Used to detect when dtv size changes */
int tls_max_index = 1;          /* Largest module index allocated */

bool ld_library_path_rpath = false;

/*
 * Globals for path names, and such
 */
char *ld_elf_hints_default = _PATH_ELF_HINTS;
char *ld_path_libmap_conf = _PATH_LIBMAP_CONF;
char *ld_path_rtld = _PATH_RTLD;
char *ld_standard_library_path = STANDARD_LIBRARY_PATH;
char *ld_env_prefix = LD_;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects.  Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
#define donelist_init(dlp)                                      \
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),   \
    assert((dlp)->objs != NULL),                                \
    (dlp)->num_alloc = obj_count,                               \
    (dlp)->num_used = 0)
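
/*
 * Typical use (e.g. in the symlook_* routines declared above): declare a
 * DoneList on the stack, run donelist_init() on it, and consult
 * donelist_check() while walking an object DAG so that each object is
 * visited only once.
 */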

#define LD_UTRACE(e, h, mb, ms, r, n) do {                      \
        if (ld_utrace != NULL)                                  \
                ld_utrace_log(e, h, mb, ms, r, n);              \
} while (0)
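
/*
 * Emit a utrace(2) record describing an rtld event (such as dlopen/dlclose
 * or object load/unload) when LD_UTRACE is set; the records can be
 * captured with ktrace(1) and examined with kdump(1).
 */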
static void
ld_utrace_log(int event, void *handle, void *mapbase, size_t mapsize,
    int refcnt, const char *name)
{
    struct utrace_rtld ut;
    static const char rtld_utrace_sig[RTLD_UTRACE_SIG_SZ] = RTLD_UTRACE_SIG;

    memcpy(ut.sig, rtld_utrace_sig, sizeof(ut.sig));
    ut.event = event;
    ut.handle = handle;
    ut.mapbase = mapbase;
    ut.mapsize = mapsize;
    ut.refcnt = refcnt;
    bzero(ut.name, sizeof(ut.name));
    if (name)
        strlcpy(ut.name, name, sizeof(ut.name));
    utrace(&ut, sizeof(ut));
}

#ifdef RTLD_VARIANT_ENV_NAMES
/*
 * Construct the environment variable name based on the type of binary
 * that is running.
 */
static inline const char *
_LD(const char *var)
{
    static char buffer[128];

    strlcpy(buffer, ld_env_prefix, sizeof(buffer));
    strlcat(buffer, var, sizeof(buffer));
    return (buffer);
}
#else
#define _LD(x) LD_ x
#endif
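
/*
 * Example: getenv(_LD("PRELOAD")) reads LD_PRELOAD; when
 * RTLD_VARIANT_ENV_NAMES is defined the name is built at run time from
 * ld_env_prefix, e.g. LD_32_PRELOAD for the 32-bit compatibility linker.
 */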

/*
 * Main entry point for dynamic linking.  The first argument is the
 * stack pointer.  The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC.  Following that in the stack is a null-terminated sequence
 * of pointers to argument strings.  Then comes a null-terminated
 * sequence of pointers to environment strings.  Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux, *auxp, *auxpf, *aux_info[AT_COUNT];
    Objlist_Entry *entry;
    Obj_Entry *last_interposer, *obj, *preload_tail;
    const Elf_Phdr *phdr;
    Objlist initlist;
    RtldLockState lockstate;
    struct stat st;
    Elf_Addr *argcp;
    char **argv, *argv0, **env, **envp, *kexecpath, *library_path_rpath;
    caddr_t imgentry;
    char buf[MAXPATHLEN];
    int argc, fd, i, mib[2], phnum, rtld_argc;
    size_t len;
    bool dir_enable, explicit_fd, search_in_path;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned.  It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argcp = sp;
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;     /* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)  /* Skip over environment, and NULL terminator */
        ;
    aux = (Elf_Auxinfo *) sp;

    /* Digest the auxiliary vector. */
    for (i = 0; i < AT_COUNT; i++)
        aux_info[i] = NULL;
    for (auxp = aux; auxp->a_type != AT_NULL; auxp++) {
        if (auxp->a_type < AT_COUNT)
            aux_info[auxp->a_type] = auxp;
    }

    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr, aux_info);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;
    main_argc = argc;
    main_argv = argv;
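
    /*
     * Set up the stack-smashing-protector guard: prefer the canary passed
     * in by the kernel through AT_CANARY, and otherwise fall back to
     * random bytes obtained with the CTL_KERN/KERN_ARND sysctl.
     */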
    if (aux_info[AT_CANARY] != NULL &&
        aux_info[AT_CANARY]->a_un.a_ptr != NULL) {
        i = aux_info[AT_CANARYLEN]->a_un.a_val;
        if (i > sizeof(__stack_chk_guard))
            i = sizeof(__stack_chk_guard);
        memcpy(__stack_chk_guard, aux_info[AT_CANARY]->a_un.a_ptr, i);
    } else {
        mib[0] = CTL_KERN;
        mib[1] = KERN_ARND;

        len = sizeof(__stack_chk_guard);
        if (sysctl(mib, 2, __stack_chk_guard, &len, NULL, 0) == -1 ||
            len != sizeof(__stack_chk_guard)) {
            /* If sysctl was unsuccessful, use the "terminator canary". */
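            /*
             * The NUL, newline and 0xff bytes are chosen to terminate the
             * common string functions (str*, gets, fgets), which makes it
             * harder for a string-based overflow to rewrite the canary
             * without changing it.
             */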
            ((unsigned char *)(void *)__stack_chk_guard)[0] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[1] = 0;
            ((unsigned char *)(void *)__stack_chk_guard)[2] = '\n';
            ((unsigned char *)(void *)__stack_chk_guard)[3] = 255;
        }
    }

    trust = !issetugid();

    md_abi_variant_hook(aux_info);
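
    /*
     * If the kernel supplied an already-open descriptor for the main
     * binary (AT_EXECFD), use it.  Otherwise, if the program headers we
     * were given are rtld's own, rtld was executed directly and has to
     * locate and open the target binary itself ("direct exec" mode).
     */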
    fd = -1;
    if (aux_info[AT_EXECFD] != NULL) {
        fd = aux_info[AT_EXECFD]->a_un.a_val;
    } else {
        assert(aux_info[AT_PHDR] != NULL);
        phdr = (const Elf_Phdr *)aux_info[AT_PHDR]->a_un.a_ptr;
        if (phdr == obj_rtld.phdr) {
            if (!trust) {
                rtld_printf("Tainted process refusing to run binary %s\n",
                  argv0);
                rtld_die();
            }
            dbg("opening main program in direct exec mode");
            if (argc >= 2) {
                rtld_argc = parse_args(argv, argc, &search_in_path, &fd);
                argv0 = argv[rtld_argc];
                explicit_fd = (fd != -1);
                if (!explicit_fd)
                    fd = open_binary_fd(argv0, search_in_path);
                if (fstat(fd, &st) == -1) {
                    _rtld_error("failed to fstat FD %d (%s): %s", fd,
                      explicit_fd ? "user-provided descriptor" : argv0,
                      rtld_strerror(errno));
                    rtld_die();
                }

                /*
                 * Rough emulation of the permission checks done by
                 * execve(2): only Unix DACs are checked, ACLs are
                 * ignored.  Preserve the semantics of denying the owner
                 * execute access when the owner x bit is cleared, even
                 * if the others x bit is enabled.
                 * mmap(2) does not allow mapping with PROT_EXEC if the
                 * binary's file comes from a noexec mount.  We cannot
                 * set VV_TEXT on the binary.
                 */
                dir_enable = false;
                if (st.st_uid == geteuid()) {
                    if ((st.st_mode & S_IXUSR) != 0)
                        dir_enable = true;
                } else if (st.st_gid == getegid()) {
                    if ((st.st_mode & S_IXGRP) != 0)
                        dir_enable = true;
                } else if ((st.st_mode & S_IXOTH) != 0) {
                    dir_enable = true;
                }
                if (!dir_enable) {
                    rtld_printf("No execute permission for binary %s\n",
                      argv0);
                    rtld_die();
                }

                /*
                 * For direct exec mode, argv[0] is the interpreter
                 * name; we must remove it and shift the remaining
                 * arguments left before invoking the binary's main.
                 * Since the stack layout places the environment
                 * pointers and aux vector entries right after the
                 * terminating NULL, we must shift the environment and
                 * aux vector as well.
                 */
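                /*
                 * For example, for "ld-elf.so.1 /bin/ls -l" parse_args()
                 * consumes only the interpreter name, so rtld_argc is 1
                 * and argv, the environment pointers and the aux vector
                 * all move left by one slot.
                 */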
                main_argc = argc - rtld_argc;
                for (i = 0; i <= main_argc; i++)
                    argv[i] = argv[i + rtld_argc];
                *argcp -= rtld_argc;
                environ = env = envp = argv + main_argc + 1;
                do {
                    *envp = *(envp + rtld_argc);
                    envp++;
                } while (*envp != NULL);
                aux = auxp = (Elf_Auxinfo *)envp;
                auxpf = (Elf_Auxinfo *)(envp + rtld_argc);
                for (;; auxp++, auxpf++) {
                    *auxp = *auxpf;
                    if (auxp->a_type == AT_NULL)
                        break;
                }
            } else {
                rtld_printf("no binary\n");
                rtld_die();
            }
        }
    }

    ld_bind_now = getenv(_LD("BIND_NOW"));

    /*
     * If the process is tainted, then we un-set the dangerous environment
     * variables.  The process will be marked as tainted until setuid(2)
     * is called.  If any child process calls setuid(2) we do not want any
     * future processes to honor the potentially un-safe variables.
     */
    if (!trust) {
        if (unsetenv(_LD("PRELOAD")) || unsetenv(_LD("LIBMAP")) ||
            unsetenv(_LD("LIBRARY_PATH")) || unsetenv(_LD("LIBRARY_PATH_FDS")) ||
            unsetenv(_LD("LIBMAP_DISABLE")) || unsetenv(_LD("BIND_NOT")) ||
            unsetenv(_LD("DEBUG")) || unsetenv(_LD("ELF_HINTS_PATH")) ||
            unsetenv(_LD("LOADFLTR")) || unsetenv(_LD("LIBRARY_PATH_RPATH"))) {
            _rtld_error("environment corrupt; aborting");
            rtld_die();
        }
    }
    ld_debug = getenv(_LD("DEBUG"));
    if (ld_bind_now == NULL)
        ld_bind_not = getenv(_LD("BIND_NOT")) != NULL;
    libmap_disable = getenv(_LD("LIBMAP_DISABLE")) != NULL;
    libmap_override = getenv(_LD("LIBMAP"));
    ld_library_path = getenv(_LD("LIBRARY_PATH"));
    ld_library_dirs = getenv(_LD("LIBRARY_PATH_FDS"));
    ld_preload = getenv(_LD("PRELOAD"));
    ld_elf_hints_path = getenv(_LD("ELF_HINTS_PATH"));
    ld_loadfltr = getenv(_LD("LOADFLTR")) != NULL;
    library_path_rpath = getenv(_LD("LIBRARY_PATH_RPATH"));
    if (library_path_rpath != NULL) {
        if (library_path_rpath[0] == 'y' ||
            library_path_rpath[0] == 'Y' ||
            library_path_rpath[0] == '1')
            ld_library_path_rpath = true;
        else
            ld_library_path_rpath = false;
    }
    dangerous_ld_env = libmap_disable || (libmap_override != NULL) ||
        (ld_library_path != NULL) || (ld_preload != NULL) ||
        (ld_elf_hints_path != NULL) || ld_loadfltr;
    ld_tracing = getenv(_LD("TRACE_LOADED_OBJECTS"));
    ld_utrace = getenv(_LD("UTRACE"));

    if ((ld_elf_hints_path == NULL) || strlen(ld_elf_hints_path) == 0)
        ld_elf_hints_path = ld_elf_hints_default;

    if (ld_debug != NULL && *ld_debug != '\0')
        debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
        (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot  = %p", obj_rtld.pltgot);

    dbg("initializing thread locks");
    lockdflt_init();

    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (fd != -1) {     /* Load the main program. */
        dbg("loading main program");
        obj_main = map_object(fd, argv0, NULL);
        close(fd);
        if (obj_main == NULL)
            rtld_die();
        max_stack_flags = obj_main->stack_flags;
    } else {            /* Main program already loaded. */
        dbg("processing main program's program header");
        assert(aux_info[AT_PHDR] != NULL);
        phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
        assert(aux_info[AT_PHNUM] != NULL);
        phnum = aux_info[AT_PHNUM]->a_un.a_val;
        assert(aux_info[AT_PHENT] != NULL);
        assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
        assert(aux_info[AT_ENTRY] != NULL);
        imgentry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
        if ((obj_main = digest_phdr(phdr, phnum, imgentry, argv0)) == NULL)
            rtld_die();
    }

    if (aux_info[AT_EXECPATH] != NULL && fd == -1) {
        kexecpath = aux_info[AT_EXECPATH]->a_un.a_ptr;
        dbg("AT_EXECPATH %p %s", kexecpath, kexecpath);
        if (kexecpath[0] == '/')
            obj_main->path = kexecpath;
        else if (getcwd(buf, sizeof(buf)) == NULL ||
            strlcat(buf, "/", sizeof(buf)) >= sizeof(buf) ||
            strlcat(buf, kexecpath, sizeof(buf)) >= sizeof(buf))
            obj_main->path = xstrdup(argv0);
        else
            obj_main->path = xstrdup(buf);
    } else {
        dbg("No AT_EXECPATH or direct exec");
        obj_main->path = xstrdup(argv0);
    }
    dbg("obj_main path %s", obj_main->path);
    obj_main->mainprog = true;

    if (aux_info[AT_STACKPROT] != NULL &&
        aux_info[AT_STACKPROT]->a_un.a_val != 0)
        stack_prot = aux_info[AT_STACKPROT]->a_un.a_val;

#ifndef COMPAT_32BIT
    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible.  (It should always be possible.)  That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
        strcmp(obj_main->interp, obj_rtld.path) != 0) {
        free(obj_rtld.path);
        obj_rtld.path = xstrdup(obj_main->interp);
        __progname = obj_rtld.path;
    }
#endif

    digest_dynamic(obj_main, 0);
    dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d",
        obj_main->path, obj_main->valid_hash_sysv, obj_main->valid_hash_gnu,
        obj_main->dynsymcount);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
    obj_count++;
    obj_loads++;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_UNDEF;
    sym_zero.st_value = -(uintptr_t)obj_main->relocbase;
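    /*
     * The st_value above is chosen so that obj_main->relocbase + st_value
     * evaluates to zero, which is the address an undefined weak reference
     * ultimately resolves to.
     */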

    if (!libmap_disable)
        libmap_disable = (bool)lm_init(libmap_override);

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
        rtld_die();
    preload_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));

    dbg("loading needed objects");
    if (load_needed_objects(obj_main, 0) == -1)
        rtld_die();

    /* Make a list of all objects loaded at startup. */
    last_interposer = obj_main;
    TAILQ_FOREACH(obj, &obj_list, next) {
        if (obj->marker)
            continue;
        if (obj->z_interpose && obj != obj_main) {
            objlist_put_after(&list_main, last_interposer, obj);
            last_interposer = obj;
        } else {
            objlist_push_tail(&list_main, obj);
        }
        obj->refcount++;
    }

    dbg("checking for required versions");
    if (rtld_verify_versions(&list_main) == -1 && !ld_tracing)
        rtld_die();

    if (ld_tracing) {           /* We're done */
        trace_loaded_objects(obj_main);
        exit(0);
    }

    if (getenv(_LD("DUMP_REL_PRE")) != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Processing tls relocations requires having the tls offsets
     * initialized.  Prepare offsets before starting initial
     * relocation processing.
     */
    dbg("initializing initial thread local storage offsets");
    STAILQ_FOREACH(entry, &list_main, link) {
        /*
         * Allocate all the initial objects out of the static TLS
         * block even if they didn't ask for it.
         */
        allocate_tls_offset(entry->obj);
    }

    if (relocate_objects(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0',
        &obj_rtld, SYMLOOK_EARLY, NULL) == -1)
        rtld_die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
        rtld_die();

    dbg("enforcing main obj relro");
    if (obj_enforce_relro(obj_main) == -1)
        rtld_die();

    if (getenv(_LD("DUMP_REL_POST")) != NULL) {
        dump_relocations(obj_main);
        exit (0);
    }

    /*
     * Set up TLS for the main thread.  This must be done after the
     * relocations are processed, since the TLS initialization section
     * might itself be the subject of relocations.
     */
    dbg("initializing initial thread local storage");
    allocate_initial_tls(globallist_curr(TAILQ_FIRST(&obj_list)));

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);
    set_program_var("__elf_aux_vector", aux);

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(globallist_curr(TAILQ_FIRST(&obj_list)),
        preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

    map_stacks_exec(NULL);
    ifunc_init(aux);

    dbg("resolving ifuncs");
    if (resolve_objects_ifunc(obj_main,
        ld_bind_now != NULL && *ld_bind_now != '\0', SYMLOOK_EARLY,
        NULL) == -1)
        rtld_die();
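
    /*
     * The crt_no_init flag is derived from an ELF note emitted by newer
     * crt1 objects; it indicates that the startup code does not call
     * _init/_fini itself and leaves running the main binary's
     * initializers and finalizers to rtld.
     */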
if (!obj_main->crt_no_init) {
|
|
|
|
/*
|
|
|
|
* Make sure we don't call the main program's init and fini
|
|
|
|
* functions for binaries linked with old crt1 which calls
|
|
|
|
* _init itself.
|
|
|
|
*/
|
|
|
|
obj_main->init = obj_main->fini = (Elf_Addr)NULL;
|
|
|
|
obj_main->preinit_array = obj_main->init_array =
|
|
|
|
obj_main->fini_array = (Elf_Addr)NULL;
|
|
|
|
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
2012-03-11 20:03:09 +00:00
|
|
|
if (obj_main->crt_no_init)
|
|
|
|
preinit_main();
|
2008-09-03 01:05:32 +00:00
|
|
|
objlist_call_init(&initlist, &lockstate);
|
2014-05-06 18:07:58 +00:00
|
|
|
_r_debug_postinit(&obj_main->linkmap);
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_clear(&initlist);
|
2010-12-25 08:51:20 +00:00
|
|
|
dbg("loading filtees");
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2010-12-25 08:51:20 +00:00
|
|
|
if (ld_loadfltr || obj->z_loadfltr)
|
|
|
|
load_filtees(obj, 0, &lockstate);
|
|
|
|
}
|
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
dbg("transferring control to program entry point = %p", obj_main->entry);
|
|
|
|
|
|
|
|
/* Return the exit procedure and the program entry point. */
|
1998-09-04 19:03:57 +00:00
|
|
|
*exit_proc = rtld_exit;
|
|
|
|
*objp = obj_main;
|
1998-03-07 19:24:35 +00:00
|
|
|
return (func_ptr_type) obj_main->entry;
|
|
|
|
}
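/*
 * What happens with the values returned above (a hedged summary, not taken
 * from this file): the architecture-specific rtld_start stub passes the
 * exit procedure on to the program, typically in a register reserved for
 * that purpose by the psABI, so the program's startup code can register it
 * with atexit() and the shared objects' fini routines run at process exit;
 * it then jumps to the returned entry point.
 */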
|
|
|
|
|
2011-12-12 11:03:14 +00:00
|
|
|
void *
|
|
|
|
rtld_resolve_ifunc(const Obj_Entry *obj, const Elf_Sym *def)
|
|
|
|
{
|
|
|
|
void *ptr;
|
|
|
|
Elf_Addr target;
|
|
|
|
|
|
|
|
ptr = (void *)make_function_pointer(def, obj);
|
2016-11-15 09:43:26 +00:00
|
|
|
target = call_ifunc_resolver(ptr);
|
2011-12-12 11:03:14 +00:00
|
|
|
return ((void *)target);
|
|
|
|
}
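/*
 * Illustrative sketch (kept out of the build with #if 0): how a shared
 * library might declare an STT_GNU_IFUNC symbol.  The names below are
 * invented for the example; rtld_resolve_ifunc() above is what ends up
 * invoking such a resolver and using the function pointer it returns.
 */
#if 0
static int
add1_impl(int x)
{
        return (x + 1);
}

/* Runs inside the dynamic linker; must not rely on unresolved symbols. */
static int (*resolve_add1(void))(int)
{
        /* A real resolver would test CPU features and pick a variant. */
        return (add1_impl);
}

int add1(int x) __attribute__((ifunc("resolve_add1")));
#endif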
|
|
|
|
|
2016-11-23 20:21:53 +00:00
|
|
|
/*
|
|
|
|
* NB: MIPS uses a private version of this function (_mips_rtld_bind).
|
|
|
|
* Changes to this function should be applied there as well.
|
|
|
|
*/
|
1999-06-25 02:53:59 +00:00
|
|
|
Elf_Addr
|
2005-12-18 04:52:37 +00:00
|
|
|
_rtld_bind(Obj_Entry *obj, Elf_Size reloff)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Rel *rel;
|
|
|
|
const Elf_Sym *def;
|
1998-03-07 19:24:35 +00:00
|
|
|
const Obj_Entry *defobj;
|
1998-09-04 19:03:57 +00:00
|
|
|
Elf_Addr *where;
|
1999-06-25 02:53:59 +00:00
|
|
|
Elf_Addr target;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2011-02-09 09:20:27 +00:00
|
|
|
if (sigsetjmp(lockstate.env, 0) != 0)
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_upgrade(rtld_bind_lock, &lockstate);
|
1998-09-04 19:03:57 +00:00
|
|
|
if (obj->pltrel)
|
|
|
|
rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
|
|
|
|
else
|
|
|
|
rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1998-09-04 19:03:57 +00:00
|
|
|
where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
|
2016-11-08 22:41:11 +00:00
|
|
|
def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, SYMLOOK_IN_PLT,
|
|
|
|
NULL, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
if (def == NULL)
|
2015-04-02 21:35:36 +00:00
|
|
|
rtld_die();
|
2011-12-12 11:03:14 +00:00
|
|
|
if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
|
|
|
|
target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
|
|
|
|
else
|
|
|
|
target = (Elf_Addr)(defobj->relocbase + def->st_value);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
|
|
|
|
defobj->strtab + def->st_name, basename(obj->path),
|
1999-06-25 02:53:59 +00:00
|
|
|
(void *)target, basename(defobj->path));
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2001-10-15 18:48:42 +00:00
|
|
|
/*
|
|
|
|
* Write the new contents for the jmpslot. Note that depending on
|
|
|
|
* architecture, the value which we need to return back to the
|
|
|
|
* lazy binding trampoline may or may not be the target
|
|
|
|
* address. The value returned from reloc_jmpslot() is the value
|
|
|
|
* that the trampoline needs.
|
|
|
|
*/
|
2002-11-18 22:08:50 +00:00
|
|
|
target = reloc_jmpslot(where, target, defobj, obj, rel);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
return target;
|
|
|
|
}
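/*
 * Illustrative sketch (not rtld code, #if 0): on most architectures the
 * machine-dependent reloc_jmpslot() called above boils down to storing the
 * resolved target into the GOT/PLT slot so later calls bypass _rtld_bind
 * entirely.  The sketch assumes a plain Elf_Addr-sized slot; real
 * implementations may need cache maintenance or a different trampoline
 * return value.
 */
#if 0
static Elf_Addr
example_reloc_jmpslot(Elf_Addr *where, Elf_Addr target)
{
        if (*where != target)
                *where = target;        /* patch the jump slot */
        return (target);                /* value for the binding trampoline */
}
#endif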
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Error reporting function. Use it like printf. It formats the message
|
|
|
|
* into a buffer, and sets things up so that the next call to dlerror()
|
|
|
|
* will return the message.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
_rtld_error(const char *fmt, ...)
|
|
|
|
{
|
|
|
|
static char buf[512];
|
|
|
|
va_list ap;
|
|
|
|
|
|
|
|
va_start(ap, fmt);
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_vsnprintf(buf, sizeof buf, fmt, ap);
|
1998-03-07 19:24:35 +00:00
|
|
|
error_message = buf;
|
|
|
|
va_end(ap);
|
2017-01-30 08:38:32 +00:00
|
|
|
LD_UTRACE(UTRACE_RTLD_ERROR, NULL, NULL, 0, 0, error_message);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
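/*
 * Minimal usage sketch (not part of rtld, #if 0): messages recorded by
 * _rtld_error() are what dlerror(3) later hands back to the application,
 * for example after a failed dlopen().  "libnonexistent.so" is just a
 * placeholder name for illustration.
 */
#if 0
#include <dlfcn.h>
#include <stdio.h>

int
main(void)
{
        void *h;

        h = dlopen("libnonexistent.so", RTLD_NOW);
        if (h == NULL)
                printf("dlopen failed: %s\n", dlerror());
        else
                dlclose(h);
        return (0);
}
#endif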
|
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Return a dynamically-allocated copy of the current error message, if any.
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
errmsg_save(void)
|
|
|
|
{
|
|
|
|
return error_message == NULL ? NULL : xstrdup(error_message);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore the current error message from a copy which was previously saved
|
|
|
|
* by errmsg_save(). The copy is freed.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
errmsg_restore(char *saved_msg)
|
|
|
|
{
|
|
|
|
if (saved_msg == NULL)
|
|
|
|
error_message = NULL;
|
|
|
|
else {
|
|
|
|
_rtld_error("%s", saved_msg);
|
|
|
|
free(saved_msg);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
static const char *
|
|
|
|
basename(const char *name)
|
|
|
|
{
|
|
|
|
const char *p = strrchr(name, '/');
|
|
|
|
return p != NULL ? p + 1 : name;
|
|
|
|
}
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
static struct utsname uts;
|
|
|
|
|
2013-04-15 20:06:56 +00:00
|
|
|
static char *
|
2015-04-27 18:41:31 +00:00
|
|
|
origin_subst_one(Obj_Entry *obj, char *real, const char *kw,
|
|
|
|
const char *subst, bool may_free)
|
2009-03-18 13:40:37 +00:00
|
|
|
{
|
2013-04-15 20:06:56 +00:00
|
|
|
char *p, *p1, *res, *resp;
|
|
|
|
int subst_len, kw_len, subst_count, old_len, new_len;
|
|
|
|
|
|
|
|
kw_len = strlen(kw);
|
|
|
|
|
|
|
|
/*
|
2016-05-01 19:39:23 +00:00
|
|
|
* First, count the number of keyword occurrences, to
|
2013-04-15 20:06:56 +00:00
|
|
|
* preallocate the final string.
|
|
|
|
*/
|
|
|
|
for (p = real, subst_count = 0;; p = p1 + kw_len, subst_count++) {
|
|
|
|
p1 = strstr(p, kw);
|
|
|
|
if (p1 == NULL)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the keyword is not found, just return.
|
2015-04-27 18:41:31 +00:00
|
|
|
*
|
|
|
|
* Also return the non-substituted string if resolution failed. We
|
|
|
|
* cannot do anything more reasonable; the failure mode of the
|
|
|
|
* caller is an unresolved library anyway.
|
2013-04-15 20:06:56 +00:00
|
|
|
*/
|
2015-04-27 18:41:31 +00:00
|
|
|
if (subst_count == 0 || (obj != NULL && !obj_resolve_origin(obj)))
|
2013-04-15 20:06:56 +00:00
|
|
|
return (may_free ? real : xstrdup(real));
|
2015-04-27 18:41:31 +00:00
|
|
|
if (obj != NULL)
|
|
|
|
subst = obj->origin_path;
|
2013-04-15 20:06:56 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There is indeed something to substitute. Calculate the
|
|
|
|
* length of the resulting string, and allocate it.
|
|
|
|
*/
|
|
|
|
subst_len = strlen(subst);
|
|
|
|
old_len = strlen(real);
|
|
|
|
new_len = old_len + (subst_len - kw_len) * subst_count;
|
|
|
|
res = xmalloc(new_len + 1);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now, execute the substitution loop.
|
|
|
|
*/
|
2013-04-29 21:12:25 +00:00
|
|
|
for (p = real, resp = res, *resp = '\0';;) {
|
2013-04-15 20:06:56 +00:00
|
|
|
p1 = strstr(p, kw);
|
|
|
|
if (p1 != NULL) {
|
|
|
|
/* Copy the prefix before keyword. */
|
|
|
|
memcpy(resp, p, p1 - p);
|
|
|
|
resp += p1 - p;
|
|
|
|
/* Keyword replacement. */
|
|
|
|
memcpy(resp, subst, subst_len);
|
|
|
|
resp += subst_len;
|
2013-04-29 21:12:25 +00:00
|
|
|
*resp = '\0';
|
2013-04-15 20:06:56 +00:00
|
|
|
p = p1 + kw_len;
|
|
|
|
} else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Copy to the end of string and finish. */
|
|
|
|
strcat(resp, p);
|
|
|
|
if (may_free)
|
|
|
|
free(real);
|
|
|
|
return (res);
|
2009-03-18 13:40:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static char *
|
2015-04-27 18:41:31 +00:00
|
|
|
origin_subst(Obj_Entry *obj, char *real)
|
2009-03-18 13:40:37 +00:00
|
|
|
{
|
2013-04-15 20:06:56 +00:00
|
|
|
char *res1, *res2, *res3, *res4;
|
2009-03-18 13:40:37 +00:00
|
|
|
|
2015-04-27 18:41:31 +00:00
|
|
|
if (obj == NULL || !trust)
|
|
|
|
return (xstrdup(real));
|
2013-04-15 20:06:56 +00:00
|
|
|
if (uts.sysname[0] == '\0') {
|
|
|
|
if (uname(&uts) != 0) {
|
|
|
|
_rtld_error("utsname failed: %d", errno);
|
|
|
|
return (NULL);
|
|
|
|
}
|
2009-03-18 13:40:37 +00:00
|
|
|
}
|
2015-04-27 18:41:31 +00:00
|
|
|
res1 = origin_subst_one(obj, real, "$ORIGIN", NULL, false);
|
|
|
|
res2 = origin_subst_one(NULL, res1, "$OSNAME", uts.sysname, true);
|
|
|
|
res3 = origin_subst_one(NULL, res2, "$OSREL", uts.release, true);
|
|
|
|
res4 = origin_subst_one(NULL, res3, "$PLATFORM", uts.machine, true);
|
2013-04-15 20:06:56 +00:00
|
|
|
return (res4);
|
2009-03-18 13:40:37 +00:00
|
|
|
}
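/*
 * Worked example of the substitution above (paths are illustrative): for an
 * object at /usr/local/lib/libfoo.so.1, whose resolved origin is therefore
 * "/usr/local/lib", a DT_RUNPATH of "$ORIGIN/../lib/private" expands to
 * "/usr/local/lib/../lib/private", while $OSNAME, $OSREL and $PLATFORM are
 * taken from uname(3) (e.g. "FreeBSD", the release string and the machine
 * name respectively).
 */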
|
|
|
|
|
2015-04-02 21:35:36 +00:00
|
|
|
void
|
|
|
|
rtld_die(void)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
|
|
|
const char *msg = dlerror();
|
|
|
|
|
|
|
|
if (msg == NULL)
|
|
|
|
msg = "Fatal error";
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_fdputstr(STDERR_FILENO, msg);
|
2012-02-13 11:15:29 +00:00
|
|
|
rtld_fdputchar(STDERR_FILENO, '\n');
|
2011-08-24 20:05:13 +00:00
|
|
|
_exit(1);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Process a shared object's DYNAMIC section, and save the important
|
|
|
|
* information in its Obj_Entry structure.
|
|
|
|
*/
|
|
|
|
static void
|
2010-08-17 09:05:39 +00:00
|
|
|
digest_dynamic1(Obj_Entry *obj, int early, const Elf_Dyn **dyn_rpath,
|
2012-07-15 10:53:48 +00:00
|
|
|
const Elf_Dyn **dyn_soname, const Elf_Dyn **dyn_runpath)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Dyn *dynp;
|
1998-03-07 19:24:35 +00:00
|
|
|
Needed_Entry **needed_tail = &obj->needed;
|
2010-12-25 08:51:20 +00:00
|
|
|
Needed_Entry **needed_filtees_tail = &obj->needed_filtees;
|
|
|
|
Needed_Entry **needed_aux_filtees_tail = &obj->needed_aux_filtees;
|
2012-04-30 13:31:10 +00:00
|
|
|
const Elf_Hashelt *hashtab;
|
|
|
|
const Elf32_Word *hashval;
|
|
|
|
Elf32_Word bkt, nmaskwords;
|
|
|
|
int bloom_size32;
|
1998-09-04 19:03:57 +00:00
|
|
|
int plttype = DT_REL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
*dyn_rpath = NULL;
|
|
|
|
*dyn_soname = NULL;
|
2012-07-15 10:53:48 +00:00
|
|
|
*dyn_runpath = NULL;
|
2010-08-17 09:05:39 +00:00
|
|
|
|
2003-12-31 15:10:41 +00:00
|
|
|
obj->bind_now = false;
|
1998-03-07 19:24:35 +00:00
|
|
|
for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) {
|
|
|
|
switch (dynp->d_tag) {
|
|
|
|
|
|
|
|
case DT_REL:
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_RELSZ:
|
|
|
|
obj->relsize = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_RELENT:
|
1998-09-04 19:03:57 +00:00
|
|
|
assert(dynp->d_un.d_val == sizeof(Elf_Rel));
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_JMPREL:
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->pltrel = (const Elf_Rel *)
|
1998-03-07 19:24:35 +00:00
|
|
|
(obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_PLTRELSZ:
|
|
|
|
obj->pltrelsize = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_RELA:
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_RELASZ:
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->relasize = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_RELAENT:
|
1998-09-04 19:03:57 +00:00
|
|
|
assert(dynp->d_un.d_val == sizeof(Elf_Rela));
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_PLTREL:
|
1998-09-04 19:03:57 +00:00
|
|
|
plttype = dynp->d_un.d_val;
|
|
|
|
assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_SYMTAB:
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->symtab = (const Elf_Sym *)
|
1998-03-07 19:24:35 +00:00
|
|
|
(obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_SYMENT:
|
1998-09-04 19:03:57 +00:00
|
|
|
assert(dynp->d_un.d_val == sizeof(Elf_Sym));
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_STRTAB:
|
|
|
|
obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_STRSZ:
|
|
|
|
obj->strsize = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
case DT_VERNEED:
|
|
|
|
obj->verneed = (const Elf_Verneed *) (obj->relocbase +
|
|
|
|
dynp->d_un.d_val);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_VERNEEDNUM:
|
|
|
|
obj->verneednum = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_VERDEF:
|
|
|
|
obj->verdef = (const Elf_Verdef *) (obj->relocbase +
|
|
|
|
dynp->d_un.d_val);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_VERDEFNUM:
|
|
|
|
obj->verdefnum = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_VERSYM:
|
|
|
|
obj->versyms = (const Elf_Versym *)(obj->relocbase +
|
|
|
|
dynp->d_un.d_val);
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_HASH:
|
|
|
|
{
|
2012-04-30 13:31:10 +00:00
|
|
|
hashtab = (const Elf_Hashelt *)(obj->relocbase +
|
|
|
|
dynp->d_un.d_ptr);
|
1998-03-07 19:24:35 +00:00
|
|
|
obj->nbuckets = hashtab[0];
|
|
|
|
obj->nchains = hashtab[1];
|
|
|
|
obj->buckets = hashtab + 2;
|
|
|
|
obj->chains = obj->buckets + obj->nbuckets;
|
2012-04-30 13:31:10 +00:00
|
|
|
obj->valid_hash_sysv = obj->nbuckets > 0 && obj->nchains > 0 &&
|
|
|
|
obj->buckets != NULL;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_GNU_HASH:
|
|
|
|
{
|
|
|
|
hashtab = (const Elf_Hashelt *)(obj->relocbase +
|
|
|
|
dynp->d_un.d_ptr);
|
|
|
|
obj->nbuckets_gnu = hashtab[0];
|
|
|
|
obj->symndx_gnu = hashtab[1];
|
|
|
|
nmaskwords = hashtab[2];
|
|
|
|
bloom_size32 = (__ELF_WORD_SIZE / 32) * nmaskwords;
|
|
|
|
obj->maskwords_bm_gnu = nmaskwords - 1;
|
|
|
|
obj->shift2_gnu = hashtab[3];
|
|
|
|
obj->bloom_gnu = (Elf_Addr *) (hashtab + 4);
|
|
|
|
obj->buckets_gnu = hashtab + 4 + bloom_size32;
|
|
|
|
obj->chain_zero_gnu = obj->buckets_gnu + obj->nbuckets_gnu -
|
|
|
|
obj->symndx_gnu;
|
2015-01-30 15:32:35 +00:00
|
|
|
/* The number of bitmask words is required to be a power of 2. */
|
|
|
|
obj->valid_hash_gnu = powerof2(nmaskwords) &&
|
|
|
|
obj->nbuckets_gnu > 0 && obj->buckets_gnu != NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_NEEDED:
|
1999-04-09 00:28:43 +00:00
|
|
|
if (!obj->rtld) {
|
1998-03-07 19:24:35 +00:00
|
|
|
Needed_Entry *nep = NEW(Needed_Entry);
|
|
|
|
nep->name = dynp->d_un.d_val;
|
|
|
|
nep->obj = NULL;
|
|
|
|
nep->next = NULL;
|
|
|
|
|
|
|
|
*needed_tail = nep;
|
|
|
|
needed_tail = &nep->next;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
case DT_FILTER:
|
|
|
|
if (!obj->rtld) {
|
|
|
|
Needed_Entry *nep = NEW(Needed_Entry);
|
|
|
|
nep->name = dynp->d_un.d_val;
|
|
|
|
nep->obj = NULL;
|
|
|
|
nep->next = NULL;
|
|
|
|
|
|
|
|
*needed_filtees_tail = nep;
|
|
|
|
needed_filtees_tail = &nep->next;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_AUXILIARY:
|
|
|
|
if (!obj->rtld) {
|
|
|
|
Needed_Entry *nep = NEW(Needed_Entry);
|
|
|
|
nep->name = dynp->d_un.d_val;
|
|
|
|
nep->obj = NULL;
|
|
|
|
nep->next = NULL;
|
|
|
|
|
|
|
|
*needed_aux_filtees_tail = nep;
|
|
|
|
needed_aux_filtees_tail = &nep->next;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_PLTGOT:
|
1999-04-09 00:28:43 +00:00
|
|
|
obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_TEXTREL:
|
|
|
|
obj->textrel = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_SYMBOLIC:
|
|
|
|
obj->symbolic = true;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_RPATH:
|
|
|
|
/*
|
|
|
|
* We have to wait until later to process this, because we
|
|
|
|
* might not have gotten the address of the string table yet.
|
|
|
|
*/
|
2010-08-17 09:05:39 +00:00
|
|
|
*dyn_rpath = dynp;
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_SONAME:
|
2010-08-17 09:05:39 +00:00
|
|
|
*dyn_soname = dynp;
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
case DT_RUNPATH:
|
|
|
|
*dyn_runpath = dynp;
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_INIT:
|
2001-10-29 10:10:10 +00:00
|
|
|
obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
2012-03-11 20:03:09 +00:00
|
|
|
case DT_PREINIT_ARRAY:
|
|
|
|
obj->preinit_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_PREINIT_ARRAYSZ:
|
|
|
|
obj->preinit_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_INIT_ARRAY:
|
|
|
|
obj->init_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_INIT_ARRAYSZ:
|
|
|
|
obj->init_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
|
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_FINI:
|
2001-10-29 10:10:10 +00:00
|
|
|
obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
|
|
|
|
2012-03-11 20:03:09 +00:00
|
|
|
case DT_FINI_ARRAY:
|
|
|
|
obj->fini_array = (Elf_Addr)(obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_FINI_ARRAYSZ:
|
|
|
|
obj->fini_array_num = dynp->d_un.d_val / sizeof(Elf_Addr);
|
|
|
|
break;
|
|
|
|
|
2008-04-04 20:59:26 +00:00
|
|
|
/*
|
|
|
|
* Don't process DT_DEBUG on MIPS as the dynamic section
|
|
|
|
* is mapped read-only. DT_MIPS_RLD_MAP is used instead.
|
|
|
|
*/
|
|
|
|
|
2016-01-08 00:56:41 +00:00
|
|
|
#ifndef __mips__
|
1998-03-07 19:24:35 +00:00
|
|
|
case DT_DEBUG:
|
2002-04-02 02:19:02 +00:00
|
|
|
if (!early)
|
|
|
|
dbg("Filling in DT_DEBUG entry");
|
1998-09-04 19:03:57 +00:00
|
|
|
((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
2016-01-08 00:56:41 +00:00
|
|
|
#endif
|
1998-09-04 19:03:57 +00:00
|
|
|
|
2003-06-18 03:34:29 +00:00
|
|
|
case DT_FLAGS:
|
2015-04-27 18:41:31 +00:00
|
|
|
if (dynp->d_un.d_val & DF_ORIGIN)
|
2009-03-18 13:40:37 +00:00
|
|
|
obj->z_origin = true;
|
2003-06-18 03:34:29 +00:00
|
|
|
if (dynp->d_un.d_val & DF_SYMBOLIC)
|
|
|
|
obj->symbolic = true;
|
|
|
|
if (dynp->d_un.d_val & DF_TEXTREL)
|
|
|
|
obj->textrel = true;
|
|
|
|
if (dynp->d_un.d_val & DF_BIND_NOW)
|
|
|
|
obj->bind_now = true;
|
2011-06-18 13:56:33 +00:00
|
|
|
/*if (dynp->d_un.d_val & DF_STATIC_TLS)
|
|
|
|
;*/
|
2003-06-18 03:34:29 +00:00
|
|
|
break;
|
2008-04-04 20:59:26 +00:00
|
|
|
#ifdef __mips__
|
|
|
|
case DT_MIPS_LOCAL_GOTNO:
|
|
|
|
obj->local_gotno = dynp->d_un.d_val;
|
2015-12-03 00:10:57 +00:00
|
|
|
break;
|
2008-04-04 20:59:26 +00:00
|
|
|
|
|
|
|
case DT_MIPS_SYMTABNO:
|
|
|
|
obj->symtabno = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_MIPS_GOTSYM:
|
|
|
|
obj->gotsym = dynp->d_un.d_val;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DT_MIPS_RLD_MAP:
|
2013-10-02 02:32:58 +00:00
|
|
|
*((Elf_Addr *)(dynp->d_un.d_ptr)) = (Elf_Addr) &r_debug;
|
2008-04-04 20:59:26 +00:00
|
|
|
break;
|
|
|
|
#endif
|
2003-06-18 03:34:29 +00:00
|
|
|
|
2015-12-03 00:10:57 +00:00
|
|
|
#ifdef __powerpc64__
|
|
|
|
case DT_PPC64_GLINK:
|
|
|
|
obj->glink = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr);
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
case DT_FLAGS_1:
|
2009-11-26 13:57:20 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_NOOPEN)
|
|
|
|
obj->z_noopen = true;
|
2015-04-27 18:41:31 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_ORIGIN)
|
2009-03-18 13:40:37 +00:00
|
|
|
obj->z_origin = true;
|
2015-04-15 08:16:34 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_GLOBAL)
|
|
|
|
obj->z_global = true;
|
2009-03-18 13:40:37 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_BIND_NOW)
|
|
|
|
obj->bind_now = true;
|
2009-03-30 08:47:28 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_NODELETE)
|
|
|
|
obj->z_nodelete = true;
|
2010-12-25 08:51:20 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_LOADFLTR)
|
|
|
|
obj->z_loadfltr = true;
|
2013-10-07 08:19:30 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_INTERPOSE)
|
|
|
|
obj->z_interpose = true;
|
2012-07-15 10:53:48 +00:00
|
|
|
if (dynp->d_un.d_val & DF_1_NODEFLIB)
|
|
|
|
obj->z_nodeflib = true;
|
2009-03-18 13:40:37 +00:00
|
|
|
break;
|
|
|
|
|
1998-09-04 19:03:57 +00:00
|
|
|
default:
|
2002-04-02 02:19:02 +00:00
|
|
|
if (!early) {
|
|
|
|
dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag,
|
|
|
|
(long)dynp->d_tag);
|
|
|
|
}
|
1999-09-04 20:14:48 +00:00
|
|
|
break;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-09-04 19:03:57 +00:00
|
|
|
obj->traced = false;
|
|
|
|
|
|
|
|
if (plttype == DT_RELA) {
|
|
|
|
obj->pltrela = (const Elf_Rela *) obj->pltrel;
|
|
|
|
obj->pltrel = NULL;
|
|
|
|
obj->pltrelasize = obj->pltrelsize;
|
|
|
|
obj->pltrelsize = 0;
|
|
|
|
}
|
2012-04-30 13:31:10 +00:00
|
|
|
|
|
|
|
/* Determine size of dynsym table (equal to nchains of sysv hash) */
|
|
|
|
if (obj->valid_hash_sysv)
|
|
|
|
obj->dynsymcount = obj->nchains;
|
|
|
|
else if (obj->valid_hash_gnu) {
|
|
|
|
obj->dynsymcount = 0;
|
|
|
|
for (bkt = 0; bkt < obj->nbuckets_gnu; bkt++) {
|
|
|
|
if (obj->buckets_gnu[bkt] == 0)
|
|
|
|
continue;
|
|
|
|
hashval = &obj->chain_zero_gnu[obj->buckets_gnu[bkt]];
|
|
|
|
do
|
|
|
|
obj->dynsymcount++;
|
|
|
|
while ((*hashval++ & 1u) == 0);
|
|
|
|
}
|
|
|
|
obj->dynsymcount += obj->symndx_gnu;
|
|
|
|
}
|
2010-08-17 09:05:39 +00:00
|
|
|
}
|
|
|
|
|
2015-04-27 18:41:31 +00:00
|
|
|
static bool
|
|
|
|
obj_resolve_origin(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (obj->origin_path != NULL)
|
|
|
|
return (true);
|
|
|
|
obj->origin_path = xmalloc(PATH_MAX);
|
|
|
|
return (rtld_dirname_abs(obj->path, obj->origin_path) != -1);
|
|
|
|
}
|
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
static void
|
|
|
|
digest_dynamic2(Obj_Entry *obj, const Elf_Dyn *dyn_rpath,
|
2012-07-15 10:53:48 +00:00
|
|
|
const Elf_Dyn *dyn_soname, const Elf_Dyn *dyn_runpath)
|
2010-08-17 09:05:39 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
|
2015-04-27 18:41:31 +00:00
|
|
|
if (obj->z_origin && !obj_resolve_origin(obj))
|
|
|
|
rtld_die();
|
2005-12-18 19:43:33 +00:00
|
|
|
|
2015-04-27 18:41:31 +00:00
|
|
|
if (dyn_runpath != NULL) {
|
|
|
|
obj->runpath = (char *)obj->strtab + dyn_runpath->d_un.d_val;
|
|
|
|
obj->runpath = origin_subst(obj, obj->runpath);
|
|
|
|
} else if (dyn_rpath != NULL) {
|
|
|
|
obj->rpath = (char *)obj->strtab + dyn_rpath->d_un.d_val;
|
|
|
|
obj->rpath = origin_subst(obj, obj->rpath);
|
|
|
|
}
|
|
|
|
if (dyn_soname != NULL)
|
|
|
|
object_add_name(obj, obj->strtab + dyn_soname->d_un.d_val);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
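/*
 * Note on precedence, as implemented above: when an object carries both
 * DT_RUNPATH and DT_RPATH, only obj->runpath is populated here, so the
 * later library search (see find_library()) never consults the legacy
 * DT_RPATH of such an object.
 */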
|
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
static void
|
|
|
|
digest_dynamic(Obj_Entry *obj, int early)
|
|
|
|
{
|
|
|
|
const Elf_Dyn *dyn_rpath;
|
|
|
|
const Elf_Dyn *dyn_soname;
|
2012-07-15 10:53:48 +00:00
|
|
|
const Elf_Dyn *dyn_runpath;
|
2010-08-17 09:05:39 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
digest_dynamic1(obj, early, &dyn_rpath, &dyn_soname, &dyn_runpath);
|
|
|
|
digest_dynamic2(obj, dyn_rpath, dyn_soname, dyn_runpath);
|
2010-08-17 09:05:39 +00:00
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Process a shared object's program header. This is used only for the
|
|
|
|
* main program, when the kernel has already loaded the main program
|
|
|
|
* into memory before calling the dynamic linker. It creates and
|
|
|
|
* returns an Obj_Entry structure.
|
|
|
|
*/
|
|
|
|
static Obj_Entry *
|
1999-07-18 00:02:19 +00:00
|
|
|
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1999-08-30 01:48:19 +00:00
|
|
|
Obj_Entry *obj;
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Phdr *phlimit = phdr + phnum;
|
|
|
|
const Elf_Phdr *ph;
|
2012-03-11 20:03:09 +00:00
|
|
|
Elf_Addr note_start, note_end;
|
1998-03-07 19:24:35 +00:00
|
|
|
int nsegs = 0;
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
obj = obj_new();
|
1998-03-07 19:24:35 +00:00
|
|
|
for (ph = phdr; ph < phlimit; ph++) {
|
2009-10-10 15:27:10 +00:00
|
|
|
if (ph->p_type != PT_PHDR)
|
|
|
|
continue;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2009-10-10 15:27:10 +00:00
|
|
|
obj->phdr = phdr;
|
|
|
|
obj->phsize = ph->p_memsz;
|
|
|
|
obj->relocbase = (caddr_t)phdr - ph->p_vaddr;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2011-01-08 17:11:49 +00:00
|
|
|
obj->stack_flags = PF_X | PF_R | PF_W;
|
|
|
|
|
2009-10-10 15:27:10 +00:00
|
|
|
for (ph = phdr; ph < phlimit; ph++) {
|
|
|
|
switch (ph->p_type) {
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-08-30 01:54:13 +00:00
|
|
|
case PT_INTERP:
|
2009-10-10 15:27:10 +00:00
|
|
|
obj->interp = (const char *)(ph->p_vaddr + obj->relocbase);
|
1999-08-30 01:54:13 +00:00
|
|
|
break;
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
case PT_LOAD:
|
|
|
|
if (nsegs == 0) { /* First load segment */
|
|
|
|
obj->vaddrbase = trunc_page(ph->p_vaddr);
|
2009-10-10 15:27:10 +00:00
|
|
|
obj->mapbase = obj->vaddrbase + obj->relocbase;
|
1998-03-07 19:24:35 +00:00
|
|
|
obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
|
|
|
|
obj->vaddrbase;
|
|
|
|
} else { /* Last load segment */
|
|
|
|
obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
|
|
|
|
obj->vaddrbase;
|
|
|
|
}
|
|
|
|
nsegs++;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PT_DYNAMIC:
|
2009-10-10 15:27:10 +00:00
|
|
|
obj->dynamic = (const Elf_Dyn *)(ph->p_vaddr + obj->relocbase);
|
1998-03-07 19:24:35 +00:00
|
|
|
break;
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
case PT_TLS:
|
|
|
|
obj->tlsindex = 1;
|
|
|
|
obj->tlssize = ph->p_memsz;
|
|
|
|
obj->tlsalign = ph->p_align;
|
|
|
|
obj->tlsinitsize = ph->p_filesz;
|
2009-10-10 15:27:10 +00:00
|
|
|
obj->tlsinit = (void*)(ph->p_vaddr + obj->relocbase);
|
2004-08-03 08:51:00 +00:00
|
|
|
break;
|
2011-01-08 17:11:49 +00:00
|
|
|
|
|
|
|
case PT_GNU_STACK:
|
|
|
|
obj->stack_flags = ph->p_flags;
|
|
|
|
break;
|
2012-01-30 19:52:17 +00:00
|
|
|
|
|
|
|
case PT_GNU_RELRO:
|
|
|
|
obj->relro_page = obj->relocbase + trunc_page(ph->p_vaddr);
|
|
|
|
obj->relro_size = round_page(ph->p_memsz);
|
|
|
|
break;
|
2012-03-11 20:03:09 +00:00
|
|
|
|
|
|
|
case PT_NOTE:
|
|
|
|
note_start = (Elf_Addr)obj->relocbase + ph->p_vaddr;
|
|
|
|
note_end = note_start + ph->p_filesz;
|
|
|
|
digest_notes(obj, note_start, note_end);
|
|
|
|
break;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
}
|
2002-11-29 16:41:31 +00:00
|
|
|
if (nsegs < 1) {
|
|
|
|
_rtld_error("%s: too few PT_LOAD segments", path);
|
|
|
|
return NULL;
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
obj->entry = entry;
|
|
|
|
return obj;
|
|
|
|
}
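/*
 * Application-side sketch (not rtld code, #if 0): the same program headers
 * digested above can be inspected from a running process with
 * dl_iterate_phdr(3); the callback and field names below follow that
 * documented interface.
 */
#if 0
#include <link.h>
#include <stdio.h>

static int
print_object(struct dl_phdr_info *info, size_t size, void *arg)
{
        (void)size;
        (void)arg;
        printf("%s: %d program headers at base %p\n",
            info->dlpi_name, (int)info->dlpi_phnum, (void *)info->dlpi_addr);
        return (0);
}

int
main(void)
{
        dl_iterate_phdr(print_object, NULL);
        return (0);
}
#endif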
|
|
|
|
|
2012-03-11 20:03:09 +00:00
|
|
|
void
|
|
|
|
digest_notes(Obj_Entry *obj, Elf_Addr note_start, Elf_Addr note_end)
|
|
|
|
{
|
|
|
|
const Elf_Note *note;
|
|
|
|
const char *note_name;
|
|
|
|
uintptr_t p;
|
|
|
|
|
|
|
|
for (note = (const Elf_Note *)note_start; (Elf_Addr)note < note_end;
|
|
|
|
note = (const Elf_Note *)((const char *)(note + 1) +
|
|
|
|
roundup2(note->n_namesz, sizeof(Elf32_Addr)) +
|
|
|
|
roundup2(note->n_descsz, sizeof(Elf32_Addr)))) {
|
|
|
|
if (note->n_namesz != sizeof(NOTE_FREEBSD_VENDOR) ||
|
|
|
|
note->n_descsz != sizeof(int32_t))
|
|
|
|
continue;
|
2015-12-21 22:40:29 +00:00
|
|
|
if (note->n_type != NT_FREEBSD_ABI_TAG &&
|
|
|
|
note->n_type != NT_FREEBSD_NOINIT_TAG)
|
2012-03-11 20:03:09 +00:00
|
|
|
continue;
|
|
|
|
note_name = (const char *)(note + 1);
|
|
|
|
if (strncmp(NOTE_FREEBSD_VENDOR, note_name,
|
|
|
|
sizeof(NOTE_FREEBSD_VENDOR)) != 0)
|
|
|
|
continue;
|
|
|
|
switch (note->n_type) {
|
2015-12-21 22:40:29 +00:00
|
|
|
case NT_FREEBSD_ABI_TAG:
|
2012-03-11 20:03:09 +00:00
|
|
|
/* FreeBSD osrel note */
|
|
|
|
p = (uintptr_t)(note + 1);
|
|
|
|
p += roundup2(note->n_namesz, sizeof(Elf32_Addr));
|
|
|
|
obj->osrel = *(const int32_t *)(p);
|
|
|
|
dbg("note osrel %d", obj->osrel);
|
|
|
|
break;
|
2015-12-21 22:40:29 +00:00
|
|
|
case NT_FREEBSD_NOINIT_TAG:
|
2012-03-11 20:03:09 +00:00
|
|
|
/* FreeBSD 'crt does not call init' note */
|
|
|
|
obj->crt_no_init = true;
|
|
|
|
dbg("note crt_no_init");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
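/*
 * Layout of each record walked above, matching the pointer arithmetic in
 * the loop (fields and pads use 4-byte Elf32_Addr alignment):
 *
 *      Elf_Note { n_namesz, n_descsz, n_type }
 *      name:  n_namesz bytes, e.g. "FreeBSD\0", padded to 4 bytes
 *      desc:  n_descsz bytes, e.g. the int32 osrel value, padded to 4 bytes
 */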
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
static Obj_Entry *
|
|
|
|
dlcheck(void *handle)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
1998-03-07 19:24:35 +00:00
|
|
|
if (obj == (Obj_Entry *) handle)
|
|
|
|
break;
|
2016-01-20 07:21:33 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
|
1998-03-07 19:24:35 +00:00
|
|
|
_rtld_error("Invalid shared object handle %p", handle);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* If the given object is already in the donelist, return true. Otherwise
|
|
|
|
* add the object to the list and return false.
|
|
|
|
*/
|
|
|
|
static bool
|
2000-09-19 04:27:16 +00:00
|
|
|
donelist_check(DoneList *dlp, const Obj_Entry *obj)
|
2000-07-08 04:10:38 +00:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
for (i = 0; i < dlp->num_used; i++)
|
|
|
|
if (dlp->objs[i] == obj)
|
|
|
|
return true;
|
|
|
|
/*
|
|
|
|
* Our donelist allocation should always be sufficient. But if
|
|
|
|
* our thread locking isn't working properly, more shared objects
|
|
|
|
* could have been loaded since we allocated the list. That should
|
|
|
|
* never happen, but we'll handle it properly just in case it does.
|
|
|
|
*/
|
|
|
|
if (dlp->num_used < dlp->num_alloc)
|
|
|
|
dlp->objs[dlp->num_used++] = obj;
|
|
|
|
return false;
|
|
|
|
}
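/*
 * Typical caller pattern (a sketch): a lookup routine initializes a
 * DoneList on its stack, sized for the objects currently loaded (the
 * companion donelist_init() helper is assumed here), and then skips any
 * object for which donelist_check() returns true, so dependency DAGs with
 * shared nodes are visited only once.
 */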
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Hash function for symbol table lookup. Don't even think about changing
|
|
|
|
* this. It is specified by the System V ABI.
|
|
|
|
*/
|
1998-09-04 19:03:57 +00:00
|
|
|
unsigned long
|
1998-03-07 19:24:35 +00:00
|
|
|
elf_hash(const char *name)
|
|
|
|
{
|
|
|
|
const unsigned char *p = (const unsigned char *) name;
|
|
|
|
unsigned long h = 0;
|
|
|
|
unsigned long g;
|
|
|
|
|
|
|
|
while (*p != '\0') {
|
|
|
|
h = (h << 4) + *p++;
|
|
|
|
if ((g = h & 0xf0000000) != 0)
|
|
|
|
h ^= g >> 24;
|
|
|
|
h &= ~g;
|
|
|
|
}
|
|
|
|
return h;
|
|
|
|
}
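/*
 * The value is consumed modulo the bucket count during lookup, e.g. a
 * search over the sysv hash table starts at
 * obj->buckets[elf_hash(name) % obj->nbuckets] and then follows
 * obj->chains until the name matches or the chain ends.
 */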
|
|
|
|
|
2012-04-30 13:31:10 +00:00
|
|
|
/*
|
|
|
|
* The GNU hash function is the Daniel J. Bernstein hash clipped to 32 bits
|
|
|
|
* unsigned in case it's implemented with a wider type.
|
|
|
|
*/
|
|
|
|
static uint32_t
|
|
|
|
gnu_hash(const char *s)
|
|
|
|
{
|
|
|
|
uint32_t h;
|
|
|
|
unsigned char c;
|
|
|
|
|
|
|
|
h = 5381;
|
|
|
|
for (c = *s; c != '\0'; c = *++s)
|
|
|
|
h = h * 33 + c;
|
|
|
|
return (h & 0xffffffff);
|
|
|
|
}
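/*
 * Standalone sketch (not built, #if 0): prints both hashes for its
 * arguments, handy when comparing DT_HASH and DT_GNU_HASH bucket layouts.
 * During a GNU-hash lookup the 32-bit value is also run through the Bloom
 * filter words loaded in digest_dynamic1() before any chain is walked.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static unsigned long
sysv_hash(const char *name)
{
        const unsigned char *p = (const unsigned char *)name;
        unsigned long h = 0, g;

        while (*p != '\0') {
                h = (h << 4) + *p++;
                if ((g = h & 0xf0000000) != 0)
                        h ^= g >> 24;
                h &= ~g;
        }
        return (h);
}

static uint32_t
djb_hash(const char *s)
{
        uint32_t h = 5381;
        unsigned char c;

        for (c = *s; c != '\0'; c = *++s)
                h = h * 33 + c;
        return (h);
}

int
main(int argc, char **argv)
{
        int i;

        for (i = 1; i < argc; i++)
                printf("%-24s sysv 0x%08lx  gnu 0x%08x\n",
                    argv[i], sysv_hash(argv[i]), djb_hash(argv[i]));
        return (0);
}
#endif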
|
|
|
|
|
2014-06-20 17:08:32 +00:00
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Find the library with the given name, and return its full pathname.
|
|
|
|
* The returned string is dynamically allocated. Generates an error
|
|
|
|
* message and returns NULL if the library cannot be found.
|
|
|
|
*
|
|
|
|
* If the second argument is non-NULL, then it refers to an already-
|
|
|
|
* loaded shared object, whose library search path will be searched.
|
1998-09-05 03:31:00 +00:00
|
|
|
*
|
2014-06-20 17:08:32 +00:00
|
|
|
* If a library is successfully located via LD_LIBRARY_PATH_FDS, its
|
|
|
|
* descriptor (which is close-on-exec) will be passed out via the third
|
|
|
|
* argument.
|
|
|
|
*
|
1998-09-05 03:31:00 +00:00
|
|
|
* The search order is:
|
2012-07-15 10:53:48 +00:00
|
|
|
* DT_RPATH in the referencing file _unless_ DT_RUNPATH is present (1)
|
|
|
|
* DT_RPATH of the main object if DSO without defined DT_RUNPATH (1)
|
1998-09-05 03:31:00 +00:00
|
|
|
* LD_LIBRARY_PATH
|
2012-07-15 10:53:48 +00:00
|
|
|
* DT_RUNPATH in the referencing file
|
|
|
|
* ldconfig hints (if -z nodefaultlib, filter out default library directories
|
|
|
|
* from list)
|
|
|
|
* /lib:/usr/lib _unless_ the referencing file is linked with -z nodefaultlib
|
|
|
|
*
|
|
|
|
* (1) Handled in digest_dynamic2 - rpath left NULL if runpath defined.
|
1998-03-07 19:24:35 +00:00
|
|
|
*/
|
|
|
|
static char *
|
2014-06-20 17:08:32 +00:00
|
|
|
find_library(const char *xname, const Obj_Entry *refobj, int *fdp)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
|
|
|
char *pathname;
|
2003-04-07 16:21:26 +00:00
|
|
|
char *name;
|
2012-09-19 05:11:25 +00:00
|
|
|
bool nodeflib, objgiven;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
objgiven = refobj != NULL;
|
2017-07-27 08:33:31 +00:00
|
|
|
|
|
|
|
if (libmap_disable || !objgiven ||
|
|
|
|
(name = lm_find(refobj->path, xname)) == NULL)
|
|
|
|
name = (char *)xname;
|
|
|
|
|
|
|
|
if (strchr(name, '/') != NULL) { /* Hard coded pathname */
|
|
|
|
if (name[0] != '/' && !trust) {
|
1998-03-07 19:24:35 +00:00
|
|
|
_rtld_error("Absolute pathname required for shared object \"%s\"",
|
2017-07-27 08:33:31 +00:00
|
|
|
name);
|
|
|
|
return (NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
2015-04-27 18:41:31 +00:00
|
|
|
return (origin_subst(__DECONST(Obj_Entry *, refobj),
|
2017-07-27 08:33:31 +00:00
|
|
|
__DECONST(char *, name)));
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
dbg(" Searching for \"%s\"", name);
|
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
/*
|
|
|
|
* If refobj->rpath != NULL, then refobj->runpath is NULL. Fall
|
|
|
|
* back to pre-conforming behaviour if user requested so with
|
|
|
|
* LD_LIBRARY_PATH_RPATH environment variable and ignore -z
|
|
|
|
* nodeflib.
|
|
|
|
*/
|
|
|
|
if (objgiven && refobj->rpath != NULL && ld_library_path_rpath) {
|
|
|
|
if ((pathname = search_library_path(name, ld_library_path)) != NULL ||
|
|
|
|
(refobj != NULL &&
|
|
|
|
(pathname = search_library_path(name, refobj->rpath)) != NULL) ||
|
2014-06-20 17:08:32 +00:00
|
|
|
(pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
|
2012-07-15 10:53:48 +00:00
|
|
|
(pathname = search_library_path(name, gethints(false))) != NULL ||
|
2015-10-31 04:39:55 +00:00
|
|
|
(pathname = search_library_path(name, ld_standard_library_path)) != NULL)
|
2012-07-15 10:53:48 +00:00
|
|
|
return (pathname);
|
|
|
|
} else {
|
2012-09-19 05:11:25 +00:00
|
|
|
nodeflib = objgiven ? refobj->z_nodeflib : false;
|
2012-07-15 10:53:48 +00:00
|
|
|
if ((objgiven &&
|
|
|
|
(pathname = search_library_path(name, refobj->rpath)) != NULL) ||
|
|
|
|
(objgiven && refobj->runpath == NULL && refobj != obj_main &&
|
|
|
|
(pathname = search_library_path(name, obj_main->rpath)) != NULL) ||
|
|
|
|
(pathname = search_library_path(name, ld_library_path)) != NULL ||
|
|
|
|
(objgiven &&
|
|
|
|
(pathname = search_library_path(name, refobj->runpath)) != NULL) ||
|
2014-06-20 17:08:32 +00:00
|
|
|
(pathname = search_library_pathfds(name, ld_library_dirs, fdp)) != NULL ||
|
2012-09-19 05:11:25 +00:00
|
|
|
(pathname = search_library_path(name, gethints(nodeflib))) != NULL ||
|
|
|
|
(objgiven && !nodeflib &&
|
2015-10-31 04:39:55 +00:00
|
|
|
(pathname = search_library_path(name, ld_standard_library_path)) != NULL))
|
2012-07-15 10:53:48 +00:00
|
|
|
return (pathname);
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
if (objgiven && refobj->path != NULL) {
|
2004-05-28 00:05:28 +00:00
|
|
|
_rtld_error("Shared object \"%s\" not found, required by \"%s\"",
|
|
|
|
name, basename(refobj->path));
|
|
|
|
} else {
|
|
|
|
_rtld_error("Shared object \"%s\" not found", name);
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
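/*
 * Worked example for the normal branch above (names are illustrative):
 * resolving "libfoo.so.1" for a DSO that has only DT_RUNPATH
 * "$ORIGIN/../lib" tries, in order, LD_LIBRARY_PATH, the expanded runpath,
 * any LD_LIBRARY_PATH_FDS descriptors, the ldconfig hints, and finally the
 * standard path (/lib:/usr/lib), the last step being skipped when the
 * object was linked with -z nodefaultlib.
 */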
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Given a symbol number in a referencing object, find the corresponding
|
|
|
|
* definition of the symbol. Returns a pointer to the symbol, or NULL if
|
|
|
|
* no definition was found. Returns a pointer to the Obj_Entry of the
|
|
|
|
* defining object via the reference parameter DEFOBJ_OUT.
|
|
|
|
*/
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *
|
2000-09-19 04:27:16 +00:00
|
|
|
find_symdef(unsigned long symnum, const Obj_Entry *refobj,
|
2010-12-25 08:51:20 +00:00
|
|
|
const Obj_Entry **defobj_out, int flags, SymCache *cache,
|
|
|
|
RtldLockState *lockstate)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1998-09-04 19:03:57 +00:00
|
|
|
const Elf_Sym *ref;
|
1999-08-30 01:24:08 +00:00
|
|
|
const Elf_Sym *def;
|
|
|
|
const Obj_Entry *defobj;
|
2017-07-04 20:19:36 +00:00
|
|
|
const Ver_Entry *ve;
|
2010-12-25 08:51:20 +00:00
|
|
|
SymLook req;
|
1998-03-07 19:24:35 +00:00
|
|
|
const char *name;
|
2010-12-25 08:51:20 +00:00
|
|
|
int res;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2001-05-05 23:21:05 +00:00
|
|
|
/*
|
|
|
|
* If we have already found this symbol, get the information from
|
|
|
|
* the cache.
|
|
|
|
*/
|
2012-04-30 13:31:10 +00:00
|
|
|
if (symnum >= refobj->dynsymcount)
|
2001-05-05 23:21:05 +00:00
|
|
|
return NULL; /* Bad object */
|
|
|
|
if (cache != NULL && cache[symnum].sym != NULL) {
|
|
|
|
*defobj_out = cache[symnum].obj;
|
|
|
|
return cache[symnum].sym;
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
ref = refobj->symtab + symnum;
|
|
|
|
name = refobj->strtab + ref->st_name;
|
2010-12-25 08:51:20 +00:00
|
|
|
def = NULL;
|
1999-08-30 01:25:38 +00:00
|
|
|
defobj = NULL;
|
2017-07-04 20:19:36 +00:00
|
|
|
ve = NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2002-04-27 02:48:29 +00:00
|
|
|
/*
|
|
|
|
* We don't have to do a full scale lookup if the symbol is local.
|
|
|
|
* We know it will bind to the instance in this load module, to
|
|
|
|
* which we already have a pointer (i.e., ref). By not doing a lookup,
|
|
|
|
* we not only improve performance, but it also avoids unresolvable
|
|
|
|
* symbols when local symbols are not in the hash table. This has
|
|
|
|
* been seen with the ia64 toolchain.
|
|
|
|
*/
|
|
|
|
if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) {
|
|
|
|
if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) {
|
2002-04-02 02:19:02 +00:00
|
|
|
_rtld_error("%s: Bogus symbol table entry %lu", refobj->path,
|
|
|
|
symnum);
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
symlook_init(&req, name);
|
|
|
|
req.flags = flags;
|
2017-07-04 20:19:36 +00:00
|
|
|
ve = req.ventry = fetch_ventry(refobj, symnum);
|
2010-12-25 08:51:20 +00:00
|
|
|
req.lockstate = lockstate;
|
|
|
|
res = symlook_default(&req, refobj);
|
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
}
|
2002-04-27 02:48:29 +00:00
|
|
|
} else {
|
2002-04-02 02:19:02 +00:00
|
|
|
def = ref;
|
|
|
|
defobj = refobj;
|
2002-04-27 02:48:29 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-08-30 01:24:08 +00:00
|
|
|
/*
|
1999-08-30 01:48:19 +00:00
|
|
|
* If we found no definition and the reference is weak, treat the
|
1999-08-30 01:24:08 +00:00
|
|
|
* symbol as having the value zero.
|
|
|
|
*/
|
1999-08-30 01:48:19 +00:00
|
|
|
if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) {
|
|
|
|
def = &sym_zero;
|
|
|
|
defobj = obj_main;
|
1999-04-05 02:36:40 +00:00
|
|
|
}
|
|
|
|
|
2001-05-05 23:21:05 +00:00
|
|
|
if (def != NULL) {
|
1999-08-30 01:48:19 +00:00
|
|
|
*defobj_out = defobj;
|
2001-05-05 23:21:05 +00:00
|
|
|
/* Record the information in the cache to avoid subsequent lookups. */
|
|
|
|
if (cache != NULL) {
|
|
|
|
cache[symnum].sym = def;
|
|
|
|
cache[symnum].obj = defobj;
|
|
|
|
}
|
2001-10-15 18:48:42 +00:00
|
|
|
} else {
|
|
|
|
if (refobj != &obj_rtld)
|
2017-07-04 20:19:36 +00:00
|
|
|
_rtld_error("%s: Undefined symbol \"%s%s%s\"", refobj->path, name,
|
|
|
|
ve != NULL ? "@" : "", ve != NULL ? ve->name : "");
|
2001-10-15 18:48:42 +00:00
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
return def;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
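/*
 * The cache parameter above is a relocation-time optimization: callers
 * typically pass an array indexed by symbol number (sized from
 * obj->dynsymcount), so an object that relocates against the same symbol
 * many times pays for symlook_default() only once.
 */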
|
|
|
|
|
1998-09-05 03:31:00 +00:00
|
|
|
/*
|
|
|
|
* Return the search path from the ldconfig hints file, reading it if
|
2012-07-15 10:53:48 +00:00
|
|
|
* necessary. If nostdlib is true, then the default search paths are
|
|
|
|
* not added to the result.
|
|
|
|
*
|
|
|
|
* Returns NULL if there are problems with the hints file,
|
1998-09-05 03:31:00 +00:00
|
|
|
* or if the search path there is empty.
|
|
|
|
*/
|
|
|
|
static const char *
|
2012-07-15 10:53:48 +00:00
|
|
|
gethints(bool nostdlib)
|
1998-09-05 03:31:00 +00:00
|
|
|
{
|
2012-07-15 10:53:48 +00:00
|
|
|
static char *hints, *filtered_path;
|
2016-05-25 18:10:44 +00:00
|
|
|
static struct elfhints_hdr hdr;
|
2012-07-15 10:53:48 +00:00
|
|
|
struct fill_search_info_args sargs, hargs;
|
|
|
|
struct dl_serinfo smeta, hmeta, *SLPinfo, *hintinfo;
|
|
|
|
struct dl_serpath *SLPpath, *hintpath;
|
1998-09-05 03:31:00 +00:00
|
|
|
char *p;
|
2016-05-25 18:10:44 +00:00
|
|
|
struct stat hint_stat;
|
2012-07-15 10:53:48 +00:00
|
|
|
unsigned int SLPndx, hintndx, fndx, fcount;
|
|
|
|
int fd;
|
|
|
|
size_t flen;
|
2016-05-25 18:10:44 +00:00
|
|
|
uint32_t dl;
|
2012-07-15 10:53:48 +00:00
|
|
|
bool skip;
|
|
|
|
|
|
|
|
/* First call, read the hints file */
|
|
|
|
if (hints == NULL) {
|
|
|
|
/* Keep from trying again in case the hints file is bad. */
|
|
|
|
hints = "";
|
|
|
|
|
2012-11-04 21:42:24 +00:00
|
|
|
if ((fd = open(ld_elf_hints_path, O_RDONLY | O_CLOEXEC)) == -1)
|
2012-07-15 10:53:48 +00:00
|
|
|
return (NULL);
|
2016-05-25 18:10:44 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The check of the hdr.dirlistlen value against the type limit
|
|
|
|
* is intended to pacify static analyzers. Further
|
|
|
|
* paranoia leads to checks that the dirlist is fully
|
|
|
|
* contained in the file range.
|
|
|
|
*/
|
2012-07-15 10:53:48 +00:00
|
|
|
if (read(fd, &hdr, sizeof hdr) != sizeof hdr ||
|
|
|
|
hdr.magic != ELFHINTS_MAGIC ||
|
2016-05-25 18:10:44 +00:00
|
|
|
hdr.version != 1 || hdr.dirlistlen > UINT_MAX / 2 ||
|
|
|
|
fstat(fd, &hint_stat) == -1) {
|
|
|
|
cleanup1:
|
2012-07-15 10:53:48 +00:00
|
|
|
close(fd);
|
2016-05-25 18:10:44 +00:00
|
|
|
hdr.dirlistlen = 0;
|
2012-07-15 10:53:48 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
2016-05-25 18:10:44 +00:00
|
|
|
dl = hdr.strtab;
|
|
|
|
if (dl + hdr.dirlist < dl)
|
|
|
|
goto cleanup1;
|
|
|
|
dl += hdr.dirlist;
|
|
|
|
if (dl + hdr.dirlistlen < dl)
|
|
|
|
goto cleanup1;
|
|
|
|
dl += hdr.dirlistlen;
|
|
|
|
if (dl > hint_stat.st_size)
|
|
|
|
goto cleanup1;
|
2012-07-15 10:53:48 +00:00
|
|
|
p = xmalloc(hdr.dirlistlen + 1);
|
2016-05-25 18:10:44 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 ||
|
|
|
|
read(fd, p, hdr.dirlistlen + 1) !=
|
2016-05-25 18:10:44 +00:00
|
|
|
(ssize_t)hdr.dirlistlen + 1 || p[hdr.dirlistlen] != '\0') {
|
2012-07-15 10:53:48 +00:00
|
|
|
free(p);
|
2016-05-25 18:10:44 +00:00
|
|
|
goto cleanup1;
|
2012-07-15 10:53:48 +00:00
|
|
|
}
|
|
|
|
hints = p;
|
|
|
|
close(fd);
|
|
|
|
}
|
1998-09-05 03:31:00 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
/*
|
|
|
|
* If the caller agreed to receive a list which includes the default
|
|
|
|
* paths, we are done. Otherwise, if we have not yet
|
|
|
|
* calculated the filtered result, do it now.
|
|
|
|
*/
|
|
|
|
if (!nostdlib)
|
|
|
|
return (hints[0] != '\0' ? hints : NULL);
|
|
|
|
if (filtered_path != NULL)
|
|
|
|
goto filt_ret;
|
1998-09-05 03:31:00 +00:00
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
/*
|
|
|
|
* Obtain the list of all configured search paths, and the
|
|
|
|
* list of the default paths.
|
|
|
|
*
|
|
|
|
* First estimate the size of the results.
|
|
|
|
*/
|
|
|
|
smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
|
|
|
|
smeta.dls_cnt = 0;
|
|
|
|
hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
|
|
|
|
hmeta.dls_cnt = 0;
|
|
|
|
|
|
|
|
sargs.request = RTLD_DI_SERINFOSIZE;
|
|
|
|
sargs.serinfo = &smeta;
|
|
|
|
hargs.request = RTLD_DI_SERINFOSIZE;
|
|
|
|
hargs.serinfo = &hmeta;
|
|
|
|
|
2015-10-31 04:39:55 +00:00
|
|
|
path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
|
2016-05-25 18:10:44 +00:00
|
|
|
path_enumerate(hints, fill_search_info, &hargs);
|
2012-07-15 10:53:48 +00:00
|
|
|
|
|
|
|
SLPinfo = xmalloc(smeta.dls_size);
|
|
|
|
hintinfo = xmalloc(hmeta.dls_size);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Next fetch both sets of paths.
|
|
|
|
*/
|
|
|
|
sargs.request = RTLD_DI_SERINFO;
|
|
|
|
sargs.serinfo = SLPinfo;
|
|
|
|
sargs.serpath = &SLPinfo->dls_serpath[0];
|
|
|
|
sargs.strspace = (char *)&SLPinfo->dls_serpath[smeta.dls_cnt];
|
|
|
|
|
|
|
|
hargs.request = RTLD_DI_SERINFO;
|
|
|
|
hargs.serinfo = hintinfo;
|
|
|
|
hargs.serpath = &hintinfo->dls_serpath[0];
|
|
|
|
hargs.strspace = (char *)&hintinfo->dls_serpath[hmeta.dls_cnt];
|
|
|
|
|
2015-10-31 04:39:55 +00:00
|
|
|
path_enumerate(ld_standard_library_path, fill_search_info, &sargs);
|
2016-05-25 18:10:44 +00:00
|
|
|
path_enumerate(hints, fill_search_info, &hargs);
|
2012-07-15 10:53:48 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
 * Now calculate the difference between the two sets, by excluding
|
|
|
|
* standard paths from the full set.
|
|
|
|
*/
|
|
|
|
fndx = 0;
|
|
|
|
fcount = 0;
|
|
|
|
filtered_path = xmalloc(hdr.dirlistlen + 1);
|
|
|
|
hintpath = &hintinfo->dls_serpath[0];
|
|
|
|
for (hintndx = 0; hintndx < hmeta.dls_cnt; hintndx++, hintpath++) {
|
|
|
|
skip = false;
|
|
|
|
SLPpath = &SLPinfo->dls_serpath[0];
|
|
|
|
/*
|
|
|
|
 * Check each standard path against the current one.
|
|
|
|
*/
|
|
|
|
for (SLPndx = 0; SLPndx < smeta.dls_cnt; SLPndx++, SLPpath++) {
|
|
|
|
/* matched, skip the path */
|
|
|
|
if (!strcmp(hintpath->dls_name, SLPpath->dls_name)) {
|
|
|
|
skip = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (skip)
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* Not matched against any standard path, add the path
|
|
|
|
 * to the result.  Separate consecutive paths with ':'.
|
|
|
|
*/
|
|
|
|
if (fcount > 0) {
|
|
|
|
filtered_path[fndx] = ':';
|
|
|
|
fndx++;
|
|
|
|
}
|
|
|
|
fcount++;
|
|
|
|
flen = strlen(hintpath->dls_name);
|
|
|
|
strncpy((filtered_path + fndx), hintpath->dls_name, flen);
|
|
|
|
fndx += flen;
|
1998-09-05 03:31:00 +00:00
|
|
|
}
|
2012-07-15 10:53:48 +00:00
|
|
|
filtered_path[fndx] = '\0';
|
|
|
|
|
|
|
|
free(SLPinfo);
|
|
|
|
free(hintinfo);
|
|
|
|
|
|
|
|
filt_ret:
|
|
|
|
return (filtered_path[0] != '\0' ? filtered_path : NULL);
|
1998-09-05 03:31:00 +00:00
|
|
|
}
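
/*
 * Illustrative sketch, not part of rtld proper: the overflow-safe range
 * check performed on the hints header above, reduced to a standalone
 * helper.  The name hints_range_ok and its parameters are hypothetical;
 * assumes <stdbool.h>, <stdint.h> and <sys/types.h>.  Each addition is
 * tested for wrap-around before the final comparison, so a corrupted
 * elfhints header cannot place the dirlist outside the file.
 */
static bool
hints_range_ok(uint32_t strtab, uint32_t dirlist, uint32_t dirlistlen,
    off_t filesize)
{
        uint32_t end;

        end = strtab + dirlist;
        if (end < strtab)               /* strtab + dirlist wrapped */
                return (false);
        if (end + dirlistlen < end)     /* adding dirlistlen wrapped */
                return (false);
        return (end + dirlistlen <= filesize);
}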
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
|
|
|
init_dag(Obj_Entry *root)
|
|
|
|
{
|
2011-01-28 23:44:57 +00:00
|
|
|
const Needed_Entry *needed;
|
|
|
|
const Objlist_Entry *elm;
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
2000-07-08 04:10:38 +00:00
|
|
|
DoneList donelist;
|
|
|
|
|
If dlopen() is called for the dso that has been already loaded as a
dependency, then the dso never has its DAG initialized. Empty DAG
makes ref_dag() call in dlopen() a nop, and the dso refcount is off
by one.
Initialize the DAG on the first dlopen() call, using a boolean flag
to prevent double initialization.
From the PR (edited):
Assume we have a library liba.so, containing a function a(), and a
library libb.so, containing function b(). liba.so needs functionality
from libb.so, so liba.so links in libb.so.
An application doesn't know about the relation between these libraries,
but needs to call a() and b(). It dlopen()s liba.so and obtains a
pointer to a(), then it dlopen()s libb.so and obtains a pointer to b().
As soon as the application doesn't need a() anymore, it dlclose()s liba.so.
Expected result: the pointer to b() is still valid and can be called
Actual result: the pointer to b() has become invalid, even though the
application did not dlclose() the handle to libb.so. On calling b(), the
application crashes with a segmentation fault.
PR: misc/151861
Based on patch by: jh
Reviewed by: kan
Tested by: Arjan van Leeuwen <freebsd-maintainer opera com>
MFC after: 1 week
2010-11-03 09:23:08 +00:00
|
|
|
if (root->dag_inited)
|
2010-11-04 09:19:14 +00:00
|
|
|
return;
|
2000-07-08 04:10:38 +00:00
|
|
|
donelist_init(&donelist);
|
1999-08-30 01:48:19 +00:00
|
|
|
|
2011-01-28 23:44:57 +00:00
|
|
|
/* The root object belongs to its own DAG. */
|
|
|
|
objlist_push_tail(&root->dldags, root);
|
|
|
|
objlist_push_tail(&root->dagmembers, root);
|
|
|
|
donelist_check(&donelist, root);
|
2003-05-08 01:31:36 +00:00
|
|
|
|
2011-01-28 23:44:57 +00:00
|
|
|
/*
|
|
|
|
 * Add dependencies of the root object to the DAG in breadth-first
|
|
|
|
 * order, exploiting the fact that each new object gets added
|
|
|
|
* to the tail of the dagmembers list.
|
|
|
|
*/
|
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
|
|
|
for (needed = elm->obj->needed; needed != NULL; needed = needed->next) {
|
|
|
|
if (needed->obj == NULL || donelist_check(&donelist, needed->obj))
|
|
|
|
continue;
|
|
|
|
objlist_push_tail(&needed->obj->dldags, root);
|
|
|
|
objlist_push_tail(&root->dagmembers, needed->obj);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
root->dag_inited = true;
|
1999-08-30 01:48:19 +00:00
|
|
|
}
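
/*
 * Illustrative sketch, not part of rtld itself: the client-side
 * scenario from the PR quoted above.  The library names liba.so and
 * libb.so and the symbol b are hypothetical, and error checking is
 * omitted for brevity.  Because the DAG is initialized on the first
 * dlopen(), closing liba.so no longer drops libb.so's reference, so
 * the pointer obtained through hb stays valid.  Requires <dlfcn.h>.
 */
static void
dag_refcount_example(void)
{
        void *ha, *hb;
        void (*b)(void);

        ha = dlopen("liba.so", RTLD_NOW);       /* also loads libb.so as a dependency */
        hb = dlopen("libb.so", RTLD_NOW);       /* same object; its refcount is bumped */
        b = (void (*)(void))dlsym(hb, "b");
        dlclose(ha);                            /* done with liba.so */
        b();                                    /* still valid: hb keeps libb.so loaded */
        dlclose(hb);
}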
|
|
|
|
|
2016-12-22 17:44:27 +00:00
|
|
|
static void
|
|
|
|
init_marker(Obj_Entry *marker)
|
|
|
|
{
|
|
|
|
|
|
|
|
bzero(marker, sizeof(*marker));
|
|
|
|
marker->marker = true;
|
|
|
|
}
|
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
Obj_Entry *
|
|
|
|
globallist_curr(const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
if (obj == NULL)
|
|
|
|
return (NULL);
|
|
|
|
if (!obj->marker)
|
|
|
|
return (__DECONST(Obj_Entry *, obj));
|
|
|
|
obj = TAILQ_PREV(obj, obj_entry_q, next);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
Obj_Entry *
|
|
|
|
globallist_next(const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
obj = TAILQ_NEXT(obj, next);
|
|
|
|
if (obj == NULL)
|
|
|
|
return (NULL);
|
|
|
|
if (!obj->marker)
|
|
|
|
return (__DECONST(Obj_Entry *, obj));
|
|
|
|
}
|
|
|
|
}
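
/*
 * Illustrative sketch, not a definitive rtld API contract: a plausible
 * way to walk only the real objects on obj_list, skipping the marker
 * entries.  The loop shape mirrors how globallist_curr() and
 * globallist_next() are used elsewhere in this file; the visit
 * callback is hypothetical.
 */
static void
globallist_walk_example(void (*visit)(Obj_Entry *))
{
        Obj_Entry *obj;

        for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;
            obj = globallist_next(obj)) {
                visit(obj);             /* obj->marker is false here */
        }
}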
|
|
|
|
|
2016-12-22 17:37:39 +00:00
|
|
|
/* Prevent the object from being unmapped while the bind lock is dropped. */
|
|
|
|
static void
|
|
|
|
hold_object(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
obj->holdcount++;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
unhold_object(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
|
|
|
|
assert(obj->holdcount > 0);
|
|
|
|
if (--obj->holdcount == 0 && obj->unholdfree)
|
|
|
|
release_object(obj);
|
|
|
|
}
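
/*
 * Illustrative sketch of the calling pattern for the pair above, not a
 * new rtld entry point: pin the object, drop the bind lock while
 * running arbitrary user code (which may dlclose() the object), then
 * reacquire the lock and drop the hold.  This mirrors how the
 * init/fini walkers later in this file use hold_object() and
 * unhold_object(); lockstate is assumed to be a valid RtldLockState
 * and callback is hypothetical.
 */
static void
hold_around_callback_example(Obj_Entry *obj, RtldLockState *lockstate,
    void (*callback)(void))
{
        hold_object(obj);                       /* keep the mapping alive while unlocked */
        lock_release(rtld_bind_lock, lockstate);
        callback();                             /* may re-enter the dynamic linker */
        wlock_acquire(rtld_bind_lock, lockstate);
        unhold_object(obj);                     /* if obj was dlclosed meanwhile, release it now */
}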
|
|
|
|
|
2012-08-14 13:28:30 +00:00
|
|
|
static void
|
2015-04-15 08:16:34 +00:00
|
|
|
process_z(Obj_Entry *root)
|
2012-08-14 13:28:30 +00:00
|
|
|
{
|
|
|
|
const Objlist_Entry *elm;
|
2015-04-15 08:16:34 +00:00
|
|
|
Obj_Entry *obj;
|
2012-08-14 13:28:30 +00:00
|
|
|
|
|
|
|
/*
|
2015-04-15 08:16:34 +00:00
|
|
|
 * Walk over the object's DAG and process every dependent object
|
|
|
|
* that is marked as DF_1_NODELETE or DF_1_GLOBAL. They need
|
|
|
|
* to grow their own DAG.
|
|
|
|
*
|
|
|
|
* For DF_1_GLOBAL, DAG is required for symbol lookups in
|
|
|
|
* symlook_global() to work.
|
|
|
|
*
|
|
|
|
 * For DF_1_NODELETE, the DAG should have its reference count bumped.
|
2012-08-14 13:28:30 +00:00
|
|
|
*/
|
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
2015-04-15 08:16:34 +00:00
|
|
|
obj = elm->obj;
|
|
|
|
if (obj == NULL)
|
|
|
|
continue;
|
|
|
|
if (obj->z_nodelete && !obj->ref_nodel) {
|
|
|
|
dbg("obj %s -z nodelete", obj->path);
|
|
|
|
init_dag(obj);
|
|
|
|
ref_dag(obj);
|
|
|
|
obj->ref_nodel = true;
|
|
|
|
}
|
|
|
|
if (obj->z_global && objlist_find(&list_global, obj) == NULL) {
|
|
|
|
dbg("obj %s -z global", obj->path);
|
|
|
|
objlist_push_tail(&list_global, obj);
|
|
|
|
init_dag(obj);
|
2012-08-14 13:28:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Initialize the dynamic linker. The argument is the address at which
|
|
|
|
* the dynamic linker has been mapped into memory. The primary task of
|
|
|
|
* this function is to relocate the dynamic linker.
|
|
|
|
*/
|
|
|
|
static void
|
2010-08-17 09:05:39 +00:00
|
|
|
init_rtld(caddr_t mapbase, Elf_Auxinfo **aux_info)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2002-04-02 02:19:02 +00:00
|
|
|
Obj_Entry objtmp; /* Temporary rtld object */
|
2016-08-12 18:31:44 +00:00
|
|
|
const Elf_Ehdr *ehdr;
|
2010-08-17 09:05:39 +00:00
|
|
|
const Elf_Dyn *dyn_rpath;
|
|
|
|
const Elf_Dyn *dyn_soname;
|
2012-07-15 10:53:48 +00:00
|
|
|
const Elf_Dyn *dyn_runpath;
|
2002-04-02 02:19:02 +00:00
|
|
|
|
2014-07-02 22:04:12 +00:00
|
|
|
#ifdef RTLD_INIT_PAGESIZES_EARLY
|
|
|
|
/* The page size is required by the dynamic memory allocator. */
|
|
|
|
init_pagesizes(aux_info);
|
|
|
|
#endif
|
|
|
|
|
1998-09-15 21:07:52 +00:00
|
|
|
/*
|
|
|
|
* Conjure up an Obj_Entry structure for the dynamic linker.
|
|
|
|
*
|
2009-06-23 09:50:50 +00:00
|
|
|
* The "path" member can't be initialized yet because string constants
|
2009-06-23 14:12:49 +00:00
|
|
|
* cannot yet be accessed. Below we will set it correctly.
|
1998-09-15 21:07:52 +00:00
|
|
|
*/
|
2003-12-31 15:10:41 +00:00
|
|
|
memset(&objtmp, 0, sizeof(objtmp));
|
2002-04-02 02:19:02 +00:00
|
|
|
objtmp.path = NULL;
|
|
|
|
objtmp.rtld = true;
|
|
|
|
objtmp.mapbase = mapbase;
|
2014-11-25 03:50:31 +00:00
|
|
|
#ifdef PIC
|
2002-04-02 02:19:02 +00:00
|
|
|
objtmp.relocbase = mapbase;
|
1998-09-04 19:03:57 +00:00
|
|
|
#endif
|
2016-12-02 14:23:26 +00:00
|
|
|
|
|
|
|
objtmp.dynamic = rtld_dynamic(&objtmp);
|
|
|
|
digest_dynamic1(&objtmp, 1, &dyn_rpath, &dyn_soname, &dyn_runpath);
|
|
|
|
assert(objtmp.needed == NULL);
|
2008-04-04 20:59:26 +00:00
|
|
|
#if !defined(__mips__)
|
2016-12-02 14:23:26 +00:00
|
|
|
/* MIPS has a bogus DT_TEXTREL. */
|
|
|
|
assert(!objtmp.textrel);
|
2008-04-04 20:59:26 +00:00
|
|
|
#endif
|
2016-12-02 14:23:26 +00:00
|
|
|
/*
|
|
|
|
* Temporarily put the dynamic linker entry into the object list, so
|
|
|
|
* that symbols can be found.
|
|
|
|
*/
|
|
|
|
relocate_objects(&objtmp, true, &objtmp, 0, NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2016-08-12 18:31:44 +00:00
|
|
|
ehdr = (Elf_Ehdr *)mapbase;
|
|
|
|
objtmp.phdr = (Elf_Phdr *)((char *)mapbase + ehdr->e_phoff);
|
|
|
|
objtmp.phsize = ehdr->e_phnum * sizeof(objtmp.phdr[0]);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2002-04-02 02:19:02 +00:00
|
|
|
/* Initialize the object list. */
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_INIT(&obj_list);
|
2002-04-02 02:19:02 +00:00
|
|
|
|
|
|
|
/* Now that non-local variables can be accessed, copy out obj_rtld. */
|
|
|
|
memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld));
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2014-07-02 22:04:12 +00:00
|
|
|
#ifndef RTLD_INIT_PAGESIZES_EARLY
|
Before calling mmap() on a shared library's text and data sections, rtld
first calls mmap() with the arguments PROT_NONE and MAP_ANON to reserve a
single, contiguous range of virtual addresses for the entire shared library.
Later, rtld calls mmap() with the shared library's file descriptor
and the argument MAP_FIXED to place the text and data sections within the
reserved range. The rationale for mapping shared libraries in this way is
explained in the commit message for Revision 190885. However, this approach
does have an unintended, negative consequence. Since the first call to
mmap() specifies MAP_ANON and not the shared library's file descriptor, the
kernel has no idea what alignment the vm object backing the file prefers.
As a result, the reserved range's alignment is unlikely to be the same as
the vm object's, and so mapping with superpages becomes impossible. To
address this problem, this revision adds the argument MAP_ALIGNED_SUPER to
the first call to mmap() if the text section is larger than the smallest
superpage size.
To determine if the text section is larger than the smallest superpage
size, rtld must always fetch the page size information. As a result, the
private code for fetching the base page size in rtld's builtin malloc is
redundant. Eliminate it. Requested by: kib
Tested by: zbb (on arm)
Reviewed by: kib (an earlier version)
Discussed with: jhb
2014-04-11 16:55:25 +00:00
|
|
|
/* The page size is required by the dynamic memory allocator. */
|
|
|
|
init_pagesizes(aux_info);
|
2014-07-02 22:04:12 +00:00
|
|
|
#endif
|
2014-04-11 16:55:25 +00:00
|
|
|
|
2010-08-17 09:05:39 +00:00
|
|
|
if (aux_info[AT_OSRELDATE] != NULL)
|
|
|
|
osreldate = aux_info[AT_OSRELDATE]->a_un.a_val;
|
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
digest_dynamic2(&obj_rtld, dyn_rpath, dyn_soname, dyn_runpath);
|
2010-08-17 09:05:39 +00:00
|
|
|
|
1998-09-15 21:07:52 +00:00
|
|
|
/* Replace the path with a dynamically allocated copy. */
|
2015-10-31 04:39:55 +00:00
|
|
|
obj_rtld.path = xstrdup(ld_path_rtld);
|
1998-09-15 21:07:52 +00:00
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
r_debug.r_brk = r_debug_state;
|
|
|
|
r_debug.r_state = RT_CONSISTENT;
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2014-04-11 16:55:25 +00:00
|
|
|
/*
|
|
|
|
* Retrieve the array of supported page sizes. The kernel provides the page
|
|
|
|
* sizes in increasing order.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
init_pagesizes(Elf_Auxinfo **aux_info)
|
|
|
|
{
|
|
|
|
static size_t psa[MAXPAGESIZES];
|
|
|
|
int mib[2];
|
|
|
|
size_t len, size;
|
|
|
|
|
|
|
|
if (aux_info[AT_PAGESIZES] != NULL && aux_info[AT_PAGESIZESLEN] !=
|
|
|
|
NULL) {
|
|
|
|
size = aux_info[AT_PAGESIZESLEN]->a_un.a_val;
|
|
|
|
pagesizes = aux_info[AT_PAGESIZES]->a_un.a_ptr;
|
|
|
|
} else {
|
|
|
|
len = 2;
|
|
|
|
if (sysctlnametomib("hw.pagesizes", mib, &len) == 0)
|
|
|
|
size = sizeof(psa);
|
|
|
|
else {
|
|
|
|
/* As a fallback, retrieve the base page size. */
|
|
|
|
size = sizeof(psa[0]);
|
|
|
|
if (aux_info[AT_PAGESZ] != NULL) {
|
|
|
|
psa[0] = aux_info[AT_PAGESZ]->a_un.a_val;
|
|
|
|
goto psa_filled;
|
|
|
|
} else {
|
|
|
|
mib[0] = CTL_HW;
|
|
|
|
mib[1] = HW_PAGESIZE;
|
|
|
|
len = 2;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (sysctl(mib, len, psa, &size, NULL, 0) == -1) {
|
|
|
|
_rtld_error("sysctl for hw.pagesize(s) failed");
|
2015-04-02 21:35:36 +00:00
|
|
|
rtld_die();
|
2014-04-11 16:55:25 +00:00
|
|
|
}
|
|
|
|
psa_filled:
|
|
|
|
pagesizes = psa;
|
|
|
|
}
|
|
|
|
npagesizes = size / sizeof(pagesizes[0]);
|
|
|
|
/* Discard any invalid entries at the end of the array. */
|
|
|
|
while (npagesizes > 0 && pagesizes[npagesizes - 1] == 0)
|
|
|
|
npagesizes--;
|
|
|
|
}
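
/*
 * Illustrative sketch, not part of rtld: ordinary userland code can
 * obtain the same array with sysctlbyname(3), without the AT_PAGESIZES
 * fast path used above.  The array size of 8 is an arbitrary
 * assumption.  Requires <sys/types.h>, <sys/sysctl.h> and <stdio.h>.
 */
static void
print_pagesizes_example(void)
{
        size_t psa[8], size, i;

        size = sizeof(psa);
        if (sysctlbyname("hw.pagesizes", psa, &size, NULL, 0) != 0)
                return;
        for (i = 0; i < size / sizeof(psa[0]); i++) {
                if (psa[i] != 0)
                        printf("supported page size: %zu\n", psa[i]);
        }
}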
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
|
|
|
|
* Add the init functions from a needed object list (and its recursive
|
|
|
|
* needed objects) to "list". This is not used directly; it is a helper
|
|
|
|
* function for initlist_add_objects(). The write lock must be held
|
|
|
|
* when this function is called.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
initlist_add_neededs(Needed_Entry *needed, Objlist *list)
|
|
|
|
{
|
|
|
|
/* Recursively process the successor needed objects. */
|
|
|
|
if (needed->next != NULL)
|
|
|
|
initlist_add_neededs(needed->next, list);
|
|
|
|
|
|
|
|
/* Process the current needed object. */
|
|
|
|
if (needed->obj != NULL)
|
2016-01-20 23:26:35 +00:00
|
|
|
initlist_add_objects(needed->obj, needed->obj, list);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Scan all of the DAGs rooted in the range of objects from "obj" to
|
|
|
|
* "tail" and add their init functions to "list". This recurses over
|
|
|
|
 * the DAGs and ensures the proper init ordering such that each object's
|
|
|
|
* needed libraries are initialized before the object itself. At the
|
|
|
|
* same time, this function adds the objects to the global finalization
|
|
|
|
* list "list_fini" in the opposite order. The write lock must be
|
|
|
|
* held when this function is called.
|
|
|
|
*/
|
|
|
|
static void
|
2016-01-20 07:21:33 +00:00
|
|
|
initlist_add_objects(Obj_Entry *obj, Obj_Entry *tail, Objlist *list)
|
2000-07-26 04:24:40 +00:00
|
|
|
{
|
2016-01-20 07:21:33 +00:00
|
|
|
Obj_Entry *nobj;
|
2012-03-20 13:20:49 +00:00
|
|
|
|
2009-06-20 14:16:41 +00:00
|
|
|
if (obj->init_scanned || obj->init_done)
|
2000-07-26 04:24:40 +00:00
|
|
|
return;
|
2009-06-20 14:16:41 +00:00
|
|
|
obj->init_scanned = true;
|
2000-07-26 04:24:40 +00:00
|
|
|
|
|
|
|
/* Recursively process the successor objects. */
|
2016-01-20 07:21:33 +00:00
|
|
|
nobj = globallist_next(obj);
|
2016-01-20 23:26:35 +00:00
|
|
|
if (nobj != NULL && obj != tail)
|
2016-01-20 07:21:33 +00:00
|
|
|
initlist_add_objects(nobj, tail, list);
|
2000-07-26 04:24:40 +00:00
|
|
|
|
|
|
|
/* Recursively process the needed objects. */
|
|
|
|
if (obj->needed != NULL)
|
|
|
|
initlist_add_neededs(obj->needed, list);
|
2012-03-20 13:20:49 +00:00
|
|
|
if (obj->needed_filtees != NULL)
|
|
|
|
initlist_add_neededs(obj->needed_filtees, list);
|
|
|
|
if (obj->needed_aux_filtees != NULL)
|
|
|
|
initlist_add_neededs(obj->needed_aux_filtees, list);
|
2000-07-26 04:24:40 +00:00
|
|
|
|
|
|
|
/* Add the object to the init list. */
|
Add support for preinit, init and fini arrays. Some ABIs, in
particular on ARM, do require working init arrays.
Traditional FreeBSD crt1 calls _init and _fini of the binary, instead
of allowing runtime linker to arrange the calls. This was probably
done to have the same crt code serve both statically and dynamically
linked binaries. Since ABI mandates that first is called preinit
array functions, then init, and then init array functions, the init
have to be called from rtld now.
To provide binary compatibility to old FreeBSD crt1, which calls _init
itself, rtld only calls initializers and finalizers for the main binary if
binary has a note indicating that new crt was used for linking. Add
parsing of ELF notes to rtld, and cache p_osrel value since we parsed
it anyway.
The patch is inspired by init_array support for DragonflyBSD, written
by John Marino.
Reviewed by: kan
Tested by: andrew (arm, previous version), flo (sparc64, previous version)
MFC after: 3 weeks
2012-03-11 20:03:09 +00:00
|
|
|
if (obj->preinit_array != (Elf_Addr)NULL || obj->init != (Elf_Addr)NULL ||
|
|
|
|
obj->init_array != (Elf_Addr)NULL)
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_tail(list, obj);
|
|
|
|
|
|
|
|
/* Add the object to the global fini list in the reverse order. */
|
2012-03-11 20:03:09 +00:00
|
|
|
if ((obj->fini != (Elf_Addr)NULL || obj->fini_array != (Elf_Addr)NULL)
|
|
|
|
&& !obj->on_fini_list) {
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_head(&list_fini, obj);
|
2009-06-20 14:16:41 +00:00
|
|
|
obj->on_fini_list = true;
|
|
|
|
}
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
2001-10-15 18:48:42 +00:00
|
|
|
#ifndef FPTR_TARGET
|
|
|
|
#define FPTR_TARGET(f) ((Elf_Addr) (f))
|
|
|
|
#endif
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
static void
|
2017-03-09 21:05:47 +00:00
|
|
|
free_needed_filtees(Needed_Entry *n, RtldLockState *lockstate)
|
2010-12-25 08:51:20 +00:00
|
|
|
{
|
|
|
|
Needed_Entry *needed, *needed1;
|
|
|
|
|
|
|
|
for (needed = n; needed != NULL; needed = needed->next) {
|
|
|
|
if (needed->obj != NULL) {
|
2017-03-09 21:05:47 +00:00
|
|
|
dlclose_locked(needed->obj, lockstate);
|
2010-12-25 08:51:20 +00:00
|
|
|
needed->obj = NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for (needed = n; needed != NULL; needed = needed1) {
|
|
|
|
needed1 = needed->next;
|
|
|
|
free(needed);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_filtees(Obj_Entry *obj, RtldLockState *lockstate)
|
2010-12-25 08:51:20 +00:00
|
|
|
{
|
|
|
|
|
2017-03-09 21:05:47 +00:00
|
|
|
free_needed_filtees(obj->needed_filtees, lockstate);
|
|
|
|
obj->needed_filtees = NULL;
|
|
|
|
free_needed_filtees(obj->needed_aux_filtees, lockstate);
|
|
|
|
obj->needed_aux_filtees = NULL;
|
|
|
|
obj->filtees_loaded = false;
|
2010-12-25 08:51:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2012-04-12 10:32:22 +00:00
|
|
|
load_filtee1(Obj_Entry *obj, Needed_Entry *needed, int flags,
|
|
|
|
RtldLockState *lockstate)
|
2010-12-25 08:51:20 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
for (; needed != NULL; needed = needed->next) {
|
2012-01-07 10:33:01 +00:00
|
|
|
needed->obj = dlopen_object(obj->strtab + needed->name, -1, obj,
|
2010-12-25 08:51:20 +00:00
|
|
|
flags, ((ld_loadfltr || obj->z_loadfltr) ? RTLD_NOW : RTLD_LAZY) |
|
2012-04-12 10:32:22 +00:00
|
|
|
RTLD_LOCAL, lockstate);
|
2010-12-25 08:51:20 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
load_filtees(Obj_Entry *obj, int flags, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
|
|
|
|
lock_restart_for_upgrade(lockstate);
|
|
|
|
if (!obj->filtees_loaded) {
|
2012-04-12 10:32:22 +00:00
|
|
|
load_filtee1(obj, obj->needed_filtees, flags, lockstate);
|
|
|
|
load_filtee1(obj, obj->needed_aux_filtees, flags, lockstate);
|
2010-12-25 08:51:20 +00:00
|
|
|
obj->filtees_loaded = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
process_needed(Obj_Entry *obj, Needed_Entry *needed, int flags)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj1;
|
|
|
|
|
|
|
|
for (; needed != NULL; needed = needed->next) {
|
2012-01-07 10:33:01 +00:00
|
|
|
obj1 = needed->obj = load_object(obj->strtab + needed->name, -1, obj,
|
2010-12-25 08:51:20 +00:00
|
|
|
flags & ~RTLD_LO_NOLOAD);
|
|
|
|
if (obj1 == NULL && !ld_tracing && (flags & RTLD_LO_FILTEES) == 0)
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Given a shared object, traverse its list of needed objects, and load
|
|
|
|
* each of them. Returns 0 on success. Generates an error message and
|
|
|
|
* returns -1 on failure.
|
|
|
|
*/
|
|
|
|
static int
|
2009-11-26 13:57:20 +00:00
|
|
|
load_needed_objects(Obj_Entry *first, int flags)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2010-12-25 08:51:20 +00:00
|
|
|
Obj_Entry *obj;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2016-08-12 18:29:11 +00:00
|
|
|
for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2010-12-25 08:51:20 +00:00
|
|
|
if (process_needed(obj, obj->needed, flags) == -1)
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
return (0);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1998-09-22 02:09:56 +00:00
|
|
|
static int
|
|
|
|
load_preload_objects(void)
|
|
|
|
{
|
|
|
|
char *p = ld_preload;
|
2013-10-07 08:19:30 +00:00
|
|
|
Obj_Entry *obj;
|
2000-01-22 22:20:05 +00:00
|
|
|
static const char delim[] = " \t:;";
|
1998-09-22 02:09:56 +00:00
|
|
|
|
|
|
|
if (p == NULL)
|
2004-03-05 08:10:19 +00:00
|
|
|
return 0;
|
1998-09-22 02:09:56 +00:00
|
|
|
|
2000-01-22 22:20:05 +00:00
|
|
|
p += strspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
while (*p != '\0') {
|
2000-01-22 22:20:05 +00:00
|
|
|
size_t len = strcspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
char savech;
|
|
|
|
|
|
|
|
savech = p[len];
|
|
|
|
p[len] = '\0';
|
2013-10-07 08:19:30 +00:00
|
|
|
obj = load_object(p, -1, NULL, 0);
|
|
|
|
if (obj == NULL)
|
1998-09-22 02:09:56 +00:00
|
|
|
return -1; /* XXX - cleanup */
|
2013-10-07 08:19:30 +00:00
|
|
|
obj->z_interpose = true;
|
1998-09-22 02:09:56 +00:00
|
|
|
p[len] = savech;
|
|
|
|
p += len;
|
2000-01-22 22:20:05 +00:00
|
|
|
p += strspn(p, delim);
|
1998-09-22 02:09:56 +00:00
|
|
|
}
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_PRELOAD_FINISHED, NULL, NULL, 0, 0, NULL);
|
1998-09-22 02:09:56 +00:00
|
|
|
return 0;
|
|
|
|
}
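
/*
 * Illustrative sketch, not part of rtld: the in-place strspn()/strcspn()
 * tokenizer used above, generalized to an arbitrary writable string.
 * The handle_token callback is hypothetical.  Requires <string.h> and
 * <stddef.h>.
 */
static void
tokenize_example(char *p, void (*handle_token)(const char *))
{
        static const char delim[] = " \t:;";
        size_t len;
        char savech;

        p += strspn(p, delim);                  /* skip leading separators */
        while (*p != '\0') {
                len = strcspn(p, delim);        /* length of the next token */
                savech = p[len];
                p[len] = '\0';                  /* terminate the token in place */
                handle_token(p);
                p[len] = savech;                /* restore the separator */
                p += len;
                p += strspn(p, delim);          /* advance to the next token */
        }
}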
|
|
|
|
|
2012-01-07 10:33:01 +00:00
|
|
|
static const char *
|
|
|
|
printable_path(const char *path)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (path == NULL ? "<unknown>" : path);
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
2012-01-07 10:33:01 +00:00
|
|
|
* Load a shared object into memory, if it is not already loaded. The
|
|
|
|
* object may be specified by name or by user-supplied file descriptor
|
|
|
|
 * fd_u.  In the latter case, the fd_u descriptor is not closed, but its
|
|
|
|
* duplicate is.
|
1998-03-07 19:24:35 +00:00
|
|
|
*
|
|
|
|
* Returns a pointer to the Obj_Entry for the object. Returns NULL
|
|
|
|
* on failure.
|
|
|
|
*/
|
|
|
|
static Obj_Entry *
|
2012-01-07 10:33:01 +00:00
|
|
|
load_object(const char *name, int fd_u, const Obj_Entry *refobj, int flags)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
2012-01-07 10:33:01 +00:00
|
|
|
int fd;
|
1999-08-30 01:50:41 +00:00
|
|
|
struct stat sb;
|
2005-12-18 19:43:33 +00:00
|
|
|
char *path;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2014-06-20 17:08:32 +00:00
|
|
|
fd = -1;
|
2012-01-07 10:33:01 +00:00
|
|
|
if (name != NULL) {
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
2016-12-22 17:41:32 +00:00
|
|
|
if (obj->marker || obj->doomed)
|
2016-01-20 07:21:33 +00:00
|
|
|
continue;
|
2012-01-07 10:33:01 +00:00
|
|
|
if (object_match_name(obj, name))
|
|
|
|
return (obj);
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
|
2014-06-20 17:08:32 +00:00
|
|
|
path = find_library(name, refobj, &fd);
|
2012-01-07 10:33:01 +00:00
|
|
|
if (path == NULL)
|
|
|
|
return (NULL);
|
|
|
|
} else
|
|
|
|
path = NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2014-06-20 17:08:32 +00:00
|
|
|
if (fd >= 0) {
|
|
|
|
/*
|
|
|
|
* search_library_pathfds() opens a fresh file descriptor for the
|
|
|
|
* library, so there is no need to dup().
|
|
|
|
*/
|
|
|
|
} else if (fd_u == -1) {
|
|
|
|
/*
|
|
|
|
* If we didn't find a match by pathname, or the name is not
|
|
|
|
* supplied, open the file and check again by device and inode.
|
|
|
|
* This avoids false mismatches caused by multiple links or ".."
|
|
|
|
* in pathnames.
|
|
|
|
*
|
|
|
|
* To avoid a race, we open the file and use fstat() rather than
|
|
|
|
* using stat().
|
|
|
|
*/
|
2015-04-22 01:54:25 +00:00
|
|
|
if ((fd = open(path, O_RDONLY | O_CLOEXEC | O_VERIFY)) == -1) {
|
2012-01-07 10:33:01 +00:00
|
|
|
_rtld_error("Cannot open \"%s\"", path);
|
|
|
|
free(path);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
} else {
|
2012-11-04 21:42:24 +00:00
|
|
|
fd = fcntl(fd_u, F_DUPFD_CLOEXEC, 0);
|
2012-01-07 10:33:01 +00:00
|
|
|
if (fd == -1) {
|
|
|
|
_rtld_error("Cannot dup fd");
|
|
|
|
free(path);
|
|
|
|
return (NULL);
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
|
|
|
if (fstat(fd, &sb) == -1) {
|
2012-01-07 10:33:01 +00:00
|
|
|
_rtld_error("Cannot fstat \"%s\"", printable_path(path));
|
2005-12-18 19:43:33 +00:00
|
|
|
close(fd);
|
|
|
|
free(path);
|
|
|
|
return NULL;
|
|
|
|
}
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
2016-12-22 17:41:32 +00:00
|
|
|
if (obj->marker || obj->doomed)
|
2016-01-20 07:21:33 +00:00
|
|
|
continue;
|
2011-03-25 18:23:10 +00:00
|
|
|
if (obj->ino == sb.st_ino && obj->dev == sb.st_dev)
|
2005-12-18 19:43:33 +00:00
|
|
|
break;
|
2016-01-20 07:21:33 +00:00
|
|
|
}
|
2012-01-07 10:33:01 +00:00
|
|
|
if (obj != NULL && name != NULL) {
|
2005-12-18 19:43:33 +00:00
|
|
|
object_add_name(obj, name);
|
|
|
|
free(path);
|
|
|
|
close(fd);
|
|
|
|
return obj;
|
|
|
|
}
|
2010-06-03 00:25:43 +00:00
|
|
|
if (flags & RTLD_LO_NOLOAD) {
|
|
|
|
free(path);
|
2011-11-18 09:55:47 +00:00
|
|
|
close(fd);
|
2009-07-17 19:45:42 +00:00
|
|
|
return (NULL);
|
2010-06-03 00:25:43 +00:00
|
|
|
}
|
1999-08-30 01:50:41 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
/* First use of this object, so we must map it in */
|
2009-11-26 13:57:20 +00:00
|
|
|
obj = do_load_object(fd, name, path, &sb, flags);
|
2005-12-18 19:43:33 +00:00
|
|
|
if (obj == NULL)
|
|
|
|
free(path);
|
|
|
|
close(fd);
|
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
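
/*
 * Illustrative sketch, not part of rtld: the device/inode identity test
 * used above to detect an already-loaded object, expressed as a
 * standalone helper for two open descriptors.  The name same_file is
 * hypothetical.  Requires <sys/stat.h>.
 */
static int
same_file(int fd1, int fd2)
{
        struct stat sb1, sb2;

        if (fstat(fd1, &sb1) == -1 || fstat(fd2, &sb2) == -1)
                return (-1);            /* could not tell */
        return (sb1.st_dev == sb2.st_dev && sb1.st_ino == sb2.st_ino);
}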
|
|
|
|
|
|
|
|
static Obj_Entry *
|
2009-11-26 13:57:20 +00:00
|
|
|
do_load_object(int fd, const char *name, char *path, struct stat *sbp,
|
|
|
|
int flags)
|
2005-12-18 19:43:33 +00:00
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
struct statfs fs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * But first, make sure that environment variables haven't been
|
|
|
|
* used to circumvent the noexec flag on a filesystem.
|
|
|
|
*/
|
|
|
|
if (dangerous_ld_env) {
|
|
|
|
if (fstatfs(fd, &fs) != 0) {
|
2012-01-07 10:33:01 +00:00
|
|
|
_rtld_error("Cannot fstatfs \"%s\"", printable_path(path));
|
|
|
|
return NULL;
|
2005-03-24 10:12:29 +00:00
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
if (fs.f_flags & MNT_NOEXEC) {
|
|
|
|
_rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2012-01-07 10:33:01 +00:00
|
|
|
dbg("loading \"%s\"", printable_path(path));
|
|
|
|
obj = map_object(fd, printable_path(path), sbp);
|
2005-12-18 19:43:33 +00:00
|
|
|
if (obj == NULL)
|
|
|
|
return NULL;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2012-01-07 10:33:01 +00:00
|
|
|
/*
|
|
|
|
* If DT_SONAME is present in the object, digest_dynamic2 already
|
|
|
|
* added it to the object names.
|
|
|
|
*/
|
|
|
|
if (name != NULL)
|
|
|
|
object_add_name(obj, name);
|
2005-12-18 19:43:33 +00:00
|
|
|
obj->path = path;
|
|
|
|
digest_dynamic(obj, 0);
|
2012-04-30 13:31:10 +00:00
|
|
|
dbg("%s valid_hash_sysv %d valid_hash_gnu %d dynsymcount %d", obj->path,
|
|
|
|
obj->valid_hash_sysv, obj->valid_hash_gnu, obj->dynsymcount);
|
2009-11-28 14:29:32 +00:00
|
|
|
if (obj->z_noopen && (flags & (RTLD_LO_DLOPEN | RTLD_LO_TRACE)) ==
|
|
|
|
RTLD_LO_DLOPEN) {
|
2009-11-26 13:57:20 +00:00
|
|
|
dbg("refusing to load non-loadable \"%s\"", obj->path);
|
2009-11-28 14:29:32 +00:00
|
|
|
_rtld_error("Cannot dlopen non-loadable %s", obj->path);
|
2009-11-26 13:57:20 +00:00
|
|
|
munmap(obj->mapbase, obj->mapsize);
|
|
|
|
obj_free(obj);
|
|
|
|
return (NULL);
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2015-01-03 18:09:53 +00:00
|
|
|
obj->dlopened = (flags & RTLD_LO_DLOPEN) != 0;
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_INSERT_TAIL(&obj_list, obj, next);
|
2005-12-18 19:43:33 +00:00
|
|
|
obj_count++;
|
2007-04-03 18:31:20 +00:00
|
|
|
obj_loads++;
|
2005-12-18 19:43:33 +00:00
|
|
|
linkmap_add(obj); /* for GDB & dlinfo() */
|
2011-01-08 17:11:49 +00:00
|
|
|
max_stack_flags |= obj->stack_flags;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
dbg(" %p .. %p: %s", obj->mapbase,
|
|
|
|
obj->mapbase + obj->mapsize - 1, obj->path);
|
|
|
|
if (obj->textrel)
|
|
|
|
dbg(" WARNING: %s has impure text", obj->path);
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
|
|
|
|
obj->path);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
static Obj_Entry *
|
|
|
|
obj_from_addr(const void *addr)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
1998-03-07 19:24:35 +00:00
|
|
|
if (addr < (void *) obj->mapbase)
|
|
|
|
continue;
|
2004-02-25 17:06:16 +00:00
|
|
|
if (addr < (void *) (obj->mapbase + obj->mapsize))
|
1998-03-07 19:24:35 +00:00
|
|
|
return obj;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2012-03-11 20:03:09 +00:00
|
|
|
static void
|
|
|
|
preinit_main(void)
|
|
|
|
{
|
|
|
|
Elf_Addr *preinit_addr;
|
|
|
|
int index;
|
|
|
|
|
|
|
|
preinit_addr = (Elf_Addr *)obj_main->preinit_array;
|
2012-03-12 11:22:23 +00:00
|
|
|
if (preinit_addr == NULL)
|
2012-03-11 20:03:09 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
for (index = 0; index < obj_main->preinit_array_num; index++) {
|
|
|
|
if (preinit_addr[index] != 0 && preinit_addr[index] != 1) {
|
|
|
|
dbg("calling preinit function for %s at %p", obj_main->path,
|
|
|
|
(void *)preinit_addr[index]);
|
|
|
|
LD_UTRACE(UTRACE_INIT_CALL, obj_main, (void *)preinit_addr[index],
|
|
|
|
0, 0, obj_main->path);
|
|
|
|
call_init_pointer(obj_main, preinit_addr[index]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
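
/*
 * Illustrative sketch, not part of rtld: the sentinel convention the
 * loop above relies on.  Entries equal to 0 or 1 in the preinit, init
 * and fini arrays are treated as placeholders and skipped rather than
 * called, mirroring the checks in the loops above and below.  The
 * helper name run_init_array and the InitArr_Func typedef are local
 * inventions; argument passing is omitted for brevity.
 */
typedef void (*InitArr_Func)(void);

static void
run_init_array(const Elf_Addr *arr, int num)
{
        int i;

        for (i = 0; i < num; i++) {
                if (arr[i] == 0 || arr[i] == 1) /* skip sentinel entries */
                        continue;
                ((InitArr_Func)arr[i])();
        }
}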
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
/*
|
|
|
|
* Call the finalization functions for each of the objects in "list"
|
2010-12-16 16:56:44 +00:00
|
|
|
* belonging to the DAG of "root" and referenced once. If NULL "root"
|
|
|
|
* is specified, every finalization function will be called regardless
|
|
|
|
* of the reference count and the list elements won't be freed. All of
|
|
|
|
* the objects are expected to have non-NULL fini functions.
|
2000-07-26 04:24:40 +00:00
|
|
|
*/
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
2010-12-25 08:51:20 +00:00
|
|
|
objlist_call_fini(Objlist *list, Obj_Entry *root, RtldLockState *lockstate)
|
1999-08-30 01:48:19 +00:00
|
|
|
{
|
2010-12-16 16:56:44 +00:00
|
|
|
Objlist_Entry *elm;
|
2001-01-05 04:36:17 +00:00
|
|
|
char *saved_msg;
|
2012-03-11 20:03:09 +00:00
|
|
|
Elf_Addr *fini_addr;
|
|
|
|
int index;
|
1999-08-30 01:48:19 +00:00
|
|
|
|
2010-12-16 16:56:44 +00:00
|
|
|
assert(root == NULL || root->refcount == 1);
|
|
|
|
|
2016-12-22 17:41:32 +00:00
|
|
|
if (root != NULL)
|
|
|
|
root->doomed = true;
|
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Preserve the current error message since a fini function might
|
|
|
|
* call into the dynamic linker and overwrite it.
|
|
|
|
*/
|
|
|
|
saved_msg = errmsg_save();
|
2010-12-16 16:56:44 +00:00
|
|
|
do {
|
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
|
|
|
if (root != NULL && (elm->obj->refcount != 1 ||
|
|
|
|
objlist_find(&root->dagmembers, elm->obj) == NULL))
|
|
|
|
continue;
|
2009-06-20 14:16:41 +00:00
|
|
|
/* Remove object from fini list to prevent recursive invocation. */
|
|
|
|
STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
|
2016-12-22 17:41:32 +00:00
|
|
|
/* Ensure that new references cannot be acquired. */
|
|
|
|
elm->obj->doomed = true;
|
|
|
|
|
2016-12-22 17:37:39 +00:00
|
|
|
hold_object(elm->obj);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, lockstate);
|
2012-03-11 20:03:09 +00:00
|
|
|
/*
|
|
|
|
* It is legal to have both DT_FINI and DT_FINI_ARRAY defined.
|
|
|
|
* When this happens, DT_FINI_ARRAY is processed first.
|
|
|
|
*/
|
|
|
|
fini_addr = (Elf_Addr *)elm->obj->fini_array;
|
|
|
|
if (fini_addr != NULL && elm->obj->fini_array_num > 0) {
|
|
|
|
for (index = elm->obj->fini_array_num - 1; index >= 0;
|
|
|
|
index--) {
|
|
|
|
if (fini_addr[index] != 0 && fini_addr[index] != 1) {
|
|
|
|
dbg("calling fini function for %s at %p",
|
|
|
|
elm->obj->path, (void *)fini_addr[index]);
|
|
|
|
LD_UTRACE(UTRACE_FINI_CALL, elm->obj,
|
|
|
|
(void *)fini_addr[index], 0, 0, elm->obj->path);
|
|
|
|
call_initfini_pointer(elm->obj, fini_addr[index]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (elm->obj->fini != (Elf_Addr)NULL) {
|
|
|
|
dbg("calling fini function for %s at %p", elm->obj->path,
|
|
|
|
(void *)elm->obj->fini);
|
|
|
|
LD_UTRACE(UTRACE_FINI_CALL, elm->obj, (void *)elm->obj->fini,
|
|
|
|
0, 0, elm->obj->path);
|
|
|
|
call_initfini_pointer(elm->obj, elm->obj->fini);
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, lockstate);
|
2016-12-22 17:37:39 +00:00
|
|
|
unhold_object(elm->obj);
|
2009-06-20 14:16:41 +00:00
|
|
|
/* No need to free anything if process is going down. */
|
2010-12-16 16:56:44 +00:00
|
|
|
if (root != NULL)
|
2009-06-20 14:16:41 +00:00
|
|
|
free(elm);
|
2010-12-16 16:56:44 +00:00
|
|
|
/*
|
|
|
|
* We must restart the list traversal after every fini call
|
|
|
|
* because a dlclose() call from the fini function or from
|
|
|
|
* another thread might have modified the reference counts.
|
|
|
|
*/
|
|
|
|
break;
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
2010-12-16 16:56:44 +00:00
|
|
|
} while (elm != NULL);
|
2001-01-05 04:36:17 +00:00
|
|
|
errmsg_restore(saved_msg);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call the initialization functions for each of the objects in
|
|
|
|
* "list". All of the objects are expected to have non-NULL init
|
|
|
|
* functions.
|
|
|
|
*/
|
|
|
|
static void
|
2010-12-25 08:51:20 +00:00
|
|
|
objlist_call_init(Objlist *list, RtldLockState *lockstate)
|
2000-07-26 04:24:40 +00:00
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
2009-06-20 14:16:41 +00:00
|
|
|
Obj_Entry *obj;
|
2001-01-05 04:36:17 +00:00
|
|
|
char *saved_msg;
|
2012-03-11 20:03:09 +00:00
|
|
|
Elf_Addr *init_addr;
|
|
|
|
int index;
|
2000-07-26 04:24:40 +00:00
|
|
|
|
2009-06-20 14:16:41 +00:00
|
|
|
/*
|
|
|
|
 * Clear the init_scanned flag so that objects can be rechecked and
|
|
|
|
 * possibly initialized earlier if any of the vectors called below
|
|
|
|
 * causes such a change by using dlopen.
|
|
|
|
*/
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_FOREACH(obj, &obj_list, next) {
|
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2009-06-20 14:16:41 +00:00
|
|
|
obj->init_scanned = false;
|
2016-01-20 07:21:33 +00:00
|
|
|
}
|
2009-06-20 14:16:41 +00:00
|
|
|
|
2001-01-05 04:36:17 +00:00
|
|
|
/*
|
|
|
|
* Preserve the current error message since an init function might
|
|
|
|
* call into the dynamic linker and overwrite it.
|
|
|
|
*/
|
|
|
|
saved_msg = errmsg_save();
|
2000-07-26 04:24:40 +00:00
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
2009-06-20 14:16:41 +00:00
|
|
|
if (elm->obj->init_done) /* Initialized early. */
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
 * Race: another thread might try to use this object before the current
|
2016-12-16 14:23:08 +00:00
|
|
|
* one completes the initialization. Not much can be done here
|
2009-06-20 14:16:41 +00:00
|
|
|
* without better locking.
|
|
|
|
*/
|
|
|
|
elm->obj->init_done = true;
|
2016-12-22 17:37:39 +00:00
|
|
|
hold_object(elm->obj);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, lockstate);
|
2012-03-11 20:03:09 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* It is legal to have both DT_INIT and DT_INIT_ARRAY defined.
|
|
|
|
* When this happens, DT_INIT is processed first.
|
|
|
|
*/
|
|
|
|
if (elm->obj->init != (Elf_Addr)NULL) {
|
|
|
|
dbg("calling init function for %s at %p", elm->obj->path,
|
|
|
|
(void *)elm->obj->init);
|
|
|
|
LD_UTRACE(UTRACE_INIT_CALL, elm->obj, (void *)elm->obj->init,
|
|
|
|
0, 0, elm->obj->path);
|
|
|
|
call_initfini_pointer(elm->obj, elm->obj->init);
|
|
|
|
}
|
|
|
|
init_addr = (Elf_Addr *)elm->obj->init_array;
|
2012-03-12 11:22:23 +00:00
|
|
|
if (init_addr != NULL) {
|
2012-03-11 20:03:09 +00:00
|
|
|
for (index = 0; index < elm->obj->init_array_num; index++) {
|
|
|
|
if (init_addr[index] != 0 && init_addr[index] != 1) {
|
|
|
|
dbg("calling init function for %s at %p", elm->obj->path,
|
|
|
|
(void *)init_addr[index]);
|
|
|
|
LD_UTRACE(UTRACE_INIT_CALL, elm->obj,
|
|
|
|
(void *)init_addr[index], 0, 0, elm->obj->path);
|
|
|
|
call_init_pointer(elm->obj, init_addr[index]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, lockstate);
|
2016-12-22 17:37:39 +00:00
|
|
|
unhold_object(elm->obj);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
2001-01-05 04:36:17 +00:00
|
|
|
errmsg_restore(saved_msg);
|
2000-07-26 04:24:40 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_clear(Objlist *list)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
while (!STAILQ_EMPTY(list)) {
|
|
|
|
elm = STAILQ_FIRST(list);
|
|
|
|
STAILQ_REMOVE_HEAD(list, link);
|
|
|
|
free(elm);
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static Objlist_Entry *
|
|
|
|
objlist_find(Objlist *list, const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(elm, list, link)
|
|
|
|
if (elm->obj == obj)
|
|
|
|
return elm;
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
static void
|
|
|
|
objlist_init(Objlist *list)
|
|
|
|
{
|
|
|
|
STAILQ_INIT(list);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_head(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_HEAD(list, elm, link);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
objlist_push_tail(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
STAILQ_INSERT_TAIL(list, elm, link);
|
|
|
|
}
|
|
|
|
|
2013-10-07 08:19:30 +00:00
|
|
|
static void
|
|
|
|
objlist_put_after(Objlist *list, Obj_Entry *listobj, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm, *listelm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(listelm, list, link) {
|
|
|
|
if (listelm->obj == listobj)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
elm = NEW(Objlist_Entry);
|
|
|
|
elm->obj = obj;
|
|
|
|
if (listelm != NULL)
|
|
|
|
STAILQ_INSERT_AFTER(list, listelm, elm, link);
|
|
|
|
else
|
|
|
|
STAILQ_INSERT_TAIL(list, elm, link);
|
|
|
|
}
|
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
static void
|
|
|
|
objlist_remove(Objlist *list, Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
if ((elm = objlist_find(list, obj)) != NULL) {
|
2000-05-26 02:09:24 +00:00
|
|
|
STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link);
|
1999-08-30 01:48:19 +00:00
|
|
|
free(elm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
2012-06-27 20:24:25 +00:00
|
|
|
* Relocate dag rooted in the specified object.
|
|
|
|
* Returns 0 on success, or -1 on failure.
|
1998-03-07 19:24:35 +00:00
|
|
|
*/
|
2012-06-27 20:24:25 +00:00
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
static int
|
2012-06-27 20:24:25 +00:00
|
|
|
relocate_object_dag(Obj_Entry *root, bool bind_now, Obj_Entry *rtldobj,
|
|
|
|
int flags, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
|
|
|
error = relocate_object(elm->obj, bind_now, rtldobj, flags,
|
|
|
|
lockstate);
|
|
|
|
if (error == -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2016-03-02 16:36:24 +00:00
|
|
|
/*
|
|
|
|
* Prepare for, or clean after, relocating an object marked with
|
|
|
|
* DT_TEXTREL or DF_TEXTREL. Before relocating, all read-only
|
|
|
|
* segments are remapped read-write. After relocations are done, the
|
|
|
|
* segment's permissions are returned back to the modes specified in
|
|
|
|
 * the phdrs.  If any relocation happened, or always for a wired
|
|
|
|
* program, COW is triggered.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
reloc_textrel_prot(Obj_Entry *obj, bool before)
|
|
|
|
{
|
|
|
|
const Elf_Phdr *ph;
|
|
|
|
void *base;
|
|
|
|
size_t l, sz;
|
|
|
|
int prot;
|
|
|
|
|
|
|
|
for (l = obj->phsize / sizeof(*ph), ph = obj->phdr; l > 0;
|
|
|
|
l--, ph++) {
|
|
|
|
if (ph->p_type != PT_LOAD || (ph->p_flags & PF_W) != 0)
|
|
|
|
continue;
|
|
|
|
base = obj->relocbase + trunc_page(ph->p_vaddr);
|
|
|
|
sz = round_page(ph->p_vaddr + ph->p_filesz) -
|
|
|
|
trunc_page(ph->p_vaddr);
|
|
|
|
prot = convert_prot(ph->p_flags) | (before ? PROT_WRITE : 0);
|
|
|
|
if (mprotect(base, sz, prot) == -1) {
|
|
|
|
_rtld_error("%s: Cannot write-%sable text segment: %s",
|
|
|
|
obj->path, before ? "en" : "dis",
|
|
|
|
rtld_strerror(errno));
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2012-06-27 20:24:25 +00:00
|
|
|
/*
|
|
|
|
* Relocate single object.
|
|
|
|
* Returns 0 on success, or -1 on failure.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
relocate_object(Obj_Entry *obj, bool bind_now, Obj_Entry *rtldobj,
|
2012-03-20 13:20:49 +00:00
|
|
|
int flags, RtldLockState *lockstate)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
|
|
|
|
2012-03-20 13:20:49 +00:00
|
|
|
if (obj->relocated)
|
2012-06-27 20:24:25 +00:00
|
|
|
return (0);
|
2012-03-20 13:20:49 +00:00
|
|
|
obj->relocated = true;
|
2002-04-02 02:19:02 +00:00
|
|
|
if (obj != rtldobj)
|
2012-06-27 20:24:25 +00:00
|
|
|
dbg("relocating \"%s\"", obj->path);
|
2012-03-20 13:20:49 +00:00
|
|
|
|
2012-04-30 13:31:10 +00:00
|
|
|
if (obj->symtab == NULL || obj->strtab == NULL ||
|
2012-06-27 20:24:25 +00:00
|
|
|
!(obj->valid_hash_sysv || obj->valid_hash_gnu)) {
|
|
|
|
_rtld_error("%s: Shared object has no run-time symbol table",
|
|
|
|
obj->path);
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2016-03-02 16:36:24 +00:00
|
|
|
/* There are relocations to the write-protected text segment. */
|
|
|
|
if (obj->textrel && reloc_textrel_prot(obj, true) != 0)
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2014-08-29 09:29:10 +00:00
|
|
|
/* Process the non-PLT non-IFUNC relocations. */
|
2012-03-20 13:20:49 +00:00
|
|
|
if (reloc_non_plt(obj, rtldobj, flags, lockstate))
|
2012-06-27 20:24:25 +00:00
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2016-03-02 16:36:24 +00:00
|
|
|
/* Re-protect the text segment. */
|
|
|
|
if (obj->textrel && reloc_textrel_prot(obj, false) != 0)
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2011-12-12 11:03:14 +00:00
|
|
|
/* Set the special PLT or GOT entries. */
|
|
|
|
init_pltgot(obj);
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/* Process the PLT relocations. */
|
2000-01-29 01:27:04 +00:00
|
|
|
if (reloc_plt(obj) == -1)
|
2012-06-27 20:24:25 +00:00
|
|
|
return (-1);
|
2000-01-29 01:27:04 +00:00
|
|
|
/* Relocate the jump slots if we are doing immediate binding. */
|
2003-06-18 03:34:29 +00:00
|
|
|
if (obj->bind_now || bind_now)
|
2012-06-27 20:24:25 +00:00
|
|
|
if (reloc_jmpslots(obj, flags, lockstate) == -1)
|
|
|
|
return (-1);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2014-08-29 09:29:10 +00:00
|
|
|
/*
|
|
|
|
* Process the non-PLT IFUNC relocations. The relocations are
|
|
|
|
* processed in two phases, because IFUNC resolvers may
|
|
|
|
* reference other symbols, which must be readily processed
|
|
|
|
* before resolvers are called.
|
|
|
|
*/
|
2014-08-29 10:43:56 +00:00
|
|
|
if (obj->non_plt_gnu_ifunc &&
|
|
|
|
reloc_non_plt(obj, rtldobj, flags | SYMLOOK_IFUNC, lockstate))
|
2014-08-29 09:29:10 +00:00
|
|
|
return (-1);
|
|
|
|
|
2017-01-12 15:54:03 +00:00
|
|
|
if (!obj->mainprog && obj_enforce_relro(obj) == -1)
|
|
|
|
return (-1);
|
2012-01-30 19:52:17 +00:00
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
/*
|
|
|
|
* Set up the magic number and version in the Obj_Entry. These
|
|
|
|
* were checked in the crt1.o from the original ElfKit, so we
|
|
|
|
* set them for backward compatibility.
|
|
|
|
*/
|
|
|
|
obj->magic = RTLD_MAGIC;
|
|
|
|
obj->version = RTLD_VERSION;
|
|
|
|
|
2012-06-27 20:24:25 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Relocate newly-loaded shared objects. The argument is a pointer to
|
|
|
|
* the Obj_Entry for the first such object. All objects from the first
|
|
|
|
* to the end of the list of objects are relocated. Returns 0 on success,
|
|
|
|
* or -1 on failure.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj,
|
|
|
|
int flags, RtldLockState *lockstate)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
int error;
|
|
|
|
|
2016-08-12 18:29:11 +00:00
|
|
|
for (error = 0, obj = first; obj != NULL;
|
|
|
|
obj = TAILQ_NEXT(obj, next)) {
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2012-06-27 20:24:25 +00:00
|
|
|
error = relocate_object(obj, bind_now, rtldobj, flags,
|
|
|
|
lockstate);
|
|
|
|
if (error == -1)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return (error);
|
2011-12-14 16:47:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The handling of R_MACHINE_IRELATIVE relocations and jumpslots
|
|
|
|
* referencing STT_GNU_IFUNC symbols is postponed till the other
|
|
|
|
* relocations are done. The indirect functions specified as
|
|
|
|
* ifunc are allowed to call other symbols, so we need to have
|
|
|
|
* objects relocated before asking for resolution from indirects.
|
|
|
|
*
|
|
|
|
* The R_MACHINE_IRELATIVE slots are resolved in greedy fashion,
|
|
|
|
* instead of the usual lazy handling of PLT slots. It is
|
|
|
|
* consistent with how GNU does it.
|
|
|
|
*/
|
|
|
|
static int
|
2012-03-20 13:20:49 +00:00
|
|
|
resolve_object_ifunc(Obj_Entry *obj, bool bind_now, int flags,
|
|
|
|
RtldLockState *lockstate)
|
2011-12-14 16:47:53 +00:00
|
|
|
{
|
2011-12-12 11:03:14 +00:00
|
|
|
if (obj->irelative && reloc_iresolve(obj, lockstate) == -1)
|
2011-12-14 16:47:53 +00:00
|
|
|
return (-1);
|
2011-12-12 11:03:14 +00:00
|
|
|
if ((obj->bind_now || bind_now) && obj->gnu_ifunc &&
|
2012-03-20 13:20:49 +00:00
|
|
|
reloc_gnu_ifunc(obj, flags, lockstate) == -1)
|
2011-12-14 16:47:53 +00:00
|
|
|
return (-1);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2012-03-20 13:20:49 +00:00
|
|
|
resolve_objects_ifunc(Obj_Entry *first, bool bind_now, int flags,
|
|
|
|
RtldLockState *lockstate)
|
2011-12-14 16:47:53 +00:00
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
|
|
|
|
2016-08-12 18:29:11 +00:00
|
|
|
for (obj = first; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2012-03-20 13:20:49 +00:00
|
|
|
if (resolve_object_ifunc(obj, bind_now, flags, lockstate) == -1)
|
2011-12-14 16:47:53 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2012-03-20 13:20:49 +00:00
|
|
|
initlist_objects_ifunc(Objlist *list, bool bind_now, int flags,
|
|
|
|
RtldLockState *lockstate)
|
2011-12-14 16:47:53 +00:00
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(elm, list, link) {
|
2012-03-20 13:20:49 +00:00
|
|
|
if (resolve_object_ifunc(elm->obj, bind_now, flags,
|
|
|
|
lockstate) == -1)
|
2011-12-14 16:47:53 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
return (0);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cleanup procedure. It will be called (by the atexit mechanism) just
|
|
|
|
* before the process exits.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
rtld_exit(void)
|
|
|
|
{
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
dbg("rtld_exit()");
|
2010-12-16 16:56:44 +00:00
|
|
|
objlist_call_fini(&list_fini, NULL, &lockstate);
|
2000-07-26 04:24:40 +00:00
|
|
|
/* No need to remove the items from the list, since we are exiting. */
|
2003-05-31 14:45:11 +00:00
|
|
|
if (!libmap_disable)
|
|
|
|
lm_fini();
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2013-09-21 21:03:52 +00:00
|
|
|
/*
|
|
|
|
* Iterate over a search path, translate each element, and invoke the
|
|
|
|
* callback on the result.
|
|
|
|
*/
|
2003-02-13 17:47:44 +00:00
|
|
|
static void *
|
|
|
|
path_enumerate(const char *path, path_enum_proc callback, void *arg)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2004-03-21 01:21:26 +00:00
|
|
|
const char *trans;
|
2003-02-13 17:47:44 +00:00
|
|
|
if (path == NULL)
|
|
|
|
return (NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
path += strspn(path, ":;");
|
|
|
|
while (*path != '\0') {
|
|
|
|
size_t len;
|
|
|
|
char *res;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
len = strcspn(path, ":;");
|
2004-03-21 01:21:26 +00:00
|
|
|
trans = lm_findn(NULL, path, len);
|
|
|
|
if (trans)
|
|
|
|
res = callback(trans, strlen(trans), arg);
|
|
|
|
else
|
2013-09-21 21:03:52 +00:00
|
|
|
res = callback(path, len, arg);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
if (res != NULL)
|
|
|
|
return (res);
|
|
|
|
|
|
|
|
path += len;
|
|
|
|
path += strspn(path, ":;");
|
|
|
|
}
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct try_library_args {
|
|
|
|
const char *name;
|
|
|
|
size_t namelen;
|
|
|
|
char *buffer;
|
|
|
|
size_t buflen;
|
|
|
|
};
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
static void *
|
|
|
|
try_library_path(const char *dir, size_t dirlen, void *param)
|
|
|
|
{
|
|
|
|
struct try_library_args *arg;
|
|
|
|
|
|
|
|
arg = param;
|
|
|
|
if (*dir == '/' || trust) {
|
|
|
|
char *pathname;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
if (dirlen + 1 + arg->namelen + 1 > arg->buflen)
|
|
|
|
return (NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
pathname = arg->buffer;
|
|
|
|
strncpy(pathname, dir, dirlen);
|
|
|
|
pathname[dirlen] = '/';
|
|
|
|
strcpy(pathname + dirlen + 1, arg->name);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
dbg(" Trying \"%s\"", pathname);
|
|
|
|
if (access(pathname, F_OK) == 0) { /* We found it */
|
|
|
|
pathname = xmalloc(dirlen + 1 + arg->namelen + 1);
|
|
|
|
strcpy(pathname, arg->buffer);
|
|
|
|
return (pathname);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
}
|
2003-02-13 17:47:44 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
static char *
|
|
|
|
search_library_path(const char *name, const char *path)
|
|
|
|
{
|
|
|
|
char *p;
|
|
|
|
struct try_library_args arg;
|
|
|
|
|
|
|
|
if (path == NULL)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
arg.name = name;
|
|
|
|
arg.namelen = strlen(name);
|
|
|
|
arg.buffer = xmalloc(PATH_MAX);
|
|
|
|
arg.buflen = PATH_MAX;
|
|
|
|
|
|
|
|
p = path_enumerate(path, try_library_path, &arg);
|
|
|
|
|
|
|
|
free(arg.buffer);
|
|
|
|
|
|
|
|
return (p);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2014-06-20 17:08:32 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Finds the library with the given name using the directory descriptors
|
|
|
|
* listed in the LD_LIBRARY_PATH_FDS environment variable.
|
|
|
|
*
|
|
|
|
* Returns a freshly-opened close-on-exec file descriptor for the library,
|
|
|
|
* or -1 if the library cannot be found.
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
search_library_pathfds(const char *name, const char *path, int *fdp)
|
|
|
|
{
|
|
|
|
char *envcopy, *fdstr, *found, *last_token;
|
|
|
|
size_t len;
|
|
|
|
int dirfd, fd;
|
|
|
|
|
2014-08-27 00:48:09 +00:00
|
|
|
dbg("%s('%s', '%s', fdp)", __func__, name, path);
|
2014-06-20 17:08:32 +00:00
|
|
|
|
|
|
|
/* Don't load from user-specified libdirs into setuid binaries. */
|
|
|
|
if (!trust)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
/* We can't do anything if LD_LIBRARY_PATH_FDS isn't set. */
|
|
|
|
if (path == NULL)
|
|
|
|
return (NULL);
|
|
|
|
|
|
|
|
/* LD_LIBRARY_PATH_FDS only works with relative paths. */
|
|
|
|
if (name[0] == '/') {
|
|
|
|
dbg("Absolute path (%s) passed to %s", name, __func__);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use strtok_r() to walk the FD:FD:FD list. This requires a local
|
|
|
|
* copy of the path, as strtok_r rewrites separator tokens
|
|
|
|
* with '\0'.
|
|
|
|
*/
|
|
|
|
found = NULL;
|
|
|
|
envcopy = xstrdup(path);
|
|
|
|
for (fdstr = strtok_r(envcopy, ":", &last_token); fdstr != NULL;
|
|
|
|
fdstr = strtok_r(NULL, ":", &last_token)) {
|
2017-05-16 13:27:44 +00:00
|
|
|
dirfd = parse_integer(fdstr);
|
|
|
|
if (dirfd < 0) {
|
|
|
|
_rtld_error("failed to parse directory FD: '%s'",
|
|
|
|
fdstr);
|
2014-06-20 17:08:32 +00:00
|
|
|
break;
|
2017-05-16 13:27:44 +00:00
|
|
|
}
|
2015-04-22 01:54:25 +00:00
|
|
|
fd = __sys_openat(dirfd, name, O_RDONLY | O_CLOEXEC | O_VERIFY);
|
2014-06-20 17:08:32 +00:00
|
|
|
if (fd >= 0) {
|
|
|
|
*fdp = fd;
|
|
|
|
len = strlen(fdstr) + strlen(name) + 3;
|
|
|
|
found = xmalloc(len);
|
|
|
|
if (rtld_snprintf(found, len, "#%d/%s", dirfd, name) < 0) {
|
|
|
|
_rtld_error("error generating '%d/%s'",
|
|
|
|
dirfd, name);
|
2015-04-02 21:35:36 +00:00
|
|
|
rtld_die();
|
2014-06-20 17:08:32 +00:00
|
|
|
}
|
|
|
|
dbg("open('%s') => %d", found, fd);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
free(envcopy);
|
|
|
|
|
|
|
|
return (found);
|
|
|
|
}
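A caller-side sketch of the LD_LIBRARY_PATH_FDS mechanism handled above: a
parent opens a library directory, exports the descriptor number in the
environment, and execs a dynamically linked child that can then load libraries
by relative name through that descriptor. Illustrative only; the directory and
program names are hypothetical, and multiple descriptors would be joined with
':' as the parsing loop above expects.

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        char buf[16];
        int dirfd;

        /* The descriptor must survive exec, so no O_CLOEXEC here. */
        dirfd = open("/usr/local/lib/mylibs", O_RDONLY | O_DIRECTORY);
        if (dirfd == -1)
                err(1, "open");
        (void)snprintf(buf, sizeof(buf), "%d", dirfd);
        if (setenv("LD_LIBRARY_PATH_FDS", buf, 1) == -1)
                err(1, "setenv");
        execl("./child", "child", (char *)NULL);
        err(1, "execl");
}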
|
|
|
|
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
int
|
|
|
|
dlclose(void *handle)
|
2017-03-09 21:05:47 +00:00
|
|
|
{
|
|
|
|
RtldLockState lockstate;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
|
|
|
error = dlclose_locked(handle, &lockstate);
|
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
dlclose_locked(void *handle, RtldLockState *lockstate)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
1999-12-27 04:44:04 +00:00
|
|
|
Obj_Entry *root;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
root = dlcheck(handle);
|
2017-03-09 21:05:47 +00:00
|
|
|
if (root == NULL)
|
1998-03-07 19:24:35 +00:00
|
|
|
return -1;
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_DLCLOSE_START, handle, NULL, 0, root->dl_refcount,
|
|
|
|
root->path);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2000-01-09 21:13:48 +00:00
|
|
|
/* Unreference the object and its dependencies. */
|
1998-03-07 19:24:35 +00:00
|
|
|
root->dl_refcount--;
|
2003-05-08 01:31:36 +00:00
|
|
|
|
2010-12-16 16:56:44 +00:00
|
|
|
if (root->refcount == 1) {
|
2000-01-09 21:13:48 +00:00
|
|
|
/*
|
2010-12-16 16:56:44 +00:00
|
|
|
* The object will no longer be referenced, so we must unload it.
|
2008-09-03 01:05:32 +00:00
|
|
|
* First, call the fini functions.
|
2000-01-09 21:13:48 +00:00
|
|
|
*/
|
2017-03-09 21:05:47 +00:00
|
|
|
objlist_call_fini(&list_fini, root, lockstate);
|
2010-12-16 16:56:44 +00:00
|
|
|
|
|
|
|
unref_dag(root);
|
2000-01-09 21:13:48 +00:00
|
|
|
|
|
|
|
/* Finish cleaning up the newly-unreferenced objects. */
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_DELETE,&root->linkmap);
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_object(root, lockstate);
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_CONSISTENT,NULL);
|
2010-12-16 16:56:44 +00:00
|
|
|
} else
|
|
|
|
unref_dag(root);
|
|
|
|
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_DLCLOSE_STOP, handle, NULL, 0, 0, NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-03-24 15:59:51 +00:00
|
|
|
char *
|
1998-03-07 19:24:35 +00:00
|
|
|
dlerror(void)
|
|
|
|
{
|
|
|
|
char *msg = error_message;
|
|
|
|
error_message = NULL;
|
|
|
|
return msg;
|
|
|
|
}
|
|
|
|
|
Solve the dynamic linker's problems with multithreaded programs once
and for all (I hope). Packages such as wine, JDK, and linuxthreads
should no longer have any problems with re-entering the dynamic
linker.
This commit replaces the locking used in the dynamic linker with a
new spinlock-based reader/writer lock implementation. Brian
Fundakowski Feldman <green> argued for this from the very beginning,
but it took me a long time to come around to his point of view.
Spinlocks are the only kinds of locks that work with all thread
packages. But on uniprocessor systems they can be inefficient,
because while a contender for the lock is spinning the holder of the
lock cannot make any progress toward releasing it. To alleviate
this disadvantage I have borrowed a trick from Sleepycat's Berkeley
DB implementation. When spinning for a lock, the requester does a
nanosleep() call for 1 usec. each time around the loop. This will
generally yield the CPU to other threads, allowing the lock holder
to finish its business and release the lock. I chose 1 usec. as the
minimum sleep which would with reasonable certainty not be rounded
down to 0.
The formerly machine-independent file "lockdflt.c" has been moved
into the architecture-specific subdirectories by repository copy.
It now contains the machine-dependent spinlocking code. For the
spinlocks I used the very nifty "simple, non-scalable reader-preference
lock" which I found at
<http://www.cs.rochester.edu/u/scott/synchronization/pseudocode/rw.html>
on all CPUs except the 80386 (the specific CPU model, not the
architecture). The 80386 CPU doesn't support the necessary "cmpxchg"
instruction, so on that CPU a simple exclusive test-and-set lock
is used instead. 80386 CPUs are detected at initialization time by
trying to execute "cmpxchg" and catching the resulting SIGILL
signal.
To reduce contention for the locks, I have revamped a couple of
key data structures, permitting all common operations to be done
under non-exclusive (reader) locking. The only operations that
require exclusive locking now are the rare intrusive operations
such as dlopen() and dlclose().
The dllockinit() interface is now deprecated. It still exists,
but only as a do-nothing stub. I plan to remove it as soon as is
reasonably possible. (From the very beginning it was clearly
labeled as experimental and subject to change.) As far as I know,
only the linuxthreads port uses dllockinit(). This interface turned
out to have several problems. As one example, when the dynamic
linker called a client-supplied locking function, that function
sometimes needed lazy binding, causing re-entry into the dynamic
linker and a big looping mess. And in any case, it turned out to be
too burdensome to require threads packages to register themselves
with the dynamic linker.
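The spin-then-sleep acquisition described in the commit message above can be
sketched roughly like this. It is an illustrative fragment using C11 atomics,
not the machine-dependent code that lived in lockdflt.c; the type and function
names are hypothetical.

#include <stdatomic.h>
#include <time.h>

typedef struct {
        atomic_flag locked;     /* initialize with ATOMIC_FLAG_INIT */
} spin_sleep_lock;

static void
spin_sleep_lock_acquire(spin_sleep_lock *l)
{
        /* Sleep ~1 usec per failed attempt so the holder can run and release. */
        const struct timespec backoff = { .tv_sec = 0, .tv_nsec = 1000 };

        while (atomic_flag_test_and_set_explicit(&l->locked,
            memory_order_acquire))
                (void)nanosleep(&backoff, NULL);
}

static void
spin_sleep_lock_release(spin_sleep_lock *l)
{
        atomic_flag_clear_explicit(&l->locked, memory_order_release);
}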
2000-07-08 04:10:38 +00:00
|
|
|
/*
|
|
|
|
* This function is deprecated and has no effect.
|
|
|
|
*/
|
1999-12-27 04:44:04 +00:00
|
|
|
void
|
|
|
|
dllockinit(void *context,
|
|
|
|
void *(*lock_create)(void *context),
|
|
|
|
void (*rlock_acquire)(void *lock),
|
|
|
|
void (*wlock_acquire)(void *lock),
|
|
|
|
void (*lock_release)(void *lock),
|
|
|
|
void (*lock_destroy)(void *lock),
|
|
|
|
void (*context_destroy)(void *context))
|
|
|
|
{
|
2000-07-08 04:10:38 +00:00
|
|
|
static void *cur_context;
|
|
|
|
static void (*cur_context_destroy)(void *);
|
|
|
|
|
|
|
|
/* Just destroy the context from the previous call, if necessary. */
|
|
|
|
if (cur_context_destroy != NULL)
|
|
|
|
cur_context_destroy(cur_context);
|
|
|
|
cur_context = context;
|
|
|
|
cur_context_destroy = context_destroy;
|
2000-01-29 01:27:04 +00:00
|
|
|
}
|
|
|
|
|
1998-03-07 19:24:35 +00:00
|
|
|
void *
|
|
|
|
dlopen(const char *name, int mode)
|
2012-01-07 10:33:01 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
return (rtld_dlopen(name, -1, mode));
|
|
|
|
}
|
|
|
|
|
|
|
|
void *
|
|
|
|
fdlopen(int fd, int mode)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (rtld_dlopen(NULL, fd, mode));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *
|
|
|
|
rtld_dlopen(const char *name, int fd, int mode)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2011-01-10 16:09:35 +00:00
|
|
|
RtldLockState lockstate;
|
2010-12-25 08:51:20 +00:00
|
|
|
int lo_flags;
|
2002-02-04 10:33:48 +00:00
|
|
|
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_DLOPEN_START, NULL, NULL, 0, mode, name);
|
2002-02-04 10:33:48 +00:00
|
|
|
ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1";
|
2011-01-10 16:09:35 +00:00
|
|
|
if (ld_tracing != NULL) {
|
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2011-02-09 09:20:27 +00:00
|
|
|
if (sigsetjmp(lockstate.env, 0) != 0)
|
2011-01-10 16:09:35 +00:00
|
|
|
lock_upgrade(rtld_bind_lock, &lockstate);
|
|
|
|
environ = (char **)*get_program_var_addr("environ", &lockstate);
|
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
|
|
|
}
|
2009-11-26 13:57:20 +00:00
|
|
|
lo_flags = RTLD_LO_DLOPEN;
|
2010-12-25 08:51:20 +00:00
|
|
|
if (mode & RTLD_NODELETE)
|
|
|
|
lo_flags |= RTLD_LO_NODELETE;
|
2009-11-26 13:57:20 +00:00
|
|
|
if (mode & RTLD_NOLOAD)
|
|
|
|
lo_flags |= RTLD_LO_NOLOAD;
|
2009-11-28 14:29:32 +00:00
|
|
|
if (ld_tracing != NULL)
|
|
|
|
lo_flags |= RTLD_LO_TRACE;
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2012-01-07 10:33:01 +00:00
|
|
|
return (dlopen_object(name, fd, obj_main, lo_flags,
|
2012-04-12 10:32:22 +00:00
|
|
|
mode & (RTLD_MODEMASK | RTLD_GLOBAL), NULL));
|
2010-12-25 08:51:20 +00:00
|
|
|
}
|
|
|
|
|
2011-12-14 16:47:53 +00:00
|
|
|
static void
|
2017-03-09 21:05:47 +00:00
|
|
|
dlopen_cleanup(Obj_Entry *obj, RtldLockState *lockstate)
|
2011-12-14 16:47:53 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
obj->dl_refcount--;
|
|
|
|
unref_dag(obj);
|
|
|
|
if (obj->refcount == 0)
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_object(obj, lockstate);
|
2011-12-14 16:47:53 +00:00
|
|
|
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
static Obj_Entry *
|
2012-01-07 10:33:01 +00:00
|
|
|
dlopen_object(const char *name, int fd, Obj_Entry *refobj, int lo_flags,
|
2012-04-12 10:32:22 +00:00
|
|
|
int mode, RtldLockState *lockstate)
|
2010-12-25 08:51:20 +00:00
|
|
|
{
|
2016-01-20 07:21:33 +00:00
|
|
|
Obj_Entry *old_obj_tail;
|
2010-12-25 08:51:20 +00:00
|
|
|
Obj_Entry *obj;
|
|
|
|
Objlist initlist;
|
2012-04-12 10:32:22 +00:00
|
|
|
RtldLockState mlockstate;
|
2010-12-25 08:51:20 +00:00
|
|
|
int result;
|
|
|
|
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_init(&initlist);
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2012-04-12 10:32:22 +00:00
|
|
|
if (lockstate == NULL && !(lo_flags & RTLD_LO_EARLY)) {
|
|
|
|
wlock_acquire(rtld_bind_lock, &mlockstate);
|
|
|
|
lockstate = &mlockstate;
|
|
|
|
}
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_ADD,NULL);
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
old_obj_tail = globallist_curr(TAILQ_LAST(&obj_list, obj_entry_q));
|
1999-12-27 04:44:04 +00:00
|
|
|
obj = NULL;
|
2012-01-07 10:33:01 +00:00
|
|
|
if (name == NULL && fd == -1) {
|
1998-03-07 19:24:35 +00:00
|
|
|
obj = obj_main;
|
1999-06-25 04:50:06 +00:00
|
|
|
obj->refcount++;
|
|
|
|
} else {
|
2012-01-07 10:33:01 +00:00
|
|
|
obj = load_object(name, fd, refobj, lo_flags);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
if (obj) {
|
|
|
|
obj->dl_refcount++;
|
1999-08-30 01:48:19 +00:00
|
|
|
if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL)
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_push_tail(&list_global, obj);
|
2016-01-20 07:21:33 +00:00
|
|
|
if (globallist_next(old_obj_tail) != NULL) {
|
|
|
|
/* We loaded something new. */
|
|
|
|
assert(globallist_next(old_obj_tail) == obj);
|
2012-03-20 13:20:49 +00:00
|
|
|
result = load_needed_objects(obj,
|
|
|
|
lo_flags & (RTLD_LO_DLOPEN | RTLD_LO_EARLY));
|
2005-12-22 16:42:38 +00:00
|
|
|
init_dag(obj);
|
2010-11-04 09:29:00 +00:00
|
|
|
ref_dag(obj);
|
2005-12-18 19:43:33 +00:00
|
|
|
if (result != -1)
|
|
|
|
result = rtld_verify_versions(&obj->dagmembers);
|
2002-10-19 10:18:29 +00:00
|
|
|
if (result != -1 && ld_tracing)
|
|
|
|
goto trace;
|
2012-06-27 20:24:25 +00:00
|
|
|
if (result == -1 || relocate_object_dag(obj,
|
|
|
|
(mode & RTLD_MODEMASK) == RTLD_NOW, &obj_rtld,
|
2012-03-20 13:20:49 +00:00
|
|
|
(lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
|
2012-06-27 20:24:25 +00:00
|
|
|
lockstate) == -1) {
|
2017-03-09 21:05:47 +00:00
|
|
|
dlopen_cleanup(obj, lockstate);
|
1998-04-30 07:48:02 +00:00
|
|
|
obj = NULL;
|
2012-03-20 13:20:49 +00:00
|
|
|
} else if (lo_flags & RTLD_LO_EARLY) {
|
|
|
|
/*
|
|
|
|
* Do not call the init functions for early loaded
|
|
|
|
* filtees. The image is still not initialized enough
|
|
|
|
* for them to work.
|
|
|
|
*
|
|
|
|
* Our object is found by the global object list and
|
|
|
|
* will be ordered among all init calls done right
|
|
|
|
* before transferring control to main.
|
|
|
|
*/
|
2000-01-09 21:13:48 +00:00
|
|
|
} else {
|
2000-07-26 04:24:40 +00:00
|
|
|
/* Make list of init functions to call. */
|
2016-01-20 23:26:35 +00:00
|
|
|
initlist_add_objects(obj, obj, &initlist);
|
2000-01-09 21:13:48 +00:00
|
|
|
}
|
2012-08-14 13:28:30 +00:00
|
|
|
/*
|
2015-04-15 08:16:34 +00:00
|
|
|
* Process all no_delete or global objects here, giving
|
|
|
|
* them their own DAGs to prevent their dependencies from being
|
|
|
|
* unloaded. This has to be done after we have loaded all
|
|
|
|
* of the dependencies, so that we do not miss any.
|
2012-08-14 13:28:30 +00:00
|
|
|
*/
|
2012-08-20 23:43:03 +00:00
|
|
|
if (obj != NULL)
|
2015-04-15 08:16:34 +00:00
|
|
|
process_z(obj);
|
2003-05-08 01:31:36 +00:00
|
|
|
} else {
|
If dlopen() is called for a dso that has already been loaded as a
dependency, then the dso never has its DAG initialized. An empty DAG
makes the ref_dag() call in dlopen() a nop, and the dso refcount is off
by one.
Initialize the DAG on the first dlopen() call, using a boolean flag
to prevent double initialization.
From the PR (edited):
Assume we have a library liba.so, containing a function a(), and a
library libb.so, containing function b(). liba.so needs functionality
from libb.so, so liba.so links in libb.so.
An application doesn't know about the relation between these libraries,
but needs to call a() and b(). It dlopen()s liba.so and obtains a
pointer to a(), then it dlopen()s libb.so and obtains a pointer to b().
As soon as the application doesn't need a() anymore, it dlclose()s liba.so.
Expected result: the pointer to b() is still valid and can be called
Actual result: the pointer to b() has become invalid, even though the
application did not dlclose() the handle to libb.so. On calling b(), the
application crashes with a segmentation fault.
PR: misc/151861
Based on patch by: jh
Reviewed by: kan
Tested by: Arjan van Leeuwen <freebsd-maintainer opera com>
MFC after: 1 week
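The scenario from the PR can be expressed as a small test program. This is an
illustrative sketch only, using the hypothetical library and function names
from the PR description, with error checking omitted for brevity.

#include <dlfcn.h>

int
main(void)
{
        void *ha, *hb;
        void (*a)(void), (*b)(void);

        ha = dlopen("liba.so", RTLD_NOW);       /* liba.so depends on libb.so */
        hb = dlopen("libb.so", RTLD_NOW);
        a = (void (*)(void))dlfunc(ha, "a");
        b = (void (*)(void))dlfunc(hb, "b");
        a();
        dlclose(ha);            /* must not tear down libb.so's DAG */
        b();                    /* expected to remain callable */
        dlclose(hb);
        return (0);
}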
2010-11-03 09:23:08 +00:00
|
|
|
/*
|
|
|
|
* Bump the reference counts for objects on this DAG. If
|
|
|
|
* this is the first dlopen() call for the object that was
|
|
|
|
* already loaded as a dependency, initialize the dag
|
|
|
|
* starting at it.
|
|
|
|
*/
|
2010-11-04 09:29:00 +00:00
|
|
|
init_dag(obj);
|
|
|
|
ref_dag(obj);
|
2003-05-08 01:31:36 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
if ((lo_flags & RTLD_LO_TRACE) != 0)
|
2003-05-08 01:31:36 +00:00
|
|
|
goto trace;
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (obj != NULL && ((lo_flags & RTLD_LO_NODELETE) != 0 ||
|
|
|
|
obj->z_nodelete) && !obj->ref_nodel) {
|
2009-03-30 08:47:28 +00:00
|
|
|
dbg("obj %s nodelete", obj->path);
|
|
|
|
ref_dag(obj);
|
|
|
|
obj->z_nodelete = obj->ref_nodel = true;
|
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
1998-04-30 07:48:02 +00:00
|
|
|
|
2007-01-09 17:50:05 +00:00
|
|
|
LD_UTRACE(UTRACE_DLOPEN_STOP, obj, NULL, 0, obj ? obj->dl_refcount : 0,
|
|
|
|
name);
|
2000-08-26 05:13:29 +00:00
|
|
|
GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL);
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2012-04-02 11:50:14 +00:00
|
|
|
if (!(lo_flags & RTLD_LO_EARLY)) {
|
2012-04-12 10:32:22 +00:00
|
|
|
map_stacks_exec(lockstate);
|
2012-04-02 11:50:14 +00:00
|
|
|
}
|
2011-01-08 17:11:49 +00:00
|
|
|
|
2011-12-14 16:47:53 +00:00
|
|
|
if (initlist_objects_ifunc(&initlist, (mode & RTLD_MODEMASK) == RTLD_NOW,
|
2012-03-20 13:20:49 +00:00
|
|
|
(lo_flags & RTLD_LO_EARLY) ? SYMLOOK_EARLY : 0,
|
2012-04-12 10:32:22 +00:00
|
|
|
lockstate) == -1) {
|
2011-12-14 16:47:53 +00:00
|
|
|
objlist_clear(&initlist);
|
2017-03-09 21:05:47 +00:00
|
|
|
dlopen_cleanup(obj, lockstate);
|
2012-04-12 10:32:22 +00:00
|
|
|
if (lockstate == &mlockstate)
|
|
|
|
lock_release(rtld_bind_lock, lockstate);
|
2011-12-14 16:47:53 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2012-03-20 13:20:49 +00:00
|
|
|
if (!(lo_flags & RTLD_LO_EARLY)) {
|
|
|
|
/* Call the init functions. */
|
2012-04-12 10:32:22 +00:00
|
|
|
objlist_call_init(&initlist, lockstate);
|
2012-03-20 13:20:49 +00:00
|
|
|
}
|
2000-07-26 04:24:40 +00:00
|
|
|
objlist_clear(&initlist);
|
2012-04-12 10:32:22 +00:00
|
|
|
if (lockstate == &mlockstate)
|
|
|
|
lock_release(rtld_bind_lock, lockstate);
|
1998-03-07 19:24:35 +00:00
|
|
|
return obj;
|
2002-10-19 10:18:29 +00:00
|
|
|
trace:
|
|
|
|
trace_loaded_objects(obj);
|
2012-04-12 10:32:22 +00:00
|
|
|
if (lockstate == &mlockstate)
|
|
|
|
lock_release(rtld_bind_lock, lockstate);
|
2002-10-19 10:18:29 +00:00
|
|
|
exit(0);
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
static void *
|
|
|
|
do_dlsym(void *handle, const char *name, void *retaddr, const Ver_Entry *ve,
|
|
|
|
int flags)
|
1998-03-07 19:24:35 +00:00
|
|
|
{
|
2006-09-19 16:48:08 +00:00
|
|
|
DoneList donelist;
|
|
|
|
const Obj_Entry *obj, *defobj;
|
2010-12-25 08:51:20 +00:00
|
|
|
const Elf_Sym *def;
|
|
|
|
SymLook req;
|
|
|
|
RtldLockState lockstate;
|
2012-03-29 10:32:34 +00:00
|
|
|
tls_index ti;
|
2015-01-25 12:11:50 +00:00
|
|
|
void *sym;
|
2010-12-25 08:51:20 +00:00
|
|
|
int res;
|
1998-03-07 19:24:35 +00:00
|
|
|
|
1998-09-02 01:09:34 +00:00
|
|
|
def = NULL;
|
1999-08-30 01:48:19 +00:00
|
|
|
defobj = NULL;
|
2010-12-25 08:51:20 +00:00
|
|
|
symlook_init(&req, name);
|
|
|
|
req.ventry = ve;
|
|
|
|
req.flags = flags | SYMLOOK_IN_PLT;
|
|
|
|
req.lockstate = &lockstate;
|
|
|
|
|
2015-01-25 12:11:50 +00:00
|
|
|
LD_UTRACE(UTRACE_DLSYM_START, handle, NULL, 0, 0, name);
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2011-02-09 09:20:27 +00:00
|
|
|
if (sigsetjmp(lockstate.env, 0) != 0)
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_upgrade(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
if (handle == NULL || handle == RTLD_NEXT ||
|
|
|
|
handle == RTLD_DEFAULT || handle == RTLD_SELF) {
|
1998-03-07 19:24:35 +00:00
|
|
|
|
|
|
|
if ((obj = obj_from_addr(retaddr)) == NULL) {
|
|
|
|
_rtld_error("Cannot determine caller's shared object");
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2015-01-25 12:11:50 +00:00
|
|
|
LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
1999-08-30 01:48:19 +00:00
|
|
|
if (handle == NULL) { /* Just the caller's shared object. */
|
2010-12-25 08:51:20 +00:00
|
|
|
res = symlook_obj(&req, obj);
|
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
}
|
2003-02-13 17:47:44 +00:00
|
|
|
} else if (handle == RTLD_NEXT || /* Objects after caller's */
|
|
|
|
handle == RTLD_SELF) { /* ... caller included */
|
|
|
|
if (handle == RTLD_NEXT)
|
2016-01-20 07:21:33 +00:00
|
|
|
obj = globallist_next(obj);
|
2016-07-15 19:07:00 +00:00
|
|
|
for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2010-12-25 08:51:20 +00:00
|
|
|
res = symlook_obj(&req, obj);
|
|
|
|
if (res == 0) {
|
|
|
|
if (def == NULL ||
|
|
|
|
ELF_ST_BIND(req.sym_out->st_info) != STB_WEAK) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
2008-10-10 00:16:32 +00:00
|
|
|
if (ELF_ST_BIND(def->st_info) != STB_WEAK)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Search the dynamic linker itself, and possibly resolve the
|
|
|
|
* symbol from there. This is how the application links to
|
2010-08-24 13:01:14 +00:00
|
|
|
* dynamic linker services such as dlopen.
|
2008-10-10 00:16:32 +00:00
|
|
|
*/
|
|
|
|
if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
|
2010-12-25 08:51:20 +00:00
|
|
|
res = symlook_obj(&req, &obj_rtld);
|
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
1999-08-30 01:48:19 +00:00
|
|
|
}
|
|
|
|
}
|
2000-09-19 04:27:16 +00:00
|
|
|
} else {
|
|
|
|
assert(handle == RTLD_DEFAULT);
|
2010-12-27 00:30:29 +00:00
|
|
|
res = symlook_default(&req, obj);
|
2010-12-25 08:51:20 +00:00
|
|
|
if (res == 0) {
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
def = req.sym_out;
|
|
|
|
}
|
1998-09-02 01:09:34 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
} else {
|
1999-12-27 04:44:04 +00:00
|
|
|
if ((obj = dlcheck(handle)) == NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2015-01-25 12:11:50 +00:00
|
|
|
LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
1998-09-02 01:09:34 +00:00
|
|
|
|
2006-09-08 14:59:54 +00:00
|
|
|
donelist_init(&donelist);
|
1998-09-02 01:09:34 +00:00
|
|
|
if (obj->mainprog) {
|
2011-01-28 23:44:57 +00:00
|
|
|
/* Handle obtained by dlopen(NULL, ...) implies global scope. */
|
|
|
|
res = symlook_global(&req, &donelist);
|
2010-12-25 08:51:20 +00:00
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
2011-01-28 23:44:57 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Search the dynamic linker itself, and possibly resolve the
|
|
|
|
* symbol from there. This is how the application links to
|
|
|
|
* dynamic linker services such as dlopen.
|
|
|
|
*/
|
|
|
|
if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) {
|
|
|
|
res = symlook_obj(&req, &obj_rtld);
|
2010-12-25 08:51:20 +00:00
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
}
|
|
|
|
}
|
2011-01-28 23:44:57 +00:00
|
|
|
}
|
|
|
|
else {
|
2006-09-08 14:59:54 +00:00
|
|
|
/* Search the whole DAG rooted at the given object. */
|
2011-01-28 23:44:57 +00:00
|
|
|
res = symlook_list(&req, &obj->dagmembers, &donelist);
|
2010-12-25 08:51:20 +00:00
|
|
|
if (res == 0) {
|
|
|
|
def = req.sym_out;
|
|
|
|
defobj = req.defobj_out;
|
|
|
|
}
|
1998-09-02 01:09:34 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
}
|
|
|
|
|
1999-12-27 04:44:04 +00:00
|
|
|
if (def != NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2001-10-15 18:48:42 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* The value required by the caller is derived from the value
|
2014-07-07 00:27:09 +00:00
|
|
|
* of the symbol.  This is simply the relocated value of the
|
|
|
|
* symbol.
|
2001-10-15 18:48:42 +00:00
|
|
|
*/
|
|
|
|
if (ELF_ST_TYPE(def->st_info) == STT_FUNC)
|
2015-01-25 12:11:50 +00:00
|
|
|
sym = make_function_pointer(def, defobj);
|
2011-12-12 11:03:14 +00:00
|
|
|
else if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC)
|
2015-01-25 12:11:50 +00:00
|
|
|
sym = rtld_resolve_ifunc(defobj, def);
|
2012-03-29 10:32:34 +00:00
|
|
|
else if (ELF_ST_TYPE(def->st_info) == STT_TLS) {
|
|
|
|
ti.ti_module = defobj->tlsindex;
|
|
|
|
ti.ti_offset = def->st_value;
|
2015-01-25 12:11:50 +00:00
|
|
|
sym = __tls_get_addr(&ti);
|
2012-03-29 10:32:34 +00:00
|
|
|
} else
|
2015-01-25 12:11:50 +00:00
|
|
|
sym = defobj->relocbase + def->st_value;
|
|
|
|
LD_UTRACE(UTRACE_DLSYM_STOP, handle, sym, 0, 0, name);
|
|
|
|
return (sym);
|
1999-12-27 04:44:04 +00:00
|
|
|
}
|
1998-03-07 19:24:35 +00:00
|
|
|
|
2017-07-04 20:19:36 +00:00
|
|
|
_rtld_error("Undefined symbol \"%s%s%s\"", name, ve != NULL ? "@" : "",
|
|
|
|
ve != NULL ? ve->name : "");
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2015-01-25 12:11:50 +00:00
|
|
|
LD_UTRACE(UTRACE_DLSYM_STOP, handle, NULL, 0, 0, name);
|
1998-03-07 19:24:35 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
void *
|
|
|
|
dlsym(void *handle, const char *name)
|
|
|
|
{
|
|
|
|
return do_dlsym(handle, name, __builtin_return_address(0), NULL,
|
|
|
|
SYMLOOK_DLSYM);
|
|
|
|
}
|
|
|
|
|
2009-04-03 19:17:23 +00:00
|
|
|
dlfunc_t
|
|
|
|
dlfunc(void *handle, const char *name)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
void *d;
|
|
|
|
dlfunc_t f;
|
|
|
|
} rv;
|
|
|
|
|
|
|
|
rv.d = do_dlsym(handle, name, __builtin_return_address(0), NULL,
|
|
|
|
SYMLOOK_DLSYM);
|
|
|
|
return (rv.f);
|
|
|
|
}
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
void *
|
|
|
|
dlvsym(void *handle, const char *name, const char *version)
|
|
|
|
{
|
|
|
|
Ver_Entry ventry;
|
|
|
|
|
|
|
|
ventry.name = version;
|
|
|
|
ventry.file = NULL;
|
|
|
|
ventry.hash = elf_hash(version);
|
|
|
|
ventry.flags= 0;
|
|
|
|
return do_dlsym(handle, name, __builtin_return_address(0), &ventry,
|
|
|
|
SYMLOOK_DLSYM);
|
|
|
|
}
|
|
|
|
|
2010-08-23 15:27:03 +00:00
|
|
|
int
|
|
|
|
_rtld_addr_phdr(const void *addr, struct dl_phdr_info *phdr_info)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2010-08-23 15:27:03 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
obj = obj_from_addr(addr);
|
|
|
|
if (obj == NULL) {
|
|
|
|
_rtld_error("No shared object contains address");
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
rtld_fill_dl_phdr_info(obj, phdr_info);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2010-08-23 15:27:03 +00:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
1999-03-24 23:37:35 +00:00
|
|
|
int
|
|
|
|
dladdr(const void *addr, Dl_info *info)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
|
|
|
const Elf_Sym *def;
|
|
|
|
void *symbol_addr;
|
|
|
|
unsigned long symoffset;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2003-05-29 22:58:26 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
1999-03-24 23:47:29 +00:00
|
|
|
obj = obj_from_addr(addr);
|
1999-03-24 23:37:35 +00:00
|
|
|
if (obj == NULL) {
|
1999-03-24 23:47:29 +00:00
|
|
|
_rtld_error("No shared object contains address");
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1999-03-24 23:37:35 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
info->dli_fname = obj->path;
|
1999-03-24 23:47:29 +00:00
|
|
|
info->dli_fbase = obj->mapbase;
|
1999-03-24 23:37:35 +00:00
|
|
|
info->dli_saddr = (void *)0;
|
|
|
|
info->dli_sname = NULL;
|
|
|
|
|
|
|
|
/*
|
1999-04-07 02:48:43 +00:00
|
|
|
* Walk the symbol list looking for the symbol whose address is
|
1999-03-24 23:37:35 +00:00
|
|
|
* closest to the address sent in.
|
|
|
|
*/
|
2012-04-30 13:31:10 +00:00
|
|
|
for (symoffset = 0; symoffset < obj->dynsymcount; symoffset++) {
|
1999-03-24 23:37:35 +00:00
|
|
|
def = obj->symtab + symoffset;
|
1999-03-24 23:47:29 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip the symbol if st_shndx is either SHN_UNDEF or
|
|
|
|
* SHN_COMMON.
|
|
|
|
*/
|
|
|
|
if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON)
|
|
|
|
continue;
|
|
|
|
|
1999-03-24 23:37:35 +00:00
|
|
|
/*
|
1999-04-07 02:48:43 +00:00
|
|
|
* If the symbol is greater than the specified address, or if it
|
1999-03-24 23:37:35 +00:00
|
|
|
* is further away from addr than the current nearest symbol,
|
|
|
|
* then reject it.
|
|
|
|
*/
|
1999-03-24 23:47:29 +00:00
|
|
|
symbol_addr = obj->relocbase + def->st_value;
|
|
|
|
if (symbol_addr > addr || symbol_addr < info->dli_saddr)
|
1999-03-24 23:37:35 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Update our idea of the nearest symbol. */
|
|
|
|
info->dli_sname = obj->strtab + def->st_name;
|
|
|
|
info->dli_saddr = symbol_addr;
|
|
|
|
|
|
|
|
/* Exact match? */
|
|
|
|
if (info->dli_saddr == addr)
|
|
|
|
break;
|
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
1999-03-24 23:37:35 +00:00
|
|
|
return 1;
|
|
|
|
}
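A minimal caller-side sketch of dladdr() as implemented above, mapping an
address back to its containing object and the nearest preceding exported
symbol. Illustrative only; the helper name is hypothetical.

#include <dlfcn.h>
#include <stdio.h>

static void
whereis(const void *addr)
{
        Dl_info info;

        if (dladdr(addr, &info) != 0)
                printf("%p is %s in %s\n", addr,
                    info.dli_sname != NULL ? info.dli_sname : "(no symbol)",
                    info.dli_fname);
        else
                printf("%p is not in any shared object\n", addr);
}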
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
int
|
|
|
|
dlinfo(void *handle, int request, void *p)
|
|
|
|
{
|
|
|
|
const Obj_Entry *obj;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
|
|
|
int error;
|
2003-02-13 17:47:44 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
rlock_acquire(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
if (handle == NULL || handle == RTLD_SELF) {
|
|
|
|
void *retaddr;
|
|
|
|
|
|
|
|
retaddr = __builtin_return_address(0); /* __GNUC__ only */
|
|
|
|
if ((obj = obj_from_addr(retaddr)) == NULL)
|
|
|
|
_rtld_error("Cannot determine caller's shared object");
|
|
|
|
} else
|
|
|
|
obj = dlcheck(handle);
|
|
|
|
|
|
|
|
if (obj == NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
error = 0;
|
|
|
|
switch (request) {
|
|
|
|
case RTLD_DI_LINKMAP:
|
|
|
|
*((struct link_map const **)p) = &obj->linkmap;
|
|
|
|
break;
|
|
|
|
case RTLD_DI_ORIGIN:
|
|
|
|
error = rtld_dirname(obj->path, p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case RTLD_DI_SERINFOSIZE:
|
|
|
|
case RTLD_DI_SERINFO:
|
|
|
|
error = do_search_info(obj, request, (struct dl_serinfo *)p);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
_rtld_error("Invalid request %d passed to dlinfo()", request);
|
|
|
|
error = -1;
|
|
|
|
}
|
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2010-08-23 15:27:03 +00:00
|
|
|
static void
|
|
|
|
rtld_fill_dl_phdr_info(const Obj_Entry *obj, struct dl_phdr_info *phdr_info)
|
|
|
|
{
|
|
|
|
|
|
|
|
phdr_info->dlpi_addr = (Elf_Addr)obj->relocbase;
|
2014-10-09 20:39:18 +00:00
|
|
|
phdr_info->dlpi_name = obj->path;
|
2010-08-23 15:27:03 +00:00
|
|
|
phdr_info->dlpi_phdr = obj->phdr;
|
|
|
|
phdr_info->dlpi_phnum = obj->phsize / sizeof(obj->phdr[0]);
|
|
|
|
phdr_info->dlpi_tls_modid = obj->tlsindex;
|
|
|
|
phdr_info->dlpi_tls_data = obj->tlsinit;
|
|
|
|
phdr_info->dlpi_adds = obj_loads;
|
|
|
|
phdr_info->dlpi_subs = obj_loads - obj_count;
|
|
|
|
}
|
|
|
|
|
2007-04-03 18:31:20 +00:00
|
|
|
int
|
|
|
|
dl_iterate_phdr(__dl_iterate_hdr_callback callback, void *param)
|
|
|
|
{
|
2016-01-20 07:21:33 +00:00
|
|
|
struct dl_phdr_info phdr_info;
|
|
|
|
Obj_Entry *obj, marker;
|
|
|
|
RtldLockState bind_lockstate, phdr_lockstate;
|
|
|
|
int error;
|
2007-04-03 18:31:20 +00:00
|
|
|
|
2016-12-22 17:44:27 +00:00
|
|
|
init_marker(&marker);
|
2016-01-20 07:21:33 +00:00
|
|
|
error = 0;
|
2013-11-07 20:45:50 +00:00
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
wlock_acquire(rtld_phdr_lock, &phdr_lockstate);
|
2016-12-13 18:05:14 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &bind_lockstate);
|
2016-01-20 07:21:33 +00:00
|
|
|
for (obj = globallist_curr(TAILQ_FIRST(&obj_list)); obj != NULL;) {
|
|
|
|
TAILQ_INSERT_AFTER(&obj_list, obj, &marker, next);
|
|
|
|
rtld_fill_dl_phdr_info(obj, &phdr_info);
|
2016-12-22 17:37:39 +00:00
|
|
|
hold_object(obj);
|
2016-01-20 07:21:33 +00:00
|
|
|
lock_release(rtld_bind_lock, &bind_lockstate);
|
|
|
|
|
|
|
|
error = callback(&phdr_info, sizeof phdr_info, param);
|
|
|
|
|
2016-12-13 18:05:14 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &bind_lockstate);
|
2016-12-22 17:37:39 +00:00
|
|
|
unhold_object(obj);
|
2016-01-20 07:21:33 +00:00
|
|
|
obj = globallist_next(&marker);
|
|
|
|
TAILQ_REMOVE(&obj_list, &marker, next);
|
|
|
|
if (error != 0) {
|
|
|
|
lock_release(rtld_bind_lock, &bind_lockstate);
|
|
|
|
lock_release(rtld_phdr_lock, &phdr_lockstate);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
}
|
2007-04-03 18:31:20 +00:00
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
if (error == 0) {
|
|
|
|
rtld_fill_dl_phdr_info(&obj_rtld, &phdr_info);
|
|
|
|
lock_release(rtld_bind_lock, &bind_lockstate);
|
|
|
|
error = callback(&phdr_info, sizeof(phdr_info), param);
|
|
|
|
}
|
2016-01-27 20:20:37 +00:00
|
|
|
lock_release(rtld_phdr_lock, &phdr_lockstate);
|
2016-01-20 07:21:33 +00:00
|
|
|
return (error);
|
2007-04-03 18:31:20 +00:00
|
|
|
}
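A caller-side sketch of dl_iterate_phdr() as implemented above; the callback
receives the fields filled in by rtld_fill_dl_phdr_info(). Illustrative only,
with the callback name chosen for the example.

#include <link.h>
#include <stdio.h>

static int
list_object(struct dl_phdr_info *info, size_t size, void *arg)
{
        (void)size;
        (void)arg;
        printf("%s: %d program headers, base %#lx\n",
            info->dlpi_name, (int)info->dlpi_phnum,
            (unsigned long)info->dlpi_addr);
        return (0);             /* non-zero would stop the iteration */
}

/* Typical use: dl_iterate_phdr(list_object, NULL); */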
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
static void *
|
|
|
|
fill_search_info(const char *dir, size_t dirlen, void *param)
|
|
|
|
{
|
|
|
|
struct fill_search_info_args *arg;
|
|
|
|
|
|
|
|
arg = param;
|
|
|
|
|
|
|
|
if (arg->request == RTLD_DI_SERINFOSIZE) {
|
|
|
|
arg->serinfo->dls_cnt ++;
|
2012-07-15 10:53:48 +00:00
|
|
|
arg->serinfo->dls_size += sizeof(struct dl_serpath) + dirlen + 1;
|
2003-02-13 17:47:44 +00:00
|
|
|
} else {
|
|
|
|
struct dl_serpath *s_entry;
|
|
|
|
|
|
|
|
s_entry = arg->serpath;
|
|
|
|
s_entry->dls_name = arg->strspace;
|
|
|
|
s_entry->dls_flags = arg->flags;
|
|
|
|
|
|
|
|
strncpy(arg->strspace, dir, dirlen);
|
|
|
|
arg->strspace[dirlen] = '\0';
|
|
|
|
|
|
|
|
arg->strspace += dirlen + 1;
|
|
|
|
arg->serpath++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info)
|
|
|
|
{
|
|
|
|
struct dl_serinfo _info;
|
|
|
|
struct fill_search_info_args args;
|
|
|
|
|
|
|
|
args.request = RTLD_DI_SERINFOSIZE;
|
|
|
|
args.serinfo = &_info;
|
|
|
|
|
|
|
|
_info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
|
|
|
|
_info.dls_cnt = 0;
|
|
|
|
|
|
|
|
path_enumerate(obj->rpath, fill_search_info, &args);
|
2012-07-15 10:53:48 +00:00
|
|
|
path_enumerate(ld_library_path, fill_search_info, &args);
|
|
|
|
path_enumerate(obj->runpath, fill_search_info, &args);
|
|
|
|
path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args);
|
|
|
|
if (!obj->z_nodeflib)
|
2015-10-31 04:39:55 +00:00
|
|
|
path_enumerate(ld_standard_library_path, fill_search_info, &args);
|
2003-02-13 17:47:44 +00:00
|
|
|
|
|
|
|
|
|
|
|
if (request == RTLD_DI_SERINFOSIZE) {
|
|
|
|
info->dls_size = _info.dls_size;
|
|
|
|
info->dls_cnt = _info.dls_cnt;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) {
|
|
|
|
_rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()");
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
args.request = RTLD_DI_SERINFO;
|
|
|
|
args.serinfo = info;
|
|
|
|
args.serpath = &info->dls_serpath[0];
|
|
|
|
args.strspace = (char *)&info->dls_serpath[_info.dls_cnt];
|
|
|
|
|
2012-07-15 10:53:48 +00:00
|
|
|
args.flags = LA_SER_RUNPATH;
|
|
|
|
if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL)
|
|
|
|
return (-1);
|
|
|
|
|
2003-02-13 17:47:44 +00:00
|
|
|
args.flags = LA_SER_LIBPATH;
|
|
|
|
if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL)
|
|
|
|
return (-1);
|
|
|
|
|
|
|
|
args.flags = LA_SER_RUNPATH;
|
2012-07-15 10:53:48 +00:00
|
|
|
if (path_enumerate(obj->runpath, fill_search_info, &args) != NULL)
|
2003-02-13 17:47:44 +00:00
|
|
|
return (-1);
|
|
|
|
|
|
|
|
args.flags = LA_SER_CONFIG;
|
2012-07-15 10:53:48 +00:00
|
|
|
if (path_enumerate(gethints(obj->z_nodeflib), fill_search_info, &args)
|
|
|
|
!= NULL)
|
2003-02-13 17:47:44 +00:00
|
|
|
return (-1);
|
|
|
|
|
|
|
|
args.flags = LA_SER_DEFAULT;
|
2012-07-15 10:53:48 +00:00
|
|
|
if (!obj->z_nodeflib &&
|
2015-10-31 04:39:55 +00:00
|
|
|
path_enumerate(ld_standard_library_path, fill_search_info, &args) != NULL)
|
2003-02-13 17:47:44 +00:00
|
|
|
return (-1);
|
|
|
|
return (0);
|
|
|
|
}
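A caller-side sketch of the two-step RTLD_DI_SERINFOSIZE / RTLD_DI_SERINFO
protocol that dlinfo() and do_search_info() above implement: query the sizes,
allocate, copy the primed header back in, then ask for the search paths.
Illustrative only, with error handling abbreviated.

#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>

static void
print_search_paths(void *handle)
{
        struct dl_serinfo hdr, *info;
        unsigned int i;

        if (dlinfo(handle, RTLD_DI_SERINFOSIZE, &hdr) == -1)
                return;
        info = malloc(hdr.dls_size);
        if (info == NULL)
                return;
        *info = hdr;            /* dls_size and dls_cnt must be primed */
        if (dlinfo(handle, RTLD_DI_SERINFO, info) == 0) {
                for (i = 0; i < info->dls_cnt; i++)
                        printf("%s\n", info->dls_serpath[i].dls_name);
        }
        free(info);
}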
|
|
|
|
|
|
|
|
static int
|
|
|
|
rtld_dirname(const char *path, char *bname)
|
|
|
|
{
|
|
|
|
const char *endp;
|
|
|
|
|
|
|
|
/* Empty or NULL string gets treated as "." */
|
|
|
|
if (path == NULL || *path == '\0') {
|
|
|
|
bname[0] = '.';
|
2003-02-13 22:47:41 +00:00
|
|
|
bname[1] = '\0';
|
2003-02-13 17:47:44 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Strip trailing slashes */
|
|
|
|
endp = path + strlen(path) - 1;
|
|
|
|
while (endp > path && *endp == '/')
|
|
|
|
endp--;
|
|
|
|
|
|
|
|
/* Find the start of the dir */
|
|
|
|
while (endp > path && *endp != '/')
|
|
|
|
endp--;
|
|
|
|
|
|
|
|
/* Either the dir is "/" or there are no slashes */
|
|
|
|
if (endp == path) {
|
|
|
|
bname[0] = *endp == '/' ? '/' : '.';
|
|
|
|
bname[1] = '\0';
|
|
|
|
return (0);
|
|
|
|
} else {
|
|
|
|
do {
|
|
|
|
endp--;
|
|
|
|
} while (endp > path && *endp == '/');
|
|
|
|
}
|
|
|
|
|
|
|
|
if (endp - path + 2 > PATH_MAX)
|
|
|
|
{
|
|
|
|
_rtld_error("Filename is too long: %s", path);
|
|
|
|
return(-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
strncpy(bname, path, endp - path + 1);
|
|
|
|
bname[endp - path + 1] = '\0';
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2009-03-18 13:40:37 +00:00
|
|
|
static int
|
|
|
|
rtld_dirname_abs(const char *path, char *base)
|
|
|
|
{
|
2015-02-27 19:05:23 +00:00
|
|
|
char *last;
|
2009-03-18 13:40:37 +00:00
|
|
|
|
2015-02-27 19:05:23 +00:00
|
|
|
if (realpath(path, base) == NULL)
|
2009-03-18 13:40:37 +00:00
|
|
|
return (-1);
|
2015-02-27 19:05:23 +00:00
|
|
|
dbg("%s -> %s", path, base);
|
|
|
|
last = strrchr(base, '/');
|
|
|
|
if (last == NULL)
|
2009-03-18 13:40:37 +00:00
|
|
|
return (-1);
|
2015-02-27 19:05:23 +00:00
|
|
|
if (last != base)
|
|
|
|
*last = '\0';
|
2009-03-18 13:40:37 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
1998-04-30 07:48:02 +00:00
|
|
|
static void
|
|
|
|
linkmap_add(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
struct link_map *l = &obj->linkmap;
|
|
|
|
struct link_map *prev;
|
|
|
|
|
|
|
|
obj->linkmap.l_name = obj->path;
|
|
|
|
obj->linkmap.l_addr = obj->mapbase;
|
|
|
|
obj->linkmap.l_ld = obj->dynamic;
|
|
|
|
#ifdef __mips__
|
|
|
|
/* GDB needs load offset on MIPS to use the symbols */
|
|
|
|
obj->linkmap.l_offs = obj->relocbase;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (r_debug.r_map == NULL) {
|
|
|
|
r_debug.r_map = l;
|
|
|
|
return;
|
|
|
|
}
|
2003-02-13 17:47:44 +00:00
|
|
|
|
1998-09-16 02:54:08 +00:00
|
|
|
/*
|
|
|
|
* Scan to the end of the list, but not past the entry for the
|
|
|
|
* dynamic linker, which we want to keep at the very end.
|
|
|
|
*/
|
|
|
|
for (prev = r_debug.r_map;
|
|
|
|
prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap;
|
|
|
|
prev = prev->l_next)
|
1998-04-30 07:48:02 +00:00
|
|
|
;
|
1998-09-16 02:54:08 +00:00
|
|
|
|
|
|
|
/* Link in the new entry. */
|
1998-04-30 07:48:02 +00:00
|
|
|
l->l_prev = prev;
|
1998-09-16 02:54:08 +00:00
|
|
|
l->l_next = prev->l_next;
|
|
|
|
if (l->l_next != NULL)
|
|
|
|
l->l_next->l_prev = l;
|
1998-04-30 07:48:02 +00:00
|
|
|
prev->l_next = l;
|
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
|
|
|
linkmap_delete(Obj_Entry *obj)
|
1998-04-30 07:48:02 +00:00
|
|
|
{
|
|
|
|
struct link_map *l = &obj->linkmap;
|
|
|
|
|
|
|
|
if (l->l_prev == NULL) {
|
|
|
|
if ((r_debug.r_map = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = NULL;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((l->l_prev->l_next = l->l_next) != NULL)
|
|
|
|
l->l_next->l_prev = l->l_prev;
|
|
|
|
}
|
/*
 * Function for the debugger to set a breakpoint on to gain control.
 *
 * The two parameters allow the debugger to easily find and determine
 * what the runtime loader is doing and to whom it is doing it.
 *
 * When the loadhook trap is hit (r_debug_state, set at program
 * initialization), the arguments can be found on the stack:
 *
 *  +8   struct link_map *m
 *  +4   struct r_debug  *rd
 *  +0   RetAddr
 */
void
r_debug_state(struct r_debug* rd, struct link_map *m)
{
    /*
     * The following is a hack to force the compiler to emit calls to
     * this function, even when optimizing.  If the function is empty,
     * the compiler is not obliged to emit any code for calls to it,
     * even when marked __noinline.  However, gdb depends on those
     * calls being made.
     */
    __compiler_membar();
}

/*
 * A function called after init routines have completed.  This can be used to
 * break before a program's entry routine is called, and can be used when
 * main is not available in the symbol table.
 */
void
_r_debug_postinit(struct link_map *m)
{

    /* See r_debug_state(). */
    __compiler_membar();
}
static void
release_object(Obj_Entry *obj)
{

    /*
     * If some other consumer still holds a reference on the object
     * (obj->holdcount > 0), only mark it for freeing; the last holder
     * is expected to complete the release.  Otherwise unmap the object
     * and remove it from the debugger link map.
     */
    if (obj->holdcount > 0) {
        obj->unholdfree = true;
        return;
    }
    munmap(obj->mapbase, obj->mapsize);
    linkmap_delete(obj);
    obj_free(obj);
}
/*
 * Get address of the pointer variable in the main program.
 * Prefer non-weak symbol over the weak one.
 */
static const void **
get_program_var_addr(const char *name, RtldLockState *lockstate)
{
    SymLook req;
    DoneList donelist;

    symlook_init(&req, name);
    req.lockstate = lockstate;
    donelist_init(&donelist);
    if (symlook_global(&req, &donelist) != 0)
        return (NULL);
    if (ELF_ST_TYPE(req.sym_out->st_info) == STT_FUNC)
        return ((const void **)make_function_pointer(req.sym_out,
          req.defobj_out));
    else if (ELF_ST_TYPE(req.sym_out->st_info) == STT_GNU_IFUNC)
        return ((const void **)rtld_resolve_ifunc(req.defobj_out, req.sym_out));
    else
        return ((const void **)(req.defobj_out->relocbase +
          req.sym_out->st_value));
}

/*
 * Set a pointer variable in the main program to the given value.  This
 * is used to set key variables such as "environ" before any of the
 * init functions are called.
 */
static void
set_program_var(const char *name, const void *value)
{
    const void **addr;

    if ((addr = get_program_var_addr(name, NULL)) != NULL) {
        dbg("\"%s\": *%p <-- %p", name, addr, value);
        *addr = value;
    }
}
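/*
 * Illustrative sketch (not part of this file's logic): set_program_var() is
 * the hook rtld uses to publish values into variables owned by the main
 * program before its init code runs.  The variable names below are examples
 * only; the authoritative call sites live elsewhere in rtld.
 */
#if 0
static void
publish_program_vars_example(char **env)
{
    set_program_var("environ", env);           /* hypothetical call site */
    set_program_var("__progname", "example");  /* hypothetical call site */
}
#endif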
/*
 * Search the global objects, including dependencies and main object,
 * for the given symbol.
 */
static int
symlook_global(SymLook *req, DoneList *donelist)
{
    SymLook req1;
    const Objlist_Entry *elm;
    int res;

    symlook_init_from_req(&req1, req);

    /* Search all objects loaded at program start up. */
    if (req->defobj_out == NULL ||
      ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
        res = symlook_list(&req1, &list_main, donelist);
        if (res == 0 && (req->defobj_out == NULL ||
          ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
            req->sym_out = req1.sym_out;
            req->defobj_out = req1.defobj_out;
            assert(req->defobj_out != NULL);
        }
    }

    /* Search all DAGs whose roots are RTLD_GLOBAL objects. */
    STAILQ_FOREACH(elm, &list_global, link) {
        if (req->defobj_out != NULL &&
          ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
            break;
        res = symlook_list(&req1, &elm->obj->dagmembers, donelist);
        if (res == 0 && (req->defobj_out == NULL ||
          ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
            req->sym_out = req1.sym_out;
            req->defobj_out = req1.defobj_out;
            assert(req->defobj_out != NULL);
        }
    }

    return (req->sym_out != NULL ? 0 : ESRCH);
}
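/*
 * Note on the ordering above: a strong (non-weak) definition ends the
 * search, while a weak definition is remembered but can still be superseded
 * by a strong one found in a later list.  Objects loaded at program startup
 * are consulted before the DAGs of RTLD_GLOBAL objects.
 */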
2000-09-19 04:27:16 +00:00
|
|
|
/*
|
|
|
|
* Given a symbol name in a referencing object, find the corresponding
|
|
|
|
* definition of the symbol. Returns a pointer to the symbol, or NULL if
|
|
|
|
* no definition was found. Returns a pointer to the Obj_Entry of the
|
|
|
|
* defining object via the reference parameter DEFOBJ_OUT.
|
|
|
|
*/
|
2010-12-25 08:51:20 +00:00
|
|
|
static int
|
|
|
|
symlook_default(SymLook *req, const Obj_Entry *refobj)
|
2000-09-19 04:27:16 +00:00
|
|
|
{
|
|
|
|
DoneList donelist;
|
|
|
|
const Objlist_Entry *elm;
|
2010-12-25 08:51:20 +00:00
|
|
|
SymLook req1;
|
|
|
|
int res;
|
2011-01-10 16:09:35 +00:00
|
|
|
|
2000-09-19 04:27:16 +00:00
|
|
|
donelist_init(&donelist);
|
2010-12-25 08:51:20 +00:00
|
|
|
symlook_init_from_req(&req1, req);
|
2000-09-19 04:27:16 +00:00
|
|
|
|
2017-02-09 23:33:06 +00:00
|
|
|
/*
|
|
|
|
* Look first in the referencing object if linked symbolically,
|
|
|
|
* and similarly handle protected symbols.
|
|
|
|
*/
|
|
|
|
res = symlook_obj(&req1, refobj);
|
|
|
|
if (res == 0 && (refobj->symbolic ||
|
|
|
|
ELF_ST_VISIBILITY(req1.sym_out->st_other) == STV_PROTECTED)) {
|
|
|
|
req->sym_out = req1.sym_out;
|
|
|
|
req->defobj_out = req1.defobj_out;
|
|
|
|
assert(req->defobj_out != NULL);
|
|
|
|
}
|
|
|
|
if (refobj->symbolic || req->defobj_out != NULL)
|
|
|
|
donelist_check(&donelist, refobj);
|
2000-09-19 04:27:16 +00:00
|
|
|
|
2011-01-10 16:09:35 +00:00
|
|
|
symlook_global(req, &donelist);
|
2000-09-19 04:27:16 +00:00
|
|
|
|
2002-10-19 23:03:35 +00:00
|
|
|
/* Search all dlopened DAGs containing the referencing object. */
|
|
|
|
STAILQ_FOREACH(elm, &refobj->dldags, link) {
|
2011-01-10 16:09:35 +00:00
|
|
|
if (req->sym_out != NULL &&
|
|
|
|
ELF_ST_BIND(req->sym_out->st_info) != STB_WEAK)
|
2002-10-19 23:03:35 +00:00
|
|
|
break;
|
2010-12-25 08:51:20 +00:00
|
|
|
res = symlook_list(&req1, &elm->obj->dagmembers, &donelist);
|
2011-01-10 16:09:35 +00:00
|
|
|
if (res == 0 && (req->sym_out == NULL ||
|
|
|
|
ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK)) {
|
|
|
|
req->sym_out = req1.sym_out;
|
|
|
|
req->defobj_out = req1.defobj_out;
|
|
|
|
assert(req->defobj_out != NULL);
|
2000-09-19 04:27:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Search the dynamic linker itself, and possibly resolve the
|
|
|
|
* symbol from there. This is how the application links to
|
2010-08-24 13:01:14 +00:00
|
|
|
* dynamic linker services such as dlopen.
|
2000-09-19 04:27:16 +00:00
|
|
|
*/
|
2011-01-10 16:09:35 +00:00
|
|
|
if (req->sym_out == NULL ||
|
|
|
|
ELF_ST_BIND(req->sym_out->st_info) == STB_WEAK) {
|
2010-12-25 08:51:20 +00:00
|
|
|
res = symlook_obj(&req1, &obj_rtld);
|
|
|
|
if (res == 0) {
|
2011-01-10 16:09:35 +00:00
|
|
|
req->sym_out = req1.sym_out;
|
|
|
|
req->defobj_out = req1.defobj_out;
|
|
|
|
assert(req->defobj_out != NULL);
|
2000-09-19 04:27:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-01-10 16:09:35 +00:00
|
|
|
return (req->sym_out != NULL ? 0 : ESRCH);
|
2000-09-19 04:27:16 +00:00
|
|
|
}
|
|
|
|
|
static int
symlook_list(SymLook *req, const Objlist *objlist, DoneList *dlp)
{
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    const Objlist_Entry *elm;
    SymLook req1;
    int res;

    def = NULL;
    defobj = NULL;
    STAILQ_FOREACH(elm, objlist, link) {
        if (donelist_check(dlp, elm->obj))
            continue;
        symlook_init_from_req(&req1, req);
        if ((res = symlook_obj(&req1, elm->obj)) == 0) {
            if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
                def = req1.sym_out;
                defobj = req1.defobj_out;
                if (ELF_ST_BIND(def->st_info) != STB_WEAK)
                    break;
            }
        }
    }
    if (def != NULL) {
        req->sym_out = def;
        req->defobj_out = defobj;
        return (0);
    }
    return (ESRCH);
}
2006-09-19 12:47:13 +00:00
|
|
|
/*
|
2011-01-28 23:44:57 +00:00
|
|
|
 * Search the chain of DAGs pointed to by the given Needed_Entry
|
|
|
|
* for a symbol of the given name. Each DAG is scanned completely
|
|
|
|
* before advancing to the next one. Returns a pointer to the symbol,
|
|
|
|
* or NULL if no definition was found.
|
2006-09-19 12:47:13 +00:00
|
|
|
*/
|
2010-12-25 08:51:20 +00:00
|
|
|
static int
|
|
|
|
symlook_needed(SymLook *req, const Needed_Entry *needed, DoneList *dlp)
|
2006-09-19 12:47:13 +00:00
|
|
|
{
|
2011-01-28 23:44:57 +00:00
|
|
|
const Elf_Sym *def;
|
2006-09-19 12:47:13 +00:00
|
|
|
const Needed_Entry *n;
|
2011-01-28 23:44:57 +00:00
|
|
|
const Obj_Entry *defobj;
|
2010-12-25 08:51:20 +00:00
|
|
|
SymLook req1;
|
|
|
|
int res;
|
2006-09-19 16:48:08 +00:00
|
|
|
|
2011-01-28 23:44:57 +00:00
|
|
|
def = NULL;
|
2006-09-19 12:47:13 +00:00
|
|
|
defobj = NULL;
|
2010-12-25 08:51:20 +00:00
|
|
|
symlook_init_from_req(&req1, req);
|
2006-09-19 12:47:13 +00:00
|
|
|
for (n = needed; n != NULL; n = n->next) {
|
2011-01-28 23:44:57 +00:00
|
|
|
if (n->obj == NULL ||
|
|
|
|
(res = symlook_list(&req1, &n->obj->dagmembers, dlp)) != 0)
|
2006-09-19 16:48:08 +00:00
|
|
|
continue;
|
2011-01-28 23:44:57 +00:00
|
|
|
if (def == NULL || ELF_ST_BIND(req1.sym_out->st_info) != STB_WEAK) {
|
|
|
|
def = req1.sym_out;
|
|
|
|
defobj = req1.defobj_out;
|
|
|
|
if (ELF_ST_BIND(def->st_info) != STB_WEAK)
|
|
|
|
break;
|
2006-09-19 16:48:08 +00:00
|
|
|
}
|
2006-09-19 12:47:13 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (def != NULL) {
|
|
|
|
req->sym_out = def;
|
|
|
|
req->defobj_out = defobj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (ESRCH);
|
2006-09-19 12:47:13 +00:00
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
/*
|
|
|
|
* Search the symbol table of a single shared object for a symbol of
|
2005-12-18 19:43:33 +00:00
|
|
|
* the given name and version, if requested. Returns a pointer to the
|
2010-12-25 08:51:20 +00:00
|
|
|
* symbol, or NULL if no definition was found. If the object is
|
|
|
|
 * a filter, return the filtered symbol from the filtee.
|
1998-09-02 02:00:20 +00:00
|
|
|
*
|
|
|
|
* The symbol's hash value is passed in for efficiency reasons; that
|
|
|
|
* eliminates many recomputations of the hash value.
|
|
|
|
*/
|
2010-12-25 08:51:20 +00:00
|
|
|
int
|
|
|
|
symlook_obj(SymLook *req, const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
DoneList donelist;
|
|
|
|
SymLook req1;
|
2012-03-20 13:20:49 +00:00
|
|
|
int flags, res, mres;
|
2010-12-25 08:51:20 +00:00
|
|
|
|
2012-04-30 13:31:10 +00:00
|
|
|
/*
|
2012-05-05 11:26:08 +00:00
|
|
|
* If there is at least one valid hash at this point, we prefer to
|
|
|
|
* use the faster GNU version if available.
|
2012-04-30 13:31:10 +00:00
|
|
|
*/
|
|
|
|
if (obj->valid_hash_gnu)
|
|
|
|
mres = symlook_obj1_gnu(req, obj);
|
2012-05-05 11:26:08 +00:00
|
|
|
else if (obj->valid_hash_sysv)
|
2012-04-30 13:31:10 +00:00
|
|
|
mres = symlook_obj1_sysv(req, obj);
|
2012-05-05 11:26:08 +00:00
|
|
|
else
|
|
|
|
return (EINVAL);
|
2012-04-30 13:31:10 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
if (mres == 0) {
|
|
|
|
if (obj->needed_filtees != NULL) {
|
2012-03-20 13:20:49 +00:00
|
|
|
flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
|
|
|
|
load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
|
2010-12-25 08:51:20 +00:00
|
|
|
donelist_init(&donelist);
|
|
|
|
symlook_init_from_req(&req1, req);
|
|
|
|
res = symlook_needed(&req1, obj->needed_filtees, &donelist);
|
|
|
|
if (res == 0) {
|
|
|
|
req->sym_out = req1.sym_out;
|
|
|
|
req->defobj_out = req1.defobj_out;
|
|
|
|
}
|
|
|
|
return (res);
|
|
|
|
}
|
|
|
|
if (obj->needed_aux_filtees != NULL) {
|
2012-03-20 13:20:49 +00:00
|
|
|
flags = (req->flags & SYMLOOK_EARLY) ? RTLD_LO_EARLY : 0;
|
|
|
|
load_filtees(__DECONST(Obj_Entry *, obj), flags, req->lockstate);
|
2010-12-25 08:51:20 +00:00
|
|
|
donelist_init(&donelist);
|
|
|
|
symlook_init_from_req(&req1, req);
|
|
|
|
res = symlook_needed(&req1, obj->needed_aux_filtees, &donelist);
|
|
|
|
if (res == 0) {
|
|
|
|
req->sym_out = req1.sym_out;
|
|
|
|
req->defobj_out = req1.defobj_out;
|
|
|
|
return (res);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return (mres);
|
|
|
|
}
|
|
|
|
|
2012-04-30 13:29:21 +00:00
|
|
|
/* Symbol match routine common to both hash functions */
|
|
|
|
static bool
|
|
|
|
matched_symbol(SymLook *req, const Obj_Entry *obj, Sym_Match_Result *result,
|
|
|
|
const unsigned long symnum)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
2012-04-30 13:29:21 +00:00
|
|
|
Elf_Versym verndx;
|
2005-12-18 19:43:33 +00:00
|
|
|
const Elf_Sym *symp;
|
|
|
|
const char *strp;
|
|
|
|
|
|
|
|
symp = obj->symtab + symnum;
|
|
|
|
strp = obj->strtab + symp->st_name;
|
|
|
|
|
|
|
|
switch (ELF_ST_TYPE(symp->st_info)) {
|
|
|
|
case STT_FUNC:
|
|
|
|
case STT_NOTYPE:
|
|
|
|
case STT_OBJECT:
|
2012-04-30 13:29:21 +00:00
|
|
|
case STT_COMMON:
|
2011-12-12 11:03:14 +00:00
|
|
|
case STT_GNU_IFUNC:
|
2012-04-30 13:29:21 +00:00
|
|
|
if (symp->st_value == 0)
|
|
|
|
return (false);
|
2005-12-18 19:43:33 +00:00
|
|
|
/* fallthrough */
|
|
|
|
case STT_TLS:
|
2012-04-30 13:29:21 +00:00
|
|
|
if (symp->st_shndx != SHN_UNDEF)
|
|
|
|
break;
|
2008-04-04 20:59:26 +00:00
|
|
|
#ifndef __mips__
|
2012-04-30 13:29:21 +00:00
|
|
|
else if (((req->flags & SYMLOOK_IN_PLT) == 0) &&
|
|
|
|
(ELF_ST_TYPE(symp->st_info) == STT_FUNC))
|
|
|
|
break;
|
2005-12-18 19:43:33 +00:00
|
|
|
/* fallthrough */
|
2008-04-04 20:59:26 +00:00
|
|
|
#endif
|
2005-12-18 19:43:33 +00:00
|
|
|
default:
|
2012-04-30 13:29:21 +00:00
|
|
|
return (false);
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (req->name[0] != strp[0] || strcmp(req->name, strp) != 0)
|
2012-04-30 13:29:21 +00:00
|
|
|
return (false);
|
1998-09-02 02:00:20 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
if (req->ventry == NULL) {
|
2012-04-30 13:29:21 +00:00
|
|
|
if (obj->versyms != NULL) {
|
|
|
|
verndx = VER_NDX(obj->versyms[symnum]);
|
|
|
|
if (verndx > obj->vernum) {
|
|
|
|
_rtld_error(
|
|
|
|
"%s: symbol %s references wrong version %d",
|
|
|
|
obj->path, obj->strtab + symnum, verndx);
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* If we are not called from dlsym (i.e. this
|
|
|
|
* is a normal relocation from unversioned
|
|
|
|
* binary), accept the symbol immediately if
|
|
|
|
* it happens to have first version after this
|
|
|
|
* shared object became versioned. Otherwise,
|
|
|
|
* if symbol is versioned and not hidden,
|
|
|
|
* remember it. If it is the only symbol with
|
|
|
|
* this name exported by the shared object, it
|
|
|
|
* will be returned as a match by the calling
|
|
|
|
* function. If symbol is global (verndx < 2)
|
|
|
|
* accept it unconditionally.
|
|
|
|
*/
|
|
|
|
if ((req->flags & SYMLOOK_DLSYM) == 0 &&
|
|
|
|
verndx == VER_NDX_GIVEN) {
|
|
|
|
result->sym_out = symp;
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
else if (verndx >= VER_NDX_GIVEN) {
|
|
|
|
if ((obj->versyms[symnum] & VER_NDX_HIDDEN)
|
|
|
|
== 0) {
|
|
|
|
if (result->vsymp == NULL)
|
|
|
|
result->vsymp = symp;
|
|
|
|
result->vcount++;
|
|
|
|
}
|
|
|
|
return (false);
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2012-04-30 13:29:21 +00:00
|
|
|
result->sym_out = symp;
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
if (obj->versyms == NULL) {
|
2010-12-25 08:51:20 +00:00
|
|
|
if (object_match_name(obj, req->ventry->name)) {
|
2012-04-30 13:29:21 +00:00
|
|
|
_rtld_error("%s: object %s should provide version %s "
|
|
|
|
"for symbol %s", obj_rtld.path, obj->path,
|
|
|
|
req->ventry->name, obj->strtab + symnum);
|
|
|
|
return (false);
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2012-04-30 13:29:21 +00:00
|
|
|
} else {
|
2005-12-18 19:43:33 +00:00
|
|
|
verndx = VER_NDX(obj->versyms[symnum]);
|
|
|
|
if (verndx > obj->vernum) {
|
2012-04-30 13:29:21 +00:00
|
|
|
_rtld_error("%s: symbol %s references wrong version %d",
|
|
|
|
obj->path, obj->strtab + symnum, verndx);
|
|
|
|
return (false);
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2010-12-25 08:51:20 +00:00
|
|
|
if (obj->vertab[verndx].hash != req->ventry->hash ||
|
|
|
|
strcmp(obj->vertab[verndx].name, req->ventry->name)) {
|
2012-04-30 13:29:21 +00:00
|
|
|
/*
|
|
|
|
 * Version does not match.  Check whether this is a
|
|
|
|
* global symbol and if it is not hidden. If
|
|
|
|
* global symbol (verndx < 2) is available,
|
|
|
|
* use it. Do not return symbol if we are
|
|
|
|
* called by dlvsym, because dlvsym looks for
|
|
|
|
* a specific version and default one is not
|
|
|
|
* what dlvsym wants.
|
|
|
|
*/
|
|
|
|
if ((req->flags & SYMLOOK_DLSYM) ||
|
|
|
|
(verndx >= VER_NDX_GIVEN) ||
|
|
|
|
(obj->versyms[symnum] & VER_NDX_HIDDEN))
|
|
|
|
return (false);
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
1999-04-09 00:28:43 +00:00
|
|
|
}
|
2012-04-30 13:29:21 +00:00
|
|
|
result->sym_out = symp;
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
|
2012-04-30 13:31:10 +00:00
|
|
|
/*
|
|
|
|
* Search for symbol using SysV hash function.
|
|
|
|
* obj->buckets is known not to be NULL at this point; the test for this was
|
|
|
|
* performed with the obj->valid_hash_sysv assignment.
|
|
|
|
*/
|
2012-04-30 13:29:21 +00:00
|
|
|
static int
|
2012-04-30 13:31:10 +00:00
|
|
|
symlook_obj1_sysv(SymLook *req, const Obj_Entry *obj)
|
2012-04-30 13:29:21 +00:00
|
|
|
{
|
|
|
|
unsigned long symnum;
|
|
|
|
Sym_Match_Result matchres;
|
|
|
|
|
|
|
|
matchres.sym_out = NULL;
|
|
|
|
matchres.vsymp = NULL;
|
|
|
|
matchres.vcount = 0;
|
|
|
|
|
|
|
|
for (symnum = obj->buckets[req->hash % obj->nbuckets];
|
|
|
|
symnum != STN_UNDEF; symnum = obj->chains[symnum]) {
|
|
|
|
if (symnum >= obj->nchains)
|
|
|
|
return (ESRCH); /* Bad object */
|
|
|
|
|
|
|
|
if (matched_symbol(req, obj, &matchres, symnum)) {
|
|
|
|
req->sym_out = matchres.sym_out;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (matchres.vcount == 1) {
|
|
|
|
req->sym_out = matchres.vsymp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (ESRCH);
|
1998-09-02 02:00:20 +00:00
|
|
|
}
|
|
|
|
|
2012-04-30 13:31:10 +00:00
|
|
|
/* Search for symbol using GNU hash function */
|
|
|
|
static int
|
|
|
|
symlook_obj1_gnu(SymLook *req, const Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
Elf_Addr bloom_word;
|
|
|
|
const Elf32_Word *hashval;
|
|
|
|
Elf32_Word bucket;
|
|
|
|
Sym_Match_Result matchres;
|
|
|
|
unsigned int h1, h2;
|
|
|
|
unsigned long symnum;
|
|
|
|
|
|
|
|
matchres.sym_out = NULL;
|
|
|
|
matchres.vsymp = NULL;
|
|
|
|
matchres.vcount = 0;
|
|
|
|
|
|
|
|
/* Pick right bitmask word from Bloom filter array */
|
|
|
|
bloom_word = obj->bloom_gnu[(req->hash_gnu / __ELF_WORD_SIZE) &
|
|
|
|
obj->maskwords_bm_gnu];
|
|
|
|
|
|
|
|
/* Calculate modulus word size of gnu hash and its derivative */
|
|
|
|
h1 = req->hash_gnu & (__ELF_WORD_SIZE - 1);
|
|
|
|
h2 = ((req->hash_gnu >> obj->shift2_gnu) & (__ELF_WORD_SIZE - 1));
|
|
|
|
|
|
|
|
/* Filter out the "definitely not in set" queries */
|
|
|
|
if (((bloom_word >> h1) & (bloom_word >> h2) & 1) == 0)
|
|
|
|
return (ESRCH);
|
|
|
|
|
|
|
|
/* Locate hash chain and corresponding value element */
|
|
|
|
bucket = obj->buckets_gnu[req->hash_gnu % obj->nbuckets_gnu];
|
|
|
|
if (bucket == 0)
|
|
|
|
return (ESRCH);
|
|
|
|
hashval = &obj->chain_zero_gnu[bucket];
|
|
|
|
do {
|
|
|
|
if (((*hashval ^ req->hash_gnu) >> 1) == 0) {
|
|
|
|
symnum = hashval - obj->chain_zero_gnu;
|
|
|
|
if (matched_symbol(req, obj, &matchres, symnum)) {
|
|
|
|
req->sym_out = matchres.sym_out;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} while ((*hashval++ & 1) == 0);
|
|
|
|
if (matchres.vcount == 1) {
|
|
|
|
req->sym_out = matchres.vsymp;
|
|
|
|
req->defobj_out = obj;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (ESRCH);
|
|
|
|
}
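/*
 * For reference, a minimal sketch of the GNU hash scheme that
 * symlook_obj1_gnu() assumes (the real gnu_hash() used by symlook_init() is
 * defined elsewhere in rtld); shown only to document the hashing the
 * Bloom-filter and bucket computations above rely on.
 */
#if 0
static Elf32_Word
gnu_hash_example(const char *name)
{
    Elf32_Word h;
    unsigned char c;

    h = 5381;
    for (c = *name; c != '\0'; c = *++name)
        h = h * 33 + c;
    return (h);
}
#endif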
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
|
|
|
trace_loaded_objects(Obj_Entry *obj)
|
1998-05-01 08:39:27 +00:00
|
|
|
{
|
2002-02-17 07:04:32 +00:00
|
|
|
char *fmt1, *fmt2, *fmt, *main_local, *list_containers;
|
1998-05-01 08:39:27 +00:00
|
|
|
int c;
|
|
|
|
|
2015-12-27 23:04:12 +00:00
|
|
|
if ((main_local = getenv(_LD("TRACE_LOADED_OBJECTS_PROGNAME"))) == NULL)
|
1998-05-01 08:39:27 +00:00
|
|
|
main_local = "";
|
|
|
|
|
2015-12-27 23:04:12 +00:00
|
|
|
if ((fmt1 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT1"))) == NULL)
|
1998-05-01 08:39:27 +00:00
|
|
|
fmt1 = "\t%o => %p (%x)\n";
|
|
|
|
|
2015-12-27 23:04:12 +00:00
|
|
|
if ((fmt2 = getenv(_LD("TRACE_LOADED_OBJECTS_FMT2"))) == NULL)
|
1998-05-01 08:39:27 +00:00
|
|
|
fmt2 = "\t%o (%x)\n";
|
|
|
|
|
2015-12-27 23:04:12 +00:00
|
|
|
list_containers = getenv(_LD("TRACE_LOADED_OBJECTS_ALL"));
|
2002-02-17 07:04:32 +00:00
|
|
|
|
2016-08-12 18:29:11 +00:00
|
|
|
for (; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
|
1998-05-01 08:39:27 +00:00
|
|
|
Needed_Entry *needed;
|
|
|
|
char *name, *path;
|
|
|
|
bool is_lib;
|
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker)
|
|
|
|
continue;
|
2002-02-17 07:04:32 +00:00
|
|
|
if (list_containers && obj->needed != NULL)
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_printf("%s:\n", obj->path);
|
1998-05-01 08:39:27 +00:00
|
|
|
for (needed = obj->needed; needed; needed = needed->next) {
|
1998-09-02 02:51:12 +00:00
|
|
|
if (needed->obj != NULL) {
|
2002-02-17 07:04:32 +00:00
|
|
|
if (needed->obj->traced && !list_containers)
|
1998-09-02 02:51:12 +00:00
|
|
|
continue;
|
|
|
|
needed->obj->traced = true;
|
1998-05-01 08:39:27 +00:00
|
|
|
path = needed->obj->path;
|
1998-09-02 02:51:12 +00:00
|
|
|
} else
|
|
|
|
path = "not found";
|
|
|
|
|
|
|
|
name = (char *)obj->strtab + needed->name;
|
|
|
|
is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */
|
1998-05-01 08:39:27 +00:00
|
|
|
|
|
|
|
fmt = is_lib ? fmt1 : fmt2;
|
|
|
|
while ((c = *fmt++) != '\0') {
|
|
|
|
switch (c) {
|
|
|
|
default:
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putchar(c);
|
1998-05-01 08:39:27 +00:00
|
|
|
continue;
|
|
|
|
case '\\':
|
|
|
|
switch (c = *fmt) {
|
|
|
|
case '\0':
|
|
|
|
continue;
|
|
|
|
case 'n':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putchar('\n');
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 't':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putchar('\t');
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case '%':
|
|
|
|
switch (c = *fmt) {
|
|
|
|
case '\0':
|
|
|
|
continue;
|
|
|
|
case '%':
|
|
|
|
default:
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putchar(c);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 'A':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putstr(main_local);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 'a':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putstr(obj_main->path);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 'o':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putstr(name);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
#if 0
|
|
|
|
case 'm':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_printf("%d", sodp->sod_major);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 'n':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_printf("%d", sodp->sod_minor);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
case 'p':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_putstr(path);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
case 'x':
|
2011-08-24 20:05:13 +00:00
|
|
|
rtld_printf("%p", needed->obj ? needed->obj->mapbase :
|
|
|
|
0);
|
1998-05-01 08:39:27 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
++fmt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
1998-09-02 02:00:20 +00:00
|
|
|
|
1999-08-30 01:48:19 +00:00
|
|
|
/*
|
2000-01-09 21:13:48 +00:00
|
|
|
* Unload a dlopened object and its dependencies from memory and from
|
|
|
|
* our data structures. It is assumed that the DAG rooted in the
|
|
|
|
* object has already been unreferenced, and that the object has a
|
|
|
|
* reference count of 0.
|
1999-08-30 01:48:19 +00:00
|
|
|
*/
|
1999-07-09 16:22:55 +00:00
|
|
|
static void
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_object(Obj_Entry *root, RtldLockState *lockstate)
|
1999-07-09 16:22:55 +00:00
|
|
|
{
|
2016-12-22 17:44:27 +00:00
|
|
|
Obj_Entry marker, *obj, *next;
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
assert(root->refcount == 0);
|
2000-01-09 21:13:48 +00:00
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
/*
|
|
|
|
* Pass over the DAG removing unreferenced objects from
|
|
|
|
* appropriate lists.
|
|
|
|
*/
|
|
|
|
unlink_object(root);
|
|
|
|
|
|
|
|
/* Unmap all objects that are no longer referenced. */
|
2016-12-22 17:44:27 +00:00
|
|
|
for (obj = TAILQ_FIRST(&obj_list); obj != NULL; obj = next) {
|
|
|
|
next = TAILQ_NEXT(obj, next);
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker || obj->refcount != 0)
|
|
|
|
continue;
|
|
|
|
LD_UTRACE(UTRACE_UNLOAD_OBJECT, obj, obj->mapbase,
|
|
|
|
obj->mapsize, 0, obj->path);
|
|
|
|
dbg("unloading \"%s\"", obj->path);
|
2016-12-22 17:37:39 +00:00
|
|
|
/*
|
|
|
|
* Unlink the object now to prevent new references from
|
|
|
|
* being acquired while the bind lock is dropped in
|
|
|
|
* recursive dlclose() invocations.
|
|
|
|
*/
|
2016-01-20 07:21:33 +00:00
|
|
|
TAILQ_REMOVE(&obj_list, obj, next);
|
|
|
|
obj_count--;
|
2016-12-22 17:37:39 +00:00
|
|
|
|
2016-12-22 17:44:27 +00:00
|
|
|
if (obj->filtees_loaded) {
|
|
|
|
if (next != NULL) {
|
|
|
|
init_marker(&marker);
|
|
|
|
TAILQ_INSERT_BEFORE(next, &marker, next);
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_filtees(obj, lockstate);
|
2016-12-22 17:44:27 +00:00
|
|
|
next = TAILQ_NEXT(&marker, next);
|
|
|
|
TAILQ_REMOVE(&obj_list, &marker, next);
|
|
|
|
} else
|
2017-03-09 21:05:47 +00:00
|
|
|
unload_filtees(obj, lockstate);
|
2016-12-22 17:44:27 +00:00
|
|
|
}
|
2016-12-22 17:37:39 +00:00
|
|
|
release_object(obj);
|
2016-01-20 07:21:33 +00:00
|
|
|
}
|
1999-07-09 16:22:55 +00:00
|
|
|
}
|
|
|
|
|
1998-09-02 02:00:20 +00:00
|
|
|
static void
|
2003-02-17 20:58:27 +00:00
|
|
|
unlink_object(Obj_Entry *root)
|
1998-09-02 02:00:20 +00:00
|
|
|
{
|
2003-02-10 23:15:07 +00:00
|
|
|
Objlist_Entry *elm;
|
1999-08-20 22:33:44 +00:00
|
|
|
|
2003-02-10 23:15:07 +00:00
|
|
|
if (root->refcount == 0) {
|
|
|
|
/* Remove the object from the RTLD_GLOBAL list. */
|
|
|
|
objlist_remove(&list_global, root);
|
|
|
|
|
|
|
|
/* Remove the object from all objects' DAG lists. */
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link) {
|
2003-02-10 23:15:07 +00:00
|
|
|
objlist_remove(&elm->obj->dldags, root);
|
2003-05-08 01:31:36 +00:00
|
|
|
if (elm->obj != root)
|
|
|
|
unlink_object(elm->obj);
|
|
|
|
}
|
2003-02-10 23:15:07 +00:00
|
|
|
}
|
2003-05-08 01:31:36 +00:00
|
|
|
}
|
2003-02-17 20:58:27 +00:00
|
|
|
|
2003-05-08 01:31:36 +00:00
|
|
|
static void
|
|
|
|
ref_dag(Obj_Entry *root)
|
|
|
|
{
|
|
|
|
Objlist_Entry *elm;
|
|
|
|
|
2010-11-04 09:29:00 +00:00
|
|
|
assert(root->dag_inited);
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link)
|
2003-05-08 01:31:36 +00:00
|
|
|
elm->obj->refcount++;
|
2003-02-17 20:58:27 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
unref_dag(Obj_Entry *root)
|
|
|
|
{
|
2003-05-08 01:31:36 +00:00
|
|
|
Objlist_Entry *elm;
|
2003-02-17 20:58:27 +00:00
|
|
|
|
2010-11-04 09:29:00 +00:00
|
|
|
assert(root->dag_inited);
|
2005-12-18 19:43:33 +00:00
|
|
|
STAILQ_FOREACH(elm, &root->dagmembers, link)
|
2003-05-08 01:31:36 +00:00
|
|
|
elm->obj->refcount--;
|
1998-09-02 02:00:20 +00:00
|
|
|
}
/*
 * Common code for MD __tls_get_addr().
 */
static void *tls_get_addr_slow(Elf_Addr **, int, size_t) __noinline;
static void *
tls_get_addr_slow(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *newdtv, *dtv;
    RtldLockState lockstate;
    int to_copy;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (dtv[0] != tls_dtv_generation) {
        wlock_acquire(rtld_bind_lock, &lockstate);
        newdtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
        to_copy = dtv[1];
        if (to_copy > tls_max_index)
            to_copy = tls_max_index;
        memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr));
        newdtv[0] = tls_dtv_generation;
        newdtv[1] = tls_max_index;
        free(dtv);
        lock_release(rtld_bind_lock, &lockstate);
        dtv = *dtvp = newdtv;
    }

    /* Dynamically allocate module TLS if necessary */
    if (dtv[index + 1] == 0) {
        /* Signal safe, wlock will block out signals. */
        wlock_acquire(rtld_bind_lock, &lockstate);
        if (!dtv[index + 1])
            dtv[index + 1] = (Elf_Addr)allocate_module_tls(index);
        lock_release(rtld_bind_lock, &lockstate);
    }
    return ((void *)(dtv[index + 1] + offset));
}

void *
tls_get_addr_common(Elf_Addr **dtvp, int index, size_t offset)
{
    Elf_Addr *dtv;

    dtv = *dtvp;
    /* Check dtv generation in case new modules have arrived */
    if (__predict_true(dtv[0] == tls_dtv_generation &&
      dtv[index + 1] != 0))
        return ((void *)(dtv[index + 1] + offset));
    return (tls_get_addr_slow(dtvp, index, offset));
}
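/*
 * DTV layout assumed by the two functions above: dtv[0] holds the generation
 * count, dtv[1] the number of module slots, and dtv[index + 1] the base
 * address of the TLS block for the module with the given index (so module
 * slots start at dtv[2]).  tls_get_addr_common() is the fast path; it falls
 * back to tls_get_addr_slow() only when the DTV is stale or the module's
 * block has not been allocated yet.
 */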
2015-03-31 09:51:19 +00:00
|
|
|
#if defined(__aarch64__) || defined(__arm__) || defined(__mips__) || \
|
2015-12-24 15:47:51 +00:00
|
|
|
defined(__powerpc__) || defined(__riscv__)
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate Static TLS using the Variant I method.
|
|
|
|
*/
|
|
|
|
void *
|
2006-09-01 06:13:16 +00:00
|
|
|
allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign)
|
2004-08-03 08:51:00 +00:00
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
2006-09-01 06:13:16 +00:00
|
|
|
char *tcb;
|
|
|
|
Elf_Addr **tls;
|
|
|
|
Elf_Addr *dtv;
|
2004-08-03 08:51:00 +00:00
|
|
|
Elf_Addr addr;
|
|
|
|
int i;
|
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE)
|
|
|
|
return (oldtcb);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
assert(tcbsize >= TLS_TCB_SIZE);
|
2012-03-22 14:20:51 +00:00
|
|
|
tcb = xcalloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize);
|
2006-09-01 06:13:16 +00:00
|
|
|
tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
if (oldtcb != NULL) {
|
|
|
|
memcpy(tls, oldtcb, tls_static_space);
|
|
|
|
free(oldtcb);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
/* Adjust the DTV. */
|
|
|
|
dtv = tls[0];
|
|
|
|
for (i = 0; i < dtv[1]; i++) {
|
|
|
|
if (dtv[i+2] >= (Elf_Addr)oldtcb &&
|
|
|
|
dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) {
|
|
|
|
dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls;
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2012-03-22 14:20:51 +00:00
|
|
|
dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
|
2006-09-01 06:13:16 +00:00
|
|
|
tls[0] = dtv;
|
|
|
|
dtv[0] = tls_dtv_generation;
|
|
|
|
dtv[1] = tls_max_index;
|
|
|
|
|
2016-01-20 07:21:33 +00:00
|
|
|
for (obj = globallist_curr(objs); obj != NULL;
|
|
|
|
obj = globallist_next(obj)) {
|
2010-02-16 02:48:11 +00:00
|
|
|
if (obj->tlsoffset > 0) {
|
2004-08-03 08:51:00 +00:00
|
|
|
addr = (Elf_Addr)tls + obj->tlsoffset;
|
2010-02-16 02:48:11 +00:00
|
|
|
if (obj->tlsinitsize > 0)
|
|
|
|
memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
|
|
|
|
if (obj->tlssize > obj->tlsinitsize)
|
|
|
|
memset((void*) (addr + obj->tlsinitsize), 0,
|
|
|
|
obj->tlssize - obj->tlsinitsize);
|
2004-08-03 08:51:00 +00:00
|
|
|
dtv[obj->tlsindex + 1] = addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
return (tcb);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2006-09-01 06:13:16 +00:00
|
|
|
free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
|
2004-08-03 08:51:00 +00:00
|
|
|
{
|
2006-09-01 06:13:16 +00:00
|
|
|
Elf_Addr *dtv;
|
2004-08-03 08:51:00 +00:00
|
|
|
Elf_Addr tlsstart, tlsend;
|
2006-09-01 06:13:16 +00:00
|
|
|
int dtvsize, i;
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
assert(tcbsize >= TLS_TCB_SIZE);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2006-09-01 06:13:16 +00:00
|
|
|
tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE;
|
|
|
|
tlsend = tlsstart + tls_static_space;
|
|
|
|
|
|
|
|
dtv = *(Elf_Addr **)tlsstart;
|
2004-08-03 08:51:00 +00:00
|
|
|
dtvsize = dtv[1];
|
|
|
|
for (i = 0; i < dtvsize; i++) {
|
2006-09-01 06:13:16 +00:00
|
|
|
if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) {
|
|
|
|
free((void*)dtv[i+2]);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
}
|
2006-09-01 06:13:16 +00:00
|
|
|
free(dtv);
|
|
|
|
free(tcb);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2012-02-14 00:16:34 +00:00
|
|
|
#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__)
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate Static TLS using the Variant II method.
|
|
|
|
*/
|
|
|
|
void *
|
|
|
|
allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign)
|
|
|
|
{
|
|
|
|
Obj_Entry *obj;
|
2013-12-06 21:39:45 +00:00
|
|
|
size_t size, ralign;
|
2004-08-03 08:51:00 +00:00
|
|
|
char *tls;
|
|
|
|
Elf_Addr *dtv, *olddtv;
|
|
|
|
Elf_Addr segbase, oldsegbase, addr;
|
|
|
|
int i;
|
|
|
|
|
2013-12-06 21:39:45 +00:00
|
|
|
ralign = tcbalign;
|
|
|
|
if (tls_static_max_align > ralign)
|
|
|
|
ralign = tls_static_max_align;
|
|
|
|
size = round(tls_static_space, ralign) + round(tcbsize, ralign);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
assert(tcbsize >= 2*sizeof(Elf_Addr));
|
2013-12-06 21:39:45 +00:00
|
|
|
tls = malloc_aligned(size, ralign);
|
2012-03-22 14:20:51 +00:00
|
|
|
dtv = xcalloc(tls_max_index + 2, sizeof(Elf_Addr));
|
2004-08-03 08:51:00 +00:00
|
|
|
|
2013-12-06 21:39:45 +00:00
|
|
|
segbase = (Elf_Addr)(tls + round(tls_static_space, ralign));
|
2004-08-03 08:51:00 +00:00
|
|
|
((Elf_Addr*)segbase)[0] = segbase;
|
|
|
|
((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv;
|
|
|
|
|
|
|
|
dtv[0] = tls_dtv_generation;
|
|
|
|
dtv[1] = tls_max_index;
|
|
|
|
|
|
|
|
if (oldtls) {
|
|
|
|
/*
|
|
|
|
* Copy the static TLS block over whole.
|
|
|
|
*/
|
|
|
|
oldsegbase = (Elf_Addr) oldtls;
|
|
|
|
memcpy((void *)(segbase - tls_static_space),
|
|
|
|
(const void *)(oldsegbase - tls_static_space),
|
|
|
|
tls_static_space);
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * If any dynamic TLS blocks have been created by tls_get_addr(),
|
|
|
|
* move them over.
|
|
|
|
*/
|
|
|
|
olddtv = ((Elf_Addr**)oldsegbase)[1];
|
|
|
|
for (i = 0; i < olddtv[1]; i++) {
|
|
|
|
if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) {
|
|
|
|
dtv[i+2] = olddtv[i+2];
|
|
|
|
olddtv[i+2] = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We assume that this block was the one we created with
|
|
|
|
* allocate_initial_tls().
|
|
|
|
*/
|
|
|
|
free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr));
|
|
|
|
} else {
|
2016-08-12 18:29:11 +00:00
|
|
|
for (obj = objs; obj != NULL; obj = TAILQ_NEXT(obj, next)) {
|
2016-01-20 07:21:33 +00:00
|
|
|
if (obj->marker || obj->tlsoffset == 0)
|
|
|
|
continue;
|
2004-08-03 08:51:00 +00:00
|
|
|
addr = segbase - obj->tlsoffset;
|
|
|
|
memset((void*) (addr + obj->tlsinitsize),
|
|
|
|
0, obj->tlssize - obj->tlsinitsize);
|
|
|
|
if (obj->tlsinit)
|
|
|
|
memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize);
|
|
|
|
dtv[obj->tlsindex + 1] = addr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return (void*) segbase;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
free_tls(void *tls, size_t tcbsize, size_t tcbalign)
|
|
|
|
{
|
|
|
|
Elf_Addr* dtv;
|
2013-12-06 21:39:45 +00:00
|
|
|
size_t size, ralign;
|
2004-08-03 08:51:00 +00:00
|
|
|
int dtvsize, i;
|
|
|
|
Elf_Addr tlsstart, tlsend;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Figure out the size of the initial TLS block so that we can
|
|
|
|
* find stuff which ___tls_get_addr() allocated dynamically.
|
|
|
|
*/
|
2013-12-06 21:39:45 +00:00
|
|
|
ralign = tcbalign;
|
|
|
|
if (tls_static_max_align > ralign)
|
|
|
|
ralign = tls_static_max_align;
|
|
|
|
size = round(tls_static_space, ralign);
|
2004-08-03 08:51:00 +00:00
|
|
|
|
|
|
|
dtv = ((Elf_Addr**)tls)[1];
|
|
|
|
dtvsize = dtv[1];
|
|
|
|
tlsend = (Elf_Addr) tls;
|
|
|
|
tlsstart = tlsend - size;
|
|
|
|
for (i = 0; i < dtvsize; i++) {
|
2013-12-06 21:39:45 +00:00
|
|
|
if (dtv[i + 2] != 0 && (dtv[i + 2] < tlsstart || dtv[i + 2] > tlsend)) {
|
2013-12-07 15:49:16 +00:00
|
|
|
free_aligned((void *)dtv[i + 2]);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-07 15:49:16 +00:00
|
|
|
free_aligned((void *)tlsstart);
|
2007-05-05 08:44:59 +00:00
|
|
|
free((void*) dtv);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
/*
 * Allocate TLS block for module with given index.
 */
void *
allocate_module_tls(int index)
{
    Obj_Entry *obj;
    char *p;

    TAILQ_FOREACH(obj, &obj_list, next) {
        if (obj->marker)
            continue;
        if (obj->tlsindex == index)
            break;
    }
    if (!obj) {
        _rtld_error("Can't find module with TLS index %d", index);
        rtld_die();
    }

    p = malloc_aligned(obj->tlssize, obj->tlsalign);
    memcpy(p, obj->tlsinit, obj->tlsinitsize);
    memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize);

    return p;
}

bool
allocate_tls_offset(Obj_Entry *obj)
{
    size_t off;

    if (obj->tls_done)
        return true;

    if (obj->tlssize == 0) {
        obj->tls_done = true;
        return true;
    }

    if (tls_last_offset == 0)
        off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign);
    else
        off = calculate_tls_offset(tls_last_offset, tls_last_size,
          obj->tlssize, obj->tlsalign);

    /*
     * If we have already fixed the size of the static TLS block, we
     * must stay within that size.  When allocating the static TLS, we
     * leave a small amount of space spare to be used for dynamically
     * loading modules which use static TLS.
     */
    if (tls_static_space != 0) {
        if (calculate_tls_end(off, obj->tlssize) > tls_static_space)
            return false;
    } else if (obj->tlsalign > tls_static_max_align) {
        tls_static_max_align = obj->tlsalign;
    }

    tls_last_offset = obj->tlsoffset = off;
    tls_last_size = obj->tlssize;
    obj->tls_done = true;

    return true;
}
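/*
 * Note on allocate_tls_offset() above: once the static TLS block size has
 * been fixed (tls_static_space != 0), a dlopened module only gets a static
 * offset if it still fits in the spare space reserved at startup; if it does
 * not fit, its TLS has to be reached through the dynamic path
 * (tls_get_addr_common() / allocate_module_tls()) instead.
 */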
2005-02-27 12:55:40 +00:00
|
|
|
void
|
|
|
|
free_tls_offset(Obj_Entry *obj)
|
|
|
|
{
|
2010-02-16 02:48:11 +00:00
|
|
|
|
2005-02-27 12:55:40 +00:00
|
|
|
/*
|
|
|
|
* If we were the last thing to allocate out of the static TLS
|
|
|
|
* block, we give our space back to the 'allocator'. This is a
|
|
|
|
* simplistic workaround to allow libGL.so.1 to be loaded and
|
2010-02-16 02:48:11 +00:00
|
|
|
* unloaded multiple times.
|
2005-02-27 12:55:40 +00:00
|
|
|
*/
|
|
|
|
if (calculate_tls_end(obj->tlsoffset, obj->tlssize)
|
|
|
|
== calculate_tls_end(tls_last_offset, tls_last_size)) {
|
|
|
|
tls_last_offset -= obj->tlssize;
|
|
|
|
tls_last_size = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-08-03 08:51:00 +00:00
|
|
|
void *
|
|
|
|
_rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign)
|
|
|
|
{
|
2005-03-20 23:28:25 +00:00
|
|
|
void *ret;
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2005-03-20 23:28:25 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
2016-01-20 07:21:33 +00:00
|
|
|
ret = allocate_tls(globallist_curr(TAILQ_FIRST(&obj_list)), oldtls,
|
|
|
|
tcbsize, tcbalign);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2005-03-20 23:28:25 +00:00
|
|
|
return (ret);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
_rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign)
|
|
|
|
{
|
2010-12-25 08:51:20 +00:00
|
|
|
RtldLockState lockstate;
|
2005-03-20 23:28:25 +00:00
|
|
|
|
2010-12-25 08:51:20 +00:00
|
|
|
wlock_acquire(rtld_bind_lock, &lockstate);
|
2004-08-03 08:51:00 +00:00
|
|
|
free_tls(tcb, tcbsize, tcbalign);
|
2010-12-25 08:51:20 +00:00
|
|
|
lock_release(rtld_bind_lock, &lockstate);
|
2004-08-03 08:51:00 +00:00
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
|
|
|
|
static void
|
|
|
|
object_add_name(Obj_Entry *obj, const char *name)
|
|
|
|
{
|
|
|
|
Name_Entry *entry;
|
|
|
|
size_t len;
|
|
|
|
|
|
|
|
len = strlen(name);
|
|
|
|
entry = malloc(sizeof(Name_Entry) + len);
|
|
|
|
|
|
|
|
if (entry != NULL) {
|
|
|
|
strcpy(entry->name, name);
|
|
|
|
STAILQ_INSERT_TAIL(&obj->names, entry, link);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
object_match_name(const Obj_Entry *obj, const char *name)
|
|
|
|
{
|
|
|
|
Name_Entry *entry;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(entry, &obj->names, link) {
|
|
|
|
if (strcmp(name, entry->name) == 0)
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static Obj_Entry *
|
|
|
|
locate_dependency(const Obj_Entry *obj, const char *name)
|
|
|
|
{
|
|
|
|
const Objlist_Entry *entry;
|
|
|
|
const Needed_Entry *needed;
|
|
|
|
|
|
|
|
STAILQ_FOREACH(entry, &list_main, link) {
|
|
|
|
if (object_match_name(entry->obj, name))
|
|
|
|
return entry->obj;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (needed = obj->needed; needed != NULL; needed = needed->next) {
|
2011-01-30 16:14:09 +00:00
|
|
|
if (strcmp(obj->strtab + needed->name, name) == 0 ||
|
|
|
|
(needed->obj != NULL && object_match_name(needed->obj, name))) {
|
|
|
|
/*
|
|
|
|
* If there is DT_NEEDED for the name we are looking for,
|
|
|
|
* we are all set. Note that object might not be found if
|
|
|
|
* dependency was not loaded yet, so the function can
|
|
|
|
* return NULL here. This is expected and handled
|
2011-01-30 16:21:25 +00:00
|
|
|
* properly by the caller.
|
2011-01-30 16:14:09 +00:00
|
|
|
*/
|
|
|
|
return (needed->obj);
|
|
|
|
}
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
2010-06-28 01:40:56 +00:00
|
|
|
_rtld_error("%s: Unexpected inconsistency: dependency %s not found",
|
|
|
|
obj->path, name);
|
2015-04-02 21:35:36 +00:00
|
|
|
rtld_die();
|
2005-12-18 19:43:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
check_object_provided_version(Obj_Entry *refobj, const Obj_Entry *depobj,
|
|
|
|
const Elf_Vernaux *vna)
|
|
|
|
{
|
|
|
|
const Elf_Verdef *vd;
|
|
|
|
const char *vername;
|
|
|
|
|
|
|
|
vername = refobj->strtab + vna->vna_name;
|
|
|
|
vd = depobj->verdef;
|
|
|
|
if (vd == NULL) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: version %s required by %s not defined",
|
|
|
|
depobj->path, vername, refobj->path);
|
2005-12-18 19:43:33 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
for (;;) {
|
|
|
|
if (vd->vd_version != VER_DEF_CURRENT) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
|
2005-12-18 19:43:33 +00:00
|
|
|
depobj->path, vd->vd_version);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
if (vna->vna_hash == vd->vd_hash) {
|
|
|
|
const Elf_Verdaux *aux = (const Elf_Verdaux *)
|
|
|
|
((char *)vd + vd->vd_aux);
|
|
|
|
if (strcmp(vername, depobj->strtab + aux->vda_name) == 0)
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
if (vd->vd_next == 0)
|
|
|
|
break;
|
|
|
|
vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
|
|
|
|
}
|
|
|
|
if (vna->vna_flags & VER_FLG_WEAK)
|
|
|
|
return (0);
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: version %s required by %s not found",
|
|
|
|
depobj->path, vername, refobj->path);
|
2005-12-18 19:43:33 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
rtld_verify_object_versions(Obj_Entry *obj)
|
|
|
|
{
|
|
|
|
const Elf_Verneed *vn;
|
|
|
|
const Elf_Verdef *vd;
|
|
|
|
const Elf_Verdaux *vda;
|
|
|
|
const Elf_Vernaux *vna;
|
|
|
|
const Obj_Entry *depobj;
|
|
|
|
int maxvernum, vernum;
|
|
|
|
|
2012-03-27 14:10:15 +00:00
|
|
|
if (obj->ver_checked)
|
|
|
|
return (0);
|
|
|
|
obj->ver_checked = true;
|
|
|
|
|
2005-12-18 19:43:33 +00:00
|
|
|
maxvernum = 0;
|
|
|
|
/*
|
|
|
|
* Walk over defined and required version records and figure out
|
|
|
|
* max index used by any of them. Do very basic sanity checking
|
|
|
|
* while there.
|
|
|
|
*/
|
|
|
|
vn = obj->verneed;
|
|
|
|
while (vn != NULL) {
|
|
|
|
if (vn->vn_version != VER_NEED_CURRENT) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: Unsupported version %d of Elf_Verneed entry",
|
2005-12-18 19:43:33 +00:00
|
|
|
obj->path, vn->vn_version);
|
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
|
|
|
|
for (;;) {
|
|
|
|
vernum = VER_NEED_IDX(vna->vna_other);
|
|
|
|
if (vernum > maxvernum)
|
|
|
|
maxvernum = vernum;
|
|
|
|
if (vna->vna_next == 0)
|
|
|
|
break;
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
|
|
|
|
}
|
|
|
|
if (vn->vn_next == 0)
|
|
|
|
break;
|
|
|
|
vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = obj->verdef;
|
|
|
|
while (vd != NULL) {
|
|
|
|
if (vd->vd_version != VER_DEF_CURRENT) {
|
2005-12-24 15:37:30 +00:00
|
|
|
_rtld_error("%s: Unsupported version %d of Elf_Verdef entry",
|
|
|
|
obj->path, vd->vd_version);
|
2005-12-18 19:43:33 +00:00
|
|
|
return (-1);
|
|
|
|
}
|
|
|
|
vernum = VER_DEF_IDX(vd->vd_ndx);
|
|
|
|
if (vernum > maxvernum)
|
|
|
|
maxvernum = vernum;
|
|
|
|
if (vd->vd_next == 0)
|
|
|
|
break;
|
|
|
|
vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (maxvernum == 0)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Store version information in array indexable by version index.
|
|
|
|
* Verify that object version requirements are satisfied along the
|
|
|
|
* way.
|
|
|
|
*/
|
|
|
|
obj->vernum = maxvernum + 1;
|
2012-03-22 14:20:51 +00:00
|
|
|
obj->vertab = xcalloc(obj->vernum, sizeof(Ver_Entry));
|
2005-12-18 19:43:33 +00:00
|
|
|
|
|
|
|
vd = obj->verdef;
|
|
|
|
while (vd != NULL) {
|
|
|
|
if ((vd->vd_flags & VER_FLG_BASE) == 0) {
|
|
|
|
vernum = VER_DEF_IDX(vd->vd_ndx);
|
|
|
|
assert(vernum <= maxvernum);
|
|
|
|
vda = (const Elf_Verdaux *)((char *)vd + vd->vd_aux);
|
|
|
|
obj->vertab[vernum].hash = vd->vd_hash;
|
|
|
|
obj->vertab[vernum].name = obj->strtab + vda->vda_name;
|
|
|
|
obj->vertab[vernum].file = NULL;
|
|
|
|
obj->vertab[vernum].flags = 0;
|
|
|
|
}
|
|
|
|
if (vd->vd_next == 0)
|
|
|
|
break;
|
|
|
|
vd = (const Elf_Verdef *) ((char *)vd + vd->vd_next);
|
|
|
|
}
|
|
|
|
|
|
|
|
vn = obj->verneed;
|
|
|
|
while (vn != NULL) {
|
|
|
|
depobj = locate_dependency(obj, obj->strtab + vn->vn_file);
|
2011-01-30 16:14:09 +00:00
|
|
|
if (depobj == NULL)
|
|
|
|
return (-1);
|
2005-12-18 19:43:33 +00:00
|
|
|
vna = (const Elf_Vernaux *) ((char *)vn + vn->vn_aux);
|
|
|
|
for (;;) {
|
|
|
|
if (check_object_provided_version(obj, depobj, vna))
|
|
|
|
return (-1);
|
|
|
|
vernum = VER_NEED_IDX(vna->vna_other);
|
|
|
|
assert(vernum <= maxvernum);
|
|
|
|
obj->vertab[vernum].hash = vna->vna_hash;
|
|
|
|
obj->vertab[vernum].name = obj->strtab + vna->vna_name;
|
|
|
|
obj->vertab[vernum].file = obj->strtab + vn->vn_file;
|
|
|
|
obj->vertab[vernum].flags = (vna->vna_other & VER_NEED_HIDDEN) ?
|
|
|
|
VER_INFO_HIDDEN : 0;
|
|
|
|
if (vna->vna_next == 0)
|
|
|
|
break;
|
|
|
|
vna = (const Elf_Vernaux *) ((char *)vna + vna->vna_next);
|
|
|
|
}
|
|
|
|
if (vn->vn_next == 0)
|
|
|
|
break;
|
|
|
|
vn = (const Elf_Verneed *) ((char *)vn + vn->vn_next);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
rtld_verify_versions(const Objlist *objlist)
|
|
|
|
{
|
|
|
|
Objlist_Entry *entry;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
rc = 0;
|
|
|
|
STAILQ_FOREACH(entry, objlist, link) {
|
|
|
|
/*
|
|
|
|
* Skip dummy objects or objects that have their version requirements
|
|
|
|
* already checked.
|
|
|
|
*/
|
|
|
|
if (entry->obj->strtab == NULL || entry->obj->vertab != NULL)
|
|
|
|
continue;
|
|
|
|
if (rtld_verify_object_versions(entry->obj) == -1) {
|
|
|
|
rc = -1;
|
|
|
|
if (ld_tracing == NULL)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2007-04-07 23:17:00 +00:00
|
|
|
if (rc == 0 || ld_tracing != NULL)
|
|
|
|
rc = rtld_verify_object_versions(&obj_rtld);
|
2005-12-18 19:43:33 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
|
|
|
const Ver_Entry *
|
|
|
|
fetch_ventry(const Obj_Entry *obj, unsigned long symnum)
|
|
|
|
{
|
|
|
|
Elf_Versym vernum;
|
|
|
|
|
|
|
|
if (obj->vertab) {
|
|
|
|
vernum = VER_NDX(obj->versyms[symnum]);
|
|
|
|
if (vernum >= obj->vernum) {
|
|
|
|
_rtld_error("%s: symbol %s has wrong verneed value %d",
|
|
|
|
obj->path, obj->strtab + symnum, vernum);
|
|
|
|
} else if (obj->vertab[vernum].hash != 0) {
|
|
|
|
return &obj->vertab[vernum];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
int
_rtld_get_stack_prot(void)
{

    return (stack_prot);
}

int
_rtld_is_dlopened(void *arg)
{
    Obj_Entry *obj;
    RtldLockState lockstate;
    int res;

    rlock_acquire(rtld_bind_lock, &lockstate);
    obj = dlcheck(arg);
    if (obj == NULL)
        obj = obj_from_addr(arg);
    if (obj == NULL) {
        _rtld_error("No shared object contains address");
        lock_release(rtld_bind_lock, &lockstate);
        return (-1);
    }
    res = obj->dlopened ? 1 : 0;
    lock_release(rtld_bind_lock, &lockstate);
    return (res);
}

int
obj_enforce_relro(Obj_Entry *obj)
{

    if (obj->relro_size > 0 && mprotect(obj->relro_page, obj->relro_size,
      PROT_READ) == -1) {
        _rtld_error("%s: Cannot enforce relro protection: %s",
          obj->path, rtld_strerror(errno));
        return (-1);
    }
    return (0);
}
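/*
 * obj_enforce_relro() above implements PT_GNU_RELRO handling: after the
 * object's relocations have been applied, the designated region
 * (relro_page, relro_size bytes) is remapped read-only so that resolved
 * relocation targets and similar data cannot be silently overwritten later.
 */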
static void
map_stacks_exec(RtldLockState *lockstate)
{
    void (*thr_map_stacks_exec)(void);

    if ((max_stack_flags & PF_X) == 0 || (stack_prot & PROT_EXEC) != 0)
        return;
    thr_map_stacks_exec = (void (*)(void))(uintptr_t)
      get_program_var_addr("__pthread_map_stacks_exec", lockstate);
    if (thr_map_stacks_exec != NULL) {
        stack_prot |= PROT_EXEC;
        thr_map_stacks_exec();
    }
}
2010-12-25 08:51:20 +00:00
|
|
|
void
|
|
|
|
symlook_init(SymLook *dst, const char *name)
|
|
|
|
{
|
|
|
|
|
|
|
|
bzero(dst, sizeof(*dst));
|
|
|
|
dst->name = name;
|
|
|
|
dst->hash = elf_hash(name);
|
2012-04-30 13:31:10 +00:00
|
|
|
dst->hash_gnu = gnu_hash(name);
|
2010-12-25 08:51:20 +00:00
|
|
|
}
static void
symlook_init_from_req(SymLook *dst, const SymLook *src)
{

	dst->name = src->name;
	dst->hash = src->hash;
	dst->hash_gnu = src->hash_gnu;
	dst->ventry = src->ventry;
	dst->flags = src->flags;
	dst->defobj_out = NULL;
	dst->sym_out = NULL;
	dst->lockstate = src->lockstate;
}
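
/*
 * Open the binary that rtld was asked to execute.  If "search_in_path" is
 * true and "argv0" contains no '/', try each directory listed in the PATH
 * environment variable; otherwise open "argv0" as given.  On failure a
 * diagnostic is printed and rtld exits.
 */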
static int
open_binary_fd(const char *argv0, bool search_in_path)
{
	char *pathenv, *pathenv_orig, *pe, binpath[PATH_MAX];
	int fd;

	if (search_in_path && strchr(argv0, '/') == NULL) {
		pathenv = getenv("PATH");
		if (pathenv == NULL) {
			rtld_printf("-p and no PATH environment variable\n");
			rtld_die();
		}
		pathenv = strdup(pathenv);
		if (pathenv == NULL) {
			rtld_printf("Cannot allocate memory\n");
			rtld_die();
		}
		/* strsep() advances pathenv, so remember the allocation. */
		pathenv_orig = pathenv;
		fd = -1;
		errno = ENOENT;
		while ((pe = strsep(&pathenv, ":")) != NULL) {
			if (strlcpy(binpath, pe, sizeof(binpath)) >=
			    sizeof(binpath))
				continue;
			if (binpath[0] != '\0' &&
			    strlcat(binpath, "/", sizeof(binpath)) >=
			    sizeof(binpath))
				continue;
			if (strlcat(binpath, argv0, sizeof(binpath)) >=
			    sizeof(binpath))
				continue;
			fd = open(binpath, O_RDONLY | O_CLOEXEC | O_VERIFY);
			if (fd != -1 || errno != ENOENT)
				break;
		}
		free(pathenv_orig);
	} else {
		fd = open(argv0, O_RDONLY | O_CLOEXEC | O_VERIFY);
	}

	if (fd == -1) {
		rtld_printf("Opening %s: %s\n", argv0,
		    rtld_strerror(errno));
		rtld_die();
	}
	return (fd);
}

/*
 * Parse a set of command-line arguments.
 */
static int
parse_args(char* argv[], int argc, bool *use_pathp, int *fdp)
{
	const char *arg;
	int fd, i, j, arglen;
	char opt;

	dbg("Parsing command-line arguments");
	*use_pathp = false;
	*fdp = -1;

	for (i = 1; i < argc; i++) {
		arg = argv[i];
		dbg("argv[%d]: '%s'", i, arg);

		/*
		 * rtld arguments end with an explicit "--" or with the first
		 * non-prefixed argument.
		 */
		if (strcmp(arg, "--") == 0) {
			i++;
			break;
		}
		if (arg[0] != '-')
			break;

		/*
		 * All other arguments are single-character options that can
		 * be combined, so we need to search through `arg` for them.
		 */
		arglen = strlen(arg);
		for (j = 1; j < arglen; j++) {
			opt = arg[j];
			if (opt == 'h') {
				print_usage(argv[0]);
				rtld_die();
			} else if (opt == 'f') {
				/*
				 * -f XX can be used to specify a descriptor
				 * for the binary named at the command line
				 * (i.e., the later argument will specify the
				 * process name but the descriptor is what
				 * will actually be executed).
				 */
				if (j != arglen - 1) {
					/*
					 * -f must be the last option in the
					 * group, e.g., -abcf.
					 */
					_rtld_error("invalid options: %s",
					    arg);
					rtld_die();
				}
				i++;
				/* Guard against "-f" with no descriptor. */
				if (i >= argc) {
					_rtld_error("-f requires an argument");
					rtld_die();
				}
				fd = parse_integer(argv[i]);
				if (fd == -1) {
					_rtld_error(
					    "invalid file descriptor: '%s'",
					    argv[i]);
					rtld_die();
				}
				*fdp = fd;
				break;
			} else if (opt == 'p') {
				*use_pathp = true;
			} else {
				rtld_printf("invalid argument: '%s'\n", arg);
				print_usage(argv[0]);
				rtld_die();
			}
		}
	}

	return (i);
}
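
/*
 * For illustration: when rtld is executed directly (it is typically
 * installed as /libexec/ld-elf.so.1 on FreeBSD), the parser above accepts
 * invocations such as
 *
 *	ld-elf.so.1 /bin/ls -l
 *	ld-elf.so.1 -p ls -l
 *	ld-elf.so.1 -f 3 -- /bin/ls -l
 *
 * Everything starting at the first non-option word (or after "--") is
 * passed unchanged to the program being executed.
 */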

/*
 * Parse a file descriptor number without pulling in more of libc (e.g. atoi).
 */
static int
parse_integer(const char *str)
{
	static const int RADIX = 10;	/* XXXJA: possibly support hex? */
	const char *orig;
	int n;
	char c;

	orig = str;
	n = 0;
	for (c = *str; c != '\0'; c = *++str) {
		if (c < '0' || c > '9')
			return (-1);

		n *= RADIX;
		n += c - '0';
	}

	/* Make sure we actually parsed something. */
	if (str == orig)
		return (-1);
	return (n);
}
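
/*
 * For illustration: parse_integer("5") yields 5 and parse_integer("123")
 * yields 123, while parse_integer("") and parse_integer("12x") both yield
 * -1.  No overflow checking is performed.
 */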

static void
print_usage(const char *argv0)
{

	rtld_printf("Usage: %s [-h] [-f <FD>] [-p] [--] <binary> [<args>]\n"
	    "\n"
	    "Options:\n"
	    "  -h        Display this help message\n"
	    "  -p        Search in PATH for named binary\n"
	    "  -f <FD>   Execute <FD> instead of searching for <binary>\n"
	    "  --        End of RTLD options\n"
	    "  <binary>  Name of process to execute\n"
	    "  <args>    Arguments to the executed process\n", argv0);
}

/*
 * Overrides for libc_pic-provided functions.
 */
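
/*
 * Return the kernel's osreldate, querying the kern.osreldate sysctl once
 * and caching the result in "osreldate".
 */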
int
__getosreldate(void)
{
	size_t len;
	int oid[2];
	int error, osrel;

	if (osreldate != 0)
		return (osreldate);

	oid[0] = CTL_KERN;
	oid[1] = KERN_OSRELDATE;
	osrel = 0;
	len = sizeof(osrel);
	error = sysctl(oid, 2, &osrel, &len, NULL, 0);
	if (error == 0 && osrel > 0 && len == sizeof(osrel))
		osreldate = osrel;
	return (osreldate);
}

void
exit(int status)
{

	_exit(status);
}

void (*__cleanup)(void);
int __isthreaded = 0;
int _thread_autoinit_dummy_decl = 1;

/*
 * No unresolved symbols for rtld.
 */
void
__pthread_cxa_finalize(struct dl_phdr_info *a)
{
}

void
__stack_chk_fail(void)
{

	_rtld_error("stack overflow detected; terminated");
	rtld_die();
}
__weak_reference(__stack_chk_fail, __stack_chk_fail_local);

void
__chk_fail(void)
{

	_rtld_error("buffer overflow detected; terminated");
	rtld_die();
}
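
/*
 * Minimal strerror() replacement used for rtld's own diagnostics; it
 * indexes the statically linked sys_errlist table directly.
 */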
const char *
rtld_strerror(int errnum)
{

	if (errnum < 0 || errnum >= sys_nerr)
		return ("Unknown error");
	return (sys_errlist[errnum]);
}